From dcf9247bc8a257f60699cefd9f0dd3a74638137f Mon Sep 17 00:00:00 2001
From: Vitaliy Urusovskij
Date: Wed, 13 Dec 2023 18:45:19 +0400
Subject: [PATCH] Clean up of `tests/ov_helpers` leftovers (#21416)

* Clean up ov_helpers headers from ngraph/
* Move `ngraph/` includes from ov_helpers to tests
* Remove include of all opsets in builders.hpp
* Remove opsets includes from ov_helpers
* Fix GNA tests
* Delete comments
* ClangFormat
* Fix build
* Fix `-fpermissive`
* Fix build #2
* Fix `<` && `>` in includes
* Fix build #3
* Build fix
---
 .../infer_request_dynamic.cpp | 6 +-
 .../infer_request_dynamic.cpp | 10 +-
 .../behavior/plugin/caching_tests.cpp | 26 +-
 .../execution_graph_tests/add_output.cpp | 2 +-
 .../concat_transformation.cpp | 2 +-
 .../depth_to_space_transformation.cpp | 7 +-
 .../single_layer_tests/depth_to_space.cpp | 6 +-
 .../single_layer_tests/psroi_pooling.cpp | 4 +-
 .../functional/single_layer_tests/range.cpp | 10 +-
 .../subgraph_tests/src/concat_sdp.cpp | 2 +-
 .../subgraph_tests/src/rotary_pos_emb.cpp | 56 +--
 .../grid_sample_shape_inference_test.cpp | 6 +-
 .../x64/convert_to_interaction.cpp | 82 ++--
 .../engines/gna/i16_quantisation_test.cpp | 103 ++--
 .../cnn_ngraph_impl_tests.cpp | 142 +++--
 .../import_export_act_conv_act.cpp | 6 +-
 .../import_export_batch_size.cpp | 6 +-
 .../import_export_memory_layer.cpp | 10 +-
 .../import_export_multi_inputs.cpp | 2 +-
 .../import_reshape_permute_conv.cpp | 18 +-
 .../functional/pass_tests/4d_eltwise.cpp | 22 +-
 .../pass_tests/act_maxpool_reordering.cpp | 2 +-
 .../pass_tests/broadcast_const_with_fq.cpp | 4 +-
 .../pass_tests/concat_memory_param.cpp | 13 +-
 .../pass_tests/concat_restrictions.cpp | 32 +-
 .../pass_tests/conv_with_padding.cpp | 18 +-
 .../convert_dwsc_to_scaleshifts.cpp | 6 +-
 .../convert_matmul_to_fullyconnected.cpp | 20 +-
 .../convert_matmul_to_pointwise_conv.cpp | 65 +--
 .../convert_padded_to_valid_conv.cpp | 1 +
 .../pass_tests/convolution_align_filter.cpp | 21 +-
 .../pass_tests/convolution_crop_axis_h.cpp | 4 +-
 .../pass_tests/decompose_2d_conv.cpp | 6 +-
 .../functional/pass_tests/decompose_mvn.cpp | 2 +
 .../pass_tests/diagonal_insertion_test.cpp | 28 +-
 .../functional/pass_tests/fq_activation.cpp | 28 +-
 .../fq_fusion_with_multiple_weights.cpp | 71 ++-
 .../pass_tests/fq_fusion_with_sigmoid.cpp | 4 +-
 .../pass_tests/fq_maxpool_reordering.cpp | 62 +--
 .../pass_tests/fq_outputs_activation_.cpp | 14 +-
 .../fq_with_multiple_out_connections.cpp | 44 +-
 .../insert_copy_layer_before_self_concat.cpp | 6 +-
 .../insert_transpose_before_matmul.cpp | 25 +-
 .../insert_transpose_between_convs.cpp | 28 +-
 .../pass_tests/layers_restrictions.cpp | 4 +-
 .../remove_permutations_NHWC_to_NCHW_pass.cpp | 111 ++---
 .../preprocess_tests/precision_convert.cpp | 4 +-
 .../add_overload_correction.cpp | 37 +-
 .../scale_factors_tests/const_input_add.cpp | 2 +-
 .../scale_factors_tests/eltwise_act_fq.cpp | 33 +-
 .../matmul_overload_correction.cpp | 53 +-
 .../perchannel_quant_test.cpp | 19 +-
 .../test_fq_scale_factors.cpp | 23 +-
 .../weighable_layer_without_fq.cpp | 31 +-
 .../ov_executable_network/get_metric.cpp | 16 +-
 .../execution_graph_tests/add_output.cpp | 4 +-
 .../single_layer_tests/activation.cpp | 11 +-
 .../single_layer_tests/gru_cell.cpp | 26 +-
 .../single_layer_tests/gru_sequence.cpp | 32 +-
 .../single_layer_tests/lstm_sequence.cpp | 34 +-
 .../gna_executable_network_metrics_test.cpp | 8 +-
 .../unit/gna_get_aligned_split_sizes.cpp | 16 +-
 .../tests/unit/gna_hw_precision_test.cpp | 8 +-
 .../tests/unit/gna_infer_request_test.cpp | 2 +-
.../tests/unit/gna_memory_alignment.cpp | 8 +- .../tests/unit/gna_memory_compact_test.cpp | 19 +- .../unit/gna_plugin_load_network_test.cpp | 6 +- .../intel_gna/tests/unit/ops/util_test.cpp | 4 +- .../gather_sinking_test_utils.hpp | 4 +- .../transformations/gna_broadcast_const.cpp | 48 +- .../gna_convert_dwsc_to_scaleshifts.cpp | 220 ++++----- ...onvert_matmul_to_pointwise_convolution.cpp | 81 ++- ...na_convert_padded_to_valid_convolution.cpp | 177 ++++--- .../gna_decompose_2d_convolution.cpp | 339 +++++++------ .../transformations/gna_decompose_mvn.cpp | 221 +++++---- .../gna_handle_transposes_around_matmul.cpp | 232 +++++---- .../transformations/gna_insert_copy_layer.cpp | 460 +++++++++--------- .../gna_insert_reshape_around_matmul.cpp | 62 +-- ...transpose_after_convolution_or_pooling.cpp | 284 ++++++----- .../tests/unit/transformations/gna_pwl.cpp | 54 +- .../transformations/gna_remove_convert.cpp | 68 +-- .../gna_remove_extra_reshapes.cpp | 51 +- .../gna_remove_single_input_concat.cpp | 17 +- .../gna_reorder_activation_and_pooling.cpp | 341 +++++++------ ...lit_convolution_with_large_buffer_size.cpp | 65 ++- .../transformations/gna_split_eltwise.cpp | 52 +- .../gna_substitute_softsign.cpp | 63 ++- .../transformations/gna_swap_input_matmul.cpp | 61 +-- .../gna_unfuse_reshape_and_transpose.cpp | 90 ++-- .../infer_request_dynamic.cpp | 2 +- .../concat_transformation.cpp | 2 +- .../depth_to_space_transformation.cpp | 7 +- .../pad_transformation.cpp | 20 +- .../single_layer_tests/broadcast.cpp | 2 +- .../single_layer_tests/convolution.cpp | 2 +- .../convolution_backprop_data.cpp | 4 +- .../single_layer_tests/group_convolution.cpp | 8 +- .../single_layer_tests/gru_sequence.cpp | 6 +- .../single_layer_tests/interpolate.cpp | 80 +-- .../single_layer_tests/lstm_sequence.cpp | 6 +- .../single_layer_tests/matrix_nms.cpp | 10 +- .../single_layer_tests/normalize_l2.cpp | 6 +- .../single_layer_tests/pooling.cpp | 32 +- .../single_layer_tests/rnn_sequence.cpp | 6 +- .../scatter_elements_update.cpp | 1 - .../quantized_convolution_backprop_data.cpp | 4 +- ...ntized_group_convolution_backprop_data.cpp | 4 +- .../dynamic/depth_to_space.cpp | 12 +- .../behavior/infer_request/io_blob.hpp | 6 +- .../behavior/plugin/core_integration.hpp | 28 +- .../include/behavior/plugin/preprocessing.hpp | 10 +- .../behavior/plugin/set_preprocess.hpp | 66 +-- .../concat_transformation.hpp | 4 +- .../depth_to_space_transformation.hpp | 2 +- .../pad_transformation.hpp | 2 +- .../include/single_layer_tests/loop.hpp | 26 +- .../include/subgraph_tests/basic_lstm.hpp | 2 +- .../executable_network/exec_graph_info.cpp | 4 +- .../behavior/infer_request/memory_states.cpp | 24 +- .../infer_request/set_io_blob_precision.cpp | 4 +- .../ov_infer_request/batched_tensors.cpp | 6 +- .../behavior/ov_infer_request/inference.cpp | 6 +- .../src/behavior/plugin/caching_tests.cpp | 14 +- .../src/execution_graph_tests/keep_assign.cpp | 13 +- .../nms_transformation_for_last_node.cpp | 6 +- .../num_inputs_fusing_bin_conv.cpp | 8 +- .../remove_parameter.cpp | 4 +- .../runtime_precision.cpp | 8 +- .../depth_to_space_transformation.cpp | 12 +- .../interpolate_transformation.cpp | 2 +- .../output_layers_concat.cpp | 16 +- .../output_layers_concat_multi_channel.cpp | 16 +- ...put_layers_handling_in_transformations.cpp | 10 +- .../pad_transformation.cpp | 6 +- .../reduce_max_transformation.cpp | 2 +- .../reduce_mean_transformation.cpp | 2 +- .../reduce_min_transformation.cpp | 2 +- .../reduce_sum_transformation.cpp | 2 +- 
.../shared/src/snippets/codegen_bert.cpp | 18 +- .../fake_quantize_decomposition_test.cpp | 4 +- .../single_layer/binary_convolution.hpp | 2 +- .../single_layer/broadcast.hpp | 2 +- .../single_layer/convolution.hpp | 2 +- .../single_layer/convolution_backprop.hpp | 2 +- .../convolution_backprop_data.hpp | 2 +- .../single_layer/deformable_convolution.hpp | 2 +- .../single_layer/depth_to_space.hpp | 2 +- .../single_layer/detection_output.hpp | 4 +- .../single_layer/extract_image_patches.hpp | 2 +- .../single_layer/fake_quantize.hpp | 2 +- .../single_layer/grid_sample.hpp | 6 +- .../single_layer/group_convolution.hpp | 2 +- .../group_convolution_backprop_data.hpp | 4 +- .../single_layer/group_normalization.hpp | 2 +- .../single_layer/gru_sequence.hpp | 2 +- .../single_layer/lstm_sequence.hpp | 2 +- .../single_layer/matrix_nms.hpp | 6 +- .../single_layer/memory.hpp | 8 +- .../single_layer/multiclass_nms.hpp | 2 +- .../single_layer/non_max_suppression.hpp | 4 +- .../single_layer/normalize_l2.hpp | 2 +- .../single_layer/pooling.hpp | 8 +- .../single_layer/rnn_sequence.hpp | 2 +- .../single_layer/select.hpp | 2 +- .../single_layer/space_to_depth.hpp | 2 +- .../single_layer/tensor_iterator.hpp | 2 +- .../shared_test_classes/single_layer/topk.hpp | 4 +- .../permute_concat_concat_permute.hpp | 2 +- .../src/base/utils/generate_inputs.cpp | 42 +- .../src/single_layer/activation.cpp | 2 +- .../src/single_layer/adaptive_pooling.cpp | 4 +- .../src/single_layer/batch_to_space.cpp | 2 +- .../src/single_layer/binary_convolution.cpp | 6 +- .../src/single_layer/bucketize.cpp | 8 +- .../src/single_layer/clamp.cpp | 6 +- .../src/single_layer/concat.cpp | 4 +- .../src/single_layer/constant.cpp | 4 +- .../src/single_layer/convolution.cpp | 8 +- .../src/single_layer/convolution_backprop.cpp | 12 +- .../convolution_backprop_data.cpp | 12 +- .../src/single_layer/ctc_greedy_decoder.cpp | 4 +- .../ctc_greedy_decoder_seq_len.cpp | 4 +- .../src/single_layer/ctc_loss.cpp | 4 +- .../src/single_layer/cum_sum.cpp | 8 +- .../single_layer/deformable_convolution.cpp | 18 +- .../single_layer/deformable_psroi_pooling.cpp | 6 +- .../src/single_layer/depth_to_space.cpp | 16 +- .../src/single_layer/detection_output.cpp | 6 +- .../src/single_layer/dft.cpp | 4 +- .../src/single_layer/einsum.cpp | 2 +- .../embedding_bag_offsets_sum.cpp | 6 +- .../single_layer/embedding_bag_packed_sum.cpp | 4 +- .../single_layer/embedding_segments_sum.cpp | 6 +- ...xperimental_detectron_detection_output.cpp | 8 +- .../single_layer/extract_image_patches.cpp | 10 +- .../src/single_layer/eye.cpp | 8 +- .../src/single_layer/fake_quantize.cpp | 8 +- .../src/single_layer/gather.cpp | 34 +- .../src/single_layer/gather_elements.cpp | 2 +- .../src/single_layer/gather_nd.cpp | 8 +- .../src/single_layer/gather_tree.cpp | 4 +- .../src/single_layer/grid_sample.cpp | 22 +- .../src/single_layer/grn.cpp | 4 +- .../src/single_layer/group_convolution.cpp | 8 +- .../group_convolution_backprop_data.cpp | 22 +- .../src/single_layer/gru_cell.cpp | 2 +- .../src/single_layer/gru_sequence.cpp | 12 +- .../src/single_layer/interpolate.cpp | 64 +-- .../src/single_layer/log_softmax.cpp | 4 +- .../src/single_layer/logical.cpp | 2 +- .../src/single_layer/loop.cpp | 100 ++-- .../src/single_layer/low_precision.cpp | 16 +- .../src/single_layer/lrn.cpp | 6 +- .../src/single_layer/lstm_cell.cpp | 4 +- .../src/single_layer/lstm_cell_basic.cpp | 4 +- .../src/single_layer/lstm_sequence.cpp | 14 +- .../src/single_layer/mat_mul.cpp | 4 +- .../src/single_layer/matrix_nms.cpp | 2 +- 
.../src/single_layer/minimum_maximum.cpp | 2 +- .../src/single_layer/multinomial.cpp | 4 +- .../src/single_layer/mvn.cpp | 8 +- .../src/single_layer/non_max_suppression.cpp | 15 +- .../src/single_layer/nonzero.cpp | 6 +- .../src/single_layer/normalize_l2.cpp | 6 +- .../src/single_layer/one_hot.cpp | 2 +- .../src/single_layer/pad.cpp | 2 +- .../src/single_layer/pooling.cpp | 36 +- .../src/single_layer/power.cpp | 4 +- .../src/single_layer/prior_box.cpp | 10 +- .../src/single_layer/prior_box_clustered.cpp | 10 +- .../src/single_layer/proposal.cpp | 6 +- .../src/single_layer/psroi_pooling.cpp | 4 +- .../src/single_layer/random_uniform.cpp | 4 +- .../src/single_layer/range.cpp | 8 +- .../src/single_layer/rdft.cpp | 4 +- .../src/single_layer/reduce_ops.cpp | 4 +- .../src/single_layer/region_yolo.cpp | 6 +- .../src/single_layer/reorg_yolo.cpp | 6 +- .../src/single_layer/reshape.cpp | 8 +- .../src/single_layer/result.cpp | 2 +- .../src/single_layer/reverse.cpp | 2 +- .../src/single_layer/reverse_sequence.cpp | 6 +- .../src/single_layer/rnn_cell.cpp | 2 +- .../src/single_layer/rnn_sequence.cpp | 12 +- .../src/single_layer/roi_align.cpp | 20 +- .../src/single_layer/roi_pooling.cpp | 2 +- .../src/single_layer/roll.cpp | 10 +- .../src/single_layer/scatter_ND_update.cpp | 6 +- .../single_layer/scatter_elements_update.cpp | 6 +- .../src/single_layer/scatter_update.cpp | 6 +- .../src/single_layer/select.cpp | 10 +- .../src/single_layer/shape_of.cpp | 4 +- .../src/single_layer/shuffle_channels.cpp | 2 +- .../src/single_layer/space_to_batch.cpp | 2 +- .../src/single_layer/space_to_depth.cpp | 16 +- .../src/single_layer/split.cpp | 4 +- .../src/single_layer/squeeze_unsqueeze.cpp | 4 +- .../src/single_layer/strided_slice.cpp | 2 +- .../src/single_layer/tensor_iterator.cpp | 46 +- .../src/single_layer/tile.cpp | 2 +- .../src/single_layer/topk.cpp | 16 +- .../src/single_layer/transpose.cpp | 6 +- .../src/single_layer/variadic_split.cpp | 4 +- .../src/single_op/broadcast.cpp | 2 +- .../src/single_op/convolution.cpp | 2 +- .../src/single_op/normalize_l2.cpp | 4 +- .../subgraph/activation_concats_eltwise.cpp | 4 +- .../src/subgraph/activation_fq.cpp | 4 +- .../src/subgraph/basic_lstm.cpp | 22 +- .../src/subgraph/broadcast_power.cpp | 8 +- .../src/subgraph/cascade_concat.cpp | 28 +- .../src/subgraph/clamp_fq.cpp | 8 +- .../src/subgraph/concat_conv.cpp | 12 +- .../src/subgraph/concat_multi_input.cpp | 28 +- ...ntization_during_memory_requantization.cpp | 32 +- .../src/subgraph/concat_qunatization.cpp | 26 +- .../subgraph/connect_split_concat_concat.cpp | 10 +- .../src/subgraph/const_conv_concat.cpp | 10 +- .../src/subgraph/conv_eltwise_fusion.cpp | 61 ++- .../src/subgraph/conv_fq_eltwise.cpp | 22 +- .../src/subgraph/conv_fq_relu.cpp | 22 +- .../subgraph/convolution_relu_sequence.cpp | 8 +- .../src/subgraph/copy_before_squeeze.cpp | 20 +- .../src/subgraph/delayed_copy_layer.cpp | 40 +- .../src/subgraph/eltwise_conv_eltwise.cpp | 58 +-- .../subgraph/eltwise_reshape_activation.cpp | 8 +- .../src/subgraph/fc_conv_fc.cpp | 44 +- .../subgraph/first_connect_input_concat.cpp | 6 +- .../src/subgraph/fq_conv_fq_affine.cpp | 30 +- .../src/subgraph/fq_with_mixed_levels.cpp | 6 +- .../subgraph/handling_orientation_conv.cpp | 32 +- .../src/subgraph/input_conv.cpp | 10 +- .../src/subgraph/input_split_concat.cpp | 8 +- .../src/subgraph/matmul_act_add.cpp | 8 +- .../src/subgraph/matmul_squeeze_add.cpp | 14 +- .../src/subgraph/memory_LSTMCell.cpp | 1 - .../memory_eltwise_reshape_concat.cpp | 20 +- 
.../src/subgraph/memory_fq_concat_prelu.cpp | 10 +- .../src/subgraph/multi_crops_to_concat.cpp | 8 +- .../src/subgraph/multi_input_scale.cpp | 2 +- .../multioutput_eltwise_squeeze_eltwise.cpp | 10 +- .../multiple_connect_split_concat.cpp | 14 +- .../src/subgraph/multiple_input_fq.cpp | 2 +- .../subgraph/negative_memory_layer_offset.cpp | 16 +- .../src/subgraph/parameter_reshape_result.cpp | 4 +- .../src/subgraph/parameter_result.cpp | 4 +- .../src/subgraph/parameter_shapeof_result.cpp | 6 +- .../permute_concat_concat_permute.cpp | 16 +- .../src/subgraph/permute_concat_permute.cpp | 18 +- .../quantized_convolution_backprop_data.cpp | 2 +- ...ntized_group_convolution_backprop_data.cpp | 2 +- .../src/subgraph/reduce_eltwise.cpp | 6 +- .../src/subgraph/relu_split_reshape.cpp | 6 +- .../src/subgraph/reshape_permute_reshape.cpp | 12 +- .../src/subgraph/scale_shift.cpp | 8 +- .../subgraph/scaleshift_conv_scaleshift.cpp | 44 +- .../src/subgraph/softsign.cpp | 24 +- .../subgraph/split_concat_multi_inputs.cpp | 4 +- .../src/subgraph/split_conv.cpp | 18 +- .../src/subgraph/split_relu.cpp | 4 +- .../subgraph/split_trivial_permute_concat.cpp | 8 +- .../src/subgraph/strided_slice.cpp | 4 +- .../src/subgraph/stridedslice_concat.cpp | 2 +- .../src/subgraph/stridedslice_conv.cpp | 2 +- .../src/subgraph/tensor_names.cpp | 8 +- .../src/subgraph/transpose_add.cpp | 6 +- .../transpose_conv_transpose_squeeze.cpp | 20 +- .../src/subgraph/trivial_concat.cpp | 14 +- .../two_fake_quantize_to_fullyconnected.cpp | 18 +- .../include/ov_lpt_models/markup_bias.hpp | 2 +- .../align_concat_quantization_parameters.cpp | 2 +- .../src/assign_and_read_value.cpp | 8 +- .../ov_helpers/ov_lpt_models/src/avg_pool.cpp | 2 +- .../ov_lpt_models/src/batch_to_space.cpp | 2 +- .../ov_helpers/ov_lpt_models/src/clamp.cpp | 2 +- .../ov_lpt_models/src/common/builders.cpp | 2 +- .../src/common/dequantization_operations.cpp | 2 +- .../src/common/fake_quantize_on_data.cpp | 2 +- .../src/common/fake_quantize_on_weights.cpp | 2 +- .../ov_lpt_models/src/common/multiply.cpp | 2 +- .../src/compose_fake_quantize.cpp | 2 +- .../ov_helpers/ov_lpt_models/src/concat.cpp | 6 +- .../ov_lpt_models/src/convolution.cpp | 6 +- .../src/convolution_backprop_data.cpp | 2 +- ...twise_with_multi_parent_dequantization.cpp | 2 +- .../ov_lpt_models/src/fake_quantize.cpp | 2 +- .../src/fake_quantize_and_convolution.cpp | 4 +- ...d_two_output_branches_with_convolution.cpp | 2 +- ...ntize_on_weights_and_unsupported_child.cpp | 2 +- .../src/fake_quantize_precision_selection.cpp | 2 +- .../ov_lpt_models/src/fold_fake_quantize.cpp | 2 +- .../ov_lpt_models/src/fuse_convert.cpp | 2 +- .../ov_lpt_models/src/fuse_fake_quantize.cpp | 2 +- .../fuse_fake_quantize_and_scale_shift.cpp | 2 +- .../src/fuse_multiply_to_fake_quantize.cpp | 2 +- .../src/fuse_subtract_to_fake_quantize.cpp | 2 +- .../ov_helpers/ov_lpt_models/src/gather.cpp | 6 +- .../ov_lpt_models/src/group_convolution.cpp | 2 +- .../src/markup_avg_pool_precisions.cpp | 2 +- .../ov_lpt_models/src/markup_bias.cpp | 2 +- .../ov_helpers/ov_lpt_models/src/mat_mul.cpp | 2 +- ..._with_optimized_constant_fake_quantize.cpp | 2 +- .../ov_helpers/ov_lpt_models/src/max_pool.cpp | 2 +- .../src/move_dequantization_after.cpp | 2 +- .../ov_lpt_models/src/move_fake_quantize.cpp | 2 +- .../ov_helpers/ov_lpt_models/src/multiply.cpp | 2 +- .../src/multiply_partial_function.cpp | 2 +- .../src/multiply_with_one_parent.cpp | 2 +- .../ov_helpers/ov_lpt_models/src/mvn.cpp | 6 +- .../ov_lpt_models/src/normalize_l2.cpp | 2 +- 
.../src/precision_propagation.cpp | 2 +- .../ov_helpers/ov_lpt_models/src/precomp.hpp | 4 +- .../ov_helpers/ov_lpt_models/src/prelu.cpp | 2 +- .../ov_helpers/ov_lpt_models/src/relu.cpp | 2 +- .../ov_helpers/ov_lpt_models/src/reshape.cpp | 2 +- .../ov_helpers/ov_lpt_models/src/round.cpp | 2 +- .../ov_lpt_models/src/shuffle_channels.cpp | 2 +- .../ov_lpt_models/src/space_to_batch.cpp | 2 +- .../ov_helpers/ov_lpt_models/src/squeeze.cpp | 2 +- .../ov_helpers/ov_lpt_models/src/subtract.cpp | 2 +- .../src/subtract_multiply_to_multiply_add.cpp | 2 +- .../src/transformations_after_split.cpp | 2 +- .../ov_lpt_models/src/transpose.cpp | 2 +- .../src/transpose_after_mat_mul.cpp | 2 +- .../ov_lpt_models/src/unsqueeze.cpp | 2 +- .../ov_models/include/ov_models/builders.hpp | 14 +- .../include/ov_models/utils/ov_helpers.hpp | 4 +- .../ov_helpers/ov_models/src/eltwise.cpp | 2 +- .../src/precision_propagation.cpp | 2 +- .../src/precision_propagation_convertion.cpp | 2 +- .../ov_snippets_models/src/precomp.hpp | 4 +- .../src/subgraph_roll_matmul_roll.cpp | 2 +- .../ov_snippets_models/src/two_binary_ops.cpp | 2 +- 393 files changed, 3551 insertions(+), 3642 deletions(-) diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp index 7401dfff81fe9e..e941a856190002 100644 --- a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp +++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp @@ -24,13 +24,13 @@ std::shared_ptr getFunction2() { auto in2add = ngraph::builder::makeConstant(ngPrc, {1, 2, 1, 1}, std::vector{}, true); auto add = ngraph::builder::makeEltwise(split->output(0), in2add, ngraph::helpers::EltwiseTypes::ADD); - auto relu1 = std::make_shared(add); + auto relu1 = std::make_shared(add); auto in2mult = ngraph::builder::makeConstant(ngPrc, {1, 2, 1, 1}, std::vector{}, true); auto mult = ngraph::builder::makeEltwise(split->output(1), in2mult, ngraph::helpers::EltwiseTypes::MULTIPLY); - auto relu2 = std::make_shared(mult); + auto relu2 = std::make_shared(mult); - auto concat = std::make_shared(ngraph::OutputVector{relu1->output(0), relu2->output(0)}, 3); + auto concat = std::make_shared(ngraph::OutputVector{relu1->output(0), relu2->output(0)}, 3); concat->get_output_tensor(0).set_names({"concat"}); return std::make_shared(concat, params, "SplitAddConcat"); diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp index eb35d1ded6d1db..227e0dd40874cf 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp @@ -29,9 +29,9 @@ std::shared_ptr getFunction1() { auto in2add = ngraph::builder::makeConstant(ngPrc, {1, 4, 1, 1}, std::vector{}, true); auto add = ngraph::builder::makeEltwise(params[0], in2add, ngraph::helpers::EltwiseTypes::ADD); - auto relu1 = std::make_shared(add->output(0)); + auto relu1 = std::make_shared(add->output(0)); relu1->get_output_tensor(0).set_names({"relu1"}); - auto relu2 = std::make_shared(add->output(0)); + auto relu2 = 
std::make_shared(add->output(0)); relu2->get_output_tensor(0).set_names({"relu2"}); ngraph::NodeVector results{relu1, relu2}; @@ -51,13 +51,13 @@ std::shared_ptr getFunction2() { auto in2add = ngraph::builder::makeConstant(ngPrc, {1, 2, 1, 1}, std::vector{}, true); auto add = ngraph::builder::makeEltwise(split->output(0), in2add, ngraph::helpers::EltwiseTypes::ADD); - auto relu1 = std::make_shared(add); + auto relu1 = std::make_shared(add); auto in2mult = ngraph::builder::makeConstant(ngPrc, {1, 2, 1, 1}, std::vector{}, true); auto mult = ngraph::builder::makeEltwise(split->output(1), in2mult, ngraph::helpers::EltwiseTypes::MULTIPLY); - auto relu2 = std::make_shared(mult); + auto relu2 = std::make_shared(mult); - auto concat = std::make_shared(ngraph::OutputVector{relu1->output(0), relu2->output(0)}, 3); + auto concat = std::make_shared(ngraph::OutputVector{relu1->output(0), relu2->output(0)}, 3); concat->get_output_tensor(0).set_names({"concat"}); return std::make_shared(concat, params, "SplitAddConcat"); diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/caching_tests.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/caching_tests.cpp index dfcac55e191f13..7bc6f7b10d3512 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/caching_tests.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/caching_tests.cpp @@ -40,37 +40,37 @@ namespace { }; static std::shared_ptr simple_function_non_max_suppression_internal(ngraph::element::Type, size_t) { - auto boxes = std::make_shared(element::f32, Shape{1, 1000, 4}); - auto scores = std::make_shared(element::f32, Shape{1, 1, 1000}); - auto max_output_boxes_per_class = opset1::Constant::create(element::i32, Shape{1}, {10}); - auto iou_threshold = opset1::Constant::create(element::f32, Shape{1}, {0.75}); - auto score_threshold = opset1::Constant::create(element::f32, Shape{1}, {0.7}); + auto boxes = std::make_shared(element::f32, Shape{1, 1000, 4}); + auto scores = std::make_shared(element::f32, Shape{1, 1, 1000}); + auto max_output_boxes_per_class = ov::op::v0::Constant::create(element::i32, Shape{1}, {10}); + auto iou_threshold = ov::op::v0::Constant::create(element::f32, Shape{1}, {0.75}); + auto score_threshold = ov::op::v0::Constant::create(element::f32, Shape{1}, {0.7}); auto nms = std::make_shared(boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold, 0, true, element::i32); - auto res = std::make_shared(nms); + auto res = std::make_shared(nms); auto func = std::make_shared(NodeVector{nms}, ParameterVector{boxes, scores}); return func; } static std::shared_ptr simple_function_matrix_nms_internal(ngraph::element::Type, size_t) { - auto boxes = std::make_shared(element::f32, Shape{1, 1000, 4}); - auto scores = std::make_shared(element::f32, Shape{1, 1, 1000}); + auto boxes = std::make_shared(element::f32, Shape{1, 1000, 4}); + auto scores = std::make_shared(element::f32, Shape{1, 1, 1000}); ov::op::v8::MatrixNms::Attributes attr; // convert_precision does not support internal op 'NmsStaticShapeIE' attr.output_type = element::i32; auto nms = std::make_shared>(boxes, scores, attr); - auto res = std::make_shared(nms); + auto res = std::make_shared(nms); auto func = std::make_shared(NodeVector{nms}, ParameterVector{boxes, scores}); return func; } static std::shared_ptr simple_function_multiclass_nms_internal(ngraph::element::Type, size_t) { - auto boxes = std::make_shared(element::f32, 
Shape{1, 1000, 4}); - auto scores = std::make_shared(element::f32, Shape{1, 1, 1000}); - op::util::MulticlassNmsBase::Attributes attr; + auto boxes = std::make_shared(element::f32, Shape{1, 1000, 4}); + auto scores = std::make_shared(element::f32, Shape{1, 1, 1000}); + ov::op::util::MulticlassNmsBase::Attributes attr; attr.output_type = element::i32; auto nms = std::make_shared(boxes, scores, attr); - auto res = std::make_shared(nms); + auto res = std::make_shared(nms); auto func = std::make_shared(NodeVector{nms}, ParameterVector{boxes, scores}); return func; } diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/execution_graph_tests/add_output.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/execution_graph_tests/add_output.cpp index 7f3e8cbea3a67d..3de84defe4b609 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/execution_graph_tests/add_output.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/execution_graph_tests/add_output.cpp @@ -18,7 +18,7 @@ inline InferenceEngine::CNNNetwork getTargetNetwork() { auto input = std::make_shared(type, shape); auto mem_i = std::make_shared(type, shape, 0); auto mem_r = std::make_shared(mem_i, "id"); - auto mul = std::make_shared(mem_r, input); + auto mul = std::make_shared(mem_r, input); auto mem_w = std::make_shared(mul, "id"); auto sigm = std::make_shared(mul); mem_r->set_friendly_name("Memory"); diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_transformation.cpp index 852d4811963389..43068e06ead893 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_transformation.cpp @@ -108,7 +108,7 @@ const std::vector testValues = { {}, { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, {}, - std::make_shared(ov::element::u8, ov::Shape{1, 3, 16, 16}, std::vector(3 * 16 * 16, 1.0)), + std::make_shared(ov::element::u8, ov::Shape{1, 3, 16, 16}, std::vector(3 * 16 * 16, 1.0)), {}, { { ov::element::f16 }, diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/depth_to_space_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/depth_to_space_transformation.cpp index 53ecd904f12b3b..df4bccfc05931e 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/depth_to_space_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/depth_to_space_transformation.cpp @@ -9,7 +9,6 @@ using namespace LayerTestsDefinitions; using namespace InferenceEngine::details; -using namespace ngraph::opset1; namespace { const std::vector precisions = { @@ -17,9 +16,9 @@ const std::vector precisions = { // ngraph::element::f16 }; -const std::vector modes = { - DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, - DepthToSpace::DepthToSpaceMode::DEPTH_FIRST +const std::vector modes = { + ov::op::v0::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, + ov::op::v0::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST }; const std::vector inputShapesBS2 = { diff --git 
a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/depth_to_space.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/depth_to_space.cpp index 5e84ede53312b5..95b9f4587518b0 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/depth_to_space.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/depth_to_space.cpp @@ -15,9 +15,9 @@ const std::vector model_types = { ov::element::i16, }; -const std::vector modes = { - DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, - DepthToSpace::DepthToSpaceMode::DEPTH_FIRST +const std::vector modes = { + ov::op::v0::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, + ov::op::v0::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST }; const std::vector> input_shapes_bs2_static = { diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/psroi_pooling.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/psroi_pooling.cpp index 9d0841f15fb6f0..8217db402b4e04 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/psroi_pooling.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/psroi_pooling.cpp @@ -89,13 +89,13 @@ class PSROIPoolingLayerCPUTest : public testing::WithParamInterface(ngraph::element::f32, proposalShape, proposal); ov::ParameterVector params {std::make_shared(ngraph::element::f32, ov::Shape(featureMapShape))}; - auto psroi = std::make_shared(params[0], coords, outputDim, groupSize, + auto psroi = std::make_shared(params[0], coords, outputDim, groupSize, spatialScale, spatialBinsX, spatialBinsY, mode); psroi->get_rt_info() = getCPUInfo(); selectedType = getPrimitiveType() + "_" + inPrc.name(); threshold = 1e-2f; - const ngraph::ResultVector results{std::make_shared(psroi)}; + const ngraph::ResultVector results{std::make_shared(psroi)}; function = std::make_shared(results, params, "PSROIPooling"); } }; diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/range.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/range.cpp index 2596f18b459550..cc6fb0277dc1e0 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/range.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/range.cpp @@ -112,17 +112,17 @@ // step = std::get<2>(rangeInputs); // auto ngOutPr = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(outPrc); // auto ngNetPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inPrc); -// auto startPar = std::make_shared(ngNetPrc, ngraph::Shape{}); -// auto stopPar = std::make_shared(ngNetPrc, ngraph::Shape{}); -// auto stepPar = std::make_shared(ngNetPrc, ngraph::Shape{}); -// auto range = std::make_shared(startPar, stopPar, stepPar, ngOutPr); +// auto startPar = std::make_shared(ngNetPrc, ngraph::Shape{}); +// auto stopPar = std::make_shared(ngNetPrc, ngraph::Shape{}); +// auto stepPar = std::make_shared(ngNetPrc, ngraph::Shape{}); +// auto range = std::make_shared(startPar, stopPar, stepPar, ngOutPr); // range->get_rt_info() = getCPUInfo(); // selectedType = std::string("ref_any_") + (inPrc == outPrc ? 
inPrc.name() : "FP32"); // startPar->set_friendly_name("start"); // stopPar->set_friendly_name("stop"); // stepPar->set_friendly_name("step"); // -// const ngraph::ResultVector results{std::make_shared(range)}; +// const ngraph::ResultVector results{std::make_shared(range)}; // function = std::make_shared(results, ngraph::ParameterVector { // startPar, stopPar, stepPar}, "Range"); // functionRefs = ngraph::clone_function(*function); diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/concat_sdp.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/concat_sdp.cpp index 77a2a6d94dbbbf..93aedea977c79c 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/concat_sdp.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/concat_sdp.cpp @@ -102,7 +102,7 @@ class ConcatSDPTest : public testing::WithParamInterface, v auto concatV = std::make_shared(OutputVector{pastv, inputParams[2]}, 2); auto sdp = std::make_shared(inputParams[0], concatK, concatV, false); sdp->set_friendly_name("mha"); - auto add = std::make_shared(sdp, op::v0::Constant::create(inType, {1}, {1.0f})); + auto add = std::make_shared(sdp, op::v0::Constant::create(inType, {1}, {1.0f})); auto pastk_assign = std::make_shared(concatK, var_k); auto pastv_assign = std::make_shared(concatV, var_v); pastk_assign->set_friendly_name("pastk_w"); diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/rotary_pos_emb.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/rotary_pos_emb.cpp index 32e253875c9dd3..e03e0672c31267 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/rotary_pos_emb.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/rotary_pos_emb.cpp @@ -65,29 +65,29 @@ static std::shared_ptr buildROPE_Llama2(const int batch, auto Constant585 = cos_sin_cache[1]; // concat KV length - auto transpose_Transpose = makeOP({input, {0, 2, 1, 3}}); - auto slice_Unsqueeze_426 = makeOP({pos_id_end, 0}); - auto ScatterUpdate_152236 = makeOP({{0, 0, 0}, {2}, slice_Unsqueeze_426, {0}}); - auto slice_Slice = makeOP({Constant582, {0, 0, 0}, ScatterUpdate_152236, {1, 1, 1}}, + auto transpose_Transpose = makeOP({input, {0, 2, 1, 3}}); + auto slice_Unsqueeze_426 = makeOP({pos_id_end, 0}); + auto ScatterUpdate_152236 = makeOP({{0, 0, 0}, {2}, slice_Unsqueeze_426, {0}}); + auto slice_Slice = makeOP({Constant582, {0, 0, 0}, ScatterUpdate_152236, {1, 1, 1}}, {{"begin_mask", {1, 1, 0}}, {"end_mask", {1, 1, 0}}, {"new_axis_mask", {}}, {"shrink_axis_mask", {}}, {"ellipsis_mask", {}}}); - auto squeeze_Squeeze = makeOP({slice_Slice, 1}); - auto squeeze_Squeeze_435 = makeOP({squeeze_Squeeze, 0}); - auto index_441_Gather = makeOP({squeeze_Squeeze_435, pos_ids, 0}, {{"batch_dims", 0}}); - auto unsqueeze_Unsqueeze = makeOP({index_441_Gather, 1}); + auto squeeze_Squeeze = makeOP({slice_Slice, 1}); + auto squeeze_Squeeze_435 = makeOP({squeeze_Squeeze, 0}); + auto index_441_Gather = makeOP({squeeze_Squeeze_435, pos_ids, 0}, {{"batch_dims", 0}}); + auto unsqueeze_Unsqueeze = makeOP({index_441_Gather, 1}); auto mul_Multiply = - makeOP({transpose_Transpose, unsqueeze_Unsqueeze}, {{"auto_broadcast", "numpy"}}); - auto size_ShapeOf_448 = makeOP({transpose_Transpose}, {{"output_type", "i32"}}); - auto size_Gather_450 = makeOP({size_ShapeOf_448, 3, 0}, {{"batch_dims", 0}}); + makeOP({transpose_Transpose, unsqueeze_Unsqueeze}, {{"auto_broadcast", "numpy"}}); + auto size_ShapeOf_448 = makeOP({transpose_Transpose}, {{"output_type", "i32"}}); + auto 
size_Gather_450 = makeOP({size_ShapeOf_448, 3, 0}, {{"batch_dims", 0}}); auto floor_divide_Divide = - makeOP({size_Gather_450, 2}, {{"auto_broadcast", "numpy"}, {"m_pythondiv", true}}); - auto floor_divide_Floor = makeOP({floor_divide_Divide}); - auto slice_Unsqueeze_452 = makeOP({floor_divide_Floor, 0}); - auto ScatterUpdate_152312 = makeOP({{0, 0, 0, 0}, {3}, slice_Unsqueeze_452, {0}}); - auto slice_Slice_459 = makeOP( + makeOP({size_Gather_450, 2}, {{"auto_broadcast", "numpy"}, {"m_pythondiv", true}}); + auto floor_divide_Floor = makeOP({floor_divide_Divide}); + auto slice_Unsqueeze_452 = makeOP({floor_divide_Floor, 0}); + auto ScatterUpdate_152312 = makeOP({{0, 0, 0, 0}, {3}, slice_Unsqueeze_452, {0}}); + auto slice_Slice_459 = makeOP( {transpose_Transpose, ScatterUpdate_152312, {0ll, 0ll, 0ll, LLONG_MAX}, {1, 1, 1, 1}}, {{"begin_mask", {1, 1, 1, 0}}, {"end_mask", {1, 1, 1, 0}}, @@ -102,30 +102,30 @@ static std::shared_ptr buildROPE_Llama2(const int batch, 1, }), {-1.000000f}); - auto neg_Multiply = makeOP({slice_Slice_459, Constant_182988}, {{"auto_broadcast", "numpy"}}); - auto ScatterUpdate_152368 = makeOP({{0, 0, 0, 0}, {3}, slice_Unsqueeze_452, {0}}); + auto neg_Multiply = makeOP({slice_Slice_459, Constant_182988}, {{"auto_broadcast", "numpy"}}); + auto ScatterUpdate_152368 = makeOP({{0, 0, 0, 0}, {3}, slice_Unsqueeze_452, {0}}); auto slice_Slice2 = - makeOP({transpose_Transpose, {0, 0, 0, 0}, ScatterUpdate_152368, {1, 1, 1, 1}}, + makeOP({transpose_Transpose, {0, 0, 0, 0}, ScatterUpdate_152368, {1, 1, 1, 1}}, {{"begin_mask", {1, 1, 1, 0}}, {"end_mask", {1, 1, 1, 0}}, {"new_axis_mask", {}}, {"shrink_axis_mask", {}}, {"ellipsis_mask", {}}}); - auto cat_Concat = makeOP({neg_Multiply, slice_Slice2}, {{"axis", -1}}); - auto ScatterUpdate_152421 = makeOP({{0, 0, 0}, {2}, slice_Unsqueeze_426, {0}}); - auto slice_Slice_433 = makeOP({Constant585, {0, 0, 0}, ScatterUpdate_152421, {1, 1, 1}}, + auto cat_Concat = makeOP({neg_Multiply, slice_Slice2}, {{"axis", -1}}); + auto ScatterUpdate_152421 = makeOP({{0, 0, 0}, {2}, slice_Unsqueeze_426, {0}}); + auto slice_Slice_433 = makeOP({Constant585, {0, 0, 0}, ScatterUpdate_152421, {1, 1, 1}}, {{"begin_mask", {1, 1, 0}}, {"end_mask", {1, 1, 0}}, {"new_axis_mask", {}}, {"shrink_axis_mask", {}}, {"ellipsis_mask", {}}}); - auto squeeze_Squeeze_436 = makeOP({slice_Slice_433, 1}); - auto squeeze_Squeeze_437 = makeOP({squeeze_Squeeze_436, 0}); - auto index_446_Gather = makeOP({squeeze_Squeeze_437, pos_ids, 0}, {{"batch_dims", 0}}); - auto unsqueeze_Unsqueeze_447 = makeOP({index_446_Gather, 1}); + auto squeeze_Squeeze_436 = makeOP({slice_Slice_433, 1}); + auto squeeze_Squeeze_437 = makeOP({squeeze_Squeeze_436, 0}); + auto index_446_Gather = makeOP({squeeze_Squeeze_437, pos_ids, 0}, {{"batch_dims", 0}}); + auto unsqueeze_Unsqueeze_447 = makeOP({index_446_Gather, 1}); auto mul_Multiply_463 = - makeOP({cat_Concat, unsqueeze_Unsqueeze_447}, {{"auto_broadcast", "numpy"}}); - auto add_Add = makeOP({mul_Multiply, mul_Multiply_463}, {{"auto_broadcast", "numpy"}}); + makeOP({cat_Concat, unsqueeze_Unsqueeze_447}, {{"auto_broadcast", "numpy"}}); + auto add_Add = makeOP({mul_Multiply, mul_Multiply_463}, {{"auto_broadcast", "numpy"}}); return std::make_shared(ov::NodeVector{add_Add}, ov::ParameterVector{input, pos_id_end, pos_ids}); } diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/grid_sample_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/grid_sample_shape_inference_test.cpp index 66a28bd1cb3e72..2c2c4d7cf2c4b5 
100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/grid_sample_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/grid_sample_shape_inference_test.cpp @@ -11,11 +11,11 @@ using namespace ov; using namespace ov::intel_cpu; -class GridSampleStaticShapeInferenceTest : public OpStaticShapeInferenceTest {}; +class GridSampleStaticShapeInferenceTest : public OpStaticShapeInferenceTest {}; TEST_F(GridSampleStaticShapeInferenceTest, GridSample) { - const auto data = std::make_shared(element::i32, PartialShape{-1, -1, -1, -1}); - const auto grid = std::make_shared(element::f32, PartialShape{-1, -1, -1, -1}); + const auto data = std::make_shared(element::i32, PartialShape{-1, -1, -1, -1}); + const auto grid = std::make_shared(element::f32, PartialShape{-1, -1, -1, -1}); op = make_op(data, grid, opset9::GridSample::Attributes{}); diff --git a/src/plugins/intel_cpu/tests/unit/transformations/x64/convert_to_interaction.cpp b/src/plugins/intel_cpu/tests/unit/transformations/x64/convert_to_interaction.cpp index f9406850b4cc67..13b8bba7f848f8 100644 --- a/src/plugins/intel_cpu/tests/unit/transformations/x64/convert_to_interaction.cpp +++ b/src/plugins/intel_cpu/tests/unit/transformations/x64/convert_to_interaction.cpp @@ -25,12 +25,12 @@ using namespace testing; using namespace ov::intel_cpu; using namespace ov; -static std::shared_ptr createFQ(const std::shared_ptr& input) { - auto input_low = std::make_shared(element::f32, ov::Shape{1}, std::vector{0}); - auto input_high = std::make_shared(element::f32, ov::Shape{1}, std::vector{49.4914f}); - auto output_low = std::make_shared(element::f32, ov::Shape{1}, std::vector{0}); - auto output_high = std::make_shared(element::f32, ov::Shape{1}, std::vector{49.4914f}); - return std::make_shared(input, input_low, input_high, output_low, output_high, 256); +static std::shared_ptr createFQ(const std::shared_ptr& input) { + auto input_low = std::make_shared(element::f32, ov::Shape{1}, std::vector{0}); + auto input_high = std::make_shared(element::f32, ov::Shape{1}, std::vector{49.4914f}); + auto output_low = std::make_shared(element::f32, ov::Shape{1}, std::vector{0}); + auto output_high = std::make_shared(element::f32, ov::Shape{1}, std::vector{49.4914f}); + return std::make_shared(input, input_low, input_high, output_low, output_high, 256); } static std::shared_ptr makeInteraction(const ov::PartialShape& inputShape, bool intraFQ = false, bool postFQ = false) { @@ -55,24 +55,24 @@ static std::shared_ptr makeInteraction(const ov::PartialShape& inputS features.push_back(sparse_feat); inputsParams.push_back(sparse_input); } - auto shapeof = std::make_shared(dense_feature); - auto gather_batch_indices = std::make_shared(element::i32, ov::Shape{1}, std::vector{0}); - auto gather_batch_axis = std::make_shared(element::i32, ov::Shape{}, 0); - auto gather_batch = std::make_shared(shapeof, gather_batch_indices, gather_batch_axis); + auto shapeof = std::make_shared(dense_feature); + auto gather_batch_indices = std::make_shared(element::i32, ov::Shape{1}, std::vector{0}); + auto gather_batch_axis = std::make_shared(element::i32, ov::Shape{}, 0); + auto gather_batch = std::make_shared(shapeof, gather_batch_indices, gather_batch_axis); - auto gather_feature_indices = std::make_shared(element::i32, ov::Shape{1}, std::vector{1}); - auto gather_feature_axis = std::make_shared(element::i32, ov::Shape{1}, 0); - auto gather_feature = std::make_shared(shapeof, gather_feature_indices, gather_feature_axis); + auto gather_feature_indices = 
std::make_shared(element::i32, ov::Shape{1}, std::vector{1}); + auto gather_feature_axis = std::make_shared(element::i32, ov::Shape{1}, 0); + auto gather_feature = std::make_shared(shapeof, gather_feature_indices, gather_feature_axis); - auto reshape_dim2 = std::make_shared(element::i64, ov::Shape{1}, std::vector{-1}); - auto reshape_shape = std::make_shared(NodeVector{gather_batch, reshape_dim2, gather_feature}, 0); + auto reshape_dim2 = std::make_shared(element::i64, ov::Shape{1}, std::vector{-1}); + auto reshape_shape = std::make_shared(NodeVector{gather_batch, reshape_dim2, gather_feature}, 0); - auto concat1 = std::make_shared(features, 1); - auto reshape = std::make_shared(concat1, reshape_shape, true); + auto concat1 = std::make_shared(features, 1); + auto reshape = std::make_shared(concat1, reshape_shape, true); std::vector transpose1_value = {0, 2, 1}; - auto transpose1_shape = std::make_shared(element::i32, ov::Shape{3}, transpose1_value); - auto transpose1 = std::make_shared(reshape, transpose1_shape); - auto matmul = std::make_shared(reshape, transpose1); + auto transpose1_shape = std::make_shared(element::i32, ov::Shape{3}, transpose1_value); + auto transpose1 = std::make_shared(reshape, transpose1_shape); + auto matmul = std::make_shared(reshape, transpose1); std::shared_ptr inter = nullptr; if (intraFQ) { inter = createFQ(matmul); @@ -80,11 +80,11 @@ static std::shared_ptr makeInteraction(const ov::PartialShape& inputS inter = matmul; } std::vector transpose2_value = {1, 2, 0}; - auto transpose2_shape = std::make_shared(element::i32, ov::Shape{3}, transpose2_value); - auto transpose2 = std::make_shared(inter, transpose2_shape); + auto transpose2_shape = std::make_shared(element::i32, ov::Shape{3}, transpose2_value); + auto transpose2 = std::make_shared(inter, transpose2_shape); std::vector reshape2_value = {729, -1}; - auto reshape2_shape = std::make_shared(element::i32, ov::Shape{2}, reshape2_value); - auto reshape2 = std::make_shared(transpose2, reshape2_shape, true); + auto reshape2_shape = std::make_shared(element::i32, ov::Shape{2}, reshape2_value); + auto reshape2 = std::make_shared(transpose2, reshape2_shape, true); std::vector gather_indices_value; for (int i = 1; i < 27; i++) { @@ -92,29 +92,29 @@ static std::shared_ptr makeInteraction(const ov::PartialShape& inputS gather_indices_value.push_back(i * 27 + j); } } - auto gather_indices = std::make_shared(element::i32, ov::Shape{351}, gather_indices_value); - auto gather_axis = std::make_shared(element::i32, ov::Shape{}, 0); - auto gather = std::make_shared(reshape2, gather_indices, gather_axis); - auto reshape3_dim1 = std::make_shared(element::i64, ov::Shape{1}, std::vector{-1}); - auto reshape3_shape = std::make_shared(NodeVector{reshape3_dim1, gather_batch}, 0); - auto reshape3 = std::make_shared(gather, reshape3_shape, true); + auto gather_indices = std::make_shared(element::i32, ov::Shape{351}, gather_indices_value); + auto gather_axis = std::make_shared(element::i32, ov::Shape{}, 0); + auto gather = std::make_shared(reshape2, gather_indices, gather_axis); + auto reshape3_dim1 = std::make_shared(element::i64, ov::Shape{1}, std::vector{-1}); + auto reshape3_shape = std::make_shared(NodeVector{reshape3_dim1, gather_batch}, 0); + auto reshape3 = std::make_shared(gather, reshape3_shape, true); std::vector transpose3_value = {1, 0}; - auto transpose3_shape = std::make_shared(element::i32, ov::Shape{2}, transpose3_value); - auto transpose3 = std::make_shared(reshape3, transpose3_shape); + auto transpose3_shape = 
std::make_shared(element::i32, ov::Shape{2}, transpose3_value); + auto transpose3 = std::make_shared(reshape3, transpose3_shape); std::vector reshape4_value = {-1, 351}; - auto reshape4_shape = std::make_shared(element::i32, ov::Shape{2}, reshape4_value); - auto reshape4 = std::make_shared(transpose3, reshape4_shape, true); - auto concat2 = std::make_shared(NodeVector{dense_feature, reshape4}, 1); + auto reshape4_shape = std::make_shared(element::i32, ov::Shape{2}, reshape4_value); + auto reshape4 = std::make_shared(transpose3, reshape4_shape, true); + auto concat2 = std::make_shared(NodeVector{dense_feature, reshape4}, 1); std::shared_ptr model; if (postFQ) { - auto input_low = std::make_shared(element::f32, ov::Shape{1}, std::vector{-5.12978f}); - auto input_high = std::make_shared(element::f32, ov::Shape{1}, std::vector{5.08965f}); - auto output_low = std::make_shared(element::f32, ov::Shape{1}, std::vector{-128}); - auto output_high = std::make_shared(element::f32, ov::Shape{1}, std::vector{127}); - auto fq = std::make_shared>( - opset8::FakeQuantize(concat2, input_low, input_high, output_low, output_high, 256), + auto input_low = std::make_shared(element::f32, ov::Shape{1}, std::vector{-5.12978f}); + auto input_high = std::make_shared(element::f32, ov::Shape{1}, std::vector{5.08965f}); + auto output_low = std::make_shared(element::f32, ov::Shape{1}, std::vector{-128}); + auto output_high = std::make_shared(element::f32, ov::Shape{1}, std::vector{127}); + auto fq = std::make_shared>( + ov::op::v0::FakeQuantize(concat2, input_low, input_high, output_low, output_high, 256), element::i8); model = std::make_shared(fq, inputsParams, "interaction"); } else { diff --git a/src/plugins/intel_gna/tests/deprecated/unit/engines/gna/i16_quantisation_test.cpp b/src/plugins/intel_gna/tests/deprecated/unit/engines/gna/i16_quantisation_test.cpp index 5a64a2c3bff795..252952a44b2e50 100644 --- a/src/plugins/intel_gna/tests/deprecated/unit/engines/gna/i16_quantisation_test.cpp +++ b/src/plugins/intel_gna/tests/deprecated/unit/engines/gna/i16_quantisation_test.cpp @@ -14,6 +14,7 @@ #include "frontend/model_quantizer.hpp" #include "gna_matcher.hpp" #include "ov_models/builders.hpp" +#include "openvino/opsets/opset8.hpp" using namespace InferenceEngine; using namespace ov::intel_gna::limitations; @@ -223,12 +224,12 @@ TEST_F(I16QuantisationTest, EltwiseToMemory_ActivationInsertion) { } TEST_F(I16QuantisationTest, SplitFollowedByActivation_DummyDiagonalAffineInsertion) { - auto input_params = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 20}); - const auto axis_node = ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{}, {1}); - auto split = std::make_shared(input_params, axis_node, 2); - auto tanh = std::make_shared(split->outputs()[0]); - auto add = std::make_shared(split->outputs()[1], tanh); - auto result = std::make_shared(add); + auto input_params = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 20}); + const auto axis_node = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{}, {1}); + auto split = std::make_shared(input_params, axis_node, 2); + auto tanh = std::make_shared(split->outputs()[0]); + auto add = std::make_shared(split->outputs()[1], tanh); + auto result = std::make_shared(add); auto function = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params}); assert_that() @@ -328,10 +329,10 @@ TEST_F(I16QuantisationTest, ScaleShift_Affine_WillResultInIdentityInsertion) { } TEST_F(I16QuantisationTest, 
ClampFollowedByTanh_ResultInDiagonalInsertion) { - auto input_params = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 10}); - auto clamp = std::make_shared(input_params, -50, 50); - auto tanh = std::make_shared(clamp); - auto result = std::make_shared(tanh); + auto input_params = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 10}); + auto clamp = std::make_shared(input_params, -50, 50); + auto tanh = std::make_shared(clamp); + auto result = std::make_shared(tanh); auto function = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params}); assert_that() @@ -346,16 +347,16 @@ TEST_F(I16QuantisationTest, ClampFollowedByTanh_ResultInDiagonalInsertion) { } TEST_F(I16QuantisationTest, EltwiseWithMemoryAndActivationInput_ResultInTwoDiagonalsInsertion) { - auto input_params = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 10}); - const auto constant = ngraph::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{10, 10}, {1}); - auto matmul = std::make_shared(input_params, constant); + auto input_params = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 10}); + const auto constant = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{10, 10}, {1}); + auto matmul = std::make_shared(input_params, constant); auto mem_i = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 10}, 0); auto mem_r = std::make_shared(mem_i, "r_27-28"); - auto tanh = std::make_shared(matmul); - auto add = std::make_shared(tanh, mem_r); + auto tanh = std::make_shared(matmul); + auto add = std::make_shared(tanh, mem_r); tanh->add_control_dependency(mem_r); auto mem_w = std::make_shared(tanh, "r_27-28"); - auto result = std::make_shared(add); + auto result = std::make_shared(add); mem_w->add_control_dependency(mem_r); result->add_control_dependency(mem_w); auto function = @@ -490,16 +491,16 @@ TEST_F(I16QuantisationTest, fp16tofp32_on_fullyConnected_model) { TEST_F(I16QuantisationTest, MultipleActivationsAfterAffineWithIdentityActivation_MultipleDiagonalLayersWithActivaitons) { - auto input_params = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 10}); - const auto constant = ngraph::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{10, 10}, {1}); - auto matmul1 = std::make_shared(input_params, constant); - auto matmul2 = std::make_shared(input_params, constant); - auto add = std::make_shared(matmul2, matmul1); - auto sigmoid = std::make_shared(matmul2); - auto relu = std::make_shared(matmul2); - auto mul = std::make_shared(sigmoid, relu); - auto add2 = std::make_shared(add, mul); - auto result = std::make_shared(add); + auto input_params = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 10}); + const auto constant = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{10, 10}, {1}); + auto matmul1 = std::make_shared(input_params, constant); + auto matmul2 = std::make_shared(input_params, constant); + auto add = std::make_shared(matmul2, matmul1); + auto sigmoid = std::make_shared(matmul2); + auto relu = std::make_shared(matmul2); + auto mul = std::make_shared(sigmoid, relu); + auto add2 = std::make_shared(add, mul); + auto result = std::make_shared(add); auto function = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params}); // identiy came from automatic insertion due to @@ -514,13 +515,13 @@ TEST_F(I16QuantisationTest, } TEST_F(I16QuantisationTest, MultipleActivationsAfterAffine_ResultInMultipleDiagonalLayersWithActivaitons) { - auto input_params = 
std::make_shared(ngraph::element::f32, ngraph::Shape{1, 10}); - const auto constant = ngraph::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{10, 10}, {1}); - auto matmul = std::make_shared(input_params, constant); - auto sigmoid = std::make_shared(matmul); - auto relu = std::make_shared(matmul); - auto mul = std::make_shared(sigmoid, relu); - auto result = std::make_shared(mul); + auto input_params = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 10}); + const auto constant = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{10, 10}, {1}); + auto matmul = std::make_shared(input_params, constant); + auto sigmoid = std::make_shared(matmul); + auto relu = std::make_shared(matmul); + auto mul = std::make_shared(sigmoid, relu); + auto result = std::make_shared(mul); auto function = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params}); // extra identity inserted for affine @@ -596,13 +597,13 @@ TEST_F(I16QuantisationTest, PowerWithScaleFactorPropagateForward) { } TEST_F(I16QuantisationTest, ConcatWithDifferentInputScaleFactorsPropagateForward) { - auto input_params = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 20}); - const auto axis_node = ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{}, {1}); - auto split = std::make_shared(input_params, axis_node, 2); - auto sigmoid = std::make_shared(split->outputs()[0]); - auto tanh = std::make_shared(split->outputs()[1]); - auto concat = std::make_shared(ngraph::OutputVector{sigmoid, tanh}, 1); - auto result = std::make_shared(concat); + auto input_params = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 20}); + const auto axis_node = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{}, {1}); + auto split = std::make_shared(input_params, axis_node, 2); + auto sigmoid = std::make_shared(split->outputs()[0]); + auto tanh = std::make_shared(split->outputs()[1]); + auto concat = std::make_shared(ngraph::OutputVector{sigmoid, tanh}, 1); + auto result = std::make_shared(concat); auto function = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params}); assert_that() @@ -627,14 +628,14 @@ TEST_F(I16QuantisationTest, TI_quantize) { } TEST_F(I16QuantisationTest, TI_PropagateForward) { - auto input_params = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 10}); - auto mul = std::make_shared( + auto input_params = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 10}); + auto mul = std::make_shared( input_params, std::make_shared(ngraph::element::f32, ngraph::Shape{1, 10})); - auto add = std::make_shared( + auto add = std::make_shared( mul, std::make_shared(ngraph::element::f32, ngraph::Shape{1, 10})); - auto reshape = std::make_shared( + auto reshape = std::make_shared( add, std::make_shared(ngraph::element::i64, ngraph::Shape{3}, std::vector{1, 1, 10}), false); @@ -646,11 +647,11 @@ TEST_F(I16QuantisationTest, TI_PropagateForward) { auto H_init = ngraph::builder::makeConstant(ngraph::element::f32, {batch_size, hiddenSize}, {}, true); auto C_init = ngraph::builder::makeConstant(ngraph::element::f32, {batch_size, hiddenSize}, {}, true); - auto H_t = std::make_shared(ngraph::element::f32, ngraph::Shape{batch_size, hiddenSize}); - auto C_t = std::make_shared(ngraph::element::f32, ngraph::Shape{batch_size, hiddenSize}); + auto H_t = std::make_shared(ngraph::element::f32, ngraph::Shape{batch_size, hiddenSize}); + auto C_t = std::make_shared(ngraph::element::f32, ngraph::Shape{batch_size, hiddenSize}); // 
Body - auto X = std::make_shared(ngraph::element::f32, + auto X = std::make_shared(ngraph::element::f32, ngraph::Shape{batch_size, 1, reshape_shape[2]}); auto weightsNode = ngraph::builder::makeConstant(ngraph::element::f32, {4 * hiddenSize, reshape_shape[2]}, {}, true); @@ -659,9 +660,9 @@ TEST_F(I16QuantisationTest, TI_PropagateForward) { // lstm auto constantX = - ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {batch_size, reshape_shape[2]}); + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {batch_size, reshape_shape[2]}); auto lstm1 = - std::make_shared(std::make_shared(X, constantX, false), + std::make_shared(std::make_shared(X, constantX, false), H_t, C_t, weightsNode, @@ -674,7 +675,7 @@ TEST_F(I16QuantisationTest, TI_PropagateForward) { auto body = std::make_shared(ngraph::OutputVector{H_o, C_o}, ngraph::ParameterVector{X, H_t, C_t}); - auto tensor_iterator = std::make_shared(); + auto tensor_iterator = std::make_shared(); tensor_iterator->set_body(body); tensor_iterator->set_sliced_input(X, reshape, 0, 1, 1, -1, 1); @@ -691,7 +692,7 @@ TEST_F(I16QuantisationTest, TI_PropagateForward) { {hiddenSize, output_size}, {1}, {1}); - auto result = std::make_shared(fc); + auto result = std::make_shared(fc); auto function = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params}); assert_that() diff --git a/src/plugins/intel_gna/tests/deprecated/unit/inference_engine_tests/cnn_ngraph_impl_tests.cpp b/src/plugins/intel_gna/tests/deprecated/unit/inference_engine_tests/cnn_ngraph_impl_tests.cpp index 38abd6b94ba981..253036bfaf5d6c 100644 --- a/src/plugins/intel_gna/tests/deprecated/unit/inference_engine_tests/cnn_ngraph_impl_tests.cpp +++ b/src/plugins/intel_gna/tests/deprecated/unit/inference_engine_tests/cnn_ngraph_impl_tests.cpp @@ -25,9 +25,9 @@ #include #include #include -#include -#include -#include +#include +#include +#include #include #include #include @@ -47,10 +47,10 @@ using namespace InferenceEngine; TEST(CNNNGraphImplTests, TestReshapeWithSameShape) { std::shared_ptr f; { - auto input = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 1000, 4}); + auto input = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 1000, 4}); input->set_friendly_name("input"); - auto shape = ngraph::opset5::Constant::create(ngraph::element::i64, {2}, {1, 4000}); - auto reshape = std::make_shared(input, shape, true); + auto shape = ov::op::v0::Constant::create(ngraph::element::i64, {2}, {1, 4000}); + auto reshape = std::make_shared(input, shape, true); f = std::make_shared(ngraph::OutputVector{reshape}, ngraph::ParameterVector{input}); } @@ -81,10 +81,10 @@ TEST(CNNNGraphImplTests, TestTwoResultsFromOneTensor) { TEST(CNNNGraphImplTests, TestInvalidReshape) { std::shared_ptr f; { - auto input = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 1000, 4}); + auto input = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 1000, 4}); input->set_friendly_name("input"); - auto shape = ngraph::opset5::Constant::create(ngraph::element::i64, {2}, {1, 4000}); - auto reshape = std::make_shared(input, shape, true); + auto shape = ov::op::v0::Constant::create(ngraph::element::i64, {2}, {1, 4000}); + auto reshape = std::make_shared(input, shape, true); f = std::make_shared(ngraph::OutputVector{reshape}, ngraph::ParameterVector{input}); } @@ -100,18 +100,18 @@ TEST(CNNNGraphImplTests, TestInvalidReshape) { TEST(CNNNGraphImplTests, TestNMS5OutputNames) { std::shared_ptr f; { - auto boxes = 
std::make_shared(ngraph::element::f32, ngraph::Shape{1, 1000, 4}); - auto scores = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 1, 1000}); - auto max_output_boxes_per_class = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{}, {10}); - auto iou_threshold = ngraph::opset5::Constant::create(ngraph::element::f32, ngraph::Shape{}, {0.75}); - auto score_threshold = ngraph::opset5::Constant::create(ngraph::element::f32, ngraph::Shape{}, {0.7}); - auto nms = std::make_shared( + auto boxes = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 1000, 4}); + auto scores = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 1, 1000}); + auto max_output_boxes_per_class = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{}, {10}); + auto iou_threshold = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{}, {0.75}); + auto score_threshold = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{}, {0.7}); + auto nms = std::make_shared( boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold, - ngraph::opset5::NonMaxSuppression::BoxEncodingType::CORNER, + ov::opset5::NonMaxSuppression::BoxEncodingType::CORNER, true); nms->set_friendly_name("nms"); f = std::make_shared(ngraph::OutputVector{nms->output(0), nms->output(1), nms->output(2)}, @@ -1072,15 +1072,15 @@ TEST(CNNNGraphImplTests, TestCheckStats) { TEST(CNNNGraphImplTests, CanSetBatchReadValue) { std::shared_ptr ngraph; { - auto input = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 2}); - auto constant = std::make_shared(ngraph::element::f32, + auto input = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 2}); + auto constant = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 2}, std::vector{1, 2}); - auto read_value = std::make_shared(constant, "variable_id"); - auto assign = std::make_shared(read_value, "variable_id"); + auto read_value = std::make_shared(constant, "variable_id"); + auto assign = std::make_shared(read_value, "variable_id"); assign->add_control_dependency(read_value); - auto add = std::make_shared(input, read_value); + auto add = std::make_shared(input, read_value); auto result = std::make_shared(add); ngraph::ParameterVector params = {input}; @@ -1101,10 +1101,10 @@ TEST(CNNNGraphImplTests, addSameOutput) { { ngraph::PartialShape shape({1, 3, 22, 22}); ngraph::element::Type type(ngraph::element::Type_t::f32); - auto param = std::make_shared(type, shape); - auto relu = std::make_shared(param); - auto shapeof = std::make_shared(param); - auto reshape = std::make_shared(relu, shapeof, true); + auto param = std::make_shared(type, shape); + auto relu = std::make_shared(param); + auto shapeof = std::make_shared(param); + auto reshape = std::make_shared(relu, shapeof, true); reshape->set_friendly_name("reshape"); auto result = std::make_shared(reshape); @@ -1128,12 +1128,12 @@ TEST(CNNNGraphImplTests, addOutput) { { ngraph::PartialShape shape({1, 3, 22, 22}); ngraph::element::Type type(ngraph::element::Type_t::f32); - auto param = std::make_shared(type, shape); - auto relu = std::make_shared(param); - auto shapeof = std::make_shared(param); - auto reshape = std::make_shared(relu, shapeof, true); + auto param = std::make_shared(type, shape); + auto relu = std::make_shared(param); + auto shapeof = std::make_shared(param); + auto reshape = std::make_shared(relu, shapeof, true); reshape->set_friendly_name("reshape"); - auto relu2 = std::make_shared(reshape); + auto relu2 = std::make_shared(reshape); auto result = 
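// Illustrative sketch of the opset5 NMS construction used by the NMS output-name tests above,
// written with the explicit ov::op::v5 type the patch migrates to. The helper name and the loop
// over outputs are illustrative; the threshold values mirror the tests.
#include <memory>

#include "openvino/core/model.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/non_max_suppression.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/result.hpp"

static std::shared_ptr<ov::Model> make_nms5_sketch() {
    auto boxes = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 1000, 4});
    auto scores = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 1, 1000});
    auto max_boxes = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {10});
    auto iou = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{}, {0.75f});
    auto score = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{}, {0.7f});
    auto nms = std::make_shared<ov::op::v5::NonMaxSuppression>(
        boxes, scores, max_boxes, iou, score,
        ov::op::v5::NonMaxSuppression::BoxEncodingType::CORNER,
        /*sort_result_descending=*/true);
    nms->set_friendly_name("nms");

    // Each of the three NMS outputs gets its own Result so output names stay unique.
    ov::ResultVector results;
    for (size_t i = 0; i < 3; ++i)
        results.push_back(std::make_shared<ov::op::v0::Result>(nms->output(i)));
    return std::make_shared<ov::Model>(results, ov::ParameterVector{boxes, scores});
}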
std::make_shared(relu2); ngraph::ParameterVector params = {param}; @@ -1156,9 +1156,9 @@ TEST(CNNNGraphImplTests, addOutputForParameter) { { ngraph::PartialShape shape({1, 3, 22, 22}); ngraph::element::Type type(ngraph::element::Type_t::f32); - auto param = std::make_shared(type, shape); + auto param = std::make_shared(type, shape); param->set_friendly_name("param"); - auto relu = std::make_shared(param); + auto relu = std::make_shared(param); auto result = std::make_shared(relu); ngraph::ParameterVector params = {param}; @@ -1855,27 +1855,27 @@ TEST(CNNNGraphImplTests, SaveOriginalResultNameForMultiOutputOpOpset6) { TEST(CNNNGraphImplTests, CheckUniqueNames) { std::shared_ptr f; { - auto boxes = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 1000, 4}); + auto boxes = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 1000, 4}); boxes->set_friendly_name("boxes"); - auto scores = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 1, 1000}); + auto scores = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 1, 1000}); scores->set_friendly_name("scores"); - auto max_output_boxes_per_class = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{}, {10}); - auto iou_threshold = ngraph::opset5::Constant::create(ngraph::element::f32, ngraph::Shape{}, {0.75}); - auto score_threshold = ngraph::opset5::Constant::create(ngraph::element::f32, ngraph::Shape{}, {0.7}); - auto nms = std::make_shared( + auto max_output_boxes_per_class = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{}, {10}); + auto iou_threshold = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{}, {0.75}); + auto score_threshold = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{}, {0.7}); + auto nms = std::make_shared( boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold, - ngraph::opset5::NonMaxSuppression::BoxEncodingType::CORNER, + ov::opset5::NonMaxSuppression::BoxEncodingType::CORNER, true); - auto result1 = std::make_shared(nms->output(0)); + auto result1 = std::make_shared(nms->output(0)); result1->set_friendly_name("result1"); - auto result2 = std::make_shared(nms->output(1)); + auto result2 = std::make_shared(nms->output(1)); result2->set_friendly_name("result2"); - auto result3 = std::make_shared(nms->output(2)); + auto result3 = std::make_shared(nms->output(2)); result3->set_friendly_name("result3"); nms->set_friendly_name("nms"); f = std::make_shared(ngraph::ResultVector{result1, result2, result3}, @@ -1888,27 +1888,27 @@ TEST(CNNNGraphImplTests, CheckUniqueNames) { TEST(CNNNGraphImplTests, CheckNonUniqueParameterName) { std::shared_ptr f; { - auto boxes = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 1000, 4}); + auto boxes = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 1000, 4}); boxes->set_friendly_name("boxes"); - auto scores = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 1, 1000}); + auto scores = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 1, 1000}); scores->set_friendly_name("boxes"); - auto max_output_boxes_per_class = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{}, {10}); - auto iou_threshold = ngraph::opset5::Constant::create(ngraph::element::f32, ngraph::Shape{}, {0.75}); - auto score_threshold = ngraph::opset5::Constant::create(ngraph::element::f32, ngraph::Shape{}, {0.7}); - auto nms = std::make_shared( + auto max_output_boxes_per_class = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{}, {10}); + auto iou_threshold = 
ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{}, {0.75}); + auto score_threshold = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{}, {0.7}); + auto nms = std::make_shared( boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold, - ngraph::opset5::NonMaxSuppression::BoxEncodingType::CORNER, + ov::opset5::NonMaxSuppression::BoxEncodingType::CORNER, true); - auto result1 = std::make_shared(nms->output(0)); + auto result1 = std::make_shared(nms->output(0)); result1->set_friendly_name("result1"); - auto result2 = std::make_shared(nms->output(1)); + auto result2 = std::make_shared(nms->output(1)); result2->set_friendly_name("result2"); - auto result3 = std::make_shared(nms->output(2)); + auto result3 = std::make_shared(nms->output(2)); result3->set_friendly_name("result3"); nms->set_friendly_name("nms"); f = std::make_shared(ngraph::ResultVector{result1, result2, result3}, @@ -1921,27 +1921,27 @@ TEST(CNNNGraphImplTests, CheckNonUniqueParameterName) { TEST(CNNNGraphImplTests, CheckNonUniqueResultName) { std::shared_ptr f; { - auto boxes = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 1000, 4}); + auto boxes = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 1000, 4}); boxes->set_friendly_name("nms.1"); - auto scores = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 1, 1000}); + auto scores = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 1, 1000}); scores->set_friendly_name("scores"); - auto max_output_boxes_per_class = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{}, {10}); - auto iou_threshold = ngraph::opset5::Constant::create(ngraph::element::f32, ngraph::Shape{}, {0.75}); - auto score_threshold = ngraph::opset5::Constant::create(ngraph::element::f32, ngraph::Shape{}, {0.7}); - auto nms = std::make_shared( + auto max_output_boxes_per_class = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{}, {10}); + auto iou_threshold = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{}, {0.75}); + auto score_threshold = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{}, {0.7}); + auto nms = std::make_shared( boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold, - ngraph::opset5::NonMaxSuppression::BoxEncodingType::CORNER, + ov::opset5::NonMaxSuppression::BoxEncodingType::CORNER, true); - auto result1 = std::make_shared(nms->output(0)); + auto result1 = std::make_shared(nms->output(0)); result1->set_friendly_name("result1"); - auto result2 = std::make_shared(nms->output(1)); + auto result2 = std::make_shared(nms->output(1)); result2->set_friendly_name("result2"); - auto result3 = std::make_shared(nms->output(2)); + auto result3 = std::make_shared(nms->output(2)); result3->set_friendly_name("result3"); nms->set_friendly_name("nms"); f = std::make_shared(ngraph::ResultVector{result1, result2, result3}, @@ -1954,25 +1954,25 @@ TEST(CNNNGraphImplTests, CheckNonUniqueResultName) { TEST(CNNNGraphImplTests, CheckNonUniqueNewResultName) { std::shared_ptr f; { - auto boxes = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 1000, 4}); + auto boxes = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 1000, 4}); boxes->set_friendly_name("nms.1"); - auto scores = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 1, 1000}); + auto scores = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 1, 1000}); scores->set_friendly_name("scores"); - auto max_output_boxes_per_class = 
ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{}, {10}); - auto iou_threshold = ngraph::opset5::Constant::create(ngraph::element::f32, ngraph::Shape{}, {0.75}); - auto score_threshold = ngraph::opset5::Constant::create(ngraph::element::f32, ngraph::Shape{}, {0.7}); - auto nms = std::make_shared( + auto max_output_boxes_per_class = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{}, {10}); + auto iou_threshold = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{}, {0.75}); + auto score_threshold = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{}, {0.7}); + auto nms = std::make_shared( boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold, - ngraph::opset5::NonMaxSuppression::BoxEncodingType::CORNER, + ov::opset5::NonMaxSuppression::BoxEncodingType::CORNER, true); - auto result1 = std::make_shared(nms->output(0)); + auto result1 = std::make_shared(nms->output(0)); result1->set_friendly_name("result1"); - auto result3 = std::make_shared(nms->output(2)); + auto result3 = std::make_shared(nms->output(2)); result3->set_friendly_name("result3"); nms->set_friendly_name("nms"); f = std::make_shared(ngraph::ResultVector{result1, result3}, diff --git a/src/plugins/intel_gna/tests/functional/Import_export_tests/import_export_act_conv_act.cpp b/src/plugins/intel_gna/tests/functional/Import_export_tests/import_export_act_conv_act.cpp index ca671b4b759c59..19db46d5a6457d 100644 --- a/src/plugins/intel_gna/tests/functional/Import_export_tests/import_export_act_conv_act.cpp +++ b/src/plugins/intel_gna/tests/functional/Import_export_tests/import_export_act_conv_act.cpp @@ -119,7 +119,7 @@ class ImportActConvActTest : public testing::WithParamInterface(ngPrc, ov::Shape(inputShape))}; - auto relu1 = std::make_shared(params[0]); + auto relu1 = std::make_shared(params[0]); size_t num_out_channels = 8; size_t kernel_size = 8; @@ -137,8 +137,8 @@ class ImportActConvActTest : public testing::WithParamInterface(conv); - ngraph::ResultVector results{std::make_shared(relu2)}; + auto relu2 = std::make_shared(conv); + ngraph::ResultVector results{std::make_shared(relu2)}; function = std::make_shared(results, params, "ExportImportNetwork"); } diff --git a/src/plugins/intel_gna/tests/functional/Import_export_tests/import_export_batch_size.cpp b/src/plugins/intel_gna/tests/functional/Import_export_tests/import_export_batch_size.cpp index 6af98b35808808..e2b5c188eaa405 100644 --- a/src/plugins/intel_gna/tests/functional/Import_export_tests/import_export_batch_size.cpp +++ b/src/plugins/intel_gna/tests/functional/Import_export_tests/import_export_batch_size.cpp @@ -38,8 +38,8 @@ class ImportBatchTest : public FuncTestUtils::ImportNetworkTestBase { ov::test::utils::generate_float_numbers(2048 * inputShape[1], -0.1f, 0.1f), false); - auto matmul_1 = std::make_shared(params[0], mul_const_1); - auto sigmoid_1 = std::make_shared(matmul_1); + auto matmul_1 = std::make_shared(params[0], mul_const_1); + auto sigmoid_1 = std::make_shared(matmul_1); auto mul_const_2 = ngraph::builder::makeConstant(ngPrc, @@ -47,7 +47,7 @@ class ImportBatchTest : public FuncTestUtils::ImportNetworkTestBase { ov::test::utils::generate_float_numbers(2048 * 3425, -0.1f, 0.1f), false); - auto matmul_2 = std::make_shared(sigmoid_1, mul_const_2); + auto matmul_2 = std::make_shared(sigmoid_1, mul_const_2); function = std::make_shared(matmul_2, params, "ExportImportNetwork"); } diff --git 
a/src/plugins/intel_gna/tests/functional/Import_export_tests/import_export_memory_layer.cpp b/src/plugins/intel_gna/tests/functional/Import_export_tests/import_export_memory_layer.cpp index 9bfeb3a9f61953..5042883e351475 100644 --- a/src/plugins/intel_gna/tests/functional/Import_export_tests/import_export_memory_layer.cpp +++ b/src/plugins/intel_gna/tests/functional/Import_export_tests/import_export_memory_layer.cpp @@ -103,15 +103,15 @@ class ImportMemoryTest : public testing::WithParamInterface(ngPrc, ov::Shape{1, 336})}; auto mem_c = ngraph::builder::makeConstant(ngPrc, {1, 336}, std::vector{1}); - auto mem_r = std::make_shared(mem_c, "id"); + auto mem_r = std::make_shared(mem_c, "id"); - auto mul = std::make_shared(params[0], mem_r); - auto mem_w = std::make_shared(mul, "id"); + auto mul = std::make_shared(params[0], mem_r); + auto mem_w = std::make_shared(mul, "id"); - auto relu = std::make_shared(mul); + auto relu = std::make_shared(mul); mem_w->add_control_dependency(mem_r); relu->add_control_dependency(mem_w); - ngraph::ResultVector results{std::make_shared(relu)}; + ngraph::ResultVector results{std::make_shared(relu)}; function = std::make_shared(results, params, "ExportImportNetwork"); } diff --git a/src/plugins/intel_gna/tests/functional/Import_export_tests/import_export_multi_inputs.cpp b/src/plugins/intel_gna/tests/functional/Import_export_tests/import_export_multi_inputs.cpp index fd207630f4d5f5..005e2b5350f6e8 100644 --- a/src/plugins/intel_gna/tests/functional/Import_export_tests/import_export_multi_inputs.cpp +++ b/src/plugins/intel_gna/tests/functional/Import_export_tests/import_export_multi_inputs.cpp @@ -25,7 +25,7 @@ class ImportMultiInput : public FuncTestUtils::ImportNetworkTestBase { ov::ParameterVector input{std::make_shared(ngPrc, ov::Shape(inputShape)), std::make_shared(ngPrc, ov::Shape(inputShape))}; auto mul1 = ngraph::builder::makeEltwise(input[0], input[1], ngraph::helpers::EltwiseTypes::ADD); - auto result = std::make_shared(mul1); + auto result = std::make_shared(mul1); function = std::make_shared(ngraph::ResultVector{result}, input, "multiple_input"); } diff --git a/src/plugins/intel_gna/tests/functional/Import_export_tests/import_reshape_permute_conv.cpp b/src/plugins/intel_gna/tests/functional/Import_export_tests/import_reshape_permute_conv.cpp index b57cbb822950e9..da9ee1d225a196 100644 --- a/src/plugins/intel_gna/tests/functional/Import_export_tests/import_reshape_permute_conv.cpp +++ b/src/plugins/intel_gna/tests/functional/Import_export_tests/import_reshape_permute_conv.cpp @@ -20,12 +20,12 @@ class ImportReshapePermuteConv : public FuncTestUtils::ImportNetworkTestBase { std::vector outFormShapes1 = {1, 1, 168, 2}; auto pattern1 = - std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{4}, outFormShapes1); - auto reshape1 = std::make_shared(params[0], pattern1, false); + std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{4}, outFormShapes1); + auto reshape1 = std::make_shared(params[0], pattern1, false); - auto permute1 = std::make_shared( + auto permute1 = std::make_shared( reshape1, - ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 3, 1, 2})); + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 3, 1, 2})); auto conv1 = ngraph::builder::makeConvolution(permute1, ngPrc, @@ -37,16 +37,16 @@ class ImportReshapePermuteConv : public FuncTestUtils::ImportNetworkTestBase { ngraph::op::PadType::VALID, 12); - auto permute2 = std::make_shared( + auto permute2 = std::make_shared( conv1, - 
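// Illustrative sketch of the string-id memory pattern from the import/export memory-layer test
// above: opset3 ReadValue/Assign plus control dependencies. Op versions and the helper name are
// assumptions; the real test builds the initial constant via ngraph::builder::makeConstant.
#include <memory>

#include "openvino/core/model.hpp"
#include "openvino/op/assign.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/multiply.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/read_value.hpp"
#include "openvino/op/relu.hpp"
#include "openvino/op/result.hpp"

static std::shared_ptr<ov::Model> make_memory_state_sketch() {
    auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 336});
    auto init = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{1, 336}, {1.0f});

    // String-id flavour of the state ops (opset3); the opset6 flavour takes a Variable instead.
    auto mem_r = std::make_shared<ov::op::v3::ReadValue>(init, "id");
    auto mul = std::make_shared<ov::op::v1::Multiply>(input, mem_r);
    auto mem_w = std::make_shared<ov::op::v3::Assign>(mul, "id");
    auto relu = std::make_shared<ov::op::v0::Relu>(mul);

    // Control dependencies keep the read -> update -> use order intact.
    mem_w->add_control_dependency(mem_r);
    relu->add_control_dependency(mem_w);

    auto result = std::make_shared<ov::op::v0::Result>(relu);
    return std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{input});
}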
ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 2, 3, 1})); + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 2, 3, 1})); std::vector outFormShapes2 = {1, 1932}; auto pattern2 = - std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{2}, outFormShapes2); - auto reshape2 = std::make_shared(permute2, pattern2, false); + std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{2}, outFormShapes2); + auto reshape2 = std::make_shared(permute2, pattern2, false); - ngraph::ResultVector results{std::make_shared(reshape2)}; + ngraph::ResultVector results{std::make_shared(reshape2)}; function = std::make_shared(results, params, "ExportImportNetwork"); }; }; diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/4d_eltwise.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/4d_eltwise.cpp index 5fb77c679a3451..c3ade845a12ba6 100644 --- a/src/plugins/intel_gna/tests/functional/pass_tests/4d_eltwise.cpp +++ b/src/plugins/intel_gna/tests/functional/pass_tests/4d_eltwise.cpp @@ -57,18 +57,18 @@ class Eltwise4dBroadcast : public testing::WithParamInterface, pu std::vector outFormShapes1 = {1, 1, 6, 12}; auto pattern1 = - std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{4}, outFormShapes1); - auto reshape1 = std::make_shared(params[0], pattern1, false); + std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{4}, outFormShapes1); + auto reshape1 = std::make_shared(params[0], pattern1, false); auto constant1 = ngraph::builder::makeConstant(ngPrc, {1, 1, 1, 12}, {}, true); auto eltwise = ngraph::builder::makeEltwise(reshape1, constant1, eltwiseType); std::vector outFormShapes2 = {1, 72}; auto pattern2 = - std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{2}, outFormShapes2); - auto reshape2 = std::make_shared(eltwise, pattern2, false); + std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{2}, outFormShapes2); + auto reshape2 = std::make_shared(eltwise, pattern2, false); - ngraph::ResultVector results{std::make_shared(reshape2)}; + ngraph::ResultVector results{std::make_shared(reshape2)}; function = std::make_shared(results, params, "Eltwise4dBroadcast"); } }; @@ -106,19 +106,19 @@ class Eltwise4dMultipleInput : public testing::WithParamInterface std::make_shared(ngPrc, ov::Shape{1, 72})}; std::vector outFormShapes1 = {1, 1, 6, 12}; auto pattern1 = - std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{4}, outFormShapes1); - auto reshape1 = std::make_shared(params[0], pattern1, false); + std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{4}, outFormShapes1); + auto reshape1 = std::make_shared(params[0], pattern1, false); - auto reshape2 = std::make_shared(params[1], pattern1, false); + auto reshape2 = std::make_shared(params[1], pattern1, false); auto eltwise = ngraph::builder::makeEltwise(reshape1, reshape2, eltwiseType); std::vector outFormShapes2 = {1, 72}; auto pattern2 = - std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{2}, outFormShapes2); - auto reshape3 = std::make_shared(eltwise, pattern2, false); + std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{2}, outFormShapes2); + auto reshape3 = std::make_shared(eltwise, pattern2, false); - ngraph::ResultVector results{std::make_shared(reshape3)}; + ngraph::ResultVector results{std::make_shared(reshape3)}; function = std::make_shared(results, params, "Eltwise4dMultipleInput"); } }; diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/act_maxpool_reordering.cpp 
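// Illustrative sketch of the reshape/transpose bracketing used in the reshape-permute-conv test
// above: flat input -> 4D -> NCHW permute (0,3,1,2) for the convolution, then the inverse permute
// (0,2,3,1) and a reshape back to 2D. The convolution itself is elided; helper name and shapes
// are illustrative.
#include <memory>

#include "openvino/core/model.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/reshape.hpp"
#include "openvino/op/result.hpp"
#include "openvino/op/transpose.hpp"

static std::shared_ptr<ov::Model> make_permute_bracketing_sketch() {
    auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 336});

    // Reshape the flat input to NHWC, then permute to NCHW with an explicit order constant.
    auto to_4d = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{4}, {1, 1, 168, 2});
    auto reshape = std::make_shared<ov::op::v1::Reshape>(input, to_4d, false);
    auto nchw_order = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{4}, {0, 3, 1, 2});
    auto to_nchw = std::make_shared<ov::op::v1::Transpose>(reshape, nchw_order);

    // ... the convolution would run here in NCHW layout ...

    auto nhwc_order = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{4}, {0, 2, 3, 1});
    auto to_nhwc = std::make_shared<ov::op::v1::Transpose>(to_nchw, nhwc_order);
    auto to_2d = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{2}, {1, 336});
    auto back = std::make_shared<ov::op::v1::Reshape>(to_nhwc, to_2d, false);

    auto result = std::make_shared<ov::op::v0::Result>(back);
    return std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{input});
}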
b/src/plugins/intel_gna/tests/functional/pass_tests/act_maxpool_reordering.cpp index b92438407d7289..2c95884493e754 100644 --- a/src/plugins/intel_gna/tests/functional/pass_tests/act_maxpool_reordering.cpp +++ b/src/plugins/intel_gna/tests/functional/pass_tests/act_maxpool_reordering.cpp @@ -101,7 +101,7 @@ class ActMaxpoolReordering : public testing::WithParamInterface(maxpool)}; + ngraph::ResultVector results{std::make_shared(maxpool)}; function = std::make_shared(results, inputVector, "ActMaxpoolReordering"); } }; diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/broadcast_const_with_fq.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/broadcast_const_with_fq.cpp index b28926831e5657..46fb55bf594f90 100644 --- a/src/plugins/intel_gna/tests/functional/pass_tests/broadcast_const_with_fq.cpp +++ b/src/plugins/intel_gna/tests/functional/pass_tests/broadcast_const_with_fq.cpp @@ -61,8 +61,8 @@ class BroadcastConstWithFq : public testing::WithParamInterface(ngPrc, inputShape2, {}, true); auto fakeQuantize2 = ngraph::builder::makeFakeQuantize(constant, ngPrc, level, {}, {-0.5}, {0.5}, {-0.5}, {0.5}); - auto add = std::make_shared(fakeQuantize1, fakeQuantize2); - ngraph::ResultVector results{std::make_shared(add)}; + auto add = std::make_shared(fakeQuantize1, fakeQuantize2); + ngraph::ResultVector results{std::make_shared(add)}; function = std::make_shared(results, params, "BroadcastConstWithFq"); } }; diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/concat_memory_param.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/concat_memory_param.cpp index ce3945dc849723..0f182484e33dbb 100644 --- a/src/plugins/intel_gna/tests/functional/pass_tests/concat_memory_param.cpp +++ b/src/plugins/intel_gna/tests/functional/pass_tests/concat_memory_param.cpp @@ -11,6 +11,7 @@ #include "common_test_utils/common_utils.hpp" #include "functional_test_utils/blob_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" +#include "openvino/opsets/opset9.hpp" #include "ov_models/builders.hpp" #include "ov_models/pass/convert_prc.hpp" #include "ov_models/utils/ov_helpers.hpp" @@ -68,8 +69,8 @@ class ConcatMemoryTest : public testing::WithParamInterface(ng_prc, ov::Shape{1, in_total_dims_size})}; auto reshape_pattern = - std::make_shared(ov::element::Type_t::i64, ov::Shape{2}, input_shape); - auto reshape = std::make_shared(params[0], reshape_pattern, false); + std::make_shared(ov::element::Type_t::i64, ov::Shape{2}, input_shape); + auto reshape = std::make_shared(params[0], reshape_pattern, false); ov::op::util::VariableInfo vi{}; vi.data_shape = ov::PartialShape(input_shape); @@ -78,7 +79,7 @@ class ConcatMemoryTest : public testing::WithParamInterface(vi); std::vector initial_state = ov::test::utils::generate_float_numbers(in_total_dims_size, -3.f, 3.f); auto initial_state_node = ngraph::builder::makeConstant(ov::element::Type_t::f32, input_shape, initial_state); - auto readValue = std::make_shared(initial_state_node, var); + auto readValue = std::make_shared(initial_state_node, var); const int axis = 1; ov::OutputVector to_concat{readValue, reshape}; @@ -89,14 +90,14 @@ class ConcatMemoryTest : public testing::WithParamInterface(concat, etlwise_node); + auto etlwise_result_node = std::make_shared(concat, etlwise_node); - ov::ResultVector results{std::make_shared(etlwise_result_node)}; + ov::ResultVector results{std::make_shared(etlwise_result_node)}; auto split_axis_op = std::make_shared(ov::element::Type_t::i64, ov::Shape{}, std::vector{axis}); auto split_node = 
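// Illustrative sketch of the Variable-based state used by the concat/memory test above: opset6
// ReadValue/Assign bound to one ov::op::util::Variable, with the Assign registered as a model
// sink. Helper name, shapes and the variable id are illustrative.
#include <memory>

#include "openvino/core/model.hpp"
#include "openvino/op/assign.hpp"
#include "openvino/op/concat.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/read_value.hpp"
#include "openvino/op/result.hpp"
#include "openvino/op/util/variable.hpp"

static std::shared_ptr<ov::Model> make_variable_state_sketch() {
    const ov::Shape shape{1, 64};
    auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, shape);

    // opset6 state ops are tied to a Variable object instead of a raw string id.
    auto var = std::make_shared<ov::op::util::Variable>(
        ov::op::util::VariableInfo{ov::PartialShape(shape), ov::element::f32, "concat_state"});
    auto init = ov::op::v0::Constant::create(ov::element::f32, shape, {0.0f});
    auto read = std::make_shared<ov::op::v6::ReadValue>(init, var);

    auto concat = std::make_shared<ov::op::v0::Concat>(ov::OutputVector{read, input}, 1);
    auto assign = std::make_shared<ov::op::v6::Assign>(input, var);

    auto result = std::make_shared<ov::op::v0::Result>(concat);
    // Assign nodes are registered as sinks so they survive even without data consumers.
    return std::make_shared<ov::Model>(ov::ResultVector{result}, ov::SinkVector{assign}, ov::ParameterVector{input});
}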
std::make_shared(concat, split_axis_op, 2); - auto assign_node = std::make_shared(split_node->output(1), var); + auto assign_node = std::make_shared(split_node->output(1), var); ngraph::SinkVector sinks{assign_node}; function = std::make_shared(results, sinks, params); } diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/concat_restrictions.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/concat_restrictions.cpp index f749287d38ee7c..f261b5a5e9cda2 100644 --- a/src/plugins/intel_gna/tests/functional/pass_tests/concat_restrictions.cpp +++ b/src/plugins/intel_gna/tests/functional/pass_tests/concat_restrictions.cpp @@ -64,7 +64,7 @@ struct ReLUConcatAxis { concatInputs.push_back(constNode); auto concat = std::make_shared(concatInputs, axis); - ov::ResultVector results{std::make_shared(concat)}; + ov::ResultVector results{std::make_shared(concat)}; return std::make_shared(results, params, getName()); } static const char* getMatch() { @@ -110,7 +110,7 @@ struct MatmulConcatAxis { concatInputs.push_back(matmul2); auto concat = std::make_shared(concatInputs, axis); - ov::ResultVector results{std::make_shared(concat)}; + ov::ResultVector results{std::make_shared(concat)}; return std::make_shared(results, params, getName()); } static const char* getMatch() { @@ -152,7 +152,7 @@ struct ConvNCHWConcatAxis { concatInputs.push_back(constNode); auto concat = std::make_shared(concatInputs, axis); - ov::ResultVector results{std::make_shared(concat)}; + ov::ResultVector results{std::make_shared(concat)}; return std::make_shared(results, params, getName()); } static const char* getMatch() { @@ -171,7 +171,7 @@ struct ConvNHWCConcatAxis { ov::OutputVector concatInputs; ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - auto transposeInOrder = ov::opset10::Constant::create(ov::element::i64, ov::Shape{4}, {0, 3, 1, 2}); + auto transposeInOrder = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{4}, {0, 3, 1, 2}); auto transposeIn = std::make_shared(params[0], transposeInOrder); size_t numOutChannels = 8; size_t kernelSize = 1; @@ -188,7 +188,7 @@ struct ConvNHWCConcatAxis { numOutChannels, true, filterWeights); - auto transposeOutOrder = ov::opset10::Constant::create(ov::element::i64, ov::Shape{4}, {0, 2, 3, 1}); + auto transposeOutOrder = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{4}, {0, 2, 3, 1}); auto transposeOut = std::make_shared(conv, transposeOutOrder); concatInputs.push_back(transposeOut); @@ -198,7 +198,7 @@ struct ConvNHWCConcatAxis { concatInputs.push_back(constNode); auto concat = std::make_shared(concatInputs, axis); - ov::ResultVector results{std::make_shared(concat)}; + ov::ResultVector results{std::make_shared(concat)}; return std::make_shared(results, params, getName()); } static const char* getMatch() { @@ -217,7 +217,7 @@ struct ConvConcatNHWCAxis { ov::OutputVector concatInputs; ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - auto transposeInOrder = ov::opset10::Constant::create(ov::element::i64, ov::Shape{4}, {0, 3, 1, 2}); + auto transposeInOrder = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{4}, {0, 3, 1, 2}); auto transposeIn1 = std::make_shared(params[0], transposeInOrder); auto transposeIn2 = std::make_shared(params[0], transposeInOrder); size_t numOutChannels = 8; @@ -253,10 +253,10 @@ struct ConvConcatNHWCAxis { concatInputs.push_back(conv2); auto concat = std::make_shared(concatInputs, axis); - auto transposeOutOrder = ov::opset10::Constant::create(ov::element::i64, 
ov::Shape{4}, {0, 2, 3, 1}); + auto transposeOutOrder = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{4}, {0, 2, 3, 1}); auto transposeOut = std::make_shared(concat, transposeOutOrder); - ov::ResultVector results{std::make_shared(transposeOut)}; + ov::ResultVector results{std::make_shared(transposeOut)}; return std::make_shared(results, params, getName()); } static const char* getMatch() { @@ -275,7 +275,7 @@ struct ConvConcatConcatNHWCAxis { ov::OutputVector concat1Inputs, concat2Inputs; ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - auto transposeInOrder = ov::opset10::Constant::create(ov::element::i64, ov::Shape{4}, {0, 3, 1, 2}); + auto transposeInOrder = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{4}, {0, 3, 1, 2}); auto transposeIn1 = std::make_shared(params[0], transposeInOrder); auto transposeIn2 = std::make_shared(params[0], transposeInOrder); size_t numOutChannels = 64; @@ -307,7 +307,7 @@ struct ConvConcatConcatNHWCAxis { true, filterWeights2); - auto transposeOutOrder = ov::opset10::Constant::create(ov::element::i64, ov::Shape{4}, {0, 2, 3, 1}); + auto transposeOutOrder = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{4}, {0, 2, 3, 1}); auto transposeOut1 = std::make_shared(conv1, transposeOutOrder); auto transposeOut2 = std::make_shared(conv2, transposeOutOrder); @@ -316,7 +316,7 @@ struct ConvConcatConcatNHWCAxis { auto concat1 = std::make_shared(concat1Inputs, 2); auto squeeze = std::make_shared( concat1, - ov::opset10::Constant::create(ov::element::i64, ov::Shape{2}, {0, 1})); + ov::op::v0::Constant::create(ov::element::i64, ov::Shape{2}, {0, 1})); size_t totalSize = ov::shape_size(squeeze->get_shape()); auto constValues = ov::test::utils::generate_float_numbers(totalSize, -0.0001f, 0.0001f); @@ -327,12 +327,12 @@ struct ConvConcatConcatNHWCAxis { auto concat2 = std::make_shared(concat2Inputs, axis); auto reshape = std::make_shared( concat2, - ov::opset10::Constant::create(ov::element::i64, - ov::Shape{2}, - ov::Shape{1, shape_size(concat2->get_shape())}), + ov::op::v0::Constant::create(ov::element::i64, + ov::Shape{2}, + ov::Shape{1, shape_size(concat2->get_shape())}), false); - ov::ResultVector results{std::make_shared(reshape)}; + ov::ResultVector results{std::make_shared(reshape)}; return std::make_shared(results, params, getName()); } static const char* getMatch() { diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/conv_with_padding.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/conv_with_padding.cpp index 999dd3574126f7..fc77b4ab6f0515 100644 --- a/src/plugins/intel_gna/tests/functional/pass_tests/conv_with_padding.cpp +++ b/src/plugins/intel_gna/tests/functional/pass_tests/conv_with_padding.cpp @@ -70,16 +70,16 @@ class ConvWithPadding : public testing::WithParamInterface(ng_precision, ngraph::Shape{input_shape}); + auto input = std::make_shared(ng_precision, ngraph::Shape{input_shape}); auto filter = ngraph::builder::makeConstant(ng_precision, filter_shape, {1.f}); - auto conv = std::make_shared(input, - filter, - ov::Strides{1, 1}, - padding_size, - padding_size, - ov::Strides{}); - - auto res = std::make_shared(conv); + auto conv = std::make_shared(input, + filter, + ov::Strides{1, 1}, + padding_size, + padding_size, + ov::Strides{}); + + auto res = std::make_shared(conv); function = std::make_shared(ngraph::ResultVector{res}, ngraph::ParameterVector{input}); } }; diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/convert_dwsc_to_scaleshifts.cpp 
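// Illustrative sketch of the explicit opset1 Convolution signature the conv-with-padding test
// moves to: strides, pads_begin/pads_end as CoordinateDiff, then dilations. Helper name, shapes
// and padding values are illustrative.
#include <memory>

#include "openvino/core/model.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/convolution.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/result.hpp"

static std::shared_ptr<ov::Model> make_padded_conv_sketch() {
    auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 8, 1, 16});
    auto filters = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{8, 8, 1, 3}, {1.0f});

    // Symmetric padding of one pixel along W keeps the spatial size with a 1x3 kernel.
    auto conv = std::make_shared<ov::op::v1::Convolution>(input,
                                                          filters,
                                                          ov::Strides{1, 1},
                                                          ov::CoordinateDiff{0, 1},
                                                          ov::CoordinateDiff{0, 1},
                                                          ov::Strides{1, 1});
    auto result = std::make_shared<ov::op::v0::Result>(conv);
    return std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{input});
}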
b/src/plugins/intel_gna/tests/functional/pass_tests/convert_dwsc_to_scaleshifts.cpp index 33446d1577f417..fb7843e04706d3 100644 --- a/src/plugins/intel_gna/tests/functional/pass_tests/convert_dwsc_to_scaleshifts.cpp +++ b/src/plugins/intel_gna/tests/functional/pass_tests/convert_dwsc_to_scaleshifts.cpp @@ -17,7 +17,7 @@ #include "transformations/init_node_info.hpp" using namespace ngraph; -using namespace ngraph::opset7; +using namespace ov::opset1; namespace LayerTestsDefinitions { @@ -99,7 +99,7 @@ class DWSCToScaleShiftsTest : public testing::WithParamInterface(ngPrc, ov::Shape(inputShape))}; - auto transposeInOrder = op::Constant::create(element::i64, Shape{4}, {0, 3, 1, 2}); + auto transposeInOrder = Constant::create(element::i64, Shape{4}, {0, 3, 1, 2}); auto transposeIn = std::make_shared(input[0], transposeInOrder); auto filterSize = std::accumulate(std::begin(filter), std::end(filter), 1ull, std::multiplies()); auto filterWeights = @@ -118,7 +118,7 @@ class DWSCToScaleShiftsTest : public testing::WithParamInterface(dwsc, transposeOutOrder); if (model == modelType::TranspDWSCBiasTransp) { diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/convert_matmul_to_fullyconnected.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/convert_matmul_to_fullyconnected.cpp index 65db77afeaa207..08c81e80862766 100644 --- a/src/plugins/intel_gna/tests/functional/pass_tests/convert_matmul_to_fullyconnected.cpp +++ b/src/plugins/intel_gna/tests/functional/pass_tests/convert_matmul_to_fullyconnected.cpp @@ -67,9 +67,9 @@ class ConvertMatmulToFcPass : public testing::WithParamInterface(ngPrc, inputShape[0], weights); auto const_eltwise = ngraph::builder::makeConstant(ngPrc, {inputShape[0][0], inputShape[1][1]}, {1.0f}); - auto matmul = std::make_shared(const_mult2, params[0], false, false); + auto matmul = std::make_shared(const_mult2, params[0], false, false); - auto eltwise = std::make_shared(matmul, const_eltwise); + auto eltwise = std::make_shared(matmul, const_eltwise); function = std::make_shared(eltwise, params, "ConvertMatmulToFC"); } }; @@ -116,26 +116,26 @@ class ConvertMatmulToFcWithTransposesPass : public testing::WithParamInterface(ngPrc, ov::Shape({1, inputShape[1][0] * inputShape[1][1]}))}; - auto reshape1 = std::make_shared( + auto reshape1 = std::make_shared( params[0], ngraph::builder::makeConstant(ngraph::element::i64, {inputShape[1].size()}, inputShape[1]), false); - auto transpose1 = std::make_shared( + auto transpose1 = std::make_shared( reshape1, - ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{2}, std::vector{1, 0})); + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{2}, std::vector{1, 0})); std::vector weights = ov::test::utils::generate_float_numbers(inputShape[0][0] * inputShape[0][1], -0.1f, 0.1f); auto const_mult2 = ngraph::builder::makeConstant(ngPrc, inputShape[0], weights); - auto matmul = std::make_shared(const_mult2, transpose1, false, false); - auto relu = std::make_shared(matmul); + auto matmul = std::make_shared(const_mult2, transpose1, false, false); + auto relu = std::make_shared(matmul); - auto transpose2 = std::make_shared( + auto transpose2 = std::make_shared( relu, - ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{2}, std::vector{1, 0})); + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{2}, std::vector{1, 0})); auto transpose_output_shape = transpose2->get_output_shape(0); ngraph::Shape output_shape = {1, transpose_output_shape[0] * transpose_output_shape[1]}; - auto 
reshape2 = std::make_shared( + auto reshape2 = std::make_shared( transpose2, ngraph::builder::makeConstant(ngraph::element::i64, {output_shape.size()}, output_shape), false); diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/convert_matmul_to_pointwise_conv.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/convert_matmul_to_pointwise_conv.cpp index 85dc52b42f1fe2..702bfbb628c2ec 100644 --- a/src/plugins/intel_gna/tests/functional/pass_tests/convert_matmul_to_pointwise_conv.cpp +++ b/src/plugins/intel_gna/tests/functional/pass_tests/convert_matmul_to_pointwise_conv.cpp @@ -13,6 +13,7 @@ #include "common_test_utils/common_utils.hpp" #include "functional_test_utils/blob_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" +#include "openvino/opsets/opset7.hpp" #include "ov_models/builders.hpp" #include "ov_models/pass/convert_prc.hpp" #include "ov_models/utils/ov_helpers.hpp" @@ -79,19 +80,19 @@ class ConvertMatmulToPointwiseConv : public testing::WithParamInterface weights = ov::test::utils::generate_float_numbers(elemNum * elemNum, -0.1f, 0.1f); - auto weightsNode = std::make_shared(ngPrc, ngraph::Shape{elemNum, elemNum}, weights); + auto weightsNode = std::make_shared(ngPrc, ngraph::Shape{elemNum, elemNum}, weights); auto matmul = std::make_shared(params[0], weightsNode, false, true); auto bias = ngraph::builder::makeConstant(ngPrc, std::vector{1, batch, 1}, std::vector{1.0f}); auto add = ngraph::builder::makeEltwise(matmul, bias, ngraph::helpers::EltwiseTypes::ADD); - auto pattern = std::make_shared(ngraph::element::Type_t::i64, - ngraph::Shape{inputShape.size()}, - inputShape); - auto reshape = std::make_shared(matmul, pattern, false); - auto relu = std::make_shared(reshape); + auto pattern = std::make_shared(ngraph::element::Type_t::i64, + ngraph::Shape{inputShape.size()}, + inputShape); + auto reshape = std::make_shared(matmul, pattern, false); + auto relu = std::make_shared(reshape); - ngraph::ResultVector results{std::make_shared(relu)}; + ngraph::ResultVector results{std::make_shared(relu)}; function = std::make_shared(results, params, "ConvertMatmulToPointwiseConv"); } }; @@ -145,29 +146,29 @@ class ConvertMatmulToPointwiseConvWithFqNeg ngraph::builder::makeConstant(ngPrc, std::vector{1}, std::vector{inputDataMin}); auto inputHighNode = ngraph::builder::makeConstant(ngPrc, std::vector{1}, std::vector{inputDataMax}); - auto inputFQ = std::make_shared(params[0], - inputLowNode, - inputHighNode, - inputLowNode, - inputHighNode, - UINT16_MAX); + auto inputFQ = std::make_shared(params[0], + inputLowNode, + inputHighNode, + inputLowNode, + inputHighNode, + UINT16_MAX); size_t elemNum = inputShape[inputShape.size() - 1]; const float weightsMin = -0.2f; const float weightsMax = 0.2f; std::vector weights = ov::test::utils::generate_float_numbers(elemNum * elemNum, weightsMin, weightsMax); - auto weightsNode = std::make_shared(ngPrc, ngraph::Shape{elemNum, elemNum}, weights); + auto weightsNode = std::make_shared(ngPrc, ngraph::Shape{elemNum, elemNum}, weights); auto weightsLowNode = ngraph::builder::makeConstant(ngPrc, std::vector{1}, std::vector{weightsMin}); auto weightsHighNode = ngraph::builder::makeConstant(ngPrc, std::vector{1}, std::vector{weightsMax}); - auto weightsFQNode = std::make_shared(weightsNode, - weightsLowNode, - weightsHighNode, - weightsLowNode, - weightsHighNode, - UINT16_MAX); + auto weightsFQNode = std::make_shared(weightsNode, + weightsLowNode, + weightsHighNode, + weightsLowNode, + weightsHighNode, + UINT16_MAX); auto matmul = 
std::make_shared(inputFQ, weightsFQNode, false, true); auto bias = ngraph::builder::makeConstant(ngPrc, std::vector{1, 1, 1}, std::vector{1.0f}); @@ -179,21 +180,21 @@ class ConvertMatmulToPointwiseConvWithFqNeg auto outputHighNode = ngraph::builder::makeConstant(ngPrc, std::vector{1}, std::vector{inputDataMax * weightsMax * elemNum}); - auto outputFQ = std::make_shared(add, - outputLowNode, - outputHighNode, - outputLowNode, - outputHighNode, - UINT16_MAX); + auto outputFQ = std::make_shared(add, + outputLowNode, + outputHighNode, + outputLowNode, + outputHighNode, + UINT16_MAX); - auto pattern = std::make_shared(ngraph::element::Type_t::i64, - ngraph::Shape{inputShape.size()}, - inputShape); - auto reshape = std::make_shared(outputFQ, pattern, false); + auto pattern = std::make_shared(ngraph::element::Type_t::i64, + ngraph::Shape{inputShape.size()}, + inputShape); + auto reshape = std::make_shared(outputFQ, pattern, false); - auto relu = std::make_shared(reshape); + auto relu = std::make_shared(reshape); - ngraph::ResultVector results{std::make_shared(relu)}; + ngraph::ResultVector results{std::make_shared(relu)}; function = std::make_shared(results, params, "ConvertMatmulToPointwiseConv"); } }; diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/convert_padded_to_valid_conv.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/convert_padded_to_valid_conv.cpp index b862a60f2d7a91..905ac67d98ab34 100644 --- a/src/plugins/intel_gna/tests/functional/pass_tests/convert_padded_to_valid_conv.cpp +++ b/src/plugins/intel_gna/tests/functional/pass_tests/convert_padded_to_valid_conv.cpp @@ -13,6 +13,7 @@ #include "../shared_tests_instances/skip_tests_check.hpp" #include "common_test_utils/test_common.hpp" +#include "ngraph/opsets/opset7.hpp" #include "ov_models/builders.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "transformations/init_node_info.hpp" diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/convolution_align_filter.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/convolution_align_filter.cpp index 65a23654f87909..0e23c8954d4240 100644 --- a/src/plugins/intel_gna/tests/functional/pass_tests/convolution_align_filter.cpp +++ b/src/plugins/intel_gna/tests/functional/pass_tests/convolution_align_filter.cpp @@ -11,6 +11,7 @@ #include "common_test_utils/common_utils.hpp" #include "functional_test_utils/blob_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" +#include "openvino/opsets/opset8.hpp" #include "ov_models/builders.hpp" #include "ov_models/pass/convert_prc.hpp" #include "ov_models/utils/ov_helpers.hpp" @@ -68,22 +69,22 @@ class ConvolutionAlignFilterTest : public testing::WithParamInterface()); ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape{1, in_total_dims_size})}; auto pattern1 = - std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{2}, splitInputShape); - auto reshape1 = std::make_shared(params[0], pattern1, false); + std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{2}, splitInputShape); + auto reshape1 = std::make_shared(params[0], pattern1, false); OPENVINO_SUPPRESS_DEPRECATED_START auto split = ngraph::builder::makeSplit(reshape1, ngPrc, 2, 0); OPENVINO_SUPPRESS_DEPRECATED_END - auto relu1 = std::make_shared(split->output(0)); - auto relu2 = std::make_shared(split->output(1)); + auto relu1 = std::make_shared(split->output(0)); + auto relu2 = std::make_shared(split->output(1)); - auto concat = std::make_shared(ngraph::OutputVector{relu1, relu2}, 0); - auto pattern2 = 
std::make_shared(ngraph::element::Type_t::i64, - ngraph::Shape{2}, - ngraph::Shape{1, in_total_dims_size}); - auto reshape2 = std::make_shared(concat, pattern2, false); + auto concat = std::make_shared(ngraph::OutputVector{relu1, relu2}, 0); + auto pattern2 = std::make_shared(ngraph::element::Type_t::i64, + ngraph::Shape{2}, + ngraph::Shape{1, in_total_dims_size}); + auto reshape2 = std::make_shared(concat, pattern2, false); - ngraph::ResultVector results{std::make_shared(reshape2)}; + ngraph::ResultVector results{std::make_shared(reshape2)}; function = std::make_shared(results, params, "ConvAlignFilter"); functionRefs = ngraph::clone_function(*function); } diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/convolution_crop_axis_h.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/convolution_crop_axis_h.cpp index e415b69e7588f9..936b7a9af9f35e 100644 --- a/src/plugins/intel_gna/tests/functional/pass_tests/convolution_crop_axis_h.cpp +++ b/src/plugins/intel_gna/tests/functional/pass_tests/convolution_crop_axis_h.cpp @@ -55,7 +55,7 @@ class CropAfterConvolutionTest : public testing::WithParamInterface, auto reshape_pattern_size = ngraph::Shape{inputShape.size()}; auto reshape_pattern = ngraph::builder::makeConstant(ov::element::i64, reshape_pattern_size, inputShape); ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - auto input_reshape = std::make_shared(params[0], reshape_pattern, false); + auto input_reshape = std::make_shared(params[0], reshape_pattern, false); const std::vector filterSize{1, 1}; const std::vector strides{1, 1}; @@ -102,7 +102,7 @@ class CropAfterConvolutionTest : public testing::WithParamInterface, numOutChannels, false, weights2_values); - ngraph::ResultVector results{std::make_shared(convolution_node2)}; + ngraph::ResultVector results{std::make_shared(convolution_node2)}; function = std::make_shared(results, params, "CropAfterConvolutionTest"); } }; diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/decompose_2d_conv.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/decompose_2d_conv.cpp index a61d23b14a0a2d..bff69b08bcc3f9 100644 --- a/src/plugins/intel_gna/tests/functional/pass_tests/decompose_2d_conv.cpp +++ b/src/plugins/intel_gna/tests/functional/pass_tests/decompose_2d_conv.cpp @@ -18,7 +18,7 @@ #include "transformations/init_node_info.hpp" using namespace ngraph; -using namespace opset1; +using namespace ov::opset1; namespace LayerTestsDefinitions { @@ -130,7 +130,7 @@ class Decompose2DConvTest : public testing::WithParamInterface(ngPrc, ov::Shape(inputShape))}; - auto transposeInOrder = opset7::Constant::create(element::i64, Shape{4}, {0, 3, 1, 2}); + auto transposeInOrder = ov::op::v0::Constant::create(element::i64, Shape{4}, {0, 3, 1, 2}); auto transposeIn = std::make_shared(input[0], transposeInOrder); auto filterSize = std::accumulate(std::begin(kernel), std::end(kernel), 1ull, std::multiplies()); auto filterWeights = @@ -146,7 +146,7 @@ class Decompose2DConvTest : public testing::WithParamInterface biasConst = std::make_shared(ngPrc, biasShape, biasWeights); Output lastOp = std::make_shared(conv, transposeOutOrder); diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/decompose_mvn.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/decompose_mvn.cpp index 07c99e7a23df6e..7ad1d05ea4cad3 100644 --- a/src/plugins/intel_gna/tests/functional/pass_tests/decompose_mvn.cpp +++ b/src/plugins/intel_gna/tests/functional/pass_tests/decompose_mvn.cpp @@ -12,6 +12,8 @@ #include #include 
"common_test_utils/test_common.hpp" +#include "ngraph/opsets/opset2.hpp" +#include "ngraph/opsets/opset8.hpp" #include "ov_models/builders.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "transformations/init_node_info.hpp" diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/diagonal_insertion_test.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/diagonal_insertion_test.cpp index c92aad2e8a11e3..cc2b903ceb2a8c 100644 --- a/src/plugins/intel_gna/tests/functional/pass_tests/diagonal_insertion_test.cpp +++ b/src/plugins/intel_gna/tests/functional/pass_tests/diagonal_insertion_test.cpp @@ -24,7 +24,6 @@ using namespace ngraph; using namespace ngraph::builder; using namespace ngraph::element; using namespace ngraph::op; -using namespace ngraph::opset9; using namespace std; using DiagonalInsertionTestParams = tuple, // Configuration @@ -68,25 +67,26 @@ class DiagonalInsertionTest : public testing::WithParamInterface(type, ov::Shape(shapes))}; } - shared_ptr CreateFQNode(const Type& type, - const shared_ptr& node, - float fq_min, - float fq_max, - std::size_t levels) { + shared_ptr CreateFQNode(const Type& type, + const shared_ptr& node, + float fq_min, + float fq_max, + std::size_t levels) { // auto fq_inp_min = makeConstant(type, {1}, {fq_min}); auto fq_inp_max = makeConstant(type, {1}, {fq_max}); auto fq_out_min = makeConstant(type, {1}, {fq_min}); auto fq_out_max = makeConstant(type, {1}, {fq_max}); - return make_shared(node, fq_inp_min, fq_inp_max, fq_out_min, fq_out_max, levels); + return make_shared(node, fq_inp_min, fq_inp_max, fq_out_min, fq_out_max, levels); } - std::shared_ptr CreateReshapeNode(element::Type in_type, - shared_ptr input_node, - std::vector target_shape_vect) { + std::shared_ptr CreateReshapeNode(element::Type in_type, + shared_ptr input_node, + std::vector target_shape_vect) { // - const auto target_shape_const = Constant::create(in_type, Shape{target_shape_vect.size()}, target_shape_vect); - return std::make_shared(input_node, target_shape_const, false); + const auto target_shape_const = + ov::op::v0::Constant::create(in_type, Shape{target_shape_vect.size()}, target_shape_vect); + return std::make_shared(input_node, target_shape_const, false); } bool IsDebugEnabled(map& configuration) { @@ -143,10 +143,10 @@ class DiagonalInsertionTest : public testing::WithParamInterface(precision, {height}, {}, true); auto add_const_fq = CreateFQNode(precision, add_const, fq_min_max[3][0], fq_min_max[3][1], fq_levels); - auto add = make_shared(add_const_fq, add_mm_reshape); + auto add = make_shared(add_const_fq, add_mm_reshape); auto add_fq = CreateFQNode(precision, add, fq_min_max[4][0], fq_min_max[4][1], fq_levels); - auto relu = make_shared(add_fq); + auto relu = make_shared(add_fq); function = make_shared(relu, input_vect, "DiagonalInsertion"); } diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/fq_activation.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/fq_activation.cpp index b672884319a7fa..5527be1a36dc68 100644 --- a/src/plugins/intel_gna/tests/functional/pass_tests/fq_activation.cpp +++ b/src/plugins/intel_gna/tests/functional/pass_tests/fq_activation.cpp @@ -77,23 +77,23 @@ class FQActivation : public testing::WithParamInterface, pub auto inputHighNode = ngraph::builder::makeConstant(ngPrc, {1}, {inputMinMax.second}); ov::ParameterVector inputVector{std::make_shared(ngPrc, ov::Shape(inputShape))}; - auto inputFQNode = std::make_shared(inputVector[0], - inputLowNode, - inputHighNode, - inputLowNode, - 
inputHighNode, - levels.first); + auto inputFQNode = std::make_shared(inputVector[0], + inputLowNode, + inputHighNode, + inputLowNode, + inputHighNode, + levels.first); auto relu = ngraph::builder::makeActivation(inputFQNode, ngraph::element::f32, ngraph::helpers::ActivationTypes::Relu); - auto reluFQNode = std::make_shared(relu, - inputLowNode, - inputHighNode, - inputLowNode, - inputHighNode, - levels.second); - - ngraph::ResultVector results{std::make_shared(reluFQNode)}; + auto reluFQNode = std::make_shared(relu, + inputLowNode, + inputHighNode, + inputLowNode, + inputHighNode, + levels.second); + + ngraph::ResultVector results{std::make_shared(reluFQNode)}; function = std::make_shared(results, inputVector, "FQActivation"); } }; diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/fq_fusion_with_multiple_weights.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/fq_fusion_with_multiple_weights.cpp index 168e411ddfe9f1..63be0f64a7316a 100644 --- a/src/plugins/intel_gna/tests/functional/pass_tests/fq_fusion_with_multiple_weights.cpp +++ b/src/plugins/intel_gna/tests/functional/pass_tests/fq_fusion_with_multiple_weights.cpp @@ -11,6 +11,7 @@ #include "common_test_utils/common_utils.hpp" #include "functional_test_utils/blob_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" +#include "openvino/opsets/opset7.hpp" #include "ov_models/builders.hpp" #include "ov_models/pass/convert_prc.hpp" #include "ov_models/utils/ov_helpers.hpp" @@ -75,31 +76,31 @@ class FQFusionWithMultipleWeights : public testing::WithParamInterface(ngPrc, {1}, {weightsMinMax.first * 2}); auto weightsHighNode = ngraph::builder::makeConstant(ngPrc, {1}, {weightsMinMax.second * 2}); - auto weightsFQ = std::make_shared(weights, - weightsLowNode, - weightsHighNode, - weightsLowNode, - weightsHighNode, - levels); - - auto conv1 = std::make_shared(params[0], - weightsFQ, - std::vector{1, 1}, - std::vector{0, 0}, - std::vector{0, 0}, - std::vector{1, 1}, - ngraph::op::PadType::VALID); - auto add1 = std::make_shared( + auto weightsFQ = std::make_shared(weights, + weightsLowNode, + weightsHighNode, + weightsLowNode, + weightsHighNode, + levels); + + auto conv1 = std::make_shared(params[0], + weightsFQ, + std::vector{1, 1}, + std::vector{0, 0}, + std::vector{0, 0}, + std::vector{1, 1}, + ngraph::op::PadType::VALID); + auto add1 = std::make_shared( conv1, ngraph::builder::makeConstant(ngPrc, {}, std::vector{0.0f})); - auto conv2 = std::make_shared(params[1], - weightsFQ, - std::vector{1, 1}, - std::vector{0, 0}, - std::vector{0, 0}, - std::vector{1, 1}, - ngraph::op::PadType::VALID); - auto add2 = std::make_shared( + auto conv2 = std::make_shared(params[1], + weightsFQ, + std::vector{1, 1}, + std::vector{0, 0}, + std::vector{0, 0}, + std::vector{1, 1}, + ngraph::op::PadType::VALID); + auto add2 = std::make_shared( conv2, ngraph::builder::makeConstant(ngPrc, {}, std::vector{0.0f})); @@ -107,22 +108,14 @@ class FQFusionWithMultipleWeights : public testing::WithParamInterface(ngPrc, {1}, {-weightsMinMax.second * kernelSize * 10.0f}); auto outHighNode = ngraph::builder::makeConstant(ngPrc, {1}, {weightsMinMax.second * kernelSize * 10.0f}); - auto fq1 = std::make_shared(add1, - outLowNode, - outHighNode, - outLowNode, - outHighNode, - levels); - auto fq2 = std::make_shared(add2, - outLowNode, - outHighNode, - outLowNode, - outHighNode, - levels); - - auto add3 = std::make_shared(fq1, fq2); - - ngraph::ResultVector results{std::make_shared(add3)}; + auto fq1 = + std::make_shared(add1, outLowNode, outHighNode, 
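// Illustrative sketch of the FakeQuantize-around-activation pattern in fq_activation above: the
// same range constants feed the input-side and output-side FQ, with the level count as the last
// constructor argument. Helper name, shapes and ranges are illustrative.
#include <limits>
#include <memory>

#include "openvino/core/model.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/fake_quantize.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/relu.hpp"
#include "openvino/op/result.hpp"

static std::shared_ptr<ov::Model> make_fq_activation_sketch() {
    auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 128});
    auto low = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{1}, {-10.0f});
    auto high = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{1}, {10.0f});

    // FakeQuantize takes input/output ranges plus the number of quantization levels.
    auto in_fq = std::make_shared<ov::op::v0::FakeQuantize>(input, low, high, low, high,
                                                            std::numeric_limits<uint16_t>::max());
    auto relu = std::make_shared<ov::op::v0::Relu>(in_fq);
    auto out_fq = std::make_shared<ov::op::v0::FakeQuantize>(relu, low, high, low, high,
                                                             std::numeric_limits<uint16_t>::max());

    auto result = std::make_shared<ov::op::v0::Result>(out_fq);
    return std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{input});
}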
outLowNode, outHighNode, levels); + auto fq2 = + std::make_shared(add2, outLowNode, outHighNode, outLowNode, outHighNode, levels); + + auto add3 = std::make_shared(fq1, fq2); + + ngraph::ResultVector results{std::make_shared(add3)}; function = std::make_shared(results, params, "FQFusionWithMultipleWeights"); } }; diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/fq_fusion_with_sigmoid.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/fq_fusion_with_sigmoid.cpp index 0222ac0bb7d487..3b3824c19bc9b4 100644 --- a/src/plugins/intel_gna/tests/functional/pass_tests/fq_fusion_with_sigmoid.cpp +++ b/src/plugins/intel_gna/tests/functional/pass_tests/fq_fusion_with_sigmoid.cpp @@ -35,7 +35,7 @@ class FqFusionWithSigmoidTest : public LayerTestsUtils::LayerTestsCommon, ov::ParameterVector input{std::make_shared(ngPrc, ov::Shape{1, inputSize})}; auto constant = ngraph::builder::makeConstant(ngPrc, {1, inputSize}, std::vector{1}); auto mul1 = ngraph::builder::makeEltwise(input[0], constant, ngraph::helpers::EltwiseTypes::ADD); - auto sigmoid1 = std::make_shared(mul1); + auto sigmoid1 = std::make_shared(mul1); auto mul2 = ngraph::builder::makeEltwise(input[0], sigmoid1, ngraph::helpers::EltwiseTypes::MULTIPLY); auto fake3 = ngraph::builder::makeFakeQuantize(sigmoid1, ngPrc, @@ -46,7 +46,7 @@ class FqFusionWithSigmoidTest : public LayerTestsUtils::LayerTestsCommon, {minMaxFq.first}, {minMaxFq.second}); auto mul3 = ngraph::builder::makeEltwise(mul2, fake3, ngraph::helpers::EltwiseTypes::ADD); - auto result = std::make_shared(mul3); + auto result = std::make_shared(mul3); function = std::make_shared(ngraph::ResultVector{result}, input, "fq_fusion_with_sigmoid"); } diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/fq_maxpool_reordering.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/fq_maxpool_reordering.cpp index f9a8204cedc00d..1af3289e17351a 100644 --- a/src/plugins/intel_gna/tests/functional/pass_tests/fq_maxpool_reordering.cpp +++ b/src/plugins/intel_gna/tests/functional/pass_tests/fq_maxpool_reordering.cpp @@ -94,12 +94,12 @@ class FQMaxpoolReordering : public testing::WithParamInterface(ngPrc, ov::Shape(inputShape))}; - auto inputFQ = std::make_shared(inputVector[0], - inputLowNode1, - inputHighNode1, - inputLowNode1, - inputHighNode1, - levels); + auto inputFQ = std::make_shared(inputVector[0], + inputLowNode1, + inputHighNode1, + inputLowNode1, + inputHighNode1, + levels); auto filterWeightsNode = ngraph::builder::makeConstant(ngPrc, {8, inputShape[1], 1, 8}, {1.0f}); auto convLowNode = ngraph::builder::makeConstant(ngraph::element::f32, @@ -108,30 +108,30 @@ class FQMaxpoolReordering : public testing::WithParamInterface{1}, std::vector{inputDataMax1 * 35}); - auto convWeightsFQNode = std::make_shared(filterWeightsNode, - convLowNode, - convHighNode, - convLowNode, - convHighNode, - levels); - auto convWeightsFQ = std::dynamic_pointer_cast(convWeightsFQNode); - - auto conv = std::make_shared(inputFQ, - convWeightsFQ, - std::vector{1, 1}, - std::vector{0, 0}, - std::vector{0, 0}, - std::vector{1, 1}, - ngraph::op::PadType::VALID); + auto convWeightsFQNode = std::make_shared(filterWeightsNode, + convLowNode, + convHighNode, + convLowNode, + convHighNode, + levels); + auto convWeightsFQ = std::dynamic_pointer_cast(convWeightsFQNode); + + auto conv = std::make_shared(inputFQ, + convWeightsFQ, + std::vector{1, 1}, + std::vector{0, 0}, + std::vector{0, 0}, + std::vector{1, 1}, + ngraph::op::PadType::VALID); auto biasesWeightsNode = 
ngraph::builder::makeConstant(ngPrc, {}, std::vector{0.0f}); - auto add = std::make_shared(conv, biasesWeightsNode); + auto add = std::make_shared(conv, biasesWeightsNode); - auto convFQNode = std::make_shared(add, - inputLowNode2, - inputHighNode2, - inputLowNode2, - inputHighNode2, - levels); + auto convFQNode = std::make_shared(add, + inputLowNode2, + inputHighNode2, + inputLowNode2, + inputHighNode2, + levels); std::shared_ptr node_before_pooling = convFQNode; if (reshape) { @@ -139,9 +139,9 @@ class FQMaxpoolReordering : public testing::WithParamInterface()); auto reshapeConst1 = ngraph::builder::makeConstant(ngraph::element::i64, std::vector{2}, ngraph::Shape{1, total}); - auto reshapeNode1 = std::make_shared(convFQNode, reshapeConst1, false); + auto reshapeNode1 = std::make_shared(convFQNode, reshapeConst1, false); auto reshapeConst2 = ngraph::builder::makeConstant(ngraph::element::i64, std::vector{4}, shape); - auto reshapeNode2 = std::make_shared(reshapeNode1, reshapeConst2, false); + auto reshapeNode2 = std::make_shared(reshapeNode1, reshapeConst2, false); node_before_pooling = reshapeNode2; } @@ -157,7 +157,7 @@ class FQMaxpoolReordering : public testing::WithParamInterface(maxpool)}; + ngraph::ResultVector results{std::make_shared(maxpool)}; function = std::make_shared(results, inputVector, "FQMaxPoolReorder"); } }; diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/fq_outputs_activation_.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/fq_outputs_activation_.cpp index d0fc0e250a4e8f..ea646f5e3dea00 100644 --- a/src/plugins/intel_gna/tests/functional/pass_tests/fq_outputs_activation_.cpp +++ b/src/plugins/intel_gna/tests/functional/pass_tests/fq_outputs_activation_.cpp @@ -89,13 +89,13 @@ class FQOutputsActivation : public testing::WithParamInterfaceoutput(i), ngraph::element::f32, ngraph::helpers::ActivationTypes::Sigmoid); - auto reluFQNode = std::make_shared(relu, - inputLowNode, - inputHighNode, - inputLowNode, - inputHighNode, - levels); - results.push_back(std::make_shared(reluFQNode)); + auto reluFQNode = std::make_shared(relu, + inputLowNode, + inputHighNode, + inputLowNode, + inputHighNode, + levels); + results.push_back(std::make_shared(reluFQNode)); } function = std::make_shared(results, inputVector, "FQOutputsActivation"); } diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/fq_with_multiple_out_connections.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/fq_with_multiple_out_connections.cpp index 0704d75983a55d..752b78acffd97c 100644 --- a/src/plugins/intel_gna/tests/functional/pass_tests/fq_with_multiple_out_connections.cpp +++ b/src/plugins/intel_gna/tests/functional/pass_tests/fq_with_multiple_out_connections.cpp @@ -11,6 +11,7 @@ #include "common_test_utils/common_utils.hpp" #include "functional_test_utils/blob_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" +#include "openvino/opsets/opset8.hpp" #include "ov_models/builders.hpp" #include "ov_models/pass/convert_prc.hpp" #include "ov_models/utils/ov_helpers.hpp" @@ -53,32 +54,31 @@ class FQWithMultipleOutConnections : public testing::WithParamInterface(ngPrc, ov::Shape(shape))}; - auto pattern1 = std::make_shared(ngraph::element::Type_t::i64, - ngraph::Shape{3}, - ngraph::Shape{1, 2, 64}); - auto reshape1 = std::make_shared(params[0], pattern1, false); + auto pattern1 = std::make_shared(ngraph::element::Type_t::i64, + ngraph::Shape{3}, + ngraph::Shape{1, 2, 64}); + auto reshape1 = std::make_shared(params[0], pattern1, false); - auto relu1 = 
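// Illustrative sketch of an explicit opset1 MaxPool, the op the pooling-reordering tests above
// route their results through via ngraph::builder::makePooling. The argument order shown
// (strides, pads_begin, pads_end, kernel, rounding, auto-pad) and the helper name/shapes are
// assumptions.
#include <memory>

#include "openvino/core/model.hpp"
#include "openvino/op/max_pool.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/result.hpp"

static std::shared_ptr<ov::Model> make_maxpool_sketch() {
    auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 8, 1, 16});

    // 1x2 window with stride 2 along W, no padding.
    auto pool = std::make_shared<ov::op::v1::MaxPool>(input,
                                                      ov::Strides{1, 2},
                                                      ov::Shape{0, 0},
                                                      ov::Shape{0, 0},
                                                      ov::Shape{1, 2},
                                                      ov::op::RoundingType::FLOOR,
                                                      ov::op::PadType::VALID);
    auto result = std::make_shared<ov::op::v0::Result>(pool);
    return std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{input});
}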
std::make_shared(reshape1); + auto relu1 = std::make_shared(reshape1); auto lowNode = ngraph::builder::makeConstant(ngPrc, {1}, {-10.0f}); auto highNode = ngraph::builder::makeConstant(ngPrc, {1}, {10.0f}); - auto fq = std::make_shared(relu1, - lowNode, - highNode, - lowNode, - highNode, - std::numeric_limits::max()); - - auto pattern2 = std::make_shared(ngraph::element::Type_t::i64, - ngraph::Shape{shape.size()}, - shape); - auto reshape2 = std::make_shared(fq, pattern2, false); - - auto relu2 = std::make_shared(fq); - auto reshape3 = std::make_shared(relu2, pattern2, false); - - ngraph::ResultVector results{std::make_shared(reshape2), - std::make_shared(reshape3)}; + auto fq = std::make_shared(relu1, + lowNode, + highNode, + lowNode, + highNode, + std::numeric_limits::max()); + + auto pattern2 = + std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{shape.size()}, shape); + auto reshape2 = std::make_shared(fq, pattern2, false); + + auto relu2 = std::make_shared(fq); + auto reshape3 = std::make_shared(relu2, pattern2, false); + + ngraph::ResultVector results{std::make_shared(reshape2), + std::make_shared(reshape3)}; function = std::make_shared(results, params, "FQFusionWithMultipleWeights"); } }; diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/insert_copy_layer_before_self_concat.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/insert_copy_layer_before_self_concat.cpp index d347c226910fb1..b7162a36c35b64 100644 --- a/src/plugins/intel_gna/tests/functional/pass_tests/insert_copy_layer_before_self_concat.cpp +++ b/src/plugins/intel_gna/tests/functional/pass_tests/insert_copy_layer_before_self_concat.cpp @@ -90,9 +90,9 @@ class InsertCopyBeforeSelfConcatTest : public testing::WithParamInterface(concatInputs, axis); - auto relu = std::make_shared(concat); - results.push_back(std::make_shared(relu)); + auto concat = std::make_shared(concatInputs, axis); + auto relu = std::make_shared(concat); + results.push_back(std::make_shared(relu)); } function = std::make_shared(results, params, "InsertCopyBeforeSelfConcat"); } diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/insert_transpose_before_matmul.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/insert_transpose_before_matmul.cpp index 9acaef8ca98a3a..482161a72884cb 100644 --- a/src/plugins/intel_gna/tests/functional/pass_tests/insert_transpose_before_matmul.cpp +++ b/src/plugins/intel_gna/tests/functional/pass_tests/insert_transpose_before_matmul.cpp @@ -72,24 +72,22 @@ class InsertTransposeBeforeMatmul : public testing::WithParamInterface(ngraph::element::Type_t::i64, ngraph::Shape{2}, matmul_in_shape); - auto reshape = std::make_shared(params[0], pattern, false); + std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{2}, matmul_in_shape); + auto reshape = std::make_shared(params[0], pattern, false); std::shared_ptr weights_node; if (firstInConst) { std::vector weights = ov::test::utils::generate_float_numbers(matmul_in_shape[0], -0.2f, 0.2f); - weights_node = - std::make_shared(ngPrc, ngraph::Shape{1, matmul_in_shape[0]}, weights); + weights_node = std::make_shared(ngPrc, ngraph::Shape{1, matmul_in_shape[0]}, weights); } else { std::vector weights = ov::test::utils::generate_float_numbers(matmul_in_shape[1], -0.2f, 0.2f); - weights_node = - std::make_shared(ngPrc, ngraph::Shape{matmul_in_shape[1], 1}, weights); + weights_node = std::make_shared(ngPrc, ngraph::Shape{matmul_in_shape[1], 1}, weights); } auto matmul = firstInConst ? 
std::make_shared(weights_node, reshape, false, false) : std::make_shared(reshape, weights_node, false, false); - ngraph::ResultVector results{std::make_shared(matmul)}; + ngraph::ResultVector results{std::make_shared(matmul)}; function = std::make_shared(results, params, "InsertTransposeBeforeMatmul"); } }; @@ -164,24 +162,23 @@ class InsertTransposeBeforeConcatConcat : public testing::WithParamInterface(ngPrc, ov::Shape{1, inputShape})}; auto matmul_in_shape = ngraph::Shape{inputShape / 8, 8}; auto pattern = - std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{2}, matmul_in_shape); - auto reshape = std::make_shared(params[0], pattern, false); + std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{2}, matmul_in_shape); + auto reshape = std::make_shared(params[0], pattern, false); std::vector data = ov::test::utils::generate_float_numbers(ngraph::shape_size(matmul_in_shape), -0.2f, 0.2f); - auto concat_const = std::make_shared(ngPrc, matmul_in_shape, data); + auto concat_const = std::make_shared(ngPrc, matmul_in_shape, data); ngraph::OutputVector concat_chunks{reshape, concat_const}; - auto concat = std::make_shared(concat_chunks, 0); + auto concat = std::make_shared(concat_chunks, 0); std::shared_ptr weights_node; std::vector weights = ov::test::utils::generate_float_numbers(matmul_in_shape[0] * 2, -0.2f, 0.2f); - weights_node = - std::make_shared(ngPrc, ngraph::Shape{1, matmul_in_shape[0] * 2}, weights); + weights_node = std::make_shared(ngPrc, ngraph::Shape{1, matmul_in_shape[0] * 2}, weights); auto matmul = firstInConst ? std::make_shared(weights_node, concat, false, false) : std::make_shared(concat, weights_node, false, false); - ngraph::ResultVector results{std::make_shared(matmul)}; + ngraph::ResultVector results{std::make_shared(matmul)}; function = std::make_shared(results, params, "InsertTransposeBeforeConcatConcat"); } }; diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/insert_transpose_between_convs.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/insert_transpose_between_convs.cpp index 9e8ec3a61b7f94..7f79573fb087f6 100644 --- a/src/plugins/intel_gna/tests/functional/pass_tests/insert_transpose_between_convs.cpp +++ b/src/plugins/intel_gna/tests/functional/pass_tests/insert_transpose_between_convs.cpp @@ -68,8 +68,8 @@ class InsertTransposeBetweenConvs : public testing::WithParamInterface(ngPrc, ov::Shape(inputShape_2d))}; auto pattern1 = - std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{4}, inputShape); - auto reshape1 = std::make_shared(params[0], pattern1, false); + std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{4}, inputShape); + auto reshape1 = std::make_shared(params[0], pattern1, false); size_t num_out_channels = 8; size_t kernal_size = 8; @@ -92,8 +92,8 @@ class InsertTransposeBetweenConvs : public testing::WithParamInterface(ngraph::element::Type_t::i64, ngraph::Shape{4}, pattern2_shape); - auto reshape2 = std::make_shared(conv1, pattern2, false); + std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{4}, pattern2_shape); + auto reshape2 = std::make_shared(conv1, pattern2, false); std::vector filter_weights_2 = ov::test::utils::generate_float_numbers(num_out_channels * kernal_size, -0.2f, 0.2f); @@ -113,10 +113,10 @@ class InsertTransposeBetweenConvs : public testing::WithParamInterface(ngraph::element::Type_t::i64, ngraph::Shape{2}, pattern3_shape); - auto reshape3 = std::make_shared(conv2, pattern3, false); + std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{2}, pattern3_shape); + 
auto reshape3 = std::make_shared(conv2, pattern3, false); - ngraph::ResultVector results{std::make_shared(reshape3)}; + ngraph::ResultVector results{std::make_shared(reshape3)}; function = std::make_shared(results, params, "InsertTransposeBetweenConvs"); } }; @@ -164,8 +164,8 @@ class InsertTransposeBetweenConvsWithPool : public testing::WithParamInterface(ngPrc, ov::Shape(inputShape_2d))}; auto pattern1 = - std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{4}, inputShape); - auto reshape1 = std::make_shared(params[0], pattern1, false); + std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{4}, inputShape); + auto reshape1 = std::make_shared(params[0], pattern1, false); size_t num_out_channels = 8; size_t kernal_size = 8; @@ -200,8 +200,8 @@ class InsertTransposeBetweenConvsWithPool : public testing::WithParamInterface(ngraph::element::Type_t::i64, ngraph::Shape{4}, pattern2_shape); - auto reshape2 = std::make_shared(pool, pattern2, false); + std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{4}, pattern2_shape); + auto reshape2 = std::make_shared(pool, pattern2, false); std::vector filter_weights_2 = ov::test::utils::generate_float_numbers(num_out_channels * kernal_size, -0.2f, 0.2f); @@ -221,10 +221,10 @@ class InsertTransposeBetweenConvsWithPool : public testing::WithParamInterface(ngraph::element::Type_t::i64, ngraph::Shape{2}, pattern3_shape); - auto reshape3 = std::make_shared(conv2, pattern3, false); + std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{2}, pattern3_shape); + auto reshape3 = std::make_shared(conv2, pattern3, false); - ngraph::ResultVector results{std::make_shared(reshape3)}; + ngraph::ResultVector results{std::make_shared(reshape3)}; function = std::make_shared(results, params, "InsertTransposeBetweenConvs"); } }; diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/layers_restrictions.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/layers_restrictions.cpp index 48a9724f25600c..47216d5bcc2005 100644 --- a/src/plugins/intel_gna/tests/functional/pass_tests/layers_restrictions.cpp +++ b/src/plugins/intel_gna/tests/functional/pass_tests/layers_restrictions.cpp @@ -31,7 +31,7 @@ struct FullyConnectedBatchSizeMoreThan8 { ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; auto weights = ov::test::utils::generate_float_numbers(inputShape[1] * inputShape[1], -0.0001f, 0.0001f); auto fullyConnected = ngraph::builder::makeFullyConnected(params[0], ngPrc, inputShape[1], false, {}, weights); - ngraph::ResultVector results{std::make_shared(fullyConnected)}; + ngraph::ResultVector results{std::make_shared(fullyConnected)}; return std::make_shared(results, params, getName()); } static const char* getMatch() { @@ -49,7 +49,7 @@ struct FullyConnectedBatchSizeLessThanOrEqual8 { ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; auto weights = ov::test::utils::generate_float_numbers(inputShape[1] * inputShape[1], -0.0001f, 0.0001f); auto fullyConnected = ngraph::builder::makeFullyConnected(params[0], ngPrc, inputShape[1], false, {}, weights); - ngraph::ResultVector results{std::make_shared(fullyConnected)}; + ngraph::ResultVector results{std::make_shared(fullyConnected)}; return std::make_shared(results, params, getName()); } }; diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/remove_permutations_NHWC_to_NCHW_pass.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/remove_permutations_NHWC_to_NCHW_pass.cpp index 4cad39bd6c9f70..0ada71e606928b 100644 --- 
a/src/plugins/intel_gna/tests/functional/pass_tests/remove_permutations_NHWC_to_NCHW_pass.cpp +++ b/src/plugins/intel_gna/tests/functional/pass_tests/remove_permutations_NHWC_to_NCHW_pass.cpp @@ -66,9 +66,9 @@ std::shared_ptr CreateTranspose(std::shared_ptr inpu } else { permute_order = shape_size == 4 ? std::vector{0, 2, 3, 1} : std::vector{0, 2, 1}; } - return std::make_shared( + return std::make_shared( input, - ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{shape_size}, permute_order)); + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{shape_size}, permute_order)); } ngraph::Shape GetLayerTransposedOutputShape(std::shared_ptr layer) { @@ -122,7 +122,7 @@ std::shared_ptr CreateConvolution(const ngraph::Output(pool) : pool; + return withActivation ? std::make_shared(pool) : pool; } class RemovePermutationsNHWCToNCHWPassTest : public testing::WithParamInterface, @@ -175,10 +175,9 @@ class RemovePermutationsNHWCToNCHWPassTest : public testing::WithParamInterface< std::accumulate(std::begin(inputShape), std::end(inputShape), 1, std::multiplies()); ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape{1, in_total_dims_size})}; - auto pattern1 = std::make_shared(ngraph::element::Type_t::i64, - ngraph::Shape{shape_size}, - inputShape); - auto reshape1 = std::make_shared(params[0], pattern1, false); + auto pattern1 = + std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{shape_size}, inputShape); + auto reshape1 = std::make_shared(params[0], pattern1, false); auto permute1 = CreateTranspose(reshape1, shape_size, true); auto conv = CreateConvolution(permute1, ngPrc, inputShape, output1D); @@ -188,12 +187,12 @@ class RemovePermutationsNHWCToNCHWPassTest : public testing::WithParamInterface< std::end(conv->get_output_shape(0)), size_t(1), std::multiplies()); - auto pattern2 = std::make_shared(ngraph::element::Type_t::i64, - ngraph::Shape{2}, - ngraph::Shape{1, conv_out_size}); - auto reshape2 = std::make_shared(permute2, pattern2, false); + auto pattern2 = std::make_shared(ngraph::element::Type_t::i64, + ngraph::Shape{2}, + ngraph::Shape{1, conv_out_size}); + auto reshape2 = std::make_shared(permute2, pattern2, false); - ngraph::ResultVector results{std::make_shared(reshape2)}; + ngraph::ResultVector results{std::make_shared(reshape2)}; function = std::make_shared(results, params, "RemovePermutationsTest"); if (transpose_to_reshape) { ngraph::pass::Manager manager; @@ -239,7 +238,7 @@ class RemovePermutationsNHWCToNCHWPassNoReshapesTest : public testing::WithParam auto conv = CreateConvolution(permute1, ngPrc, inputShape); auto permute2 = CreateTranspose(conv, shape_size, false); - ngraph::ResultVector results{std::make_shared(permute2)}; + ngraph::ResultVector results{std::make_shared(permute2)}; function = std::make_shared(results, params, "RemovePermutationPassNoReshapes"); } @@ -313,10 +312,9 @@ class RemovePermutationsWithPoolAndActTest : public testing::WithParamInterface< std::accumulate(std::begin(inputShape), std::end(inputShape), 1, std::multiplies()); ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape{1, in_total_dims_size})}; - auto pattern1 = std::make_shared(ngraph::element::Type_t::i64, - ngraph::Shape{shape_size}, - inputShape); - auto reshape1 = std::make_shared(params[0], pattern1, false); + auto pattern1 = + std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{shape_size}, inputShape); + auto reshape1 = std::make_shared(params[0], pattern1, false); auto permute1 = CreateTranspose(reshape1, shape_size, true); 
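// Editor's sketch, not part of the original patch: the template arguments are not visible in this
// rendering of the diff, so the concrete op types created by the surrounding lines are an assumption.
// CreateTranspose() above is presumed to boil down to a Transpose whose permutation order is an i64
// Constant; a minimal illustration follows (ov::op::v1::Transpose and ov::op::v0::Constant are
// assumed types, and the lambda assumes the corresponding op headers are reachable in this file).
auto make_order_transpose = [](const ov::Output<ov::Node>& in, const std::vector<int64_t>& order) {
    // The permutation order is carried by a Constant feeding the second Transpose input.
    auto order_const = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{order.size()}, order);
    return std::make_shared<ov::op::v1::Transpose>(in, order_const);
};
(void)make_order_transpose;  // intentionally unused; it only documents the presumed pattern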
auto conv = CreateConvolution(permute1, ngPrc, inputShape, false, true, true); @@ -326,12 +324,12 @@ class RemovePermutationsWithPoolAndActTest : public testing::WithParamInterface< std::end(conv->get_output_shape(0)), size_t(1), std::multiplies()); - auto pattern2 = std::make_shared(ngraph::element::Type_t::i64, - ngraph::Shape{2}, - ngraph::Shape{1, conv_out_size}); - auto reshape2 = std::make_shared(permute2, pattern2, false); + auto pattern2 = std::make_shared(ngraph::element::Type_t::i64, + ngraph::Shape{2}, + ngraph::Shape{1, conv_out_size}); + auto reshape2 = std::make_shared(permute2, pattern2, false); - ngraph::ResultVector results{std::make_shared(reshape2)}; + ngraph::ResultVector results{std::make_shared(reshape2)}; function = std::make_shared(results, params, "RemovePermutationsWithPoolAndActTest"); if (transpose_to_reshape) { @@ -400,10 +398,9 @@ class RemovePermutationsWithTwoConvTest : public testing::WithParamInterface()); ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape{1, in_total_dims_size})}; - auto pattern1 = std::make_shared(ngraph::element::Type_t::i64, - ngraph::Shape{shape_size}, - inputShape); - auto reshape1 = std::make_shared(params[0], pattern1, false); + auto pattern1 = + std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{shape_size}, inputShape); + auto reshape1 = std::make_shared(params[0], pattern1, false); auto permute1 = CreateTranspose(reshape1, shape_size, true); auto conv1 = CreateConvolution(permute1, ngPrc, inputShape); @@ -414,12 +411,12 @@ class RemovePermutationsWithTwoConvTest : public testing::WithParamInterfaceget_output_shape(0)), size_t(1), std::multiplies()); - auto pattern2 = std::make_shared(ngraph::element::Type_t::i64, - ngraph::Shape{2}, - ngraph::Shape{1, conv_out_size}); - auto reshape2 = std::make_shared(permute2, pattern2, false); + auto pattern2 = std::make_shared(ngraph::element::Type_t::i64, + ngraph::Shape{2}, + ngraph::Shape{1, conv_out_size}); + auto reshape2 = std::make_shared(permute2, pattern2, false); - ngraph::ResultVector results{std::make_shared(reshape2)}; + ngraph::ResultVector results{std::make_shared(reshape2)}; function = std::make_shared(results, params, "RemovePermutationPass"); } }; @@ -487,35 +484,33 @@ class RemovePermutationsWithEltwiseTest : public testing::WithParamInterface(ov::element::Type_t::i64, ov::Shape{}, std::vector{1}); auto split = std::make_shared(params[0], split_axis_op, 2); - auto pattern1 = std::make_shared(ngraph::element::Type_t::i64, - ngraph::Shape{shape_size}, - inputShape); - auto reshape1 = std::make_shared(split->output(0), pattern1, false); + auto pattern1 = + std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{shape_size}, inputShape); + auto reshape1 = std::make_shared(split->output(0), pattern1, false); auto permute1 = CreateTranspose(reshape1, shape_size, true); auto conv1 = CreateConvolution(permute1, ngPrc, inputShape); - auto pattern2 = std::make_shared(ngraph::element::Type_t::i64, - ngraph::Shape{shape_size}, - inputShape); - auto reshape2 = std::make_shared(split->output(1), pattern2, false); + auto pattern2 = + std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{shape_size}, inputShape); + auto reshape2 = std::make_shared(split->output(1), pattern2, false); auto permute2 = CreateTranspose(reshape2, shape_size, true); auto conv2 = CreateConvolution(permute2, ngPrc, inputShape); - auto add = std::make_shared(conv1, conv2); + auto add = std::make_shared(conv1, conv2); auto permute3 = CreateTranspose(add, 
add->get_output_shape(0).size(), false); auto conv_out_size = std::accumulate(std::begin(add->get_output_shape(0)), std::end(add->get_output_shape(0)), size_t(1), std::multiplies()); - auto pattern3 = std::make_shared(ngraph::element::Type_t::i64, - ngraph::Shape{2}, - ngraph::Shape{1, conv_out_size}); - auto reshape3 = std::make_shared(permute3, pattern3, false); + auto pattern3 = std::make_shared(ngraph::element::Type_t::i64, + ngraph::Shape{2}, + ngraph::Shape{1, conv_out_size}); + auto reshape3 = std::make_shared(permute3, pattern3, false); - ngraph::ResultVector results{std::make_shared(reshape3)}; + ngraph::ResultVector results{std::make_shared(reshape3)}; function = std::make_shared(results, params, "RemovePermutationPass"); } }; @@ -587,10 +582,10 @@ class RemoveSharedPermutationTest : public testing::WithParamInterface 1 ? 1 : (inputShape.size() - 2); multipleInputShape[mul_dim] *= splits_num; - auto pattern = std::make_shared(ngraph::element::Type_t::i64, - ngraph::Shape{multipleInputShape.size()}, - multipleInputShape); - auto reshape = std::make_shared(params[0], pattern, false); + auto pattern = std::make_shared(ngraph::element::Type_t::i64, + ngraph::Shape{multipleInputShape.size()}, + multipleInputShape); + auto reshape = std::make_shared(params[0], pattern, false); auto permute = CreateTranspose(reshape, shape_size, true); OPENVINO_SUPPRESS_DEPRECATED_START auto split = ngraph::builder::makeSplit( @@ -606,10 +601,10 @@ class RemoveSharedPermutationTest : public testing::WithParamInterfaceget_output_shape(0)), size_t(1), std::multiplies()); - auto pattern1 = std::make_shared(ngraph::element::Type_t::i64, - ngraph::Shape{2}, - ngraph::Shape{1, conv1_out_size}); - auto reshape1 = std::make_shared(permute1, pattern1, false); + auto pattern1 = std::make_shared(ngraph::element::Type_t::i64, + ngraph::Shape{2}, + ngraph::Shape{1, conv1_out_size}); + auto reshape1 = std::make_shared(permute1, pattern1, false); auto conv2 = CreateConvolution(split->output(1), ngPrc, inputShape); auto permute2 = CreateTranspose(conv2, conv2->get_output_shape(0).size(), false); @@ -617,14 +612,14 @@ class RemoveSharedPermutationTest : public testing::WithParamInterfaceget_output_shape(0)), size_t(1), std::multiplies()); - auto pattern2 = std::make_shared(ngraph::element::Type_t::i64, - ngraph::Shape{2}, - ngraph::Shape{1, conv2_out_size}); - auto reshape2 = std::make_shared(permute2, pattern2, false); + auto pattern2 = std::make_shared(ngraph::element::Type_t::i64, + ngraph::Shape{2}, + ngraph::Shape{1, conv2_out_size}); + auto reshape2 = std::make_shared(permute2, pattern2, false); auto concat = std::make_shared(ov::NodeVector{reshape1, reshape2}, 1); - ngraph::ResultVector results{std::make_shared(concat)}; + ngraph::ResultVector results{std::make_shared(concat)}; function = std::make_shared(results, params, "RemoveSharedPermutationTest"); } }; diff --git a/src/plugins/intel_gna/tests/functional/preprocess_tests/precision_convert.cpp b/src/plugins/intel_gna/tests/functional/preprocess_tests/precision_convert.cpp index 2dffe679072639..eb224e20c78890 100644 --- a/src/plugins/intel_gna/tests/functional/preprocess_tests/precision_convert.cpp +++ b/src/plugins/intel_gna/tests/functional/preprocess_tests/precision_convert.cpp @@ -61,8 +61,8 @@ class PreprocessGNATest : public testing::WithParamInterface(paramsOuts, 1); - ngraph::ResultVector results{std::make_shared(concat)}; + auto concat = std::make_shared(paramsOuts, 1); + ngraph::ResultVector results{std::make_shared(concat)}; function = 
std::make_shared(results, params, "concat"); } }; diff --git a/src/plugins/intel_gna/tests/functional/scale_factors_tests/add_overload_correction.cpp b/src/plugins/intel_gna/tests/functional/scale_factors_tests/add_overload_correction.cpp index 6a11f56b7b308f..1642b372fcf98b 100644 --- a/src/plugins/intel_gna/tests/functional/scale_factors_tests/add_overload_correction.cpp +++ b/src/plugins/intel_gna/tests/functional/scale_factors_tests/add_overload_correction.cpp @@ -11,6 +11,7 @@ #include "common_test_utils/common_utils.hpp" #include "functional_test_utils/blob_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" +#include "openvino/opsets/opset8.hpp" #include "ov_models/builders.hpp" #include "ov_models/pass/convert_prc.hpp" #include "ov_models/utils/ov_helpers.hpp" @@ -74,39 +75,31 @@ class AddOverloadCorrectionTest : public testing::WithParamInterface(ngPrc, {1}, {-10.0f}); auto highNodeIn = ngraph::builder::makeConstant(ngPrc, {1}, {10.0f}); - auto fqIn = std::make_shared(params[0], - lowNodeIn, - highNodeIn, - lowNodeIn, - highNodeIn, - levels16); + auto fqIn = std::make_shared(params[0], + lowNodeIn, + highNodeIn, + lowNodeIn, + highNodeIn, + levels16); auto constant = ngraph::builder::makeConstant(ngPrc, inputShape, ov::test::utils::generate_float_numbers(inputShape[1], -1.0f, 1.0f)); - auto mul = std::make_shared(params[1], constant); + auto mul = std::make_shared(params[1], constant); auto lowNodeMul = ngraph::builder::makeConstant(ngPrc, {1}, {-1.0f}); auto highNodeMul = ngraph::builder::makeConstant(ngPrc, {1}, {1.0f}); - auto fqMul = std::make_shared(mul, - lowNodeMul, - highNodeMul, - lowNodeMul, - highNodeMul, - levels16); + auto fqMul = + std::make_shared(mul, lowNodeMul, highNodeMul, lowNodeMul, highNodeMul, levels16); - auto add = std::make_shared(fqIn, fqMul); + auto add = std::make_shared(fqIn, fqMul); auto lowNodeOut = ngraph::builder::makeConstant(ngPrc, {1}, {-11.0f}); auto highNodeOut = ngraph::builder::makeConstant(ngPrc, {1}, {11.0f}); - auto fqOut = std::make_shared(add, - lowNodeOut, - highNodeOut, - lowNodeOut, - highNodeOut, - levels16); - - ngraph::ResultVector results{std::make_shared(fqOut)}; + auto fqOut = + std::make_shared(add, lowNodeOut, highNodeOut, lowNodeOut, highNodeOut, levels16); + + ngraph::ResultVector results{std::make_shared(fqOut)}; function = std::make_shared(results, params, "AddOverloadCorrection"); } diff --git a/src/plugins/intel_gna/tests/functional/scale_factors_tests/const_input_add.cpp b/src/plugins/intel_gna/tests/functional/scale_factors_tests/const_input_add.cpp index 56f2d879e7c2a2..7b561d5dbf1704 100644 --- a/src/plugins/intel_gna/tests/functional/scale_factors_tests/const_input_add.cpp +++ b/src/plugins/intel_gna/tests/functional/scale_factors_tests/const_input_add.cpp @@ -71,7 +71,7 @@ class ConstInputAddTest : public testing::WithParamInterface(ngPrc, shape, {}, true, constRange.second, constRange.first); auto eltwise = ngraph::builder::makeEltwise(constant, params[0], ngraph::helpers::EltwiseTypes::ADD); - ngraph::ResultVector results{std::make_shared(eltwise)}; + ngraph::ResultVector results{std::make_shared(eltwise)}; function = std::make_shared(results, params, "InputConstAdd"); } diff --git a/src/plugins/intel_gna/tests/functional/scale_factors_tests/eltwise_act_fq.cpp b/src/plugins/intel_gna/tests/functional/scale_factors_tests/eltwise_act_fq.cpp index 70334b08f0e3c3..0106d80fec5820 100644 --- a/src/plugins/intel_gna/tests/functional/scale_factors_tests/eltwise_act_fq.cpp +++ 
b/src/plugins/intel_gna/tests/functional/scale_factors_tests/eltwise_act_fq.cpp @@ -11,6 +11,7 @@ #include "common_test_utils/common_utils.hpp" #include "functional_test_utils/blob_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" +#include "openvino/opsets/opset8.hpp" #include "ov_models/builders.hpp" #include "ov_models/pass/convert_prc.hpp" #include "ov_models/utils/ov_helpers.hpp" @@ -91,35 +92,35 @@ class EltwiseActFqTest : public testing::WithParamInterface, auto lowNodeIn = ngraph::builder::makeConstant(ngPrc, {1}, {100 * -inputDataMax}); auto highNodeIn = ngraph::builder::makeConstant(ngPrc, {1}, {100 * inputDataMax}); - auto fqIn = std::make_shared(params[0], - lowNodeIn, - highNodeIn, - lowNodeIn, - highNodeIn, - levels16); + auto fqIn = std::make_shared(params[0], + lowNodeIn, + highNodeIn, + lowNodeIn, + highNodeIn, + levels16); auto constant = ngraph::builder::makeConstant( ngPrc, shape, ov::test::utils::generate_float_numbers(shape[1], inputDataMin, inputDataMax)); - auto add = std::make_shared(fqIn, constant); + auto add = std::make_shared(fqIn, constant); auto lowNode = ngraph::builder::makeConstant(ngPrc, {1}, {2 * inputDataMin}); auto highNode = ngraph::builder::makeConstant(ngPrc, {1}, {2 * inputDataMax}); - auto fq = std::make_shared(add, lowNode, highNode, lowNode, highNode, levels32); + auto fq = std::make_shared(add, lowNode, highNode, lowNode, highNode, levels32); auto tanh = ngraph::builder::makeActivation(fq, ngPrc, act); auto lowNodeOut = ngraph::builder::makeConstant(ngPrc, {1}, {std::tanh(2 * inputDataMin)}); auto highNodeOut = ngraph::builder::makeConstant(ngPrc, {1}, {std::tanh(2 * inputDataMax)}); - auto fqOut = std::make_shared(tanh, - lowNodeOut, - highNodeOut, - lowNodeOut, - highNodeOut, - levels16); - - ngraph::ResultVector results{std::make_shared(fqOut)}; + auto fqOut = std::make_shared(tanh, + lowNodeOut, + highNodeOut, + lowNodeOut, + highNodeOut, + levels16); + + ngraph::ResultVector results{std::make_shared(fqOut)}; function = std::make_shared(results, params, "TanhFq"); } diff --git a/src/plugins/intel_gna/tests/functional/scale_factors_tests/matmul_overload_correction.cpp b/src/plugins/intel_gna/tests/functional/scale_factors_tests/matmul_overload_correction.cpp index fd4112bf6c6b1f..afb739876c7793 100644 --- a/src/plugins/intel_gna/tests/functional/scale_factors_tests/matmul_overload_correction.cpp +++ b/src/plugins/intel_gna/tests/functional/scale_factors_tests/matmul_overload_correction.cpp @@ -13,6 +13,7 @@ #include "common_test_utils/common_utils.hpp" #include "functional_test_utils/blob_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" +#include "openvino/opsets/opset8.hpp" #include "ov_models/builders.hpp" #include "ov_models/pass/convert_prc.hpp" #include "ov_models/utils/ov_helpers.hpp" @@ -67,7 +68,7 @@ class MatMulOverloadCorrectionNegTest : public testing::WithParamInterface(ngPrc, ov::Shape(shape1))}; - auto relu = std::make_shared(params[0]); + auto relu = std::make_shared(params[0]); std::shared_ptr input2; if (isSecondInputConst) { @@ -77,33 +78,33 @@ class MatMulOverloadCorrectionNegTest : public testing::WithParamInterface(ngPrc, shape2); - params.push_back(std::dynamic_pointer_cast(input2)); + params.push_back(std::dynamic_pointer_cast(input2)); } auto lowNodeIn1 = ngraph::builder::makeConstant(ngPrc, {1}, {-maxInputValue}); auto highNodeIn1 = ngraph::builder::makeConstant(ngPrc, {1}, {maxInputValue}); - auto fqIn1 = std::make_shared(relu, - lowNodeIn1, - highNodeIn1, - lowNodeIn1, - highNodeIn1, - 
levels16); + auto fqIn1 = std::make_shared(relu, + lowNodeIn1, + highNodeIn1, + lowNodeIn1, + highNodeIn1, + levels16); auto lowNodeIn2 = ngraph::builder::makeConstant(ngPrc, {1}, {-maxInputValue}); auto highNodeIn2 = ngraph::builder::makeConstant(ngPrc, {1}, {maxInputValue}); - auto fqIn2 = std::make_shared(input2, - lowNodeIn2, - highNodeIn2, - lowNodeIn2, - highNodeIn2, - levels16); + auto fqIn2 = std::make_shared(input2, + lowNodeIn2, + highNodeIn2, + lowNodeIn2, + highNodeIn2, + levels16); std::shared_ptr matmul_input2 = fqIn2; if (!isSecondInputConst) { - auto pattern = std::make_shared(ngraph::element::Type_t::i64, - ngraph::Shape{2}, - ngraph::Shape{shape1[1], shape1[1]}); - matmul_input2 = std::make_shared(fqIn2, pattern, false); + auto pattern = std::make_shared(ngraph::element::Type_t::i64, + ngraph::Shape{2}, + ngraph::Shape{shape1[1], shape1[1]}); + matmul_input2 = std::make_shared(fqIn2, pattern, false); } auto matmul = swapInputs ? std::make_shared(matmul_input2, fqIn1, false, true) @@ -113,14 +114,14 @@ class MatMulOverloadCorrectionNegTest : public testing::WithParamInterface(ngPrc, {1}, {-maxInputValue * maxInputValue * inputShape[1] / 10}); auto highNodeOut = ngraph::builder::makeConstant(ngPrc, {1}, {maxInputValue * maxInputValue * inputShape[1] / 10}); - auto fqOut = std::make_shared(matmul, - lowNodeOut, - highNodeOut, - lowNodeOut, - highNodeOut, - levels32); - - ngraph::ResultVector results{std::make_shared(fqOut)}; + auto fqOut = std::make_shared(matmul, + lowNodeOut, + highNodeOut, + lowNodeOut, + highNodeOut, + levels32); + + ngraph::ResultVector results{std::make_shared(fqOut)}; function = std::make_shared(results, params, "MatMulOverloadCorrection"); } diff --git a/src/plugins/intel_gna/tests/functional/scale_factors_tests/perchannel_quant_test.cpp b/src/plugins/intel_gna/tests/functional/scale_factors_tests/perchannel_quant_test.cpp index 308c87e58805d3..5fd0cd60a7e69a 100644 --- a/src/plugins/intel_gna/tests/functional/scale_factors_tests/perchannel_quant_test.cpp +++ b/src/plugins/intel_gna/tests/functional/scale_factors_tests/perchannel_quant_test.cpp @@ -11,6 +11,7 @@ #include "common_test_utils/common_utils.hpp" #include "functional_test_utils/blob_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" +#include "openvino/opsets/opset8.hpp" #include "ov_models/builders.hpp" #include "ov_models/pass/convert_prc.hpp" #include "ov_models/utils/ov_helpers.hpp" @@ -75,15 +76,15 @@ class PerchannelQuantTest : public testing::WithParamInterface, pu auto constant = ngraph::builder::makeConstant(ngPrc, constShape, weights); auto wLowNode = ngraph::builder::makeConstant(ngPrc, {constShape.front()}, {weightsMin}); auto wHighNode = ngraph::builder::makeConstant(ngPrc, {constShape.front()}, {weightsMax}); - auto wFq = std::make_shared(constant, - wLowNode, - wHighNode, - wLowNode, - wHighNode, - std::numeric_limits::max() - 1); - auto matmul = std::make_shared(params[0], wFq, false, true); - - ngraph::ResultVector results{std::make_shared(matmul)}; + auto wFq = std::make_shared(constant, + wLowNode, + wHighNode, + wLowNode, + wHighNode, + std::numeric_limits::max() - 1); + auto matmul = std::make_shared(params[0], wFq, false, true); + + ngraph::ResultVector results{std::make_shared(matmul)}; function = std::make_shared(results, params, "PerchannelQuantTest"); } }; diff --git a/src/plugins/intel_gna/tests/functional/scale_factors_tests/test_fq_scale_factors.cpp b/src/plugins/intel_gna/tests/functional/scale_factors_tests/test_fq_scale_factors.cpp index 
b1e3b7f73e4fde..92897c230cafe0 100644 --- a/src/plugins/intel_gna/tests/functional/scale_factors_tests/test_fq_scale_factors.cpp +++ b/src/plugins/intel_gna/tests/functional/scale_factors_tests/test_fq_scale_factors.cpp @@ -12,6 +12,7 @@ #include "functional_test_utils/blob_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" #include "openvino/opsets/opset10.hpp" +#include "openvino/opsets/opset8.hpp" #include "ov_models/builders.hpp" #include "ov_models/pass/convert_prc.hpp" #include "ov_models/utils/ov_helpers.hpp" @@ -114,25 +115,17 @@ class TestFQScaleFactorsTest : public testing::WithParamInterface(ngPrc, {1}, {inputDataMin}); auto highNodeIn = ngraph::builder::makeConstant(ngPrc, {1}, {inputDataMax}); - auto fqIn = std::make_shared(test_node, - lowNodeIn, - highNodeIn, - lowNodeIn, - highNodeIn, - levels); + auto fqIn = + std::make_shared(test_node, lowNodeIn, highNodeIn, lowNodeIn, highNodeIn, levels); - auto mul = std::make_shared(fqIn, test_node); + auto mul = std::make_shared(fqIn, test_node); auto lowNodeOut = ngraph::builder::makeConstant(ngPrc, {1}, {-inputDataMin * inputDataMin}); auto highNodeOut = ngraph::builder::makeConstant(ngPrc, {1}, {inputDataMax * inputDataMax}); - auto fqOut = std::make_shared(mul, - lowNodeOut, - highNodeOut, - lowNodeOut, - highNodeOut, - levels); - - ngraph::ResultVector results{std::make_shared(fqOut)}; + auto fqOut = + std::make_shared(mul, lowNodeOut, highNodeOut, lowNodeOut, highNodeOut, levels); + + ngraph::ResultVector results{std::make_shared(fqOut)}; function = std::make_shared(results, params, "FQWithSmallScaleFactor"); functionRefs = ngraph::clone_function(*function); } diff --git a/src/plugins/intel_gna/tests/functional/scale_factors_tests/weighable_layer_without_fq.cpp b/src/plugins/intel_gna/tests/functional/scale_factors_tests/weighable_layer_without_fq.cpp index f9fa6be55195dc..ef2b2da8d29c1c 100644 --- a/src/plugins/intel_gna/tests/functional/scale_factors_tests/weighable_layer_without_fq.cpp +++ b/src/plugins/intel_gna/tests/functional/scale_factors_tests/weighable_layer_without_fq.cpp @@ -7,6 +7,7 @@ #include #include +#include "openvino/opsets/opset8.hpp" #include "ov_models/builders.hpp" #include "ov_models/utils/ov_helpers.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" @@ -55,22 +56,22 @@ class WeighableLayerWithoutFqTest : public testing::WithParamInterface(ngPrc, ov::Shape(inputShape))}; - auto relu = std::make_shared(params[0]); - auto fq1 = std::make_shared( - relu, - ngraph::opset8::Constant::create(ngraph::element::f32, {1}, {-10.}), - ngraph::opset8::Constant::create(ngraph::element::f32, {1}, {10.}), - ngraph::opset8::Constant::create(ngraph::element::f32, {1}, {-10.}), - ngraph::opset8::Constant::create(ngraph::element::f32, {1}, {10.}), - static_cast(std::numeric_limits::max()) + 1); + auto relu = std::make_shared(params[0]); + auto fq1 = + std::make_shared(relu, + ov::op::v0::Constant::create(ngraph::element::f32, {1}, {-10.}), + ov::op::v0::Constant::create(ngraph::element::f32, {1}, {10.}), + ov::op::v0::Constant::create(ngraph::element::f32, {1}, {-10.}), + ov::op::v0::Constant::create(ngraph::element::f32, {1}, {10.}), + static_cast(std::numeric_limits::max()) + 1); auto constant = ngraph::builder::makeConstant(ngPrc, constantShape, std::vector{}, true); - auto fq2 = std::make_shared( - constant, - ngraph::opset8::Constant::create(ngraph::element::f32, {1}, {-10}), - ngraph::opset8::Constant::create(ngraph::element::f32, {1}, {10.}), - 
ngraph::opset8::Constant::create(ngraph::element::f32, {1}, {-10.}), - ngraph::opset8::Constant::create(ngraph::element::f32, {1}, {10.}), - static_cast(std::numeric_limits::max()) + 1); + auto fq2 = + std::make_shared(constant, + ov::op::v0::Constant::create(ngraph::element::f32, {1}, {-10}), + ov::op::v0::Constant::create(ngraph::element::f32, {1}, {10.}), + ov::op::v0::Constant::create(ngraph::element::f32, {1}, {-10.}), + ov::op::v0::Constant::create(ngraph::element::f32, {1}, {10.}), + static_cast(std::numeric_limits::max()) + 1); auto concat = std::make_shared(ov::NodeVector{fq1, fq2}, 1); function = std::make_shared(concat, params, "WeighableLayerWithoutFq"); } diff --git a/src/plugins/intel_gna/tests/functional/shared_tests_instances/behavior/ov_executable_network/get_metric.cpp b/src/plugins/intel_gna/tests/functional/shared_tests_instances/behavior/ov_executable_network/get_metric.cpp index 00db2c4ec2f37f..a84648cf4a809d 100644 --- a/src/plugins/intel_gna/tests/functional/shared_tests_instances/behavior/ov_executable_network/get_metric.cpp +++ b/src/plugins/intel_gna/tests/functional/shared_tests_instances/behavior/ov_executable_network/get_metric.cpp @@ -20,13 +20,13 @@ class OVClassNetworkTestGNA : public ::testing::Test { void SetUp() override { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - auto param0 = std::make_shared(ngraph::element::Type_t::f32, ngraph::Shape(1, 1024)); - auto reshape = std::make_shared( - param0, - std::make_shared(ngraph::element::Type_t::i64, - ngraph::Shape{4}, - ngraph::Shape{1, 1, 1, 1024}), - false); + auto param0 = std::make_shared(ngraph::element::Type_t::f32, ngraph::Shape(1, 1024)); + auto reshape = + std::make_shared(param0, + std::make_shared(ngraph::element::Type_t::i64, + ngraph::Shape{4}, + ngraph::Shape{1, 1, 1, 1024}), + false); param0->set_friendly_name("input"); auto conv1 = ngraph::builder::makeConvolution(reshape, ngraph::element::Type_t::f32, @@ -37,7 +37,7 @@ class OVClassNetworkTestGNA : public ::testing::Test { {1, 1}, ngraph::op::PadType::EXPLICIT, 4); - auto result = std::make_shared(conv1); + auto result = std::make_shared(conv1); gnaSimpleNetwork = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{param0}); gnaSimpleNetwork->set_friendly_name("GnaSingleConv"); diff --git a/src/plugins/intel_gna/tests/functional/shared_tests_instances/execution_graph_tests/add_output.cpp b/src/plugins/intel_gna/tests/functional/shared_tests_instances/execution_graph_tests/add_output.cpp index c56b77a0db892f..c729d94754029b 100644 --- a/src/plugins/intel_gna/tests/functional/shared_tests_instances/execution_graph_tests/add_output.cpp +++ b/src/plugins/intel_gna/tests/functional/shared_tests_instances/execution_graph_tests/add_output.cpp @@ -16,9 +16,9 @@ InferenceEngine::CNNNetwork getTargetNetwork() { auto input = std::make_shared(type, shape); auto mem_i = std::make_shared(type, shape, 0); auto mem_r = std::make_shared(mem_i, "r_1-3"); - auto mul = std::make_shared(mem_r, input); + auto mul = std::make_shared(mem_r, input); auto mem_w = std::make_shared(mul, "r_1-3"); - auto sigm = std::make_shared(mul); + auto sigm = std::make_shared(mul); mem_r->set_friendly_name("Memory_1"); mem_w->add_control_dependency(mem_r); sigm->add_control_dependency(mem_w); diff --git a/src/plugins/intel_gna/tests/functional/shared_tests_instances/single_layer_tests/activation.cpp b/src/plugins/intel_gna/tests/functional/shared_tests_instances/single_layer_tests/activation.cpp index 210540b5ac6841..b3dd8026cb2b26 100644 --- 
a/src/plugins/intel_gna/tests/functional/shared_tests_instances/single_layer_tests/activation.cpp +++ b/src/plugins/intel_gna/tests/functional/shared_tests_instances/single_layer_tests/activation.cpp @@ -33,15 +33,14 @@ class ActivationLayerGNATest : public ActivationLayerTest { threshold = 1.0; } - const auto inputReshapePattern = std::make_shared(ngraph::element::i64, - ngraph::Shape{inputShape.size()}, - inputShape); - const auto inputReshape = std::make_shared(params[0], inputReshapePattern, false); + const auto inputReshapePattern = + std::make_shared(ngraph::element::i64, ngraph::Shape{inputShape.size()}, inputShape); + const auto inputReshape = std::make_shared(params[0], inputReshapePattern, false); const auto activation = ngraph::builder::makeActivation(inputReshape, ngPrc, activationType, shapes.second, constantsValue); const auto outputReshapePattern = - std::make_shared(ngraph::element::i64, ngraph::Shape{2}, inputDims); - const auto outputReshape = std::make_shared(activation, outputReshapePattern, false); + std::make_shared(ngraph::element::i64, ngraph::Shape{2}, inputDims); + const auto outputReshape = std::make_shared(activation, outputReshapePattern, false); function = std::make_shared(ngraph::NodeVector{outputReshape}, params); } diff --git a/src/plugins/intel_gna/tests/functional/shared_tests_instances/single_layer_tests/gru_cell.cpp b/src/plugins/intel_gna/tests/functional/shared_tests_instances/single_layer_tests/gru_cell.cpp index 2efdb5d1ef3b65..81114099f46390 100644 --- a/src/plugins/intel_gna/tests/functional/shared_tests_instances/single_layer_tests/gru_cell.cpp +++ b/src/plugins/intel_gna/tests/functional/shared_tests_instances/single_layer_tests/gru_cell.cpp @@ -69,19 +69,19 @@ class GRUCellGNATest : public GRUCellTest { auto reccurrenceWeightsNode = ngraph::builder::makeConstant(ngPrc, WRB[1], reccurrenceWeights_vals); auto biasNode = ngraph::builder::makeConstant(ngPrc, WRB[2], bias_vals); - auto gru_cell = std::make_shared(params[0], - params[1], - weightsNode, - reccurrenceWeightsNode, - biasNode, - hidden_size, - activations, - activations_alpha, - activations_beta, - clip, - linear_before_reset); - - ngraph::ResultVector results{std::make_shared(gru_cell->output(0))}; + auto gru_cell = std::make_shared(params[0], + params[1], + weightsNode, + reccurrenceWeightsNode, + biasNode, + hidden_size, + activations, + activations_alpha, + activations_beta, + clip, + linear_before_reset); + + ngraph::ResultVector results{std::make_shared(gru_cell->output(0))}; function = std::make_shared(results, params, "gru_cell"); if (should_decompose) { ngraph::pass::Manager m; diff --git a/src/plugins/intel_gna/tests/functional/shared_tests_instances/single_layer_tests/gru_sequence.cpp b/src/plugins/intel_gna/tests/functional/shared_tests_instances/single_layer_tests/gru_sequence.cpp index 598ca5167420bb..97cd54070f1506 100644 --- a/src/plugins/intel_gna/tests/functional/shared_tests_instances/single_layer_tests/gru_sequence.cpp +++ b/src/plugins/intel_gna/tests/functional/shared_tests_instances/single_layer_tests/gru_sequence.cpp @@ -78,22 +78,22 @@ class GRUSequenceGNATest : public GRUSequenceTest { std::shared_ptr seq_length = ngraph::builder::makeConstant(ngraph::element::i64, WRB[3], lengths, false); - auto gru_sequence = std::make_shared(params[0], - params[1], - seq_length, - weightsNode, - reccurrenceWeightsNode, - biasNode, - hidden_size, - direction, - activations, - activations_alpha, - activations_beta, - clip, - linear_before_reset); - - ngraph::ResultVector 
results{std::make_shared(gru_sequence->output(0)), - std::make_shared(gru_sequence->output(1))}; + auto gru_sequence = std::make_shared(params[0], + params[1], + seq_length, + weightsNode, + reccurrenceWeightsNode, + biasNode, + hidden_size, + direction, + activations, + activations_alpha, + activations_beta, + clip, + linear_before_reset); + + ngraph::ResultVector results{std::make_shared(gru_sequence->output(0)), + std::make_shared(gru_sequence->output(1))}; function = std::make_shared(results, params, "gru_sequence"); bool is_pure_sequence = m_mode == SequenceTestsMode::PURE_SEQ; diff --git a/src/plugins/intel_gna/tests/functional/shared_tests_instances/single_layer_tests/lstm_sequence.cpp b/src/plugins/intel_gna/tests/functional/shared_tests_instances/single_layer_tests/lstm_sequence.cpp index 0b7ea4daa080c8..aa846c0a9617a8 100644 --- a/src/plugins/intel_gna/tests/functional/shared_tests_instances/single_layer_tests/lstm_sequence.cpp +++ b/src/plugins/intel_gna/tests/functional/shared_tests_instances/single_layer_tests/lstm_sequence.cpp @@ -77,23 +77,23 @@ class LSTMSequenceGNATest : public LSTMSequenceTest { params[0]->get_partial_shape()[1].get_min_length()); std::shared_ptr seq_length = ngraph::builder::makeConstant(ngraph::element::i64, WRB[3], lengths, false); - auto lstm_sequence = std::make_shared(params[0], - params[1], - params[2], - seq_length, - weightsNode, - reccurrenceWeightsNode, - biasNode, - hidden_size, - direction, - activations_alpha, - activations_beta, - activations, - clip); - - ngraph::ResultVector results{std::make_shared(lstm_sequence->output(0)), - std::make_shared(lstm_sequence->output(1)), - std::make_shared(lstm_sequence->output(2))}; + auto lstm_sequence = std::make_shared(params[0], + params[1], + params[2], + seq_length, + weightsNode, + reccurrenceWeightsNode, + biasNode, + hidden_size, + direction, + activations_alpha, + activations_beta, + activations, + clip); + + ngraph::ResultVector results{std::make_shared(lstm_sequence->output(0)), + std::make_shared(lstm_sequence->output(1)), + std::make_shared(lstm_sequence->output(2))}; function = std::make_shared(results, params, "lstm_sequence"); bool is_pure_sequence = m_mode == SequenceTestsMode::PURE_SEQ; diff --git a/src/plugins/intel_gna/tests/unit/gna_executable_network_metrics_test.cpp b/src/plugins/intel_gna/tests/unit/gna_executable_network_metrics_test.cpp index 838fa4b31b9ee9..bdcac55afb0d76 100644 --- a/src/plugins/intel_gna/tests/unit/gna_executable_network_metrics_test.cpp +++ b/src/plugins/intel_gna/tests/unit/gna_executable_network_metrics_test.cpp @@ -39,10 +39,10 @@ class GnaExecutableNetworkMetricsTest : public ::testing::Test { protected: std::shared_ptr getFunction() { - auto firstInput = std::make_shared(net_precision, shape); - auto secondInput = std::make_shared(net_precision, shape); - auto matmul = std::make_shared(firstInput, secondInput, false, true); - auto result = std::make_shared(matmul); + auto firstInput = std::make_shared(net_precision, shape); + auto secondInput = std::make_shared(net_precision, shape); + auto matmul = std::make_shared(firstInput, secondInput, false, true); + auto result = std::make_shared(matmul); auto function = std::make_shared(ov::ResultVector({result}), ov::ParameterVector({firstInput}), "MatMul"); return function; diff --git a/src/plugins/intel_gna/tests/unit/gna_get_aligned_split_sizes.cpp b/src/plugins/intel_gna/tests/unit/gna_get_aligned_split_sizes.cpp index 0cfc4caed8ae59..0ec3d2063b264b 100644 --- 
a/src/plugins/intel_gna/tests/unit/gna_get_aligned_split_sizes.cpp +++ b/src/plugins/intel_gna/tests/unit/gna_get_aligned_split_sizes.cpp @@ -57,12 +57,10 @@ void RunVariadicSplitSupportedTest(DeviceVersion device_version, std::vector( - std::make_shared(ngraph::element::f32, input_shape), - ngraph::opset9::Constant::create(ngraph::element::i64, ngraph::Shape({1}), {axis}), - ngraph::opset9::Constant::create(ngraph::element::i64, - ngraph::Shape({split_lengths.size()}), - split_lengths)); + auto split = std::make_shared( + std::make_shared(ngraph::element::f32, input_shape), + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape({1}), {axis}), + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape({split_lengths.size()}), split_lengths)); ASSERT_TRUE(Limitations::is_split_supported(split, false) == result); } Limitations::deinit(); @@ -103,9 +101,9 @@ void RunSplitSupportedTest(DeviceVersion device_version, std::vector( - std::make_shared(ngraph::element::f32, input_shape), - ngraph::opset9::Constant::create(ngraph::element::i64, ngraph::Shape({}), {axis}), + auto split = std::make_shared( + std::make_shared(ngraph::element::f32, input_shape), + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape({}), {axis}), num_splits); ASSERT_TRUE(Limitations::is_split_supported(split, false) == result); } diff --git a/src/plugins/intel_gna/tests/unit/gna_hw_precision_test.cpp b/src/plugins/intel_gna/tests/unit/gna_hw_precision_test.cpp index 3cca0989543e25..36c97f3c030d68 100644 --- a/src/plugins/intel_gna/tests/unit/gna_hw_precision_test.cpp +++ b/src/plugins/intel_gna/tests/unit/gna_hw_precision_test.cpp @@ -52,10 +52,10 @@ class GNAHwPrecisionTest : public ::testing::Test { protected: std::shared_ptr getFunction() { - auto firstInput = std::make_shared(net_precision, shape); - auto secondInput = std::make_shared(net_precision, shape); - auto matmul = std::make_shared(firstInput, secondInput, false, true); - auto result = std::make_shared(matmul); + auto firstInput = std::make_shared(net_precision, shape); + auto secondInput = std::make_shared(net_precision, shape); + auto matmul = std::make_shared(firstInput, secondInput, false, true); + auto result = std::make_shared(matmul); auto function = std::make_shared(ov::ResultVector({result}), ov::ParameterVector({firstInput}), "MatMul"); return function; diff --git a/src/plugins/intel_gna/tests/unit/gna_infer_request_test.cpp b/src/plugins/intel_gna/tests/unit/gna_infer_request_test.cpp index c20c37b4f1f0fb..4077a86159c013 100644 --- a/src/plugins/intel_gna/tests/unit/gna_infer_request_test.cpp +++ b/src/plugins/intel_gna/tests/unit/gna_infer_request_test.cpp @@ -49,7 +49,7 @@ class GNAInferRequestTest : public ::testing::Test { ov::test::utils::generate_float_numbers(shape_size, -0.5f, 0.5f), false); - auto add = std::make_shared(params[0], add_const); + auto add = std::make_shared(params[0], add_const); auto res = std::make_shared(add); auto function = std::make_shared(res, params, "Add"); return function; diff --git a/src/plugins/intel_gna/tests/unit/gna_memory_alignment.cpp b/src/plugins/intel_gna/tests/unit/gna_memory_alignment.cpp index 0678ccbd2329f3..02c3b7ce0c6cf3 100644 --- a/src/plugins/intel_gna/tests/unit/gna_memory_alignment.cpp +++ b/src/plugins/intel_gna/tests/unit/gna_memory_alignment.cpp @@ -97,10 +97,10 @@ class GNAPluginLoadNetworkTests : public ::testing::TestWithParam getMulFunction(const ngraph::Shape input_shape) { const ngraph::element::Type net_precision = ngraph::element::f32; - auto input = 
std::make_shared(net_precision, input_shape); - auto multiplier = std::make_shared(net_precision, input_shape); - auto matmul = std::make_shared(input, multiplier, false, true); - auto result = std::make_shared(matmul); + auto input = std::make_shared(net_precision, input_shape); + auto multiplier = std::make_shared(net_precision, input_shape); + auto matmul = std::make_shared(input, multiplier, false, true); + auto result = std::make_shared(matmul); auto function = std::make_shared(ov::ResultVector({result}), ov::ParameterVector({input}), "MatMul"); return function; } diff --git a/src/plugins/intel_gna/tests/unit/gna_memory_compact_test.cpp b/src/plugins/intel_gna/tests/unit/gna_memory_compact_test.cpp index 0673e7b11064c9..9918008f7997a7 100644 --- a/src/plugins/intel_gna/tests/unit/gna_memory_compact_test.cpp +++ b/src/plugins/intel_gna/tests/unit/gna_memory_compact_test.cpp @@ -14,6 +14,7 @@ #include "gna_fused_iterator.hpp" #include "gna_plugin.hpp" #include "memory/gna_memory.hpp" +#include "openvino/opsets/opset8.hpp" #include "ov_models/builders.hpp" using namespace InferenceEngine; @@ -316,11 +317,11 @@ TEST_F(GNAMemoryOrderTest, orderingFusedLayersActivation) { ov::CoordinateDiff pad_begin(0, 0), pad_end(0, 0); auto weights = ngraph::builder::makeConstant(ov::element::f32, {8, 16, 1, 1}, {1.f}); - auto input = std::make_shared(ov::element::f32, input_shape); - auto conv = std::make_shared(input, weights, strides, pad_begin, pad_end, dilations); + auto input = std::make_shared(ov::element::f32, input_shape); + auto conv = std::make_shared(input, weights, strides, pad_begin, pad_end, dilations); auto activation = ngraph::builder::makeActivation(conv, ov::element::f32, ngraph::helpers::ActivationTypes::Sigmoid); - auto result = std::make_shared(activation); + auto result = std::make_shared(activation); auto function = std::make_shared(ov::ResultVector({result}), ov::ParameterVector({input}), "convolution"); @@ -338,8 +339,8 @@ TEST_F(GNAMemoryOrderTest, orderingFusedLayersMaxPool) { ov::CoordinateDiff pad_begin(0, 0), pad_end(0, 0); auto weights = ngraph::builder::makeConstant(ov::element::f32, {8, 16, 1, 1}, {1.f}); - auto input = std::make_shared(ov::element::f32, input_shape); - auto conv = std::make_shared(input, weights, strides, pad_begin, pad_end, dilations); + auto input = std::make_shared(ov::element::f32, input_shape); + auto conv = std::make_shared(input, weights, strides, pad_begin, pad_end, dilations); OPENVINO_SUPPRESS_DEPRECATED_START auto maxpool = ngraph::builder::makePooling(conv, {1, 1}, @@ -351,7 +352,7 @@ TEST_F(GNAMemoryOrderTest, orderingFusedLayersMaxPool) { false, ngraph::helpers::PoolingTypes::MAX); OPENVINO_SUPPRESS_DEPRECATED_END - auto result = std::make_shared(maxpool); + auto result = std::make_shared(maxpool); auto function = std::make_shared(ov::ResultVector({result}), ov::ParameterVector({input}), "convolution"); @@ -369,8 +370,8 @@ TEST_F(GNAMemoryOrderTest, orderingFusedLayersActivationMaxPool) { ov::CoordinateDiff pad_begin(0, 0), pad_end(0, 0); auto weights = ngraph::builder::makeConstant(ov::element::f32, {8, 16, 1, 1}, {1.f}); - auto input = std::make_shared(ov::element::f32, input_shape); - auto conv = std::make_shared(input, weights, strides, pad_begin, pad_end, dilations); + auto input = std::make_shared(ov::element::f32, input_shape); + auto conv = std::make_shared(input, weights, strides, pad_begin, pad_end, dilations); auto activation = ngraph::builder::makeActivation(conv, ov::element::f32, ngraph::helpers::ActivationTypes::Sigmoid); 
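// Editor's sketch, not part of the original patch: makePooling() below stays wrapped in the
// deprecation-suppression macros, and with the "openvino/opsets/opset8.hpp" include that this patch
// adds, the same 1x1 max pooling is presumed to be expressible directly as an opset op. The op
// version (v1) and the rounding/pad arguments are assumptions inferred from the surrounding call;
// the node is illustrative only and intentionally left unconnected to the model results.
auto maxpool_sketch = std::make_shared<ov::op::v1::MaxPool>(activation,
                                                            ov::Strides{1, 1},  // strides
                                                            ov::Shape{0, 0},    // pads_begin
                                                            ov::Shape{0, 0},    // pads_end
                                                            ov::Shape{1, 1},    // kernel
                                                            ov::op::RoundingType::FLOOR,
                                                            ov::op::PadType::EXPLICIT);
(void)maxpool_sketch;  // intentionally unused; it only documents the presumed replacement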
OPENVINO_SUPPRESS_DEPRECATED_START @@ -384,7 +385,7 @@ TEST_F(GNAMemoryOrderTest, orderingFusedLayersActivationMaxPool) { false, ngraph::helpers::PoolingTypes::MAX); OPENVINO_SUPPRESS_DEPRECATED_END - auto result = std::make_shared(maxpool); + auto result = std::make_shared(maxpool); auto function = std::make_shared(ov::ResultVector({result}), ov::ParameterVector({input}), "convolution"); diff --git a/src/plugins/intel_gna/tests/unit/gna_plugin_load_network_test.cpp b/src/plugins/intel_gna/tests/unit/gna_plugin_load_network_test.cpp index 24d6a674104cfe..435f0bbab3d1bf 100644 --- a/src/plugins/intel_gna/tests/unit/gna_plugin_load_network_test.cpp +++ b/src/plugins/intel_gna/tests/unit/gna_plugin_load_network_test.cpp @@ -60,9 +60,9 @@ class GNAPluginLoadNetworkTest : public ::testing::Test, public ::testing::WithP const auto& model = GetParam().model; using ngraph::element::f32; - auto parameter = std::make_shared(f32, ngraph::Shape{model.input_size}); + auto parameter = std::make_shared(f32, ngraph::Shape{model.input_size}); - auto conv = std::dynamic_pointer_cast( + auto conv = std::dynamic_pointer_cast( ngraph::builder::makeConvolution(parameter, f32, model.filter_size, @@ -72,7 +72,7 @@ class GNAPluginLoadNetworkTest : public ::testing::Test, public ::testing::WithP c_dilations, ngraph::op::PadType::EXPLICIT, c_num_out_channels)); - auto result = std::make_shared(conv); + auto result = std::make_shared(conv); function = std::make_shared(result, ov::ParameterVector{parameter}, "convolution"); } }; diff --git a/src/plugins/intel_gna/tests/unit/ops/util_test.cpp b/src/plugins/intel_gna/tests/unit/ops/util_test.cpp index 2433b9864c3fbf..30a34995bd4483 100644 --- a/src/plugins/intel_gna/tests/unit/ops/util_test.cpp +++ b/src/plugins/intel_gna/tests/unit/ops/util_test.cpp @@ -80,9 +80,9 @@ TEST_P(GnaOpsUtilIsEltwiseAddTest, isEltwiseAddTest) { ov::NodeVector pooling_nodes_false = {std::make_shared(), std::make_shared(), std::make_shared(), - std::make_shared()}; + std::make_shared()}; -ov::NodeVector pooling_nodes_true = {std::make_shared()}; +ov::NodeVector pooling_nodes_true = {std::make_shared()}; ov::NodeVector eltwise_mul_nodes_false = { std::make_shared(), diff --git a/src/plugins/intel_gna/tests/unit/transformations/gather_sinking_test_utils.hpp b/src/plugins/intel_gna/tests/unit/transformations/gather_sinking_test_utils.hpp index a659bbb85878e9..d89eb0ee91f3bf 100644 --- a/src/plugins/intel_gna/tests/unit/transformations/gather_sinking_test_utils.hpp +++ b/src/plugins/intel_gna/tests/unit/transformations/gather_sinking_test_utils.hpp @@ -23,9 +23,9 @@ std::shared_ptr make_gather(std::shared_ptr input const ov::Shape& input_shape = input_node->get_output_shape(0); const std::vector indexes = create_indices_func(input_shape[axis], 0); - auto gather_indexes_node = ov::opset12::Constant::create(ov::element::i64, ov::Shape{indexes.size()}, indexes); + auto gather_indexes_node = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{indexes.size()}, indexes); - auto gather_axis_node = ov::opset12::Constant::create(ov::element::i64, ov::Shape{}, {axis}); + auto gather_axis_node = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {axis}); return std::make_shared(input_node->output(0), gather_indexes_node, gather_axis_node); } diff --git a/src/plugins/intel_gna/tests/unit/transformations/gna_broadcast_const.cpp b/src/plugins/intel_gna/tests/unit/transformations/gna_broadcast_const.cpp index 184f20b8204525..7b3b59f55f2036 100644 --- 
a/src/plugins/intel_gna/tests/unit/transformations/gna_broadcast_const.cpp +++ b/src/plugins/intel_gna/tests/unit/transformations/gna_broadcast_const.cpp @@ -12,6 +12,7 @@ #include "common_test_utils/ov_test_utils.hpp" #include "legacy/ngraph_ops/eltwise.hpp" #include "legacy/ngraph_ops/scaleshift.hpp" +#include "openvino/opsets/opset8.hpp" #include "transformations/broadcast_const.hpp" namespace testing { @@ -26,17 +27,12 @@ std::unique_ptr createUnique(Args&&... args) { return std::unique_ptr(new T(std::forward(args)...)); } -std::shared_ptr createFakeQuantizeNode(std::shared_ptr parent_node) { - auto input_low = ngraph::opset8::Constant::create(ngraph::element::f32, {}, {-0.5}); - auto input_high = ngraph::opset8::Constant::create(ngraph::element::f32, {}, {0.5}); - auto output_low = ngraph::opset8::Constant::create(ngraph::element::f32, {}, {-0.5}); - auto output_high = ngraph::opset8::Constant::create(ngraph::element::f32, {}, {0.5}); - return std::make_shared(parent_node, - input_low, - input_high, - output_low, - output_high, - 0); +std::shared_ptr createFakeQuantizeNode(std::shared_ptr parent_node) { + auto input_low = ov::op::v0::Constant::create(ngraph::element::f32, {}, {-0.5}); + auto input_high = ov::op::v0::Constant::create(ngraph::element::f32, {}, {0.5}); + auto output_low = ov::op::v0::Constant::create(ngraph::element::f32, {}, {-0.5}); + auto output_high = ov::op::v0::Constant::create(ngraph::element::f32, {}, {0.5}); + return std::make_shared(parent_node, input_low, input_high, output_low, output_high, 0); } using Node = std::shared_ptr; @@ -133,23 +129,21 @@ std::shared_ptr CreateFunction(const ngraph::Shape& data_shape bool swap_outputs, bool add_scaleshift, EltwiseFactoryPtr eltwise_factory) { - const auto input_params_1 = std::make_shared(ngraph::element::Type_t::f32, data_shape); + const auto input_params_1 = std::make_shared(ngraph::element::Type_t::f32, data_shape); ngraph::ParameterVector params{input_params_1}; - const auto constant_1 = ngraph::opset8::Constant::create(ngraph::element::Type_t::f32, - ngraph::Shape{const_shape_dims}, - const_shape_values); + const auto constant_1 = + ov::op::v0::Constant::create(ngraph::element::Type_t::f32, ngraph::Shape{const_shape_dims}, const_shape_values); Node const_last_node = constant_1; if (add_scaleshift) { - const auto input_params_2 = - std::make_shared(ngraph::element::Type_t::f32, data_shape); + const auto input_params_2 = std::make_shared(ngraph::element::Type_t::f32, data_shape); params.push_back(input_params_2); - const auto constant_2 = ngraph::opset8::Constant::create(ngraph::element::Type_t::f32, - ngraph::Shape{const_shape_dims}, - const_shape_values); + const auto constant_2 = ov::op::v0::Constant::create(ngraph::element::Type_t::f32, + ngraph::Shape{const_shape_dims}, + const_shape_values); const_last_node = std::make_shared(input_params_2, constant_1, @@ -177,7 +171,7 @@ std::shared_ptr CreateFunction(const ngraph::Shape& data_shape const auto add = eltwise_factory->CreateNode(left_node, right_node); - const auto result = std::make_shared(add); + const auto result = std::make_shared(add); return std::make_shared(ngraph::ResultVector{result}, params); } @@ -268,13 +262,13 @@ void execute_cloned_test(std::shared_ptr function) { namespace { -std::vector opset8_eltwise_factories = {CreateEltwiseFactory(), - CreateEltwiseFactory(), - CreateEltwiseFactory()}; +std::vector opset8_eltwise_factories = {CreateEltwiseFactory(), + CreateEltwiseFactory(), + CreateEltwiseFactory()}; -std::vector all_eltwise_factories = 
-                                                        CreateEltwiseFactory(),
-                                                        CreateEltwiseFactory(),
+std::vector<EltwiseFactoryPtr> all_eltwise_factories = {CreateEltwiseFactory(),
+                                                        CreateEltwiseFactory(),
+                                                        CreateEltwiseFactory(),
                                                         CreateEltwiseFactory()};

 std::vector<ov::op::AutoBroadcastType> broadcast_passed_types = {ov::op::AutoBroadcastType::NONE,
diff --git a/src/plugins/intel_gna/tests/unit/transformations/gna_convert_dwsc_to_scaleshifts.cpp b/src/plugins/intel_gna/tests/unit/transformations/gna_convert_dwsc_to_scaleshifts.cpp
index 0b88e4a6439fa4..e453dfbad66837 100644
--- a/src/plugins/intel_gna/tests/unit/transformations/gna_convert_dwsc_to_scaleshifts.cpp
+++ b/src/plugins/intel_gna/tests/unit/transformations/gna_convert_dwsc_to_scaleshifts.cpp
@@ -12,6 +12,7 @@
 #include

 #include "common_test_utils/ov_test_utils.hpp"
+#include "openvino/opsets/opset7.hpp"
 #include "transformations/convert_dwsc_to_scaleshifts.hpp"

 namespace testing {
@@ -40,19 +41,19 @@ typedef std::tuple fqDWSCToScaleShiftsParams;

-std::shared_ptr<ngraph::Node> createFQ(std::shared_ptr<ngraph::Node>& in_node) {
-    auto input_low = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1});
-    auto input_high = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {5});
-    auto output_low = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {0});
-    auto output_high = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {10});
-    return std::make_shared<ngraph::opset7::FakeQuantize>(in_node, input_low, input_high, output_low, output_high, 11);
+std::shared_ptr<ov::Node> createFQ(std::shared_ptr<ov::Node>& in_node) {
+    auto input_low = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1});
+    auto input_high = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {5});
+    auto output_low = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {0});
+    auto output_high = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {10});
+    return std::make_shared<ov::op::v0::FakeQuantize>(in_node, input_low, input_high, output_low, output_high, 11);
 }

 std::shared_ptr createBiasFQ(const std::shared_ptr& in_node,
-                             std::shared_ptr<ngraph::opset7::Constant>& bias_const,
+                             std::shared_ptr<ov::op::v0::Constant>& bias_const,
                              const bool& fq) {
     std::shared_ptr node;

-    node = std::make_shared<ngraph::opset7::Add>(in_node, bias_const);
+    node = std::make_shared<ov::op::v1::Add>(in_node, bias_const);

     if (fq) {
         node = createFQ(node);
@@ -61,64 +62,64 @@ std::shared_ptr createBiasFQ(const std::shared_ptr&
     return node;
 }

-std::shared_ptr createFunction(const bool& fq,
-                               const modelType& model,
-                               const ngraph::Output<ngraph::Node>& input_node,
-                               const ngraph::Shape& filters_shape,
-                               const ngraph::Strides& conv_stride,
-                               const ngraph::CoordinateDiff& pads_begin,
-                               const ngraph::CoordinateDiff& pads_end,
-                               const ngraph::Strides& conv_dilation,
-                               const ngraph::Shape& bias_shape,
-                               const ngraph::op::PadType& pad_type,
-                               std::shared_ptr<ngraph::opset7::GroupConvolution>& dwsc,
-                               std::shared_ptr<ngraph::opset7::Constant>& bias_const,
-                               std::shared_ptr<ngraph::opset7::FakeQuantize>& fq_bias) {
+std::shared_ptr createFunction(const bool& fq,
+                               const modelType& model,
+                               const ngraph::Output<ngraph::Node>& input_node,
+                               const ngraph::Shape& filters_shape,
+                               const ngraph::Strides& conv_stride,
+                               const ngraph::CoordinateDiff& pads_begin,
+                               const ngraph::CoordinateDiff& pads_end,
+                               const ngraph::Strides& conv_dilation,
+                               const ngraph::Shape& bias_shape,
+                               const ngraph::op::PadType& pad_type,
+                               std::shared_ptr<ov::op::v1::GroupConvolution>& dwsc,
+                               std::shared_ptr<ov::op::v0::Constant>& bias_const,
+                               std::shared_ptr<ov::op::v0::FakeQuantize>& fq_bias) {
     std::shared_ptr fq_filters;

-    auto transpose_in_order = std::make_shared<ngraph::opset7::Constant>(ngraph::element::i64,
-                                                                         ngraph::Shape{4},
-                                                                         std::vector{0, 3, 1, 2});
-    auto transpose_in = std::make_shared<ngraph::opset7::Transpose>(input_node,
transpose_in_order); + auto transpose_in_order = std::make_shared(ngraph::element::i64, + ngraph::Shape{4}, + std::vector{0, 3, 1, 2}); + auto transpose_in = std::make_shared(input_node, transpose_in_order); if (fq) { - fq_filters = std::make_shared( + fq_filters = std::make_shared( ngraph::element::i64, ngraph::Shape{input_node.get_shape()[3], 1, filters_shape[0], filters_shape[1]}); fq_filters = createFQ(fq_filters); - fq_filters = std::make_shared( + fq_filters = std::make_shared( fq_filters, - ngraph::opset7::Constant::create( + ov::op::v0::Constant::create( ngraph::element::i64, ngraph::Shape{5}, ngraph::Shape{input_node.get_shape()[3], 1, 1, filters_shape[0], filters_shape[1]}), false); } else { - fq_filters = std::make_shared( + fq_filters = std::make_shared( ngraph::element::i64, ngraph::Shape{input_node.get_shape()[3], 1, 1, filters_shape[0], filters_shape[1]}); } - dwsc = std::make_shared(transpose_in, - fq_filters, - conv_stride, - pads_begin, - pads_end, - conv_dilation, - pad_type); - auto transpose_out_order = std::make_shared(ngraph::element::i64, - ngraph::Shape{4}, - std::vector{0, 2, 3, 1}); - auto last_op = std::make_shared(dwsc, transpose_out_order); + dwsc = std::make_shared(transpose_in, + fq_filters, + conv_stride, + pads_begin, + pads_end, + conv_dilation, + pad_type); + auto transpose_out_order = std::make_shared(ngraph::element::i64, + ngraph::Shape{4}, + std::vector{0, 2, 3, 1}); + auto last_op = std::make_shared(dwsc, transpose_out_order); if (model == modelType::TranspDWSCBiasTransp || fq) { - bias_const = std::make_shared(ngraph::element::i64, bias_shape); + bias_const = std::make_shared(ngraph::element::i64, bias_shape); auto bias = createBiasFQ(dwsc, bias_const, fq); - fq_bias = std::dynamic_pointer_cast(bias); - last_op = std::make_shared(bias, transpose_out_order); + fq_bias = std::dynamic_pointer_cast(bias); + last_op = std::make_shared(bias, transpose_out_order); } - return std::make_shared(last_op); + return std::make_shared(last_op); } std::shared_ptr get_initial_function(const bool& fq, @@ -131,10 +132,10 @@ std::shared_ptr get_initial_function(const bool& fq, const ngraph::Strides& conv_dilation, const ngraph::Shape& bias_shape, const ngraph::op::PadType& pad_type, - std::shared_ptr& dwsc, - std::shared_ptr& bias_const, - std::shared_ptr& fq_bias) { - auto input_params = std::make_shared(ngraph::element::i64, input_shape); + std::shared_ptr& dwsc, + std::shared_ptr& bias_const, + std::shared_ptr& fq_bias) { + auto input_params = std::make_shared(ngraph::element::i64, input_shape); auto result = createFunction(fq, model, input_params, @@ -171,9 +172,9 @@ void ConvertDWSCToScaleShiftsTestInvalidFixture::SetUp() { ngraph::Strides conv_stride, conv_dilation; ngraph::CoordinateDiff pads_begin, pads_end; ngraph::op::PadType pad_type; - std::shared_ptr dwsc; - std::shared_ptr bias_const; - std::shared_ptr fq_bias; + std::shared_ptr dwsc; + std::shared_ptr bias_const; + std::shared_ptr fq_bias; std::tie(fq, params) = this->GetParam(); std::tie(model, input_shape, @@ -229,9 +230,9 @@ class ConvertDWSCToScaleShiftsTestFixture : public ov::test::TestsCommon, const ngraph::Strides& conv_dilation, const ngraph::Shape& bias_shape, const ngraph::op::PadType& pad_type, - const std::shared_ptr& dwsc, - const std::shared_ptr& bias_const, - const std::shared_ptr& fq_bias); + const std::shared_ptr& dwsc, + const std::shared_ptr& bias_const, + const std::shared_ptr& fq_bias); public: std::shared_ptr function, reference_function; @@ -246,9 +247,9 @@ void 
ConvertDWSCToScaleShiftsTestFixture::SetUp() { ngraph::Strides conv_stride, conv_dilation; ngraph::CoordinateDiff pads_begin, pads_end; ngraph::op::PadType pad_type; - std::shared_ptr dwsc; - std::shared_ptr bias_const; - std::shared_ptr fq_bias; + std::shared_ptr dwsc; + std::shared_ptr bias_const; + std::shared_ptr fq_bias; std::tie(fq, params) = this->GetParam(); std::tie(model, input_shape, @@ -288,55 +289,53 @@ void ConvertDWSCToScaleShiftsTestFixture::SetUp() { fq_bias); } -std::shared_ptr FlatCrop(ngraph::Output input, size_t offset, size_t size) { - return std::make_shared( - input, // data - ngraph::opset7::Constant::create(ngraph::element::i64, - ngraph::Shape{2}, - {(size_t)0, offset}), // begin sice index - ngraph::opset7::Constant::create(ngraph::element::i64, - ngraph::Shape{2}, - {(size_t)0, offset + size}), // end slice index - ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {(size_t)1, (size_t)1}), // strides - std::vector{1, 0}, // begin mask - std::vector{1, 0}); // end mask +std::shared_ptr FlatCrop(ngraph::Output input, size_t offset, size_t size) { + return std::make_shared( + input, // data + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {(size_t)0, offset}), // begin sice index + ov::op::v0::Constant::create(ngraph::element::i64, + ngraph::Shape{2}, + {(size_t)0, offset + size}), // end slice index + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {(size_t)1, (size_t)1}), // strides + std::vector{1, 0}, // begin mask + std::vector{1, 0}); // end mask } -std::shared_ptr InsertFQLayer(const std::shared_ptr fq_layer, +std::shared_ptr InsertFQLayer(const std::shared_ptr fq_layer, std::shared_ptr last_node) { if (fq_layer != nullptr) { return fq_layer->clone_with_new_inputs( {last_node, - ngraph::opset7::Constant::create( + ov::op::v0::Constant::create( ngraph::element::f32, ngraph::Shape{1}, - std::dynamic_pointer_cast(fq_layer->input_value(1).get_node_shared_ptr()) + std::dynamic_pointer_cast(fq_layer->input_value(1).get_node_shared_ptr()) ->cast_vector()), - ngraph::opset7::Constant::create( + ov::op::v0::Constant::create( ngraph::element::f32, ngraph::Shape{1}, - std::dynamic_pointer_cast(fq_layer->input_value(2).get_node_shared_ptr()) + std::dynamic_pointer_cast(fq_layer->input_value(2).get_node_shared_ptr()) ->cast_vector()), - ngraph::opset7::Constant::create( + ov::op::v0::Constant::create( ngraph::element::f32, ngraph::Shape{1}, - std::dynamic_pointer_cast(fq_layer->input_value(3).get_node_shared_ptr()) + std::dynamic_pointer_cast(fq_layer->input_value(3).get_node_shared_ptr()) ->cast_vector()), - ngraph::opset7::Constant::create( + ov::op::v0::Constant::create( ngraph::element::f32, ngraph::Shape{1}, - std::dynamic_pointer_cast(fq_layer->input_value(4).get_node_shared_ptr()) + std::dynamic_pointer_cast(fq_layer->input_value(4).get_node_shared_ptr()) ->cast_vector())}); } return last_node; } -std::shared_ptr DecomposeDWSC(std::shared_ptr dwsc, - std::shared_ptr bias_const, - std::shared_ptr fq_bias, - std::shared_ptr flat_input_plane, +std::shared_ptr DecomposeDWSC(std::shared_ptr dwsc, + std::shared_ptr bias_const, + std::shared_ptr fq_bias, + std::shared_ptr flat_input_plane, std::shared_ptr flat_filters_plane) { - std::shared_ptr const_zero_padding; + std::shared_ptr const_zero_padding; std::shared_ptr reshaped_bias; ngraph::OutputVector output_chunks; auto input_channel_count = dwsc->get_input_shape(0)[1]; @@ -349,17 +348,16 @@ std::shared_ptr 
DecomposeDWSC(std::shared_ptr(dwsc->get_element_type(), - ngraph::Shape{1, input_channel_count}, - 0); + const_zero_padding = + std::make_shared(dwsc->get_element_type(), ngraph::Shape{1, input_channel_count}, 0); } // Reshape bias const if (bias_const) { auto bias_size = shape_size(bias_const->get_shape()); - reshaped_bias = ov::op::util::make_try_fold( + reshaped_bias = ov::op::util::make_try_fold( bias_const, - ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2}, ngraph::Shape{1, bias_size}), + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{2}, ngraph::Shape{1, bias_size}), false); } @@ -382,17 +380,15 @@ std::shared_ptr DecomposeDWSC(std::shared_ptr(conv_input_slice, conv_filter_slice); + previous_layer_output = std::make_shared(conv_input_slice, conv_filter_slice); if (bias_const) { - previous_layer_output = - std::make_shared(previous_layer_output, reshaped_bias); + previous_layer_output = std::make_shared(previous_layer_output, reshaped_bias); previous_layer_output = InsertFQLayer(fq_bias, previous_layer_output); } last_layer_output = previous_layer_output; } else { - last_layer_output = std::make_shared(conv_input_slice, conv_filter_slice); - last_layer_output = std::make_shared(last_layer_output, previous_layer_output); + last_layer_output = std::make_shared(conv_input_slice, conv_filter_slice); + last_layer_output = std::make_shared(last_layer_output, previous_layer_output); previous_layer_output = last_layer_output; } } @@ -408,7 +404,7 @@ std::shared_ptr DecomposeDWSC(std::shared_ptr 1 if (output_chunks.size() > 1) { - return std::make_shared(output_chunks, 0); + return std::make_shared(output_chunks, 0); } return output_chunks[0].get_node_shared_ptr(); @@ -425,46 +421,46 @@ std::shared_ptr ConvertDWSCToScaleShiftsTestFixture::get_refer const ngraph::Strides& conv_dilation, const ngraph::Shape& bias_shape, const ngraph::op::PadType& pad_type, - const std::shared_ptr& dwsc, - const std::shared_ptr& bias_const, - const std::shared_ptr& fq_bias) { - auto input_params = std::make_shared(ngraph::element::i64, input_shape); + const std::shared_ptr& dwsc, + const std::shared_ptr& bias_const, + const std::shared_ptr& fq_bias) { + auto input_params = std::make_shared(ngraph::element::i64, input_shape); auto output_channel_count = dwsc->get_output_shape(0)[1]; auto output_width = dwsc->get_output_shape(0)[3]; // Prepare flat input data - auto flat_input_plane = std::make_shared( + auto flat_input_plane = std::make_shared( input_params, - ngraph::opset7::Constant::create(ngraph::element::i64, - ngraph::Shape{2}, - ngraph::Shape{1, ngraph::shape_size(input_shape)}), + ov::op::v0::Constant::create(ngraph::element::i64, + ngraph::Shape{2}, + ngraph::Shape{1, ngraph::shape_size(input_shape)}), false); // Prepare flat filter data auto filters_const = std::dynamic_pointer_cast(dwsc->get_input_node_shared_ptr(1)); auto filters_size = ngraph::shape_size(filters_const->get_shape()); - auto transposed_filters_const = ov::op::util::make_try_fold( + auto transposed_filters_const = ov::op::util::make_try_fold( filters_const, - ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{5}, ngraph::Shape{4, 1, 2, 3, 0})); + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{5}, ngraph::Shape{4, 1, 2, 3, 0})); - auto flat_filters_plane = ov::op::util::make_try_fold( + auto flat_filters_plane = ov::op::util::make_try_fold( transposed_filters_const, - ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2}, ngraph::Shape{1, 
filters_size}), + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{2}, ngraph::Shape{1, filters_size}), false); // Convert DWSC to a set of diagonal layers auto output_plane = DecomposeDWSC(dwsc, bias_const, fq_bias, flat_input_plane, flat_filters_plane); // Restore the original output shape - auto result = std::make_shared( + auto result = std::make_shared( output_plane, - ngraph::opset7::Constant::create(ngraph::element::i64, - ngraph::Shape{4}, - ngraph::Shape{1, output_channel_count, 1, output_width}), + ov::op::v0::Constant::create(ngraph::element::i64, + ngraph::Shape{4}, + ngraph::Shape{1, output_channel_count, 1, output_width}), false); - return std::make_shared(ngraph::ResultVector{std::make_shared(result)}, + return std::make_shared(ngraph::ResultVector{std::make_shared(result)}, ngraph::ParameterVector{input_params}); } diff --git a/src/plugins/intel_gna/tests/unit/transformations/gna_convert_matmul_to_pointwise_convolution.cpp b/src/plugins/intel_gna/tests/unit/transformations/gna_convert_matmul_to_pointwise_convolution.cpp index 4c8dc70f452cb5..3ec27e0b5e151e 100644 --- a/src/plugins/intel_gna/tests/unit/transformations/gna_convert_matmul_to_pointwise_convolution.cpp +++ b/src/plugins/intel_gna/tests/unit/transformations/gna_convert_matmul_to_pointwise_convolution.cpp @@ -12,6 +12,7 @@ #include #include "common_test_utils/ov_test_utils.hpp" +#include "openvino/opsets/opset7.hpp" #include "ov_models/builders.hpp" #include "transformations/convert_matmul_to_pointwise_convolution.hpp" @@ -22,12 +23,12 @@ namespace { struct Graph { std::shared_ptr createFunction(); - std::shared_ptr input_params; + std::shared_ptr input_params; std::shared_ptr output; }; std::shared_ptr Graph::createFunction() { - auto result = std::make_shared(output); + auto result = std::make_shared(output); return std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params}); } @@ -84,8 +85,8 @@ class CreateBaseDecorator : public CreateGraphDecorator { Graph CreateBaseDecorator::build() { Graph graph; - graph.input_params = std::make_shared(ngraph::element::i64, input_data_shape_); - graph.output = ngraph::opset7::Constant::create(ngraph::element::i64, input_const_shape_, {1}); + graph.input_params = std::make_shared(ngraph::element::i64, input_data_shape_); + graph.output = ov::op::v0::Constant::create(ngraph::element::i64, input_const_shape_, {1}); return graph; } @@ -98,17 +99,12 @@ class CreateFakeQuantize : public CreateGraphDecorator { void updateGraph(Graph&) override; }; -std::shared_ptr createFakeQuantizeNode(std::shared_ptr parent_node) { - auto input_low = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1}); - auto input_high = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {20}); - auto output_low = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {0}); - auto output_high = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {10}); - return std::make_shared(parent_node, - input_low, - input_high, - output_low, - output_high, - 11); +std::shared_ptr createFakeQuantizeNode(std::shared_ptr parent_node) { + auto input_low = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1}); + auto input_high = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {20}); + auto output_low = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {0}); + auto output_high = ov::op::v0::Constant::create(ngraph::element::f32, 
ngraph::Shape{1}, {10}); + return std::make_shared(parent_node, input_low, input_high, output_low, output_high, 11); } void CreateFakeQuantize::updateGraph(Graph& graph) { @@ -124,7 +120,7 @@ class CreateMatMul : public CreateGraphDecorator { }; void CreateMatMul::updateGraph(Graph& graph) { - auto matmul_node = std::make_shared(graph.input_params, graph.output); + auto matmul_node = std::make_shared(graph.input_params, graph.output); graph.output = matmul_node; } @@ -149,7 +145,7 @@ void CreateAdd::updateGraph(Graph& graph) { } auto bias = ngraph::builder::makeConstant(ngraph::element::i64, axes, {}, true); - auto add_node = std::make_shared(graph.output, bias); + auto add_node = std::make_shared(graph.output, bias); graph.output = add_node; } @@ -181,17 +177,17 @@ template (ngraph::element::i64, ngraph::Shape{16, 8}); - auto constant_node = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{8, 8}, {1}); + graph.input_params = std::make_shared(ngraph::element::i64, ngraph::Shape{16, 8}); + auto constant_node = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{8, 8}, {1}); - auto const_reshape_before = std::make_shared(ngraph::element::Type_t::i64, - ngraph::Shape{4}, - ngraph::Shape{1, 1, 16, 8}); - auto reshape_before = std::make_shared(graph.input_params, const_reshape_before, false); + auto const_reshape_before = std::make_shared(ngraph::element::Type_t::i64, + ngraph::Shape{4}, + ngraph::Shape{1, 1, 16, 8}); + auto reshape_before = std::make_shared(graph.input_params, const_reshape_before, false); auto const_transpose_before = - ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{4}, ngraph::Shape{0, 3, 1, 2}); - auto transpose_before = std::make_shared(reshape_before, const_transpose_before); + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, ngraph::Shape{0, 3, 1, 2}); + auto transpose_before = std::make_shared(reshape_before, const_transpose_before); std::shared_ptr parent_node = constant_node; if (std::is_same, @@ -199,18 +195,18 @@ Graph createReferenceGraph() { parent_node = createFakeQuantizeNode(constant_node); } - auto weights_reshape_const = std::make_shared(ngraph::element::Type_t::i64, - ngraph::Shape{4}, - ngraph::Shape{8, 8, 1, 1}); - auto weights_reshaped = std::make_shared(parent_node, weights_reshape_const, false); + auto weights_reshape_const = std::make_shared(ngraph::element::Type_t::i64, + ngraph::Shape{4}, + ngraph::Shape{8, 8, 1, 1}); + auto weights_reshaped = std::make_shared(parent_node, weights_reshape_const, false); - auto conv_node = std::make_shared(transpose_before, - weights_reshaped, - ngraph::Strides{1, 1}, - ngraph::CoordinateDiff{0, 0}, - ngraph::CoordinateDiff{0, 0}, - ngraph::Strides{1, 1}, - ngraph::op::PadType::VALID); + auto conv_node = std::make_shared(transpose_before, + weights_reshaped, + ngraph::Strides{1, 1}, + ngraph::CoordinateDiff{0, 0}, + ngraph::CoordinateDiff{0, 0}, + ngraph::Strides{1, 1}, + ngraph::op::PadType::VALID); parent_node = conv_node; if (std::is_same, std::integral_constant>::value) { @@ -221,7 +217,7 @@ Graph createReferenceGraph() { } auto bias = ngraph::builder::makeConstant(ngraph::element::i64, axes, {}, true); - auto add_node = std::make_shared(parent_node, bias); + auto add_node = std::make_shared(parent_node, bias); parent_node = add_node; } @@ -231,13 +227,12 @@ Graph createReferenceGraph() { } auto const_transpose_after = - ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{4}, ngraph::Shape{0, 2, 3, 1}); - auto 
transpose_after = std::make_shared(parent_node, const_transpose_after); + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, ngraph::Shape{0, 2, 3, 1}); + auto transpose_after = std::make_shared(parent_node, const_transpose_after); - auto const_reshape_after = std::make_shared(ngraph::element::Type_t::i64, - ngraph::Shape{2}, - ngraph::Shape{16, 8}); - graph.output = std::make_shared(transpose_after, const_reshape_after, false); + auto const_reshape_after = + std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{2}, ngraph::Shape{16, 8}); + graph.output = std::make_shared(transpose_after, const_reshape_after, false); return graph; } diff --git a/src/plugins/intel_gna/tests/unit/transformations/gna_convert_padded_to_valid_convolution.cpp b/src/plugins/intel_gna/tests/unit/transformations/gna_convert_padded_to_valid_convolution.cpp index ce4ec6b702c95c..e0c1485e5691f8 100644 --- a/src/plugins/intel_gna/tests/unit/transformations/gna_convert_padded_to_valid_convolution.cpp +++ b/src/plugins/intel_gna/tests/unit/transformations/gna_convert_padded_to_valid_convolution.cpp @@ -11,6 +11,7 @@ #include #include "common_test_utils/ov_test_utils.hpp" +#include "openvino/opsets/opset7.hpp" #include "transformations/convert_padded_to_valid_convolution.hpp" namespace testing { @@ -61,7 +62,7 @@ struct ConvData { size_t pads_end_height; }; -void GetConvParams(std::shared_ptr conv, ConvData& conv_data) { +void GetConvParams(std::shared_ptr conv, ConvData& conv_data) { conv_data.input_channel_count = conv->input_value(0).get_shape()[1]; conv_data.input_height = conv->input_value(0).get_shape()[2]; conv_data.input_width = conv->input_value(0).get_shape()[3]; @@ -71,18 +72,18 @@ void GetConvParams(std::shared_ptr conv, ConvData& conv_data.pads_end_width = conv->get_pads_end()[1]; } -std::shared_ptr createFQ(std::shared_ptr& in_node) { - auto input_low = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1}); - auto input_high = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {5}); - auto output_low = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {0}); - auto output_high = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {10}); - return std::make_shared(in_node, input_low, input_high, output_low, output_high, 11); +std::shared_ptr createFQ(std::shared_ptr& in_node) { + auto input_low = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1}); + auto input_high = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {5}); + auto output_low = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {0}); + auto output_high = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {10}); + return std::make_shared(in_node, input_low, input_high, output_low, output_high, 11); } ngraph::Output createBiasFQ(const ngraph::Output& in_node, - std::shared_ptr& bias_const, + std::shared_ptr& bias_const, const bool& fq) { - std::shared_ptr bcast_add = std::make_shared(in_node, bias_const); + std::shared_ptr bcast_add = std::make_shared(in_node, bias_const); if (fq) { bcast_add = createFQ(bcast_add); @@ -91,24 +92,24 @@ ngraph::Output createBiasFQ(const ngraph::Output& in return bcast_add; } -std::shared_ptr createFunction(const bool& fq, - const modelType& model, - const ngraph::Output& input_node, - const ngraph::Shape& filters_shape, - const ngraph::Strides& conv_stride, - const ngraph::CoordinateDiff& pads_begin, - const 
ngraph::CoordinateDiff& pads_end, - const ngraph::Strides& conv_dilation, - const ngraph::Shape& bias_shape, - const ngraph::Strides& maxpool_stride, - const ngraph::Shape& maxpool_shape, - const ngraph::op::PadType& pad_type, - ConvData* conv_data) { - auto transpose_in_order = std::make_shared(ngraph::element::i64, - ngraph::Shape{4}, - std::vector{0, 3, 1, 2}); - auto transpose_in = std::make_shared(input_node, transpose_in_order); - std::shared_ptr filters = std::make_shared( +std::shared_ptr createFunction(const bool& fq, + const modelType& model, + const ngraph::Output& input_node, + const ngraph::Shape& filters_shape, + const ngraph::Strides& conv_stride, + const ngraph::CoordinateDiff& pads_begin, + const ngraph::CoordinateDiff& pads_end, + const ngraph::Strides& conv_dilation, + const ngraph::Shape& bias_shape, + const ngraph::Strides& maxpool_stride, + const ngraph::Shape& maxpool_shape, + const ngraph::op::PadType& pad_type, + ConvData* conv_data) { + auto transpose_in_order = std::make_shared(ngraph::element::i64, + ngraph::Shape{4}, + std::vector{0, 3, 1, 2}); + auto transpose_in = std::make_shared(input_node, transpose_in_order); + std::shared_ptr filters = std::make_shared( ngraph::element::i64, ngraph::Shape{4, input_node.get_shape()[3], filters_shape[0], filters_shape[1]}); @@ -116,69 +117,69 @@ std::shared_ptr createFunction(const bool& fq, filters = createFQ(filters); } - auto conv = std::make_shared(transpose_in, - filters, - conv_stride, - pads_begin, - pads_end, - conv_dilation, - pad_type); + auto conv = std::make_shared(transpose_in, + filters, + conv_stride, + pads_begin, + pads_end, + conv_dilation, + pad_type); if (conv_data) GetConvParams(conv, *conv_data); - auto transpose_out_order = std::make_shared(ngraph::element::i64, - ngraph::Shape{4}, - std::vector{0, 2, 3, 1}); - auto bias_const = std::make_shared(ngraph::element::i64, bias_shape); + auto transpose_out_order = std::make_shared(ngraph::element::i64, + ngraph::Shape{4}, + std::vector{0, 2, 3, 1}); + auto bias_const = std::make_shared(ngraph::element::i64, bias_shape); - ngraph::Output last_op = std::make_shared(conv, transpose_out_order); + ngraph::Output last_op = std::make_shared(conv, transpose_out_order); switch (model) { case modelType::TranspConvBcastAddTransp: { auto bcast_add = createBiasFQ(conv, bias_const, fq); - last_op = std::make_shared(bcast_add, transpose_out_order); + last_op = std::make_shared(bcast_add, transpose_out_order); } break; case modelType::TranspConvActTransp: { auto bcast_add = createBiasFQ(conv, bias_const, fq); - std::shared_ptr activation = std::make_shared(bcast_add); + std::shared_ptr activation = std::make_shared(bcast_add); if (fq) { activation = createFQ(activation); } - last_op = std::make_shared(activation, transpose_out_order); + last_op = std::make_shared(activation, transpose_out_order); } break; case modelType::TranspConvBcastAddMaxPoolTransp: { auto bcast_add = createBiasFQ(conv, bias_const, fq); - auto maxpool = std::make_shared(bcast_add, - maxpool_stride, - ngraph::Shape{0, 0}, - ngraph::Shape{0, 0}, - maxpool_shape, - ngraph::op::RoundingType::FLOOR, - ngraph::op::PadType::VALID); - auto transpose = std::make_shared(maxpool, transpose_out_order); - last_op = std::make_shared(transpose); + auto maxpool = std::make_shared(bcast_add, + maxpool_stride, + ngraph::Shape{0, 0}, + ngraph::Shape{0, 0}, + maxpool_shape, + ngraph::op::RoundingType::FLOOR, + ngraph::op::PadType::VALID); + auto transpose = std::make_shared(maxpool, transpose_out_order); + last_op = 
std::make_shared(transpose); } break; case modelType::TranspConvBcastAddActTransp: { auto bcast_add = createBiasFQ(conv, bias_const, fq); - auto activation = std::make_shared(bcast_add); - last_op = std::make_shared(activation, transpose_out_order); + auto activation = std::make_shared(bcast_add); + last_op = std::make_shared(activation, transpose_out_order); } break; case modelType::TranspConvBcastAddMaxPoolActTransp: { auto bcast_add = createBiasFQ(conv, bias_const, fq); - auto maxpool = std::make_shared(bcast_add, - maxpool_stride, - ngraph::Shape{0, 0}, - ngraph::Shape{0, 0}, - maxpool_shape, - ngraph::op::RoundingType::FLOOR, - ngraph::op::PadType::VALID); - auto activation = std::make_shared(maxpool); - last_op = std::make_shared(activation, transpose_out_order); + auto maxpool = std::make_shared(bcast_add, + maxpool_stride, + ngraph::Shape{0, 0}, + ngraph::Shape{0, 0}, + maxpool_shape, + ngraph::op::RoundingType::FLOOR, + ngraph::op::PadType::VALID); + auto activation = std::make_shared(maxpool); + last_op = std::make_shared(activation, transpose_out_order); } break; case modelType::TranspConvTranspBcastAdd: { @@ -187,7 +188,7 @@ std::shared_ptr createFunction(const bool& fq, case modelType::TranspConvTranspBcastAddAct: { auto bcast_add = createBiasFQ(last_op, bias_const, fq); - last_op = std::make_shared(bcast_add); + last_op = std::make_shared(bcast_add); } break; case modelType::TranspConvTransp: @@ -195,7 +196,7 @@ std::shared_ptr createFunction(const bool& fq, break; } - return std::make_shared(last_op); + return std::make_shared(last_op); } std::shared_ptr get_initial_function(const bool& fq, @@ -211,7 +212,7 @@ std::shared_ptr get_initial_function(const bool& fq, const ngraph::Shape& maxpool_shape, const ngraph::op::PadType& pad_type, ConvData& conv_data) { - auto input_params = std::make_shared(ngraph::element::i64, input_shape); + auto input_params = std::make_shared(ngraph::element::i64, input_shape); auto result = createFunction(fq, model, input_params, @@ -365,23 +366,21 @@ void ConvertPaddedToValidConvTestFixture::SetUp() { conv_data); } -std::shared_ptr FlatCrop(ngraph::Output input, size_t offset, size_t size) { - return std::make_shared( - input, // data - ngraph::opset7::Constant::create(ngraph::element::i64, - ngraph::Shape{2}, - {(size_t)0, offset}), // begin sice index - ngraph::opset7::Constant::create(ngraph::element::i64, - ngraph::Shape{2}, - {(size_t)0, offset + size}), // end slice index - ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {(size_t)1, (size_t)1}), // strides - std::vector{1, 0}, // begin mask - std::vector{1, 0}); // end mask +std::shared_ptr FlatCrop(ngraph::Output input, size_t offset, size_t size) { + return std::make_shared( + input, // data + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {(size_t)0, offset}), // begin sice index + ov::op::v0::Constant::create(ngraph::element::i64, + ngraph::Shape{2}, + {(size_t)0, offset + size}), // end slice index + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {(size_t)1, (size_t)1}), // strides + std::vector{1, 0}, // begin mask + std::vector{1, 0}); // end mask } void InsertPadding(ngraph::OutputVector& input_rows_to_concat, size_t size, - const std::shared_ptr padding_const, + const std::shared_ptr padding_const, size_t biggest_padding) { if (size == biggest_padding) { input_rows_to_concat.push_back(padding_const); @@ -409,16 +408,16 @@ std::shared_ptr CreatePaddedNet(const ngraph::Output if (!biggest_padding) return nullptr; - 
auto flat_input = std::make_shared( + auto flat_input = std::make_shared( input_node, - ngraph::opset7::Constant::create(ngraph::element::i64, - ngraph::Shape{2}, - ngraph::Shape{1ull, shape_size(input_node.get_shape())}), + ov::op::v0::Constant::create(ngraph::element::i64, + ngraph::Shape{2}, + ngraph::Shape{1ull, shape_size(input_node.get_shape())}), false); // Constant with zero padding auto const_holding_padding = - std::make_shared(ngraph::element::i64, ngraph::Shape{1, biggest_padding}, 0); + std::make_shared(ngraph::element::i64, ngraph::Shape{1, biggest_padding}, 0); std::shared_ptr original_row = flat_input; ngraph::OutputVector input_rows_to_concat; @@ -450,7 +449,7 @@ std::shared_ptr CreatePaddedNet(const ngraph::Output if (flat_right_padding) { InsertPadding(single_row_concat_inputs, flat_right_padding, const_holding_padding, biggest_padding); } - auto padded_row_concat = std::make_shared(single_row_concat_inputs, 1); + auto padded_row_concat = std::make_shared(single_row_concat_inputs, 1); input_rows_to_concat.push_back(padded_row_concat); } @@ -463,7 +462,7 @@ std::shared_ptr CreatePaddedNet(const ngraph::Output InsertPadding(input_rows_to_concat, padded_row_size, const_holding_padding, biggest_padding); } - auto padded_input_plane = std::make_shared(input_rows_to_concat, 1); + auto padded_input_plane = std::make_shared(input_rows_to_concat, 1); return padded_input_plane; } @@ -481,7 +480,7 @@ std::shared_ptr ConvertPaddedToValidConvTestFixture::get_refer const ngraph::Shape& maxpool_shape, const ngraph::op::PadType& pad_type, const ConvData& conv_data) { - auto input_params = std::make_shared(ngraph::element::i64, input_shape); + auto input_params = std::make_shared(ngraph::element::i64, input_shape); // Add padding where neccessary @@ -494,10 +493,10 @@ std::shared_ptr ConvertPaddedToValidConvTestFixture::get_refer // padding // padding auto padded_input_plane = CreatePaddedNet(input_params, conv_data); - std::shared_ptr result; + std::shared_ptr result; if (padded_input_plane) { - auto shape_const = std::make_shared( + auto shape_const = std::make_shared( ngraph::element::i64, ngraph::Shape{4}, ngraph::Shape{static_cast(1), @@ -505,7 +504,7 @@ std::shared_ptr ConvertPaddedToValidConvTestFixture::get_refer conv_data.pads_begin_width + conv_data.input_width + conv_data.pads_end_width, conv_data.input_channel_count}); auto padded_input_plane_reshaped = - std::make_shared(padded_input_plane, shape_const, false); + std::make_shared(padded_input_plane, shape_const, false); result = createFunction(fq, model, padded_input_plane_reshaped, diff --git a/src/plugins/intel_gna/tests/unit/transformations/gna_decompose_2d_convolution.cpp b/src/plugins/intel_gna/tests/unit/transformations/gna_decompose_2d_convolution.cpp index 2eed9b367d034e..0f7c5a99b463ac 100644 --- a/src/plugins/intel_gna/tests/unit/transformations/gna_decompose_2d_convolution.cpp +++ b/src/plugins/intel_gna/tests/unit/transformations/gna_decompose_2d_convolution.cpp @@ -13,6 +13,7 @@ #include "backend/gna_limitations.hpp" #include "common_test_utils/ov_test_utils.hpp" +#include "openvino/opsets/opset7.hpp" #include "transformations/decompose_2d_convolution.hpp" using namespace ov::intel_gna::limitations; @@ -53,14 +54,14 @@ typedef std::tuple input_node; - std::shared_ptr fq_filters; - std::shared_ptr conv; - std::shared_ptr bias; - std::shared_ptr fq_conv; - std::shared_ptr fq_bias; - std::shared_ptr max_pool; + std::shared_ptr fq_filters; + std::shared_ptr conv; + std::shared_ptr bias; + std::shared_ptr fq_conv; + 
std::shared_ptr fq_bias; + std::shared_ptr max_pool; std::shared_ptr af; - std::shared_ptr fq_af; + std::shared_ptr fq_af; std::shared_ptr bias_const; std::shared_ptr last_op_in_sequence_for_replacement; size_t conv_count; @@ -85,7 +86,7 @@ struct ConvParams { size_t output_width; }; -void GetConvParams(std::shared_ptr conv, ConvParams& conv_params) { +void GetConvParams(std::shared_ptr conv, ConvParams& conv_params) { conv_params.output_height = conv->get_output_shape(0)[2]; conv_params.output_width = conv->get_output_shape(0)[3]; conv_params.input_channel_count = conv->input_value(0).get_shape()[1]; @@ -102,20 +103,20 @@ void GetConvParams(std::shared_ptr conv, ConvParams conv_params.output_channel_count = conv_params.filter_count; } -std::shared_ptr createFQ(std::shared_ptr& in_node) { - auto input_low = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); - auto input_high = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {5}); - auto output_low = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {0}); - auto output_high = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {10}); - return std::make_shared(in_node, input_low, input_high, output_low, output_high, 11); +std::shared_ptr createFQ(std::shared_ptr& in_node) { + auto input_low = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); + auto input_high = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {5}); + auto output_low = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {0}); + auto output_high = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {10}); + return std::make_shared(in_node, input_low, input_high, output_low, output_high, 11); } std::shared_ptr createBiasFQ(const ngraph::Output& in_node, - std::shared_ptr& bias_const, - std::shared_ptr& bias, + std::shared_ptr& bias_const, + std::shared_ptr& bias, const bool& fq) { std::shared_ptr node; - bias = std::make_shared(in_node, bias_const); + bias = std::make_shared(in_node, bias_const); node = bias; if (fq) { @@ -125,22 +126,22 @@ std::shared_ptr createBiasFQ(const ngraph::Output& i return node; } -std::shared_ptr createFunction(const bool& fq, - const modelType& model, - const ngraph::Output& input_node, - const ngraph::Shape& filters_shape, - const ngraph::Strides& conv_stride, - const ngraph::Strides& conv_dilation, - const ngraph::Shape& bias_shape, - const ngraph::Strides& maxpool_stride, - const ngraph::Shape& maxpool_shape, - GraphData* graph_data, - ConvParams* conv_params) { - auto transpose_in_order = std::make_shared(ngraph::element::i64, - ngraph::Shape{4}, - std::vector{0, 3, 1, 2}); - auto transpose_in = std::make_shared(input_node, transpose_in_order); - std::shared_ptr fq_filters = std::make_shared( +std::shared_ptr createFunction(const bool& fq, + const modelType& model, + const ngraph::Output& input_node, + const ngraph::Shape& filters_shape, + const ngraph::Strides& conv_stride, + const ngraph::Strides& conv_dilation, + const ngraph::Shape& bias_shape, + const ngraph::Strides& maxpool_stride, + const ngraph::Shape& maxpool_shape, + GraphData* graph_data, + ConvParams* conv_params) { + auto transpose_in_order = std::make_shared(ngraph::element::i64, + ngraph::Shape{4}, + std::vector{0, 3, 1, 2}); + auto transpose_in = std::make_shared(input_node, transpose_in_order); + std::shared_ptr fq_filters = std::make_shared( ngraph::element::i64, ngraph::Shape{4, 
input_node.get_shape()[3], filters_shape[0], filters_shape[1]}); @@ -148,77 +149,77 @@ std::shared_ptr createFunction(const bool& fq, fq_filters = createFQ(fq_filters); } - auto conv = std::make_shared(transpose_in, - fq_filters, - conv_stride, - ngraph::CoordinateDiff{0, 0}, - ngraph::CoordinateDiff{0, 0}, - conv_dilation, - ngraph::op::PadType::VALID); + auto conv = std::make_shared(transpose_in, + fq_filters, + conv_stride, + ngraph::CoordinateDiff{0, 0}, + ngraph::CoordinateDiff{0, 0}, + conv_dilation, + ngraph::op::PadType::VALID); if (conv_params) GetConvParams(conv, *conv_params); - auto transpose_out_order = std::make_shared(ngraph::element::i64, - ngraph::Shape{4}, - std::vector{0, 2, 3, 1}); - auto bias_const = std::make_shared(ngraph::element::i64, bias_shape); - std::shared_ptr bias = nullptr; + auto transpose_out_order = std::make_shared(ngraph::element::i64, + ngraph::Shape{4}, + std::vector{0, 2, 3, 1}); + auto bias_const = std::make_shared(ngraph::element::i64, bias_shape); + std::shared_ptr bias = nullptr; std::shared_ptr fq_bias = nullptr, fq_af = nullptr; - std::shared_ptr max_pool = nullptr; + std::shared_ptr max_pool = nullptr; std::shared_ptr activation = nullptr; std::shared_ptr fq_conv = nullptr; - std::shared_ptr last_op = std::make_shared(conv, transpose_out_order); + std::shared_ptr last_op = std::make_shared(conv, transpose_out_order); switch (model) { case modelType::TranspConvBcastAddTransp: { fq_bias = createBiasFQ(conv, bias_const, bias, fq); - last_op = std::make_shared(fq_bias, transpose_out_order); + last_op = std::make_shared(fq_bias, transpose_out_order); } break; case modelType::TranspConvActTransp: { fq_bias = createBiasFQ(conv, bias_const, bias, fq); - std::shared_ptr activation = std::make_shared(fq_bias); + std::shared_ptr activation = std::make_shared(fq_bias); if (fq) { activation = createFQ(activation); } - last_op = std::make_shared(activation, transpose_out_order); + last_op = std::make_shared(activation, transpose_out_order); } break; case modelType::TranspConvBcastAddMaxPoolTransp: { fq_bias = createBiasFQ(conv, bias_const, bias, fq); - max_pool = std::make_shared(fq_bias, - maxpool_stride, - ngraph::Shape{0, 0}, - ngraph::Shape{0, 0}, - maxpool_shape, - ngraph::op::RoundingType::FLOOR, - ngraph::op::PadType::VALID); - auto transpose = std::make_shared(max_pool, transpose_out_order); - last_op = std::make_shared(transpose); + max_pool = std::make_shared(fq_bias, + maxpool_stride, + ngraph::Shape{0, 0}, + ngraph::Shape{0, 0}, + maxpool_shape, + ngraph::op::RoundingType::FLOOR, + ngraph::op::PadType::VALID); + auto transpose = std::make_shared(max_pool, transpose_out_order); + last_op = std::make_shared(transpose); } break; case modelType::TranspConvBcastAddActTransp: { fq_bias = createBiasFQ(conv, bias_const, bias, fq); - activation = std::make_shared(fq_bias); - last_op = std::make_shared(activation, transpose_out_order); + activation = std::make_shared(fq_bias); + last_op = std::make_shared(activation, transpose_out_order); } break; case modelType::TranspConvBcastAddMaxPoolActTransp: { fq_bias = createBiasFQ(conv, bias_const, bias, fq); - max_pool = std::make_shared(fq_bias, - maxpool_stride, - ngraph::Shape{0, 0}, - ngraph::Shape{0, 0}, - maxpool_shape, - ngraph::op::RoundingType::FLOOR, - ngraph::op::PadType::VALID); - activation = std::make_shared(max_pool); + max_pool = std::make_shared(fq_bias, + maxpool_stride, + ngraph::Shape{0, 0}, + ngraph::Shape{0, 0}, + maxpool_shape, + ngraph::op::RoundingType::FLOOR, + 
ngraph::op::PadType::VALID); + activation = std::make_shared(max_pool); if (fq) { fq_af = createFQ(activation); } - last_op = std::make_shared(fq_af ? fq_af : activation, transpose_out_order); + last_op = std::make_shared(fq_af ? fq_af : activation, transpose_out_order); } break; case modelType::TranspConvTranspBcastAdd: { @@ -227,14 +228,14 @@ std::shared_ptr createFunction(const bool& fq, case modelType::TranspConvTranspBcastAddAct: { fq_bias = createBiasFQ(last_op, bias_const, bias, fq); - last_op = std::make_shared(fq_bias); + last_op = std::make_shared(fq_bias); } break; case modelType::TranspConvTransp: { if (fq) { auto conv_ptr = conv->shared_from_this(); fq_conv = createFQ(conv_ptr); - last_op = std::make_shared(fq_conv, transpose_out_order); + last_op = std::make_shared(fq_conv, transpose_out_order); } } default: @@ -242,13 +243,13 @@ std::shared_ptr createFunction(const bool& fq, } if (graph_data) { - graph_data->fq_filters = fq ? std::dynamic_pointer_cast(fq_filters) : nullptr; + graph_data->fq_filters = fq ? std::dynamic_pointer_cast(fq_filters) : nullptr; graph_data->conv = conv; graph_data->bias = bias; - graph_data->fq_conv = fq ? std::dynamic_pointer_cast(fq_conv) : nullptr; - graph_data->fq_bias = fq ? std::dynamic_pointer_cast(fq_bias) : nullptr; + graph_data->fq_conv = fq ? std::dynamic_pointer_cast(fq_conv) : nullptr; + graph_data->fq_bias = fq ? std::dynamic_pointer_cast(fq_bias) : nullptr; graph_data->af = std::dynamic_pointer_cast(activation); - graph_data->fq_af = fq ? std::dynamic_pointer_cast(fq_af) : nullptr; + graph_data->fq_af = fq ? std::dynamic_pointer_cast(fq_af) : nullptr; graph_data->max_pool = max_pool; graph_data->last_op_in_sequence_for_replacement = last_op; graph_data->bias_const = nullptr; @@ -260,7 +261,7 @@ std::shared_ptr createFunction(const bool& fq, } } - return std::make_shared(last_op); + return std::make_shared(last_op); } std::shared_ptr get_initial_function(const bool& fq, @@ -274,7 +275,7 @@ std::shared_ptr get_initial_function(const bool& fq, const ngraph::Shape& maxpool_shape, GraphData& graph_data, ConvParams& conv_params) { - auto input_params = std::make_shared(ngraph::element::i64, input_shape); + auto input_params = std::make_shared(ngraph::element::i64, input_shape); auto result = createFunction(fq, model, input_params, @@ -395,36 +396,33 @@ void Decompose2DConvTestFixture::TearDown() { Limitations::deinit(); } -std::shared_ptr ReshapeBiasConst(std::shared_ptr conv_bias, +std::shared_ptr ReshapeBiasConst(std::shared_ptr conv_bias, const ConvParams& conv_params) { - auto add_const = - std::dynamic_pointer_cast(conv_bias->input_value(1).get_node_shared_ptr()); + auto add_const = std::dynamic_pointer_cast(conv_bias->input_value(1).get_node_shared_ptr()); IE_ASSERT(add_const); auto bias_size = shape_size(add_const->get_shape()); - return ov::op::util::make_try_fold( + return ov::op::util::make_try_fold( add_const, - ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{4}, ngraph::Shape{1, bias_size, 1, 1}), + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, ngraph::Shape{1, bias_size, 1, 1}), false); } -std::shared_ptr FlatCrop(ngraph::Output input, size_t offset, size_t size) { +std::shared_ptr FlatCrop(ngraph::Output input, size_t offset, size_t size) { auto shape = input.get_shape(); - return std::make_shared( - input, // data - ngraph::opset7::Constant::create(ngraph::element::i64, - ngraph::Shape{2}, - {(size_t)0, offset}), // begin slice index - 
ngraph::opset7::Constant::create(ngraph::element::i64, - ngraph::Shape{2}, - {(size_t)0, offset + size}), // end slice index - ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {(size_t)1, (size_t)1}), // strides - std::vector{1, 0}, // begin mask - std::vector{1, 0}); // end mask + return std::make_shared( + input, // data + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {(size_t)0, offset}), // begin slice index + ov::op::v0::Constant::create(ngraph::element::i64, + ngraph::Shape{2}, + {(size_t)0, offset + size}), // end slice index + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {(size_t)1, (size_t)1}), // strides + std::vector{1, 0}, // begin mask + std::vector{1, 0}); // end mask } -static std::vector> Split2DConvFilters(std::shared_ptr& filters, +static std::vector> Split2DConvFilters(std::shared_ptr& filters, const bool& vertical_permute, const bool& horizontal_permute, const size_t& split_channels) { @@ -438,17 +436,17 @@ static std::vector> Split2DConvFilters(std::shared IE_ASSERT(filter_shape.size() == 4); if (split_channels > 1) { - const auto axis_node = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{}, {1}); - const auto split = std::make_shared(filters, axis_node, split_channels); + const auto axis_node = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{}, {1}); + const auto split = std::make_shared(filters, axis_node, split_channels); flat_filters = split->outputs(); } if (horizontal_permute) { for (size_t split_index = 0; split_index < split_channels; split_index++) { ngraph::Output& flat_filter = flat_filters[split_index]; - result.push_back(std::make_shared( + result.push_back(std::make_shared( flat_filter, - ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{4}, ngraph::Shape{0, 1, 3, 2}))); + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, ngraph::Shape{0, 1, 3, 2}))); } } @@ -467,9 +465,9 @@ static std::vector> Split2DConvFilters(std::shared } for (auto& new_filter : result) - new_filter = ov::op::util::make_try_fold( + new_filter = ov::op::util::make_try_fold( new_filter, - ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{4}, reshape_shape), + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, reshape_shape), false); return result; @@ -478,33 +476,32 @@ static std::vector> Split2DConvFilters(std::shared ngraph::OutputVector SplitInput(const GraphData& graph_data, ConvParams& conv_params) { // We need to have proper input shape first ngraph::OutputVector split_planes; - auto padded_input_plane = std::make_shared( + auto padded_input_plane = std::make_shared( graph_data.input_node, - ngraph::opset7::Constant::create(ngraph::element::i64, - ngraph::Shape{2}, - ngraph::Shape{1, shape_size(graph_data.input_node->get_shape())}), + ov::op::v0::Constant::create(ngraph::element::i64, + ngraph::Shape{2}, + ngraph::Shape{1, shape_size(graph_data.input_node->get_shape())}), false); if (graph_data.conv_count > 1) { // If we split input plane and filters due to GNA limitations - we must sum their results at the end conv_params.input_channel_count /= graph_data.conv_count; - auto reshape_before_transpose = std::make_shared( + auto reshape_before_transpose = std::make_shared( padded_input_plane, - ngraph::opset7::Constant::create( + ov::op::v0::Constant::create( ngraph::element::i64, ngraph::Shape{2}, {shape_size(padded_input_plane->get_shape()) / graph_data.conv_count, graph_data.conv_count}), false); - 
auto transpose_before_channel_wise_split = std::make_shared( + auto transpose_before_channel_wise_split = std::make_shared( reshape_before_transpose, - ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {1, 0})->output(0)); + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {1, 0})->output(0)); - const auto axis_node = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{}, {0}); - const auto split = std::make_shared(transpose_before_channel_wise_split, - axis_node, - graph_data.conv_count); + const auto axis_node = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{}, {0}); + const auto split = + std::make_shared(transpose_before_channel_wise_split, axis_node, graph_data.conv_count); split_planes = split->outputs(); } else { split_planes.push_back(padded_input_plane); @@ -518,7 +515,7 @@ std::vector> SplitFilters(const GraphData& graph_d // data as well; we also need to take filter height and potential dilation into account when modifying the filters // Take account of fake quantize when getting filter values - auto filter_values = std::dynamic_pointer_cast( + auto filter_values = std::dynamic_pointer_cast( graph_data.fq_filters == nullptr ? graph_data.conv->input_value(1).get_node_shared_ptr() : graph_data.fq_filters->input_value(0).get_node_shared_ptr()); bool vertical_permute = (conv_params.filter_height > 1); @@ -571,14 +568,13 @@ void TransformInput(const GraphData& graph_data, } // Interleaving dilated input planes - std::shared_ptr dilated_chunks_concat = - std::make_shared(dilated_input_planes, 0); + std::shared_ptr dilated_chunks_concat = std::make_shared(dilated_input_planes, 0); // Additional reshape is required for strided slices of input intended for each filter row if (conv_params.filter_stride_height > 1) { - dilated_chunks_concat = std::make_shared( + dilated_chunks_concat = std::make_shared( dilated_chunks_concat, - ngraph::opset7::Constant::create( + ov::op::v0::Constant::create( ngraph::element::i64, ngraph::Shape{2}, {conv_params.filter_height, @@ -586,47 +582,47 @@ void TransformInput(const GraphData& graph_data, false); } - auto transposed_dilated_chunks = std::make_shared( + auto transposed_dilated_chunks = std::make_shared( dilated_chunks_concat, - ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {1, 0})->output(0)); + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {1, 0})->output(0)); // Flattening of interleaved input planes - auto flattened_dilated_transposed_input = std::make_shared( + auto flattened_dilated_transposed_input = std::make_shared( transposed_dilated_chunks, - ngraph::opset7::Constant::create(ngraph::element::i64, - ngraph::Shape{2}, - {(size_t)1, - conv_params.input_width * conv_params.input_channel_count * - conv_params.output_height * conv_params.filter_height}), + ov::op::v0::Constant::create(ngraph::element::i64, + ngraph::Shape{2}, + {(size_t)1, + conv_params.input_width * conv_params.input_channel_count * + conv_params.output_height * conv_params.filter_height}), false); split_input_plane = flattened_dilated_transposed_input; } -static void InsertFQLayer(const std::shared_ptr fqLayer, +static void InsertFQLayer(const std::shared_ptr fqLayer, std::shared_ptr lastNode) { if (fqLayer != nullptr) { lastNode = fqLayer->clone_with_new_inputs( {lastNode, - ngraph::opset7::Constant::create( + ov::op::v0::Constant::create( ngraph::element::f32, ngraph::Shape{1}, - 
std::dynamic_pointer_cast(fqLayer->input_value(1).get_node_shared_ptr()) + std::dynamic_pointer_cast(fqLayer->input_value(1).get_node_shared_ptr()) ->cast_vector()), - ngraph::opset7::Constant::create( + ov::op::v0::Constant::create( ngraph::element::f32, ngraph::Shape{1}, - std::dynamic_pointer_cast(fqLayer->input_value(2).get_node_shared_ptr()) + std::dynamic_pointer_cast(fqLayer->input_value(2).get_node_shared_ptr()) ->cast_vector()), - ngraph::opset7::Constant::create( + ov::op::v0::Constant::create( ngraph::element::f32, ngraph::Shape{1}, - std::dynamic_pointer_cast(fqLayer->input_value(3).get_node_shared_ptr()) + std::dynamic_pointer_cast(fqLayer->input_value(3).get_node_shared_ptr()) ->cast_vector()), - ngraph::opset7::Constant::create( + ov::op::v0::Constant::create( ngraph::element::f32, ngraph::Shape{1}, - std::dynamic_pointer_cast(fqLayer->input_value(4).get_node_shared_ptr()) + std::dynamic_pointer_cast(fqLayer->input_value(4).get_node_shared_ptr()) ->cast_vector())}); } } @@ -638,21 +634,21 @@ std::shared_ptr Create1DConv(const GraphData& graph_data, const size_t conv_index, const size_t h_index) { // Transpose NHWC => NCHW - std::shared_ptr nchw_input = std::make_shared( + std::shared_ptr nchw_input = std::make_shared( input, - ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 3, 1, 2})->output(0)); + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 3, 1, 2})->output(0)); // Fake quantize InsertFQLayer(graph_data.fq_filters, filters); // 1D Convolution & fake quantize - auto conv = std::make_shared(nchw_input, - filters, - ngraph::Strides{1, conv_params.filter_stride_width}, - ngraph::CoordinateDiff{0, 0}, - ngraph::CoordinateDiff{0, 0}, - ngraph::Strides{1, 1}, - ngraph::op::PadType::VALID); + auto conv = std::make_shared(nchw_input, + filters, + ngraph::Strides{1, conv_params.filter_stride_width}, + ngraph::CoordinateDiff{0, 0}, + ngraph::CoordinateDiff{0, 0}, + ngraph::Strides{1, 1}, + ngraph::op::PadType::VALID); std::string conv_name = graph_data.conv->get_friendly_name() + "_H_" + std::to_string(h_index) + "_CH_" + std::to_string(0); conv->set_friendly_name(conv_name); @@ -661,19 +657,19 @@ std::shared_ptr Create1DConv(const GraphData& graph_data, // Bias & fake quantize if (graph_data.bias_const && conv_index == 0) { - last_conv_block_op = std::make_shared(conv, graph_data.bias_const); + last_conv_block_op = std::make_shared(conv, graph_data.bias_const); InsertFQLayer(graph_data.fq_bias, last_conv_block_op); } // Max pooling if (graph_data.pool_size_width > 1 || graph_data.pool_stride_width > 1) { - last_conv_block_op = std::make_shared(last_conv_block_op, - ngraph::Strides{1, graph_data.pool_stride_width}, - ngraph::Shape{0, 0}, - ngraph::Shape{0, 0}, - ngraph::Shape{1, graph_data.pool_size_width}, - graph_data.max_pool->get_rounding_type(), - ngraph::op::PadType::VALID); + last_conv_block_op = std::make_shared(last_conv_block_op, + ngraph::Strides{1, graph_data.pool_stride_width}, + ngraph::Shape{0, 0}, + ngraph::Shape{0, 0}, + ngraph::Shape{1, graph_data.pool_size_width}, + graph_data.max_pool->get_rounding_type(), + ngraph::op::PadType::VALID); } // Activation function if (graph_data.af && graph_data.conv_count == 1) { @@ -682,9 +678,9 @@ std::shared_ptr Create1DConv(const GraphData& graph_data, } // Transpose NCHW => NHWC - auto nhwc_output = std::make_shared( + auto nhwc_output = std::make_shared( last_conv_block_op, - ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 2, 3, 
1})->output(0)); + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 2, 3, 1})->output(0)); return nhwc_output; } @@ -740,16 +736,16 @@ std::shared_ptr CreateDeomposedConv(const GraphData& graph_data, dilated_chunks.push_back(slice); } - dilated_chunks_concat = std::make_shared(dilated_chunks, 0); + dilated_chunks_concat = std::make_shared(dilated_chunks, 0); } - auto transposed_dilated_chunks = std::make_shared( + auto transposed_dilated_chunks = std::make_shared( dilated_chunks_concat, - ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {1, 0})->output(0)); + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {1, 0})->output(0)); - auto flattened_dilated_conv_input = std::make_shared( + auto flattened_dilated_conv_input = std::make_shared( transposed_dilated_chunks, - ngraph::opset7::Constant::create( + ov::op::v0::Constant::create( ngraph::element::i64, ngraph::Shape{4}, ngraph::Shape{1, 1, output_width, h_1_filter_channel_count * conv_params.filter_width}), @@ -758,12 +754,11 @@ std::shared_ptr CreateDeomposedConv(const GraphData& graph_data, nhwc_conv_y_input = flattened_dilated_conv_input; } else { // If no horizontal split is done, only reshape is required before decomposed convolution - nhwc_conv_y_input = std::make_shared( + nhwc_conv_y_input = std::make_shared( nhwc_conv_y_input, - ngraph::opset7::Constant::create( - ngraph::element::i64, - ngraph::Shape{4}, - ngraph::Shape{1, 1, conv_params.input_width, h_1_filter_channel_count}), + ov::op::v0::Constant::create(ngraph::element::i64, + ngraph::Shape{4}, + ngraph::Shape{1, 1, conv_params.input_width, h_1_filter_channel_count}), false); } @@ -784,7 +779,7 @@ std::shared_ptr CreateDeomposedConv(const GraphData& graph_data, if (result_chunks.size() > 1) { // Concat in horizontal dimension // In NHWC index of H is 1 - auto concatenated_sub_results = std::make_shared(result_chunks, 1); + auto concatenated_sub_results = std::make_shared(result_chunks, 1); last_op = concatenated_sub_results; } return last_op; @@ -823,7 +818,7 @@ static bool ShouldDecompose(GraphData& graph_data, const ConvParams& conv_params return true; } -std::shared_ptr Decompose(const GraphData& graph_data, ConvParams& conv_params) { +std::shared_ptr Decompose(const GraphData& graph_data, ConvParams& conv_params) { std::vector> partial_conv_results; // Split input and filters due to GNA filter element count limit @@ -845,7 +840,7 @@ std::shared_ptr Decompose(const GraphData& graph_data, C std::shared_ptr conv_result = partial_conv_results.front(); for (size_t i = 1; i < partial_conv_results.size(); i++) { - auto add_result = std::make_shared(partial_conv_results[i], conv_result); + auto add_result = std::make_shared(partial_conv_results[i], conv_result); conv_result = add_result; } @@ -859,7 +854,7 @@ std::shared_ptr Decompose(const GraphData& graph_data, C ngraph::replace_node(graph_data.last_op_in_sequence_for_replacement, conv_result); conv_result->set_friendly_name(conv_result_name); - return std::make_shared(conv_result); + return std::make_shared(conv_result); } std::shared_ptr Decompose2DConvTestFixture::get_reference(const bool& fq, @@ -867,18 +862,18 @@ std::shared_ptr Decompose2DConvTestFixture::get_reference(cons const ngraph::PartialShape& input_shape, GraphData& graph_data, ConvParams& conv_params) { - auto input_params = std::make_shared(ngraph::element::i64, input_shape); + auto input_params = std::make_shared(ngraph::element::i64, input_shape); graph_data.input_node = input_params; 
ShouldDecompose(graph_data, conv_params); if (model != modelType::TranspConvTransp) { graph_data.bias_const = - ReshapeBiasConst(std::dynamic_pointer_cast(graph_data.bias), conv_params); + ReshapeBiasConst(std::dynamic_pointer_cast(graph_data.bias), conv_params); } // Create decomposed reference function - std::shared_ptr result; + std::shared_ptr result; result = Decompose(graph_data, conv_params); return std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params}); } diff --git a/src/plugins/intel_gna/tests/unit/transformations/gna_decompose_mvn.cpp b/src/plugins/intel_gna/tests/unit/transformations/gna_decompose_mvn.cpp index a5e90ea86a6672..a568ab5d5fde84 100644 --- a/src/plugins/intel_gna/tests/unit/transformations/gna_decompose_mvn.cpp +++ b/src/plugins/intel_gna/tests/unit/transformations/gna_decompose_mvn.cpp @@ -10,6 +10,8 @@ #include "backend/gna_limitations.hpp" #include "common_test_utils/ov_test_utils.hpp" +#include "openvino/opsets/opset2.hpp" +#include "openvino/opsets/opset8.hpp" #include "transformations/decompose_mvn.hpp" #include "transformations/op_conversions/convert_mvn1_to_mvn6.hpp" @@ -40,73 +42,73 @@ struct MVNParams { static std::shared_ptr NormalizeVariance( const MVNParams& mvn_data, - const std::shared_ptr& subtract_mean, - const std::shared_ptr& avg_broadcast_const) { + const std::shared_ptr& subtract_mean, + const std::shared_ptr& avg_broadcast_const) { // Prepare consts auto combined_C_H = mvn_data.C * mvn_data.H; std::vector avg_weights(8 * mvn_data.W / mvn_data.num_parts, 1.0f / mvn_data.W); - auto avg_weights_const = ngraph::opset8::Constant::create(ngraph::element::f32, - ngraph::Shape{8, mvn_data.W / mvn_data.num_parts, 1, 1}, - avg_weights); + auto avg_weights_const = ov::op::v0::Constant::create(ngraph::element::f32, + ngraph::Shape{8, mvn_data.W / mvn_data.num_parts, 1, 1}, + avg_weights); std::vector eps_tensor(combined_C_H * mvn_data.W, mvn_data.eps); auto eps_tensor_const = - ngraph::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{1, combined_C_H * mvn_data.W}, eps_tensor); + ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1, combined_C_H * mvn_data.W}, eps_tensor); std::vector minus_half(combined_C_H * mvn_data.W, -0.5f); auto minus_half_const = - ngraph::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{1, combined_C_H * mvn_data.W}, minus_half); + ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1, combined_C_H * mvn_data.W}, minus_half); // Calculate square of the difference between input and its mean - auto squared_diff = std::make_shared(subtract_mean, subtract_mean); + auto squared_diff = std::make_shared(subtract_mean, subtract_mean); squared_diff->set_friendly_name("MvnSqrDiff"); // Calculate sum of the squares - auto squared_diff_reshape = std::make_shared( + auto squared_diff_reshape = std::make_shared( squared_diff, - ngraph::opset8::Constant::create( + ov::op::v0::Constant::create( ngraph::element::i64, ngraph::Shape{4}, ngraph::Shape{mvn_data.N, combined_C_H * mvn_data.num_parts, 1ull, mvn_data.W / mvn_data.num_parts}), false); - auto transposed_input_3 = std::make_shared( + auto transposed_input_3 = std::make_shared( squared_diff_reshape, - ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 3, 1, 2})); - auto transposed_avg_conv_3 = std::make_shared(transposed_input_3, - avg_weights_const, - ngraph::Strides{1, 1}, - ngraph::CoordinateDiff{0, 0}, - ngraph::CoordinateDiff{0, 0}, - ngraph::Strides{1, 1}, - 
ngraph::op::PadType::VALID); + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 3, 1, 2})); + auto transposed_avg_conv_3 = std::make_shared(transposed_input_3, + avg_weights_const, + ngraph::Strides{1, 1}, + ngraph::CoordinateDiff{0, 0}, + ngraph::CoordinateDiff{0, 0}, + ngraph::Strides{1, 1}, + ngraph::op::PadType::VALID); transposed_avg_conv_3->set_friendly_name("MvnAvg3"); - auto avg_conv_3 = std::make_shared( + auto avg_conv_3 = std::make_shared( transposed_avg_conv_3, - ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 2, 3, 1})); - auto reshape_avg_conv_3 = std::make_shared( + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 2, 3, 1})); + auto reshape_avg_conv_3 = std::make_shared( avg_conv_3, - ngraph::opset8::Constant::create(ngraph::element::i64, - ngraph::Shape{4}, - ngraph::Shape{mvn_data.N, 1ull, combined_C_H, 8 * mvn_data.num_parts}), + ov::op::v0::Constant::create(ngraph::element::i64, + ngraph::Shape{4}, + ngraph::Shape{mvn_data.N, 1ull, combined_C_H, 8 * mvn_data.num_parts}), false); - auto transposed_input_4 = std::make_shared( + auto transposed_input_4 = std::make_shared( reshape_avg_conv_3, - ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 3, 1, 2})); - auto transposed_avg_conv_4 = std::make_shared(transposed_input_4, - avg_broadcast_const, - ngraph::Strides{1, 1}, - ngraph::CoordinateDiff{0, 0}, - ngraph::CoordinateDiff{0, 0}, - ngraph::Strides{1, 1}, - ngraph::op::PadType::VALID); + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 3, 1, 2})); + auto transposed_avg_conv_4 = std::make_shared(transposed_input_4, + avg_broadcast_const, + ngraph::Strides{1, 1}, + ngraph::CoordinateDiff{0, 0}, + ngraph::CoordinateDiff{0, 0}, + ngraph::Strides{1, 1}, + ngraph::op::PadType::VALID); transposed_avg_conv_4->set_friendly_name("MvnAvg4"); - auto avg_conv_4 = std::make_shared( + auto avg_conv_4 = std::make_shared( transposed_avg_conv_4, - ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 2, 3, 1})); - auto reshape_avg_conv_4 = std::make_shared( + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 2, 3, 1})); + auto reshape_avg_conv_4 = std::make_shared( avg_conv_4, - ngraph::opset8::Constant::create(ngraph::element::i64, - ngraph::Shape{2}, - ngraph::Shape{1ull, combined_C_H * mvn_data.W}), + ov::op::v0::Constant::create(ngraph::element::i64, + ngraph::Shape{2}, + ngraph::Shape{1ull, combined_C_H * mvn_data.W}), false); std::shared_ptr inv_stdev; @@ -115,103 +117,102 @@ static std::shared_ptr NormalizeVariance( // even though the built-in MVN1 to MVN6 transformation enforces outside setting // Add epsilon inside the square root - auto add_epsilon = std::make_shared(eps_tensor_const, reshape_avg_conv_4); + auto add_epsilon = std::make_shared(eps_tensor_const, reshape_avg_conv_4); // Calculate square root and inversion - auto log_var_eps = std::make_shared(add_epsilon); + auto log_var_eps = std::make_shared(add_epsilon); log_var_eps->set_friendly_name("MvnLogVarEps"); - auto log_inv_stdev = std::make_shared(log_var_eps, minus_half_const); + auto log_inv_stdev = std::make_shared(log_var_eps, minus_half_const); log_inv_stdev->set_friendly_name("MvnLogInvStdev"); - inv_stdev = std::make_shared(log_inv_stdev); + inv_stdev = std::make_shared(log_inv_stdev); inv_stdev->set_friendly_name("MvnInvStdev"); - auto normalized_output = std::make_shared(subtract_mean, inv_stdev); + auto normalized_output = 
std::make_shared(subtract_mean, inv_stdev); normalized_output->set_friendly_name("MvnOutput"); return normalized_output; } -static std::shared_ptr Decompose(const std::shared_ptr input_node, - const MVNParams& mvn_data) { +static std::shared_ptr Decompose(const std::shared_ptr input_node, + const MVNParams& mvn_data) { // Prepare data auto combined_C_H = mvn_data.C * mvn_data.H; std::vector neg_avg_weights(8 * mvn_data.W / mvn_data.num_parts, -1.0f / mvn_data.W); - auto neg_avg_weights_const = - ngraph::opset8::Constant::create(ngraph::element::f32, - ngraph::Shape{8, mvn_data.W / mvn_data.num_parts, 1, 1}, - neg_avg_weights); + auto neg_avg_weights_const = ov::op::v0::Constant::create(ngraph::element::f32, + ngraph::Shape{8, mvn_data.W / mvn_data.num_parts, 1, 1}, + neg_avg_weights); std::vector avg_broadcast(8 * mvn_data.W * mvn_data.num_parts, 0.0f); for (size_t i = 0; i < mvn_data.W * mvn_data.num_parts; i++) { avg_broadcast[i * 8] = 1.0f; } - auto avg_broadcast_const = ngraph::opset8::Constant::create(ngraph::element::f32, - ngraph::Shape{mvn_data.W, 8 * mvn_data.num_parts, 1, 1}, - avg_broadcast); + auto avg_broadcast_const = ov::op::v0::Constant::create(ngraph::element::f32, + ngraph::Shape{mvn_data.W, 8 * mvn_data.num_parts, 1, 1}, + avg_broadcast); // Create average calculation part of the graph // We assume C = 1 case (combined channels) - auto reshape = std::make_shared( + auto reshape = std::make_shared( input_node, - ngraph::opset8::Constant::create(ngraph::element::i64, - ngraph::Shape{4}, - ngraph::Shape{mvn_data.N, 1ull, combined_C_H, mvn_data.W}), + ov::op::v0::Constant::create(ngraph::element::i64, + ngraph::Shape{4}, + ngraph::Shape{mvn_data.N, 1ull, combined_C_H, mvn_data.W}), false); - auto input_4d = std::make_shared( + auto input_4d = std::make_shared( reshape, - ngraph::opset8::Constant::create( + ov::op::v0::Constant::create( ngraph::element::i64, ngraph::Shape{4}, ngraph::Shape{mvn_data.N, combined_C_H * mvn_data.num_parts, 1ull, mvn_data.W / mvn_data.num_parts}), false); - auto input_2d = std::make_shared( + auto input_2d = std::make_shared( reshape, - ngraph::opset8::Constant::create(ngraph::element::i64, - ngraph::Shape{2}, - ngraph::Shape{1ull, combined_C_H * mvn_data.W}), + ov::op::v0::Constant::create(ngraph::element::i64, + ngraph::Shape{2}, + ngraph::Shape{1ull, combined_C_H * mvn_data.W}), false); - auto transposed_input_1 = std::make_shared( + auto transposed_input_1 = std::make_shared( input_4d, - ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 3, 1, 2})); - auto transposed_avg_conv_1 = std::make_shared(transposed_input_1, - neg_avg_weights_const, - ngraph::Strides{1, 1}, - ngraph::CoordinateDiff{0, 0}, - ngraph::CoordinateDiff{0, 0}, - ngraph::Strides{1, 1}, - ngraph::op::PadType::VALID); + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 3, 1, 2})); + auto transposed_avg_conv_1 = std::make_shared(transposed_input_1, + neg_avg_weights_const, + ngraph::Strides{1, 1}, + ngraph::CoordinateDiff{0, 0}, + ngraph::CoordinateDiff{0, 0}, + ngraph::Strides{1, 1}, + ngraph::op::PadType::VALID); transposed_avg_conv_1->set_friendly_name("MvnAvg1"); - auto avg_conv_1 = std::make_shared( + auto avg_conv_1 = std::make_shared( transposed_avg_conv_1, - ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 2, 3, 1})); - auto reshape_avg_conv_1 = std::make_shared( + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 2, 3, 1})); + auto reshape_avg_conv_1 = std::make_shared( 
avg_conv_1, - ngraph::opset8::Constant::create(ngraph::element::i64, - ngraph::Shape{4}, - ngraph::Shape{mvn_data.N, 1ull, combined_C_H, 8 * mvn_data.num_parts}), + ov::op::v0::Constant::create(ngraph::element::i64, + ngraph::Shape{4}, + ngraph::Shape{mvn_data.N, 1ull, combined_C_H, 8 * mvn_data.num_parts}), false); - auto transposed_input_2 = std::make_shared( + auto transposed_input_2 = std::make_shared( reshape_avg_conv_1, - ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 3, 1, 2})); - auto transposed_avg_conv_2 = std::make_shared(transposed_input_2, - avg_broadcast_const, - ngraph::Strides{1, 1}, - ngraph::CoordinateDiff{0, 0}, - ngraph::CoordinateDiff{0, 0}, - ngraph::Strides{1, 1}, - ngraph::op::PadType::VALID); + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 3, 1, 2})); + auto transposed_avg_conv_2 = std::make_shared(transposed_input_2, + avg_broadcast_const, + ngraph::Strides{1, 1}, + ngraph::CoordinateDiff{0, 0}, + ngraph::CoordinateDiff{0, 0}, + ngraph::Strides{1, 1}, + ngraph::op::PadType::VALID); transposed_avg_conv_2->set_friendly_name("MvnAvg2"); - auto avg_conv_2 = std::make_shared( + auto avg_conv_2 = std::make_shared( transposed_avg_conv_2, - ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 2, 3, 1})); - auto avg_conv_2_2d = std::make_shared( + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 2, 3, 1})); + auto avg_conv_2_2d = std::make_shared( avg_conv_2, - ngraph::opset8::Constant::create(ngraph::element::i64, - ngraph::Shape{2}, - ngraph::Shape{1ull, combined_C_H * mvn_data.W}), + ov::op::v0::Constant::create(ngraph::element::i64, + ngraph::Shape{2}, + ngraph::Shape{1ull, combined_C_H * mvn_data.W}), false); - auto subtract_mean = std::make_shared(input_2d, avg_conv_2_2d); + auto subtract_mean = std::make_shared(input_2d, avg_conv_2_2d); subtract_mean->set_friendly_name("MvnSubMean"); std::shared_ptr mvn_output, pre_output = subtract_mean; @@ -223,22 +224,20 @@ static std::shared_ptr Decompose(const std::shared_ptrget_output_shape(0).size() == 3) { - mvn_output = std::make_shared( + mvn_output = std::make_shared( pre_output, - ngraph::opset8::Constant::create(ngraph::element::i64, - ngraph::Shape{3}, - {mvn_data.C, mvn_data.H, mvn_data.W}), + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{3}, {mvn_data.C, mvn_data.H, mvn_data.W}), false); } else { - mvn_output = std::make_shared( + mvn_output = std::make_shared( pre_output, - ngraph::opset8::Constant::create(ngraph::element::i64, - ngraph::Shape{4}, - {mvn_data.N, mvn_data.C, mvn_data.H, mvn_data.W}), + ov::op::v0::Constant::create(ngraph::element::i64, + ngraph::Shape{4}, + {mvn_data.N, mvn_data.C, mvn_data.H, mvn_data.W}), false); } - return std::make_shared(mvn_output); + return std::make_shared(mvn_output); } std::shared_ptr getReferenceFunction(const ngraph::Shape& input_shape, @@ -271,8 +270,8 @@ std::shared_ptr getReferenceFunction(const ngraph::Shape& inpu } // Create decomposed reference function - auto input_params = std::make_shared(ngraph::element::f32, input_shape); - std::shared_ptr result = Decompose(input_params, mvn_data); + auto input_params = std::make_shared(ngraph::element::f32, input_shape); + std::shared_ptr result = Decompose(input_params, mvn_data); return std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params}); } @@ -284,18 +283,18 @@ std::shared_ptr getInitialFunction(const ngraph::Shape& input_ const InferenceEngine::SizeVector& axes, 
const bool& across_channels, const bool& mvn_version_6) { - auto input_params = std::make_shared(ngraph::element::f32, input_shape); + auto input_params = std::make_shared(ngraph::element::f32, input_shape); std::shared_ptr mvn; if (mvn_version_6) { const auto axesConst = - std::make_shared(ngraph::element::i64, ngraph::Shape{axes.size()}, axes); - mvn = std::make_shared(input_params, axesConst, normalize_variance, eps, eps_mode); + std::make_shared(ngraph::element::i64, ngraph::Shape{axes.size()}, axes); + mvn = std::make_shared(input_params, axesConst, normalize_variance, eps, eps_mode); } else { - mvn = std::make_shared(input_params, across_channels, normalize_variance, eps); + mvn = std::make_shared(input_params, across_channels, normalize_variance, eps); } - auto result = std::make_shared(mvn); + auto result = std::make_shared(mvn); return std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params}); } diff --git a/src/plugins/intel_gna/tests/unit/transformations/gna_handle_transposes_around_matmul.cpp b/src/plugins/intel_gna/tests/unit/transformations/gna_handle_transposes_around_matmul.cpp index c4ae53b7255da6..7fbdfd7cf40966 100644 --- a/src/plugins/intel_gna/tests/unit/transformations/gna_handle_transposes_around_matmul.cpp +++ b/src/plugins/intel_gna/tests/unit/transformations/gna_handle_transposes_around_matmul.cpp @@ -11,6 +11,7 @@ #include #include "common_test_utils/ov_test_utils.hpp" +#include "openvino/opsets/opset7.hpp" #include "transformations/handle_transposes_around_matmul.hpp" namespace handle_transpose_before_matmul { @@ -19,30 +20,30 @@ std::shared_ptr CreateTransposeMatmulFunction(const ngraph::Sh const ngraph::Shape& reshape_shape, const ngraph::Shape& matmul_shape, bool create_reshape_after_transpose) { - auto input_params = std::make_shared(ngraph::element::i64, input_shape); + auto input_params = std::make_shared(ngraph::element::i64, input_shape); auto new_shape_const = - ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{reshape_shape.size()}, reshape_shape); - auto reshape = std::make_shared(input_params, new_shape_const, false); + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{reshape_shape.size()}, reshape_shape); + auto reshape = std::make_shared(input_params, new_shape_const, false); - auto transpose_order = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {1, 0}); - auto transpose = std::make_shared(reshape, transpose_order); + auto transpose_order = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {1, 0}); + auto transpose = std::make_shared(reshape, transpose_order); std::vector data(ngraph::shape_size(matmul_shape)); std::iota(std::begin(data), std::end(data), 1); - auto constant = ngraph::opset7::Constant::create(ngraph::element::i64, matmul_shape, data); - std::shared_ptr matmul; + auto constant = ov::op::v0::Constant::create(ngraph::element::i64, matmul_shape, data); + std::shared_ptr matmul; if (create_reshape_after_transpose) { auto reshape_after_transpose_const = - ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{reshape_shape.size()}, reshape_shape); + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{reshape_shape.size()}, reshape_shape); auto reshape_after_transpose = - std::make_shared(transpose, reshape_after_transpose_const, false); - matmul = std::make_shared(reshape_after_transpose, constant); + std::make_shared(transpose, reshape_after_transpose_const, false); + matmul = 
std::make_shared(reshape_after_transpose, constant); } else { - matmul = std::make_shared(transpose, constant); + matmul = std::make_shared(transpose, constant); } - auto result = std::make_shared(matmul); + auto result = std::make_shared(matmul); return std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params}); } @@ -50,27 +51,27 @@ std::shared_ptr CreateMatmulFunction(const ngraph::Shape& inpu const ngraph::Shape& reshape_shape, const ngraph::Shape& matmul_shape, bool create_reshape_instead_of_transpose) { - auto input_params = std::make_shared(ngraph::element::i64, input_shape); + auto input_params = std::make_shared(ngraph::element::i64, input_shape); - std::shared_ptr reshape; + std::shared_ptr reshape; auto const_shape = - ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{reshape_shape.size()}, reshape_shape); + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{reshape_shape.size()}, reshape_shape); if (create_reshape_instead_of_transpose) { - auto new_reshape = std::make_shared(input_params, const_shape, false); - auto new_shape_after_transpose = ngraph::opset7::Constant::create(ngraph::element::i64, - ngraph::Shape{reshape_shape.size()}, - {reshape_shape[1], reshape_shape[0]}); - reshape = std::make_shared(new_reshape, new_shape_after_transpose, false); + auto new_reshape = std::make_shared(input_params, const_shape, false); + auto new_shape_after_transpose = ov::op::v0::Constant::create(ngraph::element::i64, + ngraph::Shape{reshape_shape.size()}, + {reshape_shape[1], reshape_shape[0]}); + reshape = std::make_shared(new_reshape, new_shape_after_transpose, false); } else { - reshape = std::make_shared(input_params, const_shape, false); + reshape = std::make_shared(input_params, const_shape, false); } std::vector data(ngraph::shape_size(matmul_shape)); std::iota(std::begin(data), std::end(data), 1); - auto constant = ngraph::opset7::Constant::create(ngraph::element::i64, matmul_shape, data); - auto matmul = std::make_shared(reshape, constant); + auto constant = ov::op::v0::Constant::create(ngraph::element::i64, matmul_shape, data); + auto matmul = std::make_shared(reshape, constant); - auto result = std::make_shared(matmul); + auto result = std::make_shared(matmul); return std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params}); } @@ -79,43 +80,41 @@ std::shared_ptr CreateConcatTransposeMatmulFunction(const ngra const ngraph::Shape& reshape1_shape, const ngraph::Shape& reshape2_shape, bool create_reshape_after_transpose) { - auto transpose_order = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {1, 0}); + auto transpose_order = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {1, 0}); - auto input1_params = std::make_shared(ngraph::element::i64, input1_shape); + auto input1_params = std::make_shared(ngraph::element::i64, input1_shape); std::vector data1(ngraph::shape_size(input1_shape)); std::iota(std::begin(data1), std::end(data1), 1); - auto concat1_const = ngraph::opset7::Constant::create(ngraph::element::i64, input1_shape, data1); + auto concat1_const = ov::op::v0::Constant::create(ngraph::element::i64, input1_shape, data1); ngraph::OutputVector concat1_chunks{input1_params, concat1_const}; - auto concat1 = std::make_shared(concat1_chunks, 0); - auto transpose1 = std::make_shared(concat1, transpose_order); + auto concat1 = std::make_shared(concat1_chunks, 0); + auto transpose1 = std::make_shared(concat1, transpose_order); - auto input2_params = 
std::make_shared(ngraph::element::i64, input2_shape); + auto input2_params = std::make_shared(ngraph::element::i64, input2_shape); std::vector data2(ngraph::shape_size(input2_shape)); std::iota(std::begin(data2), std::end(data2), 1); - auto concat2_const = ngraph::opset7::Constant::create(ngraph::element::i64, input2_shape, data2); + auto concat2_const = ov::op::v0::Constant::create(ngraph::element::i64, input2_shape, data2); ngraph::OutputVector concat2_chunks{input2_params, concat2_const}; - auto concat2 = std::make_shared(concat2_chunks, 0); - auto transpose2 = std::make_shared(concat2, transpose_order); + auto concat2 = std::make_shared(concat2_chunks, 0); + auto transpose2 = std::make_shared(concat2, transpose_order); - std::shared_ptr matmul; + std::shared_ptr matmul; if (create_reshape_after_transpose) { - auto reshape_after_transpose1_const = ngraph::opset7::Constant::create(ngraph::element::i64, - ngraph::Shape{reshape1_shape.size()}, - reshape1_shape); + auto reshape_after_transpose1_const = + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{reshape1_shape.size()}, reshape1_shape); auto reshape_after_transpose1 = - std::make_shared(transpose1, reshape_after_transpose1_const, false); - auto reshape_after_transpose2_const = ngraph::opset7::Constant::create(ngraph::element::i64, - ngraph::Shape{reshape2_shape.size()}, - reshape2_shape); + std::make_shared(transpose1, reshape_after_transpose1_const, false); + auto reshape_after_transpose2_const = + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{reshape2_shape.size()}, reshape2_shape); auto reshape_after_transpose2 = - std::make_shared(transpose2, reshape_after_transpose2_const, false); - matmul = std::make_shared(reshape_after_transpose1, reshape_after_transpose2); + std::make_shared(transpose2, reshape_after_transpose2_const, false); + matmul = std::make_shared(reshape_after_transpose1, reshape_after_transpose2); } else { - matmul = std::make_shared(transpose1, transpose2); + matmul = std::make_shared(transpose1, transpose2); } - auto result = std::make_shared(matmul); + auto result = std::make_shared(matmul); return std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{input1_params, input2_params}); } @@ -125,37 +124,37 @@ std::shared_ptr CreateConcatMatmulFunction(const ngraph::Shape const ngraph::Shape& reshape1_shape, const ngraph::Shape& reshape2_shape, bool create_reshape_instead_of_transpose) { - auto input1_params = std::make_shared(ngraph::element::i64, input1_shape); + auto input1_params = std::make_shared(ngraph::element::i64, input1_shape); std::vector data1(ngraph::shape_size(input1_shape)); std::iota(std::begin(data1), std::end(data1), 1); - auto concat1_const = ngraph::opset7::Constant::create(ngraph::element::i64, input1_shape, data1); + auto concat1_const = ov::op::v0::Constant::create(ngraph::element::i64, input1_shape, data1); ngraph::OutputVector concat1_chunks{input1_params, concat1_const}; - auto concat1 = std::make_shared(concat1_chunks, 0); + auto concat1 = std::make_shared(concat1_chunks, 0); - auto input2_params = std::make_shared(ngraph::element::i64, input2_shape); + auto input2_params = std::make_shared(ngraph::element::i64, input2_shape); std::vector data2(ngraph::shape_size(input2_shape)); std::iota(std::begin(data2), std::end(data2), 1); - auto concat2_const = ngraph::opset7::Constant::create(ngraph::element::i64, input2_shape, data2); + auto concat2_const = ov::op::v0::Constant::create(ngraph::element::i64, input2_shape, data2); ngraph::OutputVector 
concat2_chunks{input2_params, concat2_const}; - auto concat2 = std::make_shared(concat2_chunks, 0); + auto concat2 = std::make_shared(concat2_chunks, 0); - std::shared_ptr matmul; + std::shared_ptr matmul; if (create_reshape_instead_of_transpose) { - auto new_shape_after_transpose1 = ngraph::opset7::Constant::create(ngraph::element::i64, - ngraph::Shape{reshape1_shape.size()}, - {reshape1_shape[1], reshape1_shape[0]}); - auto reshape1 = std::make_shared(concat1, new_shape_after_transpose1, false); - auto new_shape_after_transpose2 = ngraph::opset7::Constant::create(ngraph::element::i64, - ngraph::Shape{reshape2_shape.size()}, - {reshape2_shape[1], reshape2_shape[0]}); - auto reshape2 = std::make_shared(concat2, new_shape_after_transpose2, false); - matmul = std::make_shared(reshape1, reshape2); + auto new_shape_after_transpose1 = ov::op::v0::Constant::create(ngraph::element::i64, + ngraph::Shape{reshape1_shape.size()}, + {reshape1_shape[1], reshape1_shape[0]}); + auto reshape1 = std::make_shared(concat1, new_shape_after_transpose1, false); + auto new_shape_after_transpose2 = ov::op::v0::Constant::create(ngraph::element::i64, + ngraph::Shape{reshape2_shape.size()}, + {reshape2_shape[1], reshape2_shape[0]}); + auto reshape2 = std::make_shared(concat2, new_shape_after_transpose2, false); + matmul = std::make_shared(reshape1, reshape2); } else { - matmul = std::make_shared(concat1, concat2); + matmul = std::make_shared(concat1, concat2); } - auto result = std::make_shared(matmul); + auto result = std::make_shared(matmul); return std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{input1_params, input2_params}); } @@ -173,39 +172,39 @@ std::shared_ptr CreateMatmulTransposeFunction(const ngraph::Sh bool matmul_on_left_side, bool enable_fq1, bool enable_fq2) { - auto input_params = std::make_shared(ngraph::element::i64, input_shape); + auto input_params = std::make_shared(ngraph::element::i64, input_shape); std::vector data(ngraph::shape_size(matmul_shape)); std::iota(std::begin(data), std::end(data), 1); - auto matmul_constant = ngraph::opset7::Constant::create(ngraph::element::i64, matmul_shape, data); - std::shared_ptr node = std::make_shared(input_params, matmul_constant); + auto matmul_constant = ov::op::v0::Constant::create(ngraph::element::i64, matmul_shape, data); + std::shared_ptr node = std::make_shared(input_params, matmul_constant); const auto matmul_output_shape = node->get_output_shape(0); if (enable_fq1) { - node = std::make_shared( - node, - ngraph::opset7::Constant::create(ngraph::element::f32, {1}, {-0.1}), - ngraph::opset7::Constant::create(ngraph::element::f32, {1}, {0.1}), - ngraph::opset7::Constant::create(ngraph::element::f32, {1}, {-0.1}), - ngraph::opset7::Constant::create(ngraph::element::f32, {1}, {0.1}), - 255); + node = + std::make_shared(node, + ov::op::v0::Constant::create(ngraph::element::f32, {1}, {-0.1}), + ov::op::v0::Constant::create(ngraph::element::f32, {1}, {0.1}), + ov::op::v0::Constant::create(ngraph::element::f32, {1}, {-0.1}), + ov::op::v0::Constant::create(ngraph::element::f32, {1}, {0.1}), + 255); } if (enable_add) { - auto add_const = ngraph::opset7::Constant::create(ngraph::element::i64, matmul_output_shape, {1}); + auto add_const = ov::op::v0::Constant::create(ngraph::element::i64, matmul_output_shape, {1}); if (matmul_on_left_side) { - node = std::make_shared(add_const, node); + node = std::make_shared(add_const, node); } else { - node = std::make_shared(node, add_const); + node = std::make_shared(node, add_const); } if 
(enable_fq2) { - node = std::make_shared( + node = std::make_shared( node, - ngraph::opset7::Constant::create(ngraph::element::f32, {1}, {-0.1}), - ngraph::opset7::Constant::create(ngraph::element::f32, {1}, {0.1}), - ngraph::opset7::Constant::create(ngraph::element::f32, {1}, {-0.1}), - ngraph::opset7::Constant::create(ngraph::element::f32, {1}, {0.1}), + ov::op::v0::Constant::create(ngraph::element::f32, {1}, {-0.1}), + ov::op::v0::Constant::create(ngraph::element::f32, {1}, {0.1}), + ov::op::v0::Constant::create(ngraph::element::f32, {1}, {-0.1}), + ov::op::v0::Constant::create(ngraph::element::f32, {1}, {0.1}), 255); } } @@ -213,23 +212,22 @@ std::shared_ptr CreateMatmulTransposeFunction(const ngraph::Sh if (create_reshape_before_transpose) { auto matmul_output_shape = node->get_output_shape(0); std::swap(matmul_output_shape[0], matmul_output_shape[1]); - auto reshape_before_transpose_const = - ngraph::opset7::Constant::create(ngraph::element::i64, - ngraph::Shape{matmul_output_shape.size()}, - matmul_output_shape); - node = std::make_shared(node, reshape_before_transpose_const, false); + auto reshape_before_transpose_const = ov::op::v0::Constant::create(ngraph::element::i64, + ngraph::Shape{matmul_output_shape.size()}, + matmul_output_shape); + node = std::make_shared(node, reshape_before_transpose_const, false); } - auto transpose_order = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {1, 0}); - node = std::make_shared(node, transpose_order); + auto transpose_order = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {1, 0}); + node = std::make_shared(node, transpose_order); if (enable_last_reshape) { auto shape_const = - ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{reshape_shape.size()}, reshape_shape); - node = std::make_shared(node, shape_const, false); + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{reshape_shape.size()}, reshape_shape); + node = std::make_shared(node, shape_const, false); } - auto result = std::make_shared(node); + auto result = std::make_shared(node); return std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params}); } @@ -242,65 +240,65 @@ std::shared_ptr CreateMatmulFunction(const ngraph::Shape& inpu bool matmul_on_left_side, bool enable_fq1, bool enable_fq2) { - auto input_params = std::make_shared(ngraph::element::i64, input_shape); + auto input_params = std::make_shared(ngraph::element::i64, input_shape); std::vector data(ngraph::shape_size(matmul_shape)); std::iota(std::begin(data), std::end(data), 1); - auto matmul_constant = ngraph::opset7::Constant::create(ngraph::element::i64, matmul_shape, data); - std::shared_ptr node = std::make_shared(input_params, matmul_constant); + auto matmul_constant = ov::op::v0::Constant::create(ngraph::element::i64, matmul_shape, data); + std::shared_ptr node = std::make_shared(input_params, matmul_constant); const auto matmul_output_shape = node->get_output_shape(0); if (enable_fq1) { - node = std::make_shared( - node, - ngraph::opset7::Constant::create(ngraph::element::f32, {1}, {-0.1}), - ngraph::opset7::Constant::create(ngraph::element::f32, {1}, {0.1}), - ngraph::opset7::Constant::create(ngraph::element::f32, {1}, {-0.1}), - ngraph::opset7::Constant::create(ngraph::element::f32, {1}, {0.1}), - 255); + node = + std::make_shared(node, + ov::op::v0::Constant::create(ngraph::element::f32, {1}, {-0.1}), + ov::op::v0::Constant::create(ngraph::element::f32, {1}, {0.1}), + 
ov::op::v0::Constant::create(ngraph::element::f32, {1}, {-0.1}), + ov::op::v0::Constant::create(ngraph::element::f32, {1}, {0.1}), + 255); } if (enable_add) { - auto add_const = ngraph::opset7::Constant::create(ngraph::element::i64, matmul_output_shape, {1}); + auto add_const = ov::op::v0::Constant::create(ngraph::element::i64, matmul_output_shape, {1}); if (matmul_on_left_side) { - node = std::make_shared(add_const, node); + node = std::make_shared(add_const, node); } else { - node = std::make_shared(node, add_const); + node = std::make_shared(node, add_const); } if (enable_fq2) { - node = std::make_shared( + node = std::make_shared( node, - ngraph::opset7::Constant::create(ngraph::element::f32, {1}, {-0.1}), - ngraph::opset7::Constant::create(ngraph::element::f32, {1}, {0.1}), - ngraph::opset7::Constant::create(ngraph::element::f32, {1}, {-0.1}), - ngraph::opset7::Constant::create(ngraph::element::f32, {1}, {0.1}), + ov::op::v0::Constant::create(ngraph::element::f32, {1}, {-0.1}), + ov::op::v0::Constant::create(ngraph::element::f32, {1}, {0.1}), + ov::op::v0::Constant::create(ngraph::element::f32, {1}, {-0.1}), + ov::op::v0::Constant::create(ngraph::element::f32, {1}, {0.1}), 255); } } std::shared_ptr reshape; auto shape_const = - ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{reshape_shape.size()}, reshape_shape); + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{reshape_shape.size()}, reshape_shape); if (create_reshape_instead_of_transpose) { auto reshape_instead_of_transpose_const = - ngraph::opset7::Constant::create(ngraph::element::i64, - ngraph::Shape{matmul_output_shape.size()}, - {matmul_output_shape[1], matmul_output_shape[0]}); + ov::op::v0::Constant::create(ngraph::element::i64, + ngraph::Shape{matmul_output_shape.size()}, + {matmul_output_shape[1], matmul_output_shape[0]}); auto reshape_instead_of_transpose = - std::make_shared(node, reshape_instead_of_transpose_const, false); + std::make_shared(node, reshape_instead_of_transpose_const, false); reshape = reshape_instead_of_transpose; if (enable_last_reshape) { - reshape = std::make_shared(reshape_instead_of_transpose, shape_const, false); + reshape = std::make_shared(reshape_instead_of_transpose, shape_const, false); } } else { reshape = node; if (enable_last_reshape) { - reshape = std::make_shared(node, shape_const, false); + reshape = std::make_shared(node, shape_const, false); } } - auto result = std::make_shared(reshape); + auto result = std::make_shared(reshape); return std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params}); } diff --git a/src/plugins/intel_gna/tests/unit/transformations/gna_insert_copy_layer.cpp b/src/plugins/intel_gna/tests/unit/transformations/gna_insert_copy_layer.cpp index 87d53ff68f9c1f..8ef907c90e64bc 100644 --- a/src/plugins/intel_gna/tests/unit/transformations/gna_insert_copy_layer.cpp +++ b/src/plugins/intel_gna/tests/unit/transformations/gna_insert_copy_layer.cpp @@ -89,30 +89,30 @@ class InsertCopyLayerConcatTest : public InsertCopyLayerTest { InsertCopyLayerTest::SetUp(); { - auto params = std::make_shared(ngraph::element::i64, input_shape); - auto add = std::make_shared(params, params); + auto params = std::make_shared(ngraph::element::i64, input_shape); + auto add = std::make_shared(params, params); ngraph::OutputVector concat_inputs; for (int i = 0; i < m_inputs_num; ++i) { concat_inputs.push_back(add); } - auto concat = std::make_shared(concat_inputs, m_axis); - auto result = std::make_shared(concat); + auto concat = 
std::make_shared(concat_inputs, m_axis); + auto result = std::make_shared(concat); m_func = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{params}, "Concat"); } { - auto params = std::make_shared(ngraph::element::i64, input_shape); - auto add = std::make_shared(params, params); + auto params = std::make_shared(ngraph::element::i64, input_shape); + auto add = std::make_shared(params, params); auto copy = std::make_shared(add); ngraph::OutputVector concat_inputs = {}; for (int i = 0; i < m_inputs_num - 1; ++i) { concat_inputs.push_back(copy); } concat_inputs.push_back(add); - auto concat = std::make_shared(concat_inputs, m_axis); - auto result = std::make_shared(concat); + auto concat = std::make_shared(concat_inputs, m_axis); + auto result = std::make_shared(concat); m_ref_func = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{params}, "Concat"); @@ -145,7 +145,7 @@ class InsertCopyLayerSplitConcatTest : public InsertCopyLayerTest { InsertCopyLayerTest::SetUp(); { - auto params = std::make_shared(ngraph::element::i64, input_shape); + auto params = std::make_shared(ngraph::element::i64, input_shape); OPENVINO_SUPPRESS_DEPRECATED_START auto split = ngraph::builder::makeSplit(params, ngraph::element::i64, m_inputs_num, m_axis); OPENVINO_SUPPRESS_DEPRECATED_END @@ -154,15 +154,15 @@ class InsertCopyLayerSplitConcatTest : public InsertCopyLayerTest { for (int i = 0; i < m_inputs_num; ++i) { concat_inputs.push_back(split->output(i)); } - auto concat = std::make_shared(concat_inputs, m_axis); - auto result = std::make_shared(concat); + auto concat = std::make_shared(concat_inputs, m_axis); + auto result = std::make_shared(concat); m_func = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{params}, "Concat"); } { - auto params = std::make_shared(ngraph::element::i64, input_shape); + auto params = std::make_shared(ngraph::element::i64, input_shape); OPENVINO_SUPPRESS_DEPRECATED_START auto split = ngraph::builder::makeSplit(params, ngraph::element::i64, m_inputs_num, m_axis); OPENVINO_SUPPRESS_DEPRECATED_END @@ -178,9 +178,9 @@ class InsertCopyLayerSplitConcatTest : public InsertCopyLayerTest { else concat_inputs.push_back(split->output(i)); } - auto concat = std::make_shared(concat_inputs, m_axis); + auto concat = std::make_shared(concat_inputs, m_axis); - auto result = std::make_shared(concat); + auto result = std::make_shared(concat); m_ref_func = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{params}, "Concat"); @@ -256,21 +256,21 @@ TEST_P(InsertCopyLayerMultiParamConcatTest, CompareWithRefs) { ngraph::Shape in_shape{10}; { - auto params = std::make_shared(ngraph::element::i64, in_shape); + auto params = std::make_shared(ngraph::element::i64, in_shape); ngraph::OutputVector concat_inputs{params, params}; - auto concat = std::make_shared(concat_inputs, axis); - auto result = std::make_shared(concat); + auto concat = std::make_shared(concat_inputs, axis); + auto result = std::make_shared(concat); m_func = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{params}, "Concat"); } { - auto params = std::make_shared(ngraph::element::i64, in_shape); + auto params = std::make_shared(ngraph::element::i64, in_shape); auto copy = std::make_shared(params); ngraph::OutputVector concat_inputs{copy, copy}; - auto concat = std::make_shared(concat_inputs, axis); - auto result = std::make_shared(concat); + auto concat = std::make_shared(concat_inputs, axis); + auto result = std::make_shared(concat); 
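The InsertCopyLayer concat fixtures above build a small Add->Concat graph and a reference graph with the plugin's Copy operation inserted on selected Concat inputs. A sketch of the plain (pre-transformation) graph only is shown below, assuming ov::op::v1::Add and ov::op::v0::Concat; the GNA-specific Copy operation used in the reference functions is plugin-internal and deliberately omitted here.

// Illustrative sketch only: the base Add -> Concat model used by the fixtures.
#include <memory>
#include "openvino/core/model.hpp"
#include "openvino/op/add.hpp"
#include "openvino/op/concat.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/result.hpp"

std::shared_ptr<ov::Model> make_add_concat(const ov::Shape& shape, size_t n_inputs, int64_t axis) {
    auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, shape);
    auto add = std::make_shared<ov::op::v1::Add>(param, param);
    // Feed the same Add output into every Concat port, as the fixture does.
    ov::OutputVector concat_inputs;
    for (size_t i = 0; i < n_inputs; ++i) {
        concat_inputs.push_back(add);
    }
    auto concat = std::make_shared<ov::op::v0::Concat>(concat_inputs, axis);
    auto result = std::make_shared<ov::op::v0::Result>(concat);
    return std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{param}, "Concat");
}

In the reference graphs of these tests, one or more of the concat_inputs entries is routed through the copy layer instead of the raw Add output, which is exactly the difference the pass under test is expected to introduce.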
ref_func = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{params}, "Concat"); } @@ -305,27 +305,27 @@ TEST_P(InsertCopyLayerMultiParamNFLConcatTest, CompareWithRefs) { ngraph::Shape in_shape = {1, 2, 4}; { - auto params = std::make_shared(ngraph::element::i64, in_shape); + auto params = std::make_shared(ngraph::element::i64, in_shape); auto reshape1 = ov::op::util::reshapeTo(params, shape); auto reshape2 = ov::op::util::reshapeTo(params, shape); ngraph::OutputVector concat_inputs{reshape1, reshape2}; - auto concat = std::make_shared(concat_inputs, axis); - auto result = std::make_shared(concat); + auto concat = std::make_shared(concat_inputs, axis); + auto result = std::make_shared(concat); m_func = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{params}, "Concat"); } { - auto params = std::make_shared(ngraph::element::i64, in_shape); + auto params = std::make_shared(ngraph::element::i64, in_shape); auto reshape1 = ov::op::util::reshapeTo(params, shape); auto reshape2 = ov::op::util::reshapeTo(params, shape); auto copy1 = std::make_shared(reshape1); auto copy2 = std::make_shared(reshape2); ngraph::OutputVector concat_inputs{copy1, copy2}; - auto concat = std::make_shared(concat_inputs, axis); - auto result = std::make_shared(concat); + auto concat = std::make_shared(concat_inputs, axis); + auto result = std::make_shared(concat); ref_func = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{params}, "Concat"); } @@ -361,34 +361,34 @@ TEST_P(InsertCopyLayerMultiParamMultiNFLConcatTest, CompareWithRefs) { ngraph::Shape in_shape = {1, 2, 4}; { - auto params = std::make_shared(ngraph::element::i64, in_shape); + auto params = std::make_shared(ngraph::element::i64, in_shape); auto reshape1 = ov::op::util::reshapeTo(params, shape); auto reshape2 = ov::op::util::reshapeTo(params, shape); ngraph::OutputVector concat_inputs{reshape1, reshape2}; - auto concat1 = std::make_shared(concat_inputs, axis); - auto concat2 = std::make_shared(concat_inputs, axis); - auto result1 = std::make_shared(concat1); - auto result2 = std::make_shared(concat2); - auto result3 = std::make_shared(reshape1); + auto concat1 = std::make_shared(concat_inputs, axis); + auto concat2 = std::make_shared(concat_inputs, axis); + auto result1 = std::make_shared(concat1); + auto result2 = std::make_shared(concat2); + auto result3 = std::make_shared(reshape1); m_func = std::make_shared(ngraph::ResultVector{result1, result2, result3}, ngraph::ParameterVector{params}, "Concat"); } { - auto params = std::make_shared(ngraph::element::i64, in_shape); + auto params = std::make_shared(ngraph::element::i64, in_shape); auto reshape1 = ov::op::util::reshapeTo(params, shape); auto reshape2 = ov::op::util::reshapeTo(params, shape); auto copy1 = std::make_shared(reshape1); auto copy2 = std::make_shared(reshape2); ngraph::OutputVector concat_inputs{copy1, copy2}; - auto concat1 = std::make_shared(concat_inputs, axis); - auto concat2 = std::make_shared(concat_inputs, axis); - auto result1 = std::make_shared(concat1); - auto result2 = std::make_shared(concat2); - auto result3 = std::make_shared(reshape1); + auto concat1 = std::make_shared(concat_inputs, axis); + auto concat2 = std::make_shared(concat_inputs, axis); + auto result1 = std::make_shared(concat1); + auto result2 = std::make_shared(concat2); + auto result3 = std::make_shared(reshape1); ref_func = std::make_shared(ngraph::ResultVector{result1, result2, result3}, ngraph::ParameterVector{params}, "Concat"); @@ -421,36 +421,36 @@ 
TEST_P(InsertCopyLayerMultiConstConcatTest, CompareWithRefs) { ngraph::Shape in_shape{10}; { - auto params = std::make_shared(ngraph::element::i64, in_shape); - auto constant = std::make_shared(ngraph::element::i64, in_shape); + auto params = std::make_shared(ngraph::element::i64, in_shape); + auto constant = std::make_shared(ngraph::element::i64, in_shape); ngraph::OutputVector concat_inputs{params, constant, constant}; - auto concat = std::make_shared(concat_inputs, axis); - auto result = std::make_shared(concat); + auto concat = std::make_shared(concat_inputs, axis); + auto result = std::make_shared(concat); m_func = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{params}, "Concat"); } { - auto params = std::make_shared(ngraph::element::i64, in_shape); - auto constant = std::make_shared(ngraph::element::i64, in_shape); + auto params = std::make_shared(ngraph::element::i64, in_shape); + auto constant = std::make_shared(ngraph::element::i64, in_shape); auto copy = std::make_shared(constant); ngraph::OutputVector concat_inputs{params, copy, constant}; - auto concat = std::make_shared(concat_inputs, axis); - auto result = std::make_shared(concat); + auto concat = std::make_shared(concat_inputs, axis); + auto result = std::make_shared(concat); ref_func1 = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{params}, "Concat"); } { - auto params = std::make_shared(ngraph::element::i64, in_shape); - auto constant = std::make_shared(ngraph::element::i64, in_shape); + auto params = std::make_shared(ngraph::element::i64, in_shape); + auto constant = std::make_shared(ngraph::element::i64, in_shape); auto copy = std::make_shared(constant); ngraph::OutputVector concat_inputs{params, constant, copy}; - auto concat = std::make_shared(concat_inputs, axis); - auto result = std::make_shared(concat); + auto concat = std::make_shared(concat_inputs, axis); + auto result = std::make_shared(concat); ref_func2 = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{params}, "Concat"); } @@ -484,35 +484,35 @@ TEST_P(InsertCopyLayerMultiLayerConcatTest, CompareWithRefs) { ngraph::Shape in_shape{10}; { - auto params = std::make_shared(ngraph::element::i64, in_shape); - auto add = std::make_shared(params, params); + auto params = std::make_shared(ngraph::element::i64, in_shape); + auto add = std::make_shared(params, params); ngraph::OutputVector concat_inputs{add, add}; - auto concat = std::make_shared(concat_inputs, axis); - auto result = std::make_shared(concat); + auto concat = std::make_shared(concat_inputs, axis); + auto result = std::make_shared(concat); m_func = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{params}, "Concat"); } { - auto params = std::make_shared(ngraph::element::i64, in_shape); - auto add = std::make_shared(params, params); + auto params = std::make_shared(ngraph::element::i64, in_shape); + auto add = std::make_shared(params, params); auto copy = std::make_shared(add); ngraph::OutputVector concat_inputs{copy, add}; - auto concat = std::make_shared(concat_inputs, axis); - auto result = std::make_shared(concat); + auto concat = std::make_shared(concat_inputs, axis); + auto result = std::make_shared(concat); ref_func1 = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{params}, "Concat"); } { - auto params = std::make_shared(ngraph::element::i64, in_shape); - auto add = std::make_shared(params, params); + auto params = std::make_shared(ngraph::element::i64, in_shape); + auto add = 
std::make_shared(params, params); auto copy = std::make_shared(add); ngraph::OutputVector concat_inputs{add, copy}; - auto concat = std::make_shared(concat_inputs, axis); - auto result = std::make_shared(concat); + auto concat = std::make_shared(concat_inputs, axis); + auto result = std::make_shared(concat); ref_func2 = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{params}, "Concat"); } @@ -547,41 +547,41 @@ TEST_P(InsertCopyLayerMultiLayerNFLConcatTest, CompareWithRefs) { ngraph::Shape in_shape = {1, 2, 4}; { - auto params = std::make_shared(ngraph::element::i64, in_shape); - auto add = std::make_shared(params, params); + auto params = std::make_shared(ngraph::element::i64, in_shape); + auto add = std::make_shared(params, params); auto reshape1 = ov::op::util::reshapeTo(add, shape); auto reshape2 = ov::op::util::reshapeTo(add, shape); ngraph::OutputVector concat_inputs{reshape1, reshape2}; - auto concat = std::make_shared(concat_inputs, axis); - auto result = std::make_shared(concat); + auto concat = std::make_shared(concat_inputs, axis); + auto result = std::make_shared(concat); m_func = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{params}, "Concat"); } { - auto params = std::make_shared(ngraph::element::i64, in_shape); - auto add = std::make_shared(params, params); + auto params = std::make_shared(ngraph::element::i64, in_shape); + auto add = std::make_shared(params, params); auto reshape1 = ov::op::util::reshapeTo(add, shape); auto reshape_copy = std::make_shared(reshape1); auto reshape2 = ov::op::util::reshapeTo(add, shape); ngraph::OutputVector concat_inputs{reshape_copy, reshape2}; - auto concat = std::make_shared(concat_inputs, axis); - auto result = std::make_shared(concat); + auto concat = std::make_shared(concat_inputs, axis); + auto result = std::make_shared(concat); ref_func1 = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{params}, "Concat"); } { - auto params = std::make_shared(ngraph::element::i64, in_shape); - auto add = std::make_shared(params, params); + auto params = std::make_shared(ngraph::element::i64, in_shape); + auto add = std::make_shared(params, params); auto reshape1 = ov::op::util::reshapeTo(add, shape); auto reshape2 = ov::op::util::reshapeTo(add, shape); auto reshape_copy = std::make_shared(reshape2); ngraph::OutputVector concat_inputs{reshape1, reshape_copy}; - auto concat = std::make_shared(concat_inputs, axis); - auto result = std::make_shared(concat); + auto concat = std::make_shared(concat_inputs, axis); + auto result = std::make_shared(concat); ref_func2 = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{params}, "Concat"); } @@ -617,12 +617,12 @@ TEST_P(InsertCopyLayerMultiParamMemoryTest, CompareWithRefs) { { auto variable = std::make_shared( ov::op::util::VariableInfo{in_shape, ngraph::element::i64, variable_name}); - auto input = std::make_shared(ngraph::element::i64, in_shape); + auto input = std::make_shared(ngraph::element::i64, in_shape); auto init_value = ngraph::builder::makeConstant(ngraph::element::i64, in_shape, std::vector{0}); - auto read_value = std::make_shared(init_value, variable); - auto add = std::make_shared(input, read_value); - auto result = std::make_shared(add); - auto assign = std::make_shared(input, variable); + auto read_value = std::make_shared(init_value, variable); + auto add = std::make_shared(input, read_value); + auto result = std::make_shared(add); + auto assign = std::make_shared(input, variable); 
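The memory-related fixtures in this file pair a ReadValue with an Assign on a shared Variable and order them with a control dependency. A minimal sketch of that stateful pattern follows, assuming the v6 ReadValue/Assign operators, f32 data, and explicit registration of the Assign as a model sink; the actual fixtures may register the sink differently and rely on the control dependency alone.

// Illustrative sketch only: Variable + ReadValue/Assign with a control dependency.
#include <memory>
#include <string>
#include <vector>
#include "openvino/core/model.hpp"
#include "openvino/core/shape.hpp"
#include "openvino/op/add.hpp"
#include "openvino/op/assign.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/read_value.hpp"
#include "openvino/op/result.hpp"
#include "openvino/op/util/variable.hpp"

std::shared_ptr<ov::Model> make_stateful_add(const ov::Shape& shape, const std::string& variable_name) {
    auto variable = std::make_shared<ov::op::util::Variable>(
        ov::op::util::VariableInfo{shape, ov::element::f32, variable_name});
    auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, shape);
    auto init = ov::op::v0::Constant::create(ov::element::f32,
                                             shape,
                                             std::vector<float>(ov::shape_size(shape), 0.0f));
    auto read_value = std::make_shared<ov::op::v6::ReadValue>(init, variable);
    auto add = std::make_shared<ov::op::v1::Add>(input, read_value);
    auto assign = std::make_shared<ov::op::v6::Assign>(input, variable);
    assign->add_control_dependency(read_value);  // keep the state update ordered after the read
    auto result = std::make_shared<ov::op::v0::Result>(add);
    return std::make_shared<ov::Model>(ov::ResultVector{result},
                                       ov::SinkVector{assign},
                                       ov::ParameterVector{input});
}

The copy-insertion tests then check that, when the same tensor feeds both the Assign and another consumer, the pass places the plugin's Copy layer in front of the state write so the two data paths no longer alias.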
assign->add_control_dependency(read_value); ngraph::ParameterVector params = {input}; @@ -634,14 +634,14 @@ TEST_P(InsertCopyLayerMultiParamMemoryTest, CompareWithRefs) { { auto variable = std::make_shared( ov::op::util::VariableInfo{in_shape, ngraph::element::i64, variable_name}); - auto input = std::make_shared(ngraph::element::i64, in_shape); + auto input = std::make_shared(ngraph::element::i64, in_shape); auto init_value = ngraph::builder::makeConstant(ngraph::element::i64, in_shape, std::vector{0}); - auto read_value = std::make_shared(init_value, variable); + auto read_value = std::make_shared(init_value, variable); auto copy1 = std::make_shared(input); - auto add = std::make_shared(copy1, read_value); - auto result = std::make_shared(add); + auto add = std::make_shared(copy1, read_value); + auto result = std::make_shared(add); auto copy2 = std::make_shared(input); - auto assign = std::make_shared(copy2, variable); + auto assign = std::make_shared(copy2, variable); assign->add_control_dependency(read_value); ngraph::ParameterVector params = {input}; @@ -680,13 +680,13 @@ TEST_P(InsertCopyLayerMultiParamConcatMemoryTest, CompareWithRefs) { { auto variable = std::make_shared( ov::op::util::VariableInfo{in_shape, ngraph::element::i64, variable_name}); - auto input = std::make_shared(ngraph::element::i64, in_shape); + auto input = std::make_shared(ngraph::element::i64, in_shape); auto init_value = ngraph::builder::makeConstant(ngraph::element::i64, in_shape, std::vector{0}); - auto read_value = std::make_shared(init_value, variable); - auto assign = std::make_shared(input, variable); + auto read_value = std::make_shared(init_value, variable); + auto assign = std::make_shared(input, variable); assign->add_control_dependency(read_value); - auto concat = std::make_shared(ngraph::OutputVector{input, read_value}, axis); - auto result = std::make_shared(concat); + auto concat = std::make_shared(ngraph::OutputVector{input, read_value}, axis); + auto result = std::make_shared(concat); ngraph::ParameterVector params = {input}; ngraph::ResultVector results = {result}; @@ -697,15 +697,15 @@ TEST_P(InsertCopyLayerMultiParamConcatMemoryTest, CompareWithRefs) { { auto variable = std::make_shared( ov::op::util::VariableInfo{in_shape, ngraph::element::i64, variable_name}); - auto input = std::make_shared(ngraph::element::i64, in_shape); + auto input = std::make_shared(ngraph::element::i64, in_shape); auto copy1 = std::make_shared(input); auto init_value = ngraph::builder::makeConstant(ngraph::element::i64, in_shape, std::vector{0}); - auto read_value = std::make_shared(init_value, variable); - auto assign = std::make_shared(copy1, variable); + auto read_value = std::make_shared(init_value, variable); + auto assign = std::make_shared(copy1, variable); assign->add_control_dependency(read_value); auto copy2 = std::make_shared(input); - auto concat = std::make_shared(ngraph::OutputVector{copy2, read_value}, axis); - auto result = std::make_shared(concat); + auto concat = std::make_shared(ngraph::OutputVector{copy2, read_value}, axis); + auto result = std::make_shared(concat); ngraph::ParameterVector params = {input}; ngraph::ResultVector results = {result}; @@ -748,17 +748,17 @@ TEST_P(InsertCopyLayerMultiParamNFLConcatMemoryTest, CompareWithRefs) { { auto variable = std::make_shared( ov::op::util::VariableInfo{allowed_shape, ngraph::element::i64, variable_name}); - auto input = std::make_shared(ngraph::element::i64, in_shape); + auto input = std::make_shared(ngraph::element::i64, in_shape); auto reshape1 = 
ov::op::util::reshapeTo(input, shape1); auto reshape2 = ov::op::util::reshapeTo(input, shape2); auto init_value = ngraph::builder::makeConstant(ngraph::element::i64, shape2, std::vector{0}); - auto read_value = std::make_shared(init_value, variable); - auto assign = std::make_shared(reshape1, variable); + auto read_value = std::make_shared(init_value, variable); + auto assign = std::make_shared(reshape1, variable); assign->add_control_dependency(read_value); - auto concat = std::make_shared(ngraph::OutputVector{reshape2, read_value}, axis); - auto result = std::make_shared(concat); + auto concat = std::make_shared(ngraph::OutputVector{reshape2, read_value}, axis); + auto result = std::make_shared(concat); ngraph::ParameterVector params = {input}; ngraph::ResultVector results = {result}; @@ -769,19 +769,19 @@ TEST_P(InsertCopyLayerMultiParamNFLConcatMemoryTest, CompareWithRefs) { { auto variable = std::make_shared( ov::op::util::VariableInfo{allowed_shape, ngraph::element::i64, variable_name}); - auto input = std::make_shared(ngraph::element::i64, in_shape); + auto input = std::make_shared(ngraph::element::i64, in_shape); auto reshape1 = ov::op::util::reshapeTo(input, shape1); auto reshape2 = ov::op::util::reshapeTo(input, shape2); auto copy1 = std::make_shared(reshape1); auto copy2 = std::make_shared(reshape2); auto init_value = ngraph::builder::makeConstant(ngraph::element::i64, shape2, std::vector{0}); - auto read_value = std::make_shared(init_value, variable); - auto assign = std::make_shared(copy1, variable); + auto read_value = std::make_shared(init_value, variable); + auto assign = std::make_shared(copy1, variable); assign->add_control_dependency(read_value); - auto concat = std::make_shared(ngraph::OutputVector{copy2, read_value}, axis); - auto result = std::make_shared(concat); + auto concat = std::make_shared(ngraph::OutputVector{copy2, read_value}, axis); + auto result = std::make_shared(concat); ngraph::ParameterVector params = {input}; ngraph::ResultVector results = {result}; @@ -825,16 +825,16 @@ TEST_P(InsertCopyLayerMultiLayerConcatMemoryTest, CompareWithRefs) { { auto variable = std::make_shared( ov::op::util::VariableInfo{out_shape, ngraph::element::i64, variable_name}); - auto input = std::make_shared(ngraph::element::i64, in_shape); + auto input = std::make_shared(ngraph::element::i64, in_shape); auto reshape = ov::op::util::reshapeTo(input, shape); auto crop = std::make_shared(reshape, axes, dim, offset); auto init_value = ngraph::builder::makeConstant(ngraph::element::i64, out_shape, std::vector{0}); - auto read_value = std::make_shared(init_value, variable); - auto mul = std::make_shared(crop, read_value); - auto assign = std::make_shared(crop, variable); + auto read_value = std::make_shared(init_value, variable); + auto mul = std::make_shared(crop, read_value); + auto assign = std::make_shared(crop, variable); assign->add_control_dependency(read_value); - auto result = std::make_shared(mul); + auto result = std::make_shared(mul); ngraph::ParameterVector params = {input}; ngraph::ResultVector results = {result}; @@ -845,17 +845,17 @@ TEST_P(InsertCopyLayerMultiLayerConcatMemoryTest, CompareWithRefs) { { auto variable = std::make_shared( ov::op::util::VariableInfo{out_shape, ngraph::element::i64, variable_name}); - auto input = std::make_shared(ngraph::element::i64, in_shape); + auto input = std::make_shared(ngraph::element::i64, in_shape); auto reshape = ov::op::util::reshapeTo(input, shape); auto crop = std::make_shared(reshape, axes, dim, offset); auto copy = 
std::make_shared(crop); auto init_value = ngraph::builder::makeConstant(ngraph::element::i64, out_shape, std::vector{0}); - auto read_value = std::make_shared(init_value, variable); - auto mul = std::make_shared(crop, read_value); - auto assign = std::make_shared(copy, variable); + auto read_value = std::make_shared(init_value, variable); + auto mul = std::make_shared(crop, read_value); + auto assign = std::make_shared(copy, variable); assign->add_control_dependency(read_value); - auto result = std::make_shared(mul); + auto result = std::make_shared(mul); ngraph::ParameterVector params = {input}; ngraph::ResultVector results = {result}; @@ -901,17 +901,17 @@ TEST_P(InsertCopyLayerCropMemoryTest, CompareWithRefs) { { auto variable = std::make_shared(ov::op::util::VariableInfo{shape2, ngraph::element::i64, variable_name}); - auto input = std::make_shared(ngraph::element::i64, in_shape); + auto input = std::make_shared(ngraph::element::i64, in_shape); auto reshape1 = ov::op::util::reshapeTo(input, shape1); auto crop = std::make_shared(reshape1, axes, dim, offset); auto reshape2 = ov::op::util::reshapeTo(crop, shape2); auto init_value = ngraph::builder::makeConstant(ngraph::element::i64, shape2, std::vector{0}); - auto read_value = std::make_shared(init_value, variable); - auto add = std::make_shared(reshape2, read_value); - auto assign = std::make_shared(reshape2, variable); + auto read_value = std::make_shared(init_value, variable); + auto add = std::make_shared(reshape2, read_value); + auto assign = std::make_shared(reshape2, variable); assign->add_control_dependency(read_value); - auto result = std::make_shared(add); + auto result = std::make_shared(add); ngraph::ParameterVector params = {input}; ngraph::ResultVector results = {result}; @@ -922,18 +922,18 @@ TEST_P(InsertCopyLayerCropMemoryTest, CompareWithRefs) { { auto variable = std::make_shared(ov::op::util::VariableInfo{shape2, ngraph::element::i64, variable_name}); - auto input = std::make_shared(ngraph::element::i64, in_shape); + auto input = std::make_shared(ngraph::element::i64, in_shape); auto reshape1 = ov::op::util::reshapeTo(input, shape1); auto crop = std::make_shared(reshape1, axes, dim, offset); auto reshape2 = ov::op::util::reshapeTo(crop, shape2); auto copy = std::make_shared(reshape2); auto init_value = ngraph::builder::makeConstant(ngraph::element::i64, shape2, std::vector{0}); - auto read_value = std::make_shared(init_value, variable); - auto add = std::make_shared(reshape2, read_value); - auto assign = std::make_shared(copy, variable); + auto read_value = std::make_shared(init_value, variable); + auto add = std::make_shared(reshape2, read_value); + auto assign = std::make_shared(copy, variable); assign->add_control_dependency(read_value); - auto result = std::make_shared(add); + auto result = std::make_shared(add); ngraph::ParameterVector params = {input}; ngraph::ResultVector results = {result}; @@ -975,14 +975,14 @@ TEST_P(InsertCopyLayerCropNFLMemoryTest, CompareWithRefs) { { auto variable = std::make_shared( ov::op::util::VariableInfo{in_shape, ngraph::element::i64, variable_name}); - auto input = std::make_shared(ngraph::element::i64, in_shape); + auto input = std::make_shared(ngraph::element::i64, in_shape); auto init_value = ngraph::builder::makeConstant(ngraph::element::i64, in_shape, std::vector{0}); - auto read_value = std::make_shared(init_value, variable); - auto concat = std::make_shared(ngraph::OutputVector{input, read_value}, axis); - auto axis_const = 
ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{}, {0}); - auto split = std::make_shared(concat, axis_const, 2); - auto result = std::make_shared(split); - auto assign = std::make_shared(split, variable); + auto read_value = std::make_shared(init_value, variable); + auto concat = std::make_shared(ngraph::OutputVector{input, read_value}, axis); + auto axis_const = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{}, {0}); + auto split = std::make_shared(concat, axis_const, 2); + auto result = std::make_shared(split); + auto assign = std::make_shared(split, variable); assign->add_control_dependency(read_value); ngraph::ParameterVector params = {input}; @@ -994,15 +994,15 @@ TEST_P(InsertCopyLayerCropNFLMemoryTest, CompareWithRefs) { { auto variable = std::make_shared( ov::op::util::VariableInfo{in_shape, ngraph::element::i64, variable_name}); - auto input = std::make_shared(ngraph::element::i64, in_shape); + auto input = std::make_shared(ngraph::element::i64, in_shape); auto init_value = ngraph::builder::makeConstant(ngraph::element::i64, in_shape, std::vector{0}); - auto read_value = std::make_shared(init_value, variable); - auto concat = std::make_shared(ngraph::OutputVector{input, read_value}, axis); - auto axis_const = ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{}, {0}); - auto split = std::make_shared(concat, axis_const, 2); - auto result = std::make_shared(split); + auto read_value = std::make_shared(init_value, variable); + auto concat = std::make_shared(ngraph::OutputVector{input, read_value}, axis); + auto axis_const = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{}, {0}); + auto split = std::make_shared(concat, axis_const, 2); + auto result = std::make_shared(split); auto copy = std::make_shared(split); - auto assign = std::make_shared(copy, variable); + auto assign = std::make_shared(copy, variable); assign->add_control_dependency(read_value); ngraph::ParameterVector params = {input}; @@ -1045,16 +1045,16 @@ TEST_P(InsertCopyLayerConcatMemoryTest, CompareWithRefs) { { auto variable = std::make_shared( ov::op::util::VariableInfo{out_shape, ngraph::element::i64, variable_name}); - auto input1 = std::make_shared(ngraph::element::i64, in_shape); - auto input2 = std::make_shared(ngraph::element::i64, in_shape); - auto concat = std::make_shared(ngraph::OutputVector{input1, input2}, axis); + auto input1 = std::make_shared(ngraph::element::i64, in_shape); + auto input2 = std::make_shared(ngraph::element::i64, in_shape); + auto concat = std::make_shared(ngraph::OutputVector{input1, input2}, axis); auto init_value = ngraph::builder::makeConstant(ngraph::element::i64, out_shape, std::vector{0}); - auto read_value = std::make_shared(init_value, variable); - auto assign = std::make_shared(concat, variable); + auto read_value = std::make_shared(init_value, variable); + auto assign = std::make_shared(concat, variable); assign->add_control_dependency(read_value); - auto add = std::make_shared(concat, read_value); - auto result = std::make_shared(add); + auto add = std::make_shared(concat, read_value); + auto result = std::make_shared(add); ngraph::ParameterVector params = {input1, input2}; ngraph::ResultVector results = {result}; @@ -1065,17 +1065,17 @@ TEST_P(InsertCopyLayerConcatMemoryTest, CompareWithRefs) { { auto variable = std::make_shared( ov::op::util::VariableInfo{out_shape, ngraph::element::i64, variable_name}); - auto input1 = std::make_shared(ngraph::element::i64, in_shape); - auto input2 = 
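// Illustrative sketch only (not part of the patch): the hunks above replace the
// ngraph::opset8 classes with explicit ov::op::vN classes when splitting a tensor
// along a constant axis. The exact opset versions are hidden by the stripped
// template arguments in this diff; v0::Constant and v1::Split are assumed here.
#include <memory>
#include "openvino/core/model.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/result.hpp"
#include "openvino/op/split.hpp"

std::shared_ptr<ov::Model> make_split_model_sketch() {
    auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{2, 10});
    // Scalar axis constant, same pattern as the axis_const in the hunk above.
    auto axis = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {0});
    auto split = std::make_shared<ov::op::v1::Split>(input, axis, 2);
    auto result = std::make_shared<ov::op::v0::Result>(split->output(0));
    return std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{input});
}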
std::make_shared(ngraph::element::i64, in_shape); - auto concat = std::make_shared(ngraph::OutputVector{input1, input2}, axis); + auto input1 = std::make_shared(ngraph::element::i64, in_shape); + auto input2 = std::make_shared(ngraph::element::i64, in_shape); + auto concat = std::make_shared(ngraph::OutputVector{input1, input2}, axis); auto copy = std::make_shared(concat); auto init_value = ngraph::builder::makeConstant(ngraph::element::i64, out_shape, std::vector{0}); - auto read_value = std::make_shared(init_value, variable); - auto assign = std::make_shared(copy, variable); + auto read_value = std::make_shared(init_value, variable); + auto assign = std::make_shared(copy, variable); assign->add_control_dependency(read_value); - auto add = std::make_shared(concat, read_value); - auto result = std::make_shared(add); + auto add = std::make_shared(concat, read_value); + auto result = std::make_shared(add); ngraph::ParameterVector params = {input1, input2}; ngraph::ResultVector results = {result}; @@ -1120,17 +1120,17 @@ TEST_P(InsertCopyLayerConcatNFLMemoryTest, CompareWithRefs) { { auto variable = std::make_shared(ov::op::util::VariableInfo{shape, ngraph::element::i64, variable_name}); - auto input1 = std::make_shared(ngraph::element::i64, in_shape); - auto input2 = std::make_shared(ngraph::element::i64, in_shape); - auto concat = std::make_shared(ngraph::OutputVector{input1, input2}, axis); + auto input1 = std::make_shared(ngraph::element::i64, in_shape); + auto input2 = std::make_shared(ngraph::element::i64, in_shape); + auto concat = std::make_shared(ngraph::OutputVector{input1, input2}, axis); auto reshape = ov::op::util::reshapeTo(concat, shape); auto init_value = ngraph::builder::makeConstant(ngraph::element::i64, shape, std::vector{0}); - auto read_value = std::make_shared(init_value, variable); - auto assign = std::make_shared(reshape, variable); + auto read_value = std::make_shared(init_value, variable); + auto assign = std::make_shared(reshape, variable); assign->add_control_dependency(read_value); - auto add = std::make_shared(reshape, read_value); - auto result = std::make_shared(add); + auto add = std::make_shared(reshape, read_value); + auto result = std::make_shared(add); ngraph::ParameterVector params = {input1, input2}; ngraph::ResultVector results = {result}; @@ -1141,18 +1141,18 @@ TEST_P(InsertCopyLayerConcatNFLMemoryTest, CompareWithRefs) { { auto variable = std::make_shared(ov::op::util::VariableInfo{shape, ngraph::element::i64, variable_name}); - auto input1 = std::make_shared(ngraph::element::i64, in_shape); - auto input2 = std::make_shared(ngraph::element::i64, in_shape); - auto concat = std::make_shared(ngraph::OutputVector{input1, input2}, axis); + auto input1 = std::make_shared(ngraph::element::i64, in_shape); + auto input2 = std::make_shared(ngraph::element::i64, in_shape); + auto concat = std::make_shared(ngraph::OutputVector{input1, input2}, axis); auto reshape = ov::op::util::reshapeTo(concat, shape); auto copy = std::make_shared(reshape); auto init_value = ngraph::builder::makeConstant(ngraph::element::i64, shape, std::vector{0}); - auto read_value = std::make_shared(init_value, variable); - auto assign = std::make_shared(copy, variable); + auto read_value = std::make_shared(init_value, variable); + auto assign = std::make_shared(copy, variable); assign->add_control_dependency(read_value); - auto add = std::make_shared(reshape, read_value); - auto result = std::make_shared(add); + auto add = std::make_shared(reshape, read_value); + auto result = 
std::make_shared(add); ngraph::ParameterVector params = {input1, input2}; ngraph::ResultVector results = {result}; @@ -1192,16 +1192,16 @@ TEST_P(InsertCopyLayerSplitMemoryTest, CompareWithRefs) { { auto variable = std::make_shared( ov::op::util::VariableInfo{allowed_shape, ngraph::element::i64, variable_name}); - auto input = std::make_shared(ngraph::element::i64, in_shape); + auto input = std::make_shared(ngraph::element::i64, in_shape); OPENVINO_SUPPRESS_DEPRECATED_START auto split = ngraph::builder::makeSplit(input, ngraph::element::i64, 1, axis); OPENVINO_SUPPRESS_DEPRECATED_END auto init_value = ngraph::builder::makeConstant(ngraph::element::i64, out_shape, std::vector{0}); - auto read_value = std::make_shared(init_value, variable); - auto assign = std::make_shared(split, variable); + auto read_value = std::make_shared(init_value, variable); + auto assign = std::make_shared(split, variable); assign->add_control_dependency(read_value); - auto concat = std::make_shared(ngraph::OutputVector{split, read_value}, axis); - auto result = std::make_shared(concat); + auto concat = std::make_shared(ngraph::OutputVector{split, read_value}, axis); + auto result = std::make_shared(concat); ngraph::ParameterVector params = {input}; ngraph::ResultVector results = {result}; @@ -1212,17 +1212,17 @@ TEST_P(InsertCopyLayerSplitMemoryTest, CompareWithRefs) { { auto variable = std::make_shared( ov::op::util::VariableInfo{allowed_shape, ngraph::element::i64, variable_name}); - auto input = std::make_shared(ngraph::element::i64, in_shape); + auto input = std::make_shared(ngraph::element::i64, in_shape); OPENVINO_SUPPRESS_DEPRECATED_START auto split = ngraph::builder::makeSplit(input, ngraph::element::i64, 1, axis); OPENVINO_SUPPRESS_DEPRECATED_END auto copy = std::make_shared(split); auto init_value = ngraph::builder::makeConstant(ngraph::element::i64, out_shape, std::vector{0}); - auto read_value = std::make_shared(init_value, variable); - auto assign = std::make_shared(copy, variable); + auto read_value = std::make_shared(init_value, variable); + auto assign = std::make_shared(copy, variable); assign->add_control_dependency(read_value); - auto concat = std::make_shared(ngraph::OutputVector{split, read_value}, axis); - auto result = std::make_shared(concat); + auto concat = std::make_shared(ngraph::OutputVector{split, read_value}, axis); + auto result = std::make_shared(concat); ngraph::ParameterVector params = {input}; ngraph::ResultVector results = {result}; @@ -1265,17 +1265,17 @@ TEST_P(InsertCopyLayerSplitNFLMemoryTest, CompareWithRefs) { { auto variable = std::make_shared( ov::op::util::VariableInfo{allowed_shape, ngraph::element::i64, variable_name}); - auto input = std::make_shared(ngraph::element::i64, in_shape); + auto input = std::make_shared(ngraph::element::i64, in_shape); OPENVINO_SUPPRESS_DEPRECATED_START auto split = ngraph::builder::makeSplit(input, ngraph::element::i64, 2, axis); OPENVINO_SUPPRESS_DEPRECATED_END auto reshape = ov::op::util::reshapeTo(split, shape); auto init_value = ngraph::builder::makeConstant(ngraph::element::i64, out_shape, std::vector{0}); - auto read_value = std::make_shared(init_value, variable); - auto assign = std::make_shared(reshape, variable); + auto read_value = std::make_shared(init_value, variable); + auto assign = std::make_shared(reshape, variable); assign->add_control_dependency(read_value); - auto concat = std::make_shared(ngraph::OutputVector{split, read_value}, axis); - auto result = std::make_shared(concat); + auto concat = 
std::make_shared(ngraph::OutputVector{split, read_value}, axis); + auto result = std::make_shared(concat); ngraph::ParameterVector params = {input}; ngraph::ResultVector results = {result}; @@ -1286,18 +1286,18 @@ TEST_P(InsertCopyLayerSplitNFLMemoryTest, CompareWithRefs) { { auto variable = std::make_shared( ov::op::util::VariableInfo{allowed_shape, ngraph::element::i64, variable_name}); - auto input = std::make_shared(ngraph::element::i64, in_shape); + auto input = std::make_shared(ngraph::element::i64, in_shape); OPENVINO_SUPPRESS_DEPRECATED_START auto split = ngraph::builder::makeSplit(input, ngraph::element::i64, 2, axis); OPENVINO_SUPPRESS_DEPRECATED_END auto reshape = ov::op::util::reshapeTo(split, shape); auto copy = std::make_shared(reshape); auto init_value = ngraph::builder::makeConstant(ngraph::element::i64, out_shape, std::vector{0}); - auto read_value = std::make_shared(init_value, variable); - auto assign = std::make_shared(copy, variable); + auto read_value = std::make_shared(init_value, variable); + auto assign = std::make_shared(copy, variable); assign->add_control_dependency(read_value); - auto concat = std::make_shared(ngraph::OutputVector{split, read_value}, axis); - auto result = std::make_shared(concat); + auto concat = std::make_shared(ngraph::OutputVector{split, read_value}, axis); + auto result = std::make_shared(concat); ngraph::ParameterVector params = {input}; ngraph::ResultVector results = {result}; @@ -1341,24 +1341,24 @@ TEST_P(InsertCopyLayerCropConcatTest, CompareWithRefs) { ngraph::Shape out_shape = {1, 1, 2, 2}; { - auto params = std::make_shared(ngraph::element::i64, in_shape); + auto params = std::make_shared(ngraph::element::i64, in_shape); auto reshape = ov::op::util::reshapeTo(params, shape); auto crop = std::make_shared(reshape, axes, dim, offset); auto const_value = ngraph::builder::makeConstant(ngraph::element::i64, out_shape, std::vector{1}); - auto concat = std::make_shared(ngraph::OutputVector{crop, const_value}, axis); - auto result = std::make_shared(concat); + auto concat = std::make_shared(ngraph::OutputVector{crop, const_value}, axis); + auto result = std::make_shared(concat); m_func = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{params}, "Concat"); } { - auto params = std::make_shared(ngraph::element::i64, in_shape); + auto params = std::make_shared(ngraph::element::i64, in_shape); auto reshape = ov::op::util::reshapeTo(params, shape); auto crop = std::make_shared(reshape, axes, dim, offset); auto copy = std::make_shared(crop); auto const_value = ngraph::builder::makeConstant(ngraph::element::i64, out_shape, std::vector{1}); - auto concat = std::make_shared(ngraph::OutputVector{copy, const_value}, axis); - auto result = std::make_shared(concat); + auto concat = std::make_shared(ngraph::OutputVector{copy, const_value}, axis); + auto result = std::make_shared(concat); ref_func = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{params}, "Concat"); } @@ -1393,19 +1393,19 @@ TEST_P(InsertCopyLayerNonfuncTest, CompareWithRefs) { ngraph::Shape in_shape = {1, 2, 4}; { - auto params = std::make_shared(ngraph::element::i64, in_shape); + auto params = std::make_shared(ngraph::element::i64, in_shape); auto reshape = ov::op::util::reshapeTo(params, shape); - auto result = std::make_shared(reshape); + auto result = std::make_shared(reshape); m_func = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{params}, "nonfunc"); } { - auto params = std::make_shared(ngraph::element::i64, 
in_shape); + auto params = std::make_shared(ngraph::element::i64, in_shape); auto copy = std::make_shared(params); auto reshape = ov::op::util::reshapeTo(copy, shape); - auto result = std::make_shared(reshape); + auto result = std::make_shared(reshape); ref_func = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{params}, "nonfunc"); @@ -1441,23 +1441,23 @@ TEST_P(InsertCopyLayerNonfuncTwoSubgraphsTest, CompareWithRefs) { ngraph::Shape in_shape = {1, 2, 4}; { - auto params = std::make_shared(ngraph::element::i64, in_shape); + auto params = std::make_shared(ngraph::element::i64, in_shape); auto reshape1 = ov::op::util::reshapeTo(params, shape); auto reshape2 = ov::op::util::reshapeTo(params, shape); - auto result1 = std::make_shared(reshape1); - auto result2 = std::make_shared(reshape2); + auto result1 = std::make_shared(reshape1); + auto result2 = std::make_shared(reshape2); m_func = std::make_shared(ngraph::ResultVector{result1, result2}, ngraph::ParameterVector{params}, "nonfunc"); } { - auto params = std::make_shared(ngraph::element::i64, in_shape); + auto params = std::make_shared(ngraph::element::i64, in_shape); auto copy = std::make_shared(params); auto reshape1 = ov::op::util::reshapeTo(copy, shape); auto reshape2 = ov::op::util::reshapeTo(copy, shape); - auto result1 = std::make_shared(reshape1); - auto result2 = std::make_shared(reshape2); + auto result1 = std::make_shared(reshape1); + auto result2 = std::make_shared(reshape2); ref_func = std::make_shared(ngraph::ResultVector{result1, result2}, ngraph::ParameterVector{params}, "nonfunc"); @@ -1493,21 +1493,21 @@ TEST_P(InsertCopyLayerNonfuncTwoResultsTest, CompareWithRefs) { ngraph::Shape in_shape = {1, 2, 4}; { - auto params = std::make_shared(ngraph::element::i64, in_shape); + auto params = std::make_shared(ngraph::element::i64, in_shape); auto reshape = ov::op::util::reshapeTo(params, shape); - auto result1 = std::make_shared(reshape); - auto result2 = std::make_shared(reshape); + auto result1 = std::make_shared(reshape); + auto result2 = std::make_shared(reshape); m_func = std::make_shared(ngraph::ResultVector{result1, result2}, ngraph::ParameterVector{params}, "nonfunc"); } { - auto params = std::make_shared(ngraph::element::i64, in_shape); + auto params = std::make_shared(ngraph::element::i64, in_shape); auto copy = std::make_shared(params); auto reshape = ov::op::util::reshapeTo(copy, shape); - auto result1 = std::make_shared(reshape); - auto result2 = std::make_shared(reshape); + auto result1 = std::make_shared(reshape); + auto result2 = std::make_shared(reshape); ref_func = std::make_shared(ngraph::ResultVector{result1, result2}, ngraph::ParameterVector{params}, "nonfunc"); @@ -1545,13 +1545,13 @@ TEST_P(InsertCopyLayerNFLBranchTest, CompareWithRefs) { ngraph::Shape in_shape = {1, 2, 4}; { - auto params = std::make_shared(ngraph::element::i64, in_shape); + auto params = std::make_shared(ngraph::element::i64, in_shape); auto reshape = ov::op::util::reshapeTo(params, shape); auto reshape2 = ov::op::util::reshapeTo(reshape, shape); - auto result = std::make_shared(reshape2); + auto result = std::make_shared(reshape2); - auto relu = std::make_shared(reshape); - auto result_relu = std::make_shared(relu); + auto relu = std::make_shared(reshape); + auto result_relu = std::make_shared(relu); m_func = std::make_shared(ngraph::ResultVector{result, result_relu}, ngraph::ParameterVector{params}, @@ -1559,14 +1559,14 @@ TEST_P(InsertCopyLayerNFLBranchTest, CompareWithRefs) { } { - auto params = 
std::make_shared(ngraph::element::i64, in_shape); + auto params = std::make_shared(ngraph::element::i64, in_shape); auto reshape = ov::op::util::reshapeTo(params, shape); auto copy = std::make_shared(reshape); auto reshape2 = ov::op::util::reshapeTo(copy, shape); - auto result = std::make_shared(reshape2); + auto result = std::make_shared(reshape2); - auto relu = std::make_shared(reshape); - auto result_relu = std::make_shared(relu); + auto relu = std::make_shared(reshape); + auto result_relu = std::make_shared(relu); ref_func = std::make_shared(ngraph::ResultVector{result, result_relu}, ngraph::ParameterVector{params}, @@ -1605,13 +1605,13 @@ TEST_P(InsertCopyLayerNFLvsFLSubgraphTest, CompareWithRefs) { ngraph::Shape in_shape = {1, 2, 4}; { - auto params = std::make_shared(ngraph::element::i64, in_shape); + auto params = std::make_shared(ngraph::element::i64, in_shape); auto reshape = ov::op::util::reshapeTo(params, shape); - auto result = std::make_shared(reshape); + auto result = std::make_shared(reshape); - auto relu = std::make_shared(params); + auto relu = std::make_shared(params); auto reshape2 = ov::op::util::reshapeTo(relu, shape); - auto result_relu = std::make_shared(reshape2); + auto result_relu = std::make_shared(reshape2); m_func = std::make_shared(ngraph::ResultVector{result, result_relu}, ngraph::ParameterVector{params}, @@ -1619,14 +1619,14 @@ TEST_P(InsertCopyLayerNFLvsFLSubgraphTest, CompareWithRefs) { } { - auto params = std::make_shared(ngraph::element::i64, in_shape); + auto params = std::make_shared(ngraph::element::i64, in_shape); auto copy = std::make_shared(params); auto reshape = ov::op::util::reshapeTo(copy, shape); - auto result = std::make_shared(reshape); + auto result = std::make_shared(reshape); - auto relu = std::make_shared(params); + auto relu = std::make_shared(params); auto reshape2 = ov::op::util::reshapeTo(relu, shape); - auto result_relu = std::make_shared(reshape2); + auto result_relu = std::make_shared(reshape2); ref_func = std::make_shared(ngraph::ResultVector{result, result_relu}, ngraph::ParameterVector{params}, @@ -1664,28 +1664,28 @@ TEST_P(InsertCopyLayerSplitNFLConcatTest, CompareWithRefs) { size_t axis = 0; { - auto params = std::make_shared(ngraph::element::i64, input_shape); + auto params = std::make_shared(ngraph::element::i64, input_shape); OPENVINO_SUPPRESS_DEPRECATED_START auto split = ngraph::builder::makeSplit(params, ngraph::element::i64, 1, axis); OPENVINO_SUPPRESS_DEPRECATED_END auto reshape = ov::op::util::reshapeTo(split->output(0), shape); auto const_value = ngraph::builder::makeConstant(ngraph::element::i64, shape, std::vector{1}); - auto concat = std::make_shared(ngraph::OutputVector{reshape, const_value}, axis); - auto result = std::make_shared(concat); + auto concat = std::make_shared(ngraph::OutputVector{reshape, const_value}, axis); + auto result = std::make_shared(concat); m_func = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{params}, "Concat"); } { - auto params = std::make_shared(ngraph::element::i64, input_shape); + auto params = std::make_shared(ngraph::element::i64, input_shape); OPENVINO_SUPPRESS_DEPRECATED_START auto split = ngraph::builder::makeSplit(params, ngraph::element::i64, 1, axis); OPENVINO_SUPPRESS_DEPRECATED_END auto reshape = ov::op::util::reshapeTo(split->output(0), shape); auto copy = std::make_shared(reshape); auto const_value = ngraph::builder::makeConstant(ngraph::element::i64, shape, std::vector{1}); - auto concat = std::make_shared(ngraph::OutputVector{copy, 
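// Illustrative sketch only (not part of the patch): the memory-layer tests above
// wire a Variable through ReadValue/Assign and add a control dependency so the
// read is scheduled before the write. The ReadValue/Assign opset version is not
// visible in the stripped diff; the opset6 variants, which take the Variable
// object directly, are assumed here.
#include <memory>
#include <vector>
#include "openvino/core/model.hpp"
#include "openvino/op/add.hpp"
#include "openvino/op/assign.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/read_value.hpp"
#include "openvino/op/result.hpp"
#include "openvino/op/util/variable.hpp"

std::shared_ptr<ov::Model> make_memory_model_sketch() {
    const ov::Shape shape{1, 8};
    auto variable = std::make_shared<ov::op::util::Variable>(
        ov::op::util::VariableInfo{shape, ov::element::i64, "state"});
    auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, shape);
    auto init = ov::op::v0::Constant::create(ov::element::i64,
                                             shape,
                                             std::vector<int64_t>(ov::shape_size(shape), 0));
    auto read_value = std::make_shared<ov::op::v6::ReadValue>(init, variable);
    auto sum = std::make_shared<ov::op::v1::Add>(input, read_value);
    auto assign = std::make_shared<ov::op::v6::Assign>(sum, variable);
    assign->add_control_dependency(read_value);  // keep read-before-write ordering
    auto result = std::make_shared<ov::op::v0::Result>(sum);
    return std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{input});
}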
const_value}, axis); + auto concat = std::make_shared(ngraph::OutputVector{copy, const_value}, axis); - auto result = std::make_shared(concat); + auto result = std::make_shared(concat); ref_func = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{params}, "Concat"); } diff --git a/src/plugins/intel_gna/tests/unit/transformations/gna_insert_reshape_around_matmul.cpp b/src/plugins/intel_gna/tests/unit/transformations/gna_insert_reshape_around_matmul.cpp index f23acb81887e89..8b4f021e8cb67b 100644 --- a/src/plugins/intel_gna/tests/unit/transformations/gna_insert_reshape_around_matmul.cpp +++ b/src/plugins/intel_gna/tests/unit/transformations/gna_insert_reshape_around_matmul.cpp @@ -5,9 +5,9 @@ #include #include -#include #include #include +#include #include #include "common_test_utils/ov_test_utils.hpp" @@ -23,8 +23,8 @@ struct InsertReshapeAroundMatmulTest { const ngraph::Shape& constant_shape) { std::vector data(ngraph::shape_size(constant_shape)); std::iota(std::begin(data), std::end(data), 1); - auto constant = ngraph::opset8::Constant::create(ngraph::element::i64, constant_shape, data); - return std::make_shared(input, constant); + auto constant = ov::op::v0::Constant::create(ngraph::element::i64, constant_shape, data); + return std::make_shared(input, constant); } static std::shared_ptr CreateMatmul(std::shared_ptr input, @@ -32,9 +32,9 @@ struct InsertReshapeAroundMatmulTest { const ngraph::Shape& permutation_shape) { std::vector data(ngraph::shape_size(matmul_constant_shape)); std::iota(std::begin(data), std::end(data), 1); - auto constant = ngraph::opset8::Constant::create(ngraph::element::i64, matmul_constant_shape, data); + auto constant = ov::op::v0::Constant::create(ngraph::element::i64, matmul_constant_shape, data); std::shared_ptr node; - node = std::make_shared(input, constant); + node = std::make_shared(input, constant); if (ADD) { std::vector add_constant_shape(2, 1); @@ -52,28 +52,28 @@ struct InsertReshapeAroundMatmulTest { } auto constant_add = - ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{add_constant_shape}, data); + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{add_constant_shape}, data); if (ADD_FIRST_INPUT_NOT_CONSTANT) { - node = std::make_shared(node, constant_add); + node = std::make_shared(node, constant_add); } else { - node = std::make_shared(constant_add, node); + node = std::make_shared(constant_add, node); } } if (FQ) { - node = std::make_shared( + node = std::make_shared( node, - ngraph::opset8::Constant::create(ngraph::element::f32, {1}, {-0.1}), - ngraph::opset8::Constant::create(ngraph::element::f32, {1}, {0.1}), - ngraph::opset8::Constant::create(ngraph::element::f32, {1}, {-0.1}), - ngraph::opset8::Constant::create(ngraph::element::f32, {1}, {0.1}), + ov::op::v0::Constant::create(ngraph::element::f32, {1}, {-0.1}), + ov::op::v0::Constant::create(ngraph::element::f32, {1}, {0.1}), + ov::op::v0::Constant::create(ngraph::element::f32, {1}, {-0.1}), + ov::op::v0::Constant::create(ngraph::element::f32, {1}, {0.1}), 255); } if (TRANSPOSE) { - node = std::make_shared( + node = std::make_shared( node, - ngraph::opset8::Constant::create(ngraph::element::i64, {permutation_shape.size()}, permutation_shape)); + ov::op::v0::Constant::create(ngraph::element::i64, {permutation_shape.size()}, permutation_shape)); } return node; @@ -82,11 +82,11 @@ struct InsertReshapeAroundMatmulTest { static std::shared_ptr CreateFunction(const ngraph::Shape& input_shape, const ngraph::Shape& matmul_constant_shape, 
const ngraph::Shape& permutation_shape = ngraph::Shape()) { - auto input = std::make_shared(ngraph::element::i64, input_shape); - auto before = std::make_shared(input); + auto input = std::make_shared(ngraph::element::i64, input_shape); + auto before = std::make_shared(input); auto matmul = CreateMatmul(before, matmul_constant_shape, permutation_shape); - auto after = std::make_shared(matmul); - return std::make_shared(ngraph::ResultVector{std::make_shared(after)}, + auto after = std::make_shared(matmul); + return std::make_shared(ngraph::ResultVector{std::make_shared(after)}, ngraph::ParameterVector{input}); } @@ -96,19 +96,19 @@ struct InsertReshapeAroundMatmulTest { const ngraph::Shape& matmul_constant_shape, const ngraph::Shape& reshape_after_shape, const ngraph::Shape& permutation_shape = ngraph::Shape()) { - auto input = std::make_shared(ngraph::element::i64, input_shape); - auto before = std::make_shared(input); - auto reshape_before_constant = ngraph::opset8::Constant::create(ngraph::element::i64, - ngraph::Shape{reshape_before_shape.size()}, - reshape_before_shape); - auto reshape_before = std::make_shared(before, reshape_before_constant, false); + auto input = std::make_shared(ngraph::element::i64, input_shape); + auto before = std::make_shared(input); + auto reshape_before_constant = ov::op::v0::Constant::create(ngraph::element::i64, + ngraph::Shape{reshape_before_shape.size()}, + reshape_before_shape); + auto reshape_before = std::make_shared(before, reshape_before_constant, false); auto matmul = CreateMatmul(reshape_before, matmul_constant_shape, permutation_shape); - auto reshape_after_constant = ngraph::opset8::Constant::create(ngraph::element::i64, - ngraph::Shape{reshape_after_shape.size()}, - reshape_after_shape); - auto reshape_after = std::make_shared(matmul, reshape_after_constant, false); - auto after = std::make_shared(reshape_after); - return std::make_shared(ngraph::ResultVector{std::make_shared(after)}, + auto reshape_after_constant = ov::op::v0::Constant::create(ngraph::element::i64, + ngraph::Shape{reshape_after_shape.size()}, + reshape_after_shape); + auto reshape_after = std::make_shared(matmul, reshape_after_constant, false); + auto after = std::make_shared(reshape_after); + return std::make_shared(ngraph::ResultVector{std::make_shared(after)}, ngraph::ParameterVector{input}); } }; // struct InsertReshapeAroundMatmulTest diff --git a/src/plugins/intel_gna/tests/unit/transformations/gna_insert_transpose_after_convolution_or_pooling.cpp b/src/plugins/intel_gna/tests/unit/transformations/gna_insert_transpose_after_convolution_or_pooling.cpp index 5e068db48219c5..a1ffcc75fe78bb 100644 --- a/src/plugins/intel_gna/tests/unit/transformations/gna_insert_transpose_after_convolution_or_pooling.cpp +++ b/src/plugins/intel_gna/tests/unit/transformations/gna_insert_transpose_after_convolution_or_pooling.cpp @@ -10,6 +10,7 @@ #include #include "common_test_utils/ov_test_utils.hpp" +#include "openvino/opsets/opset7.hpp" #include "transformations/insert_transpose_after_convolution_or_pooling.hpp" namespace testing { @@ -19,29 +20,29 @@ TEST(TransformationTests, InsertTransposeAfterConvOrPoolTestStartConvolution) { { auto input_params_convolution = - std::make_shared(ngraph::element::i64, ngraph::Shape{1, 3, 1, 64}); + std::make_shared(ngraph::element::i64, ngraph::Shape{1, 3, 1, 64}); - auto weights = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{3, 3, 1, 2}, {1}); - auto convolution_operation = std::make_shared(input_params_convolution, - weights, - 
ngraph::Strides{1, 1}, - ngraph::CoordinateDiff{0, 0}, - ngraph::CoordinateDiff{0, 1}, - ngraph::Strides{1, 1}); + auto weights = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{3, 3, 1, 2}, {1}); + auto convolution_operation = std::make_shared(input_params_convolution, + weights, + ngraph::Strides{1, 1}, + ngraph::CoordinateDiff{0, 0}, + ngraph::CoordinateDiff{0, 1}, + ngraph::Strides{1, 1}); - auto new_shape = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {1, 1, 1, 3 * 64}); - auto reshape_operation = std::make_shared(convolution_operation, new_shape, true); + auto new_shape = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {1, 1, 1, 3 * 64}); + auto reshape_operation = std::make_shared(convolution_operation, new_shape, true); auto weights_next_convolution = - ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{1, 1, 1, 3 * 63}, {1}); - auto next_convolution_operation = std::make_shared(reshape_operation, - weights_next_convolution, - ngraph::Strides{1, 1}, - ngraph::CoordinateDiff{0, 0}, - ngraph::CoordinateDiff{0, 1}, - ngraph::Strides{1, 1}); - - auto result = std::make_shared(next_convolution_operation); + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{1, 1, 1, 3 * 63}, {1}); + auto next_convolution_operation = std::make_shared(reshape_operation, + weights_next_convolution, + ngraph::Strides{1, 1}, + ngraph::CoordinateDiff{0, 0}, + ngraph::CoordinateDiff{0, 1}, + ngraph::Strides{1, 1}); + + auto result = std::make_shared(next_convolution_operation); func = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params_convolution}); ngraph::pass::Manager m; @@ -53,37 +54,36 @@ TEST(TransformationTests, InsertTransposeAfterConvOrPoolTestStartConvolution) { { auto input_params_convolution = - std::make_shared(ngraph::element::i64, ngraph::Shape{1, 3, 1, 64}); + std::make_shared(ngraph::element::i64, ngraph::Shape{1, 3, 1, 64}); - auto weights = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{3, 3, 1, 2}, {1}); - auto convolution_operation = std::make_shared(input_params_convolution, - weights, - ngraph::Strides{1, 1}, - ngraph::CoordinateDiff{0, 0}, - ngraph::CoordinateDiff{0, 1}, - ngraph::Strides{1, 1}); + auto weights = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{3, 3, 1, 2}, {1}); + auto convolution_operation = std::make_shared(input_params_convolution, + weights, + ngraph::Strides{1, 1}, + ngraph::CoordinateDiff{0, 0}, + ngraph::CoordinateDiff{0, 1}, + ngraph::Strides{1, 1}); - auto new_shape_out = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {1, 64, 1, 3}); - auto reshape_out_operation = - std::make_shared(convolution_operation, new_shape_out, false); + auto new_shape_out = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {1, 64, 1, 3}); + auto reshape_out_operation = std::make_shared(convolution_operation, new_shape_out, false); - auto transpose = std::make_shared( + auto transpose = std::make_shared( reshape_out_operation, - ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 3, 1, 2})); + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 3, 1, 2})); - auto new_shape = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {1, 1, 1, 3 * 64}); - auto reshape_operation = std::make_shared(transpose, new_shape, true); + auto new_shape = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {1, 
1, 1, 3 * 64}); + auto reshape_operation = std::make_shared(transpose, new_shape, true); auto weights_next_convolution = - ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{1, 1, 1, 3 * 63}, {1}); - auto next_convolution_operation = std::make_shared(reshape_operation, - weights_next_convolution, - ngraph::Strides{1, 1}, - ngraph::CoordinateDiff{0, 0}, - ngraph::CoordinateDiff{0, 1}, - ngraph::Strides{1, 1}); - - auto result = std::make_shared(next_convolution_operation); + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{1, 1, 1, 3 * 63}, {1}); + auto next_convolution_operation = std::make_shared(reshape_operation, + weights_next_convolution, + ngraph::Strides{1, 1}, + ngraph::CoordinateDiff{0, 0}, + ngraph::CoordinateDiff{0, 1}, + ngraph::Strides{1, 1}); + + auto result = std::make_shared(next_convolution_operation); reference_func = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params_convolution}); } @@ -98,28 +98,27 @@ TEST(TransformationTests, InsertTransposeAfterConvOrPoolTestStartMaxPool) { std::shared_ptr func(nullptr), reference_func(nullptr); { - auto input_params = - std::make_shared(ngraph::element::i64, ngraph::Shape{1, 3, 1, 64}); + auto input_params = std::make_shared(ngraph::element::i64, ngraph::Shape{1, 3, 1, 64}); - auto max_pool_operation = std::make_shared(input_params, - ngraph::Strides{1, 1}, - ngraph::Shape{0, 0}, - ngraph::Shape{0, 1}, - ngraph::Shape{1, 2}); + auto max_pool_operation = std::make_shared(input_params, + ngraph::Strides{1, 1}, + ngraph::Shape{0, 0}, + ngraph::Shape{0, 1}, + ngraph::Shape{1, 2}); - auto new_shape = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {1, 1, 1, 3 * 64}); - auto reshape_operation = std::make_shared(max_pool_operation, new_shape, true); + auto new_shape = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {1, 1, 1, 3 * 64}); + auto reshape_operation = std::make_shared(max_pool_operation, new_shape, true); auto weights_next_convolution = - ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{1, 1, 1, 3 * 63}, {1}); - auto next_convolution_operation = std::make_shared(reshape_operation, - weights_next_convolution, - ngraph::Strides{1, 1}, - ngraph::CoordinateDiff{0, 0}, - ngraph::CoordinateDiff{0, 1}, - ngraph::Strides{1, 1}); - - auto result = std::make_shared(next_convolution_operation); + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{1, 1, 1, 3 * 63}, {1}); + auto next_convolution_operation = std::make_shared(reshape_operation, + weights_next_convolution, + ngraph::Strides{1, 1}, + ngraph::CoordinateDiff{0, 0}, + ngraph::CoordinateDiff{0, 1}, + ngraph::Strides{1, 1}); + + auto result = std::make_shared(next_convolution_operation); func = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params}); ngraph::pass::Manager m; m.register_pass(); @@ -129,36 +128,34 @@ TEST(TransformationTests, InsertTransposeAfterConvOrPoolTestStartMaxPool) { } { - auto input_params = - std::make_shared(ngraph::element::i64, ngraph::Shape{1, 3, 1, 64}); + auto input_params = std::make_shared(ngraph::element::i64, ngraph::Shape{1, 3, 1, 64}); - auto max_pool_operation = std::make_shared(input_params, - ngraph::Strides{1, 1}, - ngraph::Shape{0, 0}, - ngraph::Shape{0, 1}, - ngraph::Shape{1, 2}); + auto max_pool_operation = std::make_shared(input_params, + ngraph::Strides{1, 1}, + ngraph::Shape{0, 0}, + ngraph::Shape{0, 1}, + ngraph::Shape{1, 2}); - auto new_shape_out = 
ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {1, 64, 1, 3}); - auto reshape_out_operation = - std::make_shared(max_pool_operation, new_shape_out, false); + auto new_shape_out = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {1, 64, 1, 3}); + auto reshape_out_operation = std::make_shared(max_pool_operation, new_shape_out, false); - auto transpose = std::make_shared( + auto transpose = std::make_shared( reshape_out_operation, - ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 3, 1, 2})); + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 3, 1, 2})); - auto new_shape = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {1, 1, 1, 3 * 64}); - auto reshape_operation = std::make_shared(transpose, new_shape, true); + auto new_shape = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {1, 1, 1, 3 * 64}); + auto reshape_operation = std::make_shared(transpose, new_shape, true); auto weights_next_convolution = - ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{1, 1, 1, 3 * 63}, {1}); - auto next_convolution_operation = std::make_shared(reshape_operation, - weights_next_convolution, - ngraph::Strides{1, 1}, - ngraph::CoordinateDiff{0, 0}, - ngraph::CoordinateDiff{0, 1}, - ngraph::Strides{1, 1}); - - auto result = std::make_shared(next_convolution_operation); + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{1, 1, 1, 3 * 63}, {1}); + auto next_convolution_operation = std::make_shared(reshape_operation, + weights_next_convolution, + ngraph::Strides{1, 1}, + ngraph::CoordinateDiff{0, 0}, + ngraph::CoordinateDiff{0, 1}, + ngraph::Strides{1, 1}); + + auto result = std::make_shared(next_convolution_operation); reference_func = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params}); } @@ -174,29 +171,29 @@ TEST(TransformationTests, InsertTransposeAfterConvOrPoolTestInputRank3) { { auto input_params_convolution = - std::make_shared(ngraph::element::i64, ngraph::Shape{1, 3, 64}); + std::make_shared(ngraph::element::i64, ngraph::Shape{1, 3, 64}); - auto weights = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2, 3, 2}, {1}); - auto convolution_operation = std::make_shared(input_params_convolution, - weights, - ngraph::Strides{1}, - ngraph::CoordinateDiff{0}, - ngraph::CoordinateDiff{1}, - ngraph::Strides{1}); + auto weights = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{2, 3, 2}, {1}); + auto convolution_operation = std::make_shared(input_params_convolution, + weights, + ngraph::Strides{1}, + ngraph::CoordinateDiff{0}, + ngraph::CoordinateDiff{1}, + ngraph::Strides{1}); - auto new_shape = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{3}, {1, 1, 128}); - auto reshape_operation = std::make_shared(convolution_operation, new_shape, true); + auto new_shape = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{3}, {1, 1, 128}); + auto reshape_operation = std::make_shared(convolution_operation, new_shape, true); auto weights_next_convolution = - ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{1, 1, 63}, {1}); - auto next_convolution_operation = std::make_shared(reshape_operation, - weights_next_convolution, - ngraph::Strides{1}, - ngraph::CoordinateDiff{0}, - ngraph::CoordinateDiff{1}, - ngraph::Strides{1}); + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{1, 1, 63}, {1}); + auto 
next_convolution_operation = std::make_shared(reshape_operation, + weights_next_convolution, + ngraph::Strides{1}, + ngraph::CoordinateDiff{0}, + ngraph::CoordinateDiff{1}, + ngraph::Strides{1}); - auto result = std::make_shared(next_convolution_operation); + auto result = std::make_shared(next_convolution_operation); func = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params_convolution}); @@ -209,37 +206,36 @@ TEST(TransformationTests, InsertTransposeAfterConvOrPoolTestInputRank3) { { auto input_params_convolution = - std::make_shared(ngraph::element::i64, ngraph::Shape{1, 3, 64}); + std::make_shared(ngraph::element::i64, ngraph::Shape{1, 3, 64}); - auto weights = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2, 3, 2}, {1}); - auto convolution_operation = std::make_shared(input_params_convolution, - weights, - ngraph::Strides{1}, - ngraph::CoordinateDiff{0}, - ngraph::CoordinateDiff{1}, - ngraph::Strides{1}); + auto weights = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{2, 3, 2}, {1}); + auto convolution_operation = std::make_shared(input_params_convolution, + weights, + ngraph::Strides{1}, + ngraph::CoordinateDiff{0}, + ngraph::CoordinateDiff{1}, + ngraph::Strides{1}); - auto new_shape_out = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{3}, {1, 64, 2}); - auto reshape_out_operation = - std::make_shared(convolution_operation, new_shape_out, false); + auto new_shape_out = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{3}, {1, 64, 2}); + auto reshape_out_operation = std::make_shared(convolution_operation, new_shape_out, false); - auto transpose = std::make_shared( + auto transpose = std::make_shared( reshape_out_operation, - ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{3}, {0, 2, 1})); + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{3}, {0, 2, 1})); - auto new_shape = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{3}, {1, 1, 128}); - auto reshape_operation = std::make_shared(transpose, new_shape, true); + auto new_shape = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{3}, {1, 1, 128}); + auto reshape_operation = std::make_shared(transpose, new_shape, true); auto weights_next_convolution = - ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{1, 1, 63}, {1}); - auto next_convolution_operation = std::make_shared(reshape_operation, - weights_next_convolution, - ngraph::Strides{1}, - ngraph::CoordinateDiff{0}, - ngraph::CoordinateDiff{1}, - ngraph::Strides{1}); - - auto result = std::make_shared(next_convolution_operation); + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{1, 1, 63}, {1}); + auto next_convolution_operation = std::make_shared(reshape_operation, + weights_next_convolution, + ngraph::Strides{1}, + ngraph::CoordinateDiff{0}, + ngraph::CoordinateDiff{1}, + ngraph::Strides{1}); + + auto result = std::make_shared(next_convolution_operation); reference_func = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params_convolution}); } @@ -252,13 +248,13 @@ TEST(TransformationTests, InsertTransposeAfterConvOrPoolTestInputRank3) { std::shared_ptr CreatePoolConvFunction(const ngraph::Shape& input_shape, const ngraph::Shape& pool_kernel_shape) { - auto input_params = std::make_shared(ngraph::element::i64, input_shape); + auto input_params = std::make_shared(ngraph::element::i64, input_shape); - auto max_pool_operation = 
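// Illustrative sketch only (not part of the patch): the transpose-insertion tests
// above build Convolution -> Transpose -> Reshape chains directly from ov::op::vN
// classes instead of ngraph::opset7. The shapes and the v1 op versions below are
// assumptions chosen to keep the sketch minimal and valid.
#include <memory>
#include "openvino/core/model.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/convolution.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/reshape.hpp"
#include "openvino/op/result.hpp"
#include "openvino/op/transpose.hpp"

std::shared_ptr<ov::Model> make_conv_transpose_reshape_sketch() {
    auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 3, 1, 64});
    auto weights = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{8, 3, 1, 2}, {1.0f});
    auto conv = std::make_shared<ov::op::v1::Convolution>(input,
                                                          weights,
                                                          ov::Strides{1, 1},
                                                          ov::CoordinateDiff{0, 0},
                                                          ov::CoordinateDiff{0, 1},
                                                          ov::Strides{1, 1});
    // NCHW -> NHWC style permutation inserted after the convolution.
    auto order = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{4}, {0, 2, 3, 1});
    auto transpose = std::make_shared<ov::op::v1::Transpose>(conv, order);
    auto pattern = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{4}, {1, 1, 1, -1});
    auto reshape = std::make_shared<ov::op::v1::Reshape>(transpose, pattern, false);
    auto result = std::make_shared<ov::op::v0::Result>(reshape);
    return std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{input});
}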
std::make_shared(input_params, - pool_kernel_shape, - ngraph::Shape{0, 0}, - ngraph::Shape{0, 1}, - pool_kernel_shape); + auto max_pool_operation = std::make_shared(input_params, + pool_kernel_shape, + ngraph::Shape{0, 0}, + ngraph::Shape{0, 1}, + pool_kernel_shape); auto pool_out_shape = max_pool_operation->get_output_shape(0); ngraph::Shape new_shape = { @@ -266,18 +262,18 @@ std::shared_ptr CreatePoolConvFunction(const ngraph::Shape& in 1, 1, std::accumulate(std::begin(pool_out_shape), std::end(pool_out_shape), size_t{1}, std::multiplies())}; - auto new_shape_const = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{4}, new_shape); - auto reshape_operation = std::make_shared(max_pool_operation, new_shape_const, true); - - auto weights_next_convolution = ngraph::opset7::Constant::create(ngraph::element::i64, new_shape, {1}); - auto next_convolution_operation = std::make_shared(reshape_operation, - weights_next_convolution, - ngraph::Strides{1, 1}, - ngraph::CoordinateDiff{0, 0}, - ngraph::CoordinateDiff{0, 1}, - ngraph::Strides{1, 1}); - - auto result = std::make_shared(next_convolution_operation); + auto new_shape_const = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, new_shape); + auto reshape_operation = std::make_shared(max_pool_operation, new_shape_const, true); + + auto weights_next_convolution = ov::op::v0::Constant::create(ngraph::element::i64, new_shape, {1}); + auto next_convolution_operation = std::make_shared(reshape_operation, + weights_next_convolution, + ngraph::Strides{1, 1}, + ngraph::CoordinateDiff{0, 0}, + ngraph::CoordinateDiff{0, 1}, + ngraph::Strides{1, 1}); + + auto result = std::make_shared(next_convolution_operation); return std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params}); } diff --git a/src/plugins/intel_gna/tests/unit/transformations/gna_pwl.cpp b/src/plugins/intel_gna/tests/unit/transformations/gna_pwl.cpp index 58f493a9f15659..24b846f2ef6ae6 100644 --- a/src/plugins/intel_gna/tests/unit/transformations/gna_pwl.cpp +++ b/src/plugins/intel_gna/tests/unit/transformations/gna_pwl.cpp @@ -11,6 +11,7 @@ #include "common_test_utils/data_utils.hpp" #include "common_test_utils/ov_test_utils.hpp" +#include "openvino/opsets/opset9.hpp" #include "transformations/pwl_approximation.hpp" using namespace ov::intel_gna::common; @@ -21,7 +22,7 @@ template struct Function {}; template <> -struct Function { +struct Function { static std::function get_function() { return [](const double x) { return 0.5 * (1.0 + std::tanh(x / 2.0)); @@ -30,7 +31,7 @@ struct Function { }; template <> -struct Function { +struct Function { static std::function get_function() { return [](const double x) { return std::tanh(x); @@ -39,7 +40,7 @@ struct Function { }; template <> -struct Function { +struct Function { static std::function get_function() { return [](const double x) { return x / (1.0 + std::abs(x)); @@ -48,7 +49,7 @@ struct Function { }; template <> -struct Function { +struct Function { static std::function get_function() { return [](const double x) { return std::log(x); @@ -57,7 +58,7 @@ struct Function { }; template <> -struct Function { +struct Function { static std::function get_function() { return [](const double x) { return std::exp(x); @@ -66,7 +67,7 @@ struct Function { }; template <> -struct Function { +struct Function { static std::function get_function(double exp) { return [exp](const double x) { return std::pow(x, exp); @@ -75,13 +76,12 @@ struct Function { }; template -using Enable = - 
std::enable_if::value || std::is_same::value || - std::is_same::value || - std::is_same::value || std::is_same::value, - int>; +using Enable = std::enable_if::value || std::is_same::value || + std::is_same::value || + std::is_same::value || std::is_same::value, + int>; template -using EnableWithExtraArg = std::enable_if::value, int>; +using EnableWithExtraArg = std::enable_if::value, int>; template class GnaPWlTestsFixture { @@ -152,9 +152,9 @@ template template inline std::shared_ptr GnaPWlTestsFixture::create_activation_function( const ngraph::Shape& input_shape) { - auto input_params = std::make_shared(ngraph::element::f32, input_shape); + auto input_params = std::make_shared(ngraph::element::f32, input_shape); auto f = std::make_shared(input_params); - auto result = std::make_shared(f); + auto result = std::make_shared(f); return std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params}); } @@ -163,10 +163,10 @@ template inline std::shared_ptr GnaPWlTestsFixture::create_activation_function( const ngraph::Shape& input_shape, double exp) { - auto input_params = std::make_shared(ngraph::element::f32, input_shape); - auto exponents = ngraph::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{}, {exp}); + auto input_params = std::make_shared(ngraph::element::f32, input_shape); + auto exponents = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{}, {exp}); auto f = std::make_shared(input_params, exponents); - auto result = std::make_shared(f); + auto result = std::make_shared(f); return std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params}); } @@ -242,37 +242,37 @@ inline void GnaPWlTestsFixture::validate_results(const std::vector& in } TEST(GnaPwlTest, Sigmoid) { - GnaPWlTestsFixture test_instance({1, 100}, -10.0, 10.0, 1.0); + GnaPWlTestsFixture test_instance({1, 100}, -10.0, 10.0, 1.0); test_instance.run(); } TEST(GnaPwlTest, Tanh) { - GnaPWlTestsFixture test_instance({1, 32}, -5.0, 5.0, 1.0); + GnaPWlTestsFixture test_instance({1, 32}, -5.0, 5.0, 1.0); test_instance.run(); } TEST(GnaPwlTest, Exp) { - GnaPWlTestsFixture test_instance({1, 32}, -std::log2(INT16_MAX), std::log10(INT16_MAX), 1.0); + GnaPWlTestsFixture test_instance({1, 32}, -std::log2(INT16_MAX), std::log10(INT16_MAX), 1.0); test_instance.run(); } TEST(GnaPwlTest, SoftSign) { - GnaPWlTestsFixture test_instance({1, 32}, -10, 10, 1.0); + GnaPWlTestsFixture test_instance({1, 32}, -10, 10, 1.0); test_instance.run(); } TEST(GnaPwlTest, Log) { - GnaPWlTestsFixture test_instance({1, 32}, 0.001, 2981, 1.0); + GnaPWlTestsFixture test_instance({1, 32}, 0.001, 2981, 1.0); test_instance.run(); } TEST(GnaPwlTest, Power) { for (float exp = 1; exp <= 2.2; exp += 0.1) { - GnaPWlTestsFixture test_instance({1, 32}, - AreFpEq(std::fmod(exp, 1.0), 0.0) ? -16 : 0, - 16, - exp, - 1.0); + GnaPWlTestsFixture test_instance({1, 32}, + AreFpEq(std::fmod(exp, 1.0), 0.0) ? 
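// Illustrative sketch only (not part of the patch): the PWL fixtures above are
// specialized on activation op classes (Sigmoid, Tanh, Exp, Log, Power, SoftSign)
// whose template arguments are stripped in this diff. A minimal activation model
// in the migrated style, assuming the opset9 Sigmoid alias resolves to
// ov::op::v0::Sigmoid:
#include <memory>
#include "openvino/core/model.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/result.hpp"
#include "openvino/op/sigmoid.hpp"

std::shared_ptr<ov::Model> make_sigmoid_model_sketch() {
    auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 100});
    auto sigmoid = std::make_shared<ov::op::v0::Sigmoid>(input);
    auto result = std::make_shared<ov::op::v0::Result>(sigmoid);
    return std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{input});
}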
-16 : 0, + 16, + exp, + 1.0); test_instance.run(); } } diff --git a/src/plugins/intel_gna/tests/unit/transformations/gna_remove_convert.cpp b/src/plugins/intel_gna/tests/unit/transformations/gna_remove_convert.cpp index 0b31e229a525f3..5746f1dae21a50 100644 --- a/src/plugins/intel_gna/tests/unit/transformations/gna_remove_convert.cpp +++ b/src/plugins/intel_gna/tests/unit/transformations/gna_remove_convert.cpp @@ -5,11 +5,11 @@ #include #include -#include #include #include #include "common_test_utils/ov_test_utils.hpp" +#include "openvino/opsets/opset8.hpp" #include "ov_models/builders.hpp" #include "transformations/remove_converts.hpp" @@ -54,12 +54,12 @@ void RemoveInputConvertTest::SetUp() { // test function { - auto params = std::make_shared(target_precision_, input_shape); + auto params = std::make_shared(target_precision_, input_shape); auto conversion = std::make_shared(params, net_precision_); - auto add_const = ngraph::opset8::Constant::create(net_precision_, input_shape, {10}); - auto add = std::make_shared(conversion, add_const); + auto add_const = ov::op::v0::Constant::create(net_precision_, input_shape, {10}); + auto add = std::make_shared(conversion, add_const); - auto result = std::make_shared(add); + auto result = std::make_shared(add); func_ = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{params}, "Conversion"); @@ -67,11 +67,11 @@ void RemoveInputConvertTest::SetUp() { // ref function convert should be removed { - auto params = std::make_shared(net_precision_, input_shape); - auto add_const = ngraph::opset8::Constant::create(net_precision_, input_shape, {10}); - auto add = std::make_shared(params, add_const); + auto params = std::make_shared(net_precision_, input_shape); + auto add_const = ov::op::v0::Constant::create(net_precision_, input_shape, {10}); + auto add = std::make_shared(params, add_const); - auto result = std::make_shared(add); + auto result = std::make_shared(add); ref_func_no_convert_ = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{params}, "Conversion"); @@ -111,11 +111,11 @@ class RemoveOutputConvertTest : public RemoveInputConvertTest { // test function { - auto params = std::make_shared(net_precision_, input_shape); - auto add_const = ngraph::opset8::Constant::create(net_precision_, input_shape, {10}); - auto add = std::make_shared(params, add_const); + auto params = std::make_shared(net_precision_, input_shape); + auto add_const = ov::op::v0::Constant::create(net_precision_, input_shape, {10}); + auto add = std::make_shared(params, add_const); auto conversion = std::make_shared(add, target_precision_); - auto result = std::make_shared(conversion); + auto result = std::make_shared(conversion); func_ = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{params}, "Conversion"); @@ -123,11 +123,11 @@ class RemoveOutputConvertTest : public RemoveInputConvertTest { // ref function { - auto params = std::make_shared(net_precision_, input_shape); - auto add_const = ngraph::opset8::Constant::create(net_precision_, input_shape, {10}); - auto add = std::make_shared(params, add_const); + auto params = std::make_shared(net_precision_, input_shape); + auto add_const = ov::op::v0::Constant::create(net_precision_, input_shape, {10}); + auto add = std::make_shared(params, add_const); - auto result = std::make_shared(add); + auto result = std::make_shared(add); ref_func_no_convert_ = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{params}, "Conversion"); @@ -167,12 +167,12 @@ 
class LeaveConvertTest : public RemoveInputConvertTest { // test function { - auto params = std::make_shared(net_precision_, input_shape); - auto add_const = ngraph::opset8::Constant::create(net_precision_, input_shape, {10}); - auto add1 = std::make_shared(params, add_const); + auto params = std::make_shared(net_precision_, input_shape); + auto add_const = ov::op::v0::Constant::create(net_precision_, input_shape, {10}); + auto add1 = std::make_shared(params, add_const); auto conversion = std::make_shared(add1, net_precision_); - auto add2 = std::make_shared(conversion, add_const); - auto result = std::make_shared(add2); + auto add2 = std::make_shared(conversion, add_const); + auto result = std::make_shared(add2); func_ = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{params}, "Conversion"); @@ -212,7 +212,7 @@ class RemoveMultiInputsConvertTest : public RemoveInputConvertTest { auto convert3 = std::make_shared(input[2], net_precision_); auto mul1 = ngraph::builder::makeEltwise(convert1, convert2, ngraph::helpers::EltwiseTypes::ADD); auto mul2 = ngraph::builder::makeEltwise(convert3, mul1, ngraph::helpers::EltwiseTypes::ADD); - auto result = std::make_shared(mul2); + auto result = std::make_shared(mul2); func_ = std::make_shared(ngraph::ResultVector{result}, input, "multiple_input"); } @@ -223,7 +223,7 @@ class RemoveMultiInputsConvertTest : public RemoveInputConvertTest { std::make_shared(net_precision_, input_shape)}; auto mul1 = ngraph::builder::makeEltwise(input[0], input[1], ngraph::helpers::EltwiseTypes::ADD); auto mul2 = ngraph::builder::makeEltwise(input[2], mul1, ngraph::helpers::EltwiseTypes::ADD); - auto result = std::make_shared(mul2); + auto result = std::make_shared(mul2); ref_func_no_convert_ = std::make_shared(ngraph::ResultVector{result}, input, "multiple_input"); } @@ -248,8 +248,8 @@ class RemoveMultiOutputsConvertTest : public RemoveOutputConvertTest { auto mul2 = ngraph::builder::makeEltwise(input[2], input[3], ngraph::helpers::EltwiseTypes::ADD); auto convert1 = std::make_shared(mul1, target_precision_); auto convert2 = std::make_shared(mul2, target_precision_); - auto result1 = std::make_shared(convert1); - auto result2 = std::make_shared(convert2); + auto result1 = std::make_shared(convert1); + auto result2 = std::make_shared(convert2); func_ = std::make_shared(ngraph::ResultVector{result1, result2}, input, "multiple_output"); @@ -263,8 +263,8 @@ class RemoveMultiOutputsConvertTest : public RemoveOutputConvertTest { std::make_shared(net_precision_, input_shape)}; auto mul1 = ngraph::builder::makeEltwise(input[0], input[1], ngraph::helpers::EltwiseTypes::ADD); auto mul2 = ngraph::builder::makeEltwise(input[2], input[3], ngraph::helpers::EltwiseTypes::ADD); - auto result1 = std::make_shared(mul1); - auto result2 = std::make_shared(mul2); + auto result1 = std::make_shared(mul1); + auto result2 = std::make_shared(mul2); ref_func_no_convert_ = std::make_shared(ngraph::ResultVector{result1, result2}, input, "multiple_output"); @@ -292,9 +292,9 @@ class RemoveOutputConvertConnectedToLayerTest : public RemoveOutputConvertTest { auto convert1 = std::make_shared(mul1, target_precision_); auto convert2 = std::make_shared(mul2, target_precision_); auto convert3 = std::make_shared(mul3, target_precision_); - auto result1 = std::make_shared(convert1); - auto result2 = std::make_shared(convert2); - auto result3 = std::make_shared(convert3); + auto result1 = std::make_shared(convert1); + auto result2 = std::make_shared(convert2); + auto result3 = 
std::make_shared(convert3); func_ = std::make_shared(ngraph::ResultVector{result1, result2, result3}, input, @@ -310,9 +310,9 @@ class RemoveOutputConvertConnectedToLayerTest : public RemoveOutputConvertTest { auto mul1 = ngraph::builder::makeEltwise(input[0], input[1], ngraph::helpers::EltwiseTypes::ADD); auto mul2 = ngraph::builder::makeEltwise(input[2], input[3], ngraph::helpers::EltwiseTypes::ADD); auto mul3 = ngraph::builder::makeEltwise(mul1, mul2, ngraph::helpers::EltwiseTypes::ADD); - auto result1 = std::make_shared(mul1); - auto result2 = std::make_shared(mul2); - auto result3 = std::make_shared(mul3); + auto result1 = std::make_shared(mul1); + auto result2 = std::make_shared(mul2); + auto result3 = std::make_shared(mul3); ref_func_no_convert_ = std::make_shared(ngraph::ResultVector{result1, result2, result3}, input, diff --git a/src/plugins/intel_gna/tests/unit/transformations/gna_remove_extra_reshapes.cpp b/src/plugins/intel_gna/tests/unit/transformations/gna_remove_extra_reshapes.cpp index 842681b8d34f47..3ffecba3f60074 100644 --- a/src/plugins/intel_gna/tests/unit/transformations/gna_remove_extra_reshapes.cpp +++ b/src/plugins/intel_gna/tests/unit/transformations/gna_remove_extra_reshapes.cpp @@ -10,6 +10,7 @@ #include #include "common_test_utils/ov_test_utils.hpp" +#include "openvino/opsets/opset7.hpp" #include "transformations/remove_extra_reshapes.hpp" namespace testing { @@ -19,15 +20,15 @@ TEST(TransformationTests, RemoveExtraReshapesTestReshapeNotEqualInputOutput) { const ngraph::Shape data_shape{1, 3, 64, 64}; { - auto input_params = std::make_shared(ngraph::element::f32, data_shape); - auto new_shape = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{3}, {1, 3, 64 * 64}); - auto reshape_operation = std::make_shared(input_params, new_shape, true); - auto max_pool_operation = std::make_shared(reshape_operation, - ngraph::Strides{1}, - ngraph::Shape{0}, - ngraph::Shape{0}, - ngraph::Shape{3}); - auto result = std::make_shared(max_pool_operation); + auto input_params = std::make_shared(ngraph::element::f32, data_shape); + auto new_shape = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{3}, {1, 3, 64 * 64}); + auto reshape_operation = std::make_shared(input_params, new_shape, true); + auto max_pool_operation = std::make_shared(reshape_operation, + ngraph::Strides{1}, + ngraph::Shape{0}, + ngraph::Shape{0}, + ngraph::Shape{3}); + auto result = std::make_shared(max_pool_operation); func = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params}); reference_func = ngraph::clone_function(*func); @@ -50,15 +51,15 @@ TEST(TransformationTests, RemoveExtraReshapesTestReshapeEqualInputOutput) { const ngraph::Shape data_shape{1, 3, 64, 64}; { - auto input_params = std::make_shared(ngraph::element::f32, data_shape); - auto new_shape = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {1, 3, 64, 64}); - auto reshape_operation = std::make_shared(input_params, new_shape, true); - auto max_pool_operation = std::make_shared(reshape_operation, - ngraph::Strides{1, 1}, - ngraph::Shape{0, 0}, - ngraph::Shape{0, 0}, - ngraph::Shape{3, 3}); - auto result = std::make_shared(max_pool_operation); + auto input_params = std::make_shared(ngraph::element::f32, data_shape); + auto new_shape = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {1, 3, 64, 64}); + auto reshape_operation = std::make_shared(input_params, new_shape, true); + auto max_pool_operation = std::make_shared(reshape_operation, + 
ngraph::Strides{1, 1}, + ngraph::Shape{0, 0}, + ngraph::Shape{0, 0}, + ngraph::Shape{3, 3}); + auto result = std::make_shared(max_pool_operation); func = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params}); ngraph::pass::Manager m; @@ -69,13 +70,13 @@ TEST(TransformationTests, RemoveExtraReshapesTestReshapeEqualInputOutput) { } { - auto input_params = std::make_shared(ngraph::element::f32, data_shape); - auto max_pool_operation = std::make_shared(input_params, - ngraph::Strides{1, 1}, - ngraph::Shape{0, 0}, - ngraph::Shape{1, 1}, - ngraph::Shape{4, 4}); - auto result = std::make_shared(max_pool_operation); + auto input_params = std::make_shared(ngraph::element::f32, data_shape); + auto max_pool_operation = std::make_shared(input_params, + ngraph::Strides{1, 1}, + ngraph::Shape{0, 0}, + ngraph::Shape{1, 1}, + ngraph::Shape{4, 4}); + auto result = std::make_shared(max_pool_operation); reference_func = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params}); } diff --git a/src/plugins/intel_gna/tests/unit/transformations/gna_remove_single_input_concat.cpp b/src/plugins/intel_gna/tests/unit/transformations/gna_remove_single_input_concat.cpp index 7af95a3a67c277..28f97615dbf635 100644 --- a/src/plugins/intel_gna/tests/unit/transformations/gna_remove_single_input_concat.cpp +++ b/src/plugins/intel_gna/tests/unit/transformations/gna_remove_single_input_concat.cpp @@ -10,12 +10,13 @@ #include #include "common_test_utils/ov_test_utils.hpp" +#include "openvino/opsets/opset8.hpp" #include "transformations/remove_single_input_concat.hpp" namespace testing { namespace { -using GraphInputs = std::vector>; +using GraphInputs = std::vector>; using GraphOutputs = ngraph::OutputVector; struct Graph { @@ -31,7 +32,7 @@ std::shared_ptr Graph::createFunction() { outputs.end(), std::back_inserter(results), [](ngraph::Output output) { - return std::make_shared(output); + return std::make_shared(output); }); ngraph::ParameterVector params(inputs.begin(), inputs.end()); @@ -48,7 +49,7 @@ Graph createGraph(int n_inputs, bool has_concat, int n_outputs) { Operations outputs; for (int i = 0; i < n_inputs; ++i) { - auto input = std::make_shared(ngraph::element::i64, ngraph::Shape{1, 3, 64}); + auto input = std::make_shared(ngraph::element::i64, ngraph::Shape{1, 3, 64}); inputs.push_back(input); outputs.push_back(input); } @@ -56,8 +57,8 @@ Graph createGraph(int n_inputs, bool has_concat, int n_outputs) { { Operations new_outputs; for (auto output : outputs) { - auto add_bias = ngraph::opset8::Constant::create(ngraph::element::i64, {1, 1, 1}, {2}); - auto add_operation = std::make_shared(output, add_bias); + auto add_bias = ov::op::v0::Constant::create(ngraph::element::i64, {1, 1, 1}, {2}); + auto add_operation = std::make_shared(output, add_bias); new_outputs.push_back(add_operation); } outputs.swap(new_outputs); @@ -65,7 +66,7 @@ Graph createGraph(int n_inputs, bool has_concat, int n_outputs) { if (has_concat) { auto concat_operation = - std::make_shared(ngraph::OutputVector(outputs.begin(), outputs.end()), 0); + std::make_shared(ngraph::OutputVector(outputs.begin(), outputs.end()), 0); outputs = {concat_operation}; } @@ -73,8 +74,8 @@ Graph createGraph(int n_inputs, bool has_concat, int n_outputs) { Operations new_outputs; for (auto output : outputs) { for (int i = 0; i < n_outputs; ++i) { - auto add_bias = ngraph::opset8::Constant::create(ngraph::element::i64, {1, 1, 1}, {3}); - auto add_operation = std::make_shared(output, add_bias); + auto add_bias = 
ov::op::v0::Constant::create(ngraph::element::i64, {1, 1, 1}, {3}); + auto add_operation = std::make_shared(output, add_bias); new_outputs.push_back(add_operation); } } diff --git a/src/plugins/intel_gna/tests/unit/transformations/gna_reorder_activation_and_pooling.cpp b/src/plugins/intel_gna/tests/unit/transformations/gna_reorder_activation_and_pooling.cpp index d4a65bd8cd3c3d..fb989ab2369345 100644 --- a/src/plugins/intel_gna/tests/unit/transformations/gna_reorder_activation_and_pooling.cpp +++ b/src/plugins/intel_gna/tests/unit/transformations/gna_reorder_activation_and_pooling.cpp @@ -5,11 +5,11 @@ #include #include -#include #include #include #include "common_test_utils/ov_test_utils.hpp" +#include "openvino/opsets/opset7.hpp" #include "transformations/reorder_activation_and_pooling.hpp" namespace testing { @@ -36,11 +36,11 @@ class ActivationNodeFactory : public IActivationNodeFactory { }; template <> -class ActivationNodeFactory : public IActivationNodeFactory { +class ActivationNodeFactory : public IActivationNodeFactory { public: ActivationNodeFactory(const double min, const double max) : min_(min), max_(max) {} std::shared_ptr createNode(const ngraph::Output& operation_before) override { - return std::make_shared(operation_before, min_, max_); + return std::make_shared(operation_before, min_, max_); } private: @@ -67,7 +67,7 @@ ActivationFactoryPtr createActivationFactory(Args&&... args) { */ typedef std::tuple ConvolutionActivationPoolTestOptions; @@ -96,33 +96,32 @@ std::shared_ptr ConvolutionActivationPoolTestFixture::get_init ActivationFactoryPtr activation_factory, bool isAddNodeNeeded) { auto input_params_convolution = - std::make_shared(ngraph::element::f32, ngraph::Shape{1, 3, 64, 64}); - auto input_params_add = - std::make_shared(ngraph::element::f32, ngraph::Shape{1, 3, 64, 64}); - - auto weights = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{3, 3, 1, 1}, {1}); - auto bias = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{3, 1, 1}, {1}); - auto convolution_operation = std::make_shared(input_params_convolution, - weights, - ngraph::Strides{1, 1}, - ngraph::CoordinateDiff{0, 0}, - ngraph::CoordinateDiff{0, 0}, - ngraph::Strides{1, 1}); + std::make_shared(ngraph::element::f32, ngraph::Shape{1, 3, 64, 64}); + auto input_params_add = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 3, 64, 64}); + + auto weights = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{3, 3, 1, 1}, {1}); + auto bias = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{3, 1, 1}, {1}); + auto convolution_operation = std::make_shared(input_params_convolution, + weights, + ngraph::Strides{1, 1}, + ngraph::CoordinateDiff{0, 0}, + ngraph::CoordinateDiff{0, 0}, + ngraph::Strides{1, 1}); std::shared_ptr last_operation = convolution_operation; if (isAddNodeNeeded) { - auto add_operation = std::make_shared(convolution_operation, input_params_add); + auto add_operation = std::make_shared(convolution_operation, input_params_add); last_operation = add_operation; } auto activation = activation_factory->createNode(last_operation); - auto max_pool_operation = std::make_shared(activation, - ngraph::Strides{1, 1}, - ngraph::Shape{1, 1}, - ngraph::Shape{1, 1}, - ngraph::Shape{1, 1}); + auto max_pool_operation = std::make_shared(activation, + ngraph::Strides{1, 1}, + ngraph::Shape{1, 1}, + ngraph::Shape{1, 1}, + ngraph::Shape{1, 1}); - auto result = std::make_shared(max_pool_operation); + auto result = 
std::make_shared(max_pool_operation); return std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params_convolution, input_params_add}); } @@ -131,35 +130,34 @@ std::shared_ptr ConvolutionActivationPoolTestFixture::get_refe ActivationFactoryPtr activation_factory, bool isAddNodeNeeded) { auto input_params_convolution = - std::make_shared(ngraph::element::f32, ngraph::Shape{1, 3, 64, 64}); + std::make_shared(ngraph::element::f32, ngraph::Shape{1, 3, 64, 64}); - auto input_params_add = - std::make_shared(ngraph::element::f32, ngraph::Shape{1, 3, 64, 64}); + auto input_params_add = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 3, 64, 64}); - auto weights = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{3, 3, 1, 1}, {1}); - auto bias = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{3, 1, 1}, {1}); - auto convolution_operation = std::make_shared(input_params_convolution, - weights, - ngraph::Strides{1, 1}, - ngraph::CoordinateDiff{0, 0}, - ngraph::CoordinateDiff{0, 0}, - ngraph::Strides{1, 1}); + auto weights = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{3, 3, 1, 1}, {1}); + auto bias = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{3, 1, 1}, {1}); + auto convolution_operation = std::make_shared(input_params_convolution, + weights, + ngraph::Strides{1, 1}, + ngraph::CoordinateDiff{0, 0}, + ngraph::CoordinateDiff{0, 0}, + ngraph::Strides{1, 1}); std::shared_ptr last_operation = convolution_operation; if (isAddNodeNeeded) { - auto add_operation = std::make_shared(convolution_operation, input_params_convolution); + auto add_operation = std::make_shared(convolution_operation, input_params_convolution); last_operation = add_operation; } - auto max_pool_operation = std::make_shared(last_operation, - ngraph::Strides{1, 1}, - ngraph::Shape{1, 1}, - ngraph::Shape{1, 1}, - ngraph::Shape{1, 1}); + auto max_pool_operation = std::make_shared(last_operation, + ngraph::Strides{1, 1}, + ngraph::Shape{1, 1}, + ngraph::Shape{1, 1}, + ngraph::Shape{1, 1}); auto activation = activation_factory->createNode(max_pool_operation); - auto result = std::make_shared(activation); + auto result = std::make_shared(activation); return std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params_convolution, input_params_add}); } @@ -179,15 +177,14 @@ TEST_P(ConvolutionActivationPoolTestFixture, CompareFunctions) { execute_test(function, reference_function); } -const std::vector activationFactories = { - createActivationFactory(), - createActivationFactory(), - createActivationFactory(), - createActivationFactory(), - createActivationFactory(), - createActivationFactory(), - createActivationFactory(), - createActivationFactory(0.1, 0.2)}; +const std::vector activationFactories = {createActivationFactory(), + createActivationFactory(), + createActivationFactory(), + createActivationFactory(), + createActivationFactory(), + createActivationFactory(), + createActivationFactory(), + createActivationFactory(0.1, 0.2)}; INSTANTIATE_TEST_SUITE_P(ConvolutionActivationPoolTestSuite, ConvolutionActivationPoolTestFixture, @@ -203,35 +200,35 @@ TEST(TransformationTests, ReorderActivationAndPoolingTestConvFqMp) { { auto input_params_convolution = - std::make_shared(ngraph::element::f32, ngraph::Shape{1, 3, 64, 64}); - - auto weights = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{3, 3, 1, 1}, {1}); - auto bias = ngraph::opset7::Constant::create(ngraph::element::f32, 
ngraph::Shape{3, 1, 1}, {1}); - auto convolution_operation = std::make_shared(input_params_convolution, - weights, - ngraph::Strides{1, 1}, - ngraph::CoordinateDiff{0, 0}, - ngraph::CoordinateDiff{0, 0}, - ngraph::Strides{1, 1}); - - auto input_low = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1}); - auto input_high = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {20}); - auto output_low = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {0}); - auto output_high = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {10}); - auto fake_quantize_op = std::make_shared(convolution_operation, - input_low, - input_high, - output_low, - output_high, - 11); - - auto max_pool_operation = std::make_shared(fake_quantize_op, - ngraph::Strides{1, 1}, - ngraph::Shape{1, 1}, - ngraph::Shape{1, 1}, - ngraph::Shape{1, 1}); - - auto result = std::make_shared(max_pool_operation); + std::make_shared(ngraph::element::f32, ngraph::Shape{1, 3, 64, 64}); + + auto weights = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{3, 3, 1, 1}, {1}); + auto bias = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{3, 1, 1}, {1}); + auto convolution_operation = std::make_shared(input_params_convolution, + weights, + ngraph::Strides{1, 1}, + ngraph::CoordinateDiff{0, 0}, + ngraph::CoordinateDiff{0, 0}, + ngraph::Strides{1, 1}); + + auto input_low = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1}); + auto input_high = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {20}); + auto output_low = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {0}); + auto output_high = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {10}); + auto fake_quantize_op = std::make_shared(convolution_operation, + input_low, + input_high, + output_low, + output_high, + 11); + + auto max_pool_operation = std::make_shared(fake_quantize_op, + ngraph::Strides{1, 1}, + ngraph::Shape{1, 1}, + ngraph::Shape{1, 1}, + ngraph::Shape{1, 1}); + + auto result = std::make_shared(max_pool_operation); func = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params_convolution}); @@ -245,35 +242,35 @@ TEST(TransformationTests, ReorderActivationAndPoolingTestConvFqMp) { { auto input_params_convolution = - std::make_shared(ngraph::element::f32, ngraph::Shape{1, 3, 64, 64}); - - auto weights = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{3, 3, 1, 1}, {1}); - auto bias = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{3, 1, 1}, {1}); - auto convolution_operation = std::make_shared(input_params_convolution, - weights, - ngraph::Strides{1, 1}, - ngraph::CoordinateDiff{0, 0}, - ngraph::CoordinateDiff{0, 0}, - ngraph::Strides{1, 1}); - - auto max_pool_operation = std::make_shared(convolution_operation, - ngraph::Strides{1, 1}, - ngraph::Shape{1, 1}, - ngraph::Shape{1, 1}, - ngraph::Shape{1, 1}); - - auto input_low = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1}); - auto input_high = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {20}); - auto output_low = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {0}); - auto output_high = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {10}); - auto fake_quantize_op = std::make_shared(max_pool_operation, - input_low, - input_high, - output_low, - 
output_high, - 11); - - auto result = std::make_shared(fake_quantize_op); + std::make_shared(ngraph::element::f32, ngraph::Shape{1, 3, 64, 64}); + + auto weights = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{3, 3, 1, 1}, {1}); + auto bias = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{3, 1, 1}, {1}); + auto convolution_operation = std::make_shared(input_params_convolution, + weights, + ngraph::Strides{1, 1}, + ngraph::CoordinateDiff{0, 0}, + ngraph::CoordinateDiff{0, 0}, + ngraph::Strides{1, 1}); + + auto max_pool_operation = std::make_shared(convolution_operation, + ngraph::Strides{1, 1}, + ngraph::Shape{1, 1}, + ngraph::Shape{1, 1}, + ngraph::Shape{1, 1}); + + auto input_low = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1}); + auto input_high = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {20}); + auto output_low = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {0}); + auto output_high = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {10}); + auto fake_quantize_op = std::make_shared(max_pool_operation, + input_low, + input_high, + output_low, + output_high, + 11); + + auto result = std::make_shared(fake_quantize_op); reference_func = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params_convolution}); } @@ -291,40 +288,40 @@ TEST(TransformationTests, ReorderActivationAndPoolingTestConvAddFqMp) { { auto input_params_convolution = - std::make_shared(ngraph::element::f32, ngraph::Shape{1, 3, 64, 64}); + std::make_shared(ngraph::element::f32, ngraph::Shape{1, 3, 64, 64}); auto input_params_add = - std::make_shared(ngraph::element::f32, ngraph::Shape{1, 3, 64, 64}); - - auto weights = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{3, 3, 1, 1}, {1}); - auto bias = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{3, 1, 1}, {1}); - auto convolution_operation = std::make_shared(input_params_convolution, - weights, - ngraph::Strides{1, 1}, - ngraph::CoordinateDiff{0, 0}, - ngraph::CoordinateDiff{0, 0}, - ngraph::Strides{1, 1}); - - auto add_operation = std::make_shared(convolution_operation, input_params_add); - - auto input_low = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1}); - auto input_high = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {20}); - auto output_low = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {0}); - auto output_high = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {10}); - auto fake_quantize_op = std::make_shared(add_operation, - input_low, - input_high, - output_low, - output_high, - 11); - - auto max_pool_operation = std::make_shared(fake_quantize_op, - ngraph::Strides{1, 1}, - ngraph::Shape{1, 1}, - ngraph::Shape{1, 1}, - ngraph::Shape{1, 1}); - - auto result = std::make_shared(max_pool_operation); + std::make_shared(ngraph::element::f32, ngraph::Shape{1, 3, 64, 64}); + + auto weights = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{3, 3, 1, 1}, {1}); + auto bias = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{3, 1, 1}, {1}); + auto convolution_operation = std::make_shared(input_params_convolution, + weights, + ngraph::Strides{1, 1}, + ngraph::CoordinateDiff{0, 0}, + ngraph::CoordinateDiff{0, 0}, + ngraph::Strides{1, 1}); + + auto add_operation = std::make_shared(convolution_operation, input_params_add); + + auto input_low 
= ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1}); + auto input_high = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {20}); + auto output_low = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {0}); + auto output_high = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {10}); + auto fake_quantize_op = std::make_shared(add_operation, + input_low, + input_high, + output_low, + output_high, + 11); + + auto max_pool_operation = std::make_shared(fake_quantize_op, + ngraph::Strides{1, 1}, + ngraph::Shape{1, 1}, + ngraph::Shape{1, 1}, + ngraph::Shape{1, 1}); + + auto result = std::make_shared(max_pool_operation); func = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params_convolution, input_params_add}); @@ -338,40 +335,40 @@ TEST(TransformationTests, ReorderActivationAndPoolingTestConvAddFqMp) { { auto input_params_convolution = - std::make_shared(ngraph::element::f32, ngraph::Shape{1, 3, 64, 64}); + std::make_shared(ngraph::element::f32, ngraph::Shape{1, 3, 64, 64}); auto input_params_add = - std::make_shared(ngraph::element::f32, ngraph::Shape{1, 3, 64, 64}); - - auto weights = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{3, 3, 1, 1}, {1}); - auto bias = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{3, 1, 1}, {1}); - auto convolution_operation = std::make_shared(input_params_convolution, - weights, - ngraph::Strides{1, 1}, - ngraph::CoordinateDiff{0, 0}, - ngraph::CoordinateDiff{0, 0}, - ngraph::Strides{1, 1}); - - auto add_operation = std::make_shared(convolution_operation, input_params_add); - - auto max_pool_operation = std::make_shared(add_operation, - ngraph::Strides{1, 1}, - ngraph::Shape{1, 1}, - ngraph::Shape{1, 1}, - ngraph::Shape{1, 1}); - - auto input_low = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1}); - auto input_high = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {20}); - auto output_low = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {0}); - auto output_high = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {10}); - auto fake_quantize_op = std::make_shared(max_pool_operation, - input_low, - input_high, - output_low, - output_high, - 11); - - auto result = std::make_shared(fake_quantize_op); + std::make_shared(ngraph::element::f32, ngraph::Shape{1, 3, 64, 64}); + + auto weights = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{3, 3, 1, 1}, {1}); + auto bias = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{3, 1, 1}, {1}); + auto convolution_operation = std::make_shared(input_params_convolution, + weights, + ngraph::Strides{1, 1}, + ngraph::CoordinateDiff{0, 0}, + ngraph::CoordinateDiff{0, 0}, + ngraph::Strides{1, 1}); + + auto add_operation = std::make_shared(convolution_operation, input_params_add); + + auto max_pool_operation = std::make_shared(add_operation, + ngraph::Strides{1, 1}, + ngraph::Shape{1, 1}, + ngraph::Shape{1, 1}, + ngraph::Shape{1, 1}); + + auto input_low = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1}); + auto input_high = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {20}); + auto output_low = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {0}); + auto output_high = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {10}); + auto fake_quantize_op = 
std::make_shared(max_pool_operation, + input_low, + input_high, + output_low, + output_high, + 11); + + auto result = std::make_shared(fake_quantize_op); reference_func = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params_convolution, input_params_add}); diff --git a/src/plugins/intel_gna/tests/unit/transformations/gna_split_convolution_with_large_buffer_size.cpp b/src/plugins/intel_gna/tests/unit/transformations/gna_split_convolution_with_large_buffer_size.cpp index 863e42d21a45bd..fe8aeedb43f529 100644 --- a/src/plugins/intel_gna/tests/unit/transformations/gna_split_convolution_with_large_buffer_size.cpp +++ b/src/plugins/intel_gna/tests/unit/transformations/gna_split_convolution_with_large_buffer_size.cpp @@ -12,6 +12,7 @@ #include "backend/gna_limitations.hpp" #include "common/gna_target.hpp" #include "common_test_utils/ov_test_utils.hpp" +#include "openvino/opsets/opset7.hpp" #include "transformations/split_convolution_with_large_buffer_size.hpp" using namespace ov::intel_gna::limitations; @@ -23,12 +24,12 @@ namespace { struct Graph { std::shared_ptr createFunction(); - std::shared_ptr input_params; + std::shared_ptr input_params; ngraph::OutputVector output_nodes; }; std::shared_ptr Graph::createFunction() { - auto result = std::make_shared(output_nodes.front()); + auto result = std::make_shared(output_nodes.front()); return std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params}); } @@ -102,7 +103,7 @@ using CreateBaseDecoratorPtr = std::unique_ptr; Graph CreateBaseDecorator::build() { Graph graph; - graph.input_params = std::make_shared(ngraph::element::f32, input_data_shape_); + graph.input_params = std::make_shared(ngraph::element::f32, input_data_shape_); return graph; } @@ -120,14 +121,14 @@ class CreateConvolution : public CreateAppendableGraphDecorator { }; ngraph::Output CreateConvolution::createOutputNode(const ngraph::Output& parent_node) { - auto kernel = ngraph::opset7::Constant::create(ngraph::element::f32, kernel_shape_, {1}); - - return std::make_shared(parent_node, - kernel, - ngraph::Strides{1, 1}, - ngraph::CoordinateDiff{0, 0}, - ngraph::CoordinateDiff{0, 0}, - ngraph::Strides{1, 1}); + auto kernel = ov::op::v0::Constant::create(ngraph::element::f32, kernel_shape_, {1}); + + return std::make_shared(parent_node, + kernel, + ngraph::Strides{1, 1}, + ngraph::CoordinateDiff{0, 0}, + ngraph::CoordinateDiff{0, 0}, + ngraph::Strides{1, 1}); } // should be used only after CreateBaseDecorator @@ -142,21 +143,20 @@ class CreateSplittedConvolution : public CreateGraphDecorator { protected: void updateGraph(Graph& graph) override { auto split_node_c1 = - ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape({1}), std::vector{3}); + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape({1}), std::vector{3}); auto split_node_c2 = - ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape({split_shape_.size()}), split_shape_); - auto split_node = - std::make_shared(graph.input_params, split_node_c1, split_node_c2); + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape({split_shape_.size()}), split_shape_); + auto split_node = std::make_shared(graph.input_params, split_node_c1, split_node_c2); - auto kernel = ngraph::opset7::Constant::create(ngraph::element::f32, kernel_shape_, {1}); + auto kernel = ov::op::v0::Constant::create(ngraph::element::f32, kernel_shape_, {1}); for (int i = 0; i < split_shape_.size(); ++i) { - auto convolution_operation = 
std::make_shared(split_node->output(i), - kernel, - ngraph::Strides{1, 1}, - ngraph::CoordinateDiff{0, 0}, - ngraph::CoordinateDiff{0, 0}, - ngraph::Strides{1, 1}); + auto convolution_operation = std::make_shared(split_node->output(i), + kernel, + ngraph::Strides{1, 1}, + ngraph::CoordinateDiff{0, 0}, + ngraph::CoordinateDiff{0, 0}, + ngraph::Strides{1, 1}); graph.output_nodes.push_back(convolution_operation); } } @@ -175,8 +175,8 @@ class CreateAdd : public CreateAppendableGraphDecorator { }; ngraph::Output CreateAdd::createOutputNode(const ngraph::Output& parent_node) { - auto bias = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1}); - return std::make_shared(parent_node, bias); + auto bias = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1}); + return std::make_shared(parent_node, bias); } class CreateFakeQuantize : public CreateAppendableGraphDecorator { @@ -188,16 +188,11 @@ class CreateFakeQuantize : public CreateAppendableGraphDecorator { }; ngraph::Output CreateFakeQuantize::createOutputNode(const ngraph::Output& parent_node) { - auto input_low = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1}); - auto input_high = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {20}); - auto output_low = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {0}); - auto output_high = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {10}); - return std::make_shared(parent_node, - input_low, - input_high, - output_low, - output_high, - 11); + auto input_low = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1}); + auto input_high = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {20}); + auto output_low = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {0}); + auto output_high = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {10}); + return std::make_shared(parent_node, input_low, input_high, output_low, output_high, 11); } class CreateConcat : public CreateGraphDecorator { @@ -210,7 +205,7 @@ class CreateConcat : public CreateGraphDecorator { void CreateConcat::updateGraph(Graph& graph) { ngraph::OutputVector new_graph_output; - new_graph_output.emplace_back(std::make_shared(graph.output_nodes, 3)); + new_graph_output.emplace_back(std::make_shared(graph.output_nodes, 3)); graph.output_nodes.swap(new_graph_output); } diff --git a/src/plugins/intel_gna/tests/unit/transformations/gna_split_eltwise.cpp b/src/plugins/intel_gna/tests/unit/transformations/gna_split_eltwise.cpp index fd92a12cf89eb2..b8415d6fc9f727 100644 --- a/src/plugins/intel_gna/tests/unit/transformations/gna_split_eltwise.cpp +++ b/src/plugins/intel_gna/tests/unit/transformations/gna_split_eltwise.cpp @@ -7,7 +7,6 @@ #include #include #include -#include #include #include @@ -15,6 +14,7 @@ #include "common/gna_target.hpp" #include "common_test_utils/common_utils.hpp" #include "common_test_utils/ov_test_utils.hpp" +#include "openvino/opsets/opset9.hpp" #include "transformations/split_eltwise.hpp" using namespace ov::intel_gna::limitations; @@ -31,25 +31,25 @@ static std::shared_ptr createFunction(const ngraph::Shape& inp std::shared_ptr last_node, last_node0, last_node1; ngraph::ParameterVector parameters; - auto input0 = std::make_shared(ngraph::element::f32, input_shape); + auto input0 = std::make_shared(ngraph::element::f32, input_shape); parameters.push_back(input0); last_node0 = input0; std::shared_ptr 
input1; if (with_const) { - auto const_input = ngraph::opset9::Constant::create(ngraph::element::f32, input_shape, {1}); + auto const_input = ov::op::v0::Constant::create(ngraph::element::f32, input_shape, {1}); last_node1 = const_input; } else { - auto input1 = std::make_shared(ngraph::element::f32, input_shape); + auto input1 = std::make_shared(ngraph::element::f32, input_shape); last_node1 = input1; parameters.push_back(input1); } auto add_fake_quantize = [&](const std::shared_ptr& node) { - auto input_low = ngraph::opset9::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); - auto input_high = ngraph::opset9::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {5}); - auto output_low = ngraph::opset9::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {0}); - auto output_high = ngraph::opset9::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {10}); - return std::make_shared(node, input_low, input_high, output_low, output_high, 11); + auto input_low = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); + auto input_high = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {5}); + auto output_low = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {0}); + auto output_high = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {10}); + return std::make_shared(node, input_low, input_high, output_low, output_high, 11); }; if (with_fq) { @@ -61,33 +61,33 @@ static std::shared_ptr createFunction(const ngraph::Shape& inp if (split) { auto split_sizes_per_axis = ov::intel_gna::AlignedSplitSizesPerAxis(input_shape); - auto split0 = std::make_shared( + auto split0 = std::make_shared( last_node0, - ngraph::opset9::Constant::create(ngraph::element::i64, - ngraph::Shape({1}), - std::vector{split_sizes_per_axis.first}), - ngraph::opset9::Constant::create(ngraph::element::i64, - ngraph::Shape({split_sizes_per_axis.second.size()}), - split_sizes_per_axis.second)); - auto split1 = std::make_shared( + ov::op::v0::Constant::create(ngraph::element::i64, + ngraph::Shape({1}), + std::vector{split_sizes_per_axis.first}), + ov::op::v0::Constant::create(ngraph::element::i64, + ngraph::Shape({split_sizes_per_axis.second.size()}), + split_sizes_per_axis.second)); + auto split1 = std::make_shared( last_node1, - ngraph::opset9::Constant::create(ngraph::element::i64, - ngraph::Shape({1}), - std::vector{split_sizes_per_axis.first}), - ngraph::opset9::Constant::create(ngraph::element::i64, - ngraph::Shape({split_sizes_per_axis.second.size()}), - split_sizes_per_axis.second)); + ov::op::v0::Constant::create(ngraph::element::i64, + ngraph::Shape({1}), + std::vector{split_sizes_per_axis.first}), + ov::op::v0::Constant::create(ngraph::element::i64, + ngraph::Shape({split_sizes_per_axis.second.size()}), + split_sizes_per_axis.second)); ov::NodeVector concat_inputs; for (size_t i = 0; i < split_sizes_per_axis.second.size(); i++) { auto eltwise_node_part = std::make_shared(split0->output(i), split1->output(i), type); concat_inputs.push_back(eltwise_node_part); } - auto concat = std::make_shared(concat_inputs, split_sizes_per_axis.first); - auto result = std::make_shared(concat); + auto concat = std::make_shared(concat_inputs, split_sizes_per_axis.first); + auto result = std::make_shared(concat); return std::make_shared(ngraph::ResultVector{result}, parameters); } else { auto eltwise = std::make_shared(last_node0, last_node1, type); - auto result = std::make_shared(eltwise); + auto result = std::make_shared(eltwise); 
return std::make_shared(ngraph::ResultVector{result}, parameters); } } diff --git a/src/plugins/intel_gna/tests/unit/transformations/gna_substitute_softsign.cpp b/src/plugins/intel_gna/tests/unit/transformations/gna_substitute_softsign.cpp index 6cd0be8bc4cdf5..91c610d1478f1a 100644 --- a/src/plugins/intel_gna/tests/unit/transformations/gna_substitute_softsign.cpp +++ b/src/plugins/intel_gna/tests/unit/transformations/gna_substitute_softsign.cpp @@ -5,9 +5,8 @@ #include #include -#include -#include #include +#include #include #include "common_test_utils/ov_test_utils.hpp" @@ -17,7 +16,7 @@ namespace testing { namespace { std::shared_ptr createSoftSignFunction() { - auto input_params = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 1, 1, 64}); + auto input_params = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 1, 1, 64}); auto softsign = std::make_shared(input_params); @@ -32,18 +31,17 @@ TEST(TransformationTests, SubstituteSoftSignMulPower) { std::shared_ptr func(nullptr), reference_func(nullptr); { - auto input_params = - std::make_shared(ngraph::element::f32, ngraph::Shape{1, 1, 1, 64}); + auto input_params = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 1, 1, 64}); - auto abs = std::make_shared(input_params); + auto abs = std::make_shared(input_params); - auto const_1 = ngraph::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{}, {1}); - auto const_neg_1 = ngraph::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{}, {-1}); + auto const_1 = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{}, {1}); + auto const_neg_1 = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{}, {-1}); - auto add = std::make_shared(abs, const_1); - auto power = std::make_shared(add, const_neg_1); + auto add = std::make_shared(abs, const_1); + auto power = std::make_shared(add, const_neg_1); - auto mul = std::make_shared(power, input_params); + auto mul = std::make_shared(power, input_params); ngraph::ResultVector results{std::make_shared(mul)}; func = std::make_shared(ngraph::ResultVector{results}, ngraph::ParameterVector{input_params}); @@ -66,16 +64,15 @@ TEST(TransformationTests, SubstituteSoftSignDivide) { std::shared_ptr func(nullptr), reference_func(nullptr); { - auto input_params = - std::make_shared(ngraph::element::f32, ngraph::Shape{1, 1, 1, 64}); + auto input_params = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 1, 1, 64}); - auto abs = std::make_shared(input_params); + auto abs = std::make_shared(input_params); - auto const_1 = ngraph::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{}, {1}); - auto add = std::make_shared(abs, const_1); + auto const_1 = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{}, {1}); + auto add = std::make_shared(abs, const_1); - auto divide = std::make_shared(input_params, add); - ngraph::ResultVector results{std::make_shared(divide)}; + auto divide = std::make_shared(input_params, add); + ngraph::ResultVector results{std::make_shared(divide)}; func = std::make_shared(ngraph::ResultVector{results}, ngraph::ParameterVector{input_params}); ngraph::pass::Manager m; @@ -97,18 +94,17 @@ TEST(TransformationTests, SubstituteSoftSignMulPowerInvalidAddConst) { std::shared_ptr func(nullptr), reference_func(nullptr); { - auto input_params = - std::make_shared(ngraph::element::f32, ngraph::Shape{1, 1, 1, 64}); + auto input_params = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 1, 1, 64}); - auto abs = std::make_shared(input_params); + auto abs = 
std::make_shared(input_params); - auto const_1 = ngraph::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{}, {1.1}); - auto const_neg_1 = ngraph::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{}, {-1}); + auto const_1 = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{}, {1.1}); + auto const_neg_1 = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{}, {-1}); - auto add = std::make_shared(abs, const_1); - auto power = std::make_shared(add, const_neg_1); + auto add = std::make_shared(abs, const_1); + auto power = std::make_shared(add, const_neg_1); - auto mul = std::make_shared(power, input_params); + auto mul = std::make_shared(power, input_params); ngraph::ResultVector results{std::make_shared(mul)}; func = std::make_shared(ngraph::ResultVector{results}, ngraph::ParameterVector{input_params}); @@ -131,18 +127,17 @@ TEST(TransformationTests, SubstituteSoftSignMulPowerInvalidPowerConst) { std::shared_ptr func(nullptr), reference_func(nullptr); { - auto input_params = - std::make_shared(ngraph::element::f32, ngraph::Shape{1, 1, 1, 64}); + auto input_params = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 1, 1, 64}); - auto abs = std::make_shared(input_params); + auto abs = std::make_shared(input_params); - auto const_1 = ngraph::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{}, {1}); - auto const_neg_1 = ngraph::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{}, {-1.1}); + auto const_1 = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{}, {1}); + auto const_neg_1 = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{}, {-1.1}); - auto add = std::make_shared(abs, const_1); - auto power = std::make_shared(add, const_neg_1); + auto add = std::make_shared(abs, const_1); + auto power = std::make_shared(add, const_neg_1); - auto mul = std::make_shared(power, input_params); + auto mul = std::make_shared(power, input_params); ngraph::ResultVector results{std::make_shared(mul)}; func = std::make_shared(ngraph::ResultVector{results}, ngraph::ParameterVector{input_params}); diff --git a/src/plugins/intel_gna/tests/unit/transformations/gna_swap_input_matmul.cpp b/src/plugins/intel_gna/tests/unit/transformations/gna_swap_input_matmul.cpp index 8118ac2948ca19..6f361e5f4c74e8 100644 --- a/src/plugins/intel_gna/tests/unit/transformations/gna_swap_input_matmul.cpp +++ b/src/plugins/intel_gna/tests/unit/transformations/gna_swap_input_matmul.cpp @@ -10,6 +10,7 @@ #include #include "common_test_utils/ov_test_utils.hpp" +#include "openvino/opsets/opset8.hpp" #include "transformations/swap_input_matmul_gna.hpp" namespace testing { @@ -24,30 +25,26 @@ static std::shared_ptr CreateMatMulFunction(const ngraph::Shap bool swappedInputs, bool needTranspose, bool expected = false) { - auto input_params = std::make_shared(ngraph::element::i64, input2_shape); + auto input_params = std::make_shared(ngraph::element::i64, input2_shape); std::shared_ptr input = input_params; if (input->get_output_shape(0).size() == 2 && needTranspose) { auto transpose_order = - ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{2}, std::vector{1, 0}); - input = std::make_shared(input, transpose_order); + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{2}, std::vector{1, 0}); + input = std::make_shared(input, transpose_order); } - auto constant = ngraph::opset8::Constant::create(ngraph::element::i64, input1_shape, {1}); + auto constant = ov::op::v0::Constant::create(ngraph::element::i64, 
input1_shape, {1}); std::shared_ptr const_input = constant; if (withWeightsFq) { - auto input_low = ngraph::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1}); - auto input_high = ngraph::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {20}); - auto output_low = ngraph::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {0}); - auto output_high = ngraph::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {10}); - const_input = std::make_shared(const_input, - input_low, - input_high, - output_low, - output_high, - 11); + auto input_low = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1}); + auto input_high = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {20}); + auto output_low = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {0}); + auto output_high = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {10}); + const_input = + std::make_shared(const_input, input_low, input_high, output_low, output_high, 11); } - auto matmul = swappedInputs ? std::make_shared(input, const_input, false, needTranspose) - : std::make_shared(const_input, input, needTranspose, false); + auto matmul = swappedInputs ? std::make_shared(input, const_input, false, needTranspose) + : std::make_shared(const_input, input, needTranspose, false); std::shared_ptr final_node = matmul; if (withBias) { @@ -55,40 +52,36 @@ static std::shared_ptr CreateMatMulFunction(const ngraph::Shap if ((needTranspose && !expected || !needTranspose && expected) && bias_shape.size() > 1) { std::swap(shape[0], shape[1]); } - auto bias = ngraph::opset8::Constant::create(ngraph::element::i64, shape, {1}); + auto bias = ov::op::v0::Constant::create(ngraph::element::i64, shape, {1}); std::shared_ptr bias_node = bias; if (expected && bias_shape.size() > 1) { auto transpose_order = - ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{2}, std::vector{1, 0}); - bias_node = std::make_shared(bias_node, transpose_order); + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{2}, std::vector{1, 0}); + bias_node = std::make_shared(bias_node, transpose_order); } - final_node = std::make_shared(matmul, bias_node); + final_node = std::make_shared(matmul, bias_node); } if (withOutFq) { - auto input_low = ngraph::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1}); - auto input_high = ngraph::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {20}); - auto output_low = ngraph::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {0}); - auto output_high = ngraph::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {10}); - final_node = std::make_shared(final_node, - input_low, - input_high, - output_low, - output_high, - 11); + auto input_low = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1}); + auto input_high = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {20}); + auto output_low = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {0}); + auto output_high = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {10}); + final_node = + std::make_shared(final_node, input_low, input_high, output_low, output_high, 11); } if (withAct) { - final_node = std::make_shared(final_node); + final_node = std::make_shared(final_node); } if (final_node->get_output_shape(0).size() == 2 && needTranspose) { auto transpose_order = - 
ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{2}, std::vector{1, 0}); - final_node = std::make_shared(final_node, transpose_order); + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{2}, std::vector{1, 0}); + final_node = std::make_shared(final_node, transpose_order); } - auto result = std::make_shared(final_node); + auto result = std::make_shared(final_node); return std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params}); } diff --git a/src/plugins/intel_gna/tests/unit/transformations/gna_unfuse_reshape_and_transpose.cpp b/src/plugins/intel_gna/tests/unit/transformations/gna_unfuse_reshape_and_transpose.cpp index f0312aa24d40ce..8be223526e39c8 100644 --- a/src/plugins/intel_gna/tests/unit/transformations/gna_unfuse_reshape_and_transpose.cpp +++ b/src/plugins/intel_gna/tests/unit/transformations/gna_unfuse_reshape_and_transpose.cpp @@ -11,6 +11,7 @@ #include #include "common_test_utils/ov_test_utils.hpp" +#include "openvino/opsets/opset8.hpp" #include "transformations/unfuse_reshape_and_transpose.hpp" namespace testing { @@ -36,11 +37,11 @@ class ActivationFactory : public IActivationFactory { }; template <> -class ActivationFactory : public IActivationFactory { +class ActivationFactory : public IActivationFactory { public: ActivationFactory(const double min, const double max) : min_(min), max_(max) {} std::shared_ptr createNode(const ngraph::Output& operation_before) override { - return std::make_shared(operation_before, min_, max_); + return std::make_shared(operation_before, min_, max_); } private: @@ -70,32 +71,31 @@ static std::shared_ptr createFunction(const ngraph::Shape& con bool single_batch) { size_t total_in = std::accumulate(std::begin(conv_input_shape), std::end(conv_input_shape), 1, std::multiplies()); - auto input = std::make_shared(ngraph::element::f32, ngraph::Shape{1, total_in}); + auto input = std::make_shared(ngraph::element::f32, ngraph::Shape{1, total_in}); std::shared_ptr last_node, last_const; auto add_fake_quantize = [&](const std::shared_ptr& node) { - auto input_low = ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); - auto input_high = ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {5}); - auto output_low = ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {0}); - auto output_high = ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {10}); - return std::make_shared(node, input_low, input_high, output_low, output_high, 11); + auto input_low = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); + auto input_high = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {5}); + auto output_low = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {0}); + auto output_high = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {10}); + return std::make_shared(node, input_low, input_high, output_low, output_high, 11); }; if (single_reshape_before) { - auto reshape_in_const = - ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{4}, conv_input_shape); - auto reshape_in = std::make_shared(input, reshape_in_const, false); + auto reshape_in_const = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, conv_input_shape); + auto reshape_in = std::make_shared(input, reshape_in_const, false); last_node = reshape_in; } else { - auto reshape_in_const = ngraph::opset8::Constant::create( + auto reshape_in_const = 
ov::op::v0::Constant::create( ngraph::element::i64, ngraph::Shape{4}, ngraph::Shape{conv_input_shape[0], conv_input_shape[2], conv_input_shape[3], conv_input_shape[1]}); - auto reshape_in = std::make_shared(input, reshape_in_const, false); + auto reshape_in = std::make_shared(input, reshape_in_const, false); auto transpose_in_const = - ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{4}, ngraph::Shape{0, 3, 1, 2}); - auto transpose_in = std::make_shared(reshape_in, transpose_in_const); + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, ngraph::Shape{0, 3, 1, 2}); + auto transpose_in = std::make_shared(reshape_in, transpose_in_const); last_node = transpose_in; } - auto conv_weights = ngraph::opset8::Constant::create(ngraph::element::f32, conv_filter_shape, {1}); + auto conv_weights = ov::op::v0::Constant::create(ngraph::element::f32, conv_filter_shape, {1}); last_const = conv_weights; if (with_fq) { auto conv_input_fq = add_fake_quantize(last_node); @@ -103,21 +103,20 @@ static std::shared_ptr createFunction(const ngraph::Shape& con auto conv_weights_fq = add_fake_quantize(conv_weights); last_const = conv_weights_fq; } - auto conv = std::make_shared(last_node, - last_const, - ngraph::Strides{1, 1}, - ngraph::CoordinateDiff{0, 0}, - ngraph::CoordinateDiff{0, 0}, - ngraph::Strides{1, 1}); + auto conv = std::make_shared(last_node, + last_const, + ngraph::Strides{1, 1}, + ngraph::CoordinateDiff{0, 0}, + ngraph::CoordinateDiff{0, 0}, + ngraph::Strides{1, 1}); last_node = conv; auto conv_output_shape = conv->get_output_shape(0); size_t total_out = std::accumulate(std::begin(conv_output_shape), std::end(conv_output_shape), 1, std::multiplies()); if (with_bias) { - auto add_const = ngraph::opset8::Constant::create(ngraph::element::f32, - ngraph::Shape{1, conv_output_shape.at(1), 1, 1}, - {1}); - auto add = std::make_shared(conv, add_const); + auto add_const = + ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1, conv_output_shape.at(1), 1, 1}, {1}); + auto add = std::make_shared(conv, add_const); last_node = add; } if (with_fq) { @@ -125,11 +124,11 @@ static std::shared_ptr createFunction(const ngraph::Shape& con last_node = conv_bias_fq; } if (with_pool) { - auto pool = std::make_shared(last_node, - ngraph::Strides{1, 1}, - ngraph::Shape{0, 0}, - ngraph::Shape{0, 0}, - ngraph::Shape{1, 1}); + auto pool = std::make_shared(last_node, + ngraph::Strides{1, 1}, + ngraph::Shape{0, 0}, + ngraph::Shape{0, 0}, + ngraph::Shape{1, 1}); last_node = pool; } if (activation_factory) { @@ -145,16 +144,16 @@ static std::shared_ptr createFunction(const ngraph::Shape& con } } auto out_shape = single_batch ? 
ngraph::Shape{1, total_out} : ngraph::Shape{total_out, 1}; - auto reshape_out_const = ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{2}, out_shape); + auto reshape_out_const = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{2}, out_shape); if (!single_reshape_after) { auto transpose_out_const = - ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{4}, ngraph::Shape{0, 2, 3, 1}); - auto transpose_out = std::make_shared(last_node, transpose_out_const); + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, ngraph::Shape{0, 2, 3, 1}); + auto transpose_out = std::make_shared(last_node, transpose_out_const); last_node = transpose_out; } - auto reshape_out = std::make_shared(last_node, reshape_out_const, false); + auto reshape_out = std::make_shared(last_node, reshape_out_const, false); - auto result = std::make_shared(reshape_out); + auto result = std::make_shared(reshape_out); auto func = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{input}); return func; @@ -228,16 +227,15 @@ TEST_P(UnfuseReshapeAndTransposeTestSuiteFixture, CompareFunctions) { execute_test(function, reference_function); } -const std::vector activationFactories = { - nullptr, - createActivationFactory(), - createActivationFactory(), - createActivationFactory(), - createActivationFactory(), - createActivationFactory(), - createActivationFactory(), - createActivationFactory(), - createActivationFactory(0.1, 0.2)}; +const std::vector activationFactories = {nullptr, + createActivationFactory(), + createActivationFactory(), + createActivationFactory(), + createActivationFactory(), + createActivationFactory(), + createActivationFactory(), + createActivationFactory(), + createActivationFactory(0.1, 0.2)}; INSTANTIATE_TEST_SUITE_P( UnfuseReshapeAndTransposeTestSuite, diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp index 7f5402bfa5cc8e..07634731309133 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp @@ -23,7 +23,7 @@ std::shared_ptr getFunction1() { params.front()->set_friendly_name("Param_1"); params.front()->get_output_tensor(0).set_names({"input_tensor"}); - auto relu = std::make_shared(params[0]); + auto relu = std::make_shared(params[0]); relu->get_output_tensor(0).set_names({"relu"}); return std::make_shared(relu, params, "SimpleActivation"); diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_transformation.cpp index 80e61648cc86b7..ebeeb2a2d6017c 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_transformation.cpp @@ -77,7 +77,7 @@ const std::vector testValues = { {}, { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, {}, - std::make_shared(ov::element::u8, ov::Shape{1, 3, 16, 16}, std::vector(3 * 16 * 16, 1.0)), + std::make_shared(ov::element::u8, ov::Shape{1, 3, 16, 16}, 
std::vector(3 * 16 * 16, 1.0)), {}, { { ov::element::f16 }, diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/depth_to_space_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/depth_to_space_transformation.cpp index 8fc71d9ba38215..5449287a2dbb46 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/depth_to_space_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/depth_to_space_transformation.cpp @@ -9,7 +9,6 @@ using namespace LayerTestsDefinitions; using namespace InferenceEngine::details; -using namespace ngraph::opset1; namespace { const std::vector precisions = { @@ -17,9 +16,9 @@ const std::vector precisions = { ngraph::element::f16 }; -const std::vector modes = { - DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, - DepthToSpace::DepthToSpaceMode::DEPTH_FIRST +const std::vector modes = { + ov::op::v0::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, + ov::op::v0::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST }; const std::vector inputShapesBS2 = { diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/pad_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/pad_transformation.cpp index 8f6f43f4195264..52c847feb354d8 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/pad_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/pad_transformation.cpp @@ -27,11 +27,11 @@ const std::vector trasform namespace commonTestCases { -const std::vector padModes = { - ngraph::op::PadMode::CONSTANT, - ngraph::op::PadMode::EDGE, - ngraph::op::PadMode::REFLECT, - ngraph::op::PadMode::SYMMETRIC +const std::vector padModes = { + ov::op::PadMode::CONSTANT, + ov::op::PadMode::EDGE, + ov::op::PadMode::REFLECT, + ov::op::PadMode::SYMMETRIC }; const std::vector params = { @@ -92,7 +92,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_LPT, PadTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), ::testing::ValuesIn(inputShapes), - ::testing::Values(ngraph::op::PadMode::CONSTANT), + ::testing::Values(ov::op::PadMode::CONSTANT), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::ValuesIn(trasformationParamValues), ::testing::ValuesIn(params)), @@ -101,10 +101,10 @@ INSTANTIATE_TEST_SUITE_P(smoke_LPT, PadTransformation, namespace testCasesForOtherModes { -const std::vector modesWithoutConstant = { - ngraph::op::PadMode::EDGE, - ngraph::op::PadMode::REFLECT, - ngraph::op::PadMode::SYMMETRIC +const std::vector modesWithoutConstant = { + ov::op::PadMode::EDGE, + ov::op::PadMode::REFLECT, + ov::op::PadMode::SYMMETRIC }; const std::vector params = { diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/broadcast.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/broadcast.cpp index 3387375a0c07c1..5513123a7862cb 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/broadcast.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/broadcast.cpp @@ -96,7 +96,7 @@ INSTANTIATE_TEST_CASE_P(smoke_TestNumpyBroadcast2D, BroadcastLayerTest, ::testing::Combine(::testing::ValuesIn(targetShapesNumpy2D), 
::testing::Values(ngraph::AxisSet{}), // not used in numpy mode - ::testing::Values(ngraph::op::BroadcastType::NUMPY), + ::testing::Values(ov::op::BroadcastType::NUMPY), ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_2d_static)), ::testing::ValuesIn(inputPrecisions), ::testing::Values(ov::test::utils::DEVICE_GPU)), diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/convolution.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/convolution.cpp index 2f00b4e38e7090..85012f392611ab 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/convolution.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/convolution.cpp @@ -46,7 +46,7 @@ const auto conv2DParams_AutoPadValid = ::testing::Combine( ::testing::Values(std::vector({0, 0})), ::testing::ValuesIn(dilations), ::testing::ValuesIn(numOutChannels), - ::testing::Values(ngraph::op::PadType::VALID) + ::testing::Values(ov::op::PadType::VALID) ); INSTANTIATE_TEST_SUITE_P(smoke_Convolution2D_ExplicitPadding, ConvolutionLayerTest, diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/convolution_backprop_data.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/convolution_backprop_data.cpp index 1488f7cbf6358b..8fdfe1f197a9fd 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/convolution_backprop_data.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/convolution_backprop_data.cpp @@ -137,7 +137,7 @@ const auto conv3DParams_ExplicitPadding = ::testing::Combine( ::testing::ValuesIn(padEnds3D), ::testing::ValuesIn(dilations3D), ::testing::ValuesIn(numOutChannels), - ::testing::Values(ngraph::op::PadType::EXPLICIT), + ::testing::Values(ov::op::PadType::EXPLICIT), ::testing::ValuesIn(emptyOutputPadding) ); const auto conv3DParams_AutoPadValid = ::testing::Combine( @@ -147,7 +147,7 @@ const auto conv3DParams_AutoPadValid = ::testing::Combine( ::testing::Values(std::vector({0, 0, 0})), ::testing::ValuesIn(dilations3D), ::testing::ValuesIn(numOutChannels), - ::testing::Values(ngraph::op::PadType::VALID), + ::testing::Values(ov::op::PadType::VALID), ::testing::ValuesIn(emptyOutputPadding) ); diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/group_convolution.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/group_convolution.cpp index e643f5716e5696..cf57cdd081b164 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/group_convolution.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/group_convolution.cpp @@ -82,7 +82,7 @@ const auto groupConv2DParams_ExplicitPadding = ::testing::Combine( ::testing::ValuesIn(dilations), ::testing::ValuesIn(numOutChannels), ::testing::ValuesIn(numGroups), - ::testing::Values(ngraph::op::PadType::EXPLICIT) + ::testing::Values(ov::op::PadType::EXPLICIT) ); const auto groupConv2DParams_AutoPadValid = ::testing::Combine( ::testing::ValuesIn(kernels), @@ -92,7 +92,7 @@ const auto groupConv2DParams_AutoPadValid = ::testing::Combine( ::testing::ValuesIn(dilations), ::testing::ValuesIn(numOutChannels), ::testing::ValuesIn(numGroups), - ::testing::Values(ngraph::op::PadType::VALID) + ::testing::Values(ov::op::PadType::VALID) ); 
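The hunks above all apply one mechanical substitution: opset-aliased classes such as ngraph::opset7::Constant or ngraph::opset8::Parameter are spelled as their versioned ov::op::vN equivalents, the required "openvino/opsets/opsetN.hpp" header is included explicitly instead of coming in transitively, and attribute enums such as ngraph::op::PadType move to ov::op::PadType. As a reference point only (this sketch is not part of the patch, and the helper name build_conv_model is invented for illustration), a minimal standalone model built with the new spellings looks roughly like this, assuming the standard OpenVINO 2.0 headers:

#include <memory>

#include "openvino/core/model.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/convolution.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/result.hpp"

std::shared_ptr<ov::Model> build_conv_model() {
    // ngraph::opset7::Parameter  ->  ov::op::v0::Parameter
    auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 3, 64, 64});
    // ngraph::opset7::Constant::create  ->  ov::op::v0::Constant::create (single value is broadcast)
    auto weights = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{3, 3, 1, 1}, {1.0f});
    // ngraph::op::PadType  ->  ov::op::PadType, passed as the optional auto_pad argument
    auto conv = std::make_shared<ov::op::v1::Convolution>(input,
                                                          weights,
                                                          ov::Strides{1, 1},
                                                          ov::CoordinateDiff{0, 0},
                                                          ov::CoordinateDiff{0, 0},
                                                          ov::Strides{1, 1},
                                                          ov::op::PadType::VALID);
    // ngraph::opset7::Result  ->  ov::op::v0::Result
    auto result = std::make_shared<ov::op::v0::Result>(conv);
    return std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{input});
}
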
INSTANTIATE_TEST_SUITE_P(smoke_GroupConvolution2D_ExplicitPadding, GroupConvolutionLayerTest, @@ -128,7 +128,7 @@ const auto groupConv3DParams_ExplicitPadding = ::testing::Combine( ::testing::ValuesIn(dilations3d), ::testing::Values(4), ::testing::Values(2), - ::testing::Values(ngraph::op::PadType::EXPLICIT) + ::testing::Values(ov::op::PadType::EXPLICIT) ); const auto groupConv3DParams_AutoPadValid = ::testing::Combine( ::testing::ValuesIn(kernels3d), @@ -138,7 +138,7 @@ const auto groupConv3DParams_AutoPadValid = ::testing::Combine( ::testing::ValuesIn(dilations3d), ::testing::Values(4), ::testing::Values(2), - ::testing::Values(ngraph::op::PadType::VALID) + ::testing::Values(ov::op::PadType::VALID) ); INSTANTIATE_TEST_SUITE_P(smoke_GroupConvolution3D_ExplicitPadding, GroupConvolutionLayerTest, diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/gru_sequence.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/gru_sequence.cpp index 85f94d0b7d6e7c..b40264d2fa1eab 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/gru_sequence.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/gru_sequence.cpp @@ -27,9 +27,9 @@ namespace { std::vector linear_before_reset = {true, false}; std::vector clip{0.f}; std::vector clip_non_zeros{0.7f}; - std::vector direction = {ngraph::op::RecurrentSequenceDirection::FORWARD, - ngraph::op::RecurrentSequenceDirection::REVERSE, - ngraph::op::RecurrentSequenceDirection::BIDIRECTIONAL + std::vector direction = {ov::op::RecurrentSequenceDirection::FORWARD, + ov::op::RecurrentSequenceDirection::REVERSE, + ov::op::RecurrentSequenceDirection::BIDIRECTIONAL }; std::vector netPrecisions = {InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP16}; diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/interpolate.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/interpolate.cpp index 97d88b32360cd7..2bb3abc7e38ff1 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/interpolate.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/interpolate.cpp @@ -87,43 +87,43 @@ const std::vector> target5dShapes = { {1, 1, 4, 4, 4}, }; -const std::vector modesWithoutNearest = { - ngraph::op::v4::Interpolate::InterpolateMode::LINEAR, - ngraph::op::v4::Interpolate::InterpolateMode::CUBIC, - ngraph::op::v4::Interpolate::InterpolateMode::LINEAR_ONNX, +const std::vector modesWithoutNearest = { + ov::op::v4::Interpolate::InterpolateMode::LINEAR, + ov::op::v4::Interpolate::InterpolateMode::CUBIC, + ov::op::v4::Interpolate::InterpolateMode::LINEAR_ONNX, }; -const std::vector nearestMode = { - ngraph::op::v4::Interpolate::InterpolateMode::NEAREST, +const std::vector nearestMode = { + ov::op::v4::Interpolate::InterpolateMode::NEAREST, }; -const std::vector linearOnnxMode = { - ngraph::op::v4::Interpolate::InterpolateMode::LINEAR_ONNX, +const std::vector linearOnnxMode = { + ov::op::v4::Interpolate::InterpolateMode::LINEAR_ONNX, }; -const std::vector coordinateTransformModes = { - ngraph::op::v4::Interpolate::CoordinateTransformMode::TF_HALF_PIXEL_FOR_NN, - ngraph::op::v4::Interpolate::CoordinateTransformMode::PYTORCH_HALF_PIXEL, - ngraph::op::v4::Interpolate::CoordinateTransformMode::HALF_PIXEL, - ngraph::op::v4::Interpolate::CoordinateTransformMode::ASYMMETRIC, - 
ngraph::op::v4::Interpolate::CoordinateTransformMode::ALIGN_CORNERS, +const std::vector coordinateTransformModes = { + ov::op::v4::Interpolate::CoordinateTransformMode::TF_HALF_PIXEL_FOR_NN, + ov::op::v4::Interpolate::CoordinateTransformMode::PYTORCH_HALF_PIXEL, + ov::op::v4::Interpolate::CoordinateTransformMode::HALF_PIXEL, + ov::op::v4::Interpolate::CoordinateTransformMode::ASYMMETRIC, + ov::op::v4::Interpolate::CoordinateTransformMode::ALIGN_CORNERS, }; -const std::vector shapeCalculationMode = { - ngraph::op::v4::Interpolate::ShapeCalcMode::SIZES, - ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES, +const std::vector shapeCalculationMode = { + ov::op::v4::Interpolate::ShapeCalcMode::SIZES, + ov::op::v4::Interpolate::ShapeCalcMode::SCALES, }; -const std::vector nearestModes = { - ngraph::op::v4::Interpolate::NearestMode::SIMPLE, - ngraph::op::v4::Interpolate::NearestMode::ROUND_PREFER_FLOOR, - ngraph::op::v4::Interpolate::NearestMode::FLOOR, - ngraph::op::v4::Interpolate::NearestMode::CEIL, - ngraph::op::v4::Interpolate::NearestMode::ROUND_PREFER_CEIL, +const std::vector nearestModes = { + ov::op::v4::Interpolate::NearestMode::SIMPLE, + ov::op::v4::Interpolate::NearestMode::ROUND_PREFER_FLOOR, + ov::op::v4::Interpolate::NearestMode::FLOOR, + ov::op::v4::Interpolate::NearestMode::CEIL, + ov::op::v4::Interpolate::NearestMode::ROUND_PREFER_CEIL, }; -const std::vector defaultNearestMode = { - ngraph::op::v4::Interpolate::NearestMode::ROUND_PREFER_FLOOR, +const std::vector defaultNearestMode = { + ov::op::v4::Interpolate::NearestMode::ROUND_PREFER_FLOOR, }; const std::vector> pads = { @@ -308,9 +308,9 @@ INSTANTIATE_TEST_SUITE_P(smoke_Interpolate_11_Basic, Interpolate11LayerTest, ::t ::testing::Values(additional_config)), Interpolate11LayerTest::getTestCaseName); -const std::vector modesPillow = { - ngraph::op::v4::Interpolate::InterpolateMode::BILINEAR_PILLOW, - ngraph::op::v4::Interpolate::InterpolateMode::BICUBIC_PILLOW, +const std::vector modesPillow = { + ov::op::v4::Interpolate::InterpolateMode::BILINEAR_PILLOW, + ov::op::v4::Interpolate::InterpolateMode::BICUBIC_PILLOW, }; const std::vector pillowModePrecisions = { @@ -321,9 +321,9 @@ const std::vector pillowModePrecisions = { INSTANTIATE_TEST_SUITE_P(smoke_Interpolate_11_Pillow, Interpolate11LayerTest, ::testing::Combine( ::testing::Combine( ::testing::ValuesIn(modesPillow), - ::testing::Values(ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES), - ::testing::Values(ngraph::op::v4::Interpolate::CoordinateTransformMode::TF_HALF_PIXEL_FOR_NN), - ::testing::Values(ngraph::op::v4::Interpolate::NearestMode::SIMPLE), + ::testing::Values(ov::op::v4::Interpolate::ShapeCalcMode::SCALES), + ::testing::Values(ov::op::v4::Interpolate::CoordinateTransformMode::TF_HALF_PIXEL_FOR_NN), + ::testing::Values(ov::op::v4::Interpolate::NearestMode::SIMPLE), ::testing::Values(false), ::testing::Values(std::vector{0, 0, 1, 1}), ::testing::Values(std::vector{0, 0, 1, 1}), @@ -345,9 +345,9 @@ INSTANTIATE_TEST_SUITE_P(smoke_Interpolate_11_Pillow, Interpolate11LayerTest, :: INSTANTIATE_TEST_SUITE_P(smoke_Interpolate_11_Pillow_Horizontal, Interpolate11LayerTest, ::testing::Combine( ::testing::Combine( ::testing::ValuesIn(modesPillow), - ::testing::Values(ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES), - ::testing::Values(ngraph::op::v4::Interpolate::CoordinateTransformMode::TF_HALF_PIXEL_FOR_NN), - ::testing::Values(ngraph::op::v4::Interpolate::NearestMode::SIMPLE), + ::testing::Values(ov::op::v4::Interpolate::ShapeCalcMode::SCALES), + 
::testing::Values(ov::op::v4::Interpolate::CoordinateTransformMode::TF_HALF_PIXEL_FOR_NN), + ::testing::Values(ov::op::v4::Interpolate::NearestMode::SIMPLE), ::testing::Values(false), ::testing::Values(std::vector{0, 0, 1, 1}), ::testing::Values(std::vector{0, 0, 1, 1}), @@ -369,9 +369,9 @@ INSTANTIATE_TEST_SUITE_P(smoke_Interpolate_11_Pillow_Horizontal, Interpolate11La INSTANTIATE_TEST_SUITE_P(smoke_Interpolate_11_Pillow_Vertical, Interpolate11LayerTest, ::testing::Combine( ::testing::Combine( ::testing::ValuesIn(modesPillow), - ::testing::Values(ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES), - ::testing::Values(ngraph::op::v4::Interpolate::CoordinateTransformMode::TF_HALF_PIXEL_FOR_NN), - ::testing::Values(ngraph::op::v4::Interpolate::NearestMode::SIMPLE), + ::testing::Values(ov::op::v4::Interpolate::ShapeCalcMode::SCALES), + ::testing::Values(ov::op::v4::Interpolate::CoordinateTransformMode::TF_HALF_PIXEL_FOR_NN), + ::testing::Values(ov::op::v4::Interpolate::NearestMode::SIMPLE), ::testing::Values(false), ::testing::Values(std::vector{0, 0, 1, 1}), ::testing::Values(std::vector{0, 0, 1, 1}), @@ -393,9 +393,9 @@ INSTANTIATE_TEST_SUITE_P(smoke_Interpolate_11_Pillow_Vertical, Interpolate11Laye INSTANTIATE_TEST_SUITE_P(smoke_Interpolate_11_Pillow_Vertical_BF, Interpolate11LayerTest, ::testing::Combine( ::testing::Combine( ::testing::ValuesIn(modesPillow), - ::testing::Values(ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES), - ::testing::Values(ngraph::op::v4::Interpolate::CoordinateTransformMode::TF_HALF_PIXEL_FOR_NN), - ::testing::Values(ngraph::op::v4::Interpolate::NearestMode::SIMPLE), + ::testing::Values(ov::op::v4::Interpolate::ShapeCalcMode::SCALES), + ::testing::Values(ov::op::v4::Interpolate::CoordinateTransformMode::TF_HALF_PIXEL_FOR_NN), + ::testing::Values(ov::op::v4::Interpolate::NearestMode::SIMPLE), ::testing::Values(false), ::testing::Values(std::vector{2, 1, 0, 0}), ::testing::Values(std::vector{2, 1, 0, 0}), diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/lstm_sequence.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/lstm_sequence.cpp index bc9278fb203ee8..95021cf61038f4 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/lstm_sequence.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/lstm_sequence.cpp @@ -29,9 +29,9 @@ std::vector> activations = {{"relu", "sigmoid", "tanh"} std::vector> activations_smoke = {{"relu", "sigmoid", "tanh"}}; std::vector clip{0.f}; std::vector clip_non_zeros{0.7f}; -std::vector direction = {ngraph::op::RecurrentSequenceDirection::FORWARD, - ngraph::op::RecurrentSequenceDirection::REVERSE, - ngraph::op::RecurrentSequenceDirection::BIDIRECTIONAL +std::vector direction = {ov::op::RecurrentSequenceDirection::FORWARD, + ov::op::RecurrentSequenceDirection::REVERSE, + ov::op::RecurrentSequenceDirection::BIDIRECTIONAL }; std::vector netPrecisions = {InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP16}; diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/matrix_nms.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/matrix_nms.cpp index 89e855b5062335..7a040365fff045 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/matrix_nms.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/matrix_nms.cpp @@ -18,17 +18,17 
@@ const std::vector> inStaticShapeParams = {{{3, 100, 4}, { const auto inputPrecisions = InputPrecisions{ov::element::f32, ov::element::i32, ov::element::f32}; -const std::vector sortResultType = {op::v8::MatrixNms::SortResultType::CLASSID, - op::v8::MatrixNms::SortResultType::SCORE, - op::v8::MatrixNms::SortResultType::NONE}; +const std::vector sortResultType = {ov::op::v8::MatrixNms::SortResultType::CLASSID, + ov::op::v8::MatrixNms::SortResultType::SCORE, + ov::op::v8::MatrixNms::SortResultType::NONE}; const std::vector outType = {element::i32, element::i64}; const std::vector topKParams = {TopKParams{-1, 5}, TopKParams{100, -1}}; const std::vector thresholdParams = {ThresholdParams{0.0f, 2.0f, 0.0f}, ThresholdParams{0.1f, 1.5f, 0.2f}}; const std::vector backgroudClass = {-1, 1}; const std::vector normalized = {true, false}; -const std::vector decayFunction = {op::v8::MatrixNms::DecayFunction::GAUSSIAN, - op::v8::MatrixNms::DecayFunction::LINEAR}; +const std::vector decayFunction = {ov::op::v8::MatrixNms::DecayFunction::GAUSSIAN, + ov::op::v8::MatrixNms::DecayFunction::LINEAR}; const std::vector outStaticShape = {true}; // only be true as gpu plugin not support nms with internal dynamic yet. diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/normalize_l2.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/normalize_l2.cpp index e8e90841be99bb..5a5369d3e9d80c 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/normalize_l2.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/normalize_l2.cpp @@ -18,9 +18,9 @@ const std::vector> axes = { }; const std::vector eps = {1e-7f, 1e-6f, 1e-5f, 1e-4f}; -const std::vector epsMode = { - ngraph::op::EpsMode::ADD, - ngraph::op::EpsMode::MAX, +const std::vector epsMode = { + ov::op::EpsMode::ADD, + ov::op::EpsMode::MAX, }; INSTANTIATE_TEST_SUITE_P(smoke_NormalizeL2, diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/pooling.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/pooling.cpp index 5e9614e6c674ff..af4cdeeb0f426a 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/pooling.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/pooling.cpp @@ -33,8 +33,8 @@ const std::vector> padBegins = {{0, 0}, {0, 2}}; const std::vector> padEnds = {{0, 0}, {0, 2}}; -const std::vector roundingTypes = {ngraph::op::RoundingType::CEIL, - ngraph::op::RoundingType::FLOOR}; +const std::vector roundingTypes = {ov::op::RoundingType::CEIL, + ov::op::RoundingType::FLOOR}; const std::vector indexElementTypes = {ngraph::element::Type_t::i32}; const std::vector axes = {0, 2}; const std::vector inputShapeSmall = {1, 3, 30, 30}; @@ -48,8 +48,8 @@ const auto maxPool_ExplicitPad_FloorRounding_Params = ::testing::Combine( ::testing::ValuesIn(strides), ::testing::ValuesIn(padBegins), ::testing::ValuesIn(padEnds), - ::testing::Values(ngraph::op::RoundingType::FLOOR), - ::testing::Values(ngraph::op::PadType::EXPLICIT), + ::testing::Values(ov::op::RoundingType::FLOOR), + ::testing::Values(ov::op::PadType::EXPLICIT), ::testing::Values(false) // placeholder value - exclude pad not applicable for max pooling ); @@ -73,8 +73,8 @@ const auto maxPool_ExplicitPad_CeilRounding_Params = ::testing::Combine( ::testing::Values(std::vector({1, 1})), ::testing::ValuesIn(padBegins), 
::testing::ValuesIn(padEnds), - ::testing::Values(ngraph::op::RoundingType::CEIL), - ::testing::Values(ngraph::op::PadType::EXPLICIT), + ::testing::Values(ov::op::RoundingType::CEIL), + ::testing::Values(ov::op::PadType::EXPLICIT), ::testing::Values(false) // placeholder value - exclude pad not applicable for max pooling ); @@ -100,8 +100,8 @@ const auto avgPoolExplicitPadCeilRoundingParams = ::testing::Combine( ::testing::Values(std::vector({1, 1})), ::testing::ValuesIn(padBegins), ::testing::ValuesIn(padEnds), - ::testing::Values(ngraph::op::RoundingType::CEIL), - ::testing::Values(ngraph::op::PadType::EXPLICIT), + ::testing::Values(ov::op::RoundingType::CEIL), + ::testing::Values(ov::op::PadType::EXPLICIT), ::testing::Values(true, false) ); @@ -124,8 +124,8 @@ const auto avgPoolExplicitPadFloorRoundingParams = ::testing::Combine( ::testing::ValuesIn(strides), ::testing::ValuesIn(padBegins), ::testing::ValuesIn(padEnds), - ::testing::Values(ngraph::op::RoundingType::FLOOR), - ::testing::Values(ngraph::op::PadType::EXPLICIT), + ::testing::Values(ov::op::RoundingType::FLOOR), + ::testing::Values(ov::op::PadType::EXPLICIT), ::testing::Values(true, false) ); @@ -150,9 +150,9 @@ const auto allPools_ValidPad_Params = ::testing::Combine( ::testing::ValuesIn(strides), ::testing::Values(std::vector({0, 0})), ::testing::ValuesIn(padEnds), - ::testing::Values(ngraph::op::RoundingType::FLOOR), // placeholder value - Rounding Type not applicable for Valid pad type + ::testing::Values(ov::op::RoundingType::FLOOR), // placeholder value - Rounding Type not applicable for Valid pad type // TODO: PadType::VALID seems not to ignore padBegins - ::testing::Values(ngraph::op::PadType::VALID), + ::testing::Values(ov::op::PadType::VALID), ::testing::Values(false) // placeholder value - exclude pad not applicable for max pooling ); @@ -180,8 +180,8 @@ const auto maxPool8_ExplicitPad_FloorRounding_Params = ::testing::Combine( ::testing::ValuesIn(padEnds), ::testing::ValuesIn(indexElementTypes), ::testing::ValuesIn(axes), - ::testing::Values(ngraph::op::RoundingType::FLOOR), - ::testing::Values(ngraph::op::PadType::EXPLICIT) + ::testing::Values(ov::op::RoundingType::FLOOR), + ::testing::Values(ov::op::PadType::EXPLICIT) ); INSTANTIATE_TEST_SUITE_P(smoke_MaxPool8_ExplicitPad_FloorRounding, MaxPoolingV8LayerTest, @@ -205,8 +205,8 @@ const auto maxPool8_ExplicitPad_CeilRounding_Params = ::testing::Combine( ::testing::ValuesIn(padEnds), ::testing::ValuesIn(indexElementTypes), ::testing::ValuesIn(axes), - ::testing::Values(ngraph::op::RoundingType::CEIL), - ::testing::Values(ngraph::op::PadType::EXPLICIT) + ::testing::Values(ov::op::RoundingType::CEIL), + ::testing::Values(ov::op::PadType::EXPLICIT) ); INSTANTIATE_TEST_SUITE_P(smoke_MaxPool8_ExplicitPad_CeilRounding, MaxPoolingV8LayerTest, diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/rnn_sequence.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/rnn_sequence.cpp index d789bf3e57a14f..f1ba2a1eaf2df3 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/rnn_sequence.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/rnn_sequence.cpp @@ -25,9 +25,9 @@ std::vector input_size{10}; std::vector> activations = {{"relu"}, {"sigmoid"}, {"tanh"}}; std::vector clip{0.f}; std::vector clip_non_zeros{0.7f}; -std::vector direction = {ngraph::op::RecurrentSequenceDirection::FORWARD, - 
ngraph::op::RecurrentSequenceDirection::REVERSE, - ngraph::op::RecurrentSequenceDirection::BIDIRECTIONAL, +std::vector direction = {ov::op::RecurrentSequenceDirection::FORWARD, + ov::op::RecurrentSequenceDirection::REVERSE, + ov::op::RecurrentSequenceDirection::BIDIRECTIONAL, }; std::vector netPrecisions = {InferenceEngine::Precision::FP32}; diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/scatter_elements_update.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/scatter_elements_update.cpp index 952cb4c82dc0f6..ba6c4b7e0d773a 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/scatter_elements_update.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/scatter_elements_update.cpp @@ -9,7 +9,6 @@ #include "common_test_utils/test_constants.hpp" using namespace LayerTestsDefinitions; -using namespace ngraph::opset3; namespace { // map> diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/quantized_convolution_backprop_data.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/quantized_convolution_backprop_data.cpp index 373e9eabd16934..83ea688c2eb3c8 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/quantized_convolution_backprop_data.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/quantized_convolution_backprop_data.cpp @@ -34,7 +34,7 @@ const auto quantConvBackpropData2DParams = ::testing::Combine( ::testing::ValuesIn(padEnds2D), ::testing::ValuesIn(dilations2D), ::testing::ValuesIn(numOutChannels), - ::testing::Values(ngraph::op::PadType::AUTO), + ::testing::Values(ov::op::PadType::AUTO), ::testing::ValuesIn(levels), ::testing::ValuesIn(granularity) ); @@ -62,7 +62,7 @@ const auto quantConvBackpropData3DParams = ::testing::Combine( ::testing::ValuesIn(padEnds3D), ::testing::ValuesIn(dilations3D), ::testing::ValuesIn(numOutChannels), - ::testing::Values(ngraph::op::PadType::AUTO), + ::testing::Values(ov::op::PadType::AUTO), ::testing::ValuesIn(levels), ::testing::ValuesIn(granularity) ); diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/quantized_group_convolution_backprop_data.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/quantized_group_convolution_backprop_data.cpp index e06ae6690d427d..0e851a36c51430 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/quantized_group_convolution_backprop_data.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/quantized_group_convolution_backprop_data.cpp @@ -36,7 +36,7 @@ const auto quantGroupConvBackpropData2DParams = ::testing::Combine( ::testing::ValuesIn(dilations2D), ::testing::ValuesIn(numOutChannels), ::testing::ValuesIn(numGroups), - ::testing::Values(ngraph::op::PadType::AUTO), + ::testing::Values(ov::op::PadType::AUTO), ::testing::ValuesIn(levels), ::testing::ValuesIn(granularity) ); @@ -65,7 +65,7 @@ const auto quantGroupConvBackpropData3DParams = ::testing::Combine( ::testing::ValuesIn(dilations3D), ::testing::ValuesIn(numOutChannels), ::testing::ValuesIn(numGroups), - ::testing::Values(ngraph::op::PadType::AUTO), + ::testing::Values(ov::op::PadType::AUTO), ::testing::ValuesIn(levels), ::testing::ValuesIn(granularity) ); diff --git 
a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/depth_to_space.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/depth_to_space.cpp index cd271734d817ae..b291aeaff1c57c 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/depth_to_space.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/depth_to_space.cpp @@ -39,10 +39,10 @@ class DepthToSpaceLayerGPUTest : public testing::WithParamInterfaceGetParam(); @@ -86,9 +86,9 @@ const std::vector input_types = { ov::element::i8 }; -const std::vector depthToSpaceModes = { - DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, - DepthToSpace::DepthToSpaceMode::DEPTH_FIRST +const std::vector depthToSpaceModes = { + ov::op::v0::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, + ov::op::v0::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST }; // ======================== Static Shapes Tests ======================== diff --git a/src/tests/functional/plugin/shared/include/behavior/infer_request/io_blob.hpp b/src/tests/functional/plugin/shared/include/behavior/infer_request/io_blob.hpp index 6487c8cb6d4b5a..ed664576d410c7 100644 --- a/src/tests/functional/plugin/shared/include/behavior/infer_request/io_blob.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/infer_request/io_blob.hpp @@ -345,11 +345,11 @@ TEST_P(InferRequestIOBBlobTest, canReallocateExternalBlobViaGet) { { ngraph::PartialShape shape({1, 3, 10, 10}); ngraph::element::Type type(ngraph::element::Type_t::f32); - auto param = std::make_shared(type, shape); + auto param = std::make_shared(type, shape); param->set_friendly_name("param"); - auto relu = std::make_shared(param); + auto relu = std::make_shared(param); relu->set_friendly_name("relu"); - auto result = std::make_shared(relu); + auto result = std::make_shared(relu); result->set_friendly_name("result"); ngraph::ParameterVector params = {param}; diff --git a/src/tests/functional/plugin/shared/include/behavior/plugin/core_integration.hpp b/src/tests/functional/plugin/shared/include/behavior/plugin/core_integration.hpp index 66aa7ea007837d..192dc5da09491a 100644 --- a/src/tests/functional/plugin/shared/include/behavior/plugin/core_integration.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/plugin/core_integration.hpp @@ -450,23 +450,23 @@ TEST_P(IEClassNetworkTestP, SetAffinityWithConstantBranches) { { ngraph::PartialShape shape({1, 84}); ngraph::element::Type type(ngraph::element::Type_t::f32); - auto param = std::make_shared(type, shape); + auto param = std::make_shared(type, shape); auto matMulWeights = - ngraph::opset6::Constant::create(ngraph::element::Type_t::f32, {10, 84}, {1}); - auto shapeOf = std::make_shared(matMulWeights); - auto gConst1 = ngraph::opset6::Constant::create(ngraph::element::Type_t::i32, {1}, {1}); - auto gConst2 = ngraph::opset6::Constant::create(ngraph::element::Type_t::i64, {}, {0}); - auto gather = std::make_shared(shapeOf, gConst1, gConst2); - auto concatConst = ngraph::opset6::Constant::create(ngraph::element::Type_t::i64, {1}, {1}); + ov::op::v0::Constant::create(ngraph::element::Type_t::f32, {10, 84}, {1}); + auto shapeOf = std::make_shared(matMulWeights); + auto gConst1 = ov::op::v0::Constant::create(ngraph::element::Type_t::i32, {1}, {1}); + auto gConst2 = ov::op::v0::Constant::create(ngraph::element::Type_t::i64, {}, {0}); + auto gather = std::make_shared(shapeOf, gConst1, gConst2); + auto concatConst = ov::op::v0::Constant::create(ngraph::element::Type_t::i64, {1}, {1}); auto concat = - 
std::make_shared(ngraph::NodeVector{concatConst, gather}, 0); - auto relu = std::make_shared(param); - auto reshape = std::make_shared(relu, concat, false); - auto matMul = std::make_shared(reshape, matMulWeights, false, true); + std::make_shared(ngraph::NodeVector{concatConst, gather}, 0); + auto relu = std::make_shared(param); + auto reshape = std::make_shared(relu, concat, false); + auto matMul = std::make_shared(reshape, matMulWeights, false, true); auto matMulBias = - ngraph::opset6::Constant::create(ngraph::element::Type_t::f32, {1, 10}, {1}); - auto addBias = std::make_shared(matMul, matMulBias); - auto result = std::make_shared(addBias); + ov::op::v0::Constant::create(ngraph::element::Type_t::f32, {1, 10}, {1}); + auto addBias = std::make_shared(matMul, matMulBias); + auto result = std::make_shared(addBias); ngraph::ParameterVector params = {param}; ngraph::ResultVector results = {result}; diff --git a/src/tests/functional/plugin/shared/include/behavior/plugin/preprocessing.hpp b/src/tests/functional/plugin/shared/include/behavior/plugin/preprocessing.hpp index aaf35c28aaebbc..faee036bb07bef 100644 --- a/src/tests/functional/plugin/shared/include/behavior/plugin/preprocessing.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/plugin/preprocessing.hpp @@ -88,14 +88,14 @@ struct PreprocessingPrecisionConvertTest : auto in_prec = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(with_extra_conv ? inPrc : decltype(inPrc)(InferenceEngine::Precision::FP32)); ov::ParameterVector paramsIn {std::make_shared(in_prec, ov::Shape(inputShape))}; - auto toF32 = std::make_shared(paramsIn[0], ngraph::element::Type_t::f32); + auto toF32 = std::make_shared(paramsIn[0], ngraph::element::Type_t::f32); - auto constNode = std::make_shared( + auto constNode = std::make_shared( ngraph::element::Type_t::i64, ngraph::Shape{inputShape.size()}, inputShape); std::shared_ptr reshape_input = with_extra_conv ? 
toF32->shared_from_this() : paramsIn[0]; - auto reshape = std::dynamic_pointer_cast( - std::make_shared(reshape_input, constNode, specialZero)); - ngraph::ResultVector results{std::make_shared(reshape)}; + auto reshape = std::dynamic_pointer_cast( + std::make_shared(reshape_input, constNode, specialZero)); + ngraph::ResultVector results{std::make_shared(reshape)}; return std::make_shared(results, paramsIn, "Reshape"); }; diff --git a/src/tests/functional/plugin/shared/include/behavior/plugin/set_preprocess.hpp b/src/tests/functional/plugin/shared/include/behavior/plugin/set_preprocess.hpp index e2e7b12bb3eba9..92d9c67231c762 100644 --- a/src/tests/functional/plugin/shared/include/behavior/plugin/set_preprocess.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/plugin/set_preprocess.hpp @@ -64,11 +64,11 @@ TEST_P(InferRequestPreprocessTest, SetMeanImagePreProcessGetBlob) { { ngraph::PartialShape shape({1, 3, 10, 10}); ngraph::element::Type type(ngraph::element::Type_t::f32); - auto param = std::make_shared(type, shape); + auto param = std::make_shared(type, shape); param->set_friendly_name("param"); - auto relu = std::make_shared(param); + auto relu = std::make_shared(param); relu->set_friendly_name("relu"); - auto result = std::make_shared(relu); + auto result = std::make_shared(relu); result->set_friendly_name("result"); ngraph::ParameterVector params = {param}; @@ -130,11 +130,11 @@ TEST_P(InferRequestPreprocessTest, SetMeanImagePreProcessSetBlob) { { ngraph::PartialShape shape({1, 3, 10, 10}); ngraph::element::Type type(ngraph::element::Type_t::f32); - auto param = std::make_shared(type, shape); + auto param = std::make_shared(type, shape); param->set_friendly_name("param"); - auto relu = std::make_shared(param); + auto relu = std::make_shared(param); relu->set_friendly_name("relu"); - auto result = std::make_shared(relu); + auto result = std::make_shared(relu); result->set_friendly_name("result"); ngraph::ParameterVector params = {param}; @@ -199,11 +199,11 @@ TEST_P(InferRequestPreprocessTest, SetMeanValuePreProcessGetBlob) { { ngraph::PartialShape shape({1, 3, 10, 10}); ngraph::element::Type type(ngraph::element::Type_t::f32); - auto param = std::make_shared(type, shape); + auto param = std::make_shared(type, shape); param->set_friendly_name("param"); - auto relu = std::make_shared(param); + auto relu = std::make_shared(param); relu->set_friendly_name("relu"); - auto result = std::make_shared(relu); + auto result = std::make_shared(relu); result->set_friendly_name("result"); ngraph::ParameterVector params = {param}; @@ -259,11 +259,11 @@ TEST_P(InferRequestPreprocessTest, SetMeanValuePreProcessSetBlob) { { ngraph::PartialShape shape({1, 3, 10, 10}); ngraph::element::Type type(ngraph::element::Type_t::f32); - auto param = std::make_shared(type, shape); + auto param = std::make_shared(type, shape); param->set_friendly_name("param"); - auto relu = std::make_shared(param); + auto relu = std::make_shared(param); relu->set_friendly_name("relu"); - auto result = std::make_shared(relu); + auto result = std::make_shared(relu); result->set_friendly_name("result"); ngraph::ParameterVector params = {param}; @@ -321,11 +321,11 @@ TEST_P(InferRequestPreprocessTest, ReverseInputChannelsPreProcessGetBlob) { { ngraph::PartialShape shape({1, 3, 10, 10}); ngraph::element::Type type(ngraph::element::Type_t::f32); - auto param = std::make_shared(type, shape); + auto param = std::make_shared(type, shape); param->set_friendly_name("param"); - auto relu = std::make_shared(param); + auto relu = 
std::make_shared(param); relu->set_friendly_name("relu"); - auto result = std::make_shared(relu); + auto result = std::make_shared(relu); result->set_friendly_name("result"); ngraph::ParameterVector params = {param}; @@ -382,11 +382,11 @@ TEST_P(InferRequestPreprocessTest, ReverseInputChannelsPreProcessSetBlob) { { ngraph::PartialShape shape({1, 3, 10, 10}); ngraph::element::Type type(ngraph::element::Type_t::f32); - auto param = std::make_shared(type, shape); + auto param = std::make_shared(type, shape); param->set_friendly_name("param"); - auto relu = std::make_shared(param); + auto relu = std::make_shared(param); relu->set_friendly_name("relu"); - auto result = std::make_shared(relu); + auto result = std::make_shared(relu); result->set_friendly_name("result"); ngraph::ParameterVector params = {param}; @@ -446,11 +446,11 @@ TEST_P(InferRequestPreprocessTest, SetScalePreProcessGetBlob) { { ngraph::PartialShape shape({1, 3, 10, 10}); ngraph::element::Type type(ngraph::element::Type_t::f32); - auto param = std::make_shared(type, shape); + auto param = std::make_shared(type, shape); param->set_friendly_name("param"); - auto relu = std::make_shared(param); + auto relu = std::make_shared(param); relu->set_friendly_name("relu"); - auto result = std::make_shared(relu); + auto result = std::make_shared(relu); result->set_friendly_name("result"); ngraph::ParameterVector params = {param}; @@ -506,11 +506,11 @@ TEST_P(InferRequestPreprocessTest, SetScalePreProcessSetBlob) { { ngraph::PartialShape shape({1, 3, 10, 10}); ngraph::element::Type type(ngraph::element::Type_t::f32); - auto param = std::make_shared(type, shape); + auto param = std::make_shared(type, shape); param->set_friendly_name("param"); - auto relu = std::make_shared(param); + auto relu = std::make_shared(param); relu->set_friendly_name("relu"); - auto result = std::make_shared(relu); + auto result = std::make_shared(relu); result->set_friendly_name("result"); ngraph::ParameterVector params = {param}; @@ -656,11 +656,11 @@ TEST_P(InferRequestPreprocessConversionTest, Infer) { { ngraph::PartialShape shape({batch, channels, shape_size, shape_size}); ngraph::element::Type type(InferenceEngine::details::convertPrecision(netPrecision)); - auto param = std::make_shared(type, shape); + auto param = std::make_shared(type, shape); param->set_friendly_name("param"); - auto relu = std::make_shared(param); + auto relu = std::make_shared(param); relu->set_friendly_name("relu"); - auto result = std::make_shared(relu); + auto result = std::make_shared(relu); result->set_friendly_name("result"); ngraph::ParameterVector params = {param}; @@ -850,11 +850,11 @@ TEST_P(InferRequestPreprocessDynamicallyInSetBlobTest, Infer) { { ngraph::PartialShape shape({batch, channels, shape_size, shape_size}); ngraph::element::Type type(InferenceEngine::details::convertPrecision(netPrecision)); - auto param = std::make_shared(type, shape); + auto param = std::make_shared(type, shape); param->set_friendly_name("param"); - auto relu = std::make_shared(param); + auto relu = std::make_shared(param); relu->set_friendly_name("relu"); - auto result = std::make_shared(relu); + auto result = std::make_shared(relu); result->set_friendly_name("result"); ngraph::ParameterVector params = {param}; @@ -982,11 +982,11 @@ TEST_P(InferRequestPreprocessTest, InferWithRGB2BGRConversion) { { ngraph::PartialShape shape({batch, channels, shape_size, shape_size}); ngraph::element::Type type(InferenceEngine::details::convertPrecision(netPrecision)); - auto param = std::make_shared(type, 
shape); + auto param = std::make_shared(type, shape); param->set_friendly_name("param"); - auto relu = std::make_shared(param); + auto relu = std::make_shared(param); relu->set_friendly_name("relu"); - auto result = std::make_shared(relu); + auto result = std::make_shared(relu); result->set_friendly_name("result"); ngraph::ParameterVector params = {param}; diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/concat_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/concat_transformation.hpp index 48c3ba7d2b8353..da93245b4fe0a4 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/concat_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/concat_transformation.hpp @@ -15,10 +15,10 @@ namespace LayerTestsDefinitions { class ConcatTransformationTestValues { public: - std::shared_ptr input_constant1; + std::shared_ptr input_constant1; ngraph::builder::subgraph::FakeQuantizeOnData fqOnData1; ngraph::builder::subgraph::DequantizationOperations dequantization1; - std::shared_ptr input_constant2; + std::shared_ptr input_constant2; ngraph::builder::subgraph::FakeQuantizeOnData fqOnData2; ngraph::builder::subgraph::DequantizationOperations dequantization2; }; diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/depth_to_space_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/depth_to_space_transformation.hpp index 2b96b2e57206d3..45b1947bb37c57 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/depth_to_space_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/depth_to_space_transformation.hpp @@ -15,7 +15,7 @@ typedef std::tuple< ngraph::element::Type, ngraph::PartialShape, std::string, - ngraph::opset1::DepthToSpace::DepthToSpaceMode, + ov::op::v0::DepthToSpace::DepthToSpaceMode, size_t> DepthToSpaceTransformationParams; class DepthToSpaceTransformation : diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/pad_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/pad_transformation.hpp index bfdf719f47224b..17fb3a7663a385 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/pad_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/pad_transformation.hpp @@ -21,7 +21,7 @@ class PadTransformationParam { typedef std::tuple< ngraph::element::Type, ngraph::PartialShape, - ngraph::op::PadMode, + ov::op::PadMode, std::string, ov::pass::low_precision::LayerTransformation::Params, PadTransformationParam diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/loop.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/loop.hpp index fab554125229db..a7d05848121acd 100644 --- a/src/tests/functional/plugin/shared/include/single_layer_tests/loop.hpp +++ b/src/tests/functional/plugin/shared/include/single_layer_tests/loop.hpp @@ -47,19 +47,19 @@ TEST_P(TrivialLoopTest, PassThroughBody) { const auto shape = ngraph::Shape{ieShape}; const auto scalarShape = ngraph::Shape{}; - auto start = std::make_shared(prc, shape); - auto count = std::make_shared(ngraph::element::i64, scalarShape, 5); - auto icond = std::make_shared(ngraph::element::boolean, scalarShape, true); + auto start = std::make_shared(prc, shape); + auto count = 
std::make_shared(ngraph::element::i64, scalarShape, 5); + auto icond = std::make_shared(ngraph::element::boolean, scalarShape, true); // Loop body - auto b_data = std::make_shared(prc, shape); - auto b_cond = std::make_shared(ngraph::element::boolean, scalarShape); + auto b_data = std::make_shared(prc, shape); + auto b_cond = std::make_shared(ngraph::element::boolean, scalarShape); auto body = std::make_shared( ngraph::OutputVector {b_cond, b_data}, // | passthrough body, no data changes ngraph::ParameterVector {b_cond, b_data}); // | input -> output - auto loop = std::make_shared(count, icond); + auto loop = std::make_shared(count, icond); loop->set_function(body); loop->set_special_body_ports({-1, 0}); loop->set_invariant_input(b_cond, icond); @@ -91,20 +91,20 @@ TEST_P(TrivialLoopTest, UnusedInputBody) { const auto shape = ngraph::Shape{ieShape}; const auto scalarShape = ngraph::Shape{}; - auto start = std::make_shared(prc, shape); - auto count = std::make_shared(ngraph::element::i64, scalarShape, 5); - auto icond = std::make_shared(ngraph::element::boolean, scalarShape, true); + auto start = std::make_shared(prc, shape); + auto count = std::make_shared(ngraph::element::i64, scalarShape, 5); + auto icond = std::make_shared(ngraph::element::boolean, scalarShape, true); // Loop body - auto b_data = std::make_shared(prc, shape); - auto b_cond = std::make_shared(ngraph::element::boolean, scalarShape, true); - auto b_iter = std::make_shared(ngraph::element::i64, scalarShape); + auto b_data = std::make_shared(prc, shape); + auto b_cond = std::make_shared(ngraph::element::boolean, scalarShape, true); + auto b_iter = std::make_shared(ngraph::element::i64, scalarShape); auto body = std::make_shared( ngraph::OutputVector {b_cond, b_data}, ngraph::ParameterVector {b_data, b_iter}); - auto loop = std::make_shared(count, icond); + auto loop = std::make_shared(count, icond); loop->set_function(body); loop->set_special_body_ports({1, 0}); loop->set_invariant_input(b_data, start); diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/basic_lstm.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/basic_lstm.hpp index cabf234085ee79..f75cb259fced23 100644 --- a/src/tests/functional/plugin/shared/include/subgraph_tests/basic_lstm.hpp +++ b/src/tests/functional/plugin/shared/include/subgraph_tests/basic_lstm.hpp @@ -32,7 +32,7 @@ TEST_P(Basic_LSTM_S, CompareWithRefImpl_LowLatencyTransformation) { // todo: it is better to modify the model -> use ShapeOf() and Gather() std::vector outFormShapes1 = { 1, 1, third_dim }; - auto pattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{3}, outFormShapes1); + auto pattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{3}, outFormShapes1); auto param_target_inputs = function->get_parameters().at(0)->output(0).get_target_inputs(); // replace hardcoded shape diff --git a/src/tests/functional/plugin/shared/src/behavior/executable_network/exec_graph_info.cpp b/src/tests/functional/plugin/shared/src/behavior/executable_network/exec_graph_info.cpp index f4466c80ea9208..38072636f3d370 100644 --- a/src/tests/functional/plugin/shared/src/behavior/executable_network/exec_graph_info.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/executable_network/exec_graph_info.cpp @@ -466,9 +466,9 @@ void ExecGraphUniqueNodeNames::SetUp() { auto split_axis_op = std::make_shared(ov::element::Type_t::i64, ov::Shape{}, std::vector{1}); auto split = std::make_shared(params[0], split_axis_op, 2); - auto concat = 
std::make_shared(split->outputs(), 1); + auto concat = std::make_shared(split->outputs(), 1); - ngraph::ResultVector results{std::make_shared(concat)}; + ngraph::ResultVector results{std::make_shared(concat)}; fnPtr = std::make_shared(results, params, "SplitConvConcat"); } diff --git a/src/tests/functional/plugin/shared/src/behavior/infer_request/memory_states.cpp b/src/tests/functional/plugin/shared/src/behavior/infer_request/memory_states.cpp index 413c125f82dece..ad1bac678f81de 100644 --- a/src/tests/functional/plugin/shared/src/behavior/infer_request/memory_states.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/infer_request/memory_states.cpp @@ -39,18 +39,18 @@ InferenceEngine::CNNNetwork InferRequestVariableStateTest::getNetwork() { ngraph::Shape shape = {1, 200}; ngraph::element::Type type = ngraph::element::f32; - auto input = std::make_shared(type, shape); - auto mem_i1 = std::make_shared(type, shape, 0); - auto mem_r1 = std::make_shared(mem_i1, "r_1-3"); - auto mul1 = std::make_shared(mem_r1, input); - - auto mem_i2 = std::make_shared(type, shape, 0); - auto mem_r2 = std::make_shared(mem_i2, "c_1-3"); - auto mul2 = std::make_shared(mem_r2, mul1); - auto mem_w2 = std::make_shared(mul2, "c_1-3"); - - auto mem_w1 = std::make_shared(mul2, "r_1-3"); - auto sigm = std::make_shared(mul2); + auto input = std::make_shared(type, shape); + auto mem_i1 = std::make_shared(type, shape, 0); + auto mem_r1 = std::make_shared(mem_i1, "r_1-3"); + auto mul1 = std::make_shared(mem_r1, input); + + auto mem_i2 = std::make_shared(type, shape, 0); + auto mem_r2 = std::make_shared(mem_i2, "c_1-3"); + auto mul2 = std::make_shared(mem_r2, mul1); + auto mem_w2 = std::make_shared(mul2, "c_1-3"); + + auto mem_w1 = std::make_shared(mul2, "r_1-3"); + auto sigm = std::make_shared(mul2); sigm->set_friendly_name("sigmod_state"); mem_r1->set_friendly_name("Memory_1"); mem_w1->add_control_dependency(mem_r1); diff --git a/src/tests/functional/plugin/shared/src/behavior/infer_request/set_io_blob_precision.cpp b/src/tests/functional/plugin/shared/src/behavior/infer_request/set_io_blob_precision.cpp index f73267628dc8d9..494056d5c21af2 100644 --- a/src/tests/functional/plugin/shared/src/behavior/infer_request/set_io_blob_precision.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/infer_request/set_io_blob_precision.cpp @@ -105,9 +105,9 @@ void SetBlobTest::SetUp() { auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(precNg); ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape(IS))}; - auto axisNode = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{}, std::vector{-1})->output(0); + auto axisNode = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{}, std::vector{-1})->output(0); auto cumSum = std::make_shared(params[0], axisNode, false, false); - ngraph::ResultVector results{std::make_shared(cumSum)}; + ngraph::ResultVector results{std::make_shared(cumSum)}; function = std::make_shared(results, params, "InferSetBlob"); } diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/batched_tensors.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/batched_tensors.cpp index 7a6e30368bca8e..f252aa8792f327 100644 --- a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/batched_tensors.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/batched_tensors.cpp @@ -52,14 +52,14 @@ std::shared_ptr OVInferRequestBatchedTests::create_n_inputs(size_t n, ele ParameterVector params; for (size_t i = 0; 
i < n; i++) { auto index_str = std::to_string(i); - auto data1 = std::make_shared(type, shape); + auto data1 = std::make_shared(type, shape); data1->set_friendly_name("input" + index_str); data1->get_output_tensor(0).set_names({"tensor_input" + index_str}); data1->set_layout(layout); auto constant = opset8::Constant::create(type, {1}, {1}); - auto op1 = std::make_shared(data1, constant); + auto op1 = std::make_shared(data1, constant); op1->set_friendly_name("Add" + index_str); - auto res1 = std::make_shared(op1); + auto res1 = std::make_shared(op1); res1->set_friendly_name("Result" + index_str); res1->get_output_tensor(0).set_names({"tensor_output" + index_str}); params.push_back(data1); diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/inference.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/inference.cpp index 71c4629bbb7def..ac4197bec8af25 100644 --- a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/inference.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/inference.cpp @@ -30,13 +30,13 @@ std::shared_ptr OVInferRequestInferenceTests::create_n_inputs(size_t n, ParameterVector params; for (size_t i = 0; i < n; i++) { auto index_str = std::to_string(i); - auto data1 = std::make_shared(type, shape); + auto data1 = std::make_shared(type, shape); data1->set_friendly_name("input" + index_str); data1->get_output_tensor(0).set_names({"tensor_input" + index_str}); auto constant = opset8::Constant::create(type, {1}, {1}); - auto op1 = std::make_shared(data1, constant); + auto op1 = std::make_shared(data1, constant); op1->set_friendly_name("Add" + index_str); - auto res1 = std::make_shared(op1); + auto res1 = std::make_shared(op1); res1->set_friendly_name("Result" + index_str); res1->get_output_tensor(0).set_names({"tensor_output" + index_str}); params.push_back(data1); diff --git a/src/tests/functional/plugin/shared/src/behavior/plugin/caching_tests.cpp b/src/tests/functional/plugin/shared/src/behavior/plugin/caching_tests.cpp index ad67be003a92d7..4b8f27afd780f1 100644 --- a/src/tests/functional/plugin/shared/src/behavior/plugin/caching_tests.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/plugin/caching_tests.cpp @@ -28,16 +28,16 @@ namespace LayerTestsDefinitions { static std::shared_ptr simple_function_multiply(ngraph::element::Type type, size_t batchSize) { // Create Parameter operation with static shape - auto data = std::make_shared(type, ngraph::Shape{batchSize, 2}); + auto data = std::make_shared(type, ngraph::Shape{batchSize, 2}); data->set_friendly_name("Parameter"); - auto constant = ngraph::opset6::Constant::create(type, ngraph::Shape{1}, {2}); + auto constant = ov::op::v0::Constant::create(type, ngraph::Shape{1}, {2}); constant->set_friendly_name("constant"); - auto mul = std::make_shared(data, constant); + auto mul = std::make_shared(data, constant); mul->set_friendly_name("mul"); // Create Result operation - auto res = std::make_shared(mul); + auto res = std::make_shared(mul); res->set_friendly_name("res"); // Create nGraph function @@ -48,14 +48,14 @@ static std::shared_ptr simple_function_multiply(ngraph::elemen static std::shared_ptr simple_function_relu(ngraph::element::Type type, size_t batchSize) { // Create Parameter operation with static shape - auto data = std::make_shared(type, ngraph::Shape{batchSize, 2}); + auto data = std::make_shared(type, ngraph::Shape{batchSize, 2}); data->set_friendly_name("Parameter"); - auto relu = std::make_shared(data); + auto relu = 
std::make_shared(data); relu->set_friendly_name("relu"); // Create Result operation - auto res = std::make_shared(relu); + auto res = std::make_shared(relu); res->set_friendly_name("res"); // Create nGraph function diff --git a/src/tests/functional/plugin/shared/src/execution_graph_tests/keep_assign.cpp b/src/tests/functional/plugin/shared/src/execution_graph_tests/keep_assign.cpp index 049bacd5a14787..379ce5943b4b0b 100644 --- a/src/tests/functional/plugin/shared/src/execution_graph_tests/keep_assign.cpp +++ b/src/tests/functional/plugin/shared/src/execution_graph_tests/keep_assign.cpp @@ -30,15 +30,14 @@ TEST_P(ExecGraphKeepAssignNode, KeepAssignNode) { ngraph::element::Type type = ngraph::element::f32; using std::make_shared; - using namespace ngraph::opset5; // Some simple graph with Memory(Assign) node // in read // - auto input = make_shared(type, shape); // | \ / // - auto mem_i = make_shared(type, shape, 0); // | mul // - auto mem_r = make_shared(mem_i, "id"); // | / \ // - auto mul = make_shared(mem_r, input); // sum assign // - auto mem_w = make_shared(mul, "id"); // | // - auto sum = make_shared(mul, input); // out // + auto input = make_shared(type, shape); // | \ / // + auto mem_i = make_shared(type, shape, 0); // | mul // + auto mem_r = make_shared(mem_i, "id"); // | / \ // + auto mul = make_shared(mem_r, input); // sum assign // + auto mem_w = make_shared(mul, "id"); // | // + auto sum = make_shared(mul, input); // out // mem_w->add_control_dependency(mem_r); sum->add_control_dependency(mem_w); diff --git a/src/tests/functional/plugin/shared/src/execution_graph_tests/nms_transformation_for_last_node.cpp b/src/tests/functional/plugin/shared/src/execution_graph_tests/nms_transformation_for_last_node.cpp index 78052b1f7e8fe0..8f4e535455043e 100644 --- a/src/tests/functional/plugin/shared/src/execution_graph_tests/nms_transformation_for_last_node.cpp +++ b/src/tests/functional/plugin/shared/src/execution_graph_tests/nms_transformation_for_last_node.cpp @@ -44,8 +44,8 @@ TEST_P(ExecGraphNmsTransformLastNode, CheckIfCanBeInfered) { float in_boxes[8] = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0}; float in_scores[8] = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0}; - auto boxes = std::make_shared(element::f32, boxes_shape); - auto scores = std::make_shared(element::f32, scores_shape); + auto boxes = std::make_shared(element::f32, boxes_shape); + auto scores = std::make_shared(element::f32, scores_shape); auto max_output_boxes_per_class = opset5::Constant::create(element::i64, Shape{}, {10}); auto iou_threshold = opset5::Constant::create(element::f32, Shape{}, {0.75}); auto score_threshold = opset5::Constant::create(element::f32, Shape{}, {0.7}); @@ -53,7 +53,7 @@ TEST_P(ExecGraphNmsTransformLastNode, CheckIfCanBeInfered) { iou_threshold, score_threshold, opset5::NonMaxSuppression::BoxEncodingType::CORNER, true, element::i64); ngraph::ResultVector results { - std::make_shared(nms->output(0)), + std::make_shared(nms->output(0)), }; auto f = std::make_shared(results, ParameterVector{boxes, scores}, "NMS"); diff --git a/src/tests/functional/plugin/shared/src/execution_graph_tests/num_inputs_fusing_bin_conv.cpp b/src/tests/functional/plugin/shared/src/execution_graph_tests/num_inputs_fusing_bin_conv.cpp index e8e35d3c872dfa..474d6b7869954c 100644 --- a/src/tests/functional/plugin/shared/src/execution_graph_tests/num_inputs_fusing_bin_conv.cpp +++ b/src/tests/functional/plugin/shared/src/execution_graph_tests/num_inputs_fusing_bin_conv.cpp @@ -28,7 +28,7 @@ void 
ExecGraphInputsFusingBinConv::SetUp() { const size_t numOutChannels = 16, numGroups = 16; const std::vector strides = {1, 1}, dilations = {1, 1}; const std::vector padsBegin = {1, 1}, padsEnd = {0, 0}; - const ngraph::op::PadType paddingType = ngraph::op::PadType::EXPLICIT; + const ov::op::PadType paddingType = ov::op::PadType::EXPLICIT; const float padValue = 1.0; targetDevice = this->GetParam(); @@ -38,9 +38,9 @@ void ExecGraphInputsFusingBinConv::SetUp() { auto conv = ngraph::builder::makeGroupConvolution(binConv, ngraph::element::f32, convKernelSize, strides, padsBegin, padsEnd, dilations, paddingType, numOutChannels, numGroups); - auto biasNode = std::make_shared(ngraph::element::f32, std::vector{16, 1, 1}); - auto add = std::make_shared(conv, biasNode); - ngraph::ResultVector results{std::make_shared(add)}; + auto biasNode = std::make_shared(ngraph::element::f32, std::vector{16, 1, 1}); + auto add = std::make_shared(conv, biasNode); + ngraph::ResultVector results{std::make_shared(add)}; fnPtr = std::make_shared(results, params, "BinConvFuseConv"); } diff --git a/src/tests/functional/plugin/shared/src/execution_graph_tests/remove_parameter.cpp b/src/tests/functional/plugin/shared/src/execution_graph_tests/remove_parameter.cpp index dd29dd66c55b27..66ca2aa3243eb3 100644 --- a/src/tests/functional/plugin/shared/src/execution_graph_tests/remove_parameter.cpp +++ b/src/tests/functional/plugin/shared/src/execution_graph_tests/remove_parameter.cpp @@ -42,8 +42,8 @@ TEST_P(ExecGraphRemoveParameterNode, RemoveParameterNode) { // out // auto input = make_shared(type, shape); auto input2 = make_shared(type, shape); - auto mul = make_shared(input2, input); - auto sum = make_shared(mul, input); + auto mul = make_shared(input2, input); + auto sum = make_shared(mul, input); auto function = std::make_shared( ngraph::NodeVector{sum}, ngraph::ParameterVector{input2, input}, diff --git a/src/tests/functional/plugin/shared/src/execution_graph_tests/runtime_precision.cpp b/src/tests/functional/plugin/shared/src/execution_graph_tests/runtime_precision.cpp index 97071f12363855..16fcd73411f726 100644 --- a/src/tests/functional/plugin/shared/src/execution_graph_tests/runtime_precision.cpp +++ b/src/tests/functional/plugin/shared/src/execution_graph_tests/runtime_precision.cpp @@ -47,10 +47,10 @@ std::shared_ptr makeFakeQuantizeReluFunction(const std::vector auto inputHighNode = ngraph::builder::makeConstant(ngraph::element::f32, {1, 1, 1, 1}, {255}); auto outputLowNode = ngraph::builder::makeConstant(ngraph::element::f32, {1, 1, 1, 1}, {0}); auto outputHighNode = ngraph::builder::makeConstant(ngraph::element::f32, {1, 1, 1, 1}, {255}); - auto fakeQuantize = std::make_shared(inputs[0], inputLowNode, inputHighNode, outputLowNode, outputHighNode, 256); + auto fakeQuantize = std::make_shared(inputs[0], inputLowNode, inputHighNode, outputLowNode, outputHighNode, 256); fakeQuantize->set_friendly_name("FakeQuantize"); - auto relu = std::make_shared(fakeQuantize); + auto relu = std::make_shared(fakeQuantize); relu->set_friendly_name("Relu"); auto function = std::make_shared(relu, inputs, "FakeQuantizeRelu"); @@ -66,10 +66,10 @@ std::shared_ptr makeFakeQuantizeBinaryConvolutionFunction(cons auto inputHighNode = ngraph::builder::makeConstant(ngraph::element::f32, {1, 1, 1, 1}, {1}); auto outputLowNode = ngraph::builder::makeConstant(ngraph::element::f32, {1, 1, 1, 1}, {0}); auto outputHighNode = ngraph::builder::makeConstant(ngraph::element::f32, {1, 1, 1, 1}, {1}); - auto fakeQuantize = std::make_shared(inputs[0], 
inputLowNode, inputHighNode, outputLowNode, outputHighNode, 2); + auto fakeQuantize = std::make_shared(inputs[0], inputLowNode, inputHighNode, outputLowNode, outputHighNode, 2); fakeQuantize->set_friendly_name("FakeQuantize"); - auto binConv = ngraph::builder::makeBinaryConvolution(fakeQuantize, {3, 3}, {1, 1}, {1, 1}, {1, 1}, {1, 1}, ngraph::op::PadType::EXPLICIT, 32, 0); + auto binConv = ngraph::builder::makeBinaryConvolution(fakeQuantize, {3, 3}, {1, 1}, {1, 1}, {1, 1}, {1, 1}, ov::op::PadType::EXPLICIT, 32, 0); binConv->set_friendly_name("BinaryConvolution"); auto function = std::make_shared(binConv, inputs, "FakeQuantizeBinaryConvolution"); diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/depth_to_space_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/depth_to_space_transformation.cpp index d7336b247a3026..84a35de492b7f6 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/depth_to_space_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/depth_to_space_transformation.cpp @@ -28,20 +28,18 @@ #include "ov_lpt_models/depth_to_space.hpp" -using namespace ngraph::opset1; - namespace LayerTestsDefinitions { std::string DepthToSpaceTransformation::getTestCaseName(const testing::TestParamInfo& obj) { - static std::map names = { - {DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, "BLOCKS_FIRST"}, - {DepthToSpace::DepthToSpaceMode::DEPTH_FIRST, "DEPTH_FIRST"}, + static std::map names = { + {ov::op::v0::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, "BLOCKS_FIRST"}, + {ov::op::v0::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST, "DEPTH_FIRST"}, }; ngraph::element::Type precision; ngraph::PartialShape inputShape; std::string targetDevice; - DepthToSpace::DepthToSpaceMode mode; + ov::op::v0::DepthToSpace::DepthToSpaceMode mode; size_t blockSize; auto params = LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8(); std::tie(precision, inputShape, targetDevice, mode, blockSize) = obj.param; @@ -55,7 +53,7 @@ std::string DepthToSpaceTransformation::getTestCaseName(const testing::TestParam void DepthToSpaceTransformation::SetUp() { ngraph::element::Type precision; ngraph::PartialShape inputShape; - DepthToSpace::DepthToSpaceMode mode; + ov::op::v0::DepthToSpace::DepthToSpaceMode mode; size_t blockSize; std::tie(precision, inputShape, targetDevice, mode, blockSize) = this->GetParam(); diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/interpolate_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/interpolate_transformation.cpp index 56012b6d91289f..c3ca18b5bf485c 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/interpolate_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/interpolate_transformation.cpp @@ -55,7 +55,7 @@ void InterpolateTransformation::SetUp() { interpAttributes attributes; std::tie(precision, shapes, targetDevice, attributes) = this->GetParam(); - ngraph::op::InterpolateAttrs interpAttrs; + ov::op::v0::Interpolate::Attributes interpAttrs; interpAttrs.axes = attributes.axes; interpAttrs.mode = attributes.mode; interpAttrs.align_corners = attributes.align_corners; diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_concat.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_concat.cpp index 
f91f934de24c99..9339d4c7fab388 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_concat.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_concat.cpp @@ -70,7 +70,7 @@ void OutputLayersConcat::SetUp() { auto ngPrecision = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - const auto input1 = std::make_shared(ngPrecision, ngraph::Shape(inputShape1)); + const auto input1 = std::make_shared(ngPrecision, ngraph::Shape(inputShape1)); input1->set_friendly_name("input1"); const auto fakeQuantize1 = ngraph::builder::makeFakeQuantize( @@ -80,7 +80,7 @@ void OutputLayersConcat::SetUp() { ASSERT_EQ(4ul, inputShape1.size()) << "unexpected input layout"; const InferenceEngine::SizeVector inputShape2 = { inputShape1[0], inputShape1[1] * 2ul, inputShape1[2], inputShape1[3] }; - const auto input2 = std::make_shared(ngPrecision, ngraph::Shape(inputShape2)); + const auto input2 = std::make_shared(ngPrecision, ngraph::Shape(inputShape2)); input2->set_friendly_name("input2"); const auto fakeQuantize2 = ngraph::builder::makeFakeQuantize( @@ -88,12 +88,12 @@ void OutputLayersConcat::SetUp() { { 0.f }, { 255.f / 2.f }, { 0.f }, { 255.f / 2.f }); fakeQuantize2->set_friendly_name("fakeQuantize2"); - const std::shared_ptr concat = std::make_shared( + const std::shared_ptr concat = std::make_shared( ngraph::OutputVector{ fakeQuantize1->output(0), fakeQuantize2->output(0)}, 1); concat->set_friendly_name("concat"); const float k = 1.f; - const auto weights = ngraph::opset1::Constant::create( + const auto weights = ov::op::v0::Constant::create( ngPrecision, ngraph::Shape{ inputShape1[1ul] + inputShape2[1ul], inputShape1[1ul] + inputShape2[1ul], 1ul, 1ul }, std::vector((inputShape1[1ul] + inputShape2[1ul]) * (inputShape1[1ul] + inputShape2[1ul]), 1ul)); @@ -103,7 +103,7 @@ void OutputLayersConcat::SetUp() { { -128.f / k }, { 127.f / k }, { -128.f / k }, { 127.f / k }); fakeQuantizeOnWeights->set_friendly_name("fakeQuantizeOnWeights"); - const std::shared_ptr convolution = std::make_shared( + const std::shared_ptr convolution = std::make_shared( concat->output(0), fakeQuantizeOnWeights, ngraph::Strides{ 1ul, 1ul }, @@ -113,9 +113,9 @@ void OutputLayersConcat::SetUp() { convolution->set_friendly_name("convolution"); ngraph::ResultVector results { - std::make_shared(concat), - std::make_shared(convolution), - std::make_shared(fakeQuantize2) + std::make_shared(concat), + std::make_shared(convolution), + std::make_shared(fakeQuantize2) }; function = std::make_shared(results, ngraph::ParameterVector { input1, input2 }, "OutputLayersHandling"); diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_concat_multi_channel.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_concat_multi_channel.cpp index bb82dbdaf77459..eb095e6ab5ecd1 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_concat_multi_channel.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_concat_multi_channel.cpp @@ -82,7 +82,7 @@ void OutputLayersConcatMultiChannel::SetUp() { auto ngPrecision = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - const auto input1 = std::make_shared(ngPrecision, ngraph::Shape(inputShape1)); + const auto input1 = std::make_shared(ngPrecision, ngraph::Shape(inputShape1)); input1->set_friendly_name("input1"); const auto fakeQuantize1 = 
ngraph::builder::makeFakeQuantize(input1->output(0), ngPrecision, 256ul, { 1ul }); @@ -90,24 +90,24 @@ void OutputLayersConcatMultiChannel::SetUp() { ASSERT_EQ(4ul, inputShape1.size()) << "unexpected input layout"; const InferenceEngine::SizeVector inputShape2 = { inputShape1[0], inputShape1[1] * 2ul, inputShape1[2], inputShape1[3] }; - const auto input2 = std::make_shared(ngPrecision, ngraph::Shape(inputShape2)); + const auto input2 = std::make_shared(ngPrecision, ngraph::Shape(inputShape2)); input2->set_friendly_name("input2"); const auto fakeQuantize2 = ngraph::builder::makeFakeQuantize(input2->output(0), ngPrecision, 256ul, { 1ul }); fakeQuantize2->set_friendly_name("fakeQuantize2"); - const std::shared_ptr concat = std::make_shared( + const std::shared_ptr concat = std::make_shared( ngraph::OutputVector{ fakeQuantize1->output(0), fakeQuantize2->output(0)}, 1); concat->set_friendly_name("concat"); - auto const1 = ngraph::opset1::Constant::create(ngPrecision, ngraph::Shape{ 1, 1, 1, 1 }, { 1 }); - std::shared_ptr convolution = std::make_shared(concat, const1); + auto const1 = ov::op::v0::Constant::create(ngPrecision, ngraph::Shape{ 1, 1, 1, 1 }, { 1 }); + std::shared_ptr convolution = std::make_shared(concat, const1); convolution->set_friendly_name("convolution"); ngraph::ResultVector results { - std::make_shared(concat), - std::make_shared(convolution), - std::make_shared(fakeQuantize2) + std::make_shared(concat), + std::make_shared(convolution), + std::make_shared(fakeQuantize2) }; function = std::make_shared(results, ngraph::ParameterVector { input1, input2 }, "OutputLayersHandling"); diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_handling_in_transformations.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_handling_in_transformations.cpp index 67ca66f65a63c3..ac78e3246fb7e5 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_handling_in_transformations.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_handling_in_transformations.cpp @@ -53,7 +53,7 @@ void OutputLayers::SetUp() { std::tie(netPrecision, inputShape, targetDevice, params) = this->GetParam(); auto ngPrecision = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - const auto input = std::make_shared(ngPrecision, ngraph::Shape(inputShape)); + const auto input = std::make_shared(ngPrecision, ngraph::Shape(inputShape)); input->set_friendly_name("input"); const float k = 1.f; @@ -62,7 +62,7 @@ void OutputLayers::SetUp() { { 0.f }, { 255.f / k }, { 0.f }, { 255.f / k }); fakeQuantizeOnActivations->set_friendly_name("fakeQuantizeOnActivations"); - const auto weights = ngraph::opset1::Constant::create( + const auto weights = ov::op::v0::Constant::create( ngPrecision, ngraph::Shape{ inputShape[1ul], inputShape[1ul], 1ul, 1ul }, std::vector(inputShape[1ul] * inputShape[1ul], 1ul)); @@ -72,7 +72,7 @@ void OutputLayers::SetUp() { { -128.f / k }, { 127.f / k }, { -128.f / k }, { 127.f / k }); fakeQuantizeOnWeights->set_friendly_name("fakeQuantizeOnWeights"); - std::shared_ptr convolution = std::make_shared( + std::shared_ptr convolution = std::make_shared( fakeQuantizeOnActivations, fakeQuantizeOnWeights, ngraph::Strides{ 1ul, 1ul }, @@ -82,8 +82,8 @@ void OutputLayers::SetUp() { convolution->set_friendly_name("convolution"); ngraph::ResultVector results { - std::make_shared(convolution), - std::make_shared(fakeQuantizeOnActivations) + 
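// The OutputLayersConcat / OutputLayersConcatMultiChannel hunks keep the same topology and
// only swap the opset1 type names for ov::op ones. A minimal sketch of that topology
// (FakeQuantize layers omitted; helper name and shapes assumed for illustration):
#include <memory>
#include <vector>

#include <openvino/core/model.hpp>
#include <openvino/op/concat.hpp>
#include <openvino/op/constant.hpp>
#include <openvino/op/convolution.hpp>
#include <openvino/op/parameter.hpp>
#include <openvino/op/result.hpp>

std::shared_ptr<ov::Model> make_concat_conv(const ov::Shape& shape1, const ov::Shape& shape2) {
    auto input1 = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, shape1);
    auto input2 = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, shape2);
    auto concat = std::make_shared<ov::op::v0::Concat>(ov::OutputVector{input1, input2}, 1);

    const size_t channels = shape1[1] + shape2[1];
    auto weights = ov::op::v0::Constant::create(ov::element::f32,
                                                ov::Shape{channels, channels, 1, 1},
                                                std::vector<float>(channels * channels, 1.0f));
    auto convolution = std::make_shared<ov::op::v1::Convolution>(concat, weights,
                                                                 ov::Strides{1, 1},
                                                                 ov::CoordinateDiff{0, 0},
                                                                 ov::CoordinateDiff{0, 0},
                                                                 ov::Strides{1, 1});
    ov::ResultVector results{std::make_shared<ov::op::v0::Result>(concat),
                             std::make_shared<ov::op::v0::Result>(convolution)};
    return std::make_shared<ov::Model>(results, ov::ParameterVector{input1, input2},
                                       "OutputLayersHandling");
}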
std::make_shared(convolution), + std::make_shared(fakeQuantizeOnActivations) }; function = std::make_shared(results, ngraph::ParameterVector { input }, "OutputLayersHandling"); diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/pad_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/pad_transformation.cpp index 57a5dde6799bee..da6e3ba2af3666 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/pad_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/pad_transformation.cpp @@ -15,7 +15,7 @@ namespace LayerTestsDefinitions { std::string PadTransformation::getTestCaseName(const testing::TestParamInfo& obj) { ngraph::element::Type netPrecision; ngraph::PartialShape inputShape; - ngraph::op::PadMode padMode; + ov::op::PadMode padMode; std::string targetDevice; ov::pass::low_precision::LayerTransformation::Params params; PadTransformationParam param; @@ -25,14 +25,14 @@ std::string PadTransformation::getTestCaseName(const testing::TestParamInfoGetParam(); diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_max_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_max_transformation.cpp index 25c7ec5bee2de7..9980d12ac8199b 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_max_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_max_transformation.cpp @@ -41,7 +41,7 @@ void ReduceMaxTransformation::SetUp() { ngraph::builder::subgraph::DequantizationOperations dequantizationBefore; ngraph::builder::subgraph::DequantizationOperations dequantizationAfter; - function = ngraph::builder::subgraph::ReduceFunction::get( + function = ngraph::builder::subgraph::ReduceFunction::get( netPrecision, inputShape, param.fakeQuantize, diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_mean_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_mean_transformation.cpp index b5139f40821269..35f54b7cc8a128 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_mean_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_mean_transformation.cpp @@ -49,7 +49,7 @@ void ReduceMeanTransformation::SetUp() { ReduceMeanTransformationParam param; std::tie(netPrecision, inputShape, targetDevice, params, param) = GetParam(); - function = ngraph::builder::subgraph::ReduceFunction::get( + function = ngraph::builder::subgraph::ReduceFunction::get( netPrecision, inputShape, param.fakeQuantize, diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_min_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_min_transformation.cpp index 4e4448dac1e1cf..a1b3b9cb24fb28 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_min_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_min_transformation.cpp @@ -41,7 +41,7 @@ void ReduceMinTransformation::SetUp() { ngraph::builder::subgraph::DequantizationOperations dequantizationBefore; ngraph::builder::subgraph::DequantizationOperations dequantizationAfter; - function = ngraph::builder::subgraph::ReduceFunction::get( + function = ngraph::builder::subgraph::ReduceFunction::get( 
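// The Reduce* transformation tests in these hunks differ only in the reduce op passed to
// ReduceFunction::get<...>(); with ov types those are ov::op::v1::ReduceMax, ReduceMean,
// ReduceMin and ReduceSum. A direct construction of one of them, for illustration only
// (helper name and axis value are assumptions):
#include <memory>

#include <openvino/op/constant.hpp>
#include <openvino/op/reduce_max.hpp>

std::shared_ptr<ov::Node> make_reduce_max(const ov::Output<ov::Node>& data) {
    auto axes = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{1}, {1});
    return std::make_shared<ov::op::v1::ReduceMax>(data, axes, /*keep_dims=*/true);
}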
netPrecision, inputShape, param.fakeQuantize, diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_sum_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_sum_transformation.cpp index f3536cdb67f5af..14d9e3cc97f0a6 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_sum_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_sum_transformation.cpp @@ -41,7 +41,7 @@ void ReduceSumTransformation::SetUp() { ngraph::builder::subgraph::DequantizationOperations dequantizationBefore; ngraph::builder::subgraph::DequantizationOperations dequantizationAfter; - function = ngraph::builder::subgraph::ReduceFunction::get( + function = ngraph::builder::subgraph::ReduceFunction::get( netPrecision, inputShape, param.fakeQuantize, diff --git a/src/tests/functional/plugin/shared/src/snippets/codegen_bert.cpp b/src/tests/functional/plugin/shared/src/snippets/codegen_bert.cpp index fa0d79e691fe63..418d184ecd2132 100644 --- a/src/tests/functional/plugin/shared/src/snippets/codegen_bert.cpp +++ b/src/tests/functional/plugin/shared/src/snippets/codegen_bert.cpp @@ -45,25 +45,25 @@ namespace snippets { std::tie(netPrecision, inputShape0, inputShape1, targetDevice) = this->GetParam(); auto shape = ngraph::Shape{inputShape0}; - auto input1 = std::make_shared(netPrecision, shape); - auto input2 = std::make_shared(netPrecision, shape); + auto input1 = std::make_shared(netPrecision, shape); + auto input2 = std::make_shared(netPrecision, shape); auto shapeMM = ngraph::Shape{inputShape1}; - auto input3 = std::make_shared(netPrecision, shapeMM); + auto input3 = std::make_shared(netPrecision, shapeMM); - auto add = std::make_shared(input1, input2); - auto mm = std::make_shared(add, input3); + auto add = std::make_shared(input1, input2); + auto mm = std::make_shared(add, input3); std::vector vals(ngraph::shape_size(shape)); for (int i = 0; i < vals.size(); i++) { vals[i] = static_cast(i)*vals.size(); } - auto c0 = std::make_shared(netPrecision, shape); - auto add2 = std::make_shared(mm, c0); + auto c0 = std::make_shared(netPrecision, shape); + auto add2 = std::make_shared(mm, c0); - auto add3 = std::make_shared(add, add2); - auto result = std::make_shared(add3); + auto add3 = std::make_shared(add, add2); + auto result = std::make_shared(add3); function = std::make_shared( ngraph::ResultVector{result}, diff --git a/src/tests/functional/plugin/shared/src/snippets/fake_quantize_decomposition_test.cpp b/src/tests/functional/plugin/shared/src/snippets/fake_quantize_decomposition_test.cpp index e5b3c55391ed44..8d5d6c51bf69f6 100644 --- a/src/tests/functional/plugin/shared/src/snippets/fake_quantize_decomposition_test.cpp +++ b/src/tests/functional/plugin/shared/src/snippets/fake_quantize_decomposition_test.cpp @@ -25,7 +25,7 @@ std::string FakeQuantizeDecompositionTest::getTestCaseName(testing::TestParamInf const auto targetDevice = std::get<3>(obj.param); const auto type_info = operation.first->get_type_info(); - const auto operationString = ngraph::is_type(operation.first) ? + const auto operationString = ngraph::is_type(operation.first) ? "nullptr" : (std::string(type_info.name) + "_" + std::string(type_info.version_id)); @@ -55,7 +55,7 @@ void FakeQuantizeDecompositionTest::SetUp() { init_input_shapes({{values.inputShape, {values.inputShape}}}); - std::shared_ptr op = ngraph::is_type(operation.first) ? 
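// The codegen_bert snippet above wires Parameters through Add and MatMul before the Result.
// A reduced sketch of that wiring with ov::op types (helper name assumed; shape_mm must be
// MatMul-compatible with shape):
#include <memory>

#include <openvino/core/model.hpp>
#include <openvino/op/add.hpp>
#include <openvino/op/matmul.hpp>
#include <openvino/op/parameter.hpp>
#include <openvino/op/result.hpp>

std::shared_ptr<ov::Model> make_add_matmul(const ov::Shape& shape, const ov::Shape& shape_mm) {
    auto input1 = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, shape);
    auto input2 = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, shape);
    auto input3 = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, shape_mm);

    auto add = std::make_shared<ov::op::v1::Add>(input1, input2);
    auto mm = std::make_shared<ov::op::v0::MatMul>(add, input3);

    auto result = std::make_shared<ov::op::v0::Result>(mm);
    return std::make_shared<ov::Model>(ov::ResultVector{result},
                                       ov::ParameterVector{input1, input2, input3},
                                       "codegen_bert");
}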
nullptr : operation.first; + std::shared_ptr op = ngraph::is_type(operation.first) ? nullptr : operation.first; function = ov::test::snippets::FakeQuantizeFunction::getOperationAndFakeQuantize( {values.inputShape}, values.inputType, diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/binary_convolution.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/binary_convolution.hpp index 48562853f67f73..b57cee5745ba25 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/binary_convolution.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/binary_convolution.hpp @@ -22,7 +22,7 @@ using binConvSpecificParams = std::tuple< std::vector, // Pads end InferenceEngine::SizeVector, // Dilations size_t, // Num Output channels - ngraph::op::PadType, // Padding type + ov::op::PadType, // Padding type float>; // Padding value using binaryConvolutionTestParamsSet = std::tuple< diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/broadcast.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/broadcast.hpp index 9b0e293a8d52a7..38cf1bbe94ed2e 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/broadcast.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/broadcast.hpp @@ -16,7 +16,7 @@ namespace LayerTestsDefinitions { using BroadcastParamsTuple = typename std::tuple< InferenceEngine::SizeVector, // target shape ngraph::AxisSet, // axes mapping - ngraph::op::BroadcastType, // broadcast mode + ov::op::BroadcastType, // broadcast mode InferenceEngine::SizeVector, // Input shape InferenceEngine::Precision, // Network precision std::string>; // Device name diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution.hpp index 6cdc9b5e323988..95b0a68c5914b3 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution.hpp @@ -23,7 +23,7 @@ typedef std::tuple< std::vector, // Pad end InferenceEngine::SizeVector, // Dilation size_t, // Num out channels - ngraph::op::PadType // Padding type + ov::op::PadType // Padding type > convSpecificParams; typedef std::tuple< convSpecificParams, diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution_backprop.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution_backprop.hpp index ef896760a42fe8..61503f7797b7b9 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution_backprop.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution_backprop.hpp @@ -22,7 +22,7 @@ typedef std::tuple< std::vector, // Pad end InferenceEngine::SizeVector, // Dilation size_t, // Num out channels - ngraph::op::PadType, // Padding type + ov::op::PadType, // Padding type std::vector // Output padding > convBackpropSpecificParams; typedef std::tuple< diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution_backprop_data.hpp 
b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution_backprop_data.hpp index 4ce4dc1decb687..5b28cca7187b98 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution_backprop_data.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution_backprop_data.hpp @@ -24,7 +24,7 @@ typedef std::tuple< std::vector, // Pad end InferenceEngine::SizeVector, // Dilation size_t, // Num out channels - ngraph::op::PadType, // Padding type + ov::op::PadType, // Padding type std::vector // Output padding > convBackpropDataSpecificParams; typedef std::tuple< diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/deformable_convolution.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/deformable_convolution.hpp index ad164b7e6d07ba..c93764b6dde36c 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/deformable_convolution.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/deformable_convolution.hpp @@ -25,7 +25,7 @@ typedef std::tuple< size_t, // Groups size_t, // Deformable groups size_t, // Num out channels - ngraph::op::PadType, // Padding type + ov::op::PadType, // Padding type bool, // Bilinear interpolation pad bool // Modulation > deformableConvSpecificParams; diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/depth_to_space.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/depth_to_space.hpp index 486a0ae7684ce5..26d972933f8851 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/depth_to_space.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/depth_to_space.hpp @@ -19,7 +19,7 @@ namespace LayerTestsDefinitions { using depthToSpaceParamsTuple = typename std::tuple< std::vector, // Input shape InferenceEngine::Precision, // Input precision - ngraph::opset3::DepthToSpace::DepthToSpaceMode, // Mode + ov::op::v0::DepthToSpace::DepthToSpaceMode, // Mode std::size_t, // Block size std::string>; // Device name> diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/detection_output.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/detection_output.hpp index af1f75c09b027c..e5d8a854ffa087 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/detection_output.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/detection_output.hpp @@ -14,7 +14,7 @@ namespace LayerTestsDefinitions { -std::ostream& operator <<(std::ostream& os, const ngraph::op::DetectionOutputAttrs& inputShape); +std::ostream& operator <<(std::ostream& os, const ov::op::v0::DetectionOutput::Attributes& inputShape); enum { idxLocation, @@ -62,7 +62,7 @@ using DetectionOutputParams = std::tuple< class DetectionOutputLayerTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon { public: static std::string getTestCaseName(const testing::TestParamInfo& obj); - ngraph::op::DetectionOutputAttrs attrs; + ov::op::v0::DetectionOutput::Attributes attrs; std::vector inShapes; void GenerateInputs() override; void Compare(const std::vector>> &expectedOutputs, diff --git 
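// Several of these parameter tuples now carry ov enums (ov::op::PadType,
// ov::op::v0::DepthToSpace::DepthToSpaceMode, ...) instead of their ngraph counterparts.
// For illustration, constructing a DepthToSpace node from such a mode value (helper name
// and block size are assumptions):
#include <cstddef>
#include <memory>

#include <openvino/op/depth_to_space.hpp>

std::shared_ptr<ov::Node> make_depth_to_space(const ov::Output<ov::Node>& data,
                                              std::size_t block_size = 2) {
    using ov::op::v0::DepthToSpace;
    return std::make_shared<DepthToSpace>(data, DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST,
                                          block_size);
}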
a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/extract_image_patches.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/extract_image_patches.hpp index 4d1502cb7366ea..8240652b6182fb 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/extract_image_patches.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/extract_image_patches.hpp @@ -18,7 +18,7 @@ using extractImagePatchesTuple = typename std::tuple< std::vector, // kernel size std::vector, // strides std::vector, // rates - ngraph::op::PadType, // pad type + ov::op::PadType, // pad type InferenceEngine::Precision, // Network precision InferenceEngine::Precision, // Input precision InferenceEngine::Precision, // Output precision diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/fake_quantize.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/fake_quantize.hpp index 241b9492e38868..66e5d3b0ef485f 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/fake_quantize.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/fake_quantize.hpp @@ -32,7 +32,7 @@ typedef std::tuple< std::vector, // fake quantize inputs shape std::vector, // fake quantize (inputLow, inputHigh, outputLow, outputHigh) or empty for random std::vector, // input generator data (low, high, resolution) or empty for default - ngraph::op::AutoBroadcastSpec // fake quantize broadcast mode + ov::op::AutoBroadcastSpec // fake quantize broadcast mode > fqSpecificParams; typedef std::tuple< fqSpecificParams, diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/grid_sample.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/grid_sample.hpp index 24b54c1a3b1dd9..19e06b49d5f443 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/grid_sample.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/grid_sample.hpp @@ -13,9 +13,9 @@ namespace LayerTestsDefinitions { using GridSampleParams = std::tuple; // Device name diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/group_convolution.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/group_convolution.hpp index add3010c44bac6..de90196085c575 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/group_convolution.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/group_convolution.hpp @@ -22,7 +22,7 @@ typedef std::tuple< InferenceEngine::SizeVector, size_t, size_t, - ngraph::op::PadType> groupConvSpecificParams; + ov::op::PadType> groupConvSpecificParams; typedef std::tuple< groupConvSpecificParams, InferenceEngine::Precision, diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/group_convolution_backprop_data.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/group_convolution_backprop_data.hpp index bb694c120e8e87..03017430c08572 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/group_convolution_backprop_data.hpp +++ 
b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/group_convolution_backprop_data.hpp @@ -24,7 +24,7 @@ using groupConvBackpropDataSpecificParams = std::tuple< InferenceEngine::SizeVector, // dilations size_t, // num output channels size_t, // num groups - ngraph::op::PadType>; // padding type + ov::op::PadType>; // padding type using groupConvBackpropDataLayerTestParamsSet = std::tuple< groupConvBackpropDataSpecificParams, InferenceEngine::Precision, // Network precision @@ -52,7 +52,7 @@ using groupConvBackpropSpecificParams = std::tuple< InferenceEngine::SizeVector, // dilations size_t, // num output channels size_t, // num groups - ngraph::op::PadType, // padding type + ov::op::PadType, // padding type std::vector>; // output padding using groupConvBackpropLayerTestParamsSet = std::tuple< groupConvBackpropSpecificParams, diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/group_normalization.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/group_normalization.hpp index 27873d36c80098..28fe5588ae9b24 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/group_normalization.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/group_normalization.hpp @@ -68,7 +68,7 @@ class GroupNormalizationTest : public testing::WithParamInterface(groupNormalization)}; + const ngraph::ResultVector results{std::make_shared(groupNormalization)}; // TODO: This workaround is needed as there is no full support for f16 type in the reference implementation if (ngPrc == element::Type_t::f16) { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/gru_sequence.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/gru_sequence.hpp index 225b5d6c2f616a..8a3d91f3969fc8 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/gru_sequence.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/gru_sequence.hpp @@ -26,7 +26,7 @@ using GRUSequenceParams = typename std::tuple< std::vector, // activations float, // clip bool, // linear_before_reset - ngraph::op::RecurrentSequenceDirection, // direction + ov::op::RecurrentSequenceDirection, // direction ngraph::helpers::InputLayerType, // WRB input type (Constant or Parameter) InferenceEngine::Precision, // Network precision std::string>; // Device name diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/lstm_sequence.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/lstm_sequence.hpp index 0f520392f88efd..e22e30bac0b20b 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/lstm_sequence.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/lstm_sequence.hpp @@ -23,7 +23,7 @@ using LSTMSequenceParams = typename std::tuple< size_t, // input size std::vector, // activations float, // clip - ngraph::op::RecurrentSequenceDirection, // direction + ov::op::RecurrentSequenceDirection, // direction ngraph::helpers::InputLayerType, // WRB input type (Constant or Parameter) InferenceEngine::Precision, // Network precision std::string>; // Device name diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/matrix_nms.hpp 
b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/matrix_nms.hpp index 586d39dfa3c1e7..d051d1ad37e1e2 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/matrix_nms.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/matrix_nms.hpp @@ -29,13 +29,13 @@ using ThresholdParams = std::tuple, // Params using to create 1st and 2nd inputs InputPrecisions, // Input precisions - ngraph::op::v8::MatrixNms::SortResultType, // Order of output elements + ov::op::v8::MatrixNms::SortResultType, // Order of output elements ngraph::element::Type, // Output type TopKParams, // Maximum number of boxes topk params ThresholdParams, // Thresholds: score_threshold, gaussian_sigma, post_threshold int, // Background class id bool, // If boxes are normalized - ngraph::op::v8::MatrixNms::DecayFunction, // Decay function + ov::op::v8::MatrixNms::DecayFunction, // Decay function bool, // make output shape static std::string>; // Device name @@ -51,7 +51,7 @@ class MatrixNmsLayerTest : public testing::WithParamInterface, private: void GetOutputParams(size_t& numBatches, size_t& maxOutputBoxesPerBatch); - ngraph::op::v8::MatrixNms::Attributes m_attrs; + ov::op::v8::MatrixNms::Attributes m_attrs; bool m_outStaticShape; }; diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/memory.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/memory.hpp index 5ac8c35e078c17..d39931e79366a8 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/memory.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/memory.hpp @@ -35,11 +35,11 @@ class MemoryTest : public testing::WithParamInterface, virtual void Infer() override; virtual std::shared_ptr CreateReadValueOp( const ov::Output& value, const std::shared_ptr& variable) const { - return std::make_shared(value, variable); + return std::make_shared(value, variable); } virtual std::shared_ptr CreateAssignOp( const ov::Output& value, const std::shared_ptr& variable) const { - return std::make_shared(value, variable); + return std::make_shared(value, variable); } virtual void CreateCommonFunc(); @@ -62,12 +62,12 @@ class MemoryTestV3 : public MemoryTest { protected: std::shared_ptr CreateReadValueOp( const ov::Output& value, const std::shared_ptr& variable) const override { - return std::make_shared(value, variable->get_info().variable_id); + return std::make_shared(value, variable->get_info().variable_id); } std::shared_ptr CreateAssignOp( const ov::Output& value, const std::shared_ptr& variable) const override { - return std::make_shared(value, variable->get_info().variable_id); + return std::make_shared(value, variable->get_info().variable_id); } }; diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/multiclass_nms.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/multiclass_nms.hpp index 4c936209d72783..c467cfe52f75c6 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/multiclass_nms.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/multiclass_nms.hpp @@ -36,7 +36,7 @@ using MulticlassNmsParams = std::tuple, int32_t, // background_class int32_t, // keep_top_k ngraph::element::Type, // Output type - ngraph::op::util::MulticlassNmsBase::SortResultType, // 
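// MemoryTest and MemoryTestV3 above create the state ops in two flavours: the
// variable-based opset6 form and the string-id opset3 form. A sketch of both with
// ov::op types (helper names are assumptions):
#include <memory>
#include <string>

#include <openvino/op/assign.hpp>
#include <openvino/op/read_value.hpp>
#include <openvino/op/util/variable.hpp>

std::shared_ptr<ov::Node> make_read_assign_v6(const ov::Output<ov::Node>& init,
                                              const std::shared_ptr<ov::op::util::Variable>& var) {
    auto read = std::make_shared<ov::op::v6::ReadValue>(init, var);
    return std::make_shared<ov::op::v6::Assign>(read, var);
}

std::shared_ptr<ov::Node> make_read_assign_v3(const ov::Output<ov::Node>& init,
                                              const std::string& variable_id) {
    auto read = std::make_shared<ov::op::v3::ReadValue>(init, variable_id);
    return std::make_shared<ov::op::v3::Assign>(read, variable_id);
}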
SortResultType + ov::op::util::MulticlassNmsBase::SortResultType, // SortResultType InputboolVar, // Sort result across batch, normalized bool, // make output shape static std::string>; diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/non_max_suppression.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/non_max_suppression.hpp index 54d2ea05f831f3..a8081ed3747240 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/non_max_suppression.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/non_max_suppression.hpp @@ -14,7 +14,7 @@ namespace testing { namespace internal { template <> -inline void PrintTo(const ::ngraph::op::v5::NonMaxSuppression::BoxEncodingType& value, ::std::ostream* os) {} +inline void PrintTo(const ::ov::op::v5::NonMaxSuppression::BoxEncodingType& value, ::std::ostream* os) {} } // namespace internal } // namespace testing @@ -36,7 +36,7 @@ using NmsParams = std::tuple; // Device name diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/normalize_l2.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/normalize_l2.hpp index 3adc1ddc9dac77..3c46d1c5cf3522 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/normalize_l2.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/normalize_l2.hpp @@ -16,7 +16,7 @@ namespace LayerTestsDefinitions { using NormalizeL2LayerTestParams = std::tuple< std::vector, // axes float, // eps - ngraph::op::EpsMode, // eps_mode + ov::op::EpsMode, // eps_mode InferenceEngine::SizeVector, // inputShape InferenceEngine::Precision, // netPrecision std::string // targetDevice diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/pooling.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/pooling.hpp index 82c28c09a9ed2b..8eb712eb6b57c3 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/pooling.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/pooling.hpp @@ -22,8 +22,8 @@ typedef std::tuple< std::vector, // Stride std::vector, // Pad begin std::vector, // Pad end - ngraph::op::RoundingType, // Rounding type - ngraph::op::PadType, // Pad type + ov::op::RoundingType, // Rounding type + ov::op::PadType, // Pad type bool // Exclude pad > poolSpecificParams; typedef std::tuple< @@ -56,8 +56,8 @@ typedef std::tuple< std::vector, // Pad end ngraph::element::Type_t, // Index element type int64_t, // Axis - ngraph::op::RoundingType, // Rounding type - ngraph::op::PadType // Pad type + ov::op::RoundingType, // Rounding type + ov::op::PadType // Pad type > maxPoolV8SpecificParams; typedef std::tuple< diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/rnn_sequence.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/rnn_sequence.hpp index a3788d9b1cb1da..d9e2b0138d9e4a 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/rnn_sequence.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/rnn_sequence.hpp @@ -23,7 +23,7 @@ using RNNSequenceParams = typename std::tuple< size_t, // input size std::vector, // activations float, // clip 
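// pooling.hpp above now takes ov::op::RoundingType and ov::op::PadType. For illustration,
// a MaxPool built directly from those enums (helper name, kernel and padding choices are
// assumptions):
#include <memory>

#include <openvino/op/max_pool.hpp>

std::shared_ptr<ov::Node> make_max_pool(const ov::Output<ov::Node>& data) {
    return std::make_shared<ov::op::v1::MaxPool>(data,
                                                 ov::Strides{1, 1},  // strides
                                                 ov::Shape{0, 0},    // pads_begin
                                                 ov::Shape{0, 0},    // pads_end
                                                 ov::Shape{2, 2},    // kernel
                                                 ov::op::RoundingType::CEIL,
                                                 ov::op::PadType::SAME_UPPER);
}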
- ngraph::op::RecurrentSequenceDirection, // direction + ov::op::RecurrentSequenceDirection, // direction ngraph::helpers::InputLayerType, // WRB input type (Constant or Parameter) InferenceEngine::Precision, // Network precision std::string>; // Device name diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/select.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/select.hpp index e27f2853fc6186..7426dc04a0ca03 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/select.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/select.hpp @@ -15,7 +15,7 @@ namespace LayerTestsDefinitions { typedef std::tuple< std::vector>, // mask, then, else shapes InferenceEngine::Precision, // then, else precision - ngraph::op::AutoBroadcastSpec, // broadcast + ov::op::AutoBroadcastSpec, // broadcast std::string> selectTestParams; // device name class SelectLayerTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/space_to_depth.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/space_to_depth.hpp index 665baa01dc7265..361ff9fa41015c 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/space_to_depth.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/space_to_depth.hpp @@ -19,7 +19,7 @@ namespace LayerTestsDefinitions { using spaceToDepthParamsTuple = typename std::tuple< std::vector, // Input shape InferenceEngine::Precision, // Input precision - ngraph::opset3::SpaceToDepth::SpaceToDepthMode, // Mode + ov::op::v0::SpaceToDepth::SpaceToDepthMode, // Mode std::size_t, // Block size std::string>; // Device name> diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/tensor_iterator.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/tensor_iterator.hpp index f7dbd4322aa3ea..157c4fca461320 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/tensor_iterator.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/tensor_iterator.hpp @@ -25,7 +25,7 @@ using TensorIteratorParams = typename std::tuple< size_t, // sequence axis float, // clip ngraph::helpers::TensorIteratorBody, // body type - ngraph::op::RecurrentSequenceDirection, // direction + ov::op::RecurrentSequenceDirection, // direction InferenceEngine::Precision, // Network precision std::string>; // Device name diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/topk.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/topk.hpp index 1892ad7f92bccd..3759c0bef6d569 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/topk.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/topk.hpp @@ -14,8 +14,8 @@ namespace LayerTestsDefinitions { typedef std::tuple< int64_t, // keepK int64_t, // axis - ngraph::opset4::TopK::Mode, // mode - ngraph::opset4::TopK::SortType, // sort + ov::op::v3::TopK::Mode, // mode + ov::op::v3::TopK::SortType, // sort InferenceEngine::Precision, // Net precision InferenceEngine::Precision, // Input precision 
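// topk.hpp above moves from ngraph::opset4::TopK to ov::op::v3::TopK for the Mode and
// SortType parameters. A direct construction using those enums (helper name and the k
// constant are assumptions):
#include <cstdint>
#include <memory>

#include <openvino/op/constant.hpp>
#include <openvino/op/topk.hpp>

std::shared_ptr<ov::Node> make_topk(const ov::Output<ov::Node>& data, int64_t keep_k, int64_t axis) {
    auto k = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {keep_k});
    return std::make_shared<ov::op::v3::TopK>(data, k, axis,
                                              ov::op::v3::TopK::Mode::MAX,
                                              ov::op::v3::TopK::SortType::SORT_VALUES);
}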
InferenceEngine::Precision, // Output precision diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/permute_concat_concat_permute.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/permute_concat_concat_permute.hpp index ed15880da7bdcc..4bcecd86963e6b 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/permute_concat_concat_permute.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/permute_concat_concat_permute.hpp @@ -28,7 +28,7 @@ class PermuteConcatConcatPermute : public testing::WithParamInterface CreateConst(const std::vector& input_shape, + static std::shared_ptr CreateConst(const std::vector& input_shape, const ::ngraph::element::Type& precision, bool use_1_as_first_dimension); template diff --git a/src/tests/functional/shared_test_classes/src/base/utils/generate_inputs.cpp b/src/tests/functional/shared_test_classes/src/base/utils/generate_inputs.cpp index 306e145729d796..c61d9d5ee39578 100644 --- a/src/tests/functional/shared_test_classes/src/base/utils/generate_inputs.cpp +++ b/src/tests/functional/shared_test_classes/src/base/utils/generate_inputs.cpp @@ -111,7 +111,7 @@ ov::runtime::Tensor generate(const ov::element::Type& elemType, } } // namespace Activation -ov::runtime::Tensor generate(const std::shared_ptr& node, +ov::runtime::Tensor generate(const std::shared_ptr& node, size_t port, const ov::element::Type& elemType, const ov::Shape& targetShape) { @@ -136,7 +136,7 @@ ov::runtime::Tensor generate(const std::shared_ptr& return Activation::generate(elemType, targetShape); } -ov::runtime::Tensor generate(const std::shared_ptr& node, +ov::runtime::Tensor generate(const std::shared_ptr& node, size_t port, const ov::element::Type& elemType, const ov::Shape& targetShape) { @@ -163,7 +163,7 @@ ov::runtime::Tensor generate(const std::shared_ptr& node, } } -ov::runtime::Tensor generate(const std::shared_ptr& node, +ov::runtime::Tensor generate(const std::shared_ptr& node, size_t port, const ov::element::Type& elemType, const ov::Shape& targetShape) { @@ -182,7 +182,7 @@ ov::runtime::Tensor generate(const std::shared_ptr& node, } } -ov::runtime::Tensor generate(const std::shared_ptr& node, +ov::runtime::Tensor generate(const std::shared_ptr& node, size_t port, const ov::element::Type& elemType, const ov::Shape& targetShape) { @@ -209,7 +209,7 @@ ov::runtime::Tensor generate(const std::shared_ptr& node, +ov::runtime::Tensor generate(const std::shared_ptr& node, size_t port, const ov::element::Type& elemType, const ov::Shape& targetShape) { @@ -334,7 +334,7 @@ ov::runtime::Tensor generate(const std::shared_ptr& node } -ov::runtime::Tensor generate(const std::shared_ptr& node, +ov::runtime::Tensor generate(const std::shared_ptr& node, size_t port, const ov::element::Type& elemType, const ov::Shape& targetShape) { @@ -366,49 +366,49 @@ ov::runtime::Tensor generate(const ov::element::Type& elemType, } } -ov::runtime::Tensor generate(const std::shared_ptr& node, +ov::runtime::Tensor generate(const std::shared_ptr& node, size_t port, const ov::element::Type& elemType, const ov::Shape& targetShape) { return LogicalOp::generate(elemType, targetShape); } -ov::runtime::Tensor generate(const std::shared_ptr& node, +ov::runtime::Tensor generate(const std::shared_ptr& node, size_t port, const ov::element::Type& elemType, const ov::Shape& targetShape) { return LogicalOp::generate(elemType, targetShape); } -ov::runtime::Tensor generate(const 
std::shared_ptr& node, +ov::runtime::Tensor generate(const std::shared_ptr& node, size_t port, const ov::element::Type& elemType, const ov::Shape& targetShape) { return LogicalOp::generate(elemType, targetShape); } -ov::runtime::Tensor generate(const std::shared_ptr& node, +ov::runtime::Tensor generate(const std::shared_ptr& node, size_t port, const ov::element::Type& elemType, const ov::Shape& targetShape) { return LogicalOp::generate(elemType, targetShape); } -ov::runtime::Tensor generate(const std::shared_ptr& node, +ov::runtime::Tensor generate(const std::shared_ptr& node, size_t port, const ov::element::Type& elemType, const ov::Shape& targetShape) { return LogicalOp::generate(elemType, targetShape); } -ov::runtime::Tensor generate(const std::shared_ptr& node, +ov::runtime::Tensor generate(const std::shared_ptr& node, size_t port, const ov::element::Type& elemType, const ov::Shape& targetShape) { return LogicalOp::generate(elemType, targetShape); } -ov::runtime::Tensor generate(const std::shared_ptr& node, +ov::runtime::Tensor generate(const std::shared_ptr& node, size_t port, const ov::element::Type& elemType, const ov::Shape& targetShape) { @@ -462,7 +462,7 @@ ov::runtime::Tensor generate(const std::shared_ptr& node, } } -ov::runtime::Tensor generate(const std::shared_ptr& node, +ov::runtime::Tensor generate(const std::shared_ptr& node, size_t port, const ov::element::Type& elemType, const ov::Shape& targetShape) { @@ -482,14 +482,14 @@ ov::runtime::Tensor generate(const std::shared_ptr& no return generate(std::dynamic_pointer_cast(node), port, elemType, targetShape); } -ov::runtime::Tensor generate(const std::shared_ptr& node, +ov::runtime::Tensor generate(const std::shared_ptr& node, size_t port, const ov::element::Type& elemType, const ov::Shape& targetShape) { return ov::test::utils::create_and_fill_tensor_consistently(elemType, targetShape, 3, 0, 1); } -ov::runtime::Tensor generate(const std::shared_ptr& node, +ov::runtime::Tensor generate(const std::shared_ptr& node, size_t port, const ov::element::Type& elemType, const ov::Shape& targetShape) { @@ -521,7 +521,7 @@ ov::runtime::Tensor generate(const std::shared_ptr& no return generate(std::dynamic_pointer_cast(node), port, elemType, targetShape); } -ov::runtime::Tensor generate(const std::shared_ptr& node, +ov::runtime::Tensor generate(const std::shared_ptr& node, size_t port, const ov::element::Type& elemType, const ov::Shape& targetShape) { @@ -625,7 +625,7 @@ ov::runtime::Tensor generate_unique_possibilities(const ov::Shape &targetShape) return tensor; } -ov::runtime::Tensor generate(const std::shared_ptr& node, +ov::runtime::Tensor generate(const std::shared_ptr& node, size_t port, const ov::element::Type& elemType, const ov::Shape& targetShape) { @@ -642,7 +642,7 @@ ov::runtime::Tensor generate(const std::shared_ptr(node), port, elemType, targetShape); } -ov::runtime::Tensor generate(const std::shared_ptr& node, +ov::runtime::Tensor generate(const std::shared_ptr& node, size_t port, const ov::element::Type& elemType, const ov::Shape& targetShape) { @@ -655,7 +655,7 @@ ov::runtime::Tensor generate(const std::shared_ptr& return generate(std::dynamic_pointer_cast(node), port, elemType, targetShape); } -ov::runtime::Tensor generate(const std::shared_ptr& node, +ov::runtime::Tensor generate(const std::shared_ptr& node, size_t port, const ov::element::Type& elemType, const ov::Shape& targetShape) { @@ -693,7 +693,7 @@ ov::runtime::Tensor generate(const } ov::runtime::Tensor generate(const - std::shared_ptr& node, + 
std::shared_ptr& node, size_t port, const ov::element::Type& elemType, const ov::Shape& targetShape) { diff --git a/src/tests/functional/shared_test_classes/src/single_layer/activation.cpp b/src/tests/functional/shared_test_classes/src/single_layer/activation.cpp index 56d48726f82bbd..0840dce4a91176 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/activation.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/activation.cpp @@ -218,7 +218,7 @@ void ActivationParamLayerTest::SetUp() { params.insert(params.end(), activationParams.begin(), activationParams.end()); auto activation = ngraph::builder::makeActivation(params, ngPrc, activationType); - ngraph::ResultVector results{std::make_shared(activation)}; + ngraph::ResultVector results{std::make_shared(activation)}; function = std::make_shared(results, params); } diff --git a/src/tests/functional/shared_test_classes/src/single_layer/adaptive_pooling.cpp b/src/tests/functional/shared_test_classes/src/single_layer/adaptive_pooling.cpp index d9e6d3c7b23ade..27f914487b4c93 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/adaptive_pooling.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/adaptive_pooling.cpp @@ -45,8 +45,8 @@ void AdaPoolLayerTest::SetUp() { auto pooledParam = ngraph::builder::makeConstant(ngraph::element::i32, pooledShape, pooledSpatialShape); // we cannot create abstract Op to use polymorphism - auto adapoolMax = std::make_shared(params[0], pooledParam, ngraph::element::i32); - auto adapoolAvg = std::make_shared(params[0], pooledParam); + auto adapoolMax = std::make_shared(params[0], pooledParam, ngraph::element::i32); + auto adapoolAvg = std::make_shared(params[0], pooledParam); function = (poolingMode == "max" ? 
std::make_shared(adapoolMax->outputs(), params, "AdaPoolMax") : std::make_shared(adapoolAvg->outputs(), params, "AdaPoolAvg")); diff --git a/src/tests/functional/shared_test_classes/src/single_layer/batch_to_space.cpp b/src/tests/functional/shared_test_classes/src/single_layer/batch_to_space.cpp index 10549f356cbe14..7a012d4721fd14 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/batch_to_space.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/batch_to_space.cpp @@ -39,7 +39,7 @@ void BatchToSpaceLayerTest::SetUp() { OPENVINO_SUPPRESS_DEPRECATED_START auto b2s = ngraph::builder::makeBatchToSpace(params[0], ngPrc, blockShape, cropsBegin, cropsEnd); OPENVINO_SUPPRESS_DEPRECATED_END - ngraph::ResultVector results{std::make_shared(b2s)}; + ngraph::ResultVector results{std::make_shared(b2s)}; function = std::make_shared(results, params, "BatchToSpace"); } diff --git a/src/tests/functional/shared_test_classes/src/single_layer/binary_convolution.cpp b/src/tests/functional/shared_test_classes/src/single_layer/binary_convolution.cpp index 4ffe6c5a32ced9..62ceb9b62a70b7 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/binary_convolution.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/binary_convolution.cpp @@ -16,7 +16,7 @@ std::string BinaryConvolutionLayerTest::getTestCaseName(const testing::TestParam std::tie(binConvParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShape, targetDevice) = obj.param; - ngraph::op::PadType padType; + ov::op::PadType padType; InferenceEngine::SizeVector kernel, stride, dilation; std::vector padBegin, padEnd; size_t convOutChannels; @@ -61,7 +61,7 @@ void BinaryConvolutionLayerTest::SetUp() { std::tie(binConvParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShape, targetDevice) = this->GetParam(); - ngraph::op::PadType padType; + ov::op::PadType padType; InferenceEngine::SizeVector kernelSize, strides, dilations; std::vector padsBegin, padsEnd; size_t numOutChannels; @@ -74,7 +74,7 @@ void BinaryConvolutionLayerTest::SetUp() { // TODO: refactor build BinaryConvolution op to accept filters input as Parameter auto binConv = ngraph::builder::makeBinaryConvolution(params[0], kernelSize, strides, padsBegin, padsEnd, dilations, padType, numOutChannels, padValue); - ngraph::ResultVector results{std::make_shared(binConv)}; + ngraph::ResultVector results{std::make_shared(binConv)}; function = std::make_shared(results, params, "BinaryConvolution"); } diff --git a/src/tests/functional/shared_test_classes/src/single_layer/bucketize.cpp b/src/tests/functional/shared_test_classes/src/single_layer/bucketize.cpp index 381a2c9f55fcf7..4d0bd144bccf85 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/bucketize.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/bucketize.cpp @@ -56,11 +56,11 @@ namespace LayerTestsDefinitions { auto ngInDataPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inDataPrc); auto ngInBucketsPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inBucketsPrc); auto ngNetPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrc); - auto data = std::make_shared(ngInDataPrc, ngraph::Shape(dataShape)); + auto data = std::make_shared(ngInDataPrc, ngraph::Shape(dataShape)); data->set_friendly_name("a_data"); - auto buckets = std::make_shared(ngInBucketsPrc, ngraph::Shape(bucketsShape)); + auto buckets = std::make_shared(ngInBucketsPrc, ngraph::Shape(bucketsShape)); buckets->set_friendly_name("b_buckets"); - 
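// The Bucketize test whose inputs are created just above builds an ov::op::v3::Bucketize
// node from the data and buckets parameters. A compact sketch (helper name and output
// type are assumptions):
#include <memory>

#include <openvino/op/bucketize.hpp>
#include <openvino/op/parameter.hpp>

std::shared_ptr<ov::Node> make_bucketize(const ov::Shape& data_shape,
                                         const ov::Shape& buckets_shape,
                                         bool with_right_bound = true) {
    auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, data_shape);
    auto buckets = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, buckets_shape);
    return std::make_shared<ov::op::v3::Bucketize>(data, buckets, ov::element::i32,
                                                   with_right_bound);
}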
auto bucketize = std::make_shared(data, buckets, ngNetPrc, with_right_bound); - function = std::make_shared(std::make_shared(bucketize), ngraph::ParameterVector{data, buckets}, "Bucketize"); + auto bucketize = std::make_shared(data, buckets, ngNetPrc, with_right_bound); + function = std::make_shared(std::make_shared(bucketize), ngraph::ParameterVector{data, buckets}, "Bucketize"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/clamp.cpp b/src/tests/functional/shared_test_classes/src/single_layer/clamp.cpp index d1adf624faa3f7..df0aa4541caff6 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/clamp.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/clamp.cpp @@ -32,9 +32,9 @@ void ClampLayerTest::SetUp() { std::tie(inShape, interval, netPrc, targetDevice) = this->GetParam(); auto ngNetPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrc); - auto input = std::make_shared(ngNetPrc, ngraph::Shape(inShape)); - auto clamp = std::make_shared(input, interval.first, interval.second); - function = std::make_shared(std::make_shared(clamp), ngraph::ParameterVector{input}); + auto input = std::make_shared(ngNetPrc, ngraph::Shape(inShape)); + auto clamp = std::make_shared(input, interval.first, interval.second); + function = std::make_shared(std::make_shared(clamp), ngraph::ParameterVector{input}); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/concat.cpp b/src/tests/functional/shared_test_classes/src/single_layer/concat.cpp index d57b4c66c8908c..4d6a570eefe5c5 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/concat.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/concat.cpp @@ -39,8 +39,8 @@ void ConcatLayerTest::SetUp() { params.push_back(param); paramsOuts.push_back(param); } - auto concat = std::make_shared(paramsOuts, axis); - ngraph::ResultVector results{std::make_shared(concat)}; + auto concat = std::make_shared(paramsOuts, axis); + ngraph::ResultVector results{std::make_shared(concat)}; function = std::make_shared(results, params, "concat"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/constant.cpp b/src/tests/functional/shared_test_classes/src/single_layer/constant.cpp index dffa6ab742c623..b758f8ff435a27 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/constant.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/constant.cpp @@ -37,8 +37,8 @@ void ConstantLayerTest::SetUp() { std::tie(data_shape, data_precision, data_elements, targetDevice) = this->GetParam(); const auto precision = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(data_precision); - auto constant = ngraph::op::Constant::create(precision, data_shape, data_elements); - ngraph::ResultVector results{std::make_shared(constant)}; + auto constant = ov::op::v0::Constant::create(precision, data_shape, data_elements); + ngraph::ResultVector results{std::make_shared(constant)}; function = std::make_shared(results, ngraph::ParameterVector{}, "constant"); } diff --git a/src/tests/functional/shared_test_classes/src/single_layer/convolution.cpp b/src/tests/functional/shared_test_classes/src/single_layer/convolution.cpp index 6dd2b56dce0210..000699a711ab67 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/convolution.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/convolution.cpp @@ -15,7 
+15,7 @@ std::string ConvolutionLayerTest::getTestCaseName(const testing::TestParamInfo padBegin, padEnd; size_t convOutChannels; @@ -45,7 +45,7 @@ void ConvolutionLayerTest::SetUp() { auto netPrecision = InferenceEngine::Precision::UNSPECIFIED; std::tie(convParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShape, targetDevice) = this->GetParam(); - ngraph::op::PadType padType; + ov::op::PadType padType; InferenceEngine::SizeVector kernel, stride, dilation; std::vector padBegin, padEnd; size_t convOutChannels; @@ -58,10 +58,10 @@ void ConvolutionLayerTest::SetUp() { filter_weights = ov::test::utils::generate_float_numbers(convOutChannels * inputShape[1] * filter_size, -0.1f, 0.1f); } - auto conv = std::dynamic_pointer_cast( + auto conv = std::dynamic_pointer_cast( ngraph::builder::makeConvolution(params[0], ngPrc, kernel, stride, padBegin, padEnd, dilation, padType, convOutChannels, false, filter_weights)); - ngraph::ResultVector results{std::make_shared(conv)}; + ngraph::ResultVector results{std::make_shared(conv)}; function = std::make_shared(results, params, "convolution"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/convolution_backprop.cpp b/src/tests/functional/shared_test_classes/src/single_layer/convolution_backprop.cpp index f7f1597e0ea300..4979cf3e8eae60 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/convolution_backprop.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/convolution_backprop.cpp @@ -15,7 +15,7 @@ std::string ConvolutionBackpropLayerTest::getTestCaseName(const testing::TestPar InferenceEngine::SizeVector outputShapes; std::string targetDevice; std::tie(convBackpropDataParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShapes, outputShapes, targetDevice) = obj.param; - ngraph::op::PadType padType; + ov::op::PadType padType; InferenceEngine::SizeVector kernel, stride, dilation; std::vector padBegin, padEnd, outPadding; size_t convOutChannels; @@ -47,23 +47,23 @@ void ConvolutionBackpropLayerTest::SetUp() { std::vector outputShape; auto netPrecision = InferenceEngine::Precision::UNSPECIFIED; std::tie(convBackpropDataParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShape, outputShape, targetDevice) = this->GetParam(); - ngraph::op::PadType padType; + ov::op::PadType padType; InferenceEngine::SizeVector kernel, stride, dilation; std::vector padBegin, padEnd, outPadding; size_t convOutChannels; std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, padType, outPadding) = convBackpropDataParams; auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - auto convBackpropData = std::dynamic_pointer_cast( + auto convBackpropData = std::dynamic_pointer_cast( ngraph::builder::makeConvolutionBackpropData(params[0], ngPrc, kernel, stride, padBegin, padEnd, dilation, padType, convOutChannels, false, outPadding)); if (!outputShape.empty()) { - auto outShape = ngraph::opset3::Constant::create(ngraph::element::i64, {outputShape.size()}, outputShape); - convBackpropData = std::dynamic_pointer_cast( + auto outShape = ov::op::v0::Constant::create(ngraph::element::i64, {outputShape.size()}, outputShape); + convBackpropData = std::dynamic_pointer_cast( ngraph::builder::makeConvolutionBackpropData(params[0], outShape, ngPrc, kernel, stride, padBegin, padEnd, dilation, padType, convOutChannels)); } - ngraph::ResultVector 
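// convolution_backprop.cpp above rebuilds the op with an explicit output-shape Constant
// when outputShape is non-empty. A direct ov::op construction of that branch (helper name,
// strides and padding mode are assumptions):
#include <cstddef>
#include <memory>
#include <vector>

#include <openvino/op/constant.hpp>
#include <openvino/op/convolution.hpp>

std::shared_ptr<ov::Node> make_deconv_with_output_shape(const ov::Output<ov::Node>& data,
                                                        const ov::Output<ov::Node>& filters,
                                                        const std::vector<size_t>& output_shape) {
    auto out_shape = ov::op::v0::Constant::create(ov::element::i64,
                                                  ov::Shape{output_shape.size()},
                                                  output_shape);
    return std::make_shared<ov::op::v1::ConvolutionBackpropData>(data, filters, out_shape,
                                                                 ov::Strides{1, 1},
                                                                 ov::CoordinateDiff{0, 0},
                                                                 ov::CoordinateDiff{0, 0},
                                                                 ov::Strides{1, 1},
                                                                 ov::op::PadType::SAME_LOWER);
}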
results{std::make_shared(convBackpropData)}; + ngraph::ResultVector results{std::make_shared(convBackpropData)}; function = std::make_shared(results, params, "convolutionBackpropData"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/convolution_backprop_data.cpp b/src/tests/functional/shared_test_classes/src/single_layer/convolution_backprop_data.cpp index ca86a0333b19b0..d02ee1cf6b4257 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/convolution_backprop_data.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/convolution_backprop_data.cpp @@ -17,7 +17,7 @@ std::string ConvolutionBackpropDataLayerTest::getTestCaseName(const testing::Tes InferenceEngine::SizeVector outputShapes; std::string targetDevice; std::tie(convBackpropDataParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShapes, outputShapes, targetDevice) = obj.param; - ngraph::op::PadType padType; + ov::op::PadType padType; InferenceEngine::SizeVector kernel, stride, dilation; std::vector padBegin, padEnd, outPadding; size_t convOutChannels; @@ -49,23 +49,23 @@ void ConvolutionBackpropDataLayerTest::SetUp() { std::vector outputShape; auto netPrecision = InferenceEngine::Precision::UNSPECIFIED; std::tie(convBackpropDataParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShape, outputShape, targetDevice) = this->GetParam(); - ngraph::op::PadType padType; + ov::op::PadType padType; InferenceEngine::SizeVector kernel, stride, dilation; std::vector padBegin, padEnd, outPadding; size_t convOutChannels; std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, padType, outPadding) = convBackpropDataParams; auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - auto convBackpropData = std::dynamic_pointer_cast( + auto convBackpropData = std::dynamic_pointer_cast( ngraph::builder::makeConvolutionBackpropData(params[0], ngPrc, kernel, stride, padBegin, padEnd, dilation, padType, convOutChannels, false, outPadding)); if (!outputShape.empty()) { - auto outShape = ngraph::opset3::Constant::create(ngraph::element::i64, {outputShape.size()}, outputShape); - convBackpropData = std::dynamic_pointer_cast( + auto outShape = ov::op::v0::Constant::create(ngraph::element::i64, {outputShape.size()}, outputShape); + convBackpropData = std::dynamic_pointer_cast( ngraph::builder::makeConvolutionBackpropData(params[0], outShape, ngPrc, kernel, stride, padBegin, padEnd, dilation, padType, convOutChannels)); } - ngraph::ResultVector results{std::make_shared(convBackpropData)}; + ngraph::ResultVector results{std::make_shared(convBackpropData)}; function = std::make_shared(results, params, "convolutionBackpropData"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/ctc_greedy_decoder.cpp b/src/tests/functional/shared_test_classes/src/single_layer/ctc_greedy_decoder.cpp index 6850a3e6f74eb7..5b48032bb98699 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/ctc_greedy_decoder.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/ctc_greedy_decoder.cpp @@ -47,11 +47,11 @@ void CTCGreedyDecoderLayerTest::SetUp() { ov::ParameterVector paramsIn {std::make_shared(ngPrc, ov::Shape(inputShapes))}; OPENVINO_SUPPRESS_DEPRECATED_START - auto ctcGreedyDecoder = std::dynamic_pointer_cast( + auto ctcGreedyDecoder = std::dynamic_pointer_cast( 
ngraph::builder::makeCTCGreedyDecoder(paramsIn[0], mergeRepeated)); OPENVINO_SUPPRESS_DEPRECATED_END - ngraph::ResultVector results{ std::make_shared(ctcGreedyDecoder) }; + ngraph::ResultVector results{ std::make_shared(ctcGreedyDecoder) }; function = std::make_shared(results, paramsIn, "CTCGreedyDecoder"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/ctc_greedy_decoder_seq_len.cpp b/src/tests/functional/shared_test_classes/src/single_layer/ctc_greedy_decoder_seq_len.cpp index 8c674b72aa831d..66d71ff16e6bd8 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/ctc_greedy_decoder_seq_len.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/ctc_greedy_decoder_seq_len.cpp @@ -81,14 +81,14 @@ void CTCGreedyDecoderSeqLenLayerTest::SetUp() { blankIndex = std::min(blankIndex, C - 1); OPENVINO_SUPPRESS_DEPRECATED_START - auto ctcGreedyDecoderSeqLen = std::dynamic_pointer_cast( + auto ctcGreedyDecoderSeqLen = std::dynamic_pointer_cast( ngraph::builder::makeCTCGreedyDecoderSeqLen(paramsIn[0], sequenceLenNode, blankIndex, mergeRepeated, ngIdxPrc)); OPENVINO_SUPPRESS_DEPRECATED_END ngraph::ResultVector results; for (int i = 0; i < ctcGreedyDecoderSeqLen->get_output_size(); i++) { - results.push_back(std::make_shared(ctcGreedyDecoderSeqLen->output(i))); + results.push_back(std::make_shared(ctcGreedyDecoderSeqLen->output(i))); } function = std::make_shared(results, paramsIn, "CTCGreedyDecoderSeqLen"); } diff --git a/src/tests/functional/shared_test_classes/src/single_layer/ctc_loss.cpp b/src/tests/functional/shared_test_classes/src/single_layer/ctc_loss.cpp index 268d25ff19f320..c7462c9d9ce48a 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/ctc_loss.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/ctc_loss.cpp @@ -52,12 +52,12 @@ void CTCLossLayerTest::SetUp() { ov::ParameterVector params {std::make_shared(ngFpPrc, ov::Shape(logitsShapes))}; OPENVINO_SUPPRESS_DEPRECATED_START - auto ctcLoss = std::dynamic_pointer_cast( + auto ctcLoss = std::dynamic_pointer_cast( ngraph::builder::makeCTCLoss(params[0], logitsLength, labels, labelsLength, blankIndex, ngFpPrc, ngIntPrc, preprocessCollapseRepeated, ctcMergeRepeated, unique)); OPENVINO_SUPPRESS_DEPRECATED_END - ngraph::ResultVector results{std::make_shared(ctcLoss)}; + ngraph::ResultVector results{std::make_shared(ctcLoss)}; function = std::make_shared(results, params, "CTCLoss"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/cum_sum.cpp b/src/tests/functional/shared_test_classes/src/single_layer/cum_sum.cpp index 79fdc2cd3ba134..64f298fa36d47b 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/cum_sum.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/cum_sum.cpp @@ -31,11 +31,11 @@ void CumSumLayerTest::SetUp() { int64_t axis; std::tie(inputShapes, inputPrecision, axis, exclusive, reverse, targetDevice) = this->GetParam(); const auto inType = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision); - const auto paramData = std::make_shared(inType, ngraph::Shape(inputShapes)); - const auto axisNode = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{}, std::vector{axis})->output(0); - const auto cumSum = std::make_shared(paramData, axisNode, exclusive, reverse); + const auto paramData = std::make_shared(inType, ngraph::Shape(inputShapes)); + const auto axisNode = 
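// ---- Illustrative aside (not part of the patch) --------------------------------
// The cum_sum.cpp hunk being rewritten here feeds the axis in as a scalar
// Constant. A minimal sketch of that pattern with the new type names; the input
// shape, axis value and helper name are illustrative assumptions.
#include <memory>
#include <openvino/op/constant.hpp>
#include <openvino/op/cum_sum.hpp>
#include <openvino/op/parameter.hpp>

std::shared_ptr<ov::Node> make_cumsum_sketch(bool exclusive, bool reverse) {
    auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{2, 4, 8});
    auto axis = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {1});  // scalar axis
    return std::make_shared<ov::op::v0::CumSum>(data, axis, exclusive, reverse);
}
// ---------------------------------------------------------------------------------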
std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{}, std::vector{axis})->output(0); + const auto cumSum = std::make_shared(paramData, axisNode, exclusive, reverse); - ngraph::ResultVector results{std::make_shared(cumSum)}; + ngraph::ResultVector results{std::make_shared(cumSum)}; function = std::make_shared(results, ngraph::ParameterVector{paramData}, "cumsum"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/deformable_convolution.cpp b/src/tests/functional/shared_test_classes/src/single_layer/deformable_convolution.cpp index ae68e2f3d713e5..3f928e191284f8 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/deformable_convolution.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/deformable_convolution.cpp @@ -13,7 +13,7 @@ std::string DeformableConvolutionLayerTest::getTestCaseName(const testing::TestP std::string targetDevice; std::tie(convParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShapes, targetDevice) = obj.param; - ngraph::op::PadType padType; + ov::op::PadType padType; InferenceEngine::SizeVector offsets, filter, stride, dilation; std::vector padBegin, padEnd; size_t groups, deformable_groups, convOutChannels; @@ -64,7 +64,7 @@ void DeformableConvolutionLayerTest::SetUp() { InferenceEngine::Precision netPrecision; std::tie(convParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShape, targetDevice) = this->GetParam(); - ngraph::op::PadType padType; + ov::op::PadType padType; InferenceEngine::SizeVector offsets, filter, stride, dilation; std::vector padBegin, padEnd; size_t groups, deformable_groups, convOutChannels; @@ -76,30 +76,30 @@ void DeformableConvolutionLayerTest::SetUp() { for (auto&& shape : {inputShape, offsets, filter}) { params.push_back(std::make_shared(ngPrc, ov::Shape(shape))); } - auto data = std::make_shared(ngPrc, ngraph::Shape(inputShape)); + auto data = std::make_shared(ngPrc, ngraph::Shape(inputShape)); data->set_friendly_name("a_data"); - auto offset_vals = std::make_shared(ngPrc, ngraph::Shape(offsets)); + auto offset_vals = std::make_shared(ngPrc, ngraph::Shape(offsets)); offset_vals->set_friendly_name("b_offset_vals"); - auto filter_vals = std::make_shared(ngPrc, ngraph::Shape(filter)); + auto filter_vals = std::make_shared(ngPrc, ngraph::Shape(filter)); filter_vals->set_friendly_name("c_filter_vals"); ngraph::ParameterVector parameters{data, offset_vals, filter_vals}; std::shared_ptr deformable_conv; if (with_modulation) { auto modulation_shape = ngraph::Shape(offsets); modulation_shape[1] = offsets[1] / 2; - auto modulation_scalars = std::make_shared(ngPrc, modulation_shape); + auto modulation_scalars = std::make_shared(ngPrc, modulation_shape); modulation_scalars->set_friendly_name("c_modulation_scalars"); - deformable_conv = std::make_shared(data, offset_vals, filter_vals, modulation_scalars, stride, padBegin, + deformable_conv = std::make_shared(data, offset_vals, filter_vals, modulation_scalars, stride, padBegin, padEnd, dilation, padType, groups, deformable_groups, with_bilinear_interpolation_pad); parameters.push_back(modulation_scalars); } else { - deformable_conv = std::make_shared(data, offset_vals, filter_vals, stride, padBegin, padEnd, dilation, + deformable_conv = std::make_shared(data, offset_vals, filter_vals, stride, padBegin, padEnd, dilation, padType, groups, deformable_groups, with_bilinear_interpolation_pad); } - ngraph::ResultVector results{std::make_shared(deformable_conv)}; + ngraph::ResultVector 
results{std::make_shared(deformable_conv)}; function = std::make_shared(results, parameters, "deformable_convolution"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/deformable_psroi_pooling.cpp b/src/tests/functional/shared_test_classes/src/single_layer/deformable_psroi_pooling.cpp index 3349348e5115c4..38a038ca64ab4e 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/deformable_psroi_pooling.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/deformable_psroi_pooling.cpp @@ -97,7 +97,7 @@ namespace LayerTestsDefinitions { if (offsetsShape.empty()) { // Test without optional third input (offsets) params = ov::ParameterVector{std::make_shared(ngPrc, ov::Shape(dataShape)), std::make_shared(ngPrc, ov::Shape(roisShape))}; - defomablePSROIPooling = std::make_shared(params[0], + defomablePSROIPooling = std::make_shared(params[0], params[1], outputDim, spatialScale_, @@ -111,7 +111,7 @@ namespace LayerTestsDefinitions { params = ov::ParameterVector{std::make_shared(ngPrc, ov::Shape(dataShape)), std::make_shared(ngPrc, ov::Shape(roisShape)), std::make_shared(ngPrc, ov::Shape(offsetsShape))}; - defomablePSROIPooling = std::make_shared(params[0], + defomablePSROIPooling = std::make_shared(params[0], params[1], params[2], outputDim, @@ -124,7 +124,7 @@ namespace LayerTestsDefinitions { part_size); } - ngraph::ResultVector results{std::make_shared(defomablePSROIPooling)}; + ngraph::ResultVector results{std::make_shared(defomablePSROIPooling)}; function = std::make_shared(results, params, "deformable_psroi_pooling"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/depth_to_space.cpp b/src/tests/functional/shared_test_classes/src/single_layer/depth_to_space.cpp index ce7f04d7935c8e..894c7bda8f9cdc 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/depth_to_space.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/depth_to_space.cpp @@ -7,12 +7,10 @@ namespace LayerTestsDefinitions { -using namespace ngraph::opset3; - -static inline std::string DepthToSpaceModeToString(const DepthToSpace::DepthToSpaceMode& mode) { - static std::map names = { - {DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, "BLOCKS_FIRST"}, - {DepthToSpace::DepthToSpaceMode::DEPTH_FIRST, "DEPTH_FIRST"}, +static inline std::string DepthToSpaceModeToString(const ov::op::v0::DepthToSpace::DepthToSpaceMode& mode) { + static std::map names = { + {ov::op::v0::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, "BLOCKS_FIRST"}, + {ov::op::v0::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST, "DEPTH_FIRST"}, }; auto i = names.find(mode); @@ -24,7 +22,7 @@ static inline std::string DepthToSpaceModeToString(const DepthToSpace::DepthToSp std::string DepthToSpaceLayerTest::getTestCaseName(const testing::TestParamInfo &obj) { std::vector inShape; - DepthToSpace::DepthToSpaceMode mode; + ov::op::v0::DepthToSpace::DepthToSpaceMode mode; std::size_t blockSize; InferenceEngine::Precision inputPrecision; std::string targetName; @@ -40,14 +38,14 @@ std::string DepthToSpaceLayerTest::getTestCaseName(const testing::TestParamInfo< void DepthToSpaceLayerTest::SetUp() { std::vector inShape; - DepthToSpace::DepthToSpaceMode mode; + ov::op::v0::DepthToSpace::DepthToSpaceMode mode; std::size_t blockSize; InferenceEngine::Precision inputPrecision; std::tie(inShape, inputPrecision, mode, blockSize, targetDevice) = this->GetParam(); auto inPrc = 
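// ---- Illustrative aside (not part of the patch) --------------------------------
// The depth_to_space.cpp hunk above moves DepthToSpaceMode onto
// ov::op::v0::DepthToSpace. A minimal construction sketch; the shape, mode,
// block size and helper name are illustrative assumptions.
#include <memory>
#include <openvino/op/depth_to_space.hpp>
#include <openvino/op/parameter.hpp>

std::shared_ptr<ov::Node> make_depth_to_space_sketch() {
    auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 16, 8, 8});
    return std::make_shared<ov::op::v0::DepthToSpace>(
        data,
        ov::op::v0::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST,
        /*block_size=*/2);  // 16 channels -> 4 channels, spatial dims doubled
}
// ---------------------------------------------------------------------------------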
FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision); ov::ParameterVector params {std::make_shared(inPrc, ov::Shape(inShape))}; auto d2s = std::make_shared(params[0], mode, blockSize); - ngraph::ResultVector results{std::make_shared(d2s)}; + ngraph::ResultVector results{std::make_shared(d2s)}; function = std::make_shared(results, params, "DepthToSpace"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/detection_output.cpp b/src/tests/functional/shared_test_classes/src/single_layer/detection_output.cpp index fafaa6fe6eeb1e..9ab980acc3a687 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/detection_output.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/detection_output.cpp @@ -7,7 +7,7 @@ namespace LayerTestsDefinitions { -std::ostream& operator <<(std::ostream& result, const ngraph::op::DetectionOutputAttrs& attrs) { +std::ostream& operator <<(std::ostream& result, const ov::op::v0::DetectionOutput::Attributes& attrs) { result << "Classes=" << attrs.num_classes << "_"; result << "backgrId=" << attrs.background_label_id << "_"; result << "topK=" << attrs.top_k << "_"; @@ -30,7 +30,7 @@ std::ostream& operator <<(std::ostream& result, const ngraph::op::DetectionOutpu std::string DetectionOutputLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { DetectionOutputAttributes commonAttrs; ParamsWhichSizeDepends specificAttrs; - ngraph::op::DetectionOutputAttrs attrs; + ov::op::v0::DetectionOutput::Attributes attrs; size_t batch; std::string targetDevice; std::tie(commonAttrs, specificAttrs, batch, attrs.objectness_score, targetDevice) = obj.param; @@ -164,7 +164,7 @@ void DetectionOutputLayerTest::SetUp() { else OPENVINO_THROW("DetectionOutput layer supports only 3 or 5 inputs"); - ngraph::ResultVector results{std::make_shared(detOut)}; + ngraph::ResultVector results{std::make_shared(detOut)}; function = std::make_shared(results, params, "DetectionOutput"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/dft.cpp b/src/tests/functional/shared_test_classes/src/single_layer/dft.cpp index 9e4e872befa35d..7cf196ffb37c65 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/dft.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/dft.cpp @@ -34,13 +34,13 @@ void DFTLayerTest::SetUp() { std::tie(inputShapes, inputPrecision, axes, signalSize, opType, targetDevice) = this->GetParam(); auto inType = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision); ngraph::ParameterVector paramVector; - auto paramData = std::make_shared(inType, ngraph::Shape(inputShapes)); + auto paramData = std::make_shared(inType, ngraph::Shape(inputShapes)); paramVector.push_back(paramData); auto dft = ngraph::builder::makeDFT(paramVector[0], axes, signalSize, opType); - ngraph::ResultVector results{std::make_shared(dft)}; + ngraph::ResultVector results{std::make_shared(dft)}; function = std::make_shared(results, paramVector, "DFT"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/einsum.cpp b/src/tests/functional/shared_test_classes/src/single_layer/einsum.cpp index 4fdaee9ba47e12..2bf206027bce10 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/einsum.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/einsum.cpp @@ -42,7 +42,7 @@ void EinsumLayerTest::SetUp() { } const auto einsum = 
std::make_shared(paramsOuts, equation); - const ngraph::ResultVector results{std::make_shared(einsum)}; + const ngraph::ResultVector results{std::make_shared(einsum)}; function = std::make_shared(results, params, "einsum"); } diff --git a/src/tests/functional/shared_test_classes/src/single_layer/embedding_bag_offsets_sum.cpp b/src/tests/functional/shared_test_classes/src/single_layer/embedding_bag_offsets_sum.cpp index 574ee1e2b859f4..426d16885e4694 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/embedding_bag_offsets_sum.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/embedding_bag_offsets_sum.cpp @@ -42,13 +42,13 @@ void EmbeddingBagOffsetsSumLayerTest::SetUp() { auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); auto ngIdxPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(indPrecision); - auto emb_table_node = std::make_shared(ngPrc, ngraph::Shape(embTableShape)); + auto emb_table_node = std::make_shared(ngPrc, ngraph::Shape(embTableShape)); ngraph::ParameterVector params = {emb_table_node}; - auto embBag = std::dynamic_pointer_cast( + auto embBag = std::dynamic_pointer_cast( ngraph::builder::makeEmbeddingBagOffsetsSum( ngPrc, ngIdxPrc, emb_table_node, indices, offsets, defaultIndex, withWeights, withDefIndex)); - ngraph::ResultVector results{std::make_shared(embBag)}; + ngraph::ResultVector results{std::make_shared(embBag)}; function = std::make_shared(results, params, "embeddingBagOffsetsSum"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/embedding_bag_packed_sum.cpp b/src/tests/functional/shared_test_classes/src/single_layer/embedding_bag_packed_sum.cpp index 4f8deac2409064..bad040c2e74beb 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/embedding_bag_packed_sum.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/embedding_bag_packed_sum.cpp @@ -39,11 +39,11 @@ void EmbeddingBagPackedSumLayerTest::SetUp() { auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); auto ngIdxPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(indPrecision); - auto emb_table_node = std::make_shared(ngPrc, ngraph::Shape(embTableShape)); + auto emb_table_node = std::make_shared(ngPrc, ngraph::Shape(embTableShape)); ngraph::ParameterVector params = {emb_table_node}; auto embBag = ov::test::utils::make_embedding_bag_packed_sum(ngPrc, ngIdxPrc, emb_table_node, indices, withWeights); - ngraph::ResultVector results{std::make_shared(embBag)}; + ngraph::ResultVector results{std::make_shared(embBag)}; function = std::make_shared(results, params, "embeddingBagPackedSum"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/embedding_segments_sum.cpp b/src/tests/functional/shared_test_classes/src/single_layer/embedding_segments_sum.cpp index 59686188962528..87491598e75503 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/embedding_segments_sum.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/embedding_segments_sum.cpp @@ -44,13 +44,13 @@ void EmbeddingSegmentsSumLayerTest::SetUp() { auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); auto ngIdxPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(indPrecision); - auto emb_table_node = std::make_shared(ngPrc, ngraph::Shape(embTableShape)); + auto emb_table_node = std::make_shared(ngPrc, ngraph::Shape(embTableShape)); 
ngraph::ParameterVector params = {emb_table_node}; - auto embBag = std::dynamic_pointer_cast( + auto embBag = std::dynamic_pointer_cast( ngraph::builder::makeEmbeddingSegmentsSum( ngPrc, ngIdxPrc, emb_table_node, indices, segmentIds, numSegments, defaultIndex, withWeights, withDefIndex)); - ngraph::ResultVector results{std::make_shared(embBag)}; + ngraph::ResultVector results{std::make_shared(embBag)}; function = std::make_shared(results, params, "embeddingSegmentsSum"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/experimental_detectron_detection_output.cpp b/src/tests/functional/shared_test_classes/src/single_layer/experimental_detectron_detection_output.cpp index 63e95b4c33bb94..160bace40ce018 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/experimental_detectron_detection_output.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/experimental_detectron_detection_output.cpp @@ -12,7 +12,7 @@ namespace test { namespace subgraph { namespace { - std::ostream& operator <<(std::ostream& ss, const ngraph::opset6::ExperimentalDetectronDetectionOutput::Attributes& attributes) { + std::ostream& operator <<(std::ostream& ss, const ov::op::v6::ExperimentalDetectronDetectionOutput::Attributes& attributes) { ss << "score_threshold=" << attributes.score_threshold << "_"; ss << "nms_threshold=" << attributes.nms_threshold << "_"; ss << "max_delta_log_wh=" << attributes.max_delta_log_wh << "_"; @@ -28,7 +28,7 @@ namespace { std::string ExperimentalDetectronDetectionOutputLayerTest::getTestCaseName( const testing::TestParamInfo& obj) { std::vector inputShapes; - ngraph::opset6::ExperimentalDetectronDetectionOutput::Attributes attributes; + ov::op::v6::ExperimentalDetectronDetectionOutput::Attributes attributes; ElementType netPrecision; std::string targetName; std::tie( @@ -61,7 +61,7 @@ std::string ExperimentalDetectronDetectionOutputLayerTest::getTestCaseName( void ExperimentalDetectronDetectionOutputLayerTest::SetUp() { std::vector inputShapes; - ngraph::opset6::ExperimentalDetectronDetectionOutput::Attributes attributes; + ov::op::v6::ExperimentalDetectronDetectionOutput::Attributes attributes; ElementType netPrecision; std::string targetName; @@ -90,7 +90,7 @@ void ExperimentalDetectronDetectionOutputLayerTest::SetUp() { for (auto&& shape : inputDynamicShapes) params.push_back(std::make_shared(netPrecision, shape)); - auto experimentalDetectron = std::make_shared( + auto experimentalDetectron = std::make_shared( params[0], // input_rois params[1], // input_deltas params[2], // input_scores diff --git a/src/tests/functional/shared_test_classes/src/single_layer/extract_image_patches.cpp b/src/tests/functional/shared_test_classes/src/single_layer/extract_image_patches.cpp index 436eaf0477c39c..8dece91ed85f29 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/extract_image_patches.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/extract_image_patches.cpp @@ -10,7 +10,7 @@ namespace LayerTestsDefinitions { std::string ExtractImagePatchesTest::getTestCaseName(const testing::TestParamInfo &obj) { std::vector inputShape, kernel, strides, rates; - ngraph::op::PadType pad_type; + ov::op::PadType pad_type; InferenceEngine::Precision netPrc; InferenceEngine::Precision inPrc, outPrc; InferenceEngine::Layout inLayout; @@ -32,17 +32,17 @@ std::string ExtractImagePatchesTest::getTestCaseName(const testing::TestParamInf void ExtractImagePatchesTest::SetUp() { std::vector 
inputShape, kernel, strides, rates; - ngraph::op::PadType pad_type; + ov::op::PadType pad_type; InferenceEngine::Precision netPrecision; std::tie(inputShape, kernel, strides, rates, pad_type, netPrecision, inPrc, outPrc, inLayout, targetDevice) = this->GetParam(); auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - auto inputNode = std::make_shared(ngPrc, ngraph::Shape(inputShape)); + auto inputNode = std::make_shared(ngPrc, ngraph::Shape(inputShape)); ngraph::ParameterVector params = {inputNode}; - auto extImgPatches = std::make_shared( + auto extImgPatches = std::make_shared( inputNode, ngraph::Shape(kernel), ngraph::Strides(strides), ngraph::Shape(rates), pad_type); - ngraph::ResultVector results{std::make_shared(extImgPatches)}; + ngraph::ResultVector results{std::make_shared(extImgPatches)}; function = std::make_shared(results, params, "ExtractImagePatches"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/eye.cpp b/src/tests/functional/shared_test_classes/src/single_layer/eye.cpp index e6c560c2aa6739..484a010da483f3 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/eye.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/eye.cpp @@ -51,7 +51,7 @@ void EyeLayerTest::SetUp() { col_num = eye_par[1]; shift = eye_par[2]; - std::shared_ptr eye_operation; + std::shared_ptr eye_operation; auto rows_const = std::make_shared(ngraph::element::i32, input_shapes[0], &row_num); rows_const->set_friendly_name("rows"); @@ -66,13 +66,13 @@ void EyeLayerTest::SetUp() { out_batch_shape.data()); batch_shape_par->set_friendly_name("batchShape"); eye_operation = - std::make_shared(rows_const, cols_const, diag_const, batch_shape_par, net_precision); + std::make_shared(rows_const, cols_const, diag_const, batch_shape_par, net_precision); } else { - eye_operation = std::make_shared(rows_const, cols_const, diag_const, net_precision); + eye_operation = std::make_shared(rows_const, cols_const, diag_const, net_precision); } // Without this call the eye operation will be calculated by CPU and substituted by Constant operator ov::pass::disable_constant_folding(eye_operation); - ngraph::ResultVector results{std::make_shared(eye_operation)}; + ngraph::ResultVector results{std::make_shared(eye_operation)}; function = std::make_shared(results, ngraph::ParameterVector{}, "eye"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/fake_quantize.cpp b/src/tests/functional/shared_test_classes/src/single_layer/fake_quantize.cpp index 04dfe2540390ae..feb4a6875c81b3 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/fake_quantize.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/fake_quantize.cpp @@ -20,7 +20,7 @@ std::string FakeQuantizeLayerTest::getTestCaseName(const testing::TestParamInfo< std::vector constShape; std::vector fqDirectArgs; std::vector inputArg; - ngraph::op::AutoBroadcastSpec broadcast; + ov::op::AutoBroadcastSpec broadcast; std::tie(levels, constShape, fqDirectArgs, inputArg, broadcast) = fqParams; std::ostringstream result; @@ -57,7 +57,7 @@ void FakeQuantizeLayerTest::SetUp() { std::vector constShape; std::vector fqDirectArg; std::vector inputArg; - ngraph::op::AutoBroadcastSpec broadcast; + ov::op::AutoBroadcastSpec broadcast; std::tie(levels, constShape, fqDirectArg, inputArg, broadcast) = fqParams; if (inputArg.size() == 3) { inputDataMin = inputArg[0]; @@ -92,9 +92,9 @@ void 
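// ---- Illustrative aside (not part of the patch) --------------------------------
// The eye.cpp hunk above builds ov::op::v9::Eye from scalar Constants and then
// calls ov::pass::disable_constant_folding so the op survives into the compiled
// graph instead of being folded to a Constant. A compact sketch of the same idea;
// the values, helper name and header paths are assumptions on my part.
#include <memory>
#include <openvino/op/constant.hpp>
#include <openvino/op/eye.hpp>
#include <openvino/pass/constant_folding.hpp>

std::shared_ptr<ov::Node> make_eye_sketch() {
    auto rows = ov::op::v0::Constant::create(ov::element::i32, ov::Shape{}, {4});
    auto cols = ov::op::v0::Constant::create(ov::element::i32, ov::Shape{}, {4});
    auto diag = ov::op::v0::Constant::create(ov::element::i32, ov::Shape{}, {0});
    auto eye  = std::make_shared<ov::op::v9::Eye>(rows, cols, diag, ov::element::f32);
    ov::pass::disable_constant_folding(eye);  // keep Eye as a runtime op
    return eye;
}
// ---------------------------------------------------------------------------------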
FakeQuantizeLayerTest::SetUp() { {fqDirectArg[2]}, {fqDirectArg[3]}); } - auto fq = std::dynamic_pointer_cast(fakeQNode); + auto fq = std::dynamic_pointer_cast(fakeQNode); - ngraph::ResultVector results{std::make_shared(fq)}; + ngraph::ResultVector results{std::make_shared(fq)}; function = std::make_shared(results, params, "fakeQuantize"); configuration = config.second; } diff --git a/src/tests/functional/shared_test_classes/src/single_layer/gather.cpp b/src/tests/functional/shared_test_classes/src/single_layer/gather.cpp index bd7f75e20b48d8..50fe5f1aa5bff0 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/gather.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/gather.cpp @@ -16,10 +16,10 @@ void GatherLayerTestBase::SetUp(const gatherParamsTuple& params) { ASSERT_EQ(ngraph::shape_size(indicesShape), indices.size()) << "Indices vector size and provided indices shape doesn't fit each other"; auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); ov::ParameterVector functionParams {std::make_shared(ngPrc, ov::Shape(inputShape))}; - auto indicesNode = ngraph::opset3::Constant::create(ngraph::element::i64, ngraph::Shape(indicesShape), indices); - auto axisNode = ngraph::opset3::Constant::create(ngraph::element::i64, ngraph::Shape({}), {axis}); - auto gather = std::make_shared(functionParams[0], indicesNode, axisNode); - ngraph::ResultVector results{std::make_shared(gather)}; + auto indicesNode = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape(indicesShape), indices); + auto axisNode = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape({}), {axis}); + auto gather = std::make_shared(functionParams[0], indicesNode, axisNode); + ngraph::ResultVector results{std::make_shared(gather)}; function = std::make_shared(results, functionParams, "gather"); } @@ -85,9 +85,9 @@ void Gather7LayerTest::SetUp() { ov::ParameterVector functionParams {std::make_shared(ngPrc, ov::Shape(inputShape))}; auto indicesNode = ngraph::builder::makeConstant(ngraph::element::i64, indicesShape, {}, true, inputShape[axis < 0 ? axis + inputShape.size() : axis] - 1, 0); - auto axisNode = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape({}), { axis }); - auto gather = std::make_shared(functionParams[0], indicesNode, axisNode, batchIdx); - ngraph::ResultVector results{ std::make_shared(gather) }; + auto axisNode = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape({}), { axis }); + auto gather = std::make_shared(functionParams[0], indicesNode, axisNode, batchIdx); + ngraph::ResultVector results{ std::make_shared(gather) }; function = std::make_shared(results, functionParams, "gather"); } @@ -127,9 +127,9 @@ void Gather8LayerTest::SetUp() { auto indicesNode = ngraph::builder::makeConstant(ngraph::element::i64, indicesShape, {}, true, inputShape[axis < 0 ? axis + inputShape.size() : axis] - 1, -static_cast(inputShape[axis < 0 ? 
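// ---- Illustrative aside (not part of the patch) --------------------------------
// The gather.cpp hunks here all converge on ov::op::v0::Constant for the axis plus
// the versioned Gather ops (v1/v7/v8 differ mainly in batch_dims support and
// negative-index handling). A small sketch of a v8 Gather with batch_dims; shapes,
// index values and the helper name are illustrative assumptions.
#include <memory>
#include <openvino/op/constant.hpp>
#include <openvino/op/gather.hpp>
#include <openvino/op/parameter.hpp>

std::shared_ptr<ov::Node> make_gather_sketch() {
    auto data    = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{2, 4, 6});
    auto indices = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{2, 2}, {0, 1, 2, 3});
    auto axis    = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {1});
    const int64_t batch_dims = 1;  // leading data/indices dims are treated as batches
    return std::make_shared<ov::op::v8::Gather>(data, indices, axis, batch_dims);
}
// ---------------------------------------------------------------------------------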
axis + inputShape.size() : axis])); - auto axisNode = ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape({}), { axis }); - auto gather = std::make_shared(functionParams[0], indicesNode, axisNode, batchIdx); - ngraph::ResultVector results{ std::make_shared(gather) }; + auto axisNode = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape({}), { axis }); + auto gather = std::make_shared(functionParams[0], indicesNode, axisNode, batchIdx); + ngraph::ResultVector results{ std::make_shared(gather) }; function = std::make_shared(results, functionParams, "gather"); } @@ -166,11 +166,11 @@ void Gather8IndiceScalarLayerTest::SetUp() { int batchIdx = std::get<1>(axis_batchIdx); auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); ov::ParameterVector functionParams {std::make_shared(ngPrc, ov::Shape(inputShape))}; - auto indicesNode = ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{}, {inputShape[axis] - 1})->output(0); + auto indicesNode = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{}, {inputShape[axis] - 1})->output(0); - auto axisNode = ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape({}), { axis }); - auto gather = std::make_shared(functionParams[0], indicesNode, axisNode, batchIdx); - ngraph::ResultVector results{ std::make_shared(gather) }; + auto axisNode = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape({}), { axis }); + auto gather = std::make_shared(functionParams[0], indicesNode, axisNode, batchIdx); + ngraph::ResultVector results{ std::make_shared(gather) }; function = std::make_shared(results, functionParams, "gather"); } @@ -219,9 +219,9 @@ void Gather8withIndicesDataLayerTest::SetUp() { auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); ov::ParameterVector functionParams {std::make_shared(ngPrc, ov::Shape(inputShape))}; auto indicesNode = ngraph::builder::makeConstant(ngraph::element::i64, indicesShape, indicesData); - auto axisNode = ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape({}), { axis }); - auto gather = std::make_shared(functionParams[0], indicesNode, axisNode, batchIdx); - ngraph::ResultVector results{ std::make_shared(gather) }; + auto axisNode = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape({}), { axis }); + auto gather = std::make_shared(functionParams[0], indicesNode, axisNode, batchIdx); + ngraph::ResultVector results{ std::make_shared(gather) }; function = std::make_shared(results, functionParams, "gather"); } diff --git a/src/tests/functional/shared_test_classes/src/single_layer/gather_elements.cpp b/src/tests/functional/shared_test_classes/src/single_layer/gather_elements.cpp index ce5b6b869abefc..e0512e2337a6a1 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/gather_elements.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/gather_elements.cpp @@ -56,7 +56,7 @@ void GatherElementsLayerTest::SetUp() { auto gather = std::make_shared(params[0], indicesNode, axis); - ngraph::ResultVector results{std::make_shared(gather)}; + ngraph::ResultVector results{std::make_shared(gather)}; function = std::make_shared(results, params, "gatherEl"); } diff --git a/src/tests/functional/shared_test_classes/src/single_layer/gather_nd.cpp b/src/tests/functional/shared_test_classes/src/single_layer/gather_nd.cpp index a0d9d40705ab3d..df4ed4c90efb67 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/gather_nd.cpp +++ 
b/src/tests/functional/shared_test_classes/src/single_layer/gather_nd.cpp @@ -46,9 +46,9 @@ void GatherNDLayerTest::SetUp() { ov::ParameterVector params {std::make_shared(ngDPrc, ov::Shape(dataShape))}; auto dataNode = params[0]; - auto gather = std::dynamic_pointer_cast( + auto gather = std::dynamic_pointer_cast( ngraph::builder::makeGatherND(dataNode, indicesShape, ngIPrc, batchDims)); - ngraph::ResultVector results{std::make_shared(gather)}; + ngraph::ResultVector results{std::make_shared(gather)}; function = std::make_shared(results, params, "gatherND"); } @@ -70,9 +70,9 @@ void GatherND8LayerTest::SetUp() { ov::ParameterVector params {std::make_shared(ngDPrc, ov::Shape(dataShape))}; auto dataNode = params[0]; - auto gather = std::dynamic_pointer_cast( + auto gather = std::dynamic_pointer_cast( ngraph::builder::makeGatherND8(dataNode, indicesShape, ngIPrc, batchDims)); - ngraph::ResultVector results{ std::make_shared(gather) }; + ngraph::ResultVector results{ std::make_shared(gather) }; function = std::make_shared(results, params, "gatherND"); } diff --git a/src/tests/functional/shared_test_classes/src/single_layer/gather_tree.cpp b/src/tests/functional/shared_test_classes/src/single_layer/gather_tree.cpp index fc98359d8e3907..11e69d99b695e1 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/gather_tree.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/gather_tree.cpp @@ -60,9 +60,9 @@ void GatherTreeLayerTest::SetUp() { throw std::runtime_error("Unsupported inputType"); } - auto operationResult = std::make_shared(paramsIn.front(), inp2, inp3, inp4); + auto operationResult = std::make_shared(paramsIn.front(), inp2, inp3, inp4); - ngraph::ResultVector results{std::make_shared(operationResult)}; + ngraph::ResultVector results{std::make_shared(operationResult)}; function = std::make_shared(results, paramsIn, "GatherTree"); } diff --git a/src/tests/functional/shared_test_classes/src/single_layer/grid_sample.cpp b/src/tests/functional/shared_test_classes/src/single_layer/grid_sample.cpp index c2a55b66ba00d7..1a68ff8f332625 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/grid_sample.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/grid_sample.cpp @@ -9,9 +9,9 @@ namespace LayerTestsDefinitions { std::string GridSampleLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { InferenceEngine::SizeVector dataShape; InferenceEngine::SizeVector gridShape; - decltype(ngraph::op::v9::GridSample::Attributes::align_corners) alignCorners; - decltype(ngraph::op::v9::GridSample::Attributes::mode) mode; - decltype(ngraph::op::v9::GridSample::Attributes::padding_mode) paddingMode; + decltype(ov::op::v9::GridSample::Attributes::align_corners) alignCorners; + decltype(ov::op::v9::GridSample::Attributes::mode) mode; + decltype(ov::op::v9::GridSample::Attributes::padding_mode) paddingMode; InferenceEngine::Precision inDataPrc; InferenceEngine::Precision inGridPrc; std::string targetDevice; @@ -33,9 +33,9 @@ std::string GridSampleLayerTest::getTestCaseName(const testing::TestParamInfo(ngInDataPrc, ngraph::Shape(dataShape)); - auto grid = std::make_shared(ngInGridPrc, ngraph::Shape(gridShape)); - auto gridSample = std::make_shared( + auto data = std::make_shared(ngInDataPrc, ngraph::Shape(dataShape)); + auto grid = std::make_shared(ngInGridPrc, ngraph::Shape(gridShape)); + auto gridSample = std::make_shared( data, grid, - ngraph::op::v9::GridSample::Attributes(alignCorners, mode, paddingMode)); - function = 
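// ---- Illustrative aside (not part of the patch) --------------------------------
// The gather_tree.cpp hunk above constructs ov::op::v1::GatherTree directly from
// its four inputs (step_ids, parent_idx, max_seq_len, end_token), all sharing one
// element type. A minimal sketch; the [max_time, batch, beam] shape, f32 type and
// helper name are illustrative assumptions.
#include <memory>
#include <openvino/op/gather_tree.hpp>
#include <openvino/op/parameter.hpp>

std::shared_ptr<ov::Node> make_gather_tree_sketch() {
    const ov::Shape ids_shape{10, 1, 5};  // [max_time, batch, beam_width]
    auto step_ids    = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ids_shape);
    auto parent_idx  = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ids_shape);
    auto max_seq_len = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1});  // [batch]
    auto end_token   = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{});   // scalar
    return std::make_shared<ov::op::v1::GatherTree>(step_ids, parent_idx, max_seq_len, end_token);
}
// ---------------------------------------------------------------------------------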
std::make_shared(std::make_shared(gridSample), + ov::op::v9::GridSample::Attributes(alignCorners, mode, paddingMode)); + function = std::make_shared(std::make_shared(gridSample), ngraph::ParameterVector{data, grid}, "GridSample"); } diff --git a/src/tests/functional/shared_test_classes/src/single_layer/grn.cpp b/src/tests/functional/shared_test_classes/src/single_layer/grn.cpp index 54fe36b363170b..f1e135f06c47d2 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/grn.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/grn.cpp @@ -37,8 +37,8 @@ void GrnLayerTest::SetUp() { std::tie(netPrecision, inPrc, outPrc, inLayout, outLayout, inputShapes, bias, targetDevice) = GetParam(); auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); ov::ParameterVector paramsIn {std::make_shared(ngPrc, ov::Shape(inputShapes))}; - auto grn = std::make_shared(paramsIn[0], bias); - ngraph::ResultVector results{ std::make_shared(grn) }; + auto grn = std::make_shared(paramsIn[0], bias); + ngraph::ResultVector results{ std::make_shared(grn) }; function = std::make_shared(results, paramsIn, "Grn"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/group_convolution.cpp b/src/tests/functional/shared_test_classes/src/single_layer/group_convolution.cpp index 14408094c21dff..696f74a2ff7c32 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/group_convolution.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/group_convolution.cpp @@ -14,7 +14,7 @@ std::string GroupConvolutionLayerTest::getTestCaseName(const testing::TestParamI InferenceEngine::SizeVector inputShapes; std::string targetDevice; std::tie(groupConvParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShapes, targetDevice) = obj.param; - ngraph::op::PadType padType; + ov::op::PadType padType; InferenceEngine::SizeVector kernel, stride, dilation; std::vector padBegin, padEnd; size_t convOutChannels, numGroups; @@ -44,17 +44,17 @@ void GroupConvolutionLayerTest::SetUp() { std::vector inputShape; auto netPrecision = InferenceEngine::Precision::UNSPECIFIED; std::tie(groupConvParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShape, targetDevice) = this->GetParam(); - ngraph::op::PadType padType; + ov::op::PadType padType; InferenceEngine::SizeVector kernel, stride, dilation; std::vector padBegin, padEnd; size_t convOutChannels, numGroups; std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, numGroups, padType) = groupConvParams; auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - auto groupConv = std::dynamic_pointer_cast( + auto groupConv = std::dynamic_pointer_cast( ngraph::builder::makeGroupConvolution(params[0], ngPrc, kernel, stride, padBegin, padEnd, dilation, padType, convOutChannels, numGroups)); - ngraph::ResultVector results{std::make_shared(groupConv)}; + ngraph::ResultVector results{std::make_shared(groupConv)}; function = std::make_shared(results, params, "groupConvolution"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/group_convolution_backprop_data.cpp b/src/tests/functional/shared_test_classes/src/single_layer/group_convolution_backprop_data.cpp index de4f784b4c6ec4..fc56ef8a87b72f 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/group_convolution_backprop_data.cpp 
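// ---- Illustrative aside (not part of the patch) --------------------------------
// The grid_sample.cpp hunk above now spells the attributes struct as
// ov::op::v9::GridSample::Attributes(align_corners, mode, padding_mode). A small
// sketch with concrete enum values; the shapes and helper name are illustrative
// assumptions.
#include <memory>
#include <openvino/op/grid_sample.hpp>
#include <openvino/op/parameter.hpp>

std::shared_ptr<ov::Node> make_grid_sample_sketch() {
    auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 3, 8, 8});
    auto grid = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 6, 6, 2});
    ov::op::v9::GridSample::Attributes attrs{/*align_corners=*/false,
                                             ov::op::v9::GridSample::InterpolationMode::BILINEAR,
                                             ov::op::v9::GridSample::PaddingMode::ZEROS};
    return std::make_shared<ov::op::v9::GridSample>(data, grid, attrs);
}
// ---------------------------------------------------------------------------------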
+++ b/src/tests/functional/shared_test_classes/src/single_layer/group_convolution_backprop_data.cpp @@ -16,7 +16,7 @@ std::string GroupConvBackpropDataLayerTest::getTestCaseName(const testing::TestP InferenceEngine::SizeVector inputShapes; std::string targetDevice; std::tie(groupConvBackpropDataParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShapes, targetDevice) = obj.param; - ngraph::op::PadType padType; + ov::op::PadType padType; InferenceEngine::SizeVector kernel, stride, dilation; std::vector padBegin, padEnd; size_t convOutChannels, numGroups; @@ -46,17 +46,17 @@ void GroupConvBackpropDataLayerTest::SetUp() { std::vector inputShape; auto netPrecision = InferenceEngine::Precision::UNSPECIFIED; std::tie(groupConvBackpropDataParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShape, targetDevice) = this->GetParam(); - ngraph::op::PadType padType; + ov::op::PadType padType; InferenceEngine::SizeVector kernel, stride, dilation; std::vector padBegin, padEnd; size_t convOutChannels, numGroups; std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, numGroups, padType) = groupConvBackpropDataParams; auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - auto groupConvBackpropData = std::dynamic_pointer_cast( + auto groupConvBackpropData = std::dynamic_pointer_cast( ngraph::builder::makeGroupConvolutionBackpropData(params[0], ngPrc, kernel, stride, padBegin, padEnd, dilation, padType, convOutChannels, numGroups)); - ngraph::ResultVector results{std::make_shared(groupConvBackpropData)}; + ngraph::ResultVector results{std::make_shared(groupConvBackpropData)}; function = std::make_shared(results, params, "GroupConvolutionBackpropData"); } @@ -68,7 +68,7 @@ std::string GroupConvBackpropLayerTest::getTestCaseName(testing::TestParamInfo padBegin, padEnd, outPadding; size_t convOutChannels, numGroups; @@ -100,25 +100,25 @@ void GroupConvBackpropLayerTest::SetUp() { std::vector inputShape, outputShape; auto netPrecision = InferenceEngine::Precision::UNSPECIFIED; std::tie(groupConvBackpropDataParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShape, outputShape, targetDevice) = this->GetParam(); - ngraph::op::PadType padType; + ov::op::PadType padType; InferenceEngine::SizeVector kernel, stride, dilation; std::vector padBegin, padEnd, outPadding; size_t convOutChannels, numGroups; std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, numGroups, padType, outPadding) = groupConvBackpropDataParams; auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - std::shared_ptr groupConvBackpropData; + std::shared_ptr groupConvBackpropData; if (!outputShape.empty()) { - auto outShape = ngraph::opset3::Constant::create(ngraph::element::i64, {outputShape.size()}, outputShape); - groupConvBackpropData = std::dynamic_pointer_cast( + auto outShape = ov::op::v0::Constant::create(ngraph::element::i64, {outputShape.size()}, outputShape); + groupConvBackpropData = std::dynamic_pointer_cast( ngraph::builder::makeGroupConvolutionBackpropData(params[0], outShape, ngPrc, kernel, stride, padBegin, padEnd, dilation, padType, convOutChannels, numGroups, false, outPadding)); } else { - groupConvBackpropData = std::dynamic_pointer_cast( + groupConvBackpropData = std::dynamic_pointer_cast( ngraph::builder::makeGroupConvolutionBackpropData(params[0], ngPrc, kernel, 
stride, padBegin, padEnd, dilation, padType, convOutChannels, numGroups, false, outPadding)); } - ngraph::ResultVector results{std::make_shared(groupConvBackpropData)}; + ngraph::ResultVector results{std::make_shared(groupConvBackpropData)}; function = std::make_shared(results, params, "GroupConvolutionBackpropData"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/gru_cell.cpp b/src/tests/functional/shared_test_classes/src/single_layer/gru_cell.cpp index 7343ad51749b0b..f1fff2faee7463 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/gru_cell.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/gru_cell.cpp @@ -105,7 +105,7 @@ void GRUCellTest::SetUp() { auto gru_cell = std::make_shared(params[0], params[1], W, R, B, hidden_size, activations, activations_alpha, activations_beta, clip, linear_before_reset); - ngraph::ResultVector results{std::make_shared(gru_cell->output(0))}; + ngraph::ResultVector results{std::make_shared(gru_cell->output(0))}; function = std::make_shared(results, params, "gru_cell"); if (should_decompose) { ngraph::pass::Manager m; diff --git a/src/tests/functional/shared_test_classes/src/single_layer/gru_sequence.cpp b/src/tests/functional/shared_test_classes/src/single_layer/gru_sequence.cpp index a741b6263fde15..2052edfd863ba8 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/gru_sequence.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/gru_sequence.cpp @@ -23,7 +23,7 @@ namespace LayerTestsDefinitions { std::vector activations_beta; float clip; bool linear_before_reset; - ngraph::op::RecurrentSequenceDirection direction; + ov::op::RecurrentSequenceDirection direction; InputLayerType WRBType; InferenceEngine::Precision netPrecision; std::string targetDevice; @@ -59,12 +59,12 @@ namespace LayerTestsDefinitions { std::vector activations_beta; float clip; bool linear_before_reset; - ngraph::op::RecurrentSequenceDirection direction; + ov::op::RecurrentSequenceDirection direction; InputLayerType WRBType; InferenceEngine::Precision netPrecision; std::tie(m_mode, seq_lengths, batch, hidden_size, activations, clip, linear_before_reset, direction, WRBType, netPrecision, targetDevice) = this->GetParam(); - size_t num_directions = direction == ngraph::op::RecurrentSequenceDirection::BIDIRECTIONAL ? 2 : 1; + size_t num_directions = direction == ov::op::RecurrentSequenceDirection::BIDIRECTIONAL ? 
2 : 1; std::vector inputShapes = { {{batch, seq_lengths, input_size}, {batch, num_directions, hidden_size}, {batch}, {num_directions, 3 * hidden_size, input_size}, {num_directions, 3 * hidden_size, hidden_size}, @@ -115,15 +115,15 @@ namespace LayerTestsDefinitions { auto gru_sequence = std::make_shared(params[0], params[1], seq_lengths_node, W, R, B, hidden_size, direction, activations, activations_alpha, activations_beta, clip, linear_before_reset); - ngraph::ResultVector results{std::make_shared(gru_sequence->output(0)), - std::make_shared(gru_sequence->output(1))}; + ngraph::ResultVector results{std::make_shared(gru_sequence->output(0)), + std::make_shared(gru_sequence->output(1))}; function = std::make_shared(results, params, "gru_sequence"); bool is_pure_sequence = (m_mode == SequenceTestsMode::PURE_SEQ || m_mode == SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_PARAM || m_mode == SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_CONST); if (!is_pure_sequence) { ngraph::pass::Manager manager; - if (direction == ngraph::op::RecurrentSequenceDirection::BIDIRECTIONAL) + if (direction == ov::op::RecurrentSequenceDirection::BIDIRECTIONAL) manager.register_pass(); manager.register_pass(); manager.run_passes(function); diff --git a/src/tests/functional/shared_test_classes/src/single_layer/interpolate.cpp b/src/tests/functional/shared_test_classes/src/single_layer/interpolate.cpp index 0382268acb8cc7..9a07facc117300 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/interpolate.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/interpolate.cpp @@ -24,10 +24,10 @@ std::string InterpolateLayerTest::getTestCaseName(const testing::TestParamInfo axes; std::vector scales; bool antialias; - ngraph::op::v4::Interpolate::InterpolateMode mode; - ngraph::op::v4::Interpolate::ShapeCalcMode shapeCalcMode; - ngraph::op::v4::Interpolate::CoordinateTransformMode coordinateTransformMode; - ngraph::op::v4::Interpolate::NearestMode nearestMode; + ov::op::v4::Interpolate::InterpolateMode mode; + ov::op::v4::Interpolate::ShapeCalcMode shapeCalcMode; + ov::op::v4::Interpolate::CoordinateTransformMode coordinateTransformMode; + ov::op::v4::Interpolate::NearestMode nearestMode; double cubeCoef; std::tie(mode, shapeCalcMode, coordinateTransformMode, nearestMode, antialias, padBegin, padEnd, cubeCoef, axes, scales) = interpolateParams; std::ostringstream result; @@ -62,10 +62,10 @@ void InterpolateLayerTest::SetUp() { std::vector axes; std::vector scales; bool antialias; - ngraph::op::v4::Interpolate::InterpolateMode mode; - ngraph::op::v4::Interpolate::ShapeCalcMode shapeCalcMode; - ngraph::op::v4::Interpolate::CoordinateTransformMode coordinateTransformMode; - ngraph::op::v4::Interpolate::NearestMode nearestMode; + ov::op::v4::Interpolate::InterpolateMode mode; + ov::op::v4::Interpolate::ShapeCalcMode shapeCalcMode; + ov::op::v4::Interpolate::CoordinateTransformMode coordinateTransformMode; + ov::op::v4::Interpolate::NearestMode nearestMode; configuration.insert(additional_config.begin(), additional_config.end()); @@ -75,32 +75,32 @@ void InterpolateLayerTest::SetUp() { auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - auto sizesConst = ngraph::opset3::Constant(ngraph::element::Type_t::i64, {targetShape.size()}, targetShape); - auto sizesInput = std::make_shared(sizesConst); + auto sizesConst = ov::op::v0::Constant(ngraph::element::Type_t::i64, {targetShape.size()}, targetShape); + auto 
sizesInput = std::make_shared(sizesConst); - auto scales_const = ngraph::opset3::Constant(ngraph::element::Type_t::f32, {scales.size()}, scales); - auto scalesInput = std::make_shared(scales_const); + auto scales_const = ov::op::v0::Constant(ngraph::element::Type_t::f32, {scales.size()}, scales); + auto scalesInput = std::make_shared(scales_const); - ngraph::op::v4::Interpolate::InterpolateAttrs interpolateAttributes{mode, shapeCalcMode, padBegin, + ov::op::v4::Interpolate::InterpolateAttrs interpolateAttributes{mode, shapeCalcMode, padBegin, padEnd, coordinateTransformMode, nearestMode, antialias, cubeCoef}; - std::shared_ptr interpolate; + std::shared_ptr interpolate; if (axes.empty()) { - interpolate = std::make_shared(params[0], + interpolate = std::make_shared(params[0], sizesInput, scalesInput, interpolateAttributes); } else { - auto axesConst = ngraph::opset3::Constant(ngraph::element::Type_t::i64, {axes.size()}, axes); - auto axesInput = std::make_shared(axesConst); + auto axesConst = ov::op::v0::Constant(ngraph::element::Type_t::i64, {axes.size()}, axes); + auto axesInput = std::make_shared(axesConst); - interpolate = std::make_shared(params[0], + interpolate = std::make_shared(params[0], sizesInput, scalesInput, axesInput, interpolateAttributes); } - const ngraph::ResultVector results{std::make_shared(interpolate)}; + const ngraph::ResultVector results{std::make_shared(interpolate)}; function = std::make_shared(results, params, "interpolate"); } @@ -149,12 +149,12 @@ void Interpolate1LayerTest::SetUp() { auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - auto sizesConst = ngraph::opset3::Constant(ngraph::element::Type_t::i64, {targetShape.size()}, targetShape); - auto sizesInput = std::make_shared(sizesConst); + auto sizesConst = ov::op::v0::Constant(ngraph::element::Type_t::i64, {targetShape.size()}, targetShape); + auto sizesInput = std::make_shared(sizesConst); bool align_corners = true; - ngraph::op::v0::InterpolateAttrs interpolateAttributes; + ov::op::v0::Interpolate::Attributes interpolateAttributes; interpolateAttributes.axes = axes; interpolateAttributes.mode = mode; interpolateAttributes.align_corners = align_corners; @@ -162,9 +162,9 @@ void Interpolate1LayerTest::SetUp() { interpolateAttributes.pads_begin = pads; interpolateAttributes.pads_end = pads; - auto interpolate = std::make_shared(params[0], sizesInput, interpolateAttributes); + auto interpolate = std::make_shared(params[0], sizesInput, interpolateAttributes); - const ngraph::ResultVector results{std::make_shared(interpolate)}; + const ngraph::ResultVector results{std::make_shared(interpolate)}; function = std::make_shared(results, params, "interpolate"); } @@ -211,13 +211,13 @@ std::string InterpolateLayerTest::getTestCaseName(const testing::TestParamInfo makeScalesOrSizesInput(ov::op::util::InterpolateBase::ShapeCalcMode shapeCalcMode, +static std::shared_ptr makeScalesOrSizesInput(ov::op::util::InterpolateBase::ShapeCalcMode shapeCalcMode, const std::vector& sizes, const std::vector& scales) { if (shapeCalcMode == ov::op::util::InterpolateBase::ShapeCalcMode::SIZES) - return std::make_shared(ngraph::element::Type_t::i64, ov::Shape{sizes.size()}, sizes); + return std::make_shared(ngraph::element::Type_t::i64, ov::Shape{sizes.size()}, sizes); else - return std::make_shared(ngraph::element::Type_t::f32, ov::Shape{scales.size()}, scales); + return std::make_shared(ngraph::element::Type_t::f32, 
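// ---- Illustrative aside (not part of the patch) --------------------------------
// The interpolate.cpp hunk above brace-initializes
// ov::op::v4::Interpolate::InterpolateAttrs and then builds the op from sizes,
// scales and (optionally) axes inputs. A compact sketch with concrete values; the
// shapes, enum choices and helper name are illustrative assumptions.
#include <cstddef>
#include <memory>
#include <vector>
#include <openvino/op/constant.hpp>
#include <openvino/op/interpolate.hpp>
#include <openvino/op/parameter.hpp>

std::shared_ptr<ov::Node> make_interpolate_sketch() {
    using Interp = ov::op::v4::Interpolate;
    auto image  = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 3, 10, 10});
    auto sizes  = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{2}, {20, 20});
    auto scales = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{2}, {2.0f, 2.0f});
    auto axes   = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{2}, {2, 3});
    Interp::InterpolateAttrs attrs{Interp::InterpolateMode::NEAREST,
                                   Interp::ShapeCalcMode::SCALES,
                                   std::vector<std::size_t>{0, 0, 0, 0},   // pads_begin
                                   std::vector<std::size_t>{0, 0, 0, 0},   // pads_end
                                   Interp::CoordinateTransformMode::HALF_PIXEL,
                                   Interp::NearestMode::ROUND_PREFER_FLOOR,
                                   /*antialias=*/false,
                                   /*cube_coeff=*/-0.75};
    return std::make_shared<Interp>(image, sizes, scales, axes, attrs);
}
// ---------------------------------------------------------------------------------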
ov::Shape{scales.size()}, scales); } void InterpolateLayerTest::SetUp() { @@ -248,20 +248,20 @@ void InterpolateLayerTest::SetUp() { ov::op::util::InterpolateBase::InterpolateAttrs interpolateAttributes{mode, shapeCalcMode, padBegin, padEnd, coordinateTransformMode, nearestMode, antialias, cubeCoef}; - std::shared_ptr interpolate{}; + std::shared_ptr interpolate{}; if (axes.empty()) { - interpolate = std::make_shared(params[0], + interpolate = std::make_shared(params[0], scalesOrSizesInput, interpolateAttributes); } else { - auto axesInput = std::make_shared(ngraph::element::Type_t::i64, ov::Shape{axes.size()}, axes); + auto axesInput = std::make_shared(ngraph::element::Type_t::i64, ov::Shape{axes.size()}, axes); - interpolate = std::make_shared(params[0], + interpolate = std::make_shared(params[0], scalesOrSizesInput, axesInput, interpolateAttributes); } - const ngraph::ResultVector results{std::make_shared(interpolate)}; + const ngraph::ResultVector results{std::make_shared(interpolate)}; function = std::make_shared(results, params, "interpolate"); } diff --git a/src/tests/functional/shared_test_classes/src/single_layer/log_softmax.cpp b/src/tests/functional/shared_test_classes/src/single_layer/log_softmax.cpp index 843c8945aab6ac..ab0859022090df 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/log_softmax.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/log_softmax.cpp @@ -41,9 +41,9 @@ void LogSoftmaxLayerTest::SetUp() { const ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - const auto logSoftmax = std::make_shared(params.at(0), axis); + const auto logSoftmax = std::make_shared(params.at(0), axis); - const ngraph::ResultVector results {std::make_shared(logSoftmax)}; + const ngraph::ResultVector results {std::make_shared(logSoftmax)}; function = std::make_shared(results, params, "logSoftmax"); } diff --git a/src/tests/functional/shared_test_classes/src/single_layer/logical.cpp b/src/tests/functional/shared_test_classes/src/single_layer/logical.cpp index c692ad4525e903..1136600a88f973 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/logical.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/logical.cpp @@ -72,7 +72,7 @@ void LogicalLayerTest::SetUp() { auto secondInput = ngraph::builder::makeInputLayer(ngInputsPrc, secondInputType, inputShapes.second); OPENVINO_SUPPRESS_DEPRECATED_END if (secondInputType == ngraph::helpers::InputLayerType::PARAMETER) { - inputs.push_back(std::dynamic_pointer_cast(secondInput)); + inputs.push_back(std::dynamic_pointer_cast(secondInput)); } logicalNode = ngraph::builder::makeLogical(inputs[0], secondInput, logicalOpType); } else { diff --git a/src/tests/functional/shared_test_classes/src/single_layer/loop.cpp b/src/tests/functional/shared_test_classes/src/single_layer/loop.cpp index 25f4bfbd90f6dc..844d73cba73292 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/loop.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/loop.cpp @@ -59,9 +59,9 @@ namespace LayerTestsDefinitions { types_separate.push_back(el.second); } // Example: - /* auto X = std::make_shared(ngraph::element::f32, ngraph::Shape{32, 1, 10}); - auto Y = std::make_shared(ngraph::element::f32, ngraph::Shape{32, 1, 10}); - auto M = std::make_shared(ngraph::element::f32, ngraph::Shape{32, 1, 10});*/ + /* auto X = std::make_shared(ngraph::element::f32, ngraph::Shape{32, 1, 10}); + auto Y = std::make_shared(ngraph::element::f32, ngraph::Shape{32, 1, 10}); 
+ auto M = std::make_shared(ngraph::element::f32, ngraph::Shape{32, 1, 10});*/ ov::ParameterVector params; for (auto&& shape : inputs_separate) { params.push_back(std::make_shared(ngPrc, ov::Shape(shape))); @@ -70,55 +70,55 @@ namespace LayerTestsDefinitions { // Set up the cell body, a function from (Xi, Yi) -> (Zo) // Body parameters const std::vector body_params_shapes(inputs_separate.size(), ngraph::PartialShape::dynamic()); - auto current_iteration = std::make_shared(ngraph::element::i64, ngraph::Shape{1}); + auto current_iteration = std::make_shared(ngraph::element::i64, ngraph::Shape{1}); //Example: -/* auto Xi = std::make_shared(ngraph::element::f32, ngraph::PartialShape::dynamic()); - auto Yi = std::make_shared(ngraph::element::f32, ngraph::PartialShape::dynamic()); - auto M_body = std::make_shared(ngraph::element::f32, ngraph::PartialShape::dynamic());*/ +/* auto Xi = std::make_shared(ngraph::element::f32, ngraph::PartialShape::dynamic()); + auto Yi = std::make_shared(ngraph::element::f32, ngraph::PartialShape::dynamic()); + auto M_body = std::make_shared(ngraph::element::f32, ngraph::PartialShape::dynamic());*/ ngraph::ParameterVector body_params; for (const auto &pshape : body_params_shapes) { - auto paramNode = std::make_shared(ngPrc, pshape); + auto paramNode = std::make_shared(ngPrc, pshape); body_params.push_back(paramNode); } std::shared_ptr body_condition_const; if (is_body_condition_const) { if (body_condition) { - body_condition_const = std::make_shared( + body_condition_const = std::make_shared( ngraph::element::boolean, ngraph::Shape{1}, true); } else { - body_condition_const = std::make_shared( + body_condition_const = std::make_shared( ngraph::element::boolean, ngraph::Shape{1}, false); } } auto trip_count_const = - std::make_shared(ngraph::element::i64, ngraph::Shape{1}, trip_count); + std::make_shared(ngraph::element::i64, ngraph::Shape{1}, trip_count); std::shared_ptr exec_condition; if (execute_first_iteration) { - exec_condition = std::make_shared( + exec_condition = std::make_shared( ngraph::element::boolean, ngraph::Shape{1}, true); } else { - exec_condition = std::make_shared( + exec_condition = std::make_shared( ngraph::element::boolean, ngraph::Shape{1}, false); } // Body std::shared_ptr Zo = body_params[0]; for (int i = 1; i < body_params.size(); ++i) { - Zo = std::make_shared(body_params[i], Zo); + Zo = std::make_shared(body_params[i], Zo); } // body_params.insert(body_params.begin(), current_iteration); auto body = std::make_shared(ngraph::OutputVector{body_condition_const, Zo}, body_params); - auto loop = std::make_shared(trip_count_const, exec_condition); + auto loop = std::make_shared(trip_count_const, exec_condition); loop->set_function(body); - loop->set_special_body_ports(ngraph::opset5::Loop::SpecialBodyPorts{-1, 0}); + loop->set_special_body_ports(ov::op::v5::Loop::SpecialBodyPorts{-1, 0}); for (int i = 0; i < body_params.size(); ++i) { if (types_separate[i] == LOOP_IN_TYPE::INVARIANT) { @@ -137,9 +137,9 @@ namespace LayerTestsDefinitions { // start=0, stride=1, part_size=1, end=-1, axis=1 auto out2 = loop->get_concatenated_slices(Zo, 0, 1, 1, -1, 1); - auto result0 = std::make_shared(out0); - auto result1 = std::make_shared(out1); - auto result2 = std::make_shared(out2); + auto result0 = std::make_shared(out0); + auto result1 = std::make_shared(out1); + auto result2 = std::make_shared(out2); function = std::make_shared(ngraph::ResultVector{result0, result1, result2}, params, "loop"); } @@ -205,9 +205,9 @@ namespace LayerTestsDefinitions { auto 
cond_input_create = [&params] (ngraph::element::Type prc, const ngraph::Shape &shape, int value = 0, bool is_static = false) -> std::shared_ptr { if (is_static) - return std::make_shared(prc, shape, value); + return std::make_shared(prc, shape, value); - auto input = std::make_shared(prc, shape); + auto input = std::make_shared(prc, shape); params.push_back(input); return input; }; @@ -230,24 +230,24 @@ namespace LayerTestsDefinitions { // Full loop Dynamic exit loop // n_iter = count n_iter = ex_val // - auto b_indx = std::make_shared(ngraph::element::i64, ngraph::Shape{}); - auto b_data = std::make_shared(prc, ngShape); - auto b_indx_cast = std::make_shared(b_indx, prc); - auto b_add = std::make_shared(b_data, b_indx_cast); + auto b_indx = std::make_shared(ngraph::element::i64, ngraph::Shape{}); + auto b_data = std::make_shared(prc, ngShape); + auto b_indx_cast = std::make_shared(b_indx, prc); + auto b_add = std::make_shared(b_data, b_indx_cast); std::shared_ptr b_cond; if (dynamic_exit == -1) { - b_cond = std::make_shared(ngraph::element::boolean, ngraph::Shape{}, true); + b_cond = std::make_shared(ngraph::element::boolean, ngraph::Shape{}, true); } else { - auto b_exit_value = std::make_shared(ngraph::element::i64, scalarShape, dynamic_exit); - b_cond = std::make_shared(b_indx, b_exit_value); + auto b_exit_value = std::make_shared(ngraph::element::i64, scalarShape, dynamic_exit); + b_cond = std::make_shared(b_indx, b_exit_value); } auto body = std::make_shared( ngraph::OutputVector {b_cond, b_add}, // TODO: check with reverse ngraph::ParameterVector {b_indx, b_data}); // TODO: check with reverse - auto loop = std::make_shared(count, skip); + auto loop = std::make_shared(count, skip); loop->set_function(body); loop->set_special_body_ports({0, 0}); loop->set_merged_input(b_data, start, b_add); @@ -330,22 +330,22 @@ namespace LayerTestsDefinitions { auto to_slice_shape = ngraph::Shape{ieShape}; to_slice_shape[0] = batch_size; - auto to_slice = std::make_shared(prc, to_slice_shape); - auto start = std::make_shared(prc, shape, 0); - auto count = std::make_shared(ngraph::element::i64, scalarShape, num_iteration); - auto icond = std::make_shared(ngraph::element::boolean, scalarShape, true); + auto to_slice = std::make_shared(prc, to_slice_shape); + auto start = std::make_shared(prc, shape, 0); + auto count = std::make_shared(ngraph::element::i64, scalarShape, num_iteration); + auto icond = std::make_shared(ngraph::element::boolean, scalarShape, true); // Loop body - auto b_data = std::make_shared(prc, shape); - auto b_recu = std::make_shared(prc, shape); - auto b_add = std::make_shared(b_data, b_recu); - auto b_cond = std::make_shared(ngraph::element::boolean, scalarShape, true); + auto b_data = std::make_shared(prc, shape); + auto b_recu = std::make_shared(prc, shape); + auto b_add = std::make_shared(b_data, b_recu); + auto b_cond = std::make_shared(ngraph::element::boolean, scalarShape, true); auto body = std::make_shared( ngraph::OutputVector {b_cond, b_add}, ngraph::ParameterVector {b_data, b_recu}); - auto loop = std::make_shared(count, icond); + auto loop = std::make_shared(count, icond); loop->set_function(body); loop->set_special_body_ports({-1, 0}); loop->set_sliced_input(b_data, to_slice, 0, 1, 1, -1, 0); @@ -366,25 +366,25 @@ namespace LayerTestsDefinitions { const auto prc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(iePrc); const auto scalarShape = ngraph::Shape{}; - auto to_slice = std::make_shared(prc, to_slice_shape); - auto start = std::make_shared(prc, shape, 0); - auto
exit_on = std::make_shared(ngraph::element::i64, scalarShape, num_iteration); - auto count = std::make_shared(ngraph::element::i64, scalarShape, trip_count); - auto icond = std::make_shared(ngraph::element::boolean, scalarShape, true); + auto to_slice = std::make_shared(prc, to_slice_shape); + auto start = std::make_shared(prc, shape, 0); + auto exit_on = std::make_shared(ngraph::element::i64, scalarShape, num_iteration); + auto count = std::make_shared(ngraph::element::i64, scalarShape, trip_count); + auto icond = std::make_shared(ngraph::element::boolean, scalarShape, true); // Loop body - auto b_data = std::make_shared(prc, shape); - auto b_recu = std::make_shared(prc, shape); - auto b_add = std::make_shared(b_data, b_recu); - auto b_iter = std::make_shared(ngraph::element::i64, scalarShape); - auto b_exit_on = std::make_shared(ngraph::element::i64, scalarShape); - auto b_cond = std::make_shared(b_iter, b_exit_on); + auto b_data = std::make_shared(prc, shape); + auto b_recu = std::make_shared(prc, shape); + auto b_add = std::make_shared(b_data, b_recu); + auto b_iter = std::make_shared(ngraph::element::i64, scalarShape); + auto b_exit_on = std::make_shared(ngraph::element::i64, scalarShape); + auto b_cond = std::make_shared(b_iter, b_exit_on); auto body = std::make_shared( ngraph::OutputVector {b_cond, b_add}, ngraph::ParameterVector {b_data, b_recu, b_iter, b_exit_on}); - auto loop = std::make_shared(count, icond); + auto loop = std::make_shared(count, icond); loop->set_function(body); loop->set_special_body_ports({2, 0}); loop->set_sliced_input(b_data, to_slice, 0, 1, 1, -1, 0); diff --git a/src/tests/functional/shared_test_classes/src/single_layer/low_precision.cpp b/src/tests/functional/shared_test_classes/src/single_layer/low_precision.cpp index a68b292df93b43..7c1ad191f9a0d4 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/low_precision.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/low_precision.cpp @@ -32,7 +32,7 @@ void LowPrecisionTest::SetUp() { auto weights2Shape = ngraph::Shape{ 128, 32 }; // fully connected 1 - auto input = std::make_shared(ngPrc, inputShape); + auto input = std::make_shared(ngPrc, inputShape); std::vector weights1Data(ngraph::shape_size(weights1Shape), 0.0f); for (size_t i = 0; i < 16; i++) { @@ -40,38 +40,38 @@ void LowPrecisionTest::SetUp() { } auto weights1 = ngraph::builder::makeConstant(ngPrc, weights1Shape, weights1Data); - auto fc1 = std::make_shared(input, weights1); + auto fc1 = std::make_shared(input, weights1); fc1->set_friendly_name("FullyConnected_1"); // bias 1 std::vector bias1Data(ngraph::shape_size(inputShape), 0.0f); auto bias1 = ngraph::builder::makeConstant(ngPrc, inputShape, bias1Data); - auto add1 = std::make_shared(fc1, bias1); + auto add1 = std::make_shared(fc1, bias1); add1->set_friendly_name("Add_1"); #if 0 // ReLU 1 - auto relu1 = std::make_shared(add1); + auto relu1 = std::make_shared(add1); relu1->set_friendly_name("Relu_1"); //// fully connected 2 std::vector weights2Data(ngraph::shape_size(weights2Shape), 0.0f); std::fill(weights2Data.begin(), weights2Data.end(), 0.0001f); auto weights2 = ngraph::builder::makeConstant(ngPrc, weights2Shape, weights2Data); - auto fc2 = std::make_shared(relu1, weights2); + auto fc2 = std::make_shared(relu1, weights2); fc2->set_friendly_name("FullyConnected_2"); //// bias 2 std::vector bias2Data(ngraph::shape_size(weights2Shape), 0.0f); auto bias2 = ngraph::builder::makeConstant(ngPrc, weights2Shape, bias2Data); - auto add2 = std::make_shared(fc2, 
bias2); + auto add2 = std::make_shared(fc2, bias2); add2->set_friendly_name("Add_2"); //// ReLU 2 - auto relu2 = std::make_shared(add2); + auto relu2 = std::make_shared(add2); relu2->set_friendly_name("Relu_2"); #endif configuration = config.second; - function = std::make_shared(ngraph::ResultVector{std::make_shared(add1)}, + function = std::make_shared(ngraph::ResultVector{std::make_shared(add1)}, ngraph::ParameterVector{input}, "LowPrecisionTest"); } diff --git a/src/tests/functional/shared_test_classes/src/single_layer/lrn.cpp b/src/tests/functional/shared_test_classes/src/single_layer/lrn.cpp index b594de81572777..d38edf9dce59ee 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/lrn.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/lrn.cpp @@ -43,9 +43,9 @@ void LrnLayerTest::SetUp() { auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape(inputShapes))}; - auto axes_node = std::make_shared(ngraph::element::i64, ngraph::Shape{axes.size()}, axes.data()); - auto lrn = std::make_shared(params[0], axes_node, alpha, beta, bias, size); - ngraph::ResultVector results {std::make_shared(lrn)}; + auto axes_node = std::make_shared(ngraph::element::i64, ngraph::Shape{axes.size()}, axes.data()); + auto lrn = std::make_shared(params[0], axes_node, alpha, beta, bias, size); + ngraph::ResultVector results {std::make_shared(lrn)}; function = std::make_shared(results, params, "lrn"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/lstm_cell.cpp b/src/tests/functional/shared_test_classes/src/single_layer/lstm_cell.cpp index f028f5f3681f75..67c81880006fab 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/lstm_cell.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/lstm_cell.cpp @@ -100,8 +100,8 @@ void LSTMCellTest::SetUp() { auto lstm_cell = std::make_shared(params[0], params[1], params[2], W, R, B, hidden_size, activations, activations_alpha, activations_beta, clip); - ngraph::ResultVector results{std::make_shared(lstm_cell->output(0)), - std::make_shared(lstm_cell->output(1))}; + ngraph::ResultVector results{std::make_shared(lstm_cell->output(0)), + std::make_shared(lstm_cell->output(1))}; function = std::make_shared(results, params, "lstm_cell"); if (should_decompose) { ngraph::pass::Manager m; diff --git a/src/tests/functional/shared_test_classes/src/single_layer/lstm_cell_basic.cpp b/src/tests/functional/shared_test_classes/src/single_layer/lstm_cell_basic.cpp index 795e3b8ef9228c..88a36406acb948 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/lstm_cell_basic.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/lstm_cell_basic.cpp @@ -70,8 +70,8 @@ void LSTMCellBasicTest::SetUp() { std::vector WRB = {inputShapes[3], inputShapes[4], inputShapes[5]}; auto lstm_cell = ngraph::builder::makeLSTM(paramsOuts, WRB, hidden_size, activations, {}, {}, clip); - ngraph::ResultVector results{std::make_shared(lstm_cell->output(0)), - std::make_shared(lstm_cell->output(1))}; + ngraph::ResultVector results{std::make_shared(lstm_cell->output(0)), + std::make_shared(lstm_cell->output(1))}; function = std::make_shared(results, params, "lstm_cell"); if (should_decompose) { ngraph::pass::Manager m; diff --git a/src/tests/functional/shared_test_classes/src/single_layer/lstm_sequence.cpp b/src/tests/functional/shared_test_classes/src/single_layer/lstm_sequence.cpp 
index 4946f738367e2a..086b514dc65cfe 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/lstm_sequence.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/lstm_sequence.cpp @@ -21,7 +21,7 @@ namespace LayerTestsDefinitions { std::vector activations_alpha; std::vector activations_beta; float clip; - ngraph::op::RecurrentSequenceDirection direction; + ov::op::RecurrentSequenceDirection direction; InputLayerType WRBType; InferenceEngine::Precision netPrecision; std::string targetDevice; @@ -59,12 +59,12 @@ namespace LayerTestsDefinitions { std::vector activations_alpha; std::vector activations_beta; float clip; - ngraph::op::RecurrentSequenceDirection direction; + ov::op::RecurrentSequenceDirection direction; InputLayerType WRBType; InferenceEngine::Precision netPrecision; std::tie(m_mode, seq_lengths, batch, hidden_size, input_size, activations, clip, direction, WRBType, netPrecision, targetDevice) = this->GetParam(); - size_t num_directions = direction == ngraph::op::RecurrentSequenceDirection::BIDIRECTIONAL ? 2 : 1; + size_t num_directions = direction == ov::op::RecurrentSequenceDirection::BIDIRECTIONAL ? 2 : 1; m_max_seq_len = seq_lengths; std::vector inputShapes = { {{batch, seq_lengths, input_size}, {batch, num_directions, hidden_size}, {batch, num_directions, hidden_size}, @@ -116,16 +116,16 @@ namespace LayerTestsDefinitions { auto lstm_sequence = std::make_shared(params[0], params[1], params[2], seq_lengths_node, W, R, B, hidden_size, direction, std::vector{}, std::vector{}, activations, clip); - ngraph::ResultVector results{std::make_shared(lstm_sequence->output(0)), - std::make_shared(lstm_sequence->output(1)), - std::make_shared(lstm_sequence->output(2))}; + ngraph::ResultVector results{std::make_shared(lstm_sequence->output(0)), + std::make_shared(lstm_sequence->output(1)), + std::make_shared(lstm_sequence->output(2))}; function = std::make_shared(results, params, "lstm_sequence"); bool is_pure_sequence = (m_mode == SequenceTestsMode::PURE_SEQ || m_mode == SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_PARAM || m_mode == SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_CONST); if (!is_pure_sequence) { ngraph::pass::Manager manager; - if (direction == ngraph::op::RecurrentSequenceDirection::BIDIRECTIONAL) + if (direction == ov::op::RecurrentSequenceDirection::BIDIRECTIONAL) manager.register_pass(); manager.register_pass(); manager.run_passes(function); diff --git a/src/tests/functional/shared_test_classes/src/single_layer/mat_mul.cpp b/src/tests/functional/shared_test_classes/src/single_layer/mat_mul.cpp index 5148815930508d..75543ba36c31a8 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/mat_mul.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/mat_mul.cpp @@ -67,10 +67,10 @@ void MatMulTest::SetUp() { auto secondaryInput = ngraph::builder::makeInputLayer(ngPrc, secondaryInputType, shapeRelatedParams.input2.first); OPENVINO_SUPPRESS_DEPRECATED_END if (secondaryInputType == ngraph::helpers::InputLayerType::PARAMETER) { - params.push_back(std::dynamic_pointer_cast(secondaryInput)); + params.push_back(std::dynamic_pointer_cast(secondaryInput)); } auto MatMul = std::make_shared(params[0], secondaryInput, shapeRelatedParams.input1.second, shapeRelatedParams.input2.second); - ngraph::ResultVector results{std::make_shared(MatMul)}; + ngraph::ResultVector results{std::make_shared(MatMul)}; function = std::make_shared(results, params, "MatMul"); } diff --git 
a/src/tests/functional/shared_test_classes/src/single_layer/matrix_nms.cpp b/src/tests/functional/shared_test_classes/src/single_layer/matrix_nms.cpp index c4677606a469e5..4ed7e20fd79916 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/matrix_nms.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/matrix_nms.cpp @@ -319,7 +319,7 @@ void MatrixNmsLayerTest::SetUp() { for (auto&& shape : inputDynamicShapes) { params.push_back(std::make_shared(paramsPrec, shape)); } - auto nms = std::make_shared(params[0], params[1], m_attrs); + auto nms = std::make_shared(params[0], params[1], m_attrs); function = std::make_shared(nms, params, "MatrixNMS"); } diff --git a/src/tests/functional/shared_test_classes/src/single_layer/minimum_maximum.cpp b/src/tests/functional/shared_test_classes/src/single_layer/minimum_maximum.cpp index 00432a511f7f3b..bd46c5e412d1c7 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/minimum_maximum.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/minimum_maximum.cpp @@ -43,7 +43,7 @@ namespace LayerTestsDefinitions { auto secondaryInput = ngraph::builder::makeInputLayer(ngPrc, inputType, {inputShapes[1]}); OPENVINO_SUPPRESS_DEPRECATED_END if (inputType == ngraph::helpers::InputLayerType::PARAMETER) { - input.push_back(std::dynamic_pointer_cast(secondaryInput)); + input.push_back(std::dynamic_pointer_cast(secondaryInput)); } OPENVINO_SUPPRESS_DEPRECATED_START diff --git a/src/tests/functional/shared_test_classes/src/single_layer/multinomial.cpp b/src/tests/functional/shared_test_classes/src/single_layer/multinomial.cpp index 60a172271987de..42c631194bcf87 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/multinomial.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/multinomial.cpp @@ -54,10 +54,10 @@ void MultinomialTest::SetUp() { params.push_back(std::make_shared(ngPrc, shape)); } - auto numSamplesConstant = std::make_shared( + auto numSamplesConstant = std::make_shared( ngraph::element::Type_t::i64, ov::Shape{1}, numSamples); const auto paramOuts = - ngraph::helpers::convert2OutputVector(ngraph::helpers::castOps2Nodes(params)); + ngraph::helpers::convert2OutputVector(ngraph::helpers::castOps2Nodes(params)); const auto multinomial = std::make_shared( paramOuts.at(0), diff --git a/src/tests/functional/shared_test_classes/src/single_layer/mvn.cpp b/src/tests/functional/shared_test_classes/src/single_layer/mvn.cpp index 720dfd811b24d2..cf0ec9e587390c 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/mvn.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/mvn.cpp @@ -39,12 +39,12 @@ void Mvn1LayerTest::SetUp() { auto inType = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision); ov::ParameterVector param {std::make_shared(inType, ov::Shape(inputShapes))}; OPENVINO_SUPPRESS_DEPRECATED_START - auto mvn = std::dynamic_pointer_cast(ngraph::builder::makeMVN(param[0], acrossChanels, normalizeVariance, eps)); + auto mvn = std::dynamic_pointer_cast(ngraph::builder::makeMVN(param[0], acrossChanels, normalizeVariance, eps)); if (!axes.empty()) { - mvn = std::dynamic_pointer_cast(ngraph::builder::makeMVN(param[0], axes, normalizeVariance, eps)); + mvn = std::dynamic_pointer_cast(ngraph::builder::makeMVN(param[0], axes, normalizeVariance, eps)); } OPENVINO_SUPPRESS_DEPRECATED_END - ngraph::ResultVector results{std::make_shared(mvn)}; + ngraph::ResultVector results{std::make_shared(mvn)}; function = std::make_shared(results, 
param, "MVN1"); } @@ -87,7 +87,7 @@ void Mvn6LayerTest::SetUp() { OPENVINO_SUPPRESS_DEPRECATED_START auto mvn = ngraph::builder::makeMVN6(param[0], axesNode, normalizeVariance, eps, epsMode); OPENVINO_SUPPRESS_DEPRECATED_END - ngraph::ResultVector results{std::make_shared(mvn)}; + ngraph::ResultVector results{std::make_shared(mvn)}; function = std::make_shared(results, param, "MVN6"); } diff --git a/src/tests/functional/shared_test_classes/src/single_layer/non_max_suppression.cpp b/src/tests/functional/shared_test_classes/src/single_layer/non_max_suppression.cpp index 2dbdeb4234eef7..12114e53b71c04 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/non_max_suppression.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/non_max_suppression.cpp @@ -17,7 +17,7 @@ std::string NmsLayerTest::getTestCaseName(const testing::TestParamInfo(nms->output(0), opset5::Constant::create(outType, Shape{1}, {1})); + std::make_shared(nms->output(0), ov::op::v0::Constant::create(outType, Shape{1}, {1})); auto nms_1_identity = - std::make_shared(nms->output(1), opset5::Constant::create(ngPrc, Shape{1}, {1})); + std::make_shared(nms->output(1), ov::op::v0::Constant::create(ngPrc, Shape{1}, {1})); auto nms_2_identity = - std::make_shared(nms->output(2), opset5::Constant::create(outType, Shape{1}, {1})); + std::make_shared(nms->output(2), ov::op::v0::Constant::create(outType, Shape{1}, {1})); nms_0_identity->set_friendly_name("Multiply_0"); nms_1_identity->set_friendly_name("Multiply_1"); nms_2_identity->set_friendly_name("Multiply_2"); @@ -349,7 +350,7 @@ void Nms9LayerTest::SetUp() { InputPrecisions inPrecisions; size_t maxOutBoxesPerClass; float iouThr, scoreThr, softNmsSigma; - op::v5::NonMaxSuppression::BoxEncodingType boxEncoding; + ov::op::v5::NonMaxSuppression::BoxEncodingType boxEncoding; bool sortResDescend; element::Type outType; std::tie(inShapeParams, diff --git a/src/tests/functional/shared_test_classes/src/single_layer/nonzero.cpp b/src/tests/functional/shared_test_classes/src/single_layer/nonzero.cpp index 2c57450a1265b6..7d1a279a228978 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/nonzero.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/nonzero.cpp @@ -29,11 +29,11 @@ void NonZeroLayerTest::SetUp() { configuration.insert(additionalConfig.cbegin(), additionalConfig.cend()); const auto& precision = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision); - const auto& paramNode = std::make_shared(precision, ngraph::Shape(inputShape)); + const auto& paramNode = std::make_shared(precision, ngraph::Shape(inputShape)); - auto nonZeroOp = std::make_shared(paramNode->output(0)); + auto nonZeroOp = std::make_shared(paramNode->output(0)); - ngraph::ResultVector results{std::make_shared(nonZeroOp)}; + ngraph::ResultVector results{std::make_shared(nonZeroOp)}; function = std::make_shared(results, ngraph::ParameterVector{paramNode}, "non_zero"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/normalize_l2.cpp b/src/tests/functional/shared_test_classes/src/single_layer/normalize_l2.cpp index 771fe5c2d0f965..8ffbed114d4182 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/normalize_l2.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/normalize_l2.cpp @@ -9,7 +9,7 @@ namespace LayerTestsDefinitions { std::string NormalizeL2LayerTest::getTestCaseName(const testing::TestParamInfo& obj) { std::vector axes; float eps; - 
ngraph::op::EpsMode epsMode; + ov::op::EpsMode epsMode; InferenceEngine::SizeVector inputShape; InferenceEngine::Precision netPrecision; std::string targetDevice; @@ -40,7 +40,7 @@ void NormalizeL2LayerTest::SetUp() { InferenceEngine::SizeVector inputShape; std::vector axes; float eps; - ngraph::op::EpsMode epsMode; + ov::op::EpsMode epsMode; InferenceEngine::Precision netPrecision; std::tie(axes, eps, epsMode, inputShape, netPrecision, targetDevice) = this->GetParam(); auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); @@ -51,7 +51,7 @@ void NormalizeL2LayerTest::SetUp() { auto normAxes = std::make_shared(ov::element::i64, ov::Shape{axes.size()}, axes); auto norm = std::make_shared(data_input, normAxes, eps, epsMode); - ngraph::ResultVector results{std::make_shared(norm)}; + ngraph::ResultVector results{std::make_shared(norm)}; function = std::make_shared(results, params, "NormalizeL2"); } diff --git a/src/tests/functional/shared_test_classes/src/single_layer/one_hot.cpp b/src/tests/functional/shared_test_classes/src/single_layer/one_hot.cpp index 10c1a3b5f92274..1ad2b718a532c6 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/one_hot.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/one_hot.cpp @@ -48,7 +48,7 @@ void OneHotLayerTest::SetUp() { auto off_value_const = std::make_shared(set_type, ov::Shape{}, off_val); auto onehot = std::make_shared(params[0], depth_const, on_value_const, off_value_const, axis); - ngraph::ResultVector results{std::make_shared(onehot)}; + ngraph::ResultVector results{std::make_shared(onehot)}; function = std::make_shared(results, params, "OneHot"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/pad.cpp b/src/tests/functional/shared_test_classes/src/single_layer/pad.cpp index 09f98d4f157b79..3c6db7a66fc8ad 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/pad.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/pad.cpp @@ -45,7 +45,7 @@ void PadLayerTest::SetUp() { auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; auto pad = CreatePadOp(params[0], padsBegin, padsEnd, argPadValue, padMode); - ngraph::ResultVector results{std::make_shared(pad)}; + ngraph::ResultVector results{std::make_shared(pad)}; function = std::make_shared(results, params, "pad"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/pooling.cpp b/src/tests/functional/shared_test_classes/src/single_layer/pooling.cpp index f7a1a93f25fb84..86c57ea0765358 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/pooling.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/pooling.cpp @@ -17,8 +17,8 @@ std::string PoolingLayerTest::getTestCaseName(const testing::TestParamInfo kernel, stride; std::vector padBegin, padEnd; - ngraph::op::PadType padType; - ngraph::op::RoundingType roundingType; + ov::op::PadType padType; + ov::op::RoundingType roundingType; bool excludePad; std::tie(poolType, kernel, stride, padBegin, padEnd, roundingType, padType, excludePad) = poolParams; @@ -59,8 +59,8 @@ std::string GlobalPoolingLayerTest::getTestCaseName(const testing::TestParamInfo ngraph::helpers::PoolingTypes poolType; std::vector kernel, stride; std::vector padBegin, padEnd; - ngraph::op::PadType padType; - ngraph::op::RoundingType roundingType; + 
ov::op::PadType padType; + ov::op::RoundingType roundingType; bool excludePad; std::tie(poolType, kernel, stride, padBegin, padEnd, roundingType, padType, excludePad) = poolParams; @@ -81,7 +81,7 @@ std::string GlobalPoolingLayerTest::getTestCaseName(const testing::TestParamInfo result << "S" << ov::test::utils::vec2str(stride) << "_"; result << "PB" << ov::test::utils::vec2str(padBegin) << "_"; result << "PE" << ov::test::utils::vec2str(padEnd) << "_"; - if (padType == ngraph::op::PadType::EXPLICIT) { + if (padType == ov::op::PadType::EXPLICIT) { result << "Rounding=" << roundingType << "_"; } result << "AutoPad=" << padType << "_"; @@ -104,8 +104,8 @@ std::string MaxPoolingV8LayerTest::getTestCaseName(const testing::TestParamInfo< std::tie(poolParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShapes, targetDevice) = obj.param; std::vector kernel, stride, dilation; std::vector padBegin, padEnd; - ngraph::op::PadType padType; - ngraph::op::RoundingType roundingType; + ov::op::PadType padType; + ov::op::RoundingType roundingType; ngraph::element::Type indexElementType; int64_t axis; std::tie(kernel, stride, dilation, padBegin, padEnd, indexElementType, axis, roundingType, padType) = poolParams; @@ -138,8 +138,8 @@ void PoolingLayerTest::SetUp() { ngraph::helpers::PoolingTypes poolType; std::vector kernel, stride; std::vector padBegin, padEnd; - ngraph::op::PadType padType; - ngraph::op::RoundingType roundingType; + ov::op::PadType padType; + ov::op::RoundingType roundingType; bool excludePad; std::tie(poolType, kernel, stride, padBegin, padEnd, roundingType, padType, excludePad) = poolParams; @@ -158,7 +158,7 @@ void PoolingLayerTest::SetUp() { poolType); OPENVINO_SUPPRESS_DEPRECATED_END - ngraph::ResultVector results{std::make_shared(pooling)}; + ngraph::ResultVector results{std::make_shared(pooling)}; function = std::make_shared(results, params, "pooling"); } @@ -170,8 +170,8 @@ void GlobalPoolingLayerTest::SetUp() { ngraph::helpers::PoolingTypes poolType; std::vector kernel, stride; std::vector padBegin, padEnd; - ngraph::op::PadType padType; - ngraph::op::RoundingType roundingType; + ov::op::PadType padType; + ov::op::RoundingType roundingType; bool excludePad; std::tie(poolType, kernel, stride, padBegin, padEnd, roundingType, padType, excludePad) = poolParams; @@ -192,7 +192,7 @@ void GlobalPoolingLayerTest::SetUp() { poolType); OPENVINO_SUPPRESS_DEPRECATED_END - ngraph::ResultVector results{std::make_shared(pooling)}; + ngraph::ResultVector results{std::make_shared(pooling)}; function = std::make_shared(results, params, "pooling"); } @@ -203,8 +203,8 @@ void MaxPoolingV8LayerTest::SetUp() { std::tie(poolParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShape, targetDevice) = this->GetParam(); std::vector kernel, stride, dilation; std::vector padBegin, padEnd; - ngraph::op::PadType padType; - ngraph::op::RoundingType roundingType; + ov::op::PadType padType; + ov::op::RoundingType roundingType; ngraph::element::Type indexElementType; int64_t axis; std::tie(kernel, stride, dilation, padBegin, padEnd, indexElementType, axis, roundingType, padType) = poolParams; @@ -219,10 +219,10 @@ void MaxPoolingV8LayerTest::SetUp() { const auto maxPoolV8_second_output_is_supported = targetDevice == ov::test::utils::DEVICE_GPU; ngraph::ResultVector results; if (maxPoolV8_second_output_is_supported) { - results = {std::make_shared(maxPool->output(0)), - std::make_shared(maxPool->output(1))}; + results = {std::make_shared(maxPool->output(0)), + 
std::make_shared(maxPool->output(1))}; } else { - results = { std::make_shared(maxPool->output(0)) }; + results = { std::make_shared(maxPool->output(0)) }; } function = std::make_shared(results, params, "MaxPoolV8"); } diff --git a/src/tests/functional/shared_test_classes/src/single_layer/power.cpp b/src/tests/functional/shared_test_classes/src/single_layer/power.cpp index 6cb0251c00b872..e61b7f1f1cf8d8 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/power.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/power.cpp @@ -36,8 +36,8 @@ namespace LayerTestsDefinitions { auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); ov::ParameterVector paramsIn{std::make_shared(ngPrc, ov::Shape(inputShapes[0]))}; - auto power_const = std::make_shared(ngPrc, ngraph::Shape{ 1 }, power); - auto pow = std::make_shared(paramsIn[0], power_const); + auto power_const = std::make_shared(ngPrc, ngraph::Shape{ 1 }, power); + auto pow = std::make_shared(paramsIn[0], power_const); function = std::make_shared(pow, paramsIn, "power"); } diff --git a/src/tests/functional/shared_test_classes/src/single_layer/prior_box.cpp b/src/tests/functional/shared_test_classes/src/single_layer/prior_box.cpp index 4cd4b12ab28ac8..db3ef84aa0c944 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/prior_box.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/prior_box.cpp @@ -69,7 +69,7 @@ void PriorBoxLayerTest::SetUp() { ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape(inputShapes)), std::make_shared(ngPrc, ov::Shape(imageShapes))}; - ngraph::op::v8::PriorBox::Attributes attributes; + ov::op::v8::PriorBox::Attributes attributes; attributes.min_size = min_size; attributes.max_size = max_size; attributes.aspect_ratio = aspect_ratio; @@ -84,16 +84,16 @@ void PriorBoxLayerTest::SetUp() { attributes.scale_all_sizes = scale_all_sizes; attributes.min_max_aspect_ratios_order = min_max_aspect_ratios_order; - auto shape_of_1 = std::make_shared(params[0]); - auto shape_of_2 = std::make_shared(params[1]); - auto priorBox = std::make_shared( + auto shape_of_1 = std::make_shared(params[0]); + auto shape_of_2 = std::make_shared(params[1]); + auto priorBox = std::make_shared( shape_of_1, shape_of_2, attributes); ov::pass::disable_constant_folding(priorBox); - ngraph::ResultVector results{std::make_shared(priorBox)}; + ngraph::ResultVector results{std::make_shared(priorBox)}; function = std::make_shared (results, params, "PriorBoxFunction"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/prior_box_clustered.cpp b/src/tests/functional/shared_test_classes/src/single_layer/prior_box_clustered.cpp index aaf00952633cf4..c9453be82b6ce4 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/prior_box_clustered.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/prior_box_clustered.cpp @@ -76,7 +76,7 @@ void PriorBoxClusteredLayerTest::SetUp() { ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShapes)), std::make_shared(ngPrc, ov::Shape(inputShapes))}; - ngraph::op::PriorBoxClusteredAttrs attributes; + ov::op::v0::PriorBoxClustered::Attributes attributes; attributes.widths = widths; attributes.heights = heights; attributes.clip = clip; @@ -86,14 +86,14 @@ void PriorBoxClusteredLayerTest::SetUp() { attributes.offset = offset; attributes.variances = variances; - auto shape_of_1 = std::make_shared(params[0]); - auto shape_of_2 = 
std::make_shared(params[1]); - auto priorBoxClustered = std::make_shared( + auto shape_of_1 = std::make_shared(params[0]); + auto shape_of_2 = std::make_shared(params[1]); + auto priorBoxClustered = std::make_shared( shape_of_1, shape_of_2, attributes); - ngraph::ResultVector results{ std::make_shared(priorBoxClustered) }; + ngraph::ResultVector results{ std::make_shared(priorBoxClustered) }; function = std::make_shared(results, params, "PB_Clustered"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/proposal.cpp b/src/tests/functional/shared_test_classes/src/single_layer/proposal.cpp index dddc9271909748..8eeef77d076a46 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/proposal.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/proposal.cpp @@ -153,7 +153,7 @@ void ProposalLayerTest::SetUp() { params[1]->set_friendly_name("b_boxes"); OPENVINO_SUPPRESS_DEPRECATED_START - auto proposal = std::dynamic_pointer_cast( + auto proposal = std::dynamic_pointer_cast( ngraph::builder::makeProposal(params[0], params[1], img_info, ngPrc, base_size, pre_nms_topn, @@ -172,8 +172,8 @@ void ProposalLayerTest::SetUp() { OPENVINO_SUPPRESS_DEPRECATED_END ngraph::ResultVector results{ - std::make_shared(proposal->output(0)), - std::make_shared(proposal->output(1))}; + std::make_shared(proposal->output(0)), + std::make_shared(proposal->output(1))}; function = std::make_shared(results, params, "proposal"); } diff --git a/src/tests/functional/shared_test_classes/src/single_layer/psroi_pooling.cpp b/src/tests/functional/shared_test_classes/src/single_layer/psroi_pooling.cpp index 9f1cf2313cf60a..c258ce10941952 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/psroi_pooling.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/psroi_pooling.cpp @@ -109,7 +109,7 @@ void PSROIPoolingLayerTest::SetUp() { auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape(inputShape)), std::make_shared(ngPrc, ov::Shape(coordsShape))}; - std::shared_ptr psroiPooling = std::make_shared(params[0], + std::shared_ptr psroiPooling = std::make_shared(params[0], params[1], outputDim, groupSize_, @@ -117,7 +117,7 @@ void PSROIPoolingLayerTest::SetUp() { spatialBinsX_, spatialBinsY_, mode_); - ngraph::ResultVector results{std::make_shared(psroiPooling)}; + ngraph::ResultVector results{std::make_shared(psroiPooling)}; function = std::make_shared(results, params, "psroi_pooling"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/random_uniform.cpp b/src/tests/functional/shared_test_classes/src/single_layer/random_uniform.cpp index d94885cb2ebd6a..27026e7b02dd84 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/random_uniform.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/random_uniform.cpp @@ -71,13 +71,13 @@ void RandomUniformLayerTest::SetUp() { auto min_value = createConstant(randomUniformParams.precision, randomUniformParams.min_value); auto max_value = createConstant(randomUniformParams.precision, randomUniformParams.max_value); - auto random_uniform = std::make_shared(shape_of, + auto random_uniform = std::make_shared(shape_of, min_value, max_value, precision, global_seed, op_seed); - ngraph::ResultVector results{std::make_shared(random_uniform)}; + ngraph::ResultVector results{std::make_shared(random_uniform)}; 
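// --- Editor's aside (not part of the patch): a hedged sketch of the migrated
// RandomUniform construction shown in the random_uniform.cpp hunk below; the
// concrete input shape, value range and seeds are illustrative assumptions.
#include <openvino/openvino.hpp>

std::shared_ptr<ov::Model> make_random_uniform_example() {
    auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 3, 8, 8});
    // The output shape of RandomUniform is taken from ShapeOf(input).
    auto shape_of = std::make_shared<ov::op::v3::ShapeOf>(input);
    auto min_value = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{}, {0.0f});
    auto max_value = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{}, {1.0f});

    const uint64_t global_seed = 10;
    const uint64_t op_seed = 10;
    auto random_uniform = std::make_shared<ov::op::v8::RandomUniform>(shape_of,
                                                                      min_value,
                                                                      max_value,
                                                                      ov::element::f32,
                                                                      global_seed,
                                                                      op_seed);

    auto result = std::make_shared<ov::op::v0::Result>(random_uniform);
    return std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{input}, "random_uniform_example");
}
// --- End of editor's aside.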
function = std::make_shared(results, ngraph::ParameterVector{input}, "random_uniform"); } diff --git a/src/tests/functional/shared_test_classes/src/single_layer/range.cpp b/src/tests/functional/shared_test_classes/src/single_layer/range.cpp index 5ab0fad424e925..910c7f5bee42c0 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/range.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/range.cpp @@ -57,10 +57,10 @@ void RangeLayerTest::SetUp() { param->set_friendly_name(shape.first); params.push_back(param); } - auto range = std::make_shared(params[0], params[1], params[2], ngPrc); + auto range = std::make_shared(params[0], params[1], params[2], ngPrc); function = std::make_shared( - std::make_shared(range), + std::make_shared(range), params, "Range"); } @@ -117,8 +117,8 @@ void RangeNumpyLayerTest::SetUp() { params[1]->set_friendly_name("stop"); params[2]->set_friendly_name("step"); - auto range = std::make_shared(params[0], params[1], params[2], ngNetPrc); - const ngraph::ResultVector results{std::make_shared(range)}; + auto range = std::make_shared(params[0], params[1], params[2], ngNetPrc); + const ngraph::ResultVector results{std::make_shared(range)}; function = std::make_shared(results, params, "Range"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/rdft.cpp b/src/tests/functional/shared_test_classes/src/single_layer/rdft.cpp index a953a84086dccd..5a858f86d30736 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/rdft.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/rdft.cpp @@ -34,12 +34,12 @@ void RDFTLayerTest::SetUp() { std::tie(inputShapes, inputPrecision, axes, signalSize, opType, targetDevice) = this->GetParam(); auto inType = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision); ngraph::ParameterVector paramVector; - auto paramData = std::make_shared(inType, ngraph::Shape(inputShapes)); + auto paramData = std::make_shared(inType, ngraph::Shape(inputShapes)); paramVector.push_back(paramData); auto rdft = ngraph::builder::makeRDFT(paramVector[0], axes, signalSize, opType); - ngraph::ResultVector results{std::make_shared(rdft)}; + ngraph::ResultVector results{std::make_shared(rdft)}; function = std::make_shared(results, paramVector, "RDFT"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/reduce_ops.cpp b/src/tests/functional/shared_test_classes/src/single_layer/reduce_ops.cpp index f9c40c7b60fe2a..069de325e3db87 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/reduce_ops.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/reduce_ops.cpp @@ -58,10 +58,10 @@ void ReduceOpsLayerTest::SetUp() { FAIL() << "Reduce op doesn't support operation type: " << opType; } auto reductionAxesNode = std::dynamic_pointer_cast( - std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape(shapeAxes), axes)); + std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape(shapeAxes), axes)); const auto reduce = ngraph::builder::makeReduce(params[0], reductionAxesNode, keepDims, reductionType); - const ngraph::ResultVector results{std::make_shared(reduce)}; + const ngraph::ResultVector results{std::make_shared(reduce)}; function = std::make_shared(results, params, "Reduce"); } InferenceEngine::Blob::Ptr ReduceOpsLayerTest::GenerateInput(const InferenceEngine::InputInfo &info) const { diff --git 
a/src/tests/functional/shared_test_classes/src/single_layer/region_yolo.cpp b/src/tests/functional/shared_test_classes/src/single_layer/region_yolo.cpp index c4f7e13d5c8030..adaaaaca2d5927 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/region_yolo.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/region_yolo.cpp @@ -43,9 +43,9 @@ void RegionYoloLayerTest::SetUp() { InferenceEngine::Precision netPrecision; std::tie(inputShape, classes, coords, num_regions, do_softmax, mask, start_axis, end_axis, netPrecision, targetDevice) = this->GetParam(); auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - auto param = std::make_shared(ngPrc, inputShape); - auto region_yolo = std::make_shared(param, coords, classes, num_regions, do_softmax, mask, start_axis, end_axis); - function = std::make_shared(std::make_shared(region_yolo), ngraph::ParameterVector{param}, "RegionYolo"); + auto param = std::make_shared(ngPrc, inputShape); + auto region_yolo = std::make_shared(param, coords, classes, num_regions, do_softmax, mask, start_axis, end_axis); + function = std::make_shared(std::make_shared(region_yolo), ngraph::ParameterVector{param}, "RegionYolo"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/reorg_yolo.cpp b/src/tests/functional/shared_test_classes/src/single_layer/reorg_yolo.cpp index 6deda839ec4fc4..0f918de70bc9a5 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/reorg_yolo.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/reorg_yolo.cpp @@ -25,9 +25,9 @@ void ReorgYoloLayerTest::SetUp() { size_t stride; InferenceEngine::Precision netPrecision; std::tie(inputShape, stride, netPrecision, targetDevice) = this->GetParam(); - auto param = std::make_shared(ngraph::element::f32, inputShape); - auto reorg_yolo = std::make_shared(param, stride); - function = std::make_shared(std::make_shared(reorg_yolo), ngraph::ParameterVector{param}, "ReorgYolo"); + auto param = std::make_shared(ngraph::element::f32, inputShape); + auto reorg_yolo = std::make_shared(param, stride); + function = std::make_shared(std::make_shared(reorg_yolo), ngraph::ParameterVector{param}, "ReorgYolo"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/reshape.cpp b/src/tests/functional/shared_test_classes/src/single_layer/reshape.cpp index 31578cd9379062..69e1ef64100b11 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/reshape.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/reshape.cpp @@ -37,11 +37,11 @@ void ReshapeLayerTest::SetUp() { this->GetParam(); auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); ov::ParameterVector paramsIn {std::make_shared(ngPrc, ov::Shape(inputShapes))}; - auto constNode = std::make_shared( + auto constNode = std::make_shared( ngraph::element::Type_t::i64, ngraph::Shape{outFormShapes.size()}, outFormShapes); - auto reshape = std::dynamic_pointer_cast( - std::make_shared(paramsIn[0], constNode, specialZero)); - ngraph::ResultVector results{std::make_shared(reshape)}; + auto reshape = std::dynamic_pointer_cast( + std::make_shared(paramsIn[0], constNode, specialZero)); + ngraph::ResultVector results{std::make_shared(reshape)}; function = std::make_shared(results, paramsIn, "Reshape"); } diff --git a/src/tests/functional/shared_test_classes/src/single_layer/result.cpp 
b/src/tests/functional/shared_test_classes/src/single_layer/result.cpp index 71eb0dfe193143..cca0500eee4e9e 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/result.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/result.cpp @@ -29,7 +29,7 @@ void ResultLayerTest::SetUp() { auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision); ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - const ngraph::ResultVector results{std::make_shared(params[0])}; + const ngraph::ResultVector results{std::make_shared(params[0])}; function = std::make_shared(results, params, "result"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/reverse.cpp b/src/tests/functional/shared_test_classes/src/single_layer/reverse.cpp index 9ab223e16c2b14..b6f506092b16c0 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/reverse.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/reverse.cpp @@ -52,7 +52,7 @@ void ReverseLayerTest::SetUp() { axes_constant = std::make_shared(ov::element::boolean, ov::Shape{axesMask.size()}, axesMask); } - const auto reverse = std::make_shared(params[0], axes_constant, mode); + const auto reverse = std::make_shared(params[0], axes_constant, mode); function = std::make_shared(reverse->outputs(), params, "reverse"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/reverse_sequence.cpp b/src/tests/functional/shared_test_classes/src/single_layer/reverse_sequence.cpp index 05af8ca3377c84..199f0cd111da4c 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/reverse_sequence.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/reverse_sequence.cpp @@ -45,11 +45,11 @@ void ReverseSequenceLayerTest::SetUp() { auto secondaryInput = ngraph::builder::makeInputLayer(secondPrc, secondaryInputType, secondInputShape); OPENVINO_SUPPRESS_DEPRECATED_END if (secondaryInputType == ngraph::helpers::InputLayerType::PARAMETER) { - paramsIn.push_back(std::dynamic_pointer_cast(secondaryInput)); + paramsIn.push_back(std::dynamic_pointer_cast(secondaryInput)); } - auto reverse = std::make_shared(paramsIn[0], secondaryInput, batchAxisIndx, seqAxisIndx); - ngraph::ResultVector results{std::make_shared(reverse)}; + auto reverse = std::make_shared(paramsIn[0], secondaryInput, batchAxisIndx, seqAxisIndx); + ngraph::ResultVector results{std::make_shared(reverse)}; function = std::make_shared(results, paramsIn, "ReverseSequence"); } diff --git a/src/tests/functional/shared_test_classes/src/single_layer/rnn_cell.cpp b/src/tests/functional/shared_test_classes/src/single_layer/rnn_cell.cpp index 5325d57f69e0d9..7643c51aff7145 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/rnn_cell.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/rnn_cell.cpp @@ -92,7 +92,7 @@ void RNNCellTest::SetUp() { auto rnn_cell = std::make_shared(params[0], params[1], W, R, B, hidden_size, activations, activations_alpha, activations_beta, clip); - ngraph::ResultVector results{std::make_shared(rnn_cell)}; + ngraph::ResultVector results{std::make_shared(rnn_cell)}; function = std::make_shared(results, params, "rnn_cell"); if (should_decompose) { ngraph::pass::Manager m; diff --git a/src/tests/functional/shared_test_classes/src/single_layer/rnn_sequence.cpp b/src/tests/functional/shared_test_classes/src/single_layer/rnn_sequence.cpp index 
de1af3726c9026..7427345fa59445 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/rnn_sequence.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/rnn_sequence.cpp @@ -20,7 +20,7 @@ namespace LayerTestsDefinitions { std::vector activations_alpha; std::vector activations_beta; float clip; - ngraph::op::RecurrentSequenceDirection direction; + ov::op::RecurrentSequenceDirection direction; InferenceEngine::Precision netPrecision; InputLayerType WRBType; std::string targetDevice; @@ -55,12 +55,12 @@ namespace LayerTestsDefinitions { std::vector activations_alpha; std::vector activations_beta; float clip; - ngraph::op::RecurrentSequenceDirection direction; + ov::op::RecurrentSequenceDirection direction; InputLayerType WRBType; InferenceEngine::Precision netPrecision; std::tie(m_mode, seq_lengths, batch, hidden_size, input_size, activations, clip, direction, WRBType, netPrecision, targetDevice) = this->GetParam(); - size_t num_directions = direction == ngraph::op::RecurrentSequenceDirection::BIDIRECTIONAL ? 2 : 1; + size_t num_directions = direction == ov::op::RecurrentSequenceDirection::BIDIRECTIONAL ? 2 : 1; std::vector inputShapes = { {{batch, seq_lengths, input_size}, {batch, num_directions, hidden_size}, {batch}, {num_directions, hidden_size, input_size}, {num_directions, hidden_size, hidden_size}, @@ -110,15 +110,15 @@ namespace LayerTestsDefinitions { auto rnn_sequence = std::make_shared(params[0], params[1], seq_lengths_node, W, R, B, hidden_size, direction, activations, activations_alpha, activations_beta, clip); - ngraph::ResultVector results{std::make_shared(rnn_sequence->output(0)), - std::make_shared(rnn_sequence->output(1))}; + ngraph::ResultVector results{std::make_shared(rnn_sequence->output(0)), + std::make_shared(rnn_sequence->output(1))}; function = std::make_shared(results, params, "rnn_sequence"); bool is_pure_sequence = (m_mode == SequenceTestsMode::PURE_SEQ || m_mode == SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_PARAM || m_mode == SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_CONST); if (!is_pure_sequence) { ngraph::pass::Manager manager; - if (direction == ngraph::op::RecurrentSequenceDirection::BIDIRECTIONAL) + if (direction == ov::op::RecurrentSequenceDirection::BIDIRECTIONAL) manager.register_pass(); manager.register_pass(); manager.run_passes(function); diff --git a/src/tests/functional/shared_test_classes/src/single_layer/roi_align.cpp b/src/tests/functional/shared_test_classes/src/single_layer/roi_align.cpp index 97d39c93f0b548..683246fc841970 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/roi_align.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/roi_align.cpp @@ -101,10 +101,10 @@ void ROIAlignLayerTest::SetUp() { fillIdxTensor(roiIdxVector, inputShape[0]); ngraph::Shape idxShape = {coordsShape[0]}; - auto coords = std::make_shared(ngPrc, coordsShape, proposalVector.data()); - auto roisIdx = std::make_shared(ngraph::element::i32, idxShape, roiIdxVector.data()); + auto coords = std::make_shared(ngPrc, coordsShape, proposalVector.data()); + auto roisIdx = std::make_shared(ngraph::element::i32, idxShape, roiIdxVector.data()); - std::shared_ptr roiAlign = std::make_shared(params[0], + std::shared_ptr roiAlign = std::make_shared(params[0], coords, roisIdx, pooledH, @@ -112,7 +112,7 @@ void ROIAlignLayerTest::SetUp() { poolingRatio, spatialScale, poolingMode); - ngraph::ResultVector results{std::make_shared(roiAlign)}; + ngraph::ResultVector results{std::make_shared(roiAlign)}; function = 
std::make_shared(results, params, "roi_align"); } @@ -186,10 +186,10 @@ void ROIAlignV9LayerTest::SetUp() { ROIAlignLayerTest::fillIdxTensor(roiIdxVector, inputShape[0]); ngraph::Shape idxShape = {coordsShape[0]}; - auto coords = std::make_shared(ngPrc, coordsShape, proposalVector.data()); - auto roisIdx = std::make_shared(ngraph::element::i32, idxShape, roiIdxVector.data()); + auto coords = std::make_shared(ngPrc, coordsShape, proposalVector.data()); + auto roisIdx = std::make_shared(ngraph::element::i32, idxShape, roiIdxVector.data()); - std::shared_ptr roiAlign = std::make_shared( + std::shared_ptr roiAlign = std::make_shared( params[0], coords, roisIdx, @@ -197,10 +197,10 @@ void ROIAlignV9LayerTest::SetUp() { pooledW, poolingRatio, spatialScale, - ov::EnumNames::as_enum(poolingMode), - ov::EnumNames::as_enum(roiAlignedMode)); + ov::EnumNames::as_enum(poolingMode), + ov::EnumNames::as_enum(roiAlignedMode)); - ngraph::ResultVector results{std::make_shared(roiAlign)}; + ngraph::ResultVector results{std::make_shared(roiAlign)}; function = std::make_shared(results, params, "roi_align"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/roi_pooling.cpp b/src/tests/functional/shared_test_classes/src/single_layer/roi_pooling.cpp index 8a1c38f60962ef..494cb435bf12c9 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/roi_pooling.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/roi_pooling.cpp @@ -80,7 +80,7 @@ namespace LayerTestsDefinitions { } else { roi_pooling = std::make_shared(params[0], params[1], poolShape, spatial_scale, "bilinear"); } - ngraph::ResultVector results{std::make_shared(roi_pooling)}; + ngraph::ResultVector results{std::make_shared(roi_pooling)}; function = std::make_shared(results, params, "roi_pooling"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/roll.cpp b/src/tests/functional/shared_test_classes/src/single_layer/roll.cpp index 10771dbb86b3ab..67f1d091e5103f 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/roll.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/roll.cpp @@ -31,15 +31,15 @@ void RollLayerTest::SetUp() { std::tie(inputShapes, inputPrecision, shift, axes, targetDevice) = this->GetParam(); auto inType = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision); ngraph::ParameterVector paramVector; - auto paramData = std::make_shared(inType, ngraph::Shape(inputShapes)); + auto paramData = std::make_shared(inType, ngraph::Shape(inputShapes)); paramVector.push_back(paramData); - auto shiftNode = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{shift.size()}, shift)->output(0); - auto axesNode = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{axes.size()}, axes)->output(0); + auto shiftNode = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{shift.size()}, shift)->output(0); + auto axesNode = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{axes.size()}, axes)->output(0); - auto roll = std::make_shared(paramVector[0], shiftNode, axesNode); + auto roll = std::make_shared(paramVector[0], shiftNode, axesNode); - ngraph::ResultVector results{std::make_shared(roll)}; + ngraph::ResultVector results{std::make_shared(roll)}; function = std::make_shared(results, paramVector, "roll"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/scatter_ND_update.cpp 
b/src/tests/functional/shared_test_classes/src/single_layer/scatter_ND_update.cpp index 55e0e453ea0d55..2e2d481530f0ed 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/scatter_ND_update.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/scatter_ND_update.cpp @@ -62,15 +62,15 @@ void ScatterNDUpdateLayerTest::SetUp() { auto inPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision); auto idxPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(indicesPrecision); ngraph::ParameterVector paramVector; - auto inputParams = std::make_shared(inPrc, ngraph::Shape(inShape)); + auto inputParams = std::make_shared(inPrc, ngraph::Shape(inShape)); paramVector.push_back(inputParams); - auto updateParams = std::make_shared(inPrc, ngraph::Shape(updateShape)); + auto updateParams = std::make_shared(inPrc, ngraph::Shape(updateShape)); paramVector.push_back(updateParams); auto indicesNode = std::make_shared(idxPrc, ov::Shape(indicesShape), indicesValue); auto s2d = std::make_shared(paramVector[0], indicesNode, paramVector[1]); - ngraph::ResultVector results{std::make_shared(s2d)}; + ngraph::ResultVector results{std::make_shared(s2d)}; function = std::make_shared(results, paramVector, "ScatterNDUpdate"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/scatter_elements_update.cpp b/src/tests/functional/shared_test_classes/src/single_layer/scatter_elements_update.cpp index 9b3a4ca7bd421e..21369540823b8f 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/scatter_elements_update.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/scatter_elements_update.cpp @@ -53,16 +53,16 @@ void ScatterElementsUpdateLayerTest::SetUp() { auto inPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision); auto idxPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(indicesPrecision); ngraph::ParameterVector paramVector; - auto inputParams = std::make_shared(inPrc, ngraph::Shape(inShape)); + auto inputParams = std::make_shared(inPrc, ngraph::Shape(inShape)); paramVector.push_back(inputParams); - auto updateParams = std::make_shared(inPrc, ngraph::Shape(indicesShape)); + auto updateParams = std::make_shared(inPrc, ngraph::Shape(indicesShape)); paramVector.push_back(updateParams); auto indicesNode = std::make_shared(idxPrc, ov::Shape(indicesShape), indicesValue); auto axis_node = std::make_shared(ov::element::i32, ov::Shape{}, std::vector{axis}); auto s2d = std::make_shared(paramVector[0], indicesNode, paramVector[1], axis_node); - ngraph::ResultVector results{std::make_shared(s2d)}; + ngraph::ResultVector results{std::make_shared(s2d)}; function = std::make_shared(results, paramVector, "ScatterElementsUpdate"); } diff --git a/src/tests/functional/shared_test_classes/src/single_layer/scatter_update.cpp b/src/tests/functional/shared_test_classes/src/single_layer/scatter_update.cpp index d250ddbefbd2dd..2ce083343f3f97 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/scatter_update.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/scatter_update.cpp @@ -72,16 +72,16 @@ void ScatterUpdateLayerTest::SetUp() { auto inPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision); auto idxPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(indicesPrecision); ngraph::ParameterVector paramVector; - auto inputParams = std::make_shared(inPrc, ngraph::Shape(inShape)); + auto inputParams = std::make_shared(inPrc, 
ngraph::Shape(inShape)); paramVector.push_back(inputParams); - auto updateParams = std::make_shared(inPrc, ngraph::Shape(updateShape)); + auto updateParams = std::make_shared(inPrc, ngraph::Shape(updateShape)); paramVector.push_back(updateParams); auto indicesNode = std::make_shared(idxPrc, ov::Shape(indicesShape), indicesValue); auto axis_node = std::make_shared(ov::element::Type_t::i64, ov::Shape{}, std::vector{axis}); auto s2d = std::make_shared(paramVector[0], indicesNode, paramVector[1], axis_node); - ngraph::ResultVector results{std::make_shared(s2d)}; + ngraph::ResultVector results{std::make_shared(s2d)}; function = std::make_shared(results, paramVector, "ScatterUpdate"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/select.cpp b/src/tests/functional/shared_test_classes/src/single_layer/select.cpp index 58e1bd3b46ebcc..f25ac5f6cf98e8 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/select.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/select.cpp @@ -10,7 +10,7 @@ namespace LayerTestsDefinitions { std::string SelectLayerTest::getTestCaseName(const testing::TestParamInfo &obj) { std::vector> dataShapes(3); InferenceEngine::Precision dataType; - ngraph::op::AutoBroadcastSpec broadcast; + ov::op::AutoBroadcastSpec broadcast; std::string targetDevice; std::tie(dataShapes, dataType, broadcast, targetDevice) = obj.param; std::ostringstream result; @@ -25,19 +25,19 @@ namespace LayerTestsDefinitions { void SelectLayerTest::SetUp() { std::vector> inputShapes(numOfInputs); InferenceEngine::Precision inputPrecision; - ngraph::op::AutoBroadcastSpec broadcast; + ov::op::AutoBroadcastSpec broadcast; std::tie(inputShapes, inputPrecision, broadcast, targetDevice) = this->GetParam(); ngraph::ParameterVector paramNodesVector; - auto paramNode = std::make_shared(ngraph::element::Type_t::boolean, ngraph::Shape(inputShapes[CONDITION])); + auto paramNode = std::make_shared(ngraph::element::Type_t::boolean, ngraph::Shape(inputShapes[CONDITION])); paramNodesVector.push_back(paramNode); auto inType = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision); for (size_t i = 1; i < inputShapes.size(); i++) { - paramNode = std::make_shared(inType, ngraph::Shape(inputShapes[i])); + paramNode = std::make_shared(inType, ngraph::Shape(inputShapes[i])); paramNodesVector.push_back(paramNode); } auto select = std::make_shared(paramNodesVector[0], paramNodesVector[1], paramNodesVector[2], broadcast); - ngraph::ResultVector results{std::make_shared(select)}; + ngraph::ResultVector results{std::make_shared(select)}; function = std::make_shared(results, paramNodesVector, "select"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/shape_of.cpp b/src/tests/functional/shared_test_classes/src/single_layer/shape_of.cpp index 8d92da3a114f0d..d036f95bfe6467 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/shape_of.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/shape_of.cpp @@ -27,8 +27,8 @@ namespace LayerTestsDefinitions { auto inType = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision); auto outType = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(outPrc); ov::ParameterVector param {std::make_shared(inType, ov::Shape(inputShapes))}; - auto shapeOf = std::make_shared(param[0], outType); - ngraph::ResultVector results{std::make_shared(shapeOf)}; + auto shapeOf = std::make_shared(param[0], 
outType); + ngraph::ResultVector results{std::make_shared(shapeOf)}; function = std::make_shared(results, param, "shapeOf"); } diff --git a/src/tests/functional/shared_test_classes/src/single_layer/shuffle_channels.cpp b/src/tests/functional/shared_test_classes/src/single_layer/shuffle_channels.cpp index 16538163fd18d4..cb0b951746d5e0 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/shuffle_channels.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/shuffle_channels.cpp @@ -41,7 +41,7 @@ void ShuffleChannelsLayerTest::SetUp() { auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; auto shuffleChannels = std::make_shared(params[0], axis, group); - ngraph::ResultVector results{std::make_shared(shuffleChannels)}; + ngraph::ResultVector results{std::make_shared(shuffleChannels)}; function = std::make_shared(results, params, "shuffleChannels"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/space_to_batch.cpp b/src/tests/functional/shared_test_classes/src/single_layer/space_to_batch.cpp index 63d66585ef1d02..db436f71b103cd 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/space_to_batch.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/space_to_batch.cpp @@ -40,7 +40,7 @@ void SpaceToBatchLayerTest::SetUp() { OPENVINO_SUPPRESS_DEPRECATED_START auto s2b = ngraph::builder::makeSpaceToBatch(params[0], ngPrc, blockShape, padsBegin, padsEnd); OPENVINO_SUPPRESS_DEPRECATED_END - ngraph::ResultVector results{std::make_shared(s2b)}; + ngraph::ResultVector results{std::make_shared(s2b)}; function = std::make_shared(results, params, "SpaceToBatch"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/space_to_depth.cpp b/src/tests/functional/shared_test_classes/src/single_layer/space_to_depth.cpp index bdc4b6eba59673..68058208ddc092 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/space_to_depth.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/space_to_depth.cpp @@ -7,12 +7,10 @@ namespace LayerTestsDefinitions { -using namespace ngraph::opset3; - -static inline std::string SpaceToDepthModeToString(const SpaceToDepth::SpaceToDepthMode& mode) { - static std::map names = { - {SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST, "BLOCKS_FIRST"}, - {SpaceToDepth::SpaceToDepthMode::DEPTH_FIRST, "DEPTH_FIRST"}, +static inline std::string SpaceToDepthModeToString(const ov::op::v0::SpaceToDepth::SpaceToDepthMode& mode) { + static std::map names = { + {ov::op::v0::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST, "BLOCKS_FIRST"}, + {ov::op::v0::SpaceToDepth::SpaceToDepthMode::DEPTH_FIRST, "DEPTH_FIRST"}, }; auto i = names.find(mode); @@ -24,7 +22,7 @@ static inline std::string SpaceToDepthModeToString(const SpaceToDepth::SpaceToDe std::string SpaceToDepthLayerTest::getTestCaseName(const testing::TestParamInfo &obj) { std::vector inShape; - SpaceToDepth::SpaceToDepthMode mode; + ov::op::v0::SpaceToDepth::SpaceToDepthMode mode; std::size_t blockSize; InferenceEngine::Precision inputPrecision; std::string targetName; @@ -40,14 +38,14 @@ std::string SpaceToDepthLayerTest::getTestCaseName(const testing::TestParamInfo< void SpaceToDepthLayerTest::SetUp() { std::vector inShape; - SpaceToDepth::SpaceToDepthMode mode; + ov::op::v0::SpaceToDepth::SpaceToDepthMode mode; std::size_t blockSize; 
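Note: the SpaceToDepth hunk also drops a using-directive, so the nested mode enum is spelled out in full. A self-contained sketch of that mode-to-string mapping, assuming only the public ov::op::v0::SpaceToDepth definition (the helper name is illustrative):

#include <map>
#include <string>

#include "openvino/op/space_to_depth.hpp"

// Maps the SpaceToDepth mode enum to the string used in test case names.
inline std::string space_to_depth_mode_to_string(ov::op::v0::SpaceToDepth::SpaceToDepthMode mode) {
    static const std::map<ov::op::v0::SpaceToDepth::SpaceToDepthMode, std::string> names = {
        {ov::op::v0::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST, "BLOCKS_FIRST"},
        {ov::op::v0::SpaceToDepth::SpaceToDepthMode::DEPTH_FIRST, "DEPTH_FIRST"},
    };
    auto it = names.find(mode);
    return it == names.end() ? std::string("UNKNOWN") : it->second;
}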
InferenceEngine::Precision inputPrecision; std::tie(inShape, inputPrecision, mode, blockSize, targetDevice) = this->GetParam(); auto inPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision); ov::ParameterVector params {std::make_shared(inPrc, ov::Shape(inShape))}; auto s2d = std::make_shared(params[0], mode, blockSize); - ngraph::ResultVector results{std::make_shared(s2d)}; + ngraph::ResultVector results{std::make_shared(s2d)}; function = std::make_shared(results, params, "SpaceToDepth"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/split.cpp b/src/tests/functional/shared_test_classes/src/single_layer/split.cpp index 6c09bf04b55b76..aad1da53dc228a 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/split.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/split.cpp @@ -45,12 +45,12 @@ void SplitLayerTest::SetUp() { auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; OPENVINO_SUPPRESS_DEPRECATED_START - auto split = std::dynamic_pointer_cast(ngraph::builder::makeSplit(params[0], + auto split = std::dynamic_pointer_cast(ngraph::builder::makeSplit(params[0], ngPrc, numSplits, axis)); OPENVINO_SUPPRESS_DEPRECATED_END ngraph::ResultVector results; for (int i = 0; i < outIndices.size(); i++) { - results.push_back(std::make_shared(split->output(outIndices[i]))); + results.push_back(std::make_shared(split->output(outIndices[i]))); } function = std::make_shared(results, params, "split"); } diff --git a/src/tests/functional/shared_test_classes/src/single_layer/squeeze_unsqueeze.cpp b/src/tests/functional/shared_test_classes/src/single_layer/squeeze_unsqueeze.cpp index 1f229118a6b920..14c196ba2089a7 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/squeeze_unsqueeze.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/squeeze_unsqueeze.cpp @@ -42,14 +42,14 @@ void SqueezeUnsqueezeLayerTest::SetUp() { std::shared_ptr op; if (axesVector.empty() && opType == ngraph::helpers::SqueezeOpType::SQUEEZE) { - op = std::make_shared(params.front()); + op = std::make_shared(params.front()); } else { OPENVINO_SUPPRESS_DEPRECATED_START op = ngraph::builder::makeSqueezeUnsqueeze(params.front(), ngraph::element::i64, axesVector, opType); OPENVINO_SUPPRESS_DEPRECATED_END } - const ngraph::ResultVector results{std::make_shared(op)}; + const ngraph::ResultVector results{std::make_shared(op)}; function = std::make_shared(results, params, "Squeeze"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/strided_slice.cpp b/src/tests/functional/shared_test_classes/src/single_layer/strided_slice.cpp index 5f8a4ffe9fb199..1ae8b3110f8d3e 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/strided_slice.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/strided_slice.cpp @@ -59,7 +59,7 @@ void StridedSliceLayerTest::SetUp() { ssParams.shrinkAxisMask, ssParams.ellipsisAxisMask); - ngraph::ResultVector results{std::make_shared(ss)}; + ngraph::ResultVector results{std::make_shared(ss)}; function = std::make_shared(results, params, "StridedSlice"); } diff --git a/src/tests/functional/shared_test_classes/src/single_layer/tensor_iterator.cpp b/src/tests/functional/shared_test_classes/src/single_layer/tensor_iterator.cpp index 0df383f00bf3dd..3a67c205ddec24 100644 --- 
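Note: the StridedSlice hunk above applies the same renaming; for reference, a minimal slice built directly against the versioned ov::op::v1::StridedSlice API could look like the sketch below. The shapes, mask values, and the make_slice helper are assumptions for illustration.

#include <memory>
#include <vector>

#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/strided_slice.hpp"

// data[0:2, :] expressed with ov::op::v1::StridedSlice and explicit masks.
std::shared_ptr<ov::Node> make_slice(const std::shared_ptr<ov::op::v0::Parameter>& data) {
    auto begin = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{2}, std::vector<int64_t>{0, 0});
    auto end = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{2}, std::vector<int64_t>{2, 0});
    auto stride = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{2}, std::vector<int64_t>{1, 1});
    std::vector<int64_t> begin_mask{0, 1};  // 1 means "ignore the bound on this axis"
    std::vector<int64_t> end_mask{0, 1};
    return std::make_shared<ov::op::v1::StridedSlice>(data, begin, end, stride, begin_mask, end_mask);
}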
a/src/tests/functional/shared_test_classes/src/single_layer/tensor_iterator.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/tensor_iterator.cpp @@ -16,7 +16,7 @@ namespace LayerTestsDefinitions { size_t sequence_axis; ngraph::helpers::TensorIteratorBody ti_body; float clip; - ngraph::op::RecurrentSequenceDirection direction; + ov::op::RecurrentSequenceDirection direction; InferenceEngine::Precision netPrecision; std::string targetDevice; std::tie(should_decompose, seq_lengths, batch, hidden_size, sequence_axis, clip, ti_body, direction, netPrecision, @@ -67,19 +67,19 @@ namespace LayerTestsDefinitions { size_t sequence_axis; ngraph::helpers::TensorIteratorBody ti_body; float clip; - ngraph::op::RecurrentSequenceDirection direction; + ov::op::RecurrentSequenceDirection direction; InferenceEngine::Precision netPrecision; std::tie(should_decompose, seq_lengths, batch, hidden_size, sequence_axis, clip, ti_body, direction, netPrecision, targetDevice) = this->GetParam(); std::vector> inputShapes; auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - auto tensor_iterator = std::make_shared(); + auto tensor_iterator = std::make_shared(); // Each case consist of 3 steps: // 1. Create TensorIterator body. // 2. Set PortMap // 3. Create outer function - auto axis = std::make_shared(ngraph::element::i64, ngraph::Shape{1}, + auto axis = std::make_shared(ngraph::element::i64, ngraph::Shape{1}, std::vector{static_cast(sequence_axis)}); switch (ti_body) { case ngraph::helpers::TensorIteratorBody::LSTM: { @@ -101,22 +101,22 @@ namespace LayerTestsDefinitions { std::make_shared(ngPrc, ov::Shape(inputShapes[1])), std::make_shared(ngPrc, ov::Shape(inputShapes[2]))}; - auto squeeze = std::make_shared(body_params[0], axis); + auto squeeze = std::make_shared(body_params[0], axis); std::vector WRB = {inputShapes[3], inputShapes[4], inputShapes[5]}; ngraph::OutputVector out_vector = {squeeze, body_params[1], body_params[2]}; auto lstm_cell = ngraph::builder::makeLSTM(out_vector, WRB, hidden_size, {"sigmoid", "tanh", "tanh"}, {}, {}, clip); - auto unsqueeze = std::make_shared(lstm_cell->output(0), axis); - ngraph::ResultVector results{std::make_shared(unsqueeze), - std::make_shared(lstm_cell->output(0)), - std::make_shared(lstm_cell->output(1))}; + auto unsqueeze = std::make_shared(lstm_cell->output(0), axis); + ngraph::ResultVector results{std::make_shared(unsqueeze), + std::make_shared(lstm_cell->output(0)), + std::make_shared(lstm_cell->output(1))}; auto body = std::make_shared(results, body_params, "lstm_cell"); tensor_iterator->set_function(body); // 2. 
Set PortMap - if (direction == ngraph::op::RecurrentSequenceDirection::FORWARD) { + if (direction == ov::op::RecurrentSequenceDirection::FORWARD) { tensor_iterator->set_sliced_input(body_params[0], outer_params[0], 0, 1, 1, -1, sequence_axis); tensor_iterator->get_concatenated_slices(results[0], 0, 1, 1, -1, sequence_axis); - } else if (direction == ngraph::op::RecurrentSequenceDirection::REVERSE) { + } else if (direction == ov::op::RecurrentSequenceDirection::REVERSE) { tensor_iterator->set_sliced_input(body_params[0], outer_params[0], -1, -1, 1, 0, sequence_axis); tensor_iterator->get_concatenated_slices(results[0], -1, -1, 1, 0, sequence_axis); } else { @@ -151,21 +151,21 @@ namespace LayerTestsDefinitions { std::make_shared(ngPrc, ov::Shape(inputShapes[1]))}; std::vector WRB = {inputShapes[2], inputShapes[3], inputShapes[4]}; - auto squeeze = std::make_shared(body_params[0], axis); + auto squeeze = std::make_shared(body_params[0], axis); ngraph::OutputVector out_vector = {squeeze, body_params[1]}; auto gru_cell = ngraph::builder::makeGRU(out_vector, WRB, hidden_size, {"sigmoid", "tanh"}, {}, {}, clip, false); - auto unsqueeze = std::make_shared(gru_cell->output(0), axis); - ngraph::ResultVector results{std::make_shared(gru_cell->output(0)), - std::make_shared(unsqueeze)}; + auto unsqueeze = std::make_shared(gru_cell->output(0), axis); + ngraph::ResultVector results{std::make_shared(gru_cell->output(0)), + std::make_shared(unsqueeze)}; auto body = std::make_shared(results, body_params, "gru_cell"); tensor_iterator->set_function(body); // 2. Set PortMap - if (direction == ngraph::op::RecurrentSequenceDirection::FORWARD) { + if (direction == ov::op::RecurrentSequenceDirection::FORWARD) { tensor_iterator->set_sliced_input(body_params[0], outer_params[0], 0, 1, 1, -1, sequence_axis); tensor_iterator->get_concatenated_slices(results[1], 0, 1, 1, -1, sequence_axis); - } else if (direction == ngraph::op::RecurrentSequenceDirection::REVERSE) { + } else if (direction == ov::op::RecurrentSequenceDirection::REVERSE) { tensor_iterator->set_sliced_input(body_params[0], outer_params[0], -1, -1, 1, 0, sequence_axis); tensor_iterator->get_concatenated_slices(results[1], -1, -1, 1, 0, sequence_axis); } else { @@ -197,20 +197,20 @@ namespace LayerTestsDefinitions { ov::ParameterVector body_params{std::make_shared(ngPrc, ov::Shape(inputShapes[0])), std::make_shared(ngPrc, ov::Shape(inputShapes[1]))}; std::vector WRB = {inputShapes[2], inputShapes[3], inputShapes[4]}; - auto squeeze = std::make_shared(body_params[0], axis); + auto squeeze = std::make_shared(body_params[0], axis); ngraph::OutputVector out_vector = {squeeze, body_params[1]}; auto rnn_cell = ngraph::builder::makeRNN(out_vector, WRB, hidden_size, {"tanh"}, {}, {}, clip); - auto unsqueeze = std::make_shared(rnn_cell->output(0), axis); - ngraph::ResultVector results{std::make_shared(rnn_cell), - std::make_shared(unsqueeze)}; + auto unsqueeze = std::make_shared(rnn_cell->output(0), axis); + ngraph::ResultVector results{std::make_shared(rnn_cell), + std::make_shared(unsqueeze)}; auto body = std::make_shared(results, body_params, "rnn_cell"); tensor_iterator->set_function(body); // 2. 
Set PortMap - if (direction == ngraph::op::RecurrentSequenceDirection::FORWARD) { + if (direction == ov::op::RecurrentSequenceDirection::FORWARD) { tensor_iterator->set_sliced_input(body_params[0], outer_params[0], 0, 1, 1, -1, sequence_axis); tensor_iterator->get_concatenated_slices(results[1], 0, 1, 1, -1, sequence_axis); - } else if (direction == ngraph::op::RecurrentSequenceDirection::REVERSE) { + } else if (direction == ov::op::RecurrentSequenceDirection::REVERSE) { tensor_iterator->set_sliced_input(body_params[0], outer_params[0], -1, -1, 1, 0, sequence_axis); tensor_iterator->get_concatenated_slices(results[1], -1, -1, 1, 0, sequence_axis); } else { diff --git a/src/tests/functional/shared_test_classes/src/single_layer/tile.cpp b/src/tests/functional/shared_test_classes/src/single_layer/tile.cpp index 9c5afdd310bf38..1acd10bbda24e1 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/tile.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/tile.cpp @@ -38,7 +38,7 @@ void TileLayerTest::SetUp() { auto repeatsNode = std::make_shared(ov::element::i64, std::vector{tileParams.size()}, tileParams); auto tile = std::make_shared(params[0], repeatsNode); - ngraph::ResultVector results{std::make_shared(tile)}; + ngraph::ResultVector results{std::make_shared(tile)}; function = std::make_shared(results, params, "tile"); } diff --git a/src/tests/functional/shared_test_classes/src/single_layer/topk.cpp b/src/tests/functional/shared_test_classes/src/single_layer/topk.cpp index b4897117cbfb37..7f1b1ef5bfe679 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/topk.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/topk.cpp @@ -12,8 +12,8 @@ namespace LayerTestsDefinitions { InferenceEngine::SizeVector inputShape; std::string targetDevice; int64_t keepK, axis; - ngraph::opset4::TopK::Mode mode; - ngraph::opset4::TopK::SortType sort; + ov::op::v3::TopK::Mode mode; + ov::op::v3::TopK::SortType sort; std::tie(keepK, axis, mode, sort, netPrecision, inPrc, outPrc, inLayout, inputShape, targetDevice) = obj.param; std::ostringstream result; result << "IS=" << ov::test::utils::vec2str(inputShape) << "_"; @@ -33,20 +33,20 @@ void TopKLayerTest::SetUp() { InferenceEngine::SizeVector inputShape; InferenceEngine::Precision netPrecision; int64_t keepK, axis; - ngraph::opset4::TopK::Mode mode; - ngraph::opset4::TopK::SortType sort; + ov::op::v3::TopK::Mode mode; + ov::op::v3::TopK::SortType sort; std::tie(keepK, axis, mode, sort, netPrecision, inPrc, outPrc, inLayout, inputShape, targetDevice) = this->GetParam(); auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - auto k = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{}, &keepK); - auto topk = std::dynamic_pointer_cast( - std::make_shared(params[0], k, axis, mode, sort)); + auto k = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{}, &keepK); + auto topk = std::dynamic_pointer_cast( + std::make_shared(params[0], k, axis, mode, sort)); ngraph::ResultVector results; for (size_t i = 0; i < topk->get_output_size(); i++) { - results.push_back(std::make_shared(topk->output(i))); + results.push_back(std::make_shared(topk->output(i))); } function = std::make_shared(results, params, "TopK"); } diff --git a/src/tests/functional/shared_test_classes/src/single_layer/transpose.cpp b/src/tests/functional/shared_test_classes/src/single_layer/transpose.cpp index 
2fc4131e834f76..16226bcaf9a88f 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/transpose.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/transpose.cpp @@ -34,11 +34,11 @@ void TransposeLayerTest::SetUp() { ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; const auto inOrderShape = inputOrder.empty() ? ngraph::Shape({0}) : ngraph::Shape({inputShape.size()}); - const auto inputOrderOp = std::make_shared(ngraph::element::i64, + const auto inputOrderOp = std::make_shared(ngraph::element::i64, inOrderShape, inputOrder); - const auto transpose = std::make_shared(params.at(0), inputOrderOp); - const ngraph::ResultVector results{std::make_shared(transpose)}; + const auto transpose = std::make_shared(params.at(0), inputOrderOp); + const ngraph::ResultVector results{std::make_shared(transpose)}; function = std::make_shared(results, params, "Transpose"); } diff --git a/src/tests/functional/shared_test_classes/src/single_layer/variadic_split.cpp b/src/tests/functional/shared_test_classes/src/single_layer/variadic_split.cpp index c424b57bc99a73..2a4ef3f757bea5 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/variadic_split.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/variadic_split.cpp @@ -36,11 +36,11 @@ namespace LayerTestsDefinitions { std::tie(numSplits, axis, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShape, targetDevice) = this->GetParam(); auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - auto VariadicSplit = std::dynamic_pointer_cast(ngraph::builder::makeVariadicSplit(params[0], numSplits, + auto VariadicSplit = std::dynamic_pointer_cast(ngraph::builder::makeVariadicSplit(params[0], numSplits, axis)); ngraph::ResultVector results; for (int i = 0; i < numSplits.size(); i++) { - results.push_back(std::make_shared(VariadicSplit->output(i))); + results.push_back(std::make_shared(VariadicSplit->output(i))); } function = std::make_shared(results, params, "VariadicSplit"); } diff --git a/src/tests/functional/shared_test_classes/src/single_op/broadcast.cpp b/src/tests/functional/shared_test_classes/src/single_op/broadcast.cpp index 084eaf1fd0be6d..d89fac28335797 100644 --- a/src/tests/functional/shared_test_classes/src/single_op/broadcast.cpp +++ b/src/tests/functional/shared_test_classes/src/single_op/broadcast.cpp @@ -51,7 +51,7 @@ void BroadcastLayerTest::SetUp() { ov::ParameterVector params{std::make_shared(model_type, inputDynamicShapes.front())}; std::shared_ptr broadcast; - if (mode == ngraph::op::BroadcastType::NONE) { + if (mode == ov::op::BroadcastType::NONE) { auto axis_set_const = ov::op::v0::Constant::create(ngraph::element::i64, {axes_mapping.size()}, axes_mapping.to_vector()); broadcast = std::make_shared(params[0], target_shape_const, diff --git a/src/tests/functional/shared_test_classes/src/single_op/convolution.cpp b/src/tests/functional/shared_test_classes/src/single_op/convolution.cpp index 0e44df42406b60..bfb521871548b7 100644 --- a/src/tests/functional/shared_test_classes/src/single_op/convolution.cpp +++ b/src/tests/functional/shared_test_classes/src/single_op/convolution.cpp @@ -19,7 +19,7 @@ std::string ConvolutionLayerTest::getTestCaseName(const testing::TestParamInfo shapes; std::string targetDevice; std::tie(conv_params, model_type, shapes, targetDevice) = obj.param; - ngraph::op::PadType pad_type; + ov::op::PadType pad_type; 
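Note: ov::op::PadType is a direct rename of ngraph::op::PadType, so only the qualifier changes. As a sketch, a convolution that exercises the auto-pad attribute with the versioned v1 type; the shapes, strides, and the make_conv helper are illustrative assumptions.

#include <memory>

#include "openvino/op/constant.hpp"
#include "openvino/op/convolution.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/util/attr_types.hpp"

std::shared_ptr<ov::Node> make_conv(const std::shared_ptr<ov::op::v0::Parameter>& data,
                                    const std::shared_ptr<ov::op::v0::Constant>& weights) {
    // SAME_UPPER lets the op derive pads_begin/pads_end, so the explicit pads below are ignored.
    return std::make_shared<ov::op::v1::Convolution>(data,
                                                     weights,
                                                     ov::Strides{1, 1},
                                                     ov::CoordinateDiff{0, 0},
                                                     ov::CoordinateDiff{0, 0},
                                                     ov::Strides{1, 1},
                                                     ov::op::PadType::SAME_UPPER);
}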
InferenceEngine::SizeVector kernel, stride, dilation; std::vector pad_begin, pad_end; size_t conv_out_channels; diff --git a/src/tests/functional/shared_test_classes/src/single_op/normalize_l2.cpp b/src/tests/functional/shared_test_classes/src/single_op/normalize_l2.cpp index b3db915a59a035..d53d0b7f846375 100644 --- a/src/tests/functional/shared_test_classes/src/single_op/normalize_l2.cpp +++ b/src/tests/functional/shared_test_classes/src/single_op/normalize_l2.cpp @@ -14,7 +14,7 @@ namespace test { std::string NormalizeL2LayerTest::getTestCaseName(const testing::TestParamInfo& obj) { std::vector axes; float eps; - ngraph::op::EpsMode eps_mode; + ov::op::EpsMode eps_mode; std::vector shapes; ov::element::Type model_type; std::string targetDevice; @@ -45,7 +45,7 @@ void NormalizeL2LayerTest::SetUp() { std::vector shapes; std::vector axes; float eps; - ngraph::op::EpsMode eps_mode; + ov::op::EpsMode eps_mode; ov::element::Type model_type; std::tie(axes, eps, eps_mode, shapes, model_type, targetDevice) = this->GetParam(); init_input_shapes(shapes); diff --git a/src/tests/functional/shared_test_classes/src/subgraph/activation_concats_eltwise.cpp b/src/tests/functional/shared_test_classes/src/subgraph/activation_concats_eltwise.cpp index 0eef9adde8fe92..81313bca9fb32f 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/activation_concats_eltwise.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/activation_concats_eltwise.cpp @@ -52,8 +52,8 @@ void ActivationConcatsEltwise::SetUp() { auto eltw = ngraph::builder::makeEltwise(concat_1, concat_2, ngraph::helpers::EltwiseTypes::ADD); - auto reshape_pattern = std::make_shared(ngraph::element::i64, ngraph::Shape{2}, std::vector({1, inputSize + concatSize})); - auto final_reshape = std::make_shared(eltw, reshape_pattern, false); + auto reshape_pattern = std::make_shared(ngraph::element::i64, ngraph::Shape{2}, std::vector({1, inputSize + concatSize})); + auto final_reshape = std::make_shared(eltw, reshape_pattern, false); function = std::make_shared(final_reshape, input, "ActivationConcatsEltwise"); } } // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/subgraph/activation_fq.cpp b/src/tests/functional/shared_test_classes/src/subgraph/activation_fq.cpp index eda78cc0525999..d9df9b10f5356d 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/activation_fq.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/activation_fq.cpp @@ -69,9 +69,9 @@ namespace SubgraphTestsDefinitions { auto FQNode = ngraph::builder::makeFakeQuantize(act, ngraph::element::f32, levels[0], constShape[0], { inputDataMin }, { inputDataMax }, { inputDataMin }, { inputDataMax }); - auto FQ = std::dynamic_pointer_cast(FQNode); + auto FQ = std::dynamic_pointer_cast(FQNode); - ngraph::ResultVector results{std::make_shared(FQ)}; + ngraph::ResultVector results{std::make_shared(FQ)}; function = std::make_shared(results, params, "ActivationFakeQuantizeSubgraph"); } diff --git a/src/tests/functional/shared_test_classes/src/subgraph/basic_lstm.cpp b/src/tests/functional/shared_test_classes/src/subgraph/basic_lstm.cpp index d9814b2869dc82..fc04b12b3dc556 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/basic_lstm.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/basic_lstm.cpp @@ -68,32 +68,32 @@ std::shared_ptr Basic_LSTM_S::GetNetwork(size_t thirdDimOut, //Reshape_1 [1,thirdDimOut*num_cells] -> [1, num_cells, thirdDimOut] std::vector outFormShapes1 = { 
batch_size, num_cells, thirdDimOut }; - auto pattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 3 }, outFormShapes1); - auto reshape1 = std::make_shared(params[0], pattern1, false); + auto pattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 3 }, outFormShapes1); + auto reshape1 = std::make_shared(params[0], pattern1, false); auto reshape1_shape = reshape1->output(0).get_shape(); auto H_init = ngraph::builder::makeConstant(ngPrc, { batch_size, hiddenSize }, {}, true, weights_range.second, weights_range.first); auto C_init = ngraph::builder::makeConstant(ngPrc, { batch_size, hiddenSize }, {}, true, weights_range.second, weights_range.first); if (hidden_memory_init_out != nullptr) { - *hidden_memory_init_out = std::static_pointer_cast(H_init)->cast_vector(); + *hidden_memory_init_out = std::static_pointer_cast(H_init)->cast_vector(); } if (cell_memory_init_out != nullptr) { - *cell_memory_init_out = std::static_pointer_cast(C_init)->cast_vector(); + *cell_memory_init_out = std::static_pointer_cast(C_init)->cast_vector(); } - auto H_t = std::make_shared(ngPrc, ngraph::Shape{ batch_size, hiddenSize }); - auto C_t = std::make_shared(ngPrc, ngraph::Shape{ batch_size, hiddenSize }); + auto H_t = std::make_shared(ngPrc, ngraph::Shape{ batch_size, hiddenSize }); + auto C_t = std::make_shared(ngPrc, ngraph::Shape{ batch_size, hiddenSize }); H_t->set_friendly_name("hidden_state_1"); C_t->set_friendly_name("cell_state_1"); //Body - auto X = std::make_shared(ngPrc, ngraph::Shape{ batch_size, 1, reshape1_shape[2] }); + auto X = std::make_shared(ngPrc, ngraph::Shape{ batch_size, 1, reshape1_shape[2] }); auto weightsNode = ngraph::builder::makeConstant(ngPrc, { 4 * hiddenSize, reshape1_shape[2] }, {}, true, weights_range.second, weights_range.first); auto reccurrenceWeightsNode = ngraph::builder::makeConstant(ngPrc, { 4 * hiddenSize, hiddenSize }, {}, true, weights_range.second, weights_range.first); //lstm [1, 10], [1, 118], [1, 118] -> [1, 118], [1, 118] outFormShapes1 = { batch_size, reshape1_shape[2] }; - auto constantX = std::make_shared(ngraph::element::i64, ngraph::Shape{ 2 }, outFormShapes1); - auto lstm1 = std::make_shared(std::make_shared(X, constantX, false), + auto constantX = std::make_shared(ngraph::element::i64, ngraph::Shape{ 2 }, outFormShapes1); + auto lstm1 = std::make_shared(std::make_shared(X, constantX, false), H_t, C_t, weightsNode, reccurrenceWeightsNode, hiddenSize); @@ -104,7 +104,7 @@ std::shared_ptr Basic_LSTM_S::GetNetwork(size_t thirdDimOut, auto body = std::make_shared( ngraph::OutputVector{ H_o, C_o }, ngraph::ParameterVector{ X, H_t, C_t }); - auto tensor_iterator = std::make_shared(); + auto tensor_iterator = std::make_shared(); tensor_iterator->set_body(body); //input tensor shape: [1, num_cells, thirdDimOut] chunk shape: [1, 1, thirdDimOut] @@ -117,7 +117,7 @@ std::shared_ptr Basic_LSTM_S::GetNetwork(size_t thirdDimOut, const size_t output_size = 12; auto fc1 = ngraph::builder::makeFullyConnected(out0, ngPrc, output_size, true, { hiddenSize, output_size }, { weights_range.second }, { 0.f }); - ngraph::ResultVector results{ std::make_shared(fc1) }; + ngraph::ResultVector results{ std::make_shared(fc1) }; return std::make_shared(results, params, "Basic_LSTM_S"); } diff --git a/src/tests/functional/shared_test_classes/src/subgraph/broadcast_power.cpp b/src/tests/functional/shared_test_classes/src/subgraph/broadcast_power.cpp index 3a57f3fd6b7543..80008290c98945 100644 --- 
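Note: Basic_LSTM_S wires the same structure as the TensorIterator tests above: a one-step body around an LSTM cell, a sliced sequence input, and merged hidden and cell state. A condensed sketch of that wiring with the versioned ov::op types; the dimensions, constant weights, and the make_lstm_ti helper are assumptions for illustration.

#include <memory>
#include <vector>

#include "openvino/core/model.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/lstm_cell.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/reshape.hpp"
#include "openvino/op/result.hpp"
#include "openvino/op/tensor_iterator.hpp"

std::shared_ptr<ov::Model> make_lstm_ti(size_t steps, size_t feature, size_t hidden) {
    auto t = ov::element::f32;
    // Outer inputs: the full sequence plus the initial hidden/cell state.
    auto seq = std::make_shared<ov::op::v0::Parameter>(t, ov::Shape{1, steps, feature});
    auto h0 = std::make_shared<ov::op::v0::Parameter>(t, ov::Shape{1, hidden});
    auto c0 = std::make_shared<ov::op::v0::Parameter>(t, ov::Shape{1, hidden});

    // Body: one time step of shape [1, 1, feature], reshaped to [1, feature] for the cell.
    auto x = std::make_shared<ov::op::v0::Parameter>(t, ov::Shape{1, 1, feature});
    auto h_in = std::make_shared<ov::op::v0::Parameter>(t, ov::Shape{1, hidden});
    auto c_in = std::make_shared<ov::op::v0::Parameter>(t, ov::Shape{1, hidden});
    auto x_shape = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{2},
                                                          std::vector<int64_t>{1, static_cast<int64_t>(feature)});
    auto x2d = std::make_shared<ov::op::v1::Reshape>(x, x_shape, false);
    auto W = std::make_shared<ov::op::v0::Constant>(t, ov::Shape{4 * hidden, feature},
                                                    std::vector<float>(4 * hidden * feature, 0.01f));
    auto R = std::make_shared<ov::op::v0::Constant>(t, ov::Shape{4 * hidden, hidden},
                                                    std::vector<float>(4 * hidden * hidden, 0.01f));
    auto cell = std::make_shared<ov::op::v4::LSTMCell>(x2d, h_in, c_in, W, R, hidden);
    auto body = std::make_shared<ov::Model>(ov::OutputVector{cell->output(0), cell->output(1)},
                                            ov::ParameterVector{x, h_in, c_in});

    auto ti = std::make_shared<ov::op::v0::TensorIterator>();
    ti->set_body(body);
    ti->set_sliced_input(x, seq, 0, 1, 1, -1, 1);      // slice the sequence along axis 1
    ti->set_merged_input(h_in, h0, cell->output(0));   // carry hidden state between iterations
    ti->set_merged_input(c_in, c0, cell->output(1));   // carry cell state between iterations
    auto h_last = ti->get_iter_value(cell->output(0), -1);

    auto result = std::make_shared<ov::op::v0::Result>(h_last);
    return std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{seq, h0, c0}, "lstm_ti");
}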
a/src/tests/functional/shared_test_classes/src/subgraph/broadcast_power.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/broadcast_power.cpp @@ -29,16 +29,16 @@ void BroadcastPowerTest::SetUp() { auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputs_shapes[0]))}; - auto reshape_pattern = std::make_shared(ngraph::element::i64, ngraph::Shape{inputs_shapes[1].size()}, + auto reshape_pattern = std::make_shared(ngraph::element::i64, ngraph::Shape{inputs_shapes[1].size()}, inputs_shapes[1]); - auto reshape = std::make_shared(params[0], reshape_pattern, false); + auto reshape = std::make_shared(params[0], reshape_pattern, false); auto const_mult2 = ngraph::builder::makeConstant(ngPrc, {}, {-1.0f}); auto sum = ngraph::builder::makeEltwise(reshape, const_mult2, ngraph::helpers::EltwiseTypes::MULTIPLY); - auto reshape_pattern_2 = std::make_shared(ngraph::element::i64, ngraph::Shape{inputs_shapes[0].size()}, + auto reshape_pattern_2 = std::make_shared(ngraph::element::i64, ngraph::Shape{inputs_shapes[0].size()}, inputs_shapes[0]); - auto reshape_2 = std::make_shared(sum, reshape_pattern_2, false); + auto reshape_2 = std::make_shared(sum, reshape_pattern_2, false); function = std::make_shared(reshape_2, params, "BroadcastPowerPass"); } } // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/subgraph/cascade_concat.cpp b/src/tests/functional/shared_test_classes/src/subgraph/cascade_concat.cpp index 33a101d1e0c8b0..76a1f0147b3145 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/cascade_concat.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/cascade_concat.cpp @@ -36,10 +36,10 @@ void CascadeConcat::SetUp() { std::make_shared(ngPrc, ov::Shape(input2[0])), std::make_shared(ngPrc, ov::Shape(input3[0]))}; - auto relu1 = std::make_shared(input[0]); - auto relu2 = std::make_shared(input[1]); - auto relu3 = std::make_shared(input[2]); - auto concat = std::make_shared(ov::OutputVector{relu1->output(0), + auto relu1 = std::make_shared(input[0]); + auto relu2 = std::make_shared(input[1]); + auto relu3 = std::make_shared(input[2]); + auto concat = std::make_shared(ov::OutputVector{relu1->output(0), relu2->output(0)}, 1); @@ -48,18 +48,18 @@ void CascadeConcat::SetUp() { auto reshape2_constant = std::make_shared(ov::element::i64, ov::Shape{1}, std::vector{0}); auto reshape2 = std::make_shared(reshape, reshape2_constant); - auto concat2 = std::make_shared(ov::OutputVector{reshape2->output(0), + auto concat2 = std::make_shared(ov::OutputVector{reshape2->output(0), relu3->output(0)}, 1); ngraph::ResultVector results; if (multioutput) { auto const_mult = ngraph::builder::makeConstant(ngPrc, ngraph::Shape{1, input1[0][1]+input2[0][1]}, std::vector{1.01f}); - auto mult = std::make_shared(concat, const_mult); - results = ngraph::ResultVector{std::make_shared(concat2), - std::make_shared(mult)}; + auto mult = std::make_shared(concat, const_mult); + results = ngraph::ResultVector{std::make_shared(concat2), + std::make_shared(mult)}; } else { - results = ngraph::ResultVector{std::make_shared(concat2)}; + results = ngraph::ResultVector{std::make_shared(concat2)}; } function = std::make_shared(results, input, "concat_reshape_reshape_concat_mul"); } @@ -108,14 +108,14 @@ void CascadeConcatWithMultiConnReshape::SetUp() { auto inputShapeSqueezed = inputShape; inputShapeSqueezed.insert(std::begin(inputShapeSqueezed), 1); ov::ParameterVector input 
{std::make_shared(ngPrc, ov::Shape(inputShapeSqueezed))}; - auto relu = std::make_shared(input[0]); + auto relu = std::make_shared(input[0]); auto const1 = ngraph::builder::makeConstant(ngPrc, inputShapeSqueezed, std::vector{}, true); auto concat1 = std::make_shared(ov::NodeVector{relu, const1}, inputShapeSqueezed.size() - 1); auto squeeze_constant = std::make_shared(ov::element::i64, ov::Shape{1}, std::vector{0}); auto squeeze = std::make_shared(concat1, squeeze_constant); - auto relu1 = std::make_shared(squeeze); + auto relu1 = std::make_shared(squeeze); auto unsqueeze1_constant = std::make_shared(ov::element::i64, ov::Shape{1}, std::vector{0}); auto unsqueeze1 = std::make_shared(relu1, unsqueeze1_constant); @@ -125,13 +125,13 @@ void CascadeConcatWithMultiConnReshape::SetUp() { // Change concat name to make it the second connection in the map of squeeze output connections concat2->set_friendly_name("XConcat"); - auto relu2 = std::make_shared(concat2); + auto relu2 = std::make_shared(concat2); auto unsqueeze2_constant = std::make_shared(ov::element::i64, ov::Shape{1}, std::vector{0}); auto unsqueeze2 = std::make_shared(relu2, unsqueeze2_constant); - ngraph::ResultVector results = {std::make_shared(unsqueeze1), - std::make_shared(unsqueeze2)}; + ngraph::ResultVector results = {std::make_shared(unsqueeze1), + std::make_shared(unsqueeze2)}; function = std::make_shared(results, input, "CascadeConcatWithMultiConnReshapeTest"); } diff --git a/src/tests/functional/shared_test_classes/src/subgraph/clamp_fq.cpp b/src/tests/functional/shared_test_classes/src/subgraph/clamp_fq.cpp index b8dfc25f12f1fe..6d533c76d95b44 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/clamp_fq.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/clamp_fq.cpp @@ -63,16 +63,16 @@ namespace SubgraphTestsDefinitions { auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - auto clamp = std::make_shared(params[0], clamp_min_max[0], clamp_min_max[1]); + auto clamp = std::make_shared(params[0], clamp_min_max[0], clamp_min_max[1]); auto FQNode = ngraph::builder::makeFakeQuantize(clamp, ngraph::element::f32, levels[0], constShape[0], { inputDataMin }, { inputDataMax }, { inputDataMin }, { inputDataMax }); - auto FQ = std::dynamic_pointer_cast(FQNode); - auto sigmoid = std::make_shared(FQ); + auto FQ = std::dynamic_pointer_cast(FQNode); + auto sigmoid = std::make_shared(FQ); - ngraph::ResultVector results{std::make_shared(sigmoid)}; + ngraph::ResultVector results{std::make_shared(sigmoid)}; function = std::make_shared(results, params, "fakeQuantizeSubgraph"); configuration = config.second; } diff --git a/src/tests/functional/shared_test_classes/src/subgraph/concat_conv.cpp b/src/tests/functional/shared_test_classes/src/subgraph/concat_conv.cpp index 215a33f47e6484..f42edf1e3b7926 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/concat_conv.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/concat_conv.cpp @@ -62,15 +62,15 @@ void ConcatConvTest::SetUp() { auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape(inputShape))}; - auto relu1 = std::make_shared(params[0]); + auto relu1 = std::make_shared(params[0]); auto const_values = ov::test::utils::generate_float_numbers(inputShape[1], -2.0f, 2.0f); auto constant = ngraph::builder::makeConstant(ngPrc, inputShape, const_values); auto 
concat = std::make_shared(ov::NodeVector{constant, relu1}, 1); std::vector convInputShape = {1, inputChannels, 1, 2 * inputShape[0] * inputShape[1] / inputChannels}; - auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape); - auto reshape1 = std::make_shared(concat, reshapePattern1, false); + auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape); + auto reshape1 = std::make_shared(concat, reshapePattern1, false); auto filterWeights = ov::test::utils::generate_float_numbers(outputChannels * convInputShape[1] * kernelShape[0] * kernelShape[1], -0.2f, 0.2f); @@ -79,13 +79,13 @@ void ConcatConvTest::SetUp() { {kernelShape[0], kernelShape[1]}, {kernelShape[0] > 1 ? stride : 1, stride}, {0, 0}, - { 0, 0 }, { 1, 1 }, ngraph::op::PadType::VALID, outputChannels, false, filterWeights); + { 0, 0 }, { 1, 1 }, ov::op::PadType::VALID, outputChannels, false, filterWeights); auto widthAfterConv = (convInputShape[3] - kernelShape[1]) / stride + 1; std::vector outFormShapes = {1, outputChannels * widthAfterConv }; - auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes); - auto reshape2 = std::make_shared(conv, reshapePattern2, false); + auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes); + auto reshape2 = std::make_shared(conv, reshapePattern2, false); function = std::make_shared(reshape2, params, "ConcatConvTest"); } diff --git a/src/tests/functional/shared_test_classes/src/subgraph/concat_multi_input.cpp b/src/tests/functional/shared_test_classes/src/subgraph/concat_multi_input.cpp index b6808f06c3c255..410b6d789ba5a6 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/concat_multi_input.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/concat_multi_input.cpp @@ -39,33 +39,33 @@ void ConcatMultiInput::SetUp() { void ConcatMultiInput::GenerateStridedSliceModel() { ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape(paramSize))}; - auto stride = std::make_shared(ngraph::element::i64, ngraph::Shape{ 2 }, std::vector{ 1, 1 }); + auto stride = std::make_shared(ngraph::element::i64, ngraph::Shape{ 2 }, std::vector{ 1, 1 }); std::vector newAxis = { 0, 0 }; std::vector begin_mask = { 0, 0 }; std::vector end_mask = { 0, 0 }; - std::vector> ssArray; + std::vector> ssArray; ngraph::OutputVector concatInput; - auto relu = std::make_shared(params[0]); + auto relu = std::make_shared(params[0]); std::vector startOffset = { 0, 0 }; for (size_t i = 0; i < inputShapes.size(); ++i) { std::vector shape = { static_cast(inputShapes[i][0]), static_cast(inputShapes[i][1]) }; std::vector endoffset = { static_cast(inputShapes[i][0]) + startOffset[0], static_cast(inputShapes[i][1]) + startOffset[1]}; - auto begin = std::make_shared(ngraph::element::i64, ngraph::Shape{ 2 }, startOffset); - auto end = std::make_shared(ngraph::element::i64, ngraph::Shape{ 2 }, endoffset); - auto ss = std::make_shared(relu, begin, end, stride, begin_mask, end_mask, newAxis); + auto begin = std::make_shared(ngraph::element::i64, ngraph::Shape{ 2 }, startOffset); + auto end = std::make_shared(ngraph::element::i64, ngraph::Shape{ 2 }, endoffset); + auto ss = std::make_shared(relu, begin, end, stride, begin_mask, end_mask, newAxis); ssArray.push_back(ss); concatInput.push_back(ssArray[i]); startOffset[1] += shape[1]; } - auto concat = std::make_shared(concatInput, 1); + auto concat = std::make_shared(concatInput, 
1); - ngraph::ResultVector results{ std::make_shared(concat) }; + ngraph::ResultVector results{ std::make_shared(concat) }; function = std::make_shared(results, params, "ConcatMultiInput"); } @@ -104,7 +104,7 @@ void ConcatMultiInput::GenerateConstOnlyModel() { auto concat = std::make_shared(concatInputs, 1); - ngraph::ResultVector results{ std::make_shared(concat) }; + ngraph::ResultVector results{ std::make_shared(concat) }; function = std::make_shared(results, input_vector, "ConcatConstOnly"); } @@ -114,17 +114,17 @@ void ConcatMultiInput::GenerateMemoryModel() { auto variable = std::make_shared(ngraph::VariableInfo{ov::Shape(inputShapes[0]), ngraph::element::dynamic, "concat_input_memory"}); - auto mem_i = std::make_shared(ngPrc, inputShapes[0]); - auto mem_r = std::make_shared(mem_i, variable); + auto mem_i = std::make_shared(ngPrc, inputShapes[0]); + auto mem_r = std::make_shared(mem_i, variable); ngraph::OutputVector concat_input; concat_input.push_back(mem_r); concat_input.push_back(input.at(0)); - auto concat = std::make_shared(concat_input, axis); + auto concat = std::make_shared(concat_input, axis); - auto mem_w = std::make_shared(input.at(0), variable); + auto mem_w = std::make_shared(input.at(0), variable); - auto res = std::make_shared(concat); + auto res = std::make_shared(concat); function = std::make_shared(ngraph::ResultVector{res}, ngraph::SinkVector{mem_w}, input, "ConcatMemory"); } diff --git a/src/tests/functional/shared_test_classes/src/subgraph/concat_quantization_during_memory_requantization.cpp b/src/tests/functional/shared_test_classes/src/subgraph/concat_quantization_during_memory_requantization.cpp index 7678ee5db24e41..fae76e845a6e3d 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/concat_quantization_during_memory_requantization.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/concat_quantization_during_memory_requantization.cpp @@ -38,30 +38,30 @@ namespace SubgraphTestsDefinitions { ov::ParameterVector input{std::make_shared(ngPrc, ov::Shape{1, inputSize})}; - auto mem_1_const = std::make_shared(ngPrc, ngraph::Shape{ 1, hiddenSize }, memory_1_init); - auto mem_1_read = std::make_shared(mem_1_const, "memory_1"); + auto mem_1_const = std::make_shared(ngPrc, ngraph::Shape{ 1, hiddenSize }, memory_1_init); + auto mem_1_read = std::make_shared(mem_1_const, "memory_1"); - auto concat_1 = std::make_shared(ngraph::OutputVector{ mem_1_read, input[0] }, 1); + auto concat_1 = std::make_shared(ngraph::OutputVector{ mem_1_read, input[0] }, 1); // Revert concat names to set the needed order of scale factors calculation concat_1->set_friendly_name("concat2"); auto split_1 = ngraph::builder::makeVariadicSplit(concat_1, { inputSize, hiddenSize }, 1); - auto mul_const = std::make_shared(ngPrc, ngraph::Shape{ 1, hiddenSize }, + auto mul_const = std::make_shared(ngPrc, ngraph::Shape{ 1, hiddenSize }, ov::test::utils::generate_float_numbers(hiddenSize, -0.2f, 0.0f)); auto mul = ngraph::builder::makeEltwise(split_1->output(1), mul_const, ngraph::helpers::EltwiseTypes::MULTIPLY); - auto mem_1_write = std::make_shared(mul, "memory_1"); + auto mem_1_write = std::make_shared(mul, "memory_1"); - auto mem_2_const = std::make_shared(ngPrc, ngraph::Shape{ 1, hiddenSize }, memory_2_init); - auto mem_2_read = std::make_shared(mem_2_const, "memory_2"); + auto mem_2_const = std::make_shared(ngPrc, ngraph::Shape{ 1, hiddenSize }, memory_2_init); + auto mem_2_read = std::make_shared(mem_2_const, "memory_2"); - auto concat_2 = 
std::make_shared(ngraph::OutputVector{ mem_2_read, mul }, 1); + auto concat_2 = std::make_shared(ngraph::OutputVector{ mem_2_read, mul }, 1); // Revert concat names to set the needed order of scale factors calculation concat_2->set_friendly_name("concat1"); auto split_axis_op = std::make_shared(ov::element::Type_t::i64, ov::Shape{}, std::vector{1}); auto split_2 = std::make_shared(concat_2, split_axis_op, 2); - auto mem_2_write = std::make_shared(split_2->output(0), "memory_2"); - auto sigm = std::make_shared(split_2->output(1)); + auto mem_2_write = std::make_shared(split_2->output(0), "memory_2"); + auto sigm = std::make_shared(split_2->output(1)); mem_1_write->add_control_dependency(mem_1_read); sigm->add_control_dependency(mem_1_write); @@ -85,20 +85,20 @@ namespace SubgraphTestsDefinitions { ov::ParameterVector input{std::make_shared(ngPrc, ov::Shape{1, inputSize})}; - auto mem_1_const = std::make_shared(ngPrc, ngraph::Shape{ 1, hiddenSize }, memory_1_init); - auto concat_1 = std::make_shared(ngraph::OutputVector{ mem_1_const, input[0] }, 1); + auto mem_1_const = std::make_shared(ngPrc, ngraph::Shape{ 1, hiddenSize }, memory_1_init); + auto concat_1 = std::make_shared(ngraph::OutputVector{ mem_1_const, input[0] }, 1); auto split_1 = ngraph::builder::makeVariadicSplit(concat_1, { inputSize, hiddenSize }, 1); - auto mul_const = std::make_shared(ngPrc, ngraph::Shape{ 1, hiddenSize }, + auto mul_const = std::make_shared(ngPrc, ngraph::Shape{ 1, hiddenSize }, ov::test::utils::generate_float_numbers(hiddenSize, -0.2f, 0.0f)); auto mul = ngraph::builder::makeEltwise(split_1->output(1), mul_const, ngraph::helpers::EltwiseTypes::MULTIPLY); - auto mem_2_const = std::make_shared(ngPrc, ngraph::Shape{ 1, hiddenSize }, memory_2_init); - auto concat_2 = std::make_shared(ngraph::OutputVector{ mem_2_const, mul }, 1); + auto mem_2_const = std::make_shared(ngPrc, ngraph::Shape{ 1, hiddenSize }, memory_2_init); + auto concat_2 = std::make_shared(ngraph::OutputVector{ mem_2_const, mul }, 1); auto split_axis_op = std::make_shared(ov::element::Type_t::i64, ov::Shape{}, std::vector{1}); auto split_2 = std::make_shared(concat_2, split_axis_op, 2); - auto sigm = std::make_shared(split_2->output(1)); + auto sigm = std::make_shared(split_2->output(1)); function = std::make_shared(sigm, input, "concat_quant_during_memory_requant_nomemory"); } diff --git a/src/tests/functional/shared_test_classes/src/subgraph/concat_qunatization.cpp b/src/tests/functional/shared_test_classes/src/subgraph/concat_qunatization.cpp index 31e212e0d9fb83..74598aabc0cd6f 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/concat_qunatization.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/concat_qunatization.cpp @@ -32,29 +32,29 @@ void ConcatQuantization::SetUp() { ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape{1, 160})}; std::vector outFormShapes1 = { 1, 5, 32 }; - auto pattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 3 }, outFormShapes1); - auto reshape1 = std::make_shared(params[0], pattern1, false); + auto pattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 3 }, outFormShapes1); + auto reshape1 = std::make_shared(params[0], pattern1, false); - auto tanh = std::make_shared(reshape1); + auto tanh = std::make_shared(reshape1); std::vector outFormShapes2 = { 1, 160 }; - auto pattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes2); - auto reshape2 = std::make_shared(tanh, pattern2, false); + auto pattern2 = 
std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes2); + auto reshape2 = std::make_shared(tanh, pattern2, false); auto scale = ngraph::builder::makeConstant(ngPrc, outFormShapes2, {}, true); - //For ngraph::op::ScaleShift: Cannot cast ngraph node ScaleShift to CNNLayer! - auto scale_shift = std::make_shared(reshape2, scale); + //For ov::op::v0::ScaleShift: Cannot cast ngraph node ScaleShift to CNNLayer! + auto scale_shift = std::make_shared(reshape2, scale); std::vector outFormShapes3 = { 5, 32 }; - auto pattern3 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes3); - auto reshape3 = std::make_shared(scale_shift, pattern3, false); + auto pattern3 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes3); + auto reshape3 = std::make_shared(scale_shift, pattern3, false); - auto pattern4 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes3); - auto reshape4 = std::make_shared(tanh, pattern4, false); + auto pattern4 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes3); + auto reshape4 = std::make_shared(tanh, pattern4, false); - auto concat = std::make_shared(ngraph::OutputVector{ reshape3, reshape4 }, 0); + auto concat = std::make_shared(ngraph::OutputVector{ reshape3, reshape4 }, 0); concat->set_friendly_name("concat"); - ngraph::ResultVector results{std::make_shared(concat)}; + ngraph::ResultVector results{std::make_shared(concat)}; function = std::make_shared(results, params, "ConcatQuantization"); } } // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/subgraph/connect_split_concat_concat.cpp b/src/tests/functional/shared_test_classes/src/subgraph/connect_split_concat_concat.cpp index 420cb9fbcf51bd..745665c51c5876 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/connect_split_concat_concat.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/connect_split_concat_concat.cpp @@ -26,18 +26,18 @@ void SplitConcatConcatTest::SetUp() { auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape{1, 256})}; - auto relu_start = std::make_shared(params[0]); + auto relu_start = std::make_shared(params[0]); auto split_axis_op = std::make_shared(ov::element::Type_t::i64, ov::Shape{}, std::vector{1}); auto split = std::make_shared(relu_start, split_axis_op, 2); auto const_concat = ngraph::builder::makeConstant(ngPrc, {1, 96}, std::vector{0}); auto const_concat_2 = ngraph::builder::makeConstant(ngPrc, {1, 96}, std::vector{0}); - auto concat = std::make_shared(ngraph::OutputVector{split->output(0), const_concat}, 1); - auto concat_2 = std::make_shared(ngraph::OutputVector{concat, const_concat_2}, + auto concat = std::make_shared(ngraph::OutputVector{split->output(0), const_concat}, 1); + auto concat_2 = std::make_shared(ngraph::OutputVector{concat, const_concat_2}, 1); - auto relu = std::make_shared(concat_2); + auto relu = std::make_shared(concat_2); ngraph::ResultVector resultVector{ - std::make_shared(relu) + std::make_shared(relu) }; function = std::make_shared(resultVector, params, "Multiple_connection_split_concat"); } diff --git a/src/tests/functional/shared_test_classes/src/subgraph/const_conv_concat.cpp b/src/tests/functional/shared_test_classes/src/subgraph/const_conv_concat.cpp index 7dabd62e98fca4..c8cb41a79327ef 100644 --- 
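Note: the split-and-concat chain above relies on v1::Split taking its axis as a scalar Constant input rather than an attribute. A minimal sketch of that pattern with the versioned types; the tensor sizes and the make_split_concat helper are illustrative assumptions.

#include <memory>
#include <vector>

#include "openvino/core/model.hpp"
#include "openvino/op/concat.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/relu.hpp"
#include "openvino/op/result.hpp"
#include "openvino/op/split.hpp"

std::shared_ptr<ov::Model> make_split_concat(size_t width) {
    auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, width});
    auto relu_in = std::make_shared<ov::op::v0::Relu>(input);
    // v1::Split takes the axis as a scalar Constant input, not as an attribute.
    auto axis = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{}, std::vector<int64_t>{1});
    auto split = std::make_shared<ov::op::v1::Split>(relu_in, axis, 2);
    auto pad = std::make_shared<ov::op::v0::Constant>(ov::element::f32, ov::Shape{1, width / 2},
                                                      std::vector<float>(width / 2, 0.0f));
    auto concat = std::make_shared<ov::op::v0::Concat>(ov::OutputVector{split->output(0), pad}, 1);
    auto relu_out = std::make_shared<ov::op::v0::Relu>(concat);
    auto result = std::make_shared<ov::op::v0::Result>(relu_out);
    return std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{input}, "split_concat");
}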
a/src/tests/functional/shared_test_classes/src/subgraph/const_conv_concat.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/const_conv_concat.cpp @@ -64,8 +64,8 @@ void ConstConvConcatTest::SetUp() { ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape(inputShape))}; std::vector convInputShape = {inputShape[0], inputChannels, 1, inputShape[1] / inputChannels}; - auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape); - auto reshape1 = std::make_shared(params[0], reshapePattern1, false); + auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape); + auto reshape1 = std::make_shared(params[0], reshapePattern1, false); auto filterWeights = ov::test::utils::generate_float_numbers(outputChannels * convInputShape[1] * kernelShape[0] * kernelShape[1], 0.0f, 0.1f); @@ -74,7 +74,7 @@ void ConstConvConcatTest::SetUp() { {kernelShape[0], kernelShape[1]}, {kernelShape[0] > 1 ? stride : 1, stride}, {0, 0}, - { 0, 0 }, { 1, 1 }, ngraph::op::PadType::VALID, outputChannels, false, filterWeights); + { 0, 0 }, { 1, 1 }, ov::op::PadType::VALID, outputChannels, false, filterWeights); auto widthAfterConv = (convInputShape[3] - kernelShape[1]) / stride + 1; std::vector outFormShapes = {1, outputChannels * widthAfterConv }; @@ -83,9 +83,9 @@ void ConstConvConcatTest::SetUp() { auto constant = ngraph::builder::makeConstant(ngPrc, {1, outputChannels, 1, widthAfterConv}, const_values); auto concat = std::make_shared(ov::NodeVector{constant, conv}, 3); - auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, + auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, std::vector{1, 2 * outputChannels * widthAfterConv }); - auto reshape2 = std::make_shared(concat, reshapePattern2, false); + auto reshape2 = std::make_shared(concat, reshapePattern2, false); function = std::make_shared(reshape2, params, "ConstConvConcatTest"); functionRefs = ngraph::clone_function(*function); diff --git a/src/tests/functional/shared_test_classes/src/subgraph/conv_eltwise_fusion.cpp b/src/tests/functional/shared_test_classes/src/subgraph/conv_eltwise_fusion.cpp index 625c187dffc06e..593e2efaa12c9b 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/conv_eltwise_fusion.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/conv_eltwise_fusion.cpp @@ -6,7 +6,6 @@ #include "common_test_utils/graph_comparator.hpp" #include "openvino/core/node.hpp" -#include "openvino/opsets/opset11.hpp" #include "openvino/pass/constant_folding.hpp" #include "openvino/pass/manager.hpp" #include "ov_models/builders.hpp" @@ -54,7 +53,7 @@ void ConvEltwiseFusion::SetUp() { pass::Manager manager; { - auto param = std::make_shared(precision, input_shape); + auto param = std::make_shared(precision, input_shape); auto spatial_dims = input_shape.size() - 2; Shape strides(spatial_dims, 1); @@ -66,17 +65,17 @@ void ConvEltwiseFusion::SetUp() { const_shape, std::vector(shape_size(const_shape), 3)); std::shared_ptr conv; - if (conv_type == opset11::Convolution::get_type_info_static()) { - conv = std::make_shared(param, weights, strides, pad_begin, pad_end, strides); - } else if (conv_type == opset11::GroupConvolution::get_type_info_static()) { - conv = std::make_shared(param, weights, strides, pad_begin, pad_end, strides); - } else if (conv_type == opset11::ConvolutionBackpropData::get_type_info_static()) { + if (conv_type == 
ov::op::v1::Convolution::get_type_info_static()) { + conv = std::make_shared(param, weights, strides, pad_begin, pad_end, strides); + } else if (conv_type == ov::op::v1::GroupConvolution::get_type_info_static()) { + conv = std::make_shared(param, weights, strides, pad_begin, pad_end, strides); + } else if (conv_type == ov::op::v1::ConvolutionBackpropData::get_type_info_static()) { if (num_inputs == 3) { - auto output_shape = std::make_shared( + auto output_shape = std::make_shared( element::u64, Shape{spatial_dims}, std::vector(input_shape.begin() + 2, input_shape.end())); - conv = std::make_shared(param, + conv = std::make_shared(param, weights, output_shape, strides, @@ -84,20 +83,20 @@ void ConvEltwiseFusion::SetUp() { pad_end, strides); } else { - conv = std::make_shared(param, + conv = std::make_shared(param, weights, strides, pad_begin, pad_end, strides); } - } else if (conv_type == opset11::GroupConvolutionBackpropData::get_type_info_static()) { + } else if (conv_type == ov::op::v1::GroupConvolutionBackpropData::get_type_info_static()) { if (num_inputs == 3) { - auto output_shape = std::make_shared( + auto output_shape = std::make_shared( element::u64, Shape{spatial_dims}, std::vector(input_shape.begin() + 2, input_shape.end())); - conv = std::make_shared(param, + conv = std::make_shared(param, weights, output_shape, strides, @@ -105,7 +104,7 @@ void ConvEltwiseFusion::SetUp() { pad_end, strides); } else { - conv = std::make_shared(param, + conv = std::make_shared(param, weights, strides, pad_begin, @@ -117,14 +116,14 @@ void ConvEltwiseFusion::SetUp() { } std::shared_ptr eltwise; - if (eltwise_type == opset11::Multiply::get_type_info_static()) { - eltwise = std::make_shared(conv, eltwise_const); + if (eltwise_type == ov::op::v1::Multiply::get_type_info_static()) { + eltwise = std::make_shared(conv, eltwise_const); manager.register_pass(); manager.register_pass(); manager.register_pass(); manager.register_pass(); - } else if (eltwise_type == opset11::Add::get_type_info_static()) { - eltwise = std::make_shared(conv, eltwise_const); + } else if (eltwise_type == ov::op::v1::Add::get_type_info_static()) { + eltwise = std::make_shared(conv, eltwise_const); // manager.register_pass(); // manager.register_pass(); } else { @@ -139,7 +138,7 @@ void ConvEltwiseFusion::SetUp() { std::shared_ptr function_ref; if (!negative) { - auto param = std::make_shared(precision, input_shape); + auto param = std::make_shared(precision, input_shape); auto spatial_dims = input_shape.size() - 2; Shape strides(spatial_dims, 1); @@ -148,17 +147,17 @@ void ConvEltwiseFusion::SetUp() { weights_shape, std::vector(shape_size(weights_shape), 6)); std::shared_ptr conv; - if (conv_type == opset11::Convolution::get_type_info_static()) { - conv = std::make_shared(param, weights, strides, pad_begin, pad_end, strides); - } else if (conv_type == opset11::GroupConvolution::get_type_info_static()) { - conv = std::make_shared(param, weights, strides, pad_begin, pad_end, strides); - } else if (conv_type == opset11::ConvolutionBackpropData::get_type_info_static()) { + if (conv_type == ov::op::v1::Convolution::get_type_info_static()) { + conv = std::make_shared(param, weights, strides, pad_begin, pad_end, strides); + } else if (conv_type == ov::op::v1::GroupConvolution::get_type_info_static()) { + conv = std::make_shared(param, weights, strides, pad_begin, pad_end, strides); + } else if (conv_type == ov::op::v1::ConvolutionBackpropData::get_type_info_static()) { if (num_inputs == 3) { - auto output_shape = std::make_shared( + 
auto output_shape = std::make_shared( element::u64, Shape{spatial_dims}, std::vector(input_shape.begin() + 2, input_shape.end())); - conv = std::make_shared(param, + conv = std::make_shared(param, weights, output_shape, strides, @@ -166,20 +165,20 @@ void ConvEltwiseFusion::SetUp() { pad_end, strides); } else { - conv = std::make_shared(param, + conv = std::make_shared(param, weights, strides, pad_begin, pad_end, strides); } - } else if (conv_type == opset11::GroupConvolutionBackpropData::get_type_info_static()) { + } else if (conv_type == ov::op::v1::GroupConvolutionBackpropData::get_type_info_static()) { if (num_inputs == 3) { - auto output_shape = std::make_shared( + auto output_shape = std::make_shared( element::u64, Shape{spatial_dims}, std::vector(input_shape.begin() + 2, input_shape.end())); - conv = std::make_shared(param, + conv = std::make_shared(param, weights, output_shape, strides, @@ -187,7 +186,7 @@ void ConvEltwiseFusion::SetUp() { pad_end, strides); } else { - conv = std::make_shared(param, + conv = std::make_shared(param, weights, strides, pad_begin, diff --git a/src/tests/functional/shared_test_classes/src/subgraph/conv_fq_eltwise.cpp b/src/tests/functional/shared_test_classes/src/subgraph/conv_fq_eltwise.cpp index a2519f47629deb..dc4d937842b53f 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/conv_fq_eltwise.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/conv_fq_eltwise.cpp @@ -77,8 +77,8 @@ void ConvFqEltwiseTest::SetUp() { std::mt19937 gen(seed); std::vector convInputShape = {1, inputChannels, 1, inputShape[0] * inputShape[1] / inputChannels}; - auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape); - auto reshape1 = std::make_shared(params[0], reshapePattern1, false); + auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape); + auto reshape1 = std::make_shared(params[0], reshapePattern1, false); float weightVal = 0.2; auto filterWeightsNode = ngraph::builder::makeConstant(ngPrc, {outputChannels, inputChannels, kernelShape[0], kernelShape[1]}, @@ -87,14 +87,14 @@ void ConvFqEltwiseTest::SetUp() { ngraph::builder::makeConstant(ngraph::element::f32, std::vector{1}, std::vector{-convFQValue}); auto convHighNode = ngraph::builder::makeConstant(ngraph::element::f32, std::vector{1}, std::vector{convFQValue}); - auto convWeightsFQNode = std::make_shared(filterWeightsNode, + auto convWeightsFQNode = std::make_shared(filterWeightsNode, convLowNode, convHighNode, convLowNode, convHighNode, levels); - auto convWeightsFQ = std::dynamic_pointer_cast(convWeightsFQNode); - auto conv = std::make_shared(reshape1, convWeightsFQ, strides, std::vector{ 0, 0 }, + auto convWeightsFQ = std::dynamic_pointer_cast(convWeightsFQNode); + auto conv = std::make_shared(reshape1, convWeightsFQ, strides, std::vector{ 0, 0 }, std::vector{ 0, 0 }, std::vector{ 1, 1 }, - ngraph::op::PadType::VALID); + ov::op::PadType::VALID); auto biasesWeightsNode = ngraph::builder::makeConstant(ngPrc, {}, std::vector{ 0.0f }); - auto add_1 = std::make_shared(conv, biasesWeightsNode); + auto add_1 = std::make_shared(conv, biasesWeightsNode); auto widthAfterConv = (convInputShape[3] - kernelShape[1]) / strides[1] + 1; auto heightAfterConv = (convInputShape[2] - kernelShape[0]) / strides[0] + 1; @@ -104,13 +104,13 @@ void ConvFqEltwiseTest::SetUp() { std::vector{inputDataMin * weightVal * kernelShape[1] * 1.5f}); auto highNode = ngraph::builder::makeConstant(ngraph::element::f32, 
std::vector{ 1 }, std::vector{inputDataMax * weightVal * kernelShape[1] * 1.5f}); - auto fq = std::make_shared(add_1, lowNode, highNode, lowNode, highNode, levels); + auto fq = std::make_shared(add_1, lowNode, highNode, lowNode, highNode, levels); auto constNode = ngraph::builder::makeConstant(ngPrc, {}, std::vector{ 0.5f }); - auto add_2 = std::make_shared(fq, constNode); + auto add_2 = std::make_shared(fq, constNode); - auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes); - auto reshape2 = std::make_shared(add_2, reshapePattern2, false); + auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes); + auto reshape2 = std::make_shared(add_2, reshapePattern2, false); function = std::make_shared(reshape2, params, "convFqEltwise"); } diff --git a/src/tests/functional/shared_test_classes/src/subgraph/conv_fq_relu.cpp b/src/tests/functional/shared_test_classes/src/subgraph/conv_fq_relu.cpp index ad2a8ad441199b..d108635c790794 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/conv_fq_relu.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/conv_fq_relu.cpp @@ -77,8 +77,8 @@ void ConvFqReluTest::SetUp() { std::mt19937 gen(seed); std::vector convInputShape = {1, inputChannels, 1, inputShape[0] * inputShape[1] / inputChannels}; - auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape); - auto reshape1 = std::make_shared(params[0], reshapePattern1, false); + auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape); + auto reshape1 = std::make_shared(params[0], reshapePattern1, false); float weightVal = 0.2; auto filterWeightsNode = ngraph::builder::makeConstant(ngPrc, {outputChannels, inputChannels, kernelShape[0], kernelShape[1]}, @@ -87,14 +87,14 @@ void ConvFqReluTest::SetUp() { ngraph::builder::makeConstant(ngraph::element::f32, std::vector{1}, std::vector{-convFQValue}); auto convHighNode = ngraph::builder::makeConstant(ngraph::element::f32, std::vector{1}, std::vector{convFQValue}); - auto convWeightsFQNode = std::make_shared(filterWeightsNode, + auto convWeightsFQNode = std::make_shared(filterWeightsNode, convLowNode, convHighNode, convLowNode, convHighNode, levels); - auto convWeightsFQ = std::dynamic_pointer_cast(convWeightsFQNode); - auto conv = std::make_shared(reshape1, convWeightsFQ, strides, std::vector{ 0, 0 }, + auto convWeightsFQ = std::dynamic_pointer_cast(convWeightsFQNode); + auto conv = std::make_shared(reshape1, convWeightsFQ, strides, std::vector{ 0, 0 }, std::vector{ 0, 0 }, std::vector{ 1, 1 }, - ngraph::op::PadType::VALID); + ov::op::PadType::VALID); auto biasesWeightsNode = ngraph::builder::makeConstant(ngPrc, {}, std::vector{ 0.0f }); - auto add_1 = std::make_shared(conv, biasesWeightsNode); + auto add_1 = std::make_shared(conv, biasesWeightsNode); auto widthAfterConv = (convInputShape[3] - kernelShape[1]) / strides[1] + 1; auto heightAfterConv = (convInputShape[2] - kernelShape[0]) / strides[0] + 1; @@ -104,12 +104,12 @@ void ConvFqReluTest::SetUp() { std::vector{inputDataMin * weightVal * kernelShape[1] * 1.5f}); auto highNode = ngraph::builder::makeConstant(ngraph::element::f32, std::vector{ 1 }, std::vector{inputDataMax * weightVal * kernelShape[1] * 1.5f}); - auto fq = std::make_shared(add_1, lowNode, highNode, lowNode, highNode, levels); + auto fq = std::make_shared(add_1, lowNode, highNode, lowNode, highNode, levels); - auto relu = 
std::make_shared(fq); + auto relu = std::make_shared(fq); - auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes); - auto reshape2 = std::make_shared(relu, reshapePattern2, false); + auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes); + auto reshape2 = std::make_shared(relu, reshapePattern2, false); function = std::make_shared(reshape2, params, "convFqEltwise"); } diff --git a/src/tests/functional/shared_test_classes/src/subgraph/convolution_relu_sequence.cpp b/src/tests/functional/shared_test_classes/src/subgraph/convolution_relu_sequence.cpp index b3f0c42713c717..3a83e80c427533 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/convolution_relu_sequence.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/convolution_relu_sequence.cpp @@ -73,12 +73,12 @@ void ConvolutionReluSequenceTest::SetUp() { ngraph::builder::makeConvolution( lastOutputs, ngPrc, single.kernelSize, single.strides, single.padBegin, single.padEnd, - dilation, ngraph::op::PadType::EXPLICIT, single.numOutChannels, addBiases, filter_weights, biases)); - lastOutputs = std::make_shared(conv); + dilation, ov::op::PadType::EXPLICIT, single.numOutChannels, addBiases, filter_weights, biases)); + lastOutputs = std::make_shared(conv); if (single.poolingWindow.size() == 2 && (single.poolingWindow[0] != 1 || single.poolingWindow[1] != 1)) { - lastOutputs = std::make_shared(lastOutputs, single.poolingStride, + lastOutputs = std::make_shared(lastOutputs, single.poolingStride, ngraph::Shape{ 0, 0 }, ngraph::Shape{ 0, 0 }, single.poolingWindow); @@ -86,7 +86,7 @@ void ConvolutionReluSequenceTest::SetUp() { inputChannels = single.numOutChannels; } - ngraph::ResultVector results{std::make_shared(lastOutputs)}; + ngraph::ResultVector results{std::make_shared(lastOutputs)}; function = std::make_shared(results, params, "convolution_relu_sequence"); } } // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/subgraph/copy_before_squeeze.cpp b/src/tests/functional/shared_test_classes/src/subgraph/copy_before_squeeze.cpp index 83866052440052..1362295cd1dbd2 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/copy_before_squeeze.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/copy_before_squeeze.cpp @@ -27,22 +27,22 @@ namespace SubgraphTestsDefinitions { auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); ov::ParameterVector input {std::make_shared(ngPrc, ov::Shape(inputShape))}; - auto reshape_0_pattern = std::make_shared(ngraph::element::i64, + auto reshape_0_pattern = std::make_shared(ngraph::element::i64, ngraph::Shape{3}, std::vector{1, inputShape[1] / 64, 64}); - auto reshape_0 = std::make_shared(input[0], reshape_0_pattern, false); - auto relu = std::make_shared(reshape_0); + auto reshape_0 = std::make_shared(input[0], reshape_0_pattern, false); + auto relu = std::make_shared(reshape_0); - auto constant_squeeze = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{1}, std::vector{0}); - auto reshape_pattern = std::make_shared(ngraph::element::i64, + auto constant_squeeze = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{1}, std::vector{0}); + auto reshape_pattern = std::make_shared(ngraph::element::i64, ngraph::Shape{2}, std::vector{1, inputShape[1]}); - auto squeeze_1 = std::make_shared(relu, constant_squeeze); - auto reshape_1 = std::make_shared(squeeze_1, reshape_pattern, false); - auto 
squeeze_2 = std::make_shared(relu, constant_squeeze); - auto reshape_2 = std::make_shared(squeeze_2, reshape_pattern, false); + auto squeeze_1 = std::make_shared(relu, constant_squeeze); + auto reshape_1 = std::make_shared(squeeze_1, reshape_pattern, false); + auto squeeze_2 = std::make_shared(relu, constant_squeeze); + auto reshape_2 = std::make_shared(squeeze_2, reshape_pattern, false); - auto concat = std::make_shared(ngraph::OutputVector{reshape_1, reshape_2}, 1); + auto concat = std::make_shared(ngraph::OutputVector{reshape_1, reshape_2}, 1); function = std::make_shared(concat, input, "copy_before_squeeze"); } } // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/subgraph/delayed_copy_layer.cpp b/src/tests/functional/shared_test_classes/src/subgraph/delayed_copy_layer.cpp index bd2aa5bc46856a..ddff4c2837ea8d 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/delayed_copy_layer.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/delayed_copy_layer.cpp @@ -72,16 +72,16 @@ namespace SubgraphTestsDefinitions { memory_init = ov::test::utils::generate_float_numbers(memory_size, -0.2f, 0.2f); - auto mem_c = std::make_shared(ngPrc, ngraph::Shape{1, memory_size}, memory_init); + auto mem_c = std::make_shared(ngPrc, ngraph::Shape{1, memory_size}, memory_init); - auto mem_r = std::make_shared(mem_c, "id"); + auto mem_r = std::make_shared(mem_c, "id"); - auto concat = std::make_shared(ngraph::OutputVector{mem_r, input[0]}, 1); + auto concat = std::make_shared(ngraph::OutputVector{mem_r, input[0]}, 1); auto split = ngraph::builder::makeVariadicSplit(concat, {3 * memory_size, memory_size}, 1); - auto mem_w = std::make_shared(split->output(1), "id"); + auto mem_w = std::make_shared(split->output(1), "id"); auto VariadicSplit = ngraph::builder::makeVariadicSplit(concat, {memory_size / 2, 3 * memory_size + memory_size / 2}, 1); - auto relu2 = std::make_shared(VariadicSplit->output(1)); + auto relu2 = std::make_shared(VariadicSplit->output(1)); mem_w->add_control_dependency(mem_r); relu2->add_control_dependency(mem_w); @@ -101,12 +101,12 @@ namespace SubgraphTestsDefinitions { auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); ov::ParameterVector input {std::make_shared(ngPrc, ov::Shape{1, 3 * memory_size})}; - auto mem_c = std::make_shared(ngPrc, ngraph::Shape{1, memory_size}, memory_init); - auto concat = std::make_shared(ngraph::OutputVector{mem_c, input[0]}, 1); + auto mem_c = std::make_shared(ngPrc, ngraph::Shape{1, memory_size}, memory_init); + auto concat = std::make_shared(ngraph::OutputVector{mem_c, input[0]}, 1); auto split = ngraph::builder::makeVariadicSplit(concat, {3 * memory_size, memory_size}, 1); auto VariadicSplit = ngraph::builder::makeVariadicSplit(concat, {memory_size / 2, 3 * memory_size + memory_size / 2}, 1); - auto relu2 = std::make_shared(VariadicSplit->output(1)); + auto relu2 = std::make_shared(VariadicSplit->output(1)); function = std::make_shared(relu2, input, "delayed_copy_layer_nonmemory"); } @@ -126,21 +126,21 @@ namespace SubgraphTestsDefinitions { memory_init = ov::test::utils::generate_float_numbers(memory_size, -0.2f, 0.2f); auto mem_c = ngraph::builder::makeConstant(ngPrc, ngraph::Shape{8, memory_size / 8}, memory_init); - auto mem_r = std::make_shared(mem_c, "id"); + auto mem_r = std::make_shared(mem_c, "id"); auto reshape_pattern1 = ngraph::builder::makeConstant(ngraph::element::i64, ngraph::Shape{2}, ngraph::Shape{1, memory_size}); - auto reshape1 = 
std::make_shared(mem_r, reshape_pattern1, false); + auto reshape1 = std::make_shared(mem_r, reshape_pattern1, false); auto split_axis_op = std::make_shared(ov::element::Type_t::i64, ov::Shape{}, std::vector{1}); auto split = std::make_shared(reshape1, split_axis_op, 2); - auto concat = std::make_shared(ngraph::OutputVector{split->output(0), input[0]}, 1); + auto concat = std::make_shared(ngraph::OutputVector{split->output(0), input[0]}, 1); auto reshape_pattern2 = ngraph::builder::makeConstant(ngraph::element::i64, ngraph::Shape{2}, ngraph::Shape{8, memory_size / 8}); - auto reshape2 = std::make_shared(concat, reshape_pattern2, false); + auto reshape2 = std::make_shared(concat, reshape_pattern2, false); - auto mem_w = std::make_shared(reshape2, "id"); + auto mem_w = std::make_shared(reshape2, "id"); - auto relu = std::make_shared(reshape2); + auto relu = std::make_shared(reshape2); auto reshape_pattern3 = ngraph::builder::makeConstant(ngraph::element::i64, ngraph::Shape{2}, ngraph::Shape{1, memory_size}); - auto reshape3 = std::make_shared(relu, reshape_pattern3, false); + auto reshape3 = std::make_shared(relu, reshape_pattern3, false); mem_w->add_control_dependency(mem_r); reshape3->add_control_dependency(mem_w); @@ -162,17 +162,17 @@ namespace SubgraphTestsDefinitions { auto mem_c = ngraph::builder::makeConstant(ngPrc, ngraph::Shape{1, memory_size}, memory_init); auto reshape_pattern1 = ngraph::builder::makeConstant(ngraph::element::i64, ngraph::Shape{2}, ngraph::Shape{1, memory_size}); - auto reshape1 = std::make_shared(mem_c, reshape_pattern1, false); + auto reshape1 = std::make_shared(mem_c, reshape_pattern1, false); auto split_axis_op = std::make_shared(ov::element::Type_t::i64, ov::Shape{}, std::vector{1}); auto split = std::make_shared(reshape1, split_axis_op, 2); - auto concat = std::make_shared(ngraph::OutputVector{split->output(0), input[0]}, 1); + auto concat = std::make_shared(ngraph::OutputVector{split->output(0), input[0]}, 1); auto reshape_pattern2 = ngraph::builder::makeConstant(ngraph::element::i64, ngraph::Shape{2}, ngraph::Shape{8, memory_size / 8}); - auto reshape2 = std::make_shared(concat, reshape_pattern2, false); + auto reshape2 = std::make_shared(concat, reshape_pattern2, false); - auto relu = std::make_shared(reshape2); + auto relu = std::make_shared(reshape2); auto reshape_pattern3 = ngraph::builder::makeConstant(ngraph::element::i64, ngraph::Shape{2}, ngraph::Shape{1, memory_size}); - auto reshape3 = std::make_shared(relu, reshape_pattern3, false); + auto reshape3 = std::make_shared(relu, reshape_pattern3, false); function = std::make_shared(reshape3, input, "delayed_copy_layer_reshape_nonmemory"); } diff --git a/src/tests/functional/shared_test_classes/src/subgraph/eltwise_conv_eltwise.cpp b/src/tests/functional/shared_test_classes/src/subgraph/eltwise_conv_eltwise.cpp index 5b548fad75b645..8c7121310e1771 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/eltwise_conv_eltwise.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/eltwise_conv_eltwise.cpp @@ -64,8 +64,8 @@ void EltwiseAfterConvTest::SetUp() { ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape(inputShape))}; std::vector convInputShape = {1, inputChannels, 1, inputShape[0] * inputShape[1] / inputChannels}; - auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape); - auto reshape1 = std::make_shared(params[0], reshapePattern1, false); + auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, 
ngraph::Shape{ 4 }, convInputShape); + auto reshape1 = std::make_shared(params[0], reshapePattern1, false); auto filterWeights = ov::test::utils::generate_float_numbers(outputChannels * convInputShape[1] * kernelShape[0] * kernelShape[1], -0.2f, 0.2f); @@ -74,20 +74,20 @@ void EltwiseAfterConvTest::SetUp() { {kernelShape[0], kernelShape[1]}, {kernelShape[0] > 1 ? stride : 1, stride}, {0, 0}, - { 0, 0 }, { 1, 1 }, ngraph::op::PadType::VALID, outputChannels, false, filterWeights); + { 0, 0 }, { 1, 1 }, ov::op::PadType::VALID, outputChannels, false, filterWeights); auto widthAfterConv = (convInputShape[3] - kernelShape[1]) / stride + 1; std::vector outFormShapes = {1, outputChannels * widthAfterConv }; - auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes); - auto reshape2 = std::make_shared(conv, reshapePattern2, false); + auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes); + auto reshape2 = std::make_shared(conv, reshapePattern2, false); auto scale = ov::test::utils::generate_float_numbers(outFormShapes[1], -2.0f, 2.0f); auto shift = ov::test::utils::generate_float_numbers(outFormShapes[1], -2.0f, 2.0f); - auto mul_const = std::make_shared(ngPrc, outFormShapes, scale); - auto mul = std::make_shared(reshape2, mul_const); - auto add_const = std::make_shared(ngPrc, outFormShapes, shift); - auto add = std::make_shared(mul, add_const); + auto mul_const = std::make_shared(ngPrc, outFormShapes, scale); + auto mul = std::make_shared(reshape2, mul_const); + auto add_const = std::make_shared(ngPrc, outFormShapes, shift); + auto add = std::make_shared(mul, add_const); function = std::make_shared(mul, params, "EltwiseAfterConvTest"); } @@ -150,14 +150,14 @@ void EltwiseBeforeConvTest::SetUp() { auto scale = ov::test::utils::generate_float_numbers(inputShape[1], -2.0f, 2.0f); auto shift = ov::test::utils::generate_float_numbers(inputShape[1], -2.0f, 2.0f); - auto mul_const = std::make_shared(ngPrc, inputShape, scale); - auto mul = std::make_shared(params[0], mul_const); - auto add_const = std::make_shared(ngPrc, inputShape, shift); - auto add = std::make_shared(mul, add_const); + auto mul_const = std::make_shared(ngPrc, inputShape, scale); + auto mul = std::make_shared(params[0], mul_const); + auto add_const = std::make_shared(ngPrc, inputShape, shift); + auto add = std::make_shared(mul, add_const); std::vector convInputShape = {1, inputChannels, 1, inputShape[0] * inputShape[1] / inputChannels}; - auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape); - auto reshape1 = std::make_shared(mul, reshapePattern1, false); + auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape); + auto reshape1 = std::make_shared(mul, reshapePattern1, false); auto filterWeights = ov::test::utils::generate_float_numbers(outputChannels * convInputShape[1] * kernelShape[0] * kernelShape[1], -0.2f, 0.2f); @@ -166,12 +166,12 @@ void EltwiseBeforeConvTest::SetUp() { {kernelShape[0], kernelShape[1]}, {kernelShape[0] > 1 ? 
stride : 1, stride}, {0, 0}, - { 0, 0 }, { 1, 1 }, ngraph::op::PadType::VALID, outputChannels, false, filterWeights); + { 0, 0 }, { 1, 1 }, ov::op::PadType::VALID, outputChannels, false, filterWeights); auto widthAfterReshape = (convInputShape[3] - kernelShape[1]) / stride + 1; std::vector outFormShapes = {1, outputChannels * widthAfterReshape }; - auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes); - auto reshape2 = std::make_shared(conv, reshapePattern2, false); + auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes); + auto reshape2 = std::make_shared(conv, reshapePattern2, false); function = std::make_shared(reshape2, params, "EltwiseBeforeConvTest"); } @@ -234,8 +234,8 @@ void EltwiseWithTwoConvsAsInputsTest::SetUp() { std::make_shared(ngPrc, ov::Shape(inputShape))}; std::vector convInputShape = {1, inputChannels, 1, inputShape[0] * inputShape[1] / inputChannels}; - auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape); - auto reshape1 = std::make_shared(params[0], reshapePattern1, false); + auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape); + auto reshape1 = std::make_shared(params[0], reshapePattern1, false); auto filterWeights1 = ov::test::utils::generate_float_numbers(outputChannels * convInputShape[1] * kernelShape[0] * kernelShape[1], -0.2f, 0.2f); @@ -245,15 +245,15 @@ void EltwiseWithTwoConvsAsInputsTest::SetUp() { {kernelShape[0], kernelShape[1]}, {stride_h, stride}, {0, 0}, - { 0, 0 }, { 1, 1 }, ngraph::op::PadType::VALID, outputChannels, false, filterWeights1); + { 0, 0 }, { 1, 1 }, ov::op::PadType::VALID, outputChannels, false, filterWeights1); auto widthAfterReshape = (convInputShape[3] - kernelShape[1]) / stride + 1; std::vector outFormShapes = {1, outputChannels * widthAfterReshape }; - auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes); - auto reshape2 = std::make_shared(conv1, reshapePattern2, false); + auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes); + auto reshape2 = std::make_shared(conv1, reshapePattern2, false); - auto reshapePattern3 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape); - auto reshape3 = std::make_shared(params[1], reshapePattern3, false); + auto reshapePattern3 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape); + auto reshape3 = std::make_shared(params[1], reshapePattern3, false); auto filterWeights2 = ov::test::utils::generate_float_numbers(outputChannels * convInputShape[1] * kernelShape[0] * kernelShape[1], -0.2f, 0.2f); @@ -262,12 +262,12 @@ void EltwiseWithTwoConvsAsInputsTest::SetUp() { {kernelShape[0], kernelShape[1]}, {stride_h, stride}, {0, 0}, - { 0, 0 }, { 1, 1 }, ngraph::op::PadType::VALID, outputChannels, false, filterWeights2); + { 0, 0 }, { 1, 1 }, ov::op::PadType::VALID, outputChannels, false, filterWeights2); - auto reshapePattern4 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes); - auto reshape4 = std::make_shared(conv2, reshapePattern4, false); + auto reshapePattern4 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes); + auto reshape4 = std::make_shared(conv2, reshapePattern4, false); - auto add = std::make_shared(reshape2, reshape4); + auto add = std::make_shared(reshape2, 
reshape4); function = std::make_shared(add, params, "EltwiseWithTwoConvsAsInputsTest"); } diff --git a/src/tests/functional/shared_test_classes/src/subgraph/eltwise_reshape_activation.cpp b/src/tests/functional/shared_test_classes/src/subgraph/eltwise_reshape_activation.cpp index 7e6e6f0147397f..9ee715f8409bab 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/eltwise_reshape_activation.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/eltwise_reshape_activation.cpp @@ -39,13 +39,13 @@ void EltwiseReshapeActivation::SetUp() { std::make_shared(ngPrc, ov::Shape(shapes[0]))}; auto eltw = ngraph::builder::makeEltwise(input[0], input[1], ngraph::helpers::EltwiseTypes::ADD); - auto reshape_pattern1 = std::make_shared(ngraph::element::i64, ngraph::Shape{shapes[1].size()}, shapes[1]); - auto reshape1 = std::make_shared(eltw, reshape_pattern1, false); + auto reshape_pattern1 = std::make_shared(ngraph::element::i64, ngraph::Shape{shapes[1].size()}, shapes[1]); + auto reshape1 = std::make_shared(eltw, reshape_pattern1, false); auto relu = ngraph::builder::makeActivation(reshape1, ngPrc, ngraph::helpers::ActivationTypes::Relu); - auto reshape_pattern2 = std::make_shared(ngraph::element::i64, ngraph::Shape{shapes[0].size()}, shapes[0]); - auto reshape2 = std::make_shared(relu, reshape_pattern2, false); + auto reshape_pattern2 = std::make_shared(ngraph::element::i64, ngraph::Shape{shapes[0].size()}, shapes[0]); + auto reshape2 = std::make_shared(relu, reshape_pattern2, false); function = std::make_shared(reshape2, input, "EltwiseReshapeActivation"); } diff --git a/src/tests/functional/shared_test_classes/src/subgraph/fc_conv_fc.cpp b/src/tests/functional/shared_test_classes/src/subgraph/fc_conv_fc.cpp index a6b5491ea28197..d4ff136b32d262 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/fc_conv_fc.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/fc_conv_fc.cpp @@ -64,8 +64,8 @@ void FcAfterConvTest::SetUp() { ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape(inputShape))}; std::vector convInputShape = {1, inputChannels, 1, inputShape[0] * inputShape[1] / inputChannels}; - auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape); - auto reshape1 = std::make_shared(params[0], reshapePattern1, false); + auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape); + auto reshape1 = std::make_shared(params[0], reshapePattern1, false); auto filterWeights = ov::test::utils::generate_float_numbers(outputChannels * convInputShape[1] * kernelShape[0] * kernelShape[1], -0.1f, 0.1f); @@ -74,13 +74,13 @@ void FcAfterConvTest::SetUp() { {kernelShape[0], kernelShape[1]}, {kernelShape[0] > 1 ? 
stride : 1, stride}, {0, 0}, - { 0, 0 }, { 1, 1 }, ngraph::op::PadType::VALID, outputChannels, false, filterWeights); + { 0, 0 }, { 1, 1 }, ov::op::PadType::VALID, outputChannels, false, filterWeights); auto widthAfterConv = (convInputShape[3] - kernelShape[1]) / stride + 1; std::vector outFormShapes = {1, outputChannels * widthAfterConv }; - auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes); - auto reshape2 = std::make_shared(conv, reshapePattern2, false); - auto relu1 = std::make_shared(reshape2); + auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes); + auto reshape2 = std::make_shared(conv, reshapePattern2, false); + auto relu1 = std::make_shared(reshape2); std::vector fc3_weights = ov::test::utils::generate_float_numbers(outFormShapes[1] * outFormShapes[1], -0.1f, 0.1f); auto fc3 = ngraph::builder::makeFullyConnected(relu1, ngPrc, outFormShapes[1], false, {}, fc3_weights); @@ -154,8 +154,8 @@ void FcBeforeConvTest::SetUp() { auto fc2 = ngraph::builder::makeFullyConnected(fc1, ngPrc, inputShape[1], false, {}, fc2_weights); std::vector convInputShape = {1, inputChannels, 1, inputShape[0] * inputShape[1] / inputChannels}; - auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape); - auto reshape1 = std::make_shared(fc2, reshapePattern1, false); + auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape); + auto reshape1 = std::make_shared(fc2, reshapePattern1, false); auto filterWeights = ov::test::utils::generate_float_numbers(outputChannels * convInputShape[1] * kernelShape[0] * kernelShape[1], -0.1f, 0.1f); @@ -164,12 +164,12 @@ void FcBeforeConvTest::SetUp() { {kernelShape[0], kernelShape[1]}, {kernelShape[0] > 1 ? stride : 1, stride}, {0, 0}, - { 0, 0 }, { 1, 1 }, ngraph::op::PadType::VALID, outputChannels, false, filterWeights); + { 0, 0 }, { 1, 1 }, ov::op::PadType::VALID, outputChannels, false, filterWeights); auto widthAfterConv = (convInputShape[3] - kernelShape[1]) / stride + 1; std::vector outFormShapes = {1, outputChannels * widthAfterConv }; - auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes); - auto reshape2 = std::make_shared(conv, reshapePattern2, false); + auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes); + auto reshape2 = std::make_shared(conv, reshapePattern2, false); function = std::make_shared(reshape2, params, "FcBeforeConvTest"); } @@ -231,8 +231,8 @@ void FcBetweenConvsTest::SetUp() { ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape(inputShape))}; std::vector conv1InputShape = {1, inputChannels, 1, inputShape[0] * inputShape[1] / inputChannels}; - auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, conv1InputShape); - auto reshape1 = std::make_shared(params[0], reshapePattern1, false); + auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, conv1InputShape); + auto reshape1 = std::make_shared(params[0], reshapePattern1, false); auto filter1Weights = ov::test::utils::generate_float_numbers(outputChannels * conv1InputShape[1] * kernelShape[0] * kernelShape[1], -0.2f, 0.2f); @@ -241,30 +241,30 @@ void FcBetweenConvsTest::SetUp() { {kernelShape[0], kernelShape[1]}, {kernelShape[0] > 1 ? 
stride : 1, stride}, {0, 0}, - { 0, 0 }, { 1, 1 }, ngraph::op::PadType::VALID, outputChannels, false, filter1Weights); + { 0, 0 }, { 1, 1 }, ov::op::PadType::VALID, outputChannels, false, filter1Weights); auto widthAfterConv1 = (conv1InputShape[3] - kernelShape[1]) / stride + 1; std::vector outFormShapes1 = {1, outputChannels * widthAfterConv1 }; - auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes1); - auto reshape2 = std::make_shared(conv1, reshapePattern2, false); - auto relu = std::make_shared(reshape2); + auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes1); + auto reshape2 = std::make_shared(conv1, reshapePattern2, false); + auto relu = std::make_shared(reshape2); auto fc_weights = ov::test::utils::generate_float_numbers(outFormShapes1[1] * outFormShapes1[1], -0.2f, 0.2f); auto fc = ngraph::builder::makeFullyConnected(relu, ngPrc, outFormShapes1[1], false, {}, fc_weights); std::vector conv2InputShape = {1, outputChannels, 1, widthAfterConv1}; - auto reshapePattern3 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, conv2InputShape); - auto reshape3 = std::make_shared(fc, reshapePattern3, false); + auto reshapePattern3 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, conv2InputShape); + auto reshape3 = std::make_shared(fc, reshapePattern3, false); auto filter2Weights = ov::test::utils::generate_float_numbers(outputChannels * conv2InputShape[1], -0.2f, 0.2f); auto conv2 = ngraph::builder::makeConvolution(reshape3, ngPrc, { 1, 1 }, { 1, 1 }, { 0, 0 }, - { 0, 0 }, { 1, 1 }, ngraph::op::PadType::VALID, outputChannels, false, filter2Weights); + { 0, 0 }, { 1, 1 }, ov::op::PadType::VALID, outputChannels, false, filter2Weights); std::vector outFormShapes2 = {1, outputChannels * conv2InputShape[3]}; - auto reshapePattern4 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes2); - auto reshape4 = std::make_shared(conv2, reshapePattern4, false); + auto reshapePattern4 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes2); + auto reshape4 = std::make_shared(conv2, reshapePattern4, false); function = std::make_shared(reshape4, params, "FcBetweenConvsTest"); } diff --git a/src/tests/functional/shared_test_classes/src/subgraph/first_connect_input_concat.cpp b/src/tests/functional/shared_test_classes/src/subgraph/first_connect_input_concat.cpp index a819f39d3410fe..6416f82ce5f8fe 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/first_connect_input_concat.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/first_connect_input_concat.cpp @@ -35,10 +35,10 @@ void ConcatFirstInputTest::SetUp() { params.push_back(std::make_shared(ngPrc, ov::Shape(shape))); } auto const_second_param = ngraph::builder::makeConstant(ngPrc, {1, 8}, std::vector{-1.0f}); - auto concat = std::make_shared(ngraph::OutputVector{params[0], const_second_param}, 1); - auto relu = std::make_shared(concat); + auto concat = std::make_shared(ngraph::OutputVector{params[0], const_second_param}, 1); + auto relu = std::make_shared(concat); - ngraph::ResultVector results{std::make_shared(relu)}; + ngraph::ResultVector results{std::make_shared(relu)}; function = std::make_shared(results, params, "ConcatMultiInput"); } diff --git a/src/tests/functional/shared_test_classes/src/subgraph/fq_conv_fq_affine.cpp b/src/tests/functional/shared_test_classes/src/subgraph/fq_conv_fq_affine.cpp index 
ac480f6b567c4a..082e7b382914c6 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/fq_conv_fq_affine.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/fq_conv_fq_affine.cpp @@ -78,26 +78,26 @@ void FqConvFqAffineTest::SetUp() { auto inputFQNode = ngraph::builder::makeFakeQuantize(params[0], ngraph::element::f32, levels[0], std::vector{}, { inputDataMin }, { inputDataMax }, { inputDataMin }, { inputDataMax }); - auto inputFQ = std::dynamic_pointer_cast(inputFQNode); + auto inputFQ = std::dynamic_pointer_cast(inputFQNode); std::vector convInputShape = {1, inputChannels, 1, inputShape[0] * inputShape[1] / inputChannels}; - auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape); - auto reshape1 = std::make_shared(inputFQ, reshapePattern1, false); + auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape); + auto reshape1 = std::make_shared(inputFQ, reshapePattern1, false); auto filterWeightsNode = ngraph::builder::makeConstant(ngPrc, {outputChannels, inputChannels, kernelShape[0], kernelShape[1]}, { 1.0f }); auto convLowNode = ngraph::builder::makeConstant(ngraph::element::f32, std::vector{ 1 }, std::vector{inputDataMin}); auto convHighNode = ngraph::builder::makeConstant(ngraph::element::f32, std::vector{ 1 }, std::vector{inputDataMax}); - auto convWeightsFQNode = std::make_shared(filterWeightsNode, + auto convWeightsFQNode = std::make_shared(filterWeightsNode, convLowNode, convHighNode, convLowNode, convHighNode, levels[1]); - auto convWeightsFQ = std::dynamic_pointer_cast(convWeightsFQNode); + auto convWeightsFQ = std::dynamic_pointer_cast(convWeightsFQNode); - auto conv = std::make_shared(reshape1, convWeightsFQ, strides, std::vector{ 0, 0 }, + auto conv = std::make_shared(reshape1, convWeightsFQ, strides, std::vector{ 0, 0 }, std::vector{ 0, 0 }, std::vector{ 1, 1 }, - ngraph::op::PadType::VALID); + ov::op::PadType::VALID); auto biasesWeightsNode = ngraph::builder::makeConstant(ngPrc, {}, std::vector{ 0.0f }); - auto add = std::make_shared(conv, biasesWeightsNode); + auto add = std::make_shared(conv, biasesWeightsNode); auto widthAfterConv = (convInputShape[3] - kernelShape[1]) / strides[1] + 1; auto heightAfterConv = (convInputShape[2] - kernelShape[0]) / strides[0] + 1; @@ -105,26 +105,26 @@ void FqConvFqAffineTest::SetUp() { ngraph::Output nodeBeforeReshape; if (permute) { - auto permuteOrder = std::make_shared(ngraph::element::i64, + auto permuteOrder = std::make_shared(ngraph::element::i64, ngraph::Shape{4}, ngraph::Shape{{0, 3, 2, 1}}); - auto transpose = std::make_shared(add, permuteOrder); + auto transpose = std::make_shared(add, permuteOrder); nodeBeforeReshape = transpose; } else { nodeBeforeReshape = add; } - auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes); - auto reshape2 = std::make_shared(nodeBeforeReshape, reshapePattern2, false); + auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes); + auto reshape2 = std::make_shared(nodeBeforeReshape, reshapePattern2, false); auto matMulWeightsNode = ngraph::builder::makeConstant(ngPrc, {outFormShapes[1], outFormShapes[1]}, { 1.0f }); auto matMulLowNode = ngraph::builder::makeConstant(ngraph::element::f32, std::vector{ 1 }, std::vector{inputDataMin}); auto matMulHighNode = ngraph::builder::makeConstant(ngraph::element::f32, std::vector{ 1 }, std::vector{inputDataMax}); - auto matMulWeightsFQNode = 
std::make_shared(matMulWeightsNode, + auto matMulWeightsFQNode = std::make_shared(matMulWeightsNode, matMulLowNode, matMulHighNode, matMulLowNode, matMulHighNode, levels[1]); - auto matMulWeightsFQ = std::dynamic_pointer_cast(matMulWeightsFQNode); + auto matMulWeightsFQ = std::dynamic_pointer_cast(matMulWeightsFQNode); - auto matmul = std::make_shared(reshape2, matMulWeightsFQ, false, true); + auto matmul = std::make_shared(reshape2, matMulWeightsFQ, false, true); function = std::make_shared(matmul, params, "fqConvfqAffine"); } diff --git a/src/tests/functional/shared_test_classes/src/subgraph/fq_with_mixed_levels.cpp b/src/tests/functional/shared_test_classes/src/subgraph/fq_with_mixed_levels.cpp index 8599967c535104..02ea5d6fe24d66 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/fq_with_mixed_levels.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/fq_with_mixed_levels.cpp @@ -35,10 +35,10 @@ void FqWithMixedLevelsTest::SetUp() { size_t level1, const std::vector>& data1, size_t level2, const std::vector>& data2, size_t level3, const std::vector>& data3) { - auto sigmoid = std::make_shared(input); + auto sigmoid = std::make_shared(input); auto fake1 = ngraph::builder::makeFakeQuantize(sigmoid, ngPrc, level1, { 1 }, data1[0], data1[1], data1[2], data1[3]); std::vector weights = ov::test::utils::generate_float_numbers(shapes[1][0] * shapes[1][1], weights_min, weights_max); - auto constant = std::make_shared(ngPrc, ngraph::Shape{shapes[1][0], shapes[1][1]}, weights); + auto constant = std::make_shared(ngPrc, ngraph::Shape{shapes[1][0], shapes[1][1]}, weights); auto fake2 = ngraph::builder::makeFakeQuantize(constant, ngPrc, level2, { 1 }, data2[0], data2[1], data2[2], data2[3]); auto matmul = std::make_shared(fake1, fake2, false, true); auto bias = ngraph::builder::makeConstant(ngPrc, std::vector{shapes[0][0], shapes[1][0]}, std::vector{ 1.0 }); @@ -67,7 +67,7 @@ void FqWithMixedLevelsTest::SetUp() { std::numeric_limits::max(), {{ -1.0 }, { 1.0 }, { -1.0 }, { 1.0 }}, std::numeric_limits::max(), {{ -2.5 }, { 2.5 }, { -2.5 }, { 2.5 }}, std::numeric_limits::max(), {{ -5. } , { 5. }, { -5. }, { 5. 
}}); - auto result = std::make_shared(input); + auto result = std::make_shared(input); function = std::make_shared(ngraph::ResultVector{result}, params, "FqWithMixedLevelsTest"); } diff --git a/src/tests/functional/shared_test_classes/src/subgraph/handling_orientation_conv.cpp b/src/tests/functional/shared_test_classes/src/subgraph/handling_orientation_conv.cpp index a8d791fe931ed4..0d3ecb60be5025 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/handling_orientation_conv.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/handling_orientation_conv.cpp @@ -26,32 +26,32 @@ namespace SubgraphTestsDefinitions { std::make_shared(ngPrc, ov::Shape{1, 336})}; std::vector outFormShapes1 = { 1, 1, 168, 2 }; std::vector outFormShapes2 = { 1, 336, 1, 1 }; - auto pattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, outFormShapes1); - auto reshape1 = std::make_shared(params[0], pattern1, false); + auto pattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, outFormShapes1); + auto reshape1 = std::make_shared(params[0], pattern1, false); - auto pattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, outFormShapes2); - auto reshape2 = std::make_shared(params[1], pattern2, false); + auto pattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, outFormShapes2); + auto reshape2 = std::make_shared(params[1], pattern2, false); - auto permute1 = std::make_shared(reshape1, - ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{ 4 }, { 0, 3, 1, 2 })); + auto permute1 = std::make_shared(reshape1, + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{ 4 }, { 0, 3, 1, 2 })); auto conv1 = ngraph::builder::makeConvolution(permute1, ngPrc, { 1, 8 }, { 1, 1 }, { 0, 0 }, { 0, 0 }, { 1, 1 }, - ngraph::op::PadType::VALID, 12); + ov::op::PadType::VALID, 12); - auto permute2 = std::make_shared(conv1, - ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{ 4 }, { 0, 2, 3, 1 })); + auto permute2 = std::make_shared(conv1, + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{ 4 }, { 0, 2, 3, 1 })); auto conv2 = ngraph::builder::makeConvolution(reshape2, ngPrc, { 1, 1 }, { 1, 1 }, { 0, 0 }, { 0, 0 }, { 1, 1 }, - ngraph::op::PadType::VALID, 336); + ov::op::PadType::VALID, 336); std::vector outFormShapes3 = { 1, 1932 }; std::vector outFormShapes4 = { 1, 336 }; - auto pattern3 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes3); - auto pattern4 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes4); - auto reshape3 = std::make_shared(permute2, pattern3, false); - auto reshape4 = std::make_shared(conv2, pattern4, false); - ngraph::ResultVector results{ std::make_shared(reshape3), - std::make_shared(reshape4)}; + auto pattern3 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes3); + auto pattern4 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes4); + auto reshape3 = std::make_shared(permute2, pattern3, false); + auto reshape4 = std::make_shared(conv2, pattern4, false); + ngraph::ResultVector results{ std::make_shared(reshape3), + std::make_shared(reshape4)}; function = std::make_shared(results, params, "RemovePermutationPass"); } } // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/subgraph/input_conv.cpp b/src/tests/functional/shared_test_classes/src/subgraph/input_conv.cpp index 
6ec9cb30bc7791..58381184679c25 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/input_conv.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/input_conv.cpp @@ -86,7 +86,7 @@ void InputConvTest::SetUp() { {0, 0}, {0, 0}, {1, 1}, - ngraph::op::PadType::VALID, + ov::op::PadType::VALID, outputChannels, true, generateWeights(outputChannels, kernelShape[1])); @@ -94,13 +94,13 @@ void InputConvTest::SetUp() { if (addReshape) { size_t numOutputWidth = (((inputShape[1] * inputShape[2] * inputShape[3] - kernelShape[1] * kernelShape[0]) / (inputShape[1] * stride)) + 1); std::vector outFormShapes0 = { 1, outputChannels * numOutputWidth }; - auto pattern0 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes0); - auto reshape0 = std::make_shared(conv0, pattern0, false); + auto pattern0 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes0); + auto reshape0 = std::make_shared(conv0, pattern0, false); - ngraph::ResultVector results{ std::make_shared(reshape0) }; + ngraph::ResultVector results{ std::make_shared(reshape0) }; function = std::make_shared(results, params, "InputConvTest"); } else { - ngraph::ResultVector results{ std::make_shared(conv0) }; + ngraph::ResultVector results{ std::make_shared(conv0) }; function = std::make_shared(results, params, "InputConvTest"); } } diff --git a/src/tests/functional/shared_test_classes/src/subgraph/input_split_concat.cpp b/src/tests/functional/shared_test_classes/src/subgraph/input_split_concat.cpp index 08202b7a982d95..ffc2dc218da338 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/input_split_concat.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/input_split_concat.cpp @@ -36,14 +36,14 @@ void InputSplitConcatTest::SetUp() { auto split_axis_op = std::make_shared(ov::element::Type_t::i64, ov::Shape{}, std::vector{1}); auto split = std::make_shared(params[0], split_axis_op, 2); - auto relu1 = std::make_shared(split->output(0)); + auto relu1 = std::make_shared(split->output(0)); auto const_vals = ov::test::utils::generate_float_numbers(inputShape[1], -5.0f, 5.0f); auto constant = ngraph::builder::makeConstant(ngPrc, inputShape, const_vals); - auto concat = std::make_shared(ngraph::OutputVector{constant, split->output(1)}, 1); - auto relu2 = std::make_shared(concat); + auto concat = std::make_shared(ngraph::OutputVector{constant, split->output(1)}, 1); + auto relu2 = std::make_shared(concat); - ngraph::ResultVector results{ std::make_shared(relu1), std::make_shared(relu2) }; + ngraph::ResultVector results{ std::make_shared(relu1), std::make_shared(relu2) }; function = std::make_shared(results, params, "InputSplitConcatTest"); } } // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/subgraph/matmul_act_add.cpp b/src/tests/functional/shared_test_classes/src/subgraph/matmul_act_add.cpp index 4b33f924169e59..adc4de27a10e49 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/matmul_act_add.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/matmul_act_add.cpp @@ -37,11 +37,11 @@ void MatMulActAddTest::SetUp() { auto mul_const = ngraph::builder::makeConstant(ngPrc, { outFormShapes[1], inputSize }, ov::test::utils::generate_float_numbers(outFormShapes[1] * inputSize, -0.5f, 0.5f), false); - auto matmul = std::make_shared(params[0], mul_const, false, true); + auto matmul = std::make_shared(params[0], mul_const, false, true); - auto tanh = std::make_shared(matmul); - auto eltw = 
std::make_shared(matmul, tanh); - auto res = std::make_shared(eltw); + auto tanh = std::make_shared(matmul); + auto eltw = std::make_shared(matmul, tanh); + auto res = std::make_shared(eltw); function = std::make_shared(res, params, "MatMul_Act_Add"); } } // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/subgraph/matmul_squeeze_add.cpp b/src/tests/functional/shared_test_classes/src/subgraph/matmul_squeeze_add.cpp index 01b628d63cf8fd..098d33a76226df 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/matmul_squeeze_add.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/matmul_squeeze_add.cpp @@ -45,24 +45,24 @@ void MatmulSqueezeAddTest::SetUp() { {outputSize, inputShape[1]}, ov::test::utils::generate_float_numbers(outputSize * inputShape[1], 0, 1, seed), false); - auto matmul_0 = std::make_shared(params[0], constant_0, false, true); + auto matmul_0 = std::make_shared(params[0], constant_0, false, true); auto constant_1 = - std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{1}, std::vector{0}); - auto unsqueeze_0 = std::make_shared(matmul_0, constant_1); + std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{1}, std::vector{0}); + auto unsqueeze_0 = std::make_shared(matmul_0, constant_1); auto constant_2 = ngraph::builder::makeConstant( element_type, {1, inputShape[0], outputSize}, ov::test::utils::generate_float_numbers(inputShape[0] * outputSize, 0, 1, seed), false); - auto add_0 = std::make_shared(unsqueeze_0, constant_2); + auto add_0 = std::make_shared(unsqueeze_0, constant_2); auto constant_3 = - std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{1}, std::vector{0}); - auto squeeze_0 = std::make_shared(add_0, constant_3); + std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{1}, std::vector{0}); + auto squeeze_0 = std::make_shared(add_0, constant_3); - ngraph::ResultVector results{std::make_shared(squeeze_0)}; + ngraph::ResultVector results{std::make_shared(squeeze_0)}; function = std::make_shared(results, params, "MatmulSqueezeAddTest"); } diff --git a/src/tests/functional/shared_test_classes/src/subgraph/memory_LSTMCell.cpp b/src/tests/functional/shared_test_classes/src/subgraph/memory_LSTMCell.cpp index 65d50b50aaaab3..28a8880fd4dfff 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/memory_LSTMCell.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/memory_LSTMCell.cpp @@ -13,7 +13,6 @@ #include "ov_models/builders.hpp" using namespace ngraph; -using namespace opset7; namespace ov { namespace test { diff --git a/src/tests/functional/shared_test_classes/src/subgraph/memory_eltwise_reshape_concat.cpp b/src/tests/functional/shared_test_classes/src/subgraph/memory_eltwise_reshape_concat.cpp index 314fc9bac749d1..bd2a0177ef20e8 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/memory_eltwise_reshape_concat.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/memory_eltwise_reshape_concat.cpp @@ -55,18 +55,18 @@ void MemoryEltwiseReshapeConcatTest::initTestModel() { auto memory_constant = ngraph::builder::makeConstant(ngPrc, input_dims, memory_init); memory_constant->set_friendly_name("memory_constant"); - auto memory_read = std::make_shared(memory_constant, "memory"); + auto memory_read = std::make_shared(memory_constant, "memory"); memory_read->set_friendly_name("memory_read"); auto mul = ngraph::builder::makeEltwise(input_parameter[0], memory_read, ngraph::helpers::EltwiseTypes::MULTIPLY); 
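// Editor's aside (not part of the patch): the memory-layer hunks above and below replace
// opset-alias constructions with explicit ov::op types, but the stripped template arguments
// make the target pattern hard to read. Below is a minimal, hypothetical sketch of that
// pattern, assuming ov::op::v3::ReadValue / ov::op::v3::Assign with string variable ids and
// ov::op::v0::Constant; the exact opset versions and names used in the real test files may differ.
#include <memory>
#include <vector>

#include "openvino/op/assign.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/multiply.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/read_value.hpp"

// Illustrative helper only: builds a tiny read -> multiply -> write memory subgraph.
std::shared_ptr<ov::Node> make_memory_multiply_sketch(size_t memory_size) {
    auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, memory_size});
    std::vector<float> memory_init(memory_size, 0.0f);
    auto mem_c = std::make_shared<ov::op::v0::Constant>(ov::element::f32, ov::Shape{1, memory_size}, memory_init);
    auto mem_r = std::make_shared<ov::op::v3::ReadValue>(mem_c, "memory");  // explicit ov type instead of an opset alias
    auto mul   = std::make_shared<ov::op::v1::Multiply>(input, mem_r);
    auto mem_w = std::make_shared<ov::op::v3::Assign>(mul, "memory");
    mem_w->add_control_dependency(mem_r);  // keep the write ordered after the read, as the tests do
    return mul;
}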
mul->set_friendly_name("multiplication"); - auto memory_write = std::make_shared(mul, "memory"); + auto memory_write = std::make_shared(mul, "memory"); memory_write->set_friendly_name("memory_write"); - auto reshape_1_pattern = std::make_shared(ngraph::element::i64, ngraph::Shape{2}, std::vector({inputSize, concatSize})); + auto reshape_1_pattern = std::make_shared(ngraph::element::i64, ngraph::Shape{2}, std::vector({inputSize, concatSize})); reshape_1_pattern->set_friendly_name("reshape_pattern"); - auto reshape_1 = std::make_shared(mul, reshape_1_pattern, false); + auto reshape_1 = std::make_shared(mul, reshape_1_pattern, false); reshape_1->set_friendly_name("reshape"); auto concat_constant = ngraph::builder::makeConstant(ngPrc, {1, concatSize}, concat_vals); @@ -77,9 +77,9 @@ void MemoryEltwiseReshapeConcatTest::initTestModel() { memory_write->add_control_dependency(memory_read); concat->add_control_dependency(memory_write); - auto final_reshape_pattern = std::make_shared(ngraph::element::i64, ngraph::Shape{4}, + auto final_reshape_pattern = std::make_shared(ngraph::element::i64, ngraph::Shape{4}, std::vector({1, 1, inputSize + 1, concatSize})); - auto final_reshape = std::make_shared(concat, final_reshape_pattern, false); + auto final_reshape = std::make_shared(concat, final_reshape_pattern, false); function = std::make_shared(final_reshape, input_parameter, "memory_multiply_reshape_concat"); } @@ -94,14 +94,14 @@ void MemoryEltwiseReshapeConcatTest::initNgraphFriendlyModel() { auto mul = ngraph::builder::makeEltwise(input_parameter[0], memory_constant, ngraph::helpers::EltwiseTypes::MULTIPLY); mul->set_friendly_name("multiplication"); - auto reshape_pattern = std::make_shared(ngraph::element::i64, ngraph::Shape{3}, std::vector({1, inputSize, concatSize})); + auto reshape_pattern = std::make_shared(ngraph::element::i64, ngraph::Shape{3}, std::vector({1, inputSize, concatSize})); reshape_pattern->set_friendly_name("reshape_pattern"); - auto reshape = std::make_shared(mul, reshape_pattern, false); + auto reshape = std::make_shared(mul, reshape_pattern, false); reshape->set_friendly_name("reshape"); - auto squeeze_const = std::make_shared(ngraph::element::i64, ngraph::Shape{1}, 0); + auto squeeze_const = std::make_shared(ngraph::element::i64, ngraph::Shape{1}, 0); squeeze_const->set_friendly_name("squeeze_const"); - auto squeeze = std::make_shared(reshape, squeeze_const); + auto squeeze = std::make_shared(reshape, squeeze_const); squeeze->set_friendly_name("squeeze"); auto concat_constant = ngraph::builder::makeConstant(ngPrc, {1, concatSize}, concat_vals); diff --git a/src/tests/functional/shared_test_classes/src/subgraph/memory_fq_concat_prelu.cpp b/src/tests/functional/shared_test_classes/src/subgraph/memory_fq_concat_prelu.cpp index 4382efcd8491c6..f23a1fca68166c 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/memory_fq_concat_prelu.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/memory_fq_concat_prelu.cpp @@ -98,7 +98,7 @@ void MemoryFqConcatPrelu::SetUp() { input.push_back(std::make_shared(ngPrc, ov::Shape(shape))); } auto memory_read = ngraph::builder::makeConstant(ngPrc, {inputs[0]}, {0}); - auto read = std::make_shared(memory_read, "variable1"); + auto read = std::make_shared(memory_read, "variable1"); auto fake_constatnt = ngraph::builder::makeConstant(ngPrc, {inputs[0]}, {0}); auto fake = ngraph::builder::makeFakeQuantize(fake_constatnt, ngPrc, std::get<0>(fake_quantize_params), @@ -108,8 +108,8 @@ void MemoryFqConcatPrelu::SetUp() { 
std::get<4>(fake_quantize_params), std::get<5>(fake_quantize_params)); auto concat = std::make_shared(ov::OutputVector{read, fake, input[0]}, 1); - auto prelu_constant = ngraph::op::Constant::create(ngPrc, {1}, {-2}); - auto prelu = std::make_shared(concat, prelu_constant); + auto prelu_constant = ov::op::v0::Constant::create(ngPrc, {1}, {-2}); + auto prelu = std::make_shared(concat, prelu_constant); auto begin = std::get<0>(strided_slice_params); auto end = std::get<1>(strided_slice_params); @@ -130,8 +130,8 @@ void MemoryFqConcatPrelu::SetUp() { std::vector{}, std::vector{}); - auto assign = std::make_shared(slice, "variable1"); - auto result = std::make_shared(prelu); + auto assign = std::make_shared(slice, "variable1"); + auto result = std::make_shared(prelu); assign->add_control_dependency(read); result->add_control_dependency(assign); function = std::make_shared(ngraph::ResultVector{result}, input, "memory_fq_concat_prelu"); diff --git a/src/tests/functional/shared_test_classes/src/subgraph/multi_crops_to_concat.cpp b/src/tests/functional/shared_test_classes/src/subgraph/multi_crops_to_concat.cpp index e77ec44724b992..05daf1dfcf54ab 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/multi_crops_to_concat.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/multi_crops_to_concat.cpp @@ -68,7 +68,7 @@ void MultiCropsToConcatTest::SetUp() { std::vector{ 0, 0 }, std::vector{ 0, 0 }); - auto concat1 = std::make_shared(ngraph::OutputVector{crop1, crop2}, 1); + auto concat1 = std::make_shared(ngraph::OutputVector{crop1, crop2}, 1); std::shared_ptr result; // Case with 3 crops @@ -87,10 +87,10 @@ void MultiCropsToConcatTest::SetUp() { std::vector{ 0, 0 }, std::vector{ 0, 0 }); - auto concat2 = std::make_shared(ngraph::OutputVector{crop1, crop2}, 1); - result = std::make_shared(concat2); + auto concat2 = std::make_shared(ngraph::OutputVector{crop1, crop2}, 1); + result = std::make_shared(concat2); } else { - result = std::make_shared(concat1); + result = std::make_shared(concat1); } function = std::make_shared(result, params, "InputSplitConcatTest"); } diff --git a/src/tests/functional/shared_test_classes/src/subgraph/multi_input_scale.cpp b/src/tests/functional/shared_test_classes/src/subgraph/multi_input_scale.cpp index e66bb94703f230..094f487ca738b9 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/multi_input_scale.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/multi_input_scale.cpp @@ -46,7 +46,7 @@ void MultipleInputScaleTest::SetUp() { auto add = ngraph::builder::makeEltwise(fc1, fc2, ngraph::helpers::EltwiseTypes::ADD); - auto result = std::make_shared(add); + auto result = std::make_shared(add); function = std::make_shared(result, input, "multiple_input_scale"); functionRefs = ngraph::clone_function(*function); } diff --git a/src/tests/functional/shared_test_classes/src/subgraph/multioutput_eltwise_squeeze_eltwise.cpp b/src/tests/functional/shared_test_classes/src/subgraph/multioutput_eltwise_squeeze_eltwise.cpp index 3404f0a4597d22..0e6c6fd03e6037 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/multioutput_eltwise_squeeze_eltwise.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/multioutput_eltwise_squeeze_eltwise.cpp @@ -36,7 +36,7 @@ namespace SubgraphTestsDefinitions { auto eltwise_const = ngraph::builder::makeConstant(ngPrc, ngraph::Shape{input[0]->get_shape()}, std::vector{-1.0f}); - auto eltwise = std::make_shared(input[0], eltwise_const); + auto eltwise = std::make_shared(input[0], 
eltwise_const); auto squeeze_constant = std::make_shared(ov::element::i64, ov::Shape{1}, std::vector{0}); auto squeeze = std::make_shared(eltwise, squeeze_constant); @@ -45,10 +45,10 @@ namespace SubgraphTestsDefinitions { auto eltwise_const2 = ngraph::builder::makeConstant(ngPrc, ngraph::Shape{1}, std::vector{1.01f}); auto eltwise_const3 = ngraph::builder::makeConstant(ngPrc, ngraph::Shape{1}, std::vector{1.01f}); - auto eltwise2 = std::make_shared(eltwise, eltwise_const2); - auto eltwise3 = std::make_shared(unsqueeze, eltwise_const3); - ngraph::ResultVector results{std::make_shared(eltwise2), - std::make_shared(eltwise3)}; + auto eltwise2 = std::make_shared(eltwise, eltwise_const2); + auto eltwise3 = std::make_shared(unsqueeze, eltwise_const3); + ngraph::ResultVector results{std::make_shared(eltwise2), + std::make_shared(eltwise3)}; function = std::make_shared(results, input, "eltwise_reshape_eltwise_multioutput"); } } // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/subgraph/multiple_connect_split_concat.cpp b/src/tests/functional/shared_test_classes/src/subgraph/multiple_connect_split_concat.cpp index d416ee03fdc5aa..01794994ce83d2 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/multiple_connect_split_concat.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/multiple_connect_split_concat.cpp @@ -26,18 +26,18 @@ void MultipleConnectSplitConcatTest::SetUp() { auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape{1, 256})}; - auto relu_start = std::make_shared(params[0]); + auto relu_start = std::make_shared(params[0]); auto split_axis_op = std::make_shared(ov::element::Type_t::i64, ov::Shape{}, std::vector{1}); auto split = std::make_shared(relu_start, split_axis_op, 1); - auto concat = std::make_shared(ngraph::OutputVector{split->output(0), split->output(0)}, 1); - auto concat_2 = std::make_shared(ngraph::OutputVector{split->output(0), split->output(0)}, + auto concat = std::make_shared(ngraph::OutputVector{split->output(0), split->output(0)}, 1); + auto concat_2 = std::make_shared(ngraph::OutputVector{split->output(0), split->output(0)}, 1); - auto relu = std::make_shared(concat); - auto relu_2 = std::make_shared(concat_2); + auto relu = std::make_shared(concat); + auto relu_2 = std::make_shared(concat_2); ngraph::ResultVector resultVector{ - std::make_shared(relu), - std::make_shared(relu_2) + std::make_shared(relu), + std::make_shared(relu_2) }; function = std::make_shared(resultVector, params, "Multiple_connection_split_concat"); } diff --git a/src/tests/functional/shared_test_classes/src/subgraph/multiple_input_fq.cpp b/src/tests/functional/shared_test_classes/src/subgraph/multiple_input_fq.cpp index cf1d06993e46f0..cfee87e744a466 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/multiple_input_fq.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/multiple_input_fq.cpp @@ -58,7 +58,7 @@ void MultipleInputTest::SetUp() { auto fake_add4 = ngraph::builder::makeFakeQuantize(add4, ngPrc, std::numeric_limits::max(), { 1 }, { 5 * minInput }, { 5 * maxInput }, { 5 * minInput }, { 5 * maxInput }); - auto result = std::make_shared(fake_add4); + auto result = std::make_shared(fake_add4); function = std::make_shared(ngraph::ResultVector{result}, input, "multiple_input"); } diff --git a/src/tests/functional/shared_test_classes/src/subgraph/negative_memory_layer_offset.cpp 
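Note on the memory-based subgraph tests above (memory_eltwise_reshape_concat, memory_fq_concat_prelu, multiple_input_fq): they all rely on the same state pattern, a ReadValue/Assign pair bound to one variable id and ordered through control dependencies. The template arguments are not visible in the patch text, so the concrete op versions below (v3, string-id constructors) are an assumption; this is only a minimal sketch of the pattern, not the exact test code.

#include <memory>
#include <vector>

#include "openvino/core/model.hpp"
#include "openvino/op/add.hpp"
#include "openvino/op/assign.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/read_value.hpp"
#include "openvino/op/result.hpp"

std::shared_ptr<ov::Model> make_memory_sketch() {
    auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 8});
    auto init = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{1, 8}, std::vector<float>(8, 0.f));
    // ReadValue/Assign bound to the same variable id form the memory state.
    auto mem_read = std::make_shared<ov::op::v3::ReadValue>(init, "memory");
    auto sum = std::make_shared<ov::op::v1::Add>(input, mem_read);
    auto mem_write = std::make_shared<ov::op::v3::Assign>(sum, "memory");
    auto result = std::make_shared<ov::op::v0::Result>(sum);
    // Control dependencies keep the Assign in the graph and ordered after the ReadValue,
    // mirroring the add_control_dependency calls in the hunks above.
    mem_write->add_control_dependency(mem_read);
    result->add_control_dependency(mem_write);
    return std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{input}, "memory_sketch");
}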
b/src/tests/functional/shared_test_classes/src/subgraph/negative_memory_layer_offset.cpp index bc276d28dbd5d3..7ed5349556274c 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/negative_memory_layer_offset.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/negative_memory_layer_offset.cpp @@ -36,14 +36,14 @@ namespace SubgraphTestsDefinitions { memory_init.emplace_back(static_cast(dist(gen))); ov::ParameterVector input{std::make_shared(ngPrc, ov::Shape{1, inputSize})}; - auto mem_c = std::make_shared(ngPrc, ngraph::Shape{ 1, hiddenSize }, memory_init); - auto mem_r = std::make_shared(mem_c, "memory"); + auto mem_c = std::make_shared(ngPrc, ngraph::Shape{ 1, hiddenSize }, memory_init); + auto mem_r = std::make_shared(mem_c, "memory"); // Use memory layer as the second input of 'concat' to get negative offset - auto concat = std::make_shared(ngraph::OutputVector{ input[0], mem_r }, 1); + auto concat = std::make_shared(ngraph::OutputVector{ input[0], mem_r }, 1); auto split = ngraph::builder::makeVariadicSplit(concat, { hiddenSize, inputSize }, 1); - auto mem_w = std::make_shared(split->output(0), "memory"); - auto sigm = std::make_shared(split->output(1)); + auto mem_w = std::make_shared(split->output(0), "memory"); + auto sigm = std::make_shared(split->output(1)); mem_w->add_control_dependency(mem_r); sigm->add_control_dependency(mem_w); @@ -58,10 +58,10 @@ namespace SubgraphTestsDefinitions { std::tie(netPrecision, targetDevice, inputSize, hiddenSize, std::ignore) = this->GetParam(); auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); ov::ParameterVector input{std::make_shared(ngPrc, ov::Shape{1, inputSize})}; - auto mem_c = std::make_shared(ngPrc, ngraph::Shape{ 1, hiddenSize }, memory_init); - auto concat = std::make_shared(ngraph::OutputVector{ input[0], mem_c }, 1); + auto mem_c = std::make_shared(ngPrc, ngraph::Shape{ 1, hiddenSize }, memory_init); + auto concat = std::make_shared(ngraph::OutputVector{ input[0], mem_c }, 1); auto split = ngraph::builder::makeVariadicSplit(concat, { hiddenSize, inputSize }, 1); - auto sigm = std::make_shared(split->output(1)); + auto sigm = std::make_shared(split->output(1)); function = std::make_shared(sigm, input, "negative_memory_layer_offset_nonmemory"); } diff --git a/src/tests/functional/shared_test_classes/src/subgraph/parameter_reshape_result.cpp b/src/tests/functional/shared_test_classes/src/subgraph/parameter_reshape_result.cpp index ce18ea90f050c9..7504596faf75a4 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/parameter_reshape_result.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/parameter_reshape_result.cpp @@ -35,9 +35,9 @@ void ParamReshapeResult::SetUp() { auto shape = inputShape; shape[shape.size() - 2] *= 2; shape[shape.size() - 1] /= 2; - auto reshape_const = std::make_shared(ngraph::element::Type_t::i64, + auto reshape_const = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{shape.size()}, shape); - auto reshape = std::make_shared(params[0], reshape_const, false); + auto reshape = std::make_shared(params[0], reshape_const, false); function = std::make_shared(reshape, params, "ParamReshapeResult"); } diff --git a/src/tests/functional/shared_test_classes/src/subgraph/parameter_result.cpp b/src/tests/functional/shared_test_classes/src/subgraph/parameter_result.cpp index e11bc877cd4605..1bf29f54c76b1a 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/parameter_result.cpp +++ 
b/src/tests/functional/shared_test_classes/src/subgraph/parameter_result.cpp @@ -23,8 +23,8 @@ std::string ParameterResultSubgraphTestBase::getTestCaseName(const testing::Test } std::shared_ptr ParameterResultSubgraphTestBase::createModel(const ov::PartialShape& shape) { - auto parameter = std::make_shared(ov::element::f32, shape); - const ngraph::ResultVector results{std::make_shared(parameter)}; + auto parameter = std::make_shared(ov::element::f32, shape); + const ngraph::ResultVector results{std::make_shared(parameter)}; ngraph::ParameterVector params = {parameter}; auto model = std::make_shared(results, params, "ParameterResult"); return model; diff --git a/src/tests/functional/shared_test_classes/src/subgraph/parameter_shapeof_result.cpp b/src/tests/functional/shared_test_classes/src/subgraph/parameter_shapeof_result.cpp index 3a4b591b4f447b..c68222efcf659b 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/parameter_shapeof_result.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/parameter_shapeof_result.cpp @@ -24,9 +24,9 @@ void ParameterShapeOfResultSubgraphTest::SetUp() { std::tie(inType, targetDevice) = this->GetParam(); inPrc = InferenceEngine::details::convertPrecision(inType); - const auto parameter = std::make_shared(inType, ngraph::Shape{1, 3, 10, 10}); - const auto shapeOf = std::make_shared(parameter); - const ngraph::ResultVector results{std::make_shared(shapeOf)}; + const auto parameter = std::make_shared(inType, ngraph::Shape{1, 3, 10, 10}); + const auto shapeOf = std::make_shared(parameter); + const ngraph::ResultVector results{std::make_shared(shapeOf)}; ngraph::ParameterVector params = {parameter}; function = std::make_shared(results, params, "ParameterShapeOfResult"); } diff --git a/src/tests/functional/shared_test_classes/src/subgraph/permute_concat_concat_permute.cpp b/src/tests/functional/shared_test_classes/src/subgraph/permute_concat_concat_permute.cpp index 8f0c34ec088df8..1c2f28ea17618b 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/permute_concat_concat_permute.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/permute_concat_concat_permute.cpp @@ -33,19 +33,19 @@ void PermuteConcatConcatPermute::SetUp() { auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(net_precision); - auto input_param = std::make_shared(ngPrc, ngraph::Shape{input_shape}); + auto input_param = std::make_shared(ngPrc, ngraph::Shape{input_shape}); std::vector permute_param = {1, 0}; auto permute_params = - ngraph::opset9::Constant::create(ngraph::element::i64, ngraph::Shape{permute_param.size()}, permute_param); - auto permute_1 = std::make_shared(input_param, permute_params); + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{permute_param.size()}, permute_param); + auto permute_1 = std::make_shared(input_param, permute_params); auto const_input_1 = CreateConst(input_shape, ngPrc, false); - auto concat_1 = std::make_shared(ngraph::OutputVector{const_input_1, permute_1}, 0); + auto concat_1 = std::make_shared(ngraph::OutputVector{const_input_1, permute_1}, 0); auto const_input_2 = CreateConst(input_shape, ngPrc, true); - auto concat_2 = std::make_shared(ngraph::OutputVector{concat_1, const_input_2}, 0); + auto concat_2 = std::make_shared(ngraph::OutputVector{concat_1, const_input_2}, 0); - auto permute_2 = std::make_shared(concat_2, permute_params); + auto permute_2 = std::make_shared(concat_2, permute_params); function = std::make_shared(permute_2, ngraph::ParameterVector{input_param}, @@ -53,7 
+53,7 @@ void PermuteConcatConcatPermute::SetUp() { range_ = InferenceEngine::details::product(input_shape); } -std::shared_ptr PermuteConcatConcatPermute::CreateConst( +std::shared_ptr PermuteConcatConcatPermute::CreateConst( const std::vector& input_shape, const ::ngraph::element::Type& precision, bool use_1_as_first_dimension) { @@ -75,7 +75,7 @@ std::shared_ptr PermuteConcatConcatPermute::CreateCons const auto const_input_shape = ngraph::Shape{const_input_shape_vec}; auto const_input_values_size = InferenceEngine::details::product(const_input_shape_vec); auto const_input_values = std::vector(const_input_values_size, 0); - return ngraph::opset9::Constant::create(precision, const_input_shape, const_input_values); + return ov::op::v0::Constant::create(precision, const_input_shape, const_input_values); } void PermuteConcatConcatPermute::Validate() { diff --git a/src/tests/functional/shared_test_classes/src/subgraph/permute_concat_permute.cpp b/src/tests/functional/shared_test_classes/src/subgraph/permute_concat_permute.cpp index a469c997608a0f..1066e19bba4763 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/permute_concat_permute.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/permute_concat_permute.cpp @@ -38,11 +38,11 @@ void PermuteConcatPermute::SetUp() { auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - auto input_param = std::make_shared(ngPrc, ngraph::Shape{input_shape}); + auto input_param = std::make_shared(ngPrc, ngraph::Shape{input_shape}); auto permute_params_1 = - ngraph::opset9::Constant::create(ngraph::element::i64, ngraph::Shape{permute_1_param.size()}, permute_1_param); + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{permute_1_param.size()}, permute_1_param); - auto permute_1 = std::make_shared(input_param, permute_params_1); + auto permute_1 = std::make_shared(input_param, permute_params_1); auto const_input_shape_vec = std::vector{1}; const_input_shape_vec.insert(const_input_shape_vec.end(), input_shape.begin(), std::prev(input_shape.end())); @@ -50,16 +50,16 @@ void PermuteConcatPermute::SetUp() { auto const_input_values_size = InferenceEngine::details::product(const_input_shape_vec); auto const_input_values = std::vector(const_input_values_size, 0); - auto const_input_1 = ngraph::opset9::Constant::create(ngPrc, constinput_shape, const_input_values); - auto const_input_2 = ngraph::opset9::Constant::create(ngPrc, constinput_shape, const_input_values); - auto const_input_3 = ngraph::opset9::Constant::create(ngPrc, constinput_shape, const_input_values); + auto const_input_1 = ov::op::v0::Constant::create(ngPrc, constinput_shape, const_input_values); + auto const_input_2 = ov::op::v0::Constant::create(ngPrc, constinput_shape, const_input_values); + auto const_input_3 = ov::op::v0::Constant::create(ngPrc, constinput_shape, const_input_values); - auto concat = std::make_shared( + auto concat = std::make_shared( ngraph::OutputVector{const_input_1, const_input_2, permute_1, const_input_3}, 0); auto permute_params_2 = - ngraph::opset9::Constant::create(ngraph::element::i64, ngraph::Shape{permute_2_param.size()}, permute_2_param); - auto permute_2 = std::make_shared(concat, permute_params_2); + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{permute_2_param.size()}, permute_2_param); + auto permute_2 = std::make_shared(concat, permute_params_2); function = std::make_shared(permute_2, ngraph::ParameterVector{input_param}, "permute_concat_permute"); diff --git 
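The permute_concat_concat_permute and permute_concat_permute hunks above repeat one construction: an i64 Constant holding the permutation order feeds a Transpose, and the transposed tensor is concatenated with constant inputs. A minimal sketch against the ov::op classes the patch migrates to (v1::Transpose and v0::Concat assumed, since the template arguments are not shown):

#include <memory>
#include <vector>

#include "openvino/core/model.hpp"
#include "openvino/op/concat.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/transpose.hpp"

std::shared_ptr<ov::Model> make_permute_concat_sketch() {
    auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{3, 4});
    // Permutation order {1, 0} stored in an i64 Constant, as in the tests above.
    auto order = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{2}, std::vector<int64_t>{1, 0});
    auto transposed = std::make_shared<ov::op::v1::Transpose>(input, order);  // 3x4 -> 4x3
    auto padding = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{1, 3}, std::vector<float>(3, 0.f));
    // Concatenate the constant row on top of the transposed data along axis 0 -> 5x3.
    auto concat = std::make_shared<ov::op::v0::Concat>(ov::OutputVector{padding, transposed}, 0);
    return std::make_shared<ov::Model>(concat, ov::ParameterVector{input}, "permute_concat_sketch");
}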
a/src/tests/functional/shared_test_classes/src/subgraph/quantized_convolution_backprop_data.cpp b/src/tests/functional/shared_test_classes/src/subgraph/quantized_convolution_backprop_data.cpp index 0e48dba65c233d..a210c2091e7652 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/quantized_convolution_backprop_data.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/quantized_convolution_backprop_data.cpp @@ -15,7 +15,7 @@ std::string QuantConvBackpropDataLayerTest::getTestCaseName(const testing::TestP ov::Shape inputShapes; std::string targetDevice; std::tie(groupConvBackpropDataParams, element_type, inputShapes, targetDevice) = obj.param; - ngraph::op::PadType padType; + ov::op::PadType padType; ov::Shape kernel, stride, dilation; std::vector padBegin, padEnd; size_t convOutChannels; diff --git a/src/tests/functional/shared_test_classes/src/subgraph/quantized_group_convolution_backprop_data.cpp b/src/tests/functional/shared_test_classes/src/subgraph/quantized_group_convolution_backprop_data.cpp index a97952389171a2..3c5134b0dbdd6d 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/quantized_group_convolution_backprop_data.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/quantized_group_convolution_backprop_data.cpp @@ -76,7 +76,7 @@ void QuantGroupConvBackpropDataLayerTest::SetUp() { auto weightsFq = ngraph::builder::makeFakeQuantize(weightsNode, element_type, quantLevels, weightsFqConstShapes); - auto groupConvBackpropData = std::dynamic_pointer_cast( + auto groupConvBackpropData = std::dynamic_pointer_cast( ngraph::builder::makeGroupConvolutionBackpropData(dataFq, weightsFq, element_type, stride, padBegin, padEnd, dilation, padType)); ov::ResultVector results{std::make_shared(groupConvBackpropData)}; diff --git a/src/tests/functional/shared_test_classes/src/subgraph/reduce_eltwise.cpp b/src/tests/functional/shared_test_classes/src/subgraph/reduce_eltwise.cpp index 4c8dbd44e041b8..ca4bec6cacdd64 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/reduce_eltwise.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/reduce_eltwise.cpp @@ -50,16 +50,16 @@ void ReduceEltwiseTest::SetUp() { FAIL() << "Reduce op doesn't support operation type: " << opType; } auto reductionAxesNode = std::dynamic_pointer_cast( - std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape(shapeAxes), axes)); + std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape(shapeAxes), axes)); - auto reduce = std::make_shared(params[0], reductionAxesNode, keepDims); + auto reduce = std::make_shared(params[0], reductionAxesNode, keepDims); std::vector constShape(reduce.get()->get_output_partial_shape(0).rank().get_length(), 1); ASSERT_GT(constShape.size(), 2); constShape[2] = inputShape.back(); auto constant = ngraph::builder::makeConstant(ngPrc, constShape, {}, true); auto eltw = ngraph::builder::makeEltwise(reduce, constant, ngraph::helpers::EltwiseTypes::MULTIPLY); - ngraph::ResultVector results{std::make_shared(eltw)}; + ngraph::ResultVector results{std::make_shared(eltw)}; function = std::make_shared(results, params, "ReduceEltwise"); } } // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/subgraph/relu_split_reshape.cpp b/src/tests/functional/shared_test_classes/src/subgraph/relu_split_reshape.cpp index c14250547f4de8..1779f62ec1ff44 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/relu_split_reshape.cpp +++ 
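The reduce_eltwise hunk above builds a reduction over constant axes followed by an elementwise Multiply. With the stripped template arguments filled in by assumption (v0::Constant for the axes, v1::ReduceSum, v1::Multiply), the shape of that subgraph is approximately:

#include <memory>
#include <vector>

#include "openvino/core/model.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/multiply.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/reduce_sum.hpp"
#include "openvino/op/result.hpp"

std::shared_ptr<ov::Model> make_reduce_eltwise_sketch() {
    auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{2, 3, 4, 5});
    // Reduce over axes {1, 2}, keeping dims so the multiply below broadcasts cleanly.
    auto axes = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{2}, std::vector<int64_t>{1, 2});
    auto reduce = std::make_shared<ov::op::v1::ReduceSum>(data, axes, /*keep_dims=*/true);  // 2x1x1x5
    auto scale = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{1, 1, 1, 5}, std::vector<float>(5, 0.5f));
    auto mul = std::make_shared<ov::op::v1::Multiply>(reduce, scale);
    auto result = std::make_shared<ov::op::v0::Result>(mul);
    return std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{data}, "ReduceEltwiseSketch");
}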
b/src/tests/functional/shared_test_classes/src/subgraph/relu_split_reshape.cpp @@ -35,7 +35,7 @@ void ReluSplitReshape::SetUp() { auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - auto relu = std::make_shared(params[0]); + auto relu = std::make_shared(params[0]); auto split_axis_op = std::make_shared(ov::element::Type_t::i64, ov::Shape{}, std::vector{static_cast(splitAxis)}); auto split = std::make_shared(relu, split_axis_op, splitNum); @@ -43,9 +43,9 @@ void ReluSplitReshape::SetUp() { auto shape = split->get_output_shape(0); shape[shape.size() - 2] *= 2; shape[shape.size() - 1] /= 2; - auto reshape_const = std::make_shared(ngraph::element::Type_t::i64, + auto reshape_const = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{shape.size()}, shape); - auto reshape = std::make_shared(split->output(0), reshape_const, false); + auto reshape = std::make_shared(split->output(0), reshape_const, false); function = std::make_shared(reshape, params, "ReluSplitReshape"); } diff --git a/src/tests/functional/shared_test_classes/src/subgraph/reshape_permute_reshape.cpp b/src/tests/functional/shared_test_classes/src/subgraph/reshape_permute_reshape.cpp index 541bbcbd7ba08d..36ea849035635c 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/reshape_permute_reshape.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/reshape_permute_reshape.cpp @@ -27,18 +27,18 @@ namespace SubgraphTestsDefinitions { auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); std::vector shape_input{1, input_dim}; ov::ParameterVector input {std::make_shared(ngPrc, ov::Shape(shape_input))}; - auto reshape1_pattern = std::make_shared(ngraph::element::i64, + auto reshape1_pattern = std::make_shared(ngraph::element::i64, ngraph::Shape{inputs[0].size()}, inputs[0]); - auto reshape1 = std::make_shared(input[0], reshape1_pattern, false); - auto permute_params = std::make_shared(ngraph::element::i64, + auto reshape1 = std::make_shared(input[0], reshape1_pattern, false); + auto permute_params = std::make_shared(ngraph::element::i64, ngraph::Shape{inputs[1].size()}, inputs[1]); - auto permute = std::make_shared(reshape1, permute_params); - auto reshape2_pattern = std::make_shared(ngraph::element::i64, + auto permute = std::make_shared(reshape1, permute_params); + auto reshape2_pattern = std::make_shared(ngraph::element::i64, ngraph::Shape{2}, std::vector{1, input_dim}); - auto reshape2 = std::make_shared(permute, reshape2_pattern, false); + auto reshape2 = std::make_shared(permute, reshape2_pattern, false); function = std::make_shared(reshape2, input, "reshape_permute_reshape"); } } // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/subgraph/scale_shift.cpp b/src/tests/functional/shared_test_classes/src/subgraph/scale_shift.cpp index 45109a87d2349c..87c3ec6a3c53f5 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/scale_shift.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/scale_shift.cpp @@ -32,10 +32,10 @@ namespace SubgraphTestsDefinitions { paramsShape = ngraph::Shape(inputShapes[1]); auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); ov::ParameterVector paramsIn{std::make_shared(ngPrc, ov::Shape(inputShapes[0]))}; - auto mul_const = std::make_shared(ngPrc, paramsShape, scale); - auto mul = std::make_shared(paramsIn[0], mul_const); - auto add_const = 
std::make_shared(ngPrc, paramsShape, shift); - auto add = std::make_shared(mul, add_const); + auto mul_const = std::make_shared(ngPrc, paramsShape, scale); + auto mul = std::make_shared(paramsIn[0], mul_const); + auto add_const = std::make_shared(ngPrc, paramsShape, shift); + auto add = std::make_shared(mul, add_const); function = std::make_shared(add, paramsIn, "scale_shift"); } } // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/subgraph/scaleshift_conv_scaleshift.cpp b/src/tests/functional/shared_test_classes/src/subgraph/scaleshift_conv_scaleshift.cpp index a03023d57cfbe5..b92caa7bfb06f6 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/scaleshift_conv_scaleshift.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/scaleshift_conv_scaleshift.cpp @@ -64,8 +64,8 @@ void ScaleShiftAfterConvTest::SetUp() { ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape(inputShape))}; std::vector convInputShape = {1, inputChannels, 1, inputShape[0] * inputShape[1] / inputChannels}; - auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape); - auto reshape1 = std::make_shared(params[0], reshapePattern1, false); + auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape); + auto reshape1 = std::make_shared(params[0], reshapePattern1, false); auto filterWeights = ov::test::utils::generate_float_numbers(outputChannels * convInputShape[1] * kernelShape[0] * kernelShape[1], -0.2f, 0.2f); @@ -74,24 +74,24 @@ void ScaleShiftAfterConvTest::SetUp() { {kernelShape[0], kernelShape[1]}, {kernelShape[0] > 1 ? stride : 1, stride}, {0, 0}, - { 0, 0 }, { 1, 1 }, ngraph::op::PadType::VALID, outputChannels, false, filterWeights); + { 0, 0 }, { 1, 1 }, ov::op::PadType::VALID, outputChannels, false, filterWeights); auto widthAfterConv = (convInputShape[3] - kernelShape[1]) / stride + 1; std::vector outFormShapes = { 1, outputChannels * widthAfterConv, 1, 1 }; - auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, outFormShapes); - auto reshape2 = std::make_shared(conv, reshapePattern2, false); + auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, outFormShapes); + auto reshape2 = std::make_shared(conv, reshapePattern2, false); auto scale = ov::test::utils::generate_float_numbers(outputChannels * widthAfterConv, -2.0f, 2.0f); auto shift = ov::test::utils::generate_float_numbers(outputChannels * widthAfterConv, -2.0f, 2.0f); - auto mul_const = std::make_shared(ngPrc, outFormShapes, scale); - auto mul = std::make_shared(reshape2, mul_const); - auto add_const = std::make_shared(ngPrc, outFormShapes, shift); - auto add = std::make_shared(mul, add_const); + auto mul_const = std::make_shared(ngPrc, outFormShapes, scale); + auto mul = std::make_shared(reshape2, mul_const); + auto add_const = std::make_shared(ngPrc, outFormShapes, shift); + auto add = std::make_shared(mul, add_const); outFormShapes = {1, outputChannels * widthAfterConv }; - auto reshapePattern3 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes); - auto reshape3 = std::make_shared(add, reshapePattern3, false); + auto reshapePattern3 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes); + auto reshape3 = std::make_shared(add, reshapePattern3, false); function = std::make_shared(mul, params, "ScaleShiftAfterConvTest"); } @@ -153,19 +153,19 @@ void 
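The scale_shift and scaleshift_conv_scaleshift changes above all reduce to the same pair of ops: an elementwise Multiply by a constant followed by an Add of a constant. Assuming the usual v0::Constant / v1::Multiply / v1::Add classes behind the stripped template arguments, the pattern is roughly:

#include <memory>
#include <vector>

#include "openvino/core/model.hpp"
#include "openvino/op/add.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/multiply.hpp"
#include "openvino/op/parameter.hpp"

// scale and shift must have the same length; both are broadcast over the batch dimension.
std::shared_ptr<ov::Model> make_scale_shift_sketch(const std::vector<float>& scale,
                                                   const std::vector<float>& shift) {
    const ov::Shape shape{1, scale.size()};
    auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, shape);
    auto mul_const = std::make_shared<ov::op::v0::Constant>(ov::element::f32, shape, scale);
    auto mul = std::make_shared<ov::op::v1::Multiply>(input, mul_const);  // y = x * scale
    auto add_const = std::make_shared<ov::op::v0::Constant>(ov::element::f32, shape, shift);
    auto add = std::make_shared<ov::op::v1::Add>(mul, add_const);         // y = x * scale + shift
    return std::make_shared<ov::Model>(add, ov::ParameterVector{input}, "scale_shift_sketch");
}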
ScaleShiftBeforeConvTest::SetUp() { ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape(inputShape))}; std::vector convInputShape = {1, inputShape[1], 1, 1}; - auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape); - auto reshape1 = std::make_shared(params[0], reshapePattern1, false); + auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape); + auto reshape1 = std::make_shared(params[0], reshapePattern1, false); auto scale = ov::test::utils::generate_float_numbers(convInputShape[1], -2.0f, 2.0f); auto shift = ov::test::utils::generate_float_numbers(convInputShape[1], -2.0f, 2.0f); - auto mul_const = std::make_shared(ngPrc, convInputShape, scale); - auto mul = std::make_shared(reshape1, mul_const); - auto add_const = std::make_shared(ngPrc, convInputShape, shift); - auto add = std::make_shared(mul, add_const); + auto mul_const = std::make_shared(ngPrc, convInputShape, scale); + auto mul = std::make_shared(reshape1, mul_const); + auto add_const = std::make_shared(ngPrc, convInputShape, shift); + auto add = std::make_shared(mul, add_const); convInputShape = {1, inputChannels, 1, inputShape[1] / inputChannels}; - auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape); - auto reshape2 = std::make_shared(mul, reshapePattern2, false); + auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape); + auto reshape2 = std::make_shared(mul, reshapePattern2, false); auto filterWeights = ov::test::utils::generate_float_numbers(outputChannels * convInputShape[1] * kernelShape[0] * kernelShape[1], -0.1f, 0.1f); @@ -174,12 +174,12 @@ void ScaleShiftBeforeConvTest::SetUp() { {kernelShape[0], kernelShape[1]}, {kernelShape[0] > 1 ? 
stride : 1, stride}, {0, 0}, - { 0, 0 }, { 1, 1 }, ngraph::op::PadType::VALID, outputChannels, false, filterWeights); + { 0, 0 }, { 1, 1 }, ov::op::PadType::VALID, outputChannels, false, filterWeights); auto widthAfterReshape = (convInputShape[3] - kernelShape[1]) / stride + 1; std::vector outFormShapes = {1, outputChannels * widthAfterReshape }; - auto reshapePattern3 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes); - auto reshape3 = std::make_shared(conv, reshapePattern3, false); + auto reshapePattern3 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes); + auto reshape3 = std::make_shared(conv, reshapePattern3, false); function = std::make_shared(reshape3, params, "ScaleShiftBeforeConvTest"); } diff --git a/src/tests/functional/shared_test_classes/src/subgraph/softsign.cpp b/src/tests/functional/shared_test_classes/src/subgraph/softsign.cpp index c24296d44dd017..efd77ea28a09f1 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/softsign.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/softsign.cpp @@ -36,16 +36,16 @@ void SoftsignTest::SetUp() { ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape(inputShape))}; - auto abs = std::make_shared(params[0]); + auto abs = std::make_shared(params[0]); - auto const_1 = ngraph::opset1::Constant::create(ngPrc, ngraph::Shape{}, {1}); - auto const_neg_1 = ngraph::opset1::Constant::create(ngPrc, ngraph::Shape{}, {-1}); + auto const_1 = ov::op::v0::Constant::create(ngPrc, ngraph::Shape{}, {1}); + auto const_neg_1 = ov::op::v0::Constant::create(ngPrc, ngraph::Shape{}, {-1}); - auto add = std::make_shared(abs, const_1); - auto power = std::make_shared(add, const_neg_1); + auto add = std::make_shared(abs, const_1); + auto power = std::make_shared(add, const_neg_1); - auto mul = std::make_shared(power, params[0]); - ngraph::ResultVector results{ std::make_shared(mul) }; + auto mul = std::make_shared(power, params[0]); + ngraph::ResultVector results{ std::make_shared(mul) }; function = std::make_shared(results, params, "SoftSignTest"); } @@ -66,14 +66,14 @@ std::shared_ptr SoftsignTest::GenerateNgraphFriendlySoftSign() auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape(inputShape))}; - auto abs = std::make_shared(params[0]); + auto abs = std::make_shared(params[0]); auto constant_0 = ngraph::builder::makeConstant(ngPrc, inputShape, { 1 }); - auto add = std::make_shared(abs, constant_0); + auto add = std::make_shared(abs, constant_0); auto constant_1 = ngraph::builder::makeConstant(ngPrc, inputShape, { -1 }); - auto power = std::make_shared(add, constant_1); - auto mul = std::make_shared(power, params[0]); + auto power = std::make_shared(add, constant_1); + auto mul = std::make_shared(power, params[0]); - ngraph::ResultVector results{ std::make_shared(mul) }; + ngraph::ResultVector results{ std::make_shared(mul) }; return std::make_shared(results, params, "SoftSignTest"); } } // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/subgraph/split_concat_multi_inputs.cpp b/src/tests/functional/shared_test_classes/src/subgraph/split_concat_multi_inputs.cpp index bbe77145aeabc4..0c520da17e8d09 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/split_concat_multi_inputs.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/split_concat_multi_inputs.cpp @@ -45,12 +45,12 @@ void 
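The softsign.cpp hunk above decomposes softsign(x) = x / (1 + |x|) into Abs, Add, Power(-1) and Multiply nodes. With the op versions assumed (v0::Abs, v1::Add, v1::Power, v1::Multiply; they are not visible in the patch), the decomposition reads:

#include <memory>

#include "openvino/core/model.hpp"
#include "openvino/op/abs.hpp"
#include "openvino/op/add.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/multiply.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/power.hpp"

std::shared_ptr<ov::Model> make_softsign_sketch() {
    auto x = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 16});
    auto abs = std::make_shared<ov::op::v0::Abs>(x);                            // |x|
    auto one = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{}, {1.0f});
    auto neg_one = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{}, {-1.0f});
    auto denom = std::make_shared<ov::op::v1::Add>(abs, one);                   // 1 + |x|
    auto recip = std::make_shared<ov::op::v1::Power>(denom, neg_one);           // (1 + |x|)^-1
    auto softsign = std::make_shared<ov::op::v1::Multiply>(recip, x);           // x / (1 + |x|)
    return std::make_shared<ov::Model>(softsign, ov::ParameterVector{x}, "softsign_sketch");
}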
SplitConcatMultiInputsTest::SetUp() { ngraph::OutputVector concatInputs = split->outputs(); - auto concat = std::make_shared(concatInputs, 1); + auto concat = std::make_shared(concatInputs, 1); if (withFC) { auto mul_const = ngraph::builder::makeConstant(ngPrc, { 10, inputShape[1] }, ov::test::utils::generate_float_numbers(10 * inputShape[1], -0.2f, 0.2f), false); - auto matmul = std::make_shared(concat, mul_const, false, true); + auto matmul = std::make_shared(concat, mul_const, false, true); function = std::make_shared(matmul, params, "SplitConcatMultiInputs"); } else { function = std::make_shared(concat, params, "SplitConcatMultiInputs"); diff --git a/src/tests/functional/shared_test_classes/src/subgraph/split_conv.cpp b/src/tests/functional/shared_test_classes/src/subgraph/split_conv.cpp index ac082acad8b5f3..2085fd03403ae2 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/split_conv.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/split_conv.cpp @@ -67,12 +67,12 @@ void SplitConvTest::SetUp() { auto split_axis_op = std::make_shared(ov::element::Type_t::i64, ov::Shape{}, std::vector{splitAxis}); auto split = std::make_shared(params[0], split_axis_op, splitsNum); - auto relu1 = std::make_shared(split->output(0)); + auto relu1 = std::make_shared(split->output(0)); - auto relu2 = std::make_shared(split->output(1)); + auto relu2 = std::make_shared(split->output(1)); std::vector convInputShape = {1, inputChannels, 1, inputShape[0] * inputShape[1] / inputChannels / 2}; - auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape); - auto reshape1 = std::make_shared(relu2, reshapePattern1, false); + auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape); + auto reshape1 = std::make_shared(relu2, reshapePattern1, false); auto filterWeights = ov::test::utils::generate_float_numbers(outputChannels * convInputShape[1] * kernelShape[0] * kernelShape[1], -0.2f, 0.2f); @@ -81,16 +81,16 @@ void SplitConvTest::SetUp() { {kernelShape[0], kernelShape[1]}, {kernelShape[0] > 1 ? 
stride : 1, stride}, {0, 0}, - { 0, 0 }, { 1, 1 }, ngraph::op::PadType::VALID, outputChannels, false, filterWeights); + { 0, 0 }, { 1, 1 }, ov::op::PadType::VALID, outputChannels, false, filterWeights); auto widthAfterConv = (convInputShape[3] - kernelShape[1]) / stride + 1; std::vector outFormShapes = {1, outputChannels * widthAfterConv }; - auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes); - auto reshape2 = std::make_shared(conv, reshapePattern2, false); + auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes); + auto reshape2 = std::make_shared(conv, reshapePattern2, false); - ngraph::ResultVector results{std::make_shared(relu1), - std::make_shared(reshape2)}; + ngraph::ResultVector results{std::make_shared(relu1), + std::make_shared(reshape2)}; function = std::make_shared(results, params, "SplitConvTest"); } diff --git a/src/tests/functional/shared_test_classes/src/subgraph/split_relu.cpp b/src/tests/functional/shared_test_classes/src/subgraph/split_relu.cpp index cb9e32b049170a..b885ceb2343c2a 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/split_relu.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/split_relu.cpp @@ -38,8 +38,8 @@ namespace SubgraphTestsDefinitions { ngraph::ResultVector results; for (size_t i : connect_index) { - auto relu = std::make_shared(split->output(i)); - results.push_back(std::make_shared(relu)); + auto relu = std::make_shared(split->output(i)); + results.push_back(std::make_shared(relu)); } function = std::make_shared(results, input, "split_relu"); } diff --git a/src/tests/functional/shared_test_classes/src/subgraph/split_trivial_permute_concat.cpp b/src/tests/functional/shared_test_classes/src/subgraph/split_trivial_permute_concat.cpp index b03a01d175f763..d371c48b4c83ad 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/split_trivial_permute_concat.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/split_trivial_permute_concat.cpp @@ -38,13 +38,13 @@ namespace SubgraphTestsDefinitions { std::make_shared(ov::element::Type_t::i64, ov::Shape{}, std::vector{static_cast(splitAxis)}); auto split = std::make_shared(input[0], split_axis_op, 2); - auto permute_in_params = std::make_shared(ngraph::element::i64, + auto permute_in_params = std::make_shared(ngraph::element::i64, ngraph::Shape{ 4 }, ngraph::Shape{ {0, 3, 2, 1} }); - auto permute_0 = std::make_shared(split->output(0), permute_in_params); - auto permute_1 = std::make_shared(split->output(1), permute_in_params); + auto permute_0 = std::make_shared(split->output(0), permute_in_params); + auto permute_1 = std::make_shared(split->output(1), permute_in_params); - auto concat = std::make_shared(ngraph::OutputVector{ permute_0, permute_1 }, concatAxis); + auto concat = std::make_shared(ngraph::OutputVector{ permute_0, permute_1 }, concatAxis); auto act = ngraph::builder::makeActivation(concat, ngPrc, ngraph::helpers::ActivationTypes::Relu); function = std::make_shared(act, input, "split_trivial_permute_concat"); } diff --git a/src/tests/functional/shared_test_classes/src/subgraph/strided_slice.cpp b/src/tests/functional/shared_test_classes/src/subgraph/strided_slice.cpp index c7515c058eab99..bcb66ebd598bee 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/strided_slice.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/strided_slice.cpp @@ -47,7 +47,7 @@ void StridedSliceTest::SetUp() { auto ngPrc = 
FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape(ssParams.inputShape))}; - auto relu = std::make_shared(params[0]); + auto relu = std::make_shared(params[0]); ov::Shape constShape = {ssParams.begin.size()}; auto beginNode = std::make_shared(ov::element::i64, constShape, ssParams.begin.data()); @@ -64,7 +64,7 @@ void StridedSliceTest::SetUp() { ssParams.shrinkAxisMask, ssParams.ellipsisAxisMask); - ngraph::ResultVector results{std::make_shared(ss)}; + ngraph::ResultVector results{std::make_shared(ss)}; function = std::make_shared(results, params, "strided_slice"); } diff --git a/src/tests/functional/shared_test_classes/src/subgraph/stridedslice_concat.cpp b/src/tests/functional/shared_test_classes/src/subgraph/stridedslice_concat.cpp index dcd42366206b0e..e369bf8d9629c8 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/stridedslice_concat.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/stridedslice_concat.cpp @@ -46,7 +46,7 @@ void SliceConcatTest::SetUp() { ngraph::Output input = params[0]; if (inputShape[0] != 1 || inputShape.size() != 2) { - input = std::make_shared(params[0], + input = std::make_shared(params[0], ngraph::builder::makeConstant(ngraph::element::i64, ngraph::Shape{inputShape.size()}, inputShape), false); } diff --git a/src/tests/functional/shared_test_classes/src/subgraph/stridedslice_conv.cpp b/src/tests/functional/shared_test_classes/src/subgraph/stridedslice_conv.cpp index b5fbec7eb83143..af8fffa45d2588 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/stridedslice_conv.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/stridedslice_conv.cpp @@ -81,7 +81,7 @@ void SliceConvTest::SetUp() { {kernelShape[0], kernelShape[1]}, {kernelShape[0] > 1 ? 
stride : 1, stride}, {0, 0}, - { 0, 0 }, { 1, 1 }, ngraph::op::PadType::VALID, outputChannels, false, filterWeights); + { 0, 0 }, { 1, 1 }, ov::op::PadType::VALID, outputChannels, false, filterWeights); function = std::make_shared(conv, params, "StridedSliceConvTest"); } diff --git a/src/tests/functional/shared_test_classes/src/subgraph/tensor_names.cpp b/src/tests/functional/shared_test_classes/src/subgraph/tensor_names.cpp index eaacac921bcb30..4506e49464e4bd 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/tensor_names.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/tensor_names.cpp @@ -17,16 +17,16 @@ std::string TensorNamesTest::getTestCaseName(const testing::TestParamInfoGetParam(); - auto parameter = std::make_shared(ngraph::element::Type_t::f32, ngraph::Shape{1, 3, 10, 10}); + auto parameter = std::make_shared(ngraph::element::Type_t::f32, ngraph::Shape{1, 3, 10, 10}); parameter->set_friendly_name("parameter"); parameter->get_output_tensor(0).set_names({"input"}); - auto relu_prev = std::make_shared(parameter); + auto relu_prev = std::make_shared(parameter); relu_prev->set_friendly_name("relu_prev"); relu_prev->get_output_tensor(0).set_names({"relu,prev_t", "identity_prev_t"}); - auto relu = std::make_shared(relu_prev); + auto relu = std::make_shared(relu_prev); relu->set_friendly_name("relu"); relu->get_output_tensor(0).set_names({"relu,t", "identity"}); - const ngraph::ResultVector results{std::make_shared(relu)}; + const ngraph::ResultVector results{std::make_shared(relu)}; results[0]->set_friendly_name("out"); ngraph::ParameterVector params{parameter}; function = std::make_shared(results, params, "TensorNames"); diff --git a/src/tests/functional/shared_test_classes/src/subgraph/transpose_add.cpp b/src/tests/functional/shared_test_classes/src/subgraph/transpose_add.cpp index 64ee23719dce53..e5391c666116b4 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/transpose_add.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/transpose_add.cpp @@ -36,12 +36,12 @@ void TransposeAdd::SetUp() { ngraph::Shape permute_order(input_shape.size()); std::iota(std::begin(permute_order), std::end(permute_order), 0); std::iter_swap(std::end(permute_order) - 2, std::end(permute_order) - 1); - auto transpose_in_params = std::make_shared(ngraph::element::i64, + auto transpose_in_params = std::make_shared(ngraph::element::i64, ngraph::Shape{permute_order.size()}, permute_order); - auto transpose_in = std::make_shared(params[0], transpose_in_params); + auto transpose_in = std::make_shared(params[0], transpose_in_params); auto add_const = ngraph::builder::makeConstant(ngPrc, transpose_in->get_output_shape(0), {}, true); - auto add = std::make_shared(transpose_in, add_const); + auto add = std::make_shared(transpose_in, add_const); function = std::make_shared(add, params, "transpose_add"); } diff --git a/src/tests/functional/shared_test_classes/src/subgraph/transpose_conv_transpose_squeeze.cpp b/src/tests/functional/shared_test_classes/src/subgraph/transpose_conv_transpose_squeeze.cpp index 67a9401dea826a..716ffaba3aa2f4 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/transpose_conv_transpose_squeeze.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/transpose_conv_transpose_squeeze.cpp @@ -52,29 +52,29 @@ void TransposeConvTest::SetUp() { std::vector nchw_order = { 0, 3, 1, 2 }; std::vector nhwc_order = { 0, 2, 3, 1 }; std::vector conv_input_shape = {1, 1, input_shape[0] * input_shape[1] / input_channels, 
input_channels}; - auto reshape_pattern = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{conv_input_shape.size()}, conv_input_shape); - auto reshape = std::make_shared(params[0], reshape_pattern, false); + auto reshape_pattern = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{conv_input_shape.size()}, conv_input_shape); + auto reshape = std::make_shared(params[0], reshape_pattern, false); - const auto input_order1 = std::make_shared(ngraph::element::i64, + const auto input_order1 = std::make_shared(ngraph::element::i64, ngraph::Shape({conv_input_shape.size()}), nchw_order); - auto transpose1 = std::make_shared(reshape, input_order1); + auto transpose1 = std::make_shared(reshape, input_order1); float weight_val = 0.02; auto filter_weights_node = ngraph::builder::makeConstant(ng_prc, {output_channels, input_channels, kernel_shape[0], kernel_shape[1]}, { weight_val }); - auto conv = std::make_shared(transpose1, filter_weights_node, strides, std::vector{ 0, 0 }, + auto conv = std::make_shared(transpose1, filter_weights_node, strides, std::vector{ 0, 0 }, std::vector{ 0, 0 }, std::vector{ 1, 1 }, - ngraph::op::PadType::VALID); + ov::op::PadType::VALID); - const auto input_order2 = std::make_shared(ngraph::element::i64, + const auto input_order2 = std::make_shared(ngraph::element::i64, ngraph::Shape({conv_input_shape.size()}), nhwc_order); - auto transpose2 = std::make_shared(conv, input_order2); + auto transpose2 = std::make_shared(conv, input_order2); - auto constant_squeeze = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{1}, std::vector{0}); - auto squeeze = std::make_shared(transpose2, constant_squeeze); + auto constant_squeeze = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{1}, std::vector{0}); + auto squeeze = std::make_shared(transpose2, constant_squeeze); function = std::make_shared(squeeze, params, "transposeConv"); } diff --git a/src/tests/functional/shared_test_classes/src/subgraph/trivial_concat.cpp b/src/tests/functional/shared_test_classes/src/subgraph/trivial_concat.cpp index 47fcbcccf7ecf8..a43946decdf7cd 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/trivial_concat.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/trivial_concat.cpp @@ -32,24 +32,24 @@ void TrivialConcatLayerTest::SetUp() { auto input_relu = ngraph::builder::makeActivation(params[0], ngPrc, ngraph::helpers::ActivationTypes::Relu); - auto input_reshape_pattern = std::make_shared(ngraph::element::i64, + auto input_reshape_pattern = std::make_shared(ngraph::element::i64, ngraph::Shape{inputShape.size()}, std::vector(inputShape)); - auto input = std::make_shared(input_relu, input_reshape_pattern, false); + auto input = std::make_shared(input_relu, input_reshape_pattern, false); auto constant_values = ov::test::utils::generate_float_numbers(total_size, 15.5f, 16.1f); auto constant = ngraph::builder::makeConstant(ngPrc, std::vector({1, total_size}), constant_values); - auto first_reshape = std::make_shared(constant, input_reshape_pattern, false); + auto first_reshape = std::make_shared(constant, input_reshape_pattern, false); - auto concat = std::make_shared(ngraph::OutputVector({first_reshape, input}), axis); + auto concat = std::make_shared(ngraph::OutputVector({first_reshape, input}), axis); - auto final_reshape_pattern = std::make_shared(ngraph::element::i64, + auto final_reshape_pattern = std::make_shared(ngraph::element::i64, ngraph::Shape{2}, std::vector({1, 2 * total_size})); - auto final_reshape = 
std::make_shared(concat, final_reshape_pattern, false); + auto final_reshape = std::make_shared(concat, final_reshape_pattern, false); auto act = ngraph::builder::makeActivation(final_reshape, ngPrc, ngraph::helpers::ActivationTypes::Relu); - ngraph::ResultVector results{std::make_shared(act)}; + ngraph::ResultVector results{std::make_shared(act)}; function = std::make_shared(results, params, "trivial_concat"); } } // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/subgraph/two_fake_quantize_to_fullyconnected.cpp b/src/tests/functional/shared_test_classes/src/subgraph/two_fake_quantize_to_fullyconnected.cpp index 7fa90952ec1dd5..a02956f2b8f609 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/two_fake_quantize_to_fullyconnected.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/two_fake_quantize_to_fullyconnected.cpp @@ -116,25 +116,25 @@ void FakeQuantizeSubgraphTest::SetUp() { auto inputFQNode = ngraph::builder::makeFakeQuantize(params[0], ngraph::element::f32, levels[0], constShape[0], { inputDataMin }, { inputDataMax }, { inputDataMin }, { inputDataMax }); - auto weightsFQNode = std::make_shared(const_param, + auto weightsFQNode = std::make_shared(const_param, lowNode, highNode, lowNode, highNode, levels[1]); - auto inputFQ = std::dynamic_pointer_cast(inputFQNode); - auto weightsFQ = std::dynamic_pointer_cast(weightsFQNode); - auto matmul = std::make_shared(inputFQ, weightsFQ, false, true); + auto inputFQ = std::dynamic_pointer_cast(inputFQNode); + auto weightsFQ = std::dynamic_pointer_cast(weightsFQNode); + auto matmul = std::make_shared(inputFQ, weightsFQ, false, true); std::shared_ptr biases_node; if (biases) { auto const_bias = ngraph::builder::makeConstant(ngPrc, {1, constShape[1][0]}, std::vector{ -1.0f }); - biases_node = std::make_shared(matmul, const_bias); + biases_node = std::make_shared(matmul, const_bias); } else { biases_node = matmul; } - auto sigmoid = std::make_shared(biases_node); - ngraph::ResultVector results{std::make_shared(sigmoid)}; + auto sigmoid = std::make_shared(biases_node); + ngraph::ResultVector results{std::make_shared(sigmoid)}; if (biases) { - auto sigmoid_2 = std::make_shared(inputFQ); - results.push_back(std::make_shared(sigmoid_2)); + auto sigmoid_2 = std::make_shared(inputFQ); + results.push_back(std::make_shared(sigmoid_2)); } function = std::make_shared(results, params, "fakeQuantizeSubgraph"); configuration = config.second; diff --git a/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/markup_bias.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/markup_bias.hpp index 7fdee03ef0fa8e..399dde070c22f3 100644 --- a/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/markup_bias.hpp +++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/markup_bias.hpp @@ -5,7 +5,7 @@ #pragma once #include -#include +#include "openvino/core/model.hpp" #include "common/builders.hpp" diff --git a/src/tests/ov_helpers/ov_lpt_models/src/align_concat_quantization_parameters.cpp b/src/tests/ov_helpers/ov_lpt_models/src/align_concat_quantization_parameters.cpp index a50223294c9cfb..b6ab0d86ca5d54 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/align_concat_quantization_parameters.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/align_concat_quantization_parameters.cpp @@ -4,7 +4,7 @@ #include "ov_lpt_models/align_concat_quantization_parameters.hpp" -#include +#include "openvino/opsets/opset1.hpp" #include #include "low_precision/network_helper.hpp" diff --git 
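Most of the remaining hunks in the ov_lpt_models helpers only swap includes. Where a removed line appears as a bare "-#include", the bracketed path was lost in rendering; the replacement is the quoted OpenVINO header shown on the matching "+" line. The removed path below is an assumption for illustration, not taken from the patch:

// Before (assumed form of the stripped "-#include" lines):
//   #include <ngraph/opsets/opset1.hpp>
// After, as added by this patch:
#include "openvino/opsets/opset1.hpp"
#include "openvino/core/model.hpp"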
a/src/tests/ov_helpers/ov_lpt_models/src/assign_and_read_value.cpp b/src/tests/ov_helpers/ov_lpt_models/src/assign_and_read_value.cpp index 13b4ce29715630..52b53745eaa27b 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/assign_and_read_value.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/assign_and_read_value.cpp @@ -6,12 +6,12 @@ #include -#include -#include -#include +#include "openvino/opsets/opset1.hpp" +#include "openvino/opsets/opset3.hpp" +#include "openvino/opsets/opset6.hpp" #include "ov_models/subgraph_builders.hpp" #include "openvino/op/util/variable.hpp" -#include +#include "openvino/op/util/assign_base.hpp" #include "ov_lpt_models/common/builders.hpp" #include "ov_lpt_models/assign_and_read_value.hpp" diff --git a/src/tests/ov_helpers/ov_lpt_models/src/avg_pool.cpp b/src/tests/ov_helpers/ov_lpt_models/src/avg_pool.cpp index c3f409a4acd883..b8a8d468550e62 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/avg_pool.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/avg_pool.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include +#include "openvino/opsets/opset1.hpp" #include #include "low_precision/network_helper.hpp" diff --git a/src/tests/ov_helpers/ov_lpt_models/src/batch_to_space.cpp b/src/tests/ov_helpers/ov_lpt_models/src/batch_to_space.cpp index aa6fa431276b98..b0817bd996207e 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/batch_to_space.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/batch_to_space.cpp @@ -4,7 +4,7 @@ #include "ov_lpt_models/batch_to_space.hpp" -#include +#include "openvino/opsets/opset2.hpp" #include "ov_lpt_models/common/builders.hpp" namespace ngraph { diff --git a/src/tests/ov_helpers/ov_lpt_models/src/clamp.cpp b/src/tests/ov_helpers/ov_lpt_models/src/clamp.cpp index c48804a97c0973..be204b15bfb88f 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/clamp.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/clamp.cpp @@ -6,7 +6,7 @@ #include -#include +#include "openvino/opsets/opset1.hpp" #include "ov_models/subgraph_builders.hpp" #include "ov_lpt_models/common/builders.hpp" #include "ov_lpt_models/clamp.hpp" diff --git a/src/tests/ov_helpers/ov_lpt_models/src/common/builders.cpp b/src/tests/ov_helpers/ov_lpt_models/src/common/builders.cpp index a14ef51efefbce..d008502387c3d1 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/common/builders.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/common/builders.cpp @@ -7,7 +7,7 @@ #include #include -#include +#include "openvino/opsets/opset1.hpp" #include "ov_ops/type_relaxed.hpp" #include "ov_models/subgraph_builders.hpp" #include "low_precision/network_helper.hpp" diff --git a/src/tests/ov_helpers/ov_lpt_models/src/common/dequantization_operations.cpp b/src/tests/ov_helpers/ov_lpt_models/src/common/dequantization_operations.cpp index 249e97041a402c..bd93064a6a7ebd 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/common/dequantization_operations.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/common/dequantization_operations.cpp @@ -3,7 +3,7 @@ // #include "ov_lpt_models/common/dequantization_operations.hpp" -#include +#include "openvino/opsets/opset1.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ov_helpers/ov_lpt_models/src/common/fake_quantize_on_data.cpp b/src/tests/ov_helpers/ov_lpt_models/src/common/fake_quantize_on_data.cpp index 498e81d229886e..a6c67623bc2c68 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/common/fake_quantize_on_data.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/common/fake_quantize_on_data.cpp @@ -3,7 +3,7 @@ // #include 
"ov_lpt_models/common/fake_quantize_on_data.hpp" -#include +#include "openvino/opsets/opset1.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ov_helpers/ov_lpt_models/src/common/fake_quantize_on_weights.cpp b/src/tests/ov_helpers/ov_lpt_models/src/common/fake_quantize_on_weights.cpp index 049e3fd9f90457..9fe23b69f308b6 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/common/fake_quantize_on_weights.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/common/fake_quantize_on_weights.cpp @@ -3,7 +3,7 @@ // #include "ov_lpt_models/common/fake_quantize_on_weights.hpp" -#include +#include "openvino/opsets/opset1.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ov_helpers/ov_lpt_models/src/common/multiply.cpp b/src/tests/ov_helpers/ov_lpt_models/src/common/multiply.cpp index 2dabc7c52d725b..d2987bd0255dfc 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/common/multiply.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/common/multiply.cpp @@ -3,7 +3,7 @@ // #include "ov_lpt_models/common/multiply.hpp" -#include +#include "openvino/opsets/opset1.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ov_helpers/ov_lpt_models/src/compose_fake_quantize.cpp b/src/tests/ov_helpers/ov_lpt_models/src/compose_fake_quantize.cpp index 8346192eee8460..7e3ddc5a8247b9 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/compose_fake_quantize.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/compose_fake_quantize.cpp @@ -5,7 +5,7 @@ #include "ov_lpt_models/compose_fake_quantize.hpp" #include "low_precision/network_helper.hpp" -#include +#include "openvino/opsets/opset1.hpp" #include "ov_models/subgraph_builders.hpp" #include "ov_lpt_models/common/builders.hpp" diff --git a/src/tests/ov_helpers/ov_lpt_models/src/concat.cpp b/src/tests/ov_helpers/ov_lpt_models/src/concat.cpp index e27e92fca9d05a..8c924f9f638fe1 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/concat.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/concat.cpp @@ -4,7 +4,7 @@ #include "ov_lpt_models/concat.hpp" -#include +#include "openvino/opsets/opset1.hpp" #include "ov_ops/type_relaxed.hpp" #include "low_precision/network_helper.hpp" #include "low_precision/rt_info/precision_preserved_attribute.hpp" @@ -758,7 +758,7 @@ std::shared_ptr ConcatFunction::getOriginalWithIntermediateWithConsta attributes.antialias = false; attributes.pads_begin = { 0 }; attributes.pads_end = { 0 }; - const auto outputShape = op::Constant::create( + const auto outputShape = ov::opset1::Constant::create( ov::element::i64, ov::Shape{ 2 }, ov::Shape{ inputShape[2].is_dynamic() ? 9ul : static_cast(inputShape[2].get_length()), @@ -1870,7 +1870,7 @@ std::shared_ptr ConcatFunction::getReferenceWithIntermediateWithConst attributes.pads_begin = { 0 }; attributes.pads_end = { 0 }; - const auto outputShape = op::Constant::create( + const auto outputShape = ov::opset1::Constant::create( ov::element::i64, ov::Shape{ 2 }, ov::Shape{ inputShape[2].is_dynamic() ? 
9ul : static_cast(inputShape[2].get_length()), diff --git a/src/tests/ov_helpers/ov_lpt_models/src/convolution.cpp b/src/tests/ov_helpers/ov_lpt_models/src/convolution.cpp index c00354a25b8773..aef4d1b951c7c5 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/convolution.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/convolution.cpp @@ -4,7 +4,7 @@ #include "ov_lpt_models/convolution.hpp" -#include +#include "openvino/opsets/opset1.hpp" #include #include "ov_models/subgraph_builders.hpp" #include "low_precision/network_helper.hpp" @@ -52,7 +52,7 @@ std::shared_ptr ConvolutionFunction::getOriginal( if (weights->cast_vector().size() == 1ul) { auto targetShape = ov::Shape{ outputChannelsCount, inputChannelsCount, 1, 1 }; weights = ov::as_type_ptr(fold( - weights, op::Constant::create(ov::element::i64, Shape{ targetShape.size() }, targetShape))); + weights, ov::opset1::Constant::create(ov::element::i64, Shape{ targetShape.size() }, targetShape))); } std::shared_ptr convertedWeights; @@ -266,7 +266,7 @@ std::shared_ptr ConvolutionFunction::getReference( if (weights->cast_vector().size() == 1ul) { auto targetShape = ov::Shape{ outputChannelsCount, inputChannelsCount, 1, 1 }; weights = ov::as_type_ptr(fold( - weights, op::Constant::create(ov::element::i64, Shape{ targetShape.size() }, targetShape))); + weights, ov::opset1::Constant::create(ov::element::i64, Shape{ targetShape.size() }, targetShape))); } const auto convertOnWeights = std::make_shared(weights, netPrecision); diff --git a/src/tests/ov_helpers/ov_lpt_models/src/convolution_backprop_data.cpp b/src/tests/ov_helpers/ov_lpt_models/src/convolution_backprop_data.cpp index e748f31872e27e..758f3cfea8eae1 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/convolution_backprop_data.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/convolution_backprop_data.cpp @@ -4,7 +4,7 @@ #include "ov_lpt_models/convolution_backprop_data.hpp" -#include +#include "openvino/opsets/opset1.hpp" #include #include "ov_models/subgraph_builders.hpp" #include "low_precision/network_helper.hpp" diff --git a/src/tests/ov_helpers/ov_lpt_models/src/elementwise_with_multi_parent_dequantization.cpp b/src/tests/ov_helpers/ov_lpt_models/src/elementwise_with_multi_parent_dequantization.cpp index eee4982142f526..53c95244acfb83 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/elementwise_with_multi_parent_dequantization.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/elementwise_with_multi_parent_dequantization.cpp @@ -5,7 +5,7 @@ #include "ov_lpt_models/elementwise_with_multi_parent_dequantization.hpp" #include "low_precision/network_helper.hpp" -#include +#include "openvino/opsets/opset1.hpp" #include "ov_models/builders.hpp" #include "ov_models/subgraph_builders.hpp" diff --git a/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize.cpp b/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize.cpp index e86a5c71582321..b9d54f2b549eb8 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize.cpp @@ -4,7 +4,7 @@ #include "ov_lpt_models/fake_quantize.hpp" -#include +#include "openvino/opsets/opset1.hpp" #include "ov_ops/type_relaxed.hpp" #include "ov_models/subgraph_builders.hpp" #include "low_precision/network_helper.hpp" diff --git a/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_and_convolution.cpp b/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_and_convolution.cpp index 8ad42a8e9d5ec4..3275b3f517809d 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_and_convolution.cpp 
+++ b/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_and_convolution.cpp @@ -4,7 +4,7 @@ #include "ov_lpt_models/fake_quantize_and_convolution.hpp" -#include +#include "openvino/opsets/opset1.hpp" #include "ov_models/subgraph_builders.hpp" #include "ov_lpt_models/common/builders.hpp" @@ -209,7 +209,7 @@ std::shared_ptr FakeQuantizeAndConvolutionFunction::get( if (multiplyAfter) { const auto& O = lastOperation->get_shape()[1]; std::vector weights_val(O, 1); - auto constant = op::Constant::create(element::f32, Shape{O, 1, 1}, weights_val); + auto constant = ov::opset1::Constant::create(element::f32, Shape{O, 1, 1}, weights_val); lastOperation = std::make_shared(lastOperation, constant); } } else { diff --git a/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_and_two_output_branches_with_convolution.cpp b/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_and_two_output_branches_with_convolution.cpp index b158c55fc01964..58a6abf775faca 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_and_two_output_branches_with_convolution.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_and_two_output_branches_with_convolution.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include +#include "openvino/opsets/opset1.hpp" #include "ov_lpt_models/common/builders.hpp" #include "ov_lpt_models/fake_quantize_and_two_output_branches_with_convolution.hpp" #include "ov_lpt_models/common/fake_quantize_on_weights.hpp" diff --git a/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_on_weights_and_unsupported_child.cpp b/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_on_weights_and_unsupported_child.cpp index f601957f5b3568..b8d0e6fd17ca05 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_on_weights_and_unsupported_child.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_on_weights_and_unsupported_child.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include +#include "openvino/opsets/opset1.hpp" #include "ov_lpt_models/common/builders.hpp" #include "ov_lpt_models/fake_quantize_on_weights_and_unsupported_child.hpp" #include "ov_lpt_models/common/fake_quantize_on_weights.hpp" diff --git a/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_precision_selection.cpp b/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_precision_selection.cpp index 4308283f073d54..5c422352c88a83 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_precision_selection.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_precision_selection.cpp @@ -4,7 +4,7 @@ #include "ov_lpt_models/fake_quantize_precision_selection.hpp" -#include +#include "openvino/opsets/opset1.hpp" #include #include "ov_models/subgraph_builders.hpp" #include "ov_lpt_models/common/builders.hpp" diff --git a/src/tests/ov_helpers/ov_lpt_models/src/fold_fake_quantize.cpp b/src/tests/ov_helpers/ov_lpt_models/src/fold_fake_quantize.cpp index 5fbf63e2eada48..e199e7f0b207f2 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/fold_fake_quantize.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/fold_fake_quantize.cpp @@ -4,7 +4,7 @@ #include "ov_lpt_models/fold_fake_quantize.hpp" -#include +#include "openvino/opsets/opset1.hpp" #include "ov_ops/type_relaxed.hpp" #include "ov_models/subgraph_builders.hpp" diff --git a/src/tests/ov_helpers/ov_lpt_models/src/fuse_convert.cpp b/src/tests/ov_helpers/ov_lpt_models/src/fuse_convert.cpp index 9efef8ad4c158f..edadc20e8f5f7e 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/fuse_convert.cpp +++ 
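For reference, the pattern these Constant hunks converge on looks roughly like the sketch below: only the opset1 header is pulled in and the constant is built through the fully qualified ov::opset1:: alias instead of the bare op:: one. The helper name and shape values are invented for illustration, not taken from the patch.

// Minimal sketch of the ov::opset1::Constant::create pattern (illustrative only).
#include <cstdint>
#include <memory>
#include <vector>

#include "openvino/opsets/opset1.hpp"

std::shared_ptr<ov::opset1::Constant> make_target_shape_constant() {
    // Shape values are placeholders; the real helpers derive them from the model.
    const std::vector<int64_t> target_shape{8, 3, 1, 1};
    return ov::opset1::Constant::create(ov::element::i64,
                                        ov::Shape{target_shape.size()},
                                        target_shape);
}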
diff --git a/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_and_two_output_branches_with_convolution.cpp b/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_and_two_output_branches_with_convolution.cpp
index b158c55fc01964..58a6abf775faca 100644
--- a/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_and_two_output_branches_with_convolution.cpp
+++ b/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_and_two_output_branches_with_convolution.cpp
@@ -2,7 +2,7 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include
+#include "openvino/opsets/opset1.hpp"
 #include "ov_lpt_models/common/builders.hpp"
 #include "ov_lpt_models/fake_quantize_and_two_output_branches_with_convolution.hpp"
 #include "ov_lpt_models/common/fake_quantize_on_weights.hpp"
diff --git a/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_on_weights_and_unsupported_child.cpp b/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_on_weights_and_unsupported_child.cpp
index f601957f5b3568..b8d0e6fd17ca05 100644
--- a/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_on_weights_and_unsupported_child.cpp
+++ b/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_on_weights_and_unsupported_child.cpp
@@ -2,7 +2,7 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include
+#include "openvino/opsets/opset1.hpp"
 #include "ov_lpt_models/common/builders.hpp"
 #include "ov_lpt_models/fake_quantize_on_weights_and_unsupported_child.hpp"
 #include "ov_lpt_models/common/fake_quantize_on_weights.hpp"
diff --git a/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_precision_selection.cpp b/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_precision_selection.cpp
index 4308283f073d54..5c422352c88a83 100644
--- a/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_precision_selection.cpp
+++ b/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_precision_selection.cpp
@@ -4,7 +4,7 @@
 #include "ov_lpt_models/fake_quantize_precision_selection.hpp"
-#include
+#include "openvino/opsets/opset1.hpp"
 #include
 #include "ov_models/subgraph_builders.hpp"
 #include "ov_lpt_models/common/builders.hpp"
diff --git a/src/tests/ov_helpers/ov_lpt_models/src/fold_fake_quantize.cpp b/src/tests/ov_helpers/ov_lpt_models/src/fold_fake_quantize.cpp
index 5fbf63e2eada48..e199e7f0b207f2 100644
--- a/src/tests/ov_helpers/ov_lpt_models/src/fold_fake_quantize.cpp
+++ b/src/tests/ov_helpers/ov_lpt_models/src/fold_fake_quantize.cpp
@@ -4,7 +4,7 @@
 #include "ov_lpt_models/fold_fake_quantize.hpp"
-#include
+#include "openvino/opsets/opset1.hpp"
 #include "ov_ops/type_relaxed.hpp"
 #include "ov_models/subgraph_builders.hpp"
diff --git a/src/tests/ov_helpers/ov_lpt_models/src/fuse_convert.cpp b/src/tests/ov_helpers/ov_lpt_models/src/fuse_convert.cpp
index 9efef8ad4c158f..edadc20e8f5f7e 100644
--- a/src/tests/ov_helpers/ov_lpt_models/src/fuse_convert.cpp
+++ b/src/tests/ov_helpers/ov_lpt_models/src/fuse_convert.cpp
@@ -4,7 +4,7 @@
 #include "ov_lpt_models/fuse_convert.hpp"
-#include
+#include "openvino/opsets/opset1.hpp"
 #include "ov_models/subgraph_builders.hpp"
 #include "ov_lpt_models/common/builders.hpp"
diff --git a/src/tests/ov_helpers/ov_lpt_models/src/fuse_fake_quantize.cpp b/src/tests/ov_helpers/ov_lpt_models/src/fuse_fake_quantize.cpp
index 05274aa9568ab7..47eb5fc2dc77b9 100644
--- a/src/tests/ov_helpers/ov_lpt_models/src/fuse_fake_quantize.cpp
+++ b/src/tests/ov_helpers/ov_lpt_models/src/fuse_fake_quantize.cpp
@@ -4,7 +4,7 @@
 #include "ov_lpt_models/fuse_fake_quantize.hpp"
-#include
+#include "openvino/opsets/opset1.hpp"
 #include "ov_ops/type_relaxed.hpp"
 #include "low_precision/network_helper.hpp"
 #include "ov_models/subgraph_builders.hpp"
diff --git a/src/tests/ov_helpers/ov_lpt_models/src/fuse_fake_quantize_and_scale_shift.cpp b/src/tests/ov_helpers/ov_lpt_models/src/fuse_fake_quantize_and_scale_shift.cpp
index 94fb132962a273..4bdddc2eef0803 100644
--- a/src/tests/ov_helpers/ov_lpt_models/src/fuse_fake_quantize_and_scale_shift.cpp
+++ b/src/tests/ov_helpers/ov_lpt_models/src/fuse_fake_quantize_and_scale_shift.cpp
@@ -4,7 +4,7 @@
 #include "ov_lpt_models/fuse_fake_quantize_and_scale_shift.hpp"
-#include
+#include "openvino/opsets/opset1.hpp"
 #include "ov_models/builders.hpp"
 namespace ngraph {
diff --git a/src/tests/ov_helpers/ov_lpt_models/src/fuse_multiply_to_fake_quantize.cpp b/src/tests/ov_helpers/ov_lpt_models/src/fuse_multiply_to_fake_quantize.cpp
index 5164d9ca1a51e3..c5f7caa6413ee2 100644
--- a/src/tests/ov_helpers/ov_lpt_models/src/fuse_multiply_to_fake_quantize.cpp
+++ b/src/tests/ov_helpers/ov_lpt_models/src/fuse_multiply_to_fake_quantize.cpp
@@ -4,7 +4,7 @@
 #include "ov_lpt_models/fuse_multiply_to_fake_quantize.hpp"
-#include
+#include "openvino/opsets/opset1.hpp"
 #include "ov_ops/type_relaxed.hpp"
 #include "ov_models/subgraph_builders.hpp"
 #include "low_precision/network_helper.hpp"
diff --git a/src/tests/ov_helpers/ov_lpt_models/src/fuse_subtract_to_fake_quantize.cpp b/src/tests/ov_helpers/ov_lpt_models/src/fuse_subtract_to_fake_quantize.cpp
index f39b90130c1d68..9618c5f2827f37 100644
--- a/src/tests/ov_helpers/ov_lpt_models/src/fuse_subtract_to_fake_quantize.cpp
+++ b/src/tests/ov_helpers/ov_lpt_models/src/fuse_subtract_to_fake_quantize.cpp
@@ -4,7 +4,7 @@
 #include "ov_lpt_models/fuse_subtract_to_fake_quantize.hpp"
-#include
+#include "openvino/opsets/opset1.hpp"
 #include "ov_ops/type_relaxed.hpp"
 #include "ov_models/subgraph_builders.hpp"
 #include "low_precision/network_helper.hpp"
diff --git a/src/tests/ov_helpers/ov_lpt_models/src/gather.cpp b/src/tests/ov_helpers/ov_lpt_models/src/gather.cpp
index 4ae1f31278806d..4c745d2d726e24 100644
--- a/src/tests/ov_helpers/ov_lpt_models/src/gather.cpp
+++ b/src/tests/ov_helpers/ov_lpt_models/src/gather.cpp
@@ -4,9 +4,9 @@
 #include "ov_lpt_models/gather.hpp"
-#include
-#include
-#include
+#include "openvino/opsets/opset1.hpp"
+#include "openvino/opsets/opset7.hpp"
+#include "openvino/opsets/opset8.hpp"
 #include "ov_lpt_models/common/builders.hpp"
 namespace ngraph {
diff --git a/src/tests/ov_helpers/ov_lpt_models/src/group_convolution.cpp b/src/tests/ov_helpers/ov_lpt_models/src/group_convolution.cpp
index cbb08e1fc6c1ed..076cc2a12a106e 100644
--- a/src/tests/ov_helpers/ov_lpt_models/src/group_convolution.cpp
+++ b/src/tests/ov_helpers/ov_lpt_models/src/group_convolution.cpp
@@ -4,7 +4,7 @@
 #include "ov_lpt_models/group_convolution.hpp"
-#include
+#include "openvino/opsets/opset1.hpp"
 #include
 #include "ov_models/subgraph_builders.hpp"
 #include "low_precision/network_helper.hpp"
diff --git a/src/tests/ov_helpers/ov_lpt_models/src/markup_avg_pool_precisions.cpp b/src/tests/ov_helpers/ov_lpt_models/src/markup_avg_pool_precisions.cpp
index 5b429ac22054bf..2d60507b328297 100644
--- a/src/tests/ov_helpers/ov_lpt_models/src/markup_avg_pool_precisions.cpp
+++ b/src/tests/ov_helpers/ov_lpt_models/src/markup_avg_pool_precisions.cpp
@@ -2,7 +2,7 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include
+#include "openvino/opsets/opset1.hpp"
 #include
 #include "low_precision/network_helper.hpp"
diff --git a/src/tests/ov_helpers/ov_lpt_models/src/markup_bias.cpp b/src/tests/ov_helpers/ov_lpt_models/src/markup_bias.cpp
index 185ff9683a6f61..2df17e9ac806a4 100644
--- a/src/tests/ov_helpers/ov_lpt_models/src/markup_bias.cpp
+++ b/src/tests/ov_helpers/ov_lpt_models/src/markup_bias.cpp
@@ -2,7 +2,7 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include
+#include "openvino/opsets/opset1.hpp"
 #include "ov_lpt_models/markup_bias.hpp"
 #include "ov_models/utils/ov_helpers.hpp"
 #include "ov_models/builders.hpp"
diff --git a/src/tests/ov_helpers/ov_lpt_models/src/mat_mul.cpp b/src/tests/ov_helpers/ov_lpt_models/src/mat_mul.cpp
index 285a5f7f6de649..1a52e287fe1f52 100644
--- a/src/tests/ov_helpers/ov_lpt_models/src/mat_mul.cpp
+++ b/src/tests/ov_helpers/ov_lpt_models/src/mat_mul.cpp
@@ -7,7 +7,7 @@
 #include
 #include
-#include
+#include "openvino/opsets/opset1.hpp"
 #include "ov_ops/type_relaxed.hpp"
 #include "ov_models/subgraph_builders.hpp"
 #include "low_precision/network_helper.hpp"
diff --git a/src/tests/ov_helpers/ov_lpt_models/src/mat_mul_with_optimized_constant_fake_quantize.cpp b/src/tests/ov_helpers/ov_lpt_models/src/mat_mul_with_optimized_constant_fake_quantize.cpp
index b9b3471a3ffa60..a5ce7b0790c5f3 100644
--- a/src/tests/ov_helpers/ov_lpt_models/src/mat_mul_with_optimized_constant_fake_quantize.cpp
+++ b/src/tests/ov_helpers/ov_lpt_models/src/mat_mul_with_optimized_constant_fake_quantize.cpp
@@ -4,7 +4,7 @@
 #include "ov_lpt_models/mat_mul_with_optimized_constant_fake_quantize.hpp"
-#include
+#include "openvino/opsets/opset1.hpp"
 #include "ov_models/builders.hpp"
 namespace ngraph {
diff --git a/src/tests/ov_helpers/ov_lpt_models/src/max_pool.cpp b/src/tests/ov_helpers/ov_lpt_models/src/max_pool.cpp
index 4bc4dbd44e2281..919706a5b8f637 100644
--- a/src/tests/ov_helpers/ov_lpt_models/src/max_pool.cpp
+++ b/src/tests/ov_helpers/ov_lpt_models/src/max_pool.cpp
@@ -4,7 +4,7 @@
 #include "ov_lpt_models/max_pool.hpp"
-#include
+#include "openvino/opsets/opset1.hpp"
 #include
 #include "low_precision/network_helper.hpp"
 #include "ov_models/subgraph_builders.hpp"
diff --git a/src/tests/ov_helpers/ov_lpt_models/src/move_dequantization_after.cpp b/src/tests/ov_helpers/ov_lpt_models/src/move_dequantization_after.cpp
index 0548636f09bbc9..708475fe9e6a37 100644
--- a/src/tests/ov_helpers/ov_lpt_models/src/move_dequantization_after.cpp
+++ b/src/tests/ov_helpers/ov_lpt_models/src/move_dequantization_after.cpp
@@ -5,7 +5,7 @@
 #include "ov_lpt_models/move_dequantization_after.hpp"
 #include "low_precision/network_helper.hpp"
-#include
+#include "openvino/opsets/opset1.hpp"
 #include "ov_models/subgraph_builders.hpp"
 #include "ov_lpt_models/common/builders.hpp"
diff --git a/src/tests/ov_helpers/ov_lpt_models/src/move_fake_quantize.cpp b/src/tests/ov_helpers/ov_lpt_models/src/move_fake_quantize.cpp
index e76a42e551fa07..de07b0eed9fb57 100644
--- a/src/tests/ov_helpers/ov_lpt_models/src/move_fake_quantize.cpp
+++ b/src/tests/ov_helpers/ov_lpt_models/src/move_fake_quantize.cpp
@@ -5,7 +5,7 @@
 #include "ov_lpt_models/move_fake_quantize.hpp"
 #include
-#include
+#include "openvino/opsets/opset1.hpp"
 #include "ov_ops/type_relaxed.hpp"
 #include "low_precision/network_helper.hpp"
diff --git a/src/tests/ov_helpers/ov_lpt_models/src/multiply.cpp b/src/tests/ov_helpers/ov_lpt_models/src/multiply.cpp
index b36629e0b66920..6dbdd101aef262 100644
--- a/src/tests/ov_helpers/ov_lpt_models/src/multiply.cpp
+++ b/src/tests/ov_helpers/ov_lpt_models/src/multiply.cpp
@@ -6,7 +6,7 @@
 #include
-#include
+#include "openvino/opsets/opset1.hpp"
 #include
 #include "ov_models/subgraph_builders.hpp"
 #include "low_precision/network_helper.hpp"
diff --git a/src/tests/ov_helpers/ov_lpt_models/src/multiply_partial_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/multiply_partial_function.cpp
index af7f1718f263eb..1c2614f060216d 100644
--- a/src/tests/ov_helpers/ov_lpt_models/src/multiply_partial_function.cpp
+++ b/src/tests/ov_helpers/ov_lpt_models/src/multiply_partial_function.cpp
@@ -6,7 +6,7 @@
 #include
-#include
+#include "openvino/opsets/opset1.hpp"
 #include
 #include "ov_models/subgraph_builders.hpp"
 #include "low_precision/network_helper.hpp"
diff --git a/src/tests/ov_helpers/ov_lpt_models/src/multiply_with_one_parent.cpp b/src/tests/ov_helpers/ov_lpt_models/src/multiply_with_one_parent.cpp
index da0b6957f31b82..ef3ef183c3da6c 100644
--- a/src/tests/ov_helpers/ov_lpt_models/src/multiply_with_one_parent.cpp
+++ b/src/tests/ov_helpers/ov_lpt_models/src/multiply_with_one_parent.cpp
@@ -4,7 +4,7 @@
 #include "ov_lpt_models/multiply_with_one_parent.hpp"
-#include
+#include "openvino/opsets/opset1.hpp"
 #include "ov_models/builders.hpp"
 namespace ngraph {
diff --git a/src/tests/ov_helpers/ov_lpt_models/src/mvn.cpp b/src/tests/ov_helpers/ov_lpt_models/src/mvn.cpp
index 02be02a1384b18..d3082b0a4d6f18 100644
--- a/src/tests/ov_helpers/ov_lpt_models/src/mvn.cpp
+++ b/src/tests/ov_helpers/ov_lpt_models/src/mvn.cpp
@@ -33,7 +33,7 @@ std::shared_ptr MVNFunction::getOriginal(
 std::make_shared(element::i64, Shape{reductionAxes.size()}, reductionAxes.to_vector()),
 normalizeVariance,
 1e-9,
- op::MVNEpsMode::INSIDE_SQRT);
+ ov::op::MVNEpsMode::INSIDE_SQRT);
 }
 mvn->set_friendly_name("output");
 auto& rtInfo = mvn->get_rt_info();
@@ -78,7 +78,7 @@ std::shared_ptr MVNFunction::getReference(
 std::shared_ptr mvn;
 if (opset_version == 2) {
 mvn = std::make_shared>(
- op::MVN(dequantizationOpBefore, reductionAxes, normalizeVariance),
+ ov::op::v0::MVN(dequantizationOpBefore, reductionAxes, normalizeVariance),
 dequantizationAfter.empty() ? precision : element::f32);
 } else if (opset_version == 6) {
 mvn = std::make_shared>(
@@ -86,7 +86,7 @@
 std::make_shared(element::i64, Shape{reductionAxes.size()}, reductionAxes.to_vector()),
 normalizeVariance,
 1e-9,
- op::MVNEpsMode::INSIDE_SQRT),
+ ov::op::MVNEpsMode::INSIDE_SQRT),
 dequantizationAfter.empty() ? precision : element::f32);
 }
 auto& rtInfo = mvn->get_rt_info();
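The mvn.cpp hunks above spell out the ov:: qualification for both the opset-2 and opset-6 MVN paths. A minimal sketch of the opset-6 construction follows, assuming only the opset6 header; the TypeRelaxed wrapper used by the real helper and the function name below are omitted or invented, so treat this as illustrative rather than the helper's actual code.

// Sketch: building an opset-6 MVN node with explicit ov:: qualification.
#include <memory>
#include <vector>

#include "openvino/opsets/opset6.hpp"

std::shared_ptr<ov::opset6::MVN> make_mvn(const ov::Output<ov::Node>& data,
                                          const std::vector<int64_t>& axes) {
    const auto reduction_axes =
        ov::opset6::Constant::create(ov::element::i64, ov::Shape{axes.size()}, axes);
    // Same eps mode the reference model in mvn.cpp uses.
    return std::make_shared<ov::opset6::MVN>(data,
                                             reduction_axes,
                                             /*normalize_variance=*/true,
                                             /*eps=*/1e-9f,
                                             ov::op::MVNEpsMode::INSIDE_SQRT);
}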
diff --git a/src/tests/ov_helpers/ov_lpt_models/src/normalize_l2.cpp b/src/tests/ov_helpers/ov_lpt_models/src/normalize_l2.cpp
index 54b8e000d1be43..a3b94a49567152 100644
--- a/src/tests/ov_helpers/ov_lpt_models/src/normalize_l2.cpp
+++ b/src/tests/ov_helpers/ov_lpt_models/src/normalize_l2.cpp
@@ -5,7 +5,7 @@
 #include "ov_lpt_models/normalize_l2.hpp"
 #include
-#include
+#include "openvino/opsets/opset1.hpp"
 #include "ov_models/subgraph_builders.hpp"
 #include "ov_lpt_models/common/builders.hpp"
diff --git a/src/tests/ov_helpers/ov_lpt_models/src/precision_propagation.cpp b/src/tests/ov_helpers/ov_lpt_models/src/precision_propagation.cpp
index ebd318d7bf8742..4ae654fe774cca 100644
--- a/src/tests/ov_helpers/ov_lpt_models/src/precision_propagation.cpp
+++ b/src/tests/ov_helpers/ov_lpt_models/src/precision_propagation.cpp
@@ -4,7 +4,7 @@
 #include "ov_lpt_models/precision_propagation.hpp"
-#include
+#include "openvino/opsets/opset1.hpp"
 #include "ov_ops/type_relaxed.hpp"
 #include "low_precision/network_helper.hpp"
 #include "low_precision/rt_info/precision_preserved_attribute.hpp"
diff --git a/src/tests/ov_helpers/ov_lpt_models/src/precomp.hpp b/src/tests/ov_helpers/ov_lpt_models/src/precomp.hpp
index de698a6d2f9588..c41df1a99372ba 100644
--- a/src/tests/ov_helpers/ov_lpt_models/src/precomp.hpp
+++ b/src/tests/ov_helpers/ov_lpt_models/src/precomp.hpp
@@ -4,8 +4,8 @@
 #pragma once
-#include
-#include
+#include "openvino/openvino.hpp"
+#include "openvino/op/ops.hpp"
 #include
 #include
diff --git a/src/tests/ov_helpers/ov_lpt_models/src/prelu.cpp b/src/tests/ov_helpers/ov_lpt_models/src/prelu.cpp
index b6e733714bdd06..876a810151fc39 100644
--- a/src/tests/ov_helpers/ov_lpt_models/src/prelu.cpp
+++ b/src/tests/ov_helpers/ov_lpt_models/src/prelu.cpp
@@ -6,7 +6,7 @@
 #include
-#include
+#include "openvino/opsets/opset1.hpp"
 #include "ov_ops/type_relaxed.hpp"
 #include "ov_models/subgraph_builders.hpp"
 #include "ov_lpt_models/common/builders.hpp"
diff --git a/src/tests/ov_helpers/ov_lpt_models/src/relu.cpp b/src/tests/ov_helpers/ov_lpt_models/src/relu.cpp
index c5a2a241aac42c..c5328cc9abfcb4 100644
--- a/src/tests/ov_helpers/ov_lpt_models/src/relu.cpp
+++ b/src/tests/ov_helpers/ov_lpt_models/src/relu.cpp
@@ -6,7 +6,7 @@
 #include
-#include
+#include "openvino/opsets/opset1.hpp"
 #include "ov_ops/type_relaxed.hpp"
 #include "ov_lpt_models/common/builders.hpp"
 #include "low_precision/network_helper.hpp"
diff --git a/src/tests/ov_helpers/ov_lpt_models/src/reshape.cpp b/src/tests/ov_helpers/ov_lpt_models/src/reshape.cpp
index 02cd23c503640e..ccab249a9e18a4 100644
--- a/src/tests/ov_helpers/ov_lpt_models/src/reshape.cpp
+++ b/src/tests/ov_helpers/ov_lpt_models/src/reshape.cpp
@@ -4,7 +4,7 @@
 #include "ov_lpt_models/reshape.hpp"
-#include
+#include "openvino/opsets/opset1.hpp"
 #include "ov_lpt_models/common/builders.hpp"
 namespace ngraph {
diff --git a/src/tests/ov_helpers/ov_lpt_models/src/round.cpp b/src/tests/ov_helpers/ov_lpt_models/src/round.cpp
index 5eff5046f78e04..c8858744424a4c 100644
--- a/src/tests/ov_helpers/ov_lpt_models/src/round.cpp
+++ b/src/tests/ov_helpers/ov_lpt_models/src/round.cpp
@@ -2,7 +2,7 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include
+#include "openvino/opsets/opset1.hpp"
 #include "ov_lpt_models/round.hpp"
 #include "ov_lpt_models/common/builders.hpp"
diff --git a/src/tests/ov_helpers/ov_lpt_models/src/shuffle_channels.cpp b/src/tests/ov_helpers/ov_lpt_models/src/shuffle_channels.cpp
index 48847c1f33a7b0..6ef40243193b2b 100644
--- a/src/tests/ov_helpers/ov_lpt_models/src/shuffle_channels.cpp
+++ b/src/tests/ov_helpers/ov_lpt_models/src/shuffle_channels.cpp
@@ -2,7 +2,7 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include
+#include "openvino/opsets/opset1.hpp"
 #include "low_precision/network_helper.hpp"
 #include "ov_lpt_models/common/builders.hpp"
diff --git a/src/tests/ov_helpers/ov_lpt_models/src/space_to_batch.cpp b/src/tests/ov_helpers/ov_lpt_models/src/space_to_batch.cpp
index b6468f45249fde..ffe47dc3b75067 100644
--- a/src/tests/ov_helpers/ov_lpt_models/src/space_to_batch.cpp
+++ b/src/tests/ov_helpers/ov_lpt_models/src/space_to_batch.cpp
@@ -4,7 +4,7 @@
 #include "ov_lpt_models/space_to_batch.hpp"
-#include
+#include "openvino/opsets/opset2.hpp"
 #include "ov_lpt_models/common/builders.hpp"
 namespace ngraph {
diff --git a/src/tests/ov_helpers/ov_lpt_models/src/squeeze.cpp b/src/tests/ov_helpers/ov_lpt_models/src/squeeze.cpp
index ed4298207e3c25..6812c7463f9219 100644
--- a/src/tests/ov_helpers/ov_lpt_models/src/squeeze.cpp
+++ b/src/tests/ov_helpers/ov_lpt_models/src/squeeze.cpp
@@ -63,7 +63,7 @@ std::shared_ptr SqueezeFunction::getReference(
 const std::shared_ptr dequantizationOpBefore = makeDequantization(input, dequantizationBefore);
 const auto squeeze = std::make_shared>(
- op::Squeeze(dequantizationOpBefore, std::make_shared(element::i64, Shape{ axes.size() }, axes)),
+ ov::opset1::Squeeze(dequantizationOpBefore, std::make_shared(element::i64, Shape{ axes.size() }, axes)),
 precisionAfterOperation);
 const std::shared_ptr dequantizationOpAfter = makeDequantization(squeeze, dequantizationAfter);
 dequantizationOpAfter->set_friendly_name("output");
diff --git a/src/tests/ov_helpers/ov_lpt_models/src/subtract.cpp b/src/tests/ov_helpers/ov_lpt_models/src/subtract.cpp
index 4b058335d620d9..f4eaf38b1ef0fa 100644
--- a/src/tests/ov_helpers/ov_lpt_models/src/subtract.cpp
+++ b/src/tests/ov_helpers/ov_lpt_models/src/subtract.cpp
@@ -5,7 +5,7 @@
 #include "ov_lpt_models/subtract.hpp"
 #include "low_precision/network_helper.hpp"
-#include
+#include "openvino/opsets/opset1.hpp"
 #include "ov_lpt_models/common/builders.hpp"
 #include "ov_models/subgraph_builders.hpp"
diff --git a/src/tests/ov_helpers/ov_lpt_models/src/subtract_multiply_to_multiply_add.cpp b/src/tests/ov_helpers/ov_lpt_models/src/subtract_multiply_to_multiply_add.cpp
index 99e49c247eda13..6cf23a0a875acb 100644
--- a/src/tests/ov_helpers/ov_lpt_models/src/subtract_multiply_to_multiply_add.cpp
+++ b/src/tests/ov_helpers/ov_lpt_models/src/subtract_multiply_to_multiply_add.cpp
@@ -4,7 +4,7 @@
 #include "ov_lpt_models/subtract_multiply_to_multiply_add.hpp"
-#include
+#include "openvino/opsets/opset1.hpp"
 #include "ov_lpt_models/common/builders.hpp"
 using namespace ov::pass::low_precision;
diff --git a/src/tests/ov_helpers/ov_lpt_models/src/transformations_after_split.cpp b/src/tests/ov_helpers/ov_lpt_models/src/transformations_after_split.cpp
index 0895968b5b3ab5..43003eab6aa914 100644
--- a/src/tests/ov_helpers/ov_lpt_models/src/transformations_after_split.cpp
+++ b/src/tests/ov_helpers/ov_lpt_models/src/transformations_after_split.cpp
@@ -6,7 +6,7 @@
 #include
-#include
+#include "openvino/opsets/opset1.hpp"
 #include "ov_lpt_models/common/fake_quantize_on_data.hpp"
 #include "ov_lpt_models/common/dequantization_operations.hpp"
diff --git a/src/tests/ov_helpers/ov_lpt_models/src/transpose.cpp b/src/tests/ov_helpers/ov_lpt_models/src/transpose.cpp
index aaf1093e6c52e9..7b67a00a71ccd7 100644
--- a/src/tests/ov_helpers/ov_lpt_models/src/transpose.cpp
+++ b/src/tests/ov_helpers/ov_lpt_models/src/transpose.cpp
@@ -4,7 +4,7 @@
 #include "ov_lpt_models/transpose.hpp"
-#include
+#include "openvino/opsets/opset1.hpp"
 #include "ov_lpt_models/common/builders.hpp"
 namespace ngraph {
diff --git a/src/tests/ov_helpers/ov_lpt_models/src/transpose_after_mat_mul.cpp b/src/tests/ov_helpers/ov_lpt_models/src/transpose_after_mat_mul.cpp
index 5724e21251edf5..8abff7dc5eef9b 100644
--- a/src/tests/ov_helpers/ov_lpt_models/src/transpose_after_mat_mul.cpp
+++ b/src/tests/ov_helpers/ov_lpt_models/src/transpose_after_mat_mul.cpp
@@ -5,7 +5,7 @@
 #include "ov_lpt_models/transpose_after_mat_mul.hpp"
 #include "low_precision/network_helper.hpp"
-#include
+#include "openvino/opsets/opset1.hpp"
 #include "ov_lpt_models/common/builders.hpp"
 #include "ov_models/subgraph_builders.hpp"
diff --git a/src/tests/ov_helpers/ov_lpt_models/src/unsqueeze.cpp b/src/tests/ov_helpers/ov_lpt_models/src/unsqueeze.cpp
index 5341019edd5ffc..2e7f888fe3725b 100644
--- a/src/tests/ov_helpers/ov_lpt_models/src/unsqueeze.cpp
+++ b/src/tests/ov_helpers/ov_lpt_models/src/unsqueeze.cpp
@@ -63,7 +63,7 @@ std::shared_ptr UnsqueezeFunction::getReference(
 const std::shared_ptr dequantizationOpBefore = makeDequantization(input, dequantizationBefore);
 const auto unsqueeze = std::make_shared>(
- op::v0::Unsqueeze(dequantizationOpBefore, std::make_shared(element::i64, Shape{ axes.size() }, axes)),
+ ov::op::v0::Unsqueeze(dequantizationOpBefore, std::make_shared(element::i64, Shape{ axes.size() }, axes)),
 precisionAfterOperation);
 const std::shared_ptr dequantizationOpAfter = makeDequantization(unsqueeze, dequantizationAfter);
 dequantizationOpAfter->set_friendly_name("output");
diff --git a/src/tests/ov_helpers/ov_models/include/ov_models/builders.hpp b/src/tests/ov_helpers/ov_models/include/ov_models/builders.hpp
index 6c3731e125c031..d6f7eb167a3fe7 100644
--- a/src/tests/ov_helpers/ov_models/include/ov_models/builders.hpp
+++ b/src/tests/ov_helpers/ov_models/include/ov_models/builders.hpp
@@ -5,20 +5,8 @@
 #pragma once
 #include
-#include
-
-// TODO: Temporary solution to fix compilation of plugin tests
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
 #include
-// TODO: Temporary solution to fix compilation of plugin tests
+#include
 #include "common_test_utils/test_enums.hpp"
 #include "openvino/core/node.hpp"
diff --git a/src/tests/ov_helpers/ov_models/include/ov_models/utils/ov_helpers.hpp b/src/tests/ov_helpers/ov_models/include/ov_models/utils/ov_helpers.hpp
index 668c11afd258e4..d140301b2367ae 100644
--- a/src/tests/ov_helpers/ov_models/include/ov_models/utils/ov_helpers.hpp
+++ b/src/tests/ov_helpers/ov_models/include/ov_models/utils/ov_helpers.hpp
@@ -10,11 +10,11 @@
 #endif
 #include
-#include
-#include
 #include
 #include "common_test_utils/test_enums.hpp"
+#include "openvino/opsets/opset1.hpp"
+#include "openvino/runtime/tensor.hpp"
 namespace ngraph {
 namespace helpers {
diff --git a/src/tests/ov_helpers/ov_models/src/eltwise.cpp b/src/tests/ov_helpers/ov_models/src/eltwise.cpp
index 17838f6a5d702a..223f9a9f46ca00 100644
--- a/src/tests/ov_helpers/ov_models/src/eltwise.cpp
+++ b/src/tests/ov_helpers/ov_models/src/eltwise.cpp
@@ -3,9 +3,9 @@
 //
 #include
-#include
 #include "common_test_utils/test_enums.hpp"
+#include "openvino/opsets/opset13.hpp"
 #include "ov_models/utils/ov_helpers.hpp"
 namespace ngraph {
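With the catch-all opset includes dropped from builders.hpp, each helper now pulls in only the opset it actually needs; eltwise.cpp, for example, switches to the opset13 header, presumably for the newer bitwise element-wise ops. A hedged sketch of that pattern follows; the builder function below is invented for illustration and is not part of the patched helpers.

// Sketch: include a single opset header and use one of its ops directly.
#include <memory>

#include "openvino/opsets/opset13.hpp"

std::shared_ptr<ov::Node> make_bitwise_and(const ov::Output<ov::Node>& lhs,
                                           const ov::Output<ov::Node>& rhs) {
    // BitwiseAnd was introduced in opset13, so older opset headers would not provide it.
    return std::make_shared<ov::opset13::BitwiseAnd>(lhs, rhs);
}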
diff --git a/src/tests/ov_helpers/ov_snippets_models/src/precision_propagation.cpp b/src/tests/ov_helpers/ov_snippets_models/src/precision_propagation.cpp
index b842c56da5966e..be736a1c1fd66c 100644
--- a/src/tests/ov_helpers/ov_snippets_models/src/precision_propagation.cpp
+++ b/src/tests/ov_helpers/ov_snippets_models/src/precision_propagation.cpp
@@ -4,7 +4,7 @@
 #include "precision_propagation.hpp"
 #include
-#include
+#include "openvino/opsets/opset1.hpp"
 namespace ov {
 namespace test {
diff --git a/src/tests/ov_helpers/ov_snippets_models/src/precision_propagation_convertion.cpp b/src/tests/ov_helpers/ov_snippets_models/src/precision_propagation_convertion.cpp
index e73194071cf4e6..1d3eb20a4791c3 100644
--- a/src/tests/ov_helpers/ov_snippets_models/src/precision_propagation_convertion.cpp
+++ b/src/tests/ov_helpers/ov_snippets_models/src/precision_propagation_convertion.cpp
@@ -4,7 +4,7 @@
 #include "precision_propagation_convertion.hpp"
 #include
-#include
+#include "openvino/opsets/opset1.hpp"
 namespace ov {
 namespace test {
diff --git a/src/tests/ov_helpers/ov_snippets_models/src/precomp.hpp b/src/tests/ov_helpers/ov_snippets_models/src/precomp.hpp
index 6e9e85edfaf0f2..4e526c519954a2 100644
--- a/src/tests/ov_helpers/ov_snippets_models/src/precomp.hpp
+++ b/src/tests/ov_helpers/ov_snippets_models/src/precomp.hpp
@@ -4,8 +4,8 @@
 #pragma once
-#include
-#include
+#include "openvino/openvino.hpp"
+#include "openvino/op/ops.hpp"
 #include
 #include
diff --git a/src/tests/ov_helpers/ov_snippets_models/src/subgraph_roll_matmul_roll.cpp b/src/tests/ov_helpers/ov_snippets_models/src/subgraph_roll_matmul_roll.cpp
index 6953fdb5adc25a..89562cb6c14457 100644
--- a/src/tests/ov_helpers/ov_snippets_models/src/subgraph_roll_matmul_roll.cpp
+++ b/src/tests/ov_helpers/ov_snippets_models/src/subgraph_roll_matmul_roll.cpp
@@ -4,7 +4,7 @@
 #include "subgraph_roll_matmul_roll.hpp"
 #include
-#include
+#include "openvino/opsets/opset1.hpp"
 namespace ov {
 namespace test {
diff --git a/src/tests/ov_helpers/ov_snippets_models/src/two_binary_ops.cpp b/src/tests/ov_helpers/ov_snippets_models/src/two_binary_ops.cpp
index 3cf17045bc1b50..942a4a5e3b1dc8 100644
--- a/src/tests/ov_helpers/ov_snippets_models/src/two_binary_ops.cpp
+++ b/src/tests/ov_helpers/ov_snippets_models/src/two_binary_ops.cpp
@@ -3,7 +3,7 @@
 //
 #include "two_binary_ops.hpp"
-#include
+#include "openvino/opsets/opset1.hpp"
 #include "snippets/op/convert_saturation.hpp"
 namespace ov {