From a5365b94de6925e3835ed6126dacdceea80195ca Mon Sep 17 00:00:00 2001 From: Gorbachev Date: Tue, 7 Mar 2023 17:59:35 +0000 Subject: [PATCH 01/12] fix 1 --- src/plugins/intel_gpu/CMakeLists.txt | 2 +- src/plugins/intel_gpu/src/graph/broadcast.cpp | 4 ++-- .../graph/graph_optimizer/pre_replace_deconv.cpp | 2 +- .../graph_optimizer/prepare_primitive_fusing.cpp | 2 +- .../intel_gpu/src/graph/layout_optimizer.cpp | 2 +- src/plugins/intel_gpu/src/graph/network.cpp | 2 +- src/plugins/intel_gpu/src/graph/program_node.cpp | 2 +- .../src/kernel_selector/kernel_selector_params.h | 4 ++-- .../src/kernel_selector/kernel_selector_utils.cpp | 2 +- .../convert_color/convert_color_kernel_base.cpp | 2 +- .../kernels/ctc_loss/ctc_loss_kernel_ref.cpp | 2 +- .../ed_rfe/roi_feature_extractor_kernel_ref.cpp | 2 +- .../kernels/gather/gather_kernel_ref.cpp | 2 +- .../kernels/matrix_nms/matrix_nms_kernel_ref.cpp | 8 ++++---- .../multiclass_nms/multiclass_nms_kernel_ref.cpp | 2 +- .../kernels/resample/resample_kernel_opt.cpp | 2 +- .../scatter_nd_update_kernel_ref.cpp | 2 +- .../intel_gpu/src/plugin/ops/convolution.cpp | 2 +- .../src/plugin/ops/ctc_greedy_decoder.cpp | 2 +- src/plugins/intel_gpu/src/plugin/ops/custom.cpp | 2 +- src/plugins/intel_gpu/src/plugin/ops/dft.cpp | 2 +- src/plugins/intel_gpu/src/plugin/ops/gather_nd.cpp | 5 +++-- src/plugins/intel_gpu/src/plugin/ops/one_hot.cpp | 2 +- src/plugins/intel_gpu/src/plugin/ops/proposal.cpp | 2 +- .../intel_gpu/src/plugin/ops/region_yolo.cpp | 8 ++++---- .../intel_gpu/src/plugin/ops/reorg_yolo.cpp | 2 +- .../intel_gpu/src/plugin/ops/reverse_sequence.cpp | 4 ++-- src/plugins/intel_gpu/src/plugin/ops/rnn.cpp | 14 +++++++------- .../intel_gpu/src/plugin/ops/roi_pooling.cpp | 4 ++-- src/plugins/intel_gpu/src/plugin/ops/roll.cpp | 2 +- src/plugins/intel_gpu/src/plugin/ops/split.cpp | 4 ++-- .../intel_gpu/src/plugin/ops/strided_slice.cpp | 8 +++++--- src/plugins/intel_gpu/src/plugin/ops/topk.cpp | 2 +- 
src/plugins/intel_gpu/src/plugin/program.cpp | 4 ++-- src/plugins/intel_gpu/src/runtime/layout.cpp | 2 +- 35 files changed, 59 insertions(+), 56 deletions(-) diff --git a/src/plugins/intel_gpu/CMakeLists.txt b/src/plugins/intel_gpu/CMakeLists.txt index a76e015346b82a..d4420d36c5602b 100644 --- a/src/plugins/intel_gpu/CMakeLists.txt +++ b/src/plugins/intel_gpu/CMakeLists.txt @@ -15,7 +15,7 @@ endif() if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") # 4267 4244 conversion from 'XXX' to 'YYY', possible loss of data ie_add_compiler_flags(/wd4244) - ie_add_compiler_flags(/wd4267) + # ie_add_compiler_flags(/wd4267) # '<': signed/unsigned mismatch ie_add_compiler_flags(/wd4018) endif() diff --git a/src/plugins/intel_gpu/src/graph/broadcast.cpp b/src/plugins/intel_gpu/src/graph/broadcast.cpp index 02a57e35b93bce..0b74c0020dd8c1 100644 --- a/src/plugins/intel_gpu/src/graph/broadcast.cpp +++ b/src/plugins/intel_gpu/src/graph/broadcast.cpp @@ -84,9 +84,9 @@ std::vector broadcast_inst::calc_output_layouts(broadcast_node const& /* ov::op::v3::shape_infer(&op, input_shapes, output_shapes, const_data); } else if (impl_param.input_layouts.size() >= 2) { auto input1 = impl_param.get_input_layout(1); - int output_rank = input1.get().size(); + int output_rank = static_cast(input1.get().size()); if (input1.is_static()) { - output_rank = input1.get_dim(0); // target shape rank is set as second input. + output_rank = static_cast(input1.get_dim(0)); // target shape rank is set as second input. 
} output_shapes[0] = ShapeType::dynamic(std::max(output_rank, static_cast(1))); } diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/pre_replace_deconv.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/pre_replace_deconv.cpp index 40229fe358cd6d..03c660751f108a 100644 --- a/src/plugins/intel_gpu/src/graph/graph_optimizer/pre_replace_deconv.cpp +++ b/src/plugins/intel_gpu/src/graph/graph_optimizer/pre_replace_deconv.cpp @@ -225,7 +225,7 @@ void pre_replace_deconv::run(program& p) { p.rename(deconv_node, rename_id); // reshape weights - int pixel_shuffle_size = scale_factor * scale_factor; + auto pixel_shuffle_size = scale_factor * scale_factor; int kernel_size = 5; tensor target_weights_size = { pixel_shuffle_size, filter_layout.feature(), kernel_size, kernel_size }; auto target_weights_layout = layout{ weights_layout.data_type, weights_layout.format, target_weights_size }; diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_primitive_fusing.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_primitive_fusing.cpp index dea6b9c7cf55d8..938b599e85ca5d 100644 --- a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_primitive_fusing.cpp +++ b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_primitive_fusing.cpp @@ -275,7 +275,7 @@ void prepare_primitive_fusing::fuse_bias(program &p) { for (size_t i = 0; i < const_shape.size(); ++i) { if (const_shape[i] != 1) { count_elements_not_one++; - idx_element_not_one = i; + idx_element_not_one = static_cast(i); } if (count_elements_not_one > 1) break; diff --git a/src/plugins/intel_gpu/src/graph/layout_optimizer.cpp b/src/plugins/intel_gpu/src/graph/layout_optimizer.cpp index f49827fb0eb5bd..ae0ed7f122359c 100644 --- a/src/plugins/intel_gpu/src/graph/layout_optimizer.cpp +++ b/src/plugins/intel_gpu/src/graph/layout_optimizer.cpp @@ -827,7 +827,7 @@ static bool is_node_for_onednn(fully_connected_node const& node) { auto fc_prim = node.get_primitive(); auto ps = 
node.get_output_layout().get_partial_shape(); int non_spatial_count = 2 + (fc_prim->input_size == 3 ? 1 : 0); - int rank = ps.size(); + int rank = static_cast(ps.size()); // OneDnn doesn't support spatial dimensions for output for (int i = non_spatial_count; i < rank; i++) { diff --git a/src/plugins/intel_gpu/src/graph/network.cpp b/src/plugins/intel_gpu/src/graph/network.cpp index 40217538874682..13c80f330e307b 100644 --- a/src/plugins/intel_gpu/src/graph/network.cpp +++ b/src/plugins/intel_gpu/src/graph/network.cpp @@ -541,7 +541,7 @@ void network::save(cldnn::BinaryOutputBuffer& ob) { } int exec_order_size; - exec_order_size = _exec_order.size(); + exec_order_size = static_cast(_exec_order.size()); ob << exec_order_size; for (const auto& p_inst : _exec_order) { diff --git a/src/plugins/intel_gpu/src/graph/program_node.cpp b/src/plugins/intel_gpu/src/graph/program_node.cpp index 140562e8d9354d..24dc4b5aa3cc3d 100644 --- a/src/plugins/intel_gpu/src/graph/program_node.cpp +++ b/src/plugins/intel_gpu/src/graph/program_node.cpp @@ -927,7 +927,7 @@ void program_node::init_onednn_primitive_attributes() { if (fused_desc->activation_function == cldnn::activation_func::relu_negative_slope && !fused_desc->additional_params_input.empty()) { auto dep_idx = cldnn_post_ops[idx].dep_start_idx; - int oc_dim = desc.output_layout.get_tensor().feature.size(); + int oc_dim = static_cast(desc.output_layout.get_tensor().feature.size()); post_ops.append_prelu(1 << oc_dim); update_onednn_post_op_list(onednn_post_op_type::binary_relu, dep_idx); } else if (fused_desc->activation_function == cldnn::activation_func::hard_sigmoid) { diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernel_selector_params.h b/src/plugins/intel_gpu/src/kernel_selector/kernel_selector_params.h index 207999c2c47b4f..40cd4165f0c4a0 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernel_selector_params.h +++ b/src/plugins/intel_gpu/src/kernel_selector/kernel_selector_params.h @@ -522,13 +522,13 @@ 
struct FusedOpsConfiguration { FusedOpsConfiguration& SetShuffleVarName(std::string val) { shuffle_var_name = val; return *this; } bool IsPostReorderFused(void) const { return orig_output_layout != DataLayout::DataLayoutCount; } int GetDimIndexFromOrder(Tensor::DataChannelName val) const { - int dims_num = bfzyx_idx_order.size(); + size_t dims_num = bfzyx_idx_order.size(); if (val == Tensor::DataChannelName::BATCH && dims_num >= 1) { return 0; } else if (val == Tensor::DataChannelName::FEATURE && dims_num >= 2) { return 1; } else if (dims_num >= 3 && dims_num - static_cast(val) - 1 >= 0) { - return bfzyx_idx_order.size() - static_cast(val) - 1; + return static_cast(bfzyx_idx_order.size()) - static_cast(val) - 1; } else { return -1; } diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernel_selector_utils.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernel_selector_utils.cpp index 5b5171df9e559b..cab26c337b49f9 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernel_selector_utils.cpp +++ b/src/plugins/intel_gpu/src/kernel_selector/kernel_selector_utils.cpp @@ -240,7 +240,7 @@ std::vector GetOptimalLocalWorkGroupSizes(std::vector gws, const if (axis_by_gws[layout_order[axis_idx]] != unused_axis) { bool is_already_exists = false; if (axis_idx > 0) { - for (int i = axis_idx - 1; i >= 0; i--) { + for (int i = static_cast(axis_idx) - 1; i >= 0; i--) { if (axis_by_gws[layout_order[axis_idx]] == axis_by_gws[layout_order[i]]) { is_already_exists = true; break; diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/convert_color/convert_color_kernel_base.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/convert_color/convert_color_kernel_base.cpp index a14ae64e8aeda5..a38c677b24cca2 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/convert_color/convert_color_kernel_base.cpp +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/convert_color/convert_color_kernel_base.cpp @@ -88,7 +88,7 @@ KernelsData 
ConvertColorKernelBase::GetCommonKernelsData(const Params& params, c auto jit = CreateJit(kernelName, cldnn_jit, entry_point); auto& kernel = kd.kernels[0]; - size_t number_of_inputs = prim_params.inputs.size(); + uint32_t number_of_inputs = static_cast(prim_params.inputs.size()); FillCLKernelData(kernel, dispatchData, params.engineInfo, kernelName, jit, entry_point, "", false, false, number_of_inputs); diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/ctc_loss/ctc_loss_kernel_ref.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/ctc_loss/ctc_loss_kernel_ref.cpp index 4d315b3c987f45..a4e3be0477b995 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/ctc_loss/ctc_loss_kernel_ref.cpp +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/ctc_loss/ctc_loss_kernel_ref.cpp @@ -44,7 +44,7 @@ KernelsData CTCLossKernelRef::GetKernelsData(const Params& params, const optiona {}, false, false, - kernel_params.inputs.size()); + static_cast(kernel_params.inputs.size())); return {kernel_data}; } diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/ed_rfe/roi_feature_extractor_kernel_ref.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/ed_rfe/roi_feature_extractor_kernel_ref.cpp index 762f2876732d3c..f84bb64fe18e10 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/ed_rfe/roi_feature_extractor_kernel_ref.cpp +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/ed_rfe/roi_feature_extractor_kernel_ref.cpp @@ -111,7 +111,7 @@ KernelsData ExperimentalDetectronROIFeatureExtractorRef::GetKernelsData(const Pa auto jit = CreateJit(kernelName, cldnn_jit, entry_point); auto& kernel = kd.kernels[0]; - FillCLKernelData(kernel, dispatch_data, params.engineInfo, kernelName, jit, entry_point, "", false, false, org_params.number_of_inputs); + FillCLKernelData(kernel, dispatch_data, params.engineInfo, kernelName, jit, entry_point, "", false, false, static_cast(org_params.number_of_inputs)); return {kd}; } diff --git 
a/src/plugins/intel_gpu/src/kernel_selector/kernels/gather/gather_kernel_ref.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/gather/gather_kernel_ref.cpp index 4e15c2d8fefb00..25d3517a4ac838 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/gather/gather_kernel_ref.cpp +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/gather/gather_kernel_ref.cpp @@ -180,7 +180,7 @@ CommonDispatchData GatherKernelRef::SetDefault(const gather_params& params) cons auto out_layout = params.outputs[0].GetLayout(); std::vector> dims_by_gws; - int rank = params.outputs[0].Dimentions(); + auto rank = params.outputs[0].Dimentions(); if (rank == 4) { dispatchData.gws = {output.X().v, output.Y().v, output.Feature().v * output.Batch().v}; dims_by_gws = {{Tensor::DataChannelName::X}, diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/matrix_nms/matrix_nms_kernel_ref.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/matrix_nms/matrix_nms_kernel_ref.cpp index b73e4288413d99..dc4cdcaee3506c 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/matrix_nms/matrix_nms_kernel_ref.cpp +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/matrix_nms/matrix_nms_kernel_ref.cpp @@ -49,8 +49,8 @@ MatrixNmsKernelRef::DispatchData SetDefault(const matrix_nms_params& params, siz } std::tuple GetMaxBoxes(const matrix_nms_params& params) { - const int classes_num = params.inputs[1].Feature().v; - const int boxes_num = params.inputs[0].Feature().v; + const int classes_num = static_cast(params.inputs[1].Feature().v); + const int boxes_num = static_cast(params.inputs[0].Feature().v); int max_boxes_per_class{boxes_num}; if (params.nms_top_k >= 0) @@ -79,8 +79,8 @@ KernelsData MatrixNmsKernelRef::GetKernelsData(const Params& params, const optio constexpr size_t BOX_INFO_SIZE{16}; - const int batches_num = new_params.inputs[1].Batch().v; - const int classes_num = new_params.inputs[1].Feature().v; + const int batches_num = 
static_cast(new_params.inputs[1].Batch().v); + const int classes_num = static_cast(new_params.inputs[1].Feature().v); int max_boxes_per_class, max_boxes_per_batch; std::tie(max_boxes_per_class, max_boxes_per_batch) = GetMaxBoxes(new_params); diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/multiclass_nms/multiclass_nms_kernel_ref.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/multiclass_nms/multiclass_nms_kernel_ref.cpp index ed356cdc1ce9fe..f44d9b52a71258 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/multiclass_nms/multiclass_nms_kernel_ref.cpp +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/multiclass_nms/multiclass_nms_kernel_ref.cpp @@ -87,7 +87,7 @@ JitConstants MulticlassNmsKernelRef::GetJitConstants(const multiclass_nms_params int64_t max_output_boxes_per_class = 0; if (params.nms_top_k >= 0) { - max_output_boxes_per_class = std::min(num_boxes, params.nms_top_k); + max_output_boxes_per_class = std::min(static_cast(num_boxes), params.nms_top_k); } else { max_output_boxes_per_class = num_boxes; } diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/resample/resample_kernel_opt.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/resample/resample_kernel_opt.cpp index 01e8f7758a2c5d..1633c62e1722e5 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/resample/resample_kernel_opt.cpp +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/resample/resample_kernel_opt.cpp @@ -90,7 +90,7 @@ static size_t get_vec_size(const resample_params ¶ms) { } static int get_feature_slice_size(const resample_params ¶ms) { - return 16 * get_vec_size(params); + return static_cast(16 * get_vec_size(params)); } ResampleKernelBase::DispatchData ResampleKernelOpt::SetDefault(const kernel_selector::resample_params &arg) const { diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/scatter_update/scatter_nd_update_kernel_ref.cpp 
b/src/plugins/intel_gpu/src/kernel_selector/kernels/scatter_update/scatter_nd_update_kernel_ref.cpp index d11630c978eb85..9ae4663b309a8e 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/scatter_update/scatter_nd_update_kernel_ref.cpp +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/scatter_update/scatter_nd_update_kernel_ref.cpp @@ -130,7 +130,7 @@ static std::string GetInputBlockND(const scatter_nd_update_params& params, size_ block_nd_s[rank] = "1"; size_t input_offset = num * 6; - for (int32_t idx = (rank - 1); idx >= 0; --idx) { + for (int32_t idx = static_cast(rank - 1); idx >= 0; --idx) { block_nd[idx] = input_dims[idx] * block_nd[idx + 1]; size_t dim_offset = idx < 2 ? idx : idx + 6 - rank; diff --git a/src/plugins/intel_gpu/src/plugin/ops/convolution.cpp b/src/plugins/intel_gpu/src/plugin/ops/convolution.cpp index 685371c2648e8a..0d1f8443eb02ad 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/convolution.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/convolution.cpp @@ -221,7 +221,7 @@ static void CreateGroupConvolutionBackpropDataOp(Program& p, const std::shared_p } } - uint32_t groups = op->get_input_shape(1)[0]; + uint32_t groups = static_cast(op->get_input_shape(1)[0]); auto weightsName = inputs[1]; auto weights_node = op->get_input_node_shared_ptr(1); diff --git a/src/plugins/intel_gpu/src/plugin/ops/ctc_greedy_decoder.cpp b/src/plugins/intel_gpu/src/plugin/ops/ctc_greedy_decoder.cpp index 65337876efedc7..2b4d0c13415b22 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/ctc_greedy_decoder.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/ctc_greedy_decoder.cpp @@ -45,7 +45,7 @@ static void CreateCommonCTCGreedyDecoderOp(Program& p, const std::shared_ptrget_input_shape(0).back() - 1; + uint32_t blank_index = static_cast(op->get_input_shape(0).back() - 1); if (reordered_inputs.size() == 3) { auto blank_index_node = std::dynamic_pointer_cast(op->get_input_node_shared_ptr(2)); if (!blank_index_node) { diff --git 
a/src/plugins/intel_gpu/src/plugin/ops/custom.cpp b/src/plugins/intel_gpu/src/plugin/ops/custom.cpp index 895dc9f5f550d6..cde2faf1e0ded6 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/custom.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/custom.cpp @@ -199,7 +199,7 @@ void CreateCustomOp(Program& p, const std::shared_ptr& op, CustomL IE_THROW() << "Invalid input tensor for index: " << iidx; auto inputDims = op->get_input_shape(iidx); - xDim = inputDims[inputDims.size() - 1]; + xDim = static_cast(inputDims[inputDims.size() - 1]); yDim = dims.size() > 1 ? inputDims[inputDims.size() - 2] : 0; featureDim = dims.size() > 2 ? inputDims[inputDims.size() - 3] : 0; batchDim = dims.size() > 3 ? inputDims[inputDims.size() - 4]: 0; diff --git a/src/plugins/intel_gpu/src/plugin/ops/dft.cpp b/src/plugins/intel_gpu/src/plugin/ops/dft.cpp index a785e7543a3353..42e079e8149335 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/dft.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/dft.cpp @@ -29,7 +29,7 @@ void createDft(Program& p, IE_THROW() << "Unsupported parameter nodes type in " << friendly_name << " (" << op->get_type_name() << ")"; } auto axes = axes_constant->cast_vector(); - uint8_t axis_correction = op->get_input_shape(0).size(); + uint8_t axis_correction = static_cast(op->get_input_shape(0).size()); if (direction != cldnn::dft_direction::forward || mode != cldnn::dft_mode::real) { --axis_correction; } diff --git a/src/plugins/intel_gpu/src/plugin/ops/gather_nd.cpp b/src/plugins/intel_gpu/src/plugin/ops/gather_nd.cpp index 7e3e38e9a074dd..04d2d7dead7bc9 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/gather_nd.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/gather_nd.cpp @@ -21,7 +21,8 @@ static void CreateGatherNDOp(Program& p, const std::shared_ptrget_input_partial_shape(0).size(); auto indices_rank = op->get_input_partial_shape(1).size(); - auto batch_dims = op->get_batch_dims(); + auto batch_dims = static_cast(op->get_batch_dims()); + auto batch_merged_output = true; 
auto primitive = cldnn::gather_nd(layerName, inputs[0], @@ -29,7 +30,7 @@ static void CreateGatherNDOp(Program& p, const std::shared_ptr= 0; i--) { + for (int i = static_cast(dims.size() - 1); i >= 0; i--) { if (dims[i] == 1) axis--; else diff --git a/src/plugins/intel_gpu/src/plugin/ops/proposal.cpp b/src/plugins/intel_gpu/src/plugin/ops/proposal.cpp index 414df4c77d3db7..c5083f7c6f984b 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/proposal.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/proposal.cpp @@ -20,7 +20,7 @@ static void CreateProposalOp(Program& p, const std::shared_ptrget_attrs(); float nms_thresh = attrs.nms_thresh; - int min_size = attrs.min_size; + int min_size = static_cast(attrs.min_size); int feature_stride = attrs.feat_stride; int pre_nms_topn = attrs.pre_nms_topn; int post_nms_topn = attrs.post_nms_topn; diff --git a/src/plugins/intel_gpu/src/plugin/ops/region_yolo.cpp b/src/plugins/intel_gpu/src/plugin/ops/region_yolo.cpp index c61f2e8c4274de..58ce0285aa3490 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/region_yolo.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/region_yolo.cpp @@ -17,11 +17,11 @@ static void CreateRegionYoloOp(Program& p, const std::shared_ptrget_num_coords(); - uint32_t classes = op->get_num_classes(); - uint32_t num = op->get_num_regions(); + uint32_t coords = static_cast(op->get_num_coords()); + uint32_t classes = static_cast(op->get_num_classes()); + uint32_t num = static_cast(op->get_num_regions()); bool do_softmax = op->get_do_softmax(); - uint32_t mask_size = op->get_mask().size(); + uint32_t mask_size = static_cast(op->get_mask().size()); auto regionPrim = cldnn::region_yolo(layerName, inputs[0], diff --git a/src/plugins/intel_gpu/src/plugin/ops/reorg_yolo.cpp b/src/plugins/intel_gpu/src/plugin/ops/reorg_yolo.cpp index e182dfc122f7e2..9982fa08a9fa3f 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/reorg_yolo.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/reorg_yolo.cpp @@ -17,7 +17,7 @@ static void 
CreateReorgYoloOp(Program& p, const std::shared_ptrget_strides()[0]; + uint32_t stride = static_cast(op->get_strides()[0]); auto reorgPrim = cldnn::reorg_yolo(layerName, inputs[0], diff --git a/src/plugins/intel_gpu/src/plugin/ops/reverse_sequence.cpp b/src/plugins/intel_gpu/src/plugin/ops/reverse_sequence.cpp index 5a6f28667c3c68..1953a1c47d4f27 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/reverse_sequence.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/reverse_sequence.cpp @@ -17,8 +17,8 @@ static void CreateReverseSequenceOp(Program& p, const std::shared_ptrget_batch_axis(); - size_t seq_axis = op->get_sequence_axis(); + auto batch_axis = static_cast(op->get_batch_axis()); + auto seq_axis = static_cast(op->get_sequence_axis()); auto reverseSequencePrim = cldnn::reverse_sequence(layerName, inputs[0], inputs[1], diff --git a/src/plugins/intel_gpu/src/plugin/ops/rnn.cpp b/src/plugins/intel_gpu/src/plugin/ops/rnn.cpp index 398f0b52e73bfd..bfcbe84da82b59 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/rnn.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/rnn.cpp @@ -83,9 +83,9 @@ static void CreateLSTMCellOp(Program& p, const std::shared_ptrget_input_shape(2).size() != 2) IE_THROW() << "Wrong input shapes for LSTMCell op " << op->get_friendly_name(); - lstm_input_size = in_dims0.back(); - lstm_batch_size = in_dims0.at(in_dims0.size()-2); - lstm_hidden_size = out_dims0.back(); + lstm_input_size = static_cast(in_dims0.back()); + lstm_batch_size = static_cast(in_dims0.at(in_dims0.size()-2)); + lstm_hidden_size = static_cast(out_dims0.back()); } std::vector activations; @@ -179,10 +179,10 @@ static void CreateLSTMSequenceOp(Program& p, const std::shared_ptrget_input_shape(2).size() != 3) IE_THROW() << "Wrong input shapes for LSTMSequence op " << op->get_friendly_name(); - lstm_input_size = in_dims0.back(); - lstm_sequence_len = in_dims0.at(in_dims0.size() - 2); - lstm_batch_size = in_dims0.at(in_dims0.size() - 3); - lstm_hidden_size = out_dims0.back(); + 
lstm_input_size = static_cast(in_dims0.back()); + lstm_sequence_len = static_cast(in_dims0.at(in_dims0.size() - 2)); + lstm_batch_size = static_cast(in_dims0.at(in_dims0.size() - 3)); + lstm_hidden_size = static_cast(out_dims0.back()); } std::vector activations; diff --git a/src/plugins/intel_gpu/src/plugin/ops/roi_pooling.cpp b/src/plugins/intel_gpu/src/plugin/ops/roi_pooling.cpp index 9a8a017f9041c7..9f55e3f41b3f78 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/roi_pooling.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/roi_pooling.cpp @@ -68,8 +68,8 @@ static void CreatePSROIPoolingOp(Program& p, const std::shared_ptrget_mode()); - int group_size = op->get_group_size(); - int output_dim = op->get_output_dim(); + int group_size = static_cast(op->get_group_size()); + int output_dim = static_cast(op->get_output_dim()); float spatial_scale = op->get_spatial_scale(); int spatial_bins_x = op->get_spatial_bins_x(); int spatial_bins_y = op->get_spatial_bins_y(); diff --git a/src/plugins/intel_gpu/src/plugin/ops/roll.cpp b/src/plugins/intel_gpu/src/plugin/ops/roll.cpp index 57b838e5e7363f..76f628b8774f24 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/roll.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/roll.cpp @@ -23,7 +23,7 @@ void CreateRollOp(Program& p, const std::shared_ptr& op) { const auto& input_pshape = op->get_input_partial_shape(0); OPENVINO_ASSERT(input_pshape.is_static(), "Dynamic shapes are not supported for Roll operation yet"); const auto& input_shape = input_pshape.to_shape(); - const uint8_t rank = input_shape.size(); + const auto rank = input_shape.size(); const auto format = cldnn::format::get_default_format(rank); const auto default_rank = format.dimension(); diff --git a/src/plugins/intel_gpu/src/plugin/ops/split.cpp b/src/plugins/intel_gpu/src/plugin/ops/split.cpp index 626b174c51a8b2..f6f493e4598dfa 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/split.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/split.cpp @@ -21,14 +21,14 @@ static void 
CreateCommonSplitOp(Program& p, const std::shared_ptr& auto inputs = p.GetInputInfo(op); if (p.use_new_shape_infer() || op->is_dynamic()) { cldnn::crop_ngraph_op_mode op_mode = cldnn::crop_ngraph_op_mode::variadic_split; - size_t num_splits = 1; + auto num_splits = static_cast(1); if (ngraph::is_type(op)) { num_splits = ngraph::as_type_ptr(op)->get_num_splits(); op_mode = cldnn::crop_ngraph_op_mode::split; } for (size_t i = 0; i < op->get_output_size(); i++) { - auto cropPrim = cldnn::crop(get_layer_name(i), inputs, cldnn::tensor(1), cldnn::tensor(0), op_mode, i, num_splits); + auto cropPrim = cldnn::crop(get_layer_name(i), inputs, cldnn::tensor(1), cldnn::tensor(0), op_mode, static_cast(i), num_splits); p.add_primitive(*op, cropPrim); } } else { diff --git a/src/plugins/intel_gpu/src/plugin/ops/strided_slice.cpp b/src/plugins/intel_gpu/src/plugin/ops/strided_slice.cpp index 360fa7819a5a88..1bdcf90c33d8c7 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/strided_slice.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/strided_slice.cpp @@ -91,9 +91,11 @@ static void CreateStridedSliceOp(Program& p, const std::shared_ptr(begin.size() - axis - num_new_axis_after_ellipses - 1); + unsigned long num_of_hidden_dims = + static_cast(input_shape.size() - num_input_axis_after_ellipses + - num_input_axis_before_ellipses); for (size_t i = 0; i < num_of_hidden_dims; ++i) { axes.emplace_back(uniq_id); uniq_id++; diff --git a/src/plugins/intel_gpu/src/plugin/ops/topk.cpp b/src/plugins/intel_gpu/src/plugin/ops/topk.cpp index 13870b5f52aed8..8830a54915cc7e 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/topk.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/topk.cpp @@ -22,7 +22,7 @@ static void CreateTopKOp(Program& p, const std::shared_ptr ov::op::TopKMode mode = op->get_mode(); ov::op::TopKSortType stype = op->get_sort_type(); - uint32_t top_k = op->get_k(); + uint32_t top_k = static_cast(op->get_k()); uint64_t chosen_axis = op->get_axis(); if (p.use_new_shape_infer()) { diff --git 
a/src/plugins/intel_gpu/src/plugin/program.cpp b/src/plugins/intel_gpu/src/plugin/program.cpp index 8c078256de5563..1065178bcd491c 100644 --- a/src/plugins/intel_gpu/src/plugin/program.cpp +++ b/src/plugins/intel_gpu/src/plugin/program.cpp @@ -186,7 +186,7 @@ Program::Program(InferenceEngine::CNNNetwork& network, cldnn::engine& engine, co } int m_bv_sz = GetMaxBatchSizeForSingleProgram(); - m_max_batch = m_config.get_property(ov::intel_gpu::max_dynamic_batch); + m_max_batch = static_cast(m_config.get_property(ov::intel_gpu::max_dynamic_batch)); if (dyn_shape_batch_found || m_max_batch > 1) { // compile log2 networks to serve dynamic batch requests @@ -307,7 +307,7 @@ int Program::GetMaxBatchSizeForSingleProgram() { auto max_dynamic_batch = m_config.get_property(ov::intel_gpu::max_dynamic_batch); if (max_dynamic_batch > 1) { // calculate number of networks necessary based on binary log - unsigned int tmp = max_dynamic_batch; + unsigned int tmp = static_cast(max_dynamic_batch); unsigned int mask = 1U << 31; unsigned int ldigit = 31; diff --git a/src/plugins/intel_gpu/src/runtime/layout.cpp b/src/plugins/intel_gpu/src/runtime/layout.cpp index 544dfea131cb61..fad8eb1bb27151 100644 --- a/src/plugins/intel_gpu/src/runtime/layout.cpp +++ b/src/plugins/intel_gpu/src/runtime/layout.cpp @@ -614,7 +614,7 @@ ov::PartialShape layout::transform(cldnn::format new_fmt) const { } auto new_dims = convert_dimensions(new_sizes, default_fmt.internal_order(), new_fmt.order()); - for (int idx = (new_dims.size() - 1); idx >= 0; idx--) { + for (int idx = static_cast(new_dims.size() - 1); idx >= 0; idx--) { if (new_dims[idx] == -1) new_dims.erase((new_dims.begin() + idx)); else if (new_dims[idx] < 0) From 84d62dd21bcb72e1ebc798f6ea8f559696c0b10a Mon Sep 17 00:00:00 2001 From: Gorbachev Date: Tue, 7 Mar 2023 18:40:40 +0000 Subject: [PATCH 02/12] fix 2-10 --- .../include/intel_gpu/plugin/graph.hpp | 2 +- .../graph_optimizer/pre_replace_deconv.cpp | 2 +- 
.../graph/graph_optimizer/prepare_padding.cpp | 14 +++++++------- .../impls/onednn/concatenation_onednn.cpp | 4 ++-- .../src/graph/include/sliding_window_utils.hpp | 18 +++++++++--------- .../intel_gpu/src/graph/primitive_inst.cpp | 4 ++-- .../intel_gpu/src/plugin/infer_request.cpp | 2 +- .../src/plugin/infer_request_legacy.cpp | 8 ++++---- .../intel_gpu/src/plugin/ops/constant.cpp | 2 +- .../intel_gpu/src/plugin/ops/custom.cpp | 6 +++--- .../src/plugin/ops/detection_output.cpp | 4 ++-- .../intel_gpu/src/plugin/ops/gather_nd.cpp | 9 +++------ .../intel_gpu/src/plugin/ops/proposal.cpp | 8 ++++---- .../intel_gpu/src/plugin/ops/roi_pooling.cpp | 4 ++-- .../intel_gpu/src/plugin/ops/strided_slice.cpp | 2 +- src/plugins/intel_gpu/src/plugin/program.cpp | 4 ++-- .../src/plugin/transformations_pipeline.cpp | 6 +++--- 17 files changed, 48 insertions(+), 51 deletions(-) diff --git a/src/plugins/intel_gpu/include/intel_gpu/plugin/graph.hpp b/src/plugins/intel_gpu/include/intel_gpu/plugin/graph.hpp index 8e831a43d8b692..ec3556e684fd8c 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/plugin/graph.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/plugin/graph.hpp @@ -59,7 +59,7 @@ class Graph { cldnn::engine& get_engine() const { return m_context->get_engine(); } const ExecutionConfig& get_config() const { return m_config; } - int GetMaxDynamicBatchSize() const { return m_config.get_property(ov::intel_gpu::max_dynamic_batch); } + int GetMaxDynamicBatchSize() const { return static_cast(m_config.get_property(ov::intel_gpu::max_dynamic_batch));} const std::map& GetInputLayouts() const { return m_program->GetInputLayouts(); } const InferenceEngine::InputsDataMap GetNetworkInputs() const { return m_program->GetNetworkInputs(); } const InferenceEngine::OutputsDataMap GetNetworkOutputs() const { return m_program->GetNetworkOutputs(); } diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/pre_replace_deconv.cpp 
b/src/plugins/intel_gpu/src/graph/graph_optimizer/pre_replace_deconv.cpp index 03c660751f108a..c56b2690cd1072 100644 --- a/src/plugins/intel_gpu/src/graph/graph_optimizer/pre_replace_deconv.cpp +++ b/src/plugins/intel_gpu/src/graph/graph_optimizer/pre_replace_deconv.cpp @@ -225,7 +225,7 @@ void pre_replace_deconv::run(program& p) { p.rename(deconv_node, rename_id); // reshape weights - auto pixel_shuffle_size = scale_factor * scale_factor; + auto pixel_shuffle_size = static_cast(scale_factor * scale_factor); int kernel_size = 5; tensor target_weights_size = { pixel_shuffle_size, filter_layout.feature(), kernel_size, kernel_size }; auto target_weights_layout = layout{ weights_layout.data_type, weights_layout.format, target_weights_size }; diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_padding.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_padding.cpp index c7860dc5ac93ae..34700379db22cc 100644 --- a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_padding.cpp +++ b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_padding.cpp @@ -102,7 +102,7 @@ void prepare_padding::run(program& p) { // WA for this format. sliding window needs to be fixed --perf degradation for IncepctionV1 type models tensor size(1); for (size_t i = 0; i < prim->size.size(); i++) { - size.spatial[i] = prim->size[prim->size.size() - i - 1]; + size.spatial[i] = static_cast(prim->size[prim->size.size() - i - 1]); } if (node->get_output_layout().format == format::b_fs_yx_fsv16) @@ -183,13 +183,13 @@ void prepare_padding::run(program& p) { auto pad = conv->pad; auto stride = conv->stride; auto dilation = conv->dilation; - uint32_t stride_z = stride.size() >= 3 ? stride[stride.size() - 3] : 1; - uint32_t stride_y = stride.size() >= 2 ? stride[stride.size() - 2] : 1; - uint32_t stride_x = stride.size() >= 1 ? stride[stride.size() - 1] : 1; + uint32_t stride_z = stride.size() >= 3 ? 
static_cast(stride[stride.size() - 3]) : 1; + uint32_t stride_y = stride.size() >= 2 ? static_cast(stride[stride.size() - 2]) : 1; + uint32_t stride_x = stride.size() >= 1 ? static_cast(stride[stride.size() - 1]) : 1; - uint32_t dilation_z = dilation.size() >= 3 ? dilation[dilation.size() - 3] : 1; - uint32_t dilation_y = dilation.size() >= 2 ? dilation[dilation.size() - 2] : 1; - uint32_t dilation_x = dilation.size() >= 1 ? dilation[dilation.size() - 1] : 1; + uint32_t dilation_z = dilation.size() >= 3 ? static_cast(dilation[dilation.size() - 3]) : 1; + uint32_t dilation_y = dilation.size() >= 2 ? static_cast(dilation[dilation.size() - 2]) : 1; + uint32_t dilation_x = dilation.size() >= 1 ? static_cast(dilation[dilation.size() - 1]) : 1; tensor::value_type pad_z = pad.size() >= 3 ? pad[pad.size() - 3] : 0; tensor::value_type pad_y = pad.size() >= 2 ? pad[pad.size() - 2] : 0; diff --git a/src/plugins/intel_gpu/src/graph/impls/onednn/concatenation_onednn.cpp b/src/plugins/intel_gpu/src/graph/impls/onednn/concatenation_onednn.cpp index aa798b390f8f35..d991b891e62b7b 100644 --- a/src/plugins/intel_gpu/src/graph/impls/onednn/concatenation_onednn.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/onednn/concatenation_onednn.cpp @@ -34,8 +34,8 @@ struct concatenation_onednn : typed_primitive_onednn_impl(i))); + args.insert({input_idx++, input.get_onednn_memory(_pd.dnnl::primitive_desc_base::src_desc(static_cast(i)), offset)}); } { diff --git a/src/plugins/intel_gpu/src/graph/include/sliding_window_utils.hpp b/src/plugins/intel_gpu/src/graph/include/sliding_window_utils.hpp index 6899ef9bc3c2ef..d9945e3b42cf08 100644 --- a/src/plugins/intel_gpu/src/graph/include/sliding_window_utils.hpp +++ b/src/plugins/intel_gpu/src/graph/include/sliding_window_utils.hpp @@ -104,9 +104,9 @@ inline tensor calc_sliding_window_output_range(const tensor& inp auto stride_y = stride.size() >= 2 ? stride[stride.size() - 2] : 1; auto stride_x = stride.size() >= 1 ? 
stride[stride.size() - 1] : 1; - tensor::value_type dilation_z = dilation.size() >= 3 ? dilation[dilation.size() - 3] : 1; - tensor::value_type dilation_y = dilation.size() >= 2 ? dilation[dilation.size() - 2] : 1; - tensor::value_type dilation_x = dilation.size() >= 1 ? dilation[dilation.size() - 1] : 1; + tensor::value_type dilation_z = dilation.size() >= 3 ? static_cast(dilation[dilation.size() - 3]) : 1; + tensor::value_type dilation_y = dilation.size() >= 2 ? static_cast(static_cast(dilation[dilation.size() - 2])) : 1; + tensor::value_type dilation_x = dilation.size() >= 1 ? static_cast(dilation[dilation.size() - 1]) : 1; auto pad_z = pad.size() >= 3 ? pad[pad.size() - 3] : 0; auto pad_y = pad.size() >= 2 ? pad[pad.size() - 2] : 0; @@ -161,9 +161,9 @@ inline tensor calc_sliding_window_output_range(const ten int64_t stride_y = stride.size() >= 2 ? stride[stride.size() - 2] : 1; int64_t stride_x = stride.size() >= 1 ? stride[stride.size() - 1] : 1; - tensor::value_type dilation_z = dilation.size() >= 3 ? dilation[dilation.size() - 3] : 1; - tensor::value_type dilation_y = dilation.size() >= 2 ? dilation[dilation.size() - 2] : 1; - tensor::value_type dilation_x = dilation.size() >= 1 ? dilation[dilation.size() - 1] : 1; + tensor::value_type dilation_z = dilation.size() >= 3 ? static_cast(dilation[dilation.size() - 3]) : 1; + tensor::value_type dilation_y = dilation.size() >= 2 ? static_cast(dilation[dilation.size() - 2]) : 1; + tensor::value_type dilation_x = dilation.size() >= 1 ? static_cast(dilation[dilation.size() - 1]) : 1; int64_t pad_z = pad.size() >= 3 ? pad[pad.size() - 3] : 0; int64_t pad_y = pad.size() >= 2 ? pad[pad.size() - 2] : 0; @@ -347,9 +347,9 @@ inline tensor calc_sliding_window_needed_input_range(const tensor& output_size, auto stride_y = stride.size() >= 2 ? stride[stride.size() - 2] : 1; auto stride_x = stride.size() >= 1 ? stride[stride.size() - 1] : 1; - tensor::value_type dilation_z = dilation.size() >= 3 ? 
dilation[dilation.size() - 3] : 1; - tensor::value_type dilation_y = dilation.size() >= 2 ? dilation[dilation.size() - 2] : 1; - tensor::value_type dilation_x = dilation.size() >= 1 ? dilation[dilation.size() - 1] : 1; + tensor::value_type dilation_z = dilation.size() >= 3 ? static_cast(dilation[dilation.size() - 3]) : 1; + tensor::value_type dilation_y = dilation.size() >= 2 ? static_cast(dilation[dilation.size() - 2]) : 1; + tensor::value_type dilation_x = dilation.size() >= 1 ? static_cast(dilation[dilation.size() - 1]) : 1; auto pad_z = pad.size() >= 3 ? pad[pad.size() - 3] : 0; auto pad_y = pad.size() >= 2 ? pad[pad.size() - 2] : 0; diff --git a/src/plugins/intel_gpu/src/graph/primitive_inst.cpp b/src/plugins/intel_gpu/src/graph/primitive_inst.cpp index 29b97f400be713..aae9e844a4eeb6 100644 --- a/src/plugins/intel_gpu/src/graph/primitive_inst.cpp +++ b/src/plugins/intel_gpu/src/graph/primitive_inst.cpp @@ -300,7 +300,7 @@ bool primitive_inst::update_impl() { size_t offset = 0; for (size_t i = 0; i < _node->get_dependencies().size(); i++) { if (_node->get_dependency(i).get_output_layout().is_dynamic()) { - auto input_shape = _node->type()->extend_input_shape_to_6d(params, i); + auto input_shape = _node->type()->extend_input_shape_to_6d(params, static_cast(i)); for (size_t j = 0; j < input_shape.size(); j++) lock[offset++] = static_cast(input_shape[j]); } @@ -308,7 +308,7 @@ bool primitive_inst::update_impl() { for (size_t i = 0; i < _node->get_output_layouts().size(); i++) { if (_node->get_output_layout(i).is_dynamic()) { - auto output_shape = _node->type()->extend_output_shape_to_6d(params, i); + auto output_shape = _node->type()->extend_output_shape_to_6d(params, static_cast(i)); for (size_t j = 0; j < output_shape.size(); j++) lock[offset++] = static_cast(output_shape[j]); } diff --git a/src/plugins/intel_gpu/src/plugin/infer_request.cpp b/src/plugins/intel_gpu/src/plugin/infer_request.cpp index 1619c5cf1959fa..5f5fc4ad174c1b 100644 --- 
a/src/plugins/intel_gpu/src/plugin/infer_request.cpp +++ b/src/plugins/intel_gpu/src/plugin/infer_request.cpp @@ -592,7 +592,7 @@ void InferRequest::setup_stream_graph() { auto& streamGraphs = static_cast(_exeNetwork.get())->m_graphs; if (nullptr != streamExecutor) { streamID = streamExecutor->GetStreamId(); - int numGraphs = streamGraphs.size(); + auto numGraphs = streamGraphs.size(); streamID = streamID % numGraphs; } m_graph = streamGraphs[streamID]; diff --git a/src/plugins/intel_gpu/src/plugin/infer_request_legacy.cpp b/src/plugins/intel_gpu/src/plugin/infer_request_legacy.cpp index 35fbf6c55b4f76..32a54b3bf6c1ea 100644 --- a/src/plugins/intel_gpu/src/plugin/infer_request_legacy.cpp +++ b/src/plugins/intel_gpu/src/plugin/infer_request_legacy.cpp @@ -127,7 +127,7 @@ void checkInputBlob(const Blob::Ptr &blob, checkAlloc(nv12_ptr->uv(), str_input_not_allocated); } else if (auto batched_ptr = blob->as()) { for (size_t i = 0; i < batched_ptr->size(); i++) { - auto nv12_ptr = getNV12BlobOrException(batched_ptr, i); + auto nv12_ptr = getNV12BlobOrException(batched_ptr, static_cast(i)); checkAlloc(nv12_ptr->y(), str_input_not_allocated); checkAlloc(nv12_ptr->uv(), str_input_not_allocated); } @@ -289,7 +289,7 @@ void InferRequestLegacy::SetBlob(const std::string& name, const Blob::Ptr& data) auto batched_ptr = data->as(); bool is_batched = batched_ptr != nullptr; bool is_nv12 = nv12_ptr != nullptr; - int expected_batch = is_batched ? desc.getDims()[0] : 1; + auto expected_batch = is_batched ? desc.getDims()[0] : 1; if (ColorFormat::NV12 == foundInput->getPreProcess().getColorFormat() && m_graph->get_config().get_property(ov::intel_gpu::nv12_two_inputs)) { // try extracting Y and UV remote blobs from it @@ -297,12 +297,12 @@ void InferRequestLegacy::SetBlob(const std::string& name, const Blob::Ptr& data) // that should then go into biplanar NV12 reorder if (is_nv12 || is_batched) { - int num_blobs = is_batched ? batched_ptr->size() : 1; + auto num_blobs = is_batched ? 
batched_ptr->size() : 1; for (auto i = 0; i < expected_batch; i++) { std::string y_name = name + "_Y" + std::to_string(i); std::string uv_name = name + "_UV" + std::to_string(i); if (is_batched) { - int idx = i < num_blobs ? i : num_blobs-1; + int idx = i < num_blobs ? i : static_cast(num_blobs)-1; nv12_ptr = getNV12BlobOrException(batched_ptr, idx); } diff --git a/src/plugins/intel_gpu/src/plugin/ops/constant.cpp b/src/plugins/intel_gpu/src/plugin/ops/constant.cpp index 3bffd477773069..1714e50e5da7be 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/constant.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/constant.cpp @@ -154,7 +154,7 @@ void createClDnnConstant(Program& p, const ngraph::Shape& constDims, const std:: auto constFormat = cldnn::format::get_default_format(constDims.size()); if (props.needsBatchInterpretation) { - constTensor.batch[0] = constTensor.count(); + constTensor.batch[0] = static_cast(constTensor.count()); constTensor.feature[0] = 1; } diff --git a/src/plugins/intel_gpu/src/plugin/ops/custom.cpp b/src/plugins/intel_gpu/src/plugin/ops/custom.cpp index cde2faf1e0ded6..267db4ecb19b22 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/custom.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/custom.cpp @@ -200,9 +200,9 @@ void CreateCustomOp(Program& p, const std::shared_ptr& op, CustomL auto inputDims = op->get_input_shape(iidx); xDim = static_cast(inputDims[inputDims.size() - 1]); - yDim = dims.size() > 1 ? inputDims[inputDims.size() - 2] : 0; - featureDim = dims.size() > 2 ? inputDims[inputDims.size() - 3] : 0; - batchDim = dims.size() > 3 ? inputDims[inputDims.size() - 4]: 0; + yDim = dims.size() > 1 ? static_cast(inputDims[inputDims.size() - 2]) : 0; + featureDim = dims.size() > 2 ? static_cast(inputDims[inputDims.size() - 3]) : 0; + batchDim = dims.size() > 3 ? 
static_cast(inputDims[inputDims.size() - 4]) : 0; } const std::map vars = { { 'b', batchDim } , { 'B', batchDim }, diff --git a/src/plugins/intel_gpu/src/plugin/ops/detection_output.cpp b/src/plugins/intel_gpu/src/plugin/ops/detection_output.cpp index 7af50dc7bc1b56..542ce6e13591d4 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/detection_output.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/detection_output.cpp @@ -43,8 +43,8 @@ static void CreateDetectionOutputOp(Program& p, const std::shared_ptr(attrs.input_width); + int input_height = static_cast(attrs.input_height); bool normalized = attrs.normalized; std::string code_type = attrs.code_type; bool clip_before_nms = attrs.clip_before_nms; diff --git a/src/plugins/intel_gpu/src/plugin/ops/gather_nd.cpp b/src/plugins/intel_gpu/src/plugin/ops/gather_nd.cpp index 04d2d7dead7bc9..902db8426b2714 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/gather_nd.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/gather_nd.cpp @@ -20,17 +20,14 @@ static void CreateGatherNDOp(Program& p, const std::shared_ptrget_input_partial_shape(0).size(); auto indices_rank = op->get_input_partial_shape(1).size(); - - auto batch_dims = static_cast(op->get_batch_dims()); - auto batch_merged_output = true; + auto batch_dims = static_cast(op->get_batch_dims()); auto primitive = cldnn::gather_nd(layerName, inputs[0], inputs[1], input_rank, indices_rank, - batch_dims, - batch_merged_output); + batch_dims); p.add_primitive(*op, primitive); } @@ -45,7 +42,7 @@ static void CreateGatherNDOp(Program& p, const std::shared_ptrget_input_partial_shape(0).size(); auto indices_rank = op->get_input_partial_shape(1).size(); - auto batch_dims = op->get_batch_dims(); + auto batch_dims = static_cast(op->get_batch_dims()); auto primitive = cldnn::gather_nd(layerName, inputs[0], diff --git a/src/plugins/intel_gpu/src/plugin/ops/proposal.cpp b/src/plugins/intel_gpu/src/plugin/ops/proposal.cpp index c5083f7c6f984b..dbec5aebe7a9c2 100644 --- 
a/src/plugins/intel_gpu/src/plugin/ops/proposal.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/proposal.cpp @@ -21,14 +21,14 @@ static void CreateProposalOp(Program& p, const std::shared_ptrget_attrs(); float nms_thresh = attrs.nms_thresh; int min_size = static_cast(attrs.min_size); - int feature_stride = attrs.feat_stride; - int pre_nms_topn = attrs.pre_nms_topn; - int post_nms_topn = attrs.post_nms_topn; + int feature_stride = static_cast(attrs.feat_stride); + int pre_nms_topn = static_cast(attrs.pre_nms_topn); + int post_nms_topn = static_cast(attrs.post_nms_topn); const std::vector ratio = attrs.ratio; const std::vector scale = attrs.scale; float box_coordinate_scale = attrs.box_coordinate_scale; float box_size_scale = attrs.box_size_scale; - int base_size = attrs.base_size; + int base_size = static_cast(attrs.base_size); std::string framework = attrs.framework; bool normalize = attrs.normalize; bool clip_before_nms = attrs.clip_before_nms; diff --git a/src/plugins/intel_gpu/src/plugin/ops/roi_pooling.cpp b/src/plugins/intel_gpu/src/plugin/ops/roi_pooling.cpp index 9f55e3f41b3f78..053819f8cc343b 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/roi_pooling.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/roi_pooling.cpp @@ -96,8 +96,8 @@ static void CreateROIPoolingOp(Program& p, const std::shared_ptrget_output_size(); - int pooled_height = out_size[0]; - int pooled_width = out_size[1]; + int pooled_height = static_cast(out_size[0]); + int pooled_width = static_cast(out_size[1]); float spatial_scale = op->get_spatial_scale(); bool position_sensitive = false; diff --git a/src/plugins/intel_gpu/src/plugin/ops/strided_slice.cpp b/src/plugins/intel_gpu/src/plugin/ops/strided_slice.cpp index 1bdcf90c33d8c7..e31cf2ccb6af03 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/strided_slice.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/strided_slice.cpp @@ -209,7 +209,7 @@ static void CreateStridedSliceOp(Program& p, const std::shared_ptr 3) { IE_THROW() << "Invalid crop 
axis: " << std::to_string(axes[i]) << " in op " + op->get_friendly_name(); } - offset_tensor[axes[i]] = offset[i]; + offset_tensor[axes[i]] = static_cast(offset[i]); } ngraph::Shape crop_shape(reshape_pattern); diff --git a/src/plugins/intel_gpu/src/plugin/program.cpp b/src/plugins/intel_gpu/src/plugin/program.cpp index 1065178bcd491c..ff6702b3ce868b 100644 --- a/src/plugins/intel_gpu/src/plugin/program.cpp +++ b/src/plugins/intel_gpu/src/plugin/program.cpp @@ -469,9 +469,9 @@ std::vector Program::GetInputInfo(const std::shared_ptrget_input_source_output(i).get_index())); + inputInfo.push_back(cldnn::input_info(primitive_ids.at(prevName), is_legacy_multiple_outputs ? 0: static_cast(op->get_input_source_output(i).get_index()))); } else { - inputInfo.push_back(cldnn::input_info(prevName, is_legacy_multiple_outputs ? 0 : op->get_input_source_output(i).get_index())); + inputInfo.push_back(cldnn::input_info(prevName, is_legacy_multiple_outputs ? 0 : static_cast(op->get_input_source_output(i).get_index()))); } } return inputInfo; diff --git a/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp b/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp index 8e40258785361b..78ccd48ab3419a 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp +++ b/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp @@ -354,14 +354,14 @@ void TransformationsPipeline::apply(std::shared_ptr func) { auto axesVal = axesNode->cast_vector(); auto& mvnShape = mvn->get_output_partial_shape(0); for (int32_t& axis : axesVal) - axis = axis < 0 ? axis + mvnShape.size() : axis; + axis = axis < 0 ? 
axis + static_cast(mvnShape.size()) : axis; std::sort(axesVal.begin(), axesVal.end()); if (mvnShape.size() == 1) return false; if (mvnShape.size() > 5 || (mvnShape.size() != axesVal.size() + 1 && mvnShape.size() != axesVal.size() + 2)) return false; - int value = mvnShape.size() - 1; - for (int i = axesVal.size() - 1; i >= 0; i--, value--) { + int value = static_cast(mvnShape.size()) - 1; + for (int i = static_cast(axesVal.size()) - 1; i >= 0; i--, value--) { if (axesVal[i] != value) return false; } From dc33d2bd7fbf84fd3bad9762059a25a2569de875 Mon Sep 17 00:00:00 2001 From: Gorbachev Date: Thu, 9 Mar 2023 14:43:49 +0000 Subject: [PATCH 03/12] fixed code style --- src/plugins/intel_gpu/src/plugin/infer_request_legacy.cpp | 4 ++-- src/plugins/intel_gpu/src/plugin/ops/roll.cpp | 2 +- src/plugins/intel_gpu/src/plugin/ops/strided_slice.cpp | 2 +- src/plugins/intel_gpu/src/plugin/program.cpp | 3 ++- 4 files changed, 6 insertions(+), 5 deletions(-) diff --git a/src/plugins/intel_gpu/src/plugin/infer_request_legacy.cpp b/src/plugins/intel_gpu/src/plugin/infer_request_legacy.cpp index 32a54b3bf6c1ea..bee48411b4b98c 100644 --- a/src/plugins/intel_gpu/src/plugin/infer_request_legacy.cpp +++ b/src/plugins/intel_gpu/src/plugin/infer_request_legacy.cpp @@ -289,7 +289,7 @@ void InferRequestLegacy::SetBlob(const std::string& name, const Blob::Ptr& data) auto batched_ptr = data->as(); bool is_batched = batched_ptr != nullptr; bool is_nv12 = nv12_ptr != nullptr; - auto expected_batch = is_batched ? desc.getDims()[0] : 1; + auto expected_batch = is_batched ? 
static_cast(desc.getDims()[0]) : 1; if (ColorFormat::NV12 == foundInput->getPreProcess().getColorFormat() && m_graph->get_config().get_property(ov::intel_gpu::nv12_two_inputs)) { // try extracting Y and UV remote blobs from it @@ -297,7 +297,7 @@ void InferRequestLegacy::SetBlob(const std::string& name, const Blob::Ptr& data) // that should then go into biplanar NV12 reorder if (is_nv12 || is_batched) { - auto num_blobs = is_batched ? batched_ptr->size() : 1; + auto num_blobs = is_batched ? static_cast(batched_ptr->size()) : 1; for (auto i = 0; i < expected_batch; i++) { std::string y_name = name + "_Y" + std::to_string(i); std::string uv_name = name + "_UV" + std::to_string(i); diff --git a/src/plugins/intel_gpu/src/plugin/ops/roll.cpp b/src/plugins/intel_gpu/src/plugin/ops/roll.cpp index 76f628b8774f24..e4ba889ce806c9 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/roll.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/roll.cpp @@ -53,7 +53,7 @@ void CreateRollOp(Program& p, const std::shared_ptr& op) { } // Normalize shift - for (size_t s = 0; s < rank; ++s) { + for (size_t s = 0; s < static_cast(rank); ++s) { auto& sh = shift[s]; const auto dim = static_cast(input_shape[s]); sh %= dim; diff --git a/src/plugins/intel_gpu/src/plugin/ops/strided_slice.cpp b/src/plugins/intel_gpu/src/plugin/ops/strided_slice.cpp index e31cf2ccb6af03..4b9ab81efb1f80 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/strided_slice.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/strided_slice.cpp @@ -93,7 +93,7 @@ static void CreateStridedSliceOp(Program& p, const std::shared_ptr(begin.size() - axis - num_new_axis_after_ellipses - 1); - unsigned long num_of_hidden_dims = + unsigned long num_of_hidden_dims = static_cast(input_shape.size() - num_input_axis_after_ellipses - num_input_axis_before_ellipses); for (size_t i = 0; i < num_of_hidden_dims; ++i) { diff --git a/src/plugins/intel_gpu/src/plugin/program.cpp b/src/plugins/intel_gpu/src/plugin/program.cpp index 
ff6702b3ce868b..cffe8a8c7b1622 100644 --- a/src/plugins/intel_gpu/src/plugin/program.cpp +++ b/src/plugins/intel_gpu/src/plugin/program.cpp @@ -469,7 +469,8 @@ std::vector Program::GetInputInfo(const std::shared_ptr(op->get_input_source_output(i).get_index()))); + inputInfo.push_back( + cldnn::input_info(primitive_ids.at(prevName), is_legacy_multiple_outputs ? 0: static_cast(op->get_input_source_output(i).get_index()))); } else { inputInfo.push_back(cldnn::input_info(prevName, is_legacy_multiple_outputs ? 0 : static_cast(op->get_input_source_output(i).get_index()))); } From 2e1fafb761bc5265e2ab7a9b32bbf71bf332c709 Mon Sep 17 00:00:00 2001 From: Gorbachev Date: Thu, 9 Mar 2023 18:47:29 +0000 Subject: [PATCH 04/12] fixed win plugin --- .../dev_api/performance_heuristics.hpp | 8 +++---- .../intel_gpu/primitives/arg_max_min.hpp | 4 +++- .../intel_gpu/src/graph/convolution.cpp | 24 +++++++++---------- .../intel_gpu/src/graph/deconvolution.cpp | 9 ++++--- .../intel_gpu/src/graph/fully_connected.cpp | 2 +- src/plugins/intel_gpu/src/graph/gather.cpp | 5 +++- .../graph_optimizer/pre_replace_deconv.cpp | 2 +- .../graph/graph_optimizer/prepare_padding.cpp | 12 +++++++--- .../src/graph/impls/ocl/arg_max_min.cpp | 2 +- .../graph/impls/ocl/binary_convolution.cpp | 12 +++++----- .../src/graph/impls/ocl/concatenation.cpp | 4 ++-- .../src/graph/impls/ocl/convolution.cpp | 12 +++++----- .../intel_gpu/src/graph/impls/ocl/crop.cpp | 2 +- .../src/graph/impls/ocl/deconvolution.cpp | 12 +++++----- .../impls/ocl/deformable_convolution.cpp | 12 +++++----- .../src/graph/impls/ocl/fully_connected.cpp | 3 ++- .../src/graph/impls/ocl/non_zero.cpp | 2 +- .../intel_gpu/src/graph/impls/ocl/permute.cpp | 2 +- .../intel_gpu/src/graph/impls/ocl/pooling.cpp | 18 +++++++------- .../src/graph/impls/ocl/prior_box.cpp | 2 +- .../intel_gpu/src/graph/impls/ocl/reduce.cpp | 2 +- .../src/graph/impls/ocl/resample.cpp | 4 +++- .../intel_gpu/src/graph/impls/ocl/slice.cpp | 6 ++++- 
.../impls/onednn/fully_connected_onednn.cpp | 3 ++- src/plugins/intel_gpu/src/graph/pooling.cpp | 14 +++++------ src/plugins/intel_gpu/src/graph/program.cpp | 2 +- .../intel_gpu/src/graph/program_node.cpp | 2 +- src/plugins/intel_gpu/src/graph/reduce.cpp | 2 +- .../intel_gpu/src/graph/strided_slice.cpp | 5 +++- .../src/kernel_selector/kernel_selector.cpp | 5 +++- .../kernels/border/border_kernel_base.cpp | 2 +- .../detection_output_kernel_ref.cpp | 6 ++--- .../src/plugin/infer_request_legacy.cpp | 10 ++++---- .../ops/experimental_detectron_topk_rois.cpp | 2 +- .../src/plugin/ops/extract_image_patches.cpp | 15 +++++++++--- .../intel_gpu/src/plugin/ops/gather_nd.cpp | 9 ++++--- src/plugins/intel_gpu/src/plugin/ops/lrn.cpp | 2 +- src/plugins/intel_gpu/src/plugin/ops/roll.cpp | 4 ++-- src/plugins/intel_gpu/src/runtime/layout.cpp | 15 +++++++++--- 39 files changed, 152 insertions(+), 107 deletions(-) diff --git a/src/inference/dev_api/performance_heuristics.hpp b/src/inference/dev_api/performance_heuristics.hpp index e6f374beca0c1b..5111e676e12329 100644 --- a/src/inference/dev_api/performance_heuristics.hpp +++ b/src/inference/dev_api/performance_heuristics.hpp @@ -57,7 +57,7 @@ static MemBandwidthPressure MemBandwidthPressureTolerance( const bool isBF16orFP16 = isHalfPrecision(type1); const int data_type_size = isINT8 ? 1 : isBF16orFP16 ? 
2 : 4; - int dataSizeInput = 0, dataSizeOutput = 0; + size_t dataSizeInput = 0, dataSizeOutput = 0; if (!std::strcmp("MatMul", node_name)) { const auto input0 = node->input(0); const auto input1 = node->input(1); @@ -77,7 +77,7 @@ static MemBandwidthPressure MemBandwidthPressureTolerance( std::accumulate(shapeOutput.begin(), shapeOutput.end(), size_t(1), std::multiplies()); const auto total_data = dataSizeInput0 + non_const * dataSizeInput1 + dataSizeOutput; total_gemms++; - const auto factor = memLimitedFactor(total_data, data_type_size); + const auto factor = memLimitedFactor(static_cast(total_data), data_type_size); mem_limited_gemms += factor < memThresholdAssumeLimited; worst_case = std::min(factor, worst_case); } @@ -103,7 +103,7 @@ static MemBandwidthPressure MemBandwidthPressureTolerance( std::accumulate(shapeInput.begin(), shapeInput.end(), size_t(1), std::multiplies()); dataSizeOutput = std::accumulate(shapeOutput.begin(), shapeOutput.end(), size_t(1), std::multiplies()); - const auto factor = memLimitedFactor(dataSizeInput + dataSizeOutput, data_type_size); + const auto factor = memLimitedFactor(static_cast(dataSizeInput + dataSizeOutput), data_type_size); mem_limited_convs += factor < memThresholdAssumeLimited; worst_case = std::min(factor, worst_case); } @@ -124,7 +124,7 @@ static MemBandwidthPressure MemBandwidthPressureTolerance( std::accumulate(shapeInput.begin(), shapeInput.end(), size_t(1), std::multiplies()); dataSizeOutput = std::accumulate(shapeOutput.begin(), shapeOutput.end(), size_t(1), std::multiplies()); - const auto factor = memLimitedFactor(dataSizeInput + dataSizeOutput, data_type_size); + const auto factor = memLimitedFactor(static_cast(dataSizeInput + dataSizeOutput), data_type_size); mem_limited_deconvs += factor < memThresholdAssumeLimited; worst_case = std::min(factor, worst_case); } diff --git a/src/plugins/intel_gpu/include/intel_gpu/primitives/arg_max_min.hpp b/src/plugins/intel_gpu/include/intel_gpu/primitives/arg_max_min.hpp 
index 6da5ce80d20f95..afca08f6d3d37c 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/primitives/arg_max_min.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/primitives/arg_max_min.hpp @@ -103,7 +103,9 @@ struct arg_max_min : public primitive_base { values_first == rhs_casted.values_first; } - uint32_t get_output_nums() const { return (input_size() == 3 ? 2 : output_size()); } + uint32_t get_output_nums() const { + return (input_size() == 3 ? 2 : static_cast(output_size())); + } bool has_second_output() const { return get_output_nums() == 2; } bool use_multiple_outputs() const { return input_size() != 3; } diff --git a/src/plugins/intel_gpu/src/graph/convolution.cpp b/src/plugins/intel_gpu/src/graph/convolution.cpp index a50b2f59c0bf11..8c84a8b2c842b9 100644 --- a/src/plugins/intel_gpu/src/graph/convolution.cpp +++ b/src/plugins/intel_gpu/src/graph/convolution.cpp @@ -42,13 +42,13 @@ layout convolution_inst::calc_output_layout(convolution_node const& node, kernel output_type = data_types::f32; } - uint32_t stride_z = stride.size() >= 3 ? stride[stride.size() - 3] : 1; - uint32_t stride_y = stride.size() >= 2 ? stride[stride.size() - 2] : 1; - uint32_t stride_x = stride.size() >= 1 ? stride[stride.size() - 1] : 1; + auto stride_z = stride.size() >= 3 ? stride[stride.size() - 3] : 1; + auto stride_y = stride.size() >= 2 ? stride[stride.size() - 2] : 1; + auto stride_x = stride.size() >= 1 ? stride[stride.size() - 1] : 1; - uint32_t dilation_z = dilation.size() >= 3 ? dilation[dilation.size() - 3] : 1; - uint32_t dilation_y = dilation.size() >= 2 ? dilation[dilation.size() - 2] : 1; - uint32_t dilation_x = dilation.size() >= 1 ? dilation[dilation.size() - 1] : 1; + auto dilation_z = dilation.size() >= 3 ? dilation[dilation.size() - 3] : 1; + auto dilation_y = dilation.size() >= 2 ? dilation[dilation.size() - 2] : 1; + auto dilation_x = dilation.size() >= 1 ? 
dilation[dilation.size() - 1] : 1; // TODO: Consider moving general parameter verification to arguments constructor. CLDNN_ERROR_LESS_OR_EQUAL_THAN(desc->id, @@ -249,13 +249,13 @@ std::vector convolution_inst::calc_output_layouts(convolution_node const output_type = data_types::f32; } - uint32_t stride_z = stride.size() >= 3 ? stride[stride.size() - 3] : 1; - uint32_t stride_y = stride.size() >= 2 ? stride[stride.size() - 2] : 1; - uint32_t stride_x = stride.size() >= 1 ? stride[stride.size() - 1] : 1; + auto stride_z = stride.size() >= 3 ? stride[stride.size() - 3] : 1; + auto stride_y = stride.size() >= 2 ? stride[stride.size() - 2] : 1; + auto stride_x = stride.size() >= 1 ? stride[stride.size() - 1] : 1; - uint32_t dilation_z = dilation.size() >= 3 ? dilation[dilation.size() - 3] : 1; - uint32_t dilation_y = dilation.size() >= 2 ? dilation[dilation.size() - 2] : 1; - uint32_t dilation_x = dilation.size() >= 1 ? dilation[dilation.size() - 1] : 1; + auto dilation_z = dilation.size() >= 3 ? dilation[dilation.size() - 3] : 1; + auto dilation_y = dilation.size() >= 2 ? dilation[dilation.size() - 2] : 1; + auto dilation_x = dilation.size() >= 1 ? dilation[dilation.size() - 1] : 1; // TODO: Consider moving general parameter verification to arguments constructor. 
CLDNN_ERROR_LESS_OR_EQUAL_THAN(desc->id, diff --git a/src/plugins/intel_gpu/src/graph/deconvolution.cpp b/src/plugins/intel_gpu/src/graph/deconvolution.cpp index 6fb1d8496de398..af71244e3b986e 100644 --- a/src/plugins/intel_gpu/src/graph/deconvolution.cpp +++ b/src/plugins/intel_gpu/src/graph/deconvolution.cpp @@ -80,14 +80,17 @@ layout deconvolution_inst::calc_output_layout(deconvolution_node const& node, ke 3, "As for now, deconvolutions with more than 3 dimensions are not supported"); - int32_t x = off_factor * pad[pad.size() - 1] + (input_layout.spatial(0) - 1) * strd[strd.size() - 1] + weights_layout.spatial(0); + int32_t x = static_cast( + off_factor * pad[pad.size() - 1] + (input_layout.spatial(0) - 1) * strd[strd.size() - 1] + weights_layout.spatial(0)); int32_t y = 1; if (spatial_dims > 1) { - y = off_factor * pad[pad.size() - 2] + (input_layout.spatial(1) - 1) * strd[strd.size() - 2] + weights_layout.spatial(1); + y = static_cast( + off_factor * pad[pad.size() - 2] + (input_layout.spatial(1) - 1) * strd[strd.size() - 2] + weights_layout.spatial(1)); } int32_t z = 1; if (spatial_dims > 2) { - z = off_factor * pad[pad.size() - 3] + (input_layout.spatial(2) - 1) * strd[strd.size() - 3] + weights_layout.spatial(2); + z = static_cast( + off_factor * pad[pad.size() - 3] + (input_layout.spatial(2) - 1) * strd[strd.size() - 3] + weights_layout.spatial(2)); } tensor output_size(input_layout.batch(), diff --git a/src/plugins/intel_gpu/src/graph/fully_connected.cpp b/src/plugins/intel_gpu/src/graph/fully_connected.cpp index 80a3b97b844f99..53e1ff3e141a8f 100644 --- a/src/plugins/intel_gpu/src/graph/fully_connected.cpp +++ b/src/plugins/intel_gpu/src/graph/fully_connected.cpp @@ -107,7 +107,7 @@ layout fully_connected_inst::calc_output_layout(fully_connected_node const& node auto reshape_to_2d = [](const ov::PartialShape& shape, int64_t feature) { auto staticShape = shape.to_shape(); - size_t total = std::accumulate(staticShape.begin(), staticShape.end(), 1, 
std::multiplies()); + size_t total = std::accumulate(staticShape.begin(), staticShape.end(), static_cast(1), std::multiplies()); std::vector reshapeSize = { static_cast(total) / feature, feature }; return reshapeSize; }; diff --git a/src/plugins/intel_gpu/src/graph/gather.cpp b/src/plugins/intel_gpu/src/graph/gather.cpp index 7c022b1be2bf58..702a4989c0727b 100644 --- a/src/plugins/intel_gpu/src/graph/gather.cpp +++ b/src/plugins/intel_gpu/src/graph/gather.cpp @@ -17,7 +17,10 @@ layout gather_inst::calc_output_layout(gather_node const& node, kernel_impl_para auto desc = impl_param.typed_desc(); auto input_layout = impl_param.get_input_layout(); - std::vector dims_converted(desc->output_shape.begin(), desc->output_shape.end()); + std::vector dims_converted; + for (auto dim : desc->output_shape) { + dims_converted.push_back(static_cast(dim)); + } // extend shape to 4d for (size_t i = dims_converted.size(); i < 4; i++) dims_converted.push_back(1); diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/pre_replace_deconv.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/pre_replace_deconv.cpp index c56b2690cd1072..e4379d2c959c3e 100644 --- a/src/plugins/intel_gpu/src/graph/graph_optimizer/pre_replace_deconv.cpp +++ b/src/plugins/intel_gpu/src/graph/graph_optimizer/pre_replace_deconv.cpp @@ -252,7 +252,7 @@ void pre_replace_deconv::run(program& p) { static_cast(filter_layout.feature()), static_cast(filter_layout.spatial(0)), static_cast(filter_layout.spatial(1)), - scale_factor, + static_cast(scale_factor), subpixel_weights); if (weights_data_type == data_types::f16) { diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_padding.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_padding.cpp index 34700379db22cc..9460351beb5a43 100644 --- a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_padding.cpp +++ b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_padding.cpp @@ -277,9 +277,15 @@ void prepare_padding::run(program& 
p) { auto padding_begin_x = std::max(pad_x, 0); auto padding_begin_y = std::max(pad_y, 0); auto padding_begin_z = std::max(pad_z, 0); - auto padding_end_x = std::max(input_limit_x - prev_prim_output_layout.spatial(0), 0); - auto padding_end_y = std::max(input_limit_y - prev_prim_output_layout.spatial(1), 0); - auto padding_end_z = std::max(input_limit_z - prev_prim_output_layout.spatial(2), 0); + auto padding_end_x = std::max( + static_cast(input_limit_x) - prev_prim_output_layout.spatial(0), + 0); + auto padding_end_y = std::max( + static_cast(input_limit_y) - prev_prim_output_layout.spatial(1), + 0); + auto padding_end_z = std::max( + static_cast(input_limit_z) - prev_prim_output_layout.spatial(2), + 0); cldnn::padding needed_padding({0, 0, padding_begin_x, padding_begin_y, padding_begin_z}, {0, 0, padding_end_x, padding_end_y, padding_end_z}, 0); needed_padding = padding::max(prev_prim_output_layout.data_padding, needed_padding); diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/arg_max_min.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/arg_max_min.cpp index 98a6cf168d431b..ec6c5c9a83d795 100644 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/arg_max_min.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/arg_max_min.cpp @@ -66,7 +66,7 @@ struct arg_max_min_impl : typed_primitive_impl_ocl { const auto& mode = primitive->mode; const auto& sort_type = primitive->sort; const auto& values_first = primitive->values_first; - const auto& outputs_num = (primitive->input_size() == 3 ? 2 : primitive->output_size()); + const auto& outputs_num = static_cast(primitive->input_size() == 3 ? 
2 : primitive->output_size()); auto argm_params = get_default_params(impl_param); auto argm_optional_params = diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/binary_convolution.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/binary_convolution.cpp index 4d91b87fc036ef..9292a491106204 100644 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/binary_convolution.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/binary_convolution.cpp @@ -59,14 +59,14 @@ struct binary_convolution_impl : typed_primitive_impl_ocl { uint32_t pad_x = std::max(pad.size() >= 1 ? pad[pad.size() - 1] : 0, 0); params.padding = {pad_x, pad_y, pad_z}; - uint32_t stride_z = stride.size() >= 3 ? stride[stride.size() - 3] : 1; - uint32_t stride_y = stride.size() >= 2 ? stride[stride.size() - 2] : 1; - uint32_t stride_x = stride.size() >= 1 ? stride[stride.size() - 1] : 1; + uint32_t stride_z = stride.size() >= 3 ? static_cast(stride[stride.size() - 3]) : 1; + uint32_t stride_y = stride.size() >= 2 ? static_cast(stride[stride.size() - 2]) : 1; + uint32_t stride_x = stride.size() >= 1 ? static_cast(stride[stride.size() - 1]) : 1; params.stride = {stride_x, stride_y, stride_z}; - uint32_t dilation_z = dilation.size() >= 3 ? dilation[dilation.size() - 3] : 1; - uint32_t dilation_y = dilation.size() >= 2 ? dilation[dilation.size() - 2] : 1; - uint32_t dilation_x = dilation.size() >= 1 ? dilation[dilation.size() - 1] : 1; + uint32_t dilation_z = dilation.size() >= 3 ? static_cast(dilation[dilation.size() - 3]) : 1; + uint32_t dilation_y = dilation.size() >= 2 ? static_cast(dilation[dilation.size() - 2]) : 1; + uint32_t dilation_x = dilation.size() >= 1 ? 
static_cast(dilation[dilation.size() - 1]) : 1; params.dilation = {dilation_x, dilation_y, dilation_z}; return {params, optional_params}; diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/concatenation.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/concatenation.cpp index 80485d4aef2707..2280b00c76f2bd 100644 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/concatenation.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/concatenation.cpp @@ -13,8 +13,8 @@ namespace ocl { namespace { kernel_selector::concat_axis convert_axis(int64_t axis, size_t rank) { - unsigned cldnn_axis = axis >= 0 ? axis : axis + static_cast(rank); - if (cldnn_axis >= rank) + auto cldnn_axis = axis >= 0 ? axis : axis + static_cast(rank); + if (cldnn_axis >= static_cast(rank)) IE_THROW() << "Concatenation axis exceeds number of dimensions"; // Difference in dimension ordering between IE and GPU plugin, diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/convolution.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/convolution.cpp index ec822fd2a3585c..f7181441321ba6 100644 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/convolution.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/convolution.cpp @@ -78,14 +78,14 @@ struct convolution_impl : typed_primitive_impl_ocl { uint32_t pad_x = std::max(pad.size() >= 1 ? pad[pad.size() - 1] : 0, 0); conv_params.padding = {pad_x, pad_y, pad_z}; - uint32_t stride_z = stride.size() >= 3 ? stride[stride.size() - 3] : 1; - uint32_t stride_y = stride.size() >= 2 ? stride[stride.size() - 2] : 1; - uint32_t stride_x = stride.size() >= 1 ? stride[stride.size() - 1] : 1; + uint32_t stride_z = stride.size() >= 3 ? static_cast(stride[stride.size() - 3]) : 1; + uint32_t stride_y = stride.size() >= 2 ? static_cast(stride[stride.size() - 2]) : 1; + uint32_t stride_x = stride.size() >= 1 ? static_cast(stride[stride.size() - 1]) : 1; conv_params.stride = {stride_x, stride_y, stride_z}; - uint32_t dilation_z = dilation.size() >= 3 ? 
dilation[dilation.size() - 3] : 1; - uint32_t dilation_y = dilation.size() >= 2 ? dilation[dilation.size() - 2] : 1; - uint32_t dilation_x = dilation.size() >= 1 ? dilation[dilation.size() - 1] : 1; + uint32_t dilation_z = dilation.size() >= 3 ? static_cast(dilation[dilation.size() - 3]) : 1; + uint32_t dilation_y = dilation.size() >= 2 ? static_cast(dilation[dilation.size() - 2]) : 1; + uint32_t dilation_x = dilation.size() >= 1 ? static_cast(dilation[dilation.size() - 1]) : 1; conv_params.dilation = {dilation_x, dilation_y, dilation_z}; if ((impl_param.input_layouts[0].data_type == data_types::u8 || diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/crop.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/crop.cpp index 760e7d6ba4a3f4..b11ba4f49569ea 100644 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/crop.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/crop.cpp @@ -45,7 +45,7 @@ struct crop_impl : typed_primitive_impl_ocl { auto runtime_offset = convert_data_tensor(impl_param.get_input_layout(), impl_param.input_offsets[0]).GetFirstElementOffset(); kernel_selector::ScalarDescriptor s; s.t = kernel_selector::ScalarDescriptor::Types::UINT32; - s.v.u32 = runtime_offset; + s.v.u32 = static_cast(runtime_offset); OPENVINO_ASSERT(_kernel_data.kernels[0].params.scalars.size() == 1, "[GPU] Scalar field for runtime offset is not added for crop shape agnostic impl"); _kernel_data.kernels[0].params.scalars[0] = s; diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/deconvolution.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/deconvolution.cpp index d25826c63ca5ec..c1cec6e088ef0f 100644 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/deconvolution.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/deconvolution.cpp @@ -60,14 +60,14 @@ struct deconvolution_impl : typed_primitive_impl_ocl { uint32_t pad_x = std::max(pad.size() >= 1 ? pad[pad.size() - 1] : 0, 0); params.padding = {pad_x, pad_y, pad_z}; - uint32_t stride_z = stride.size() >= 3 ? 
stride[stride.size() - 3] : 1; - uint32_t stride_y = stride.size() >= 2 ? stride[stride.size() - 2] : 1; - uint32_t stride_x = stride.size() >= 1 ? stride[stride.size() - 1] : 1; + uint32_t stride_z = stride.size() >= 3 ? static_cast(stride[stride.size() - 3]) : 1; + uint32_t stride_y = stride.size() >= 2 ? static_cast(stride[stride.size() - 2]) : 1; + uint32_t stride_x = stride.size() >= 1 ? static_cast(stride[stride.size() - 1]) : 1; params.stride = {stride_x, stride_y, stride_z}; - uint32_t dilation_z = dilation.size() >= 3 ? dilation[dilation.size() - 3] : 1; - uint32_t dilation_y = dilation.size() >= 2 ? dilation[dilation.size() - 2] : 1; - uint32_t dilation_x = dilation.size() >= 1 ? dilation[dilation.size() - 1] : 1; + uint32_t dilation_z = dilation.size() >= 3 ? static_cast(dilation[dilation.size() - 3]) : 1; + uint32_t dilation_y = dilation.size() >= 2 ? static_cast(dilation[dilation.size() - 2]) : 1; + uint32_t dilation_x = dilation.size() >= 1 ? static_cast(dilation[dilation.size() - 1]) : 1; params.dilation = {dilation_x, dilation_y, dilation_z}; return {params, optional_params}; diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/deformable_convolution.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/deformable_convolution.cpp index 048a83f3cec158..0860f4f3678e06 100644 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/deformable_convolution.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/deformable_convolution.cpp @@ -102,14 +102,14 @@ struct deformable_interp_impl : typed_primitive_impl_ocl { params.padding = {pad_x, pad_y, pad_z}; - uint32_t stride_z = stride.size() >= 3 ? stride[stride.size() - 3] : 1; - uint32_t stride_y = stride.size() >= 2 ? stride[stride.size() - 2] : 1; - uint32_t stride_x = stride.size() >= 1 ? stride[stride.size() - 1] : 1; + uint32_t stride_z = stride.size() >= 3 ? static_cast(stride[stride.size() - 3]) : 1; + uint32_t stride_y = stride.size() >= 2 ? 
static_cast(stride[stride.size() - 2]) : 1; + uint32_t stride_x = stride.size() >= 1 ? static_cast(stride[stride.size() - 1]) : 1; params.stride = {stride_x, stride_y, stride_z}; - uint32_t dilation_z = dilation.size() >= 3 ? dilation[dilation.size() - 3] : 1; - uint32_t dilation_y = dilation.size() >= 2 ? dilation[dilation.size() - 2] : 1; - uint32_t dilation_x = dilation.size() >= 1 ? dilation[dilation.size() - 1] : 1; + uint32_t dilation_z = dilation.size() >= 3 ? static_cast(dilation[dilation.size() - 3]) : 1; + uint32_t dilation_y = dilation.size() >= 2 ? static_cast(dilation[dilation.size() - 2]) : 1; + uint32_t dilation_x = dilation.size() >= 1 ? static_cast(dilation[dilation.size() - 1]) : 1; params.dilation = {dilation_x, dilation_y, dilation_z}; params.kernelSize = { (uint32_t)kernel_size.spatial[0], diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/fully_connected.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/fully_connected.cpp index c6e703d63ffe97..64d6ac2197b413 100644 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/fully_connected.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/fully_connected.cpp @@ -41,7 +41,8 @@ struct fully_connected_impl : typed_primitive_impl_ocl { auto reshape_to_2d = [](const ov::PartialShape& shape, const ov::Dimension& feature) { if (shape.is_static()) { auto static_shape = shape.to_shape(); - size_t total = std::accumulate(static_shape.begin(), static_shape.end(), 1, std::multiplies()); + size_t total = + std::accumulate(static_shape.begin(), static_shape.end(), size_t(1), std::multiplies()); auto dim = feature.is_static() ? 
feature.get_length() : static_cast(static_shape.back()); return ov::PartialShape{ static_cast(total) / dim, dim }; } else { diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/non_zero.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/non_zero.cpp index e796cec93e06ad..0c0a904847be3b 100644 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/non_zero.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/non_zero.cpp @@ -55,7 +55,7 @@ struct gather_nonzero_impl : typed_primitive_impl_ocl { auto optional_params = get_default_optional_params(impl_param.get_program()); params.inputs.push_back(convert_data_tensor(impl_param.get_input_layout(1))); - params.ov_input_rank = impl_param.get_input_layout().get_partial_shape().size(); + params.ov_input_rank = static_cast(impl_param.get_input_layout().get_partial_shape().size()); return {params, optional_params}; } diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/permute.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/permute.cpp index 3f8842d1e5bec5..b853d89823d5da 100644 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/permute.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/permute.cpp @@ -24,7 +24,7 @@ inline std::vector convert_permute_order(const std::vector& // 1. Switch permute order values for spatial dims for (auto const& o : ie_order_aligned) { if (o >= 2) - cldnn_order.push_back(1 + ie_order_aligned.size() - o); + cldnn_order.push_back(1 + static_cast(ie_order_aligned.size()) - o); else cldnn_order.push_back(o); } diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/pooling.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/pooling.cpp index de63adf0c27b83..40b71ceb58bb93 100644 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/pooling.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/pooling.cpp @@ -138,9 +138,9 @@ struct pooling_impl : typed_primitive_impl_ocl { else pp.divMode = cldnn_2_kernel_divider_mode(primitive->mode); - uint32_t kernel_z = kernel.size() >= 3 ? 
kernel[kernel.size() - 3] : 1; - uint32_t kernel_y = kernel.size() >= 2 ? kernel[kernel.size() - 2] : 1; - uint32_t kernel_x = kernel.size() >= 1 ? kernel[kernel.size() - 1] : 1; + uint32_t kernel_z = kernel.size() >= 3 ? static_cast(kernel[kernel.size() - 3]) : 1; + uint32_t kernel_y = kernel.size() >= 2 ? static_cast(kernel[kernel.size() - 2]) : 1; + uint32_t kernel_x = kernel.size() >= 1 ? static_cast(kernel[kernel.size() - 1]) : 1; pp.poolSize = {kernel_x, kernel_y, kernel_z}; uint32_t pad_z = std::max(pads_begin.size() >= 3 ? pads_begin[pads_begin.size() - 3] : 0, 0); @@ -148,14 +148,14 @@ struct pooling_impl : typed_primitive_impl_ocl { uint32_t pad_x = std::max(pads_begin.size() >= 1 ? pads_begin[pads_begin.size() - 1] : 0, 0); pp.poolPad = {pad_x, pad_y, pad_z}; - uint32_t stride_z = stride.size() >= 3 ? stride[stride.size() - 3] : 1; - uint32_t stride_y = stride.size() >= 2 ? stride[stride.size() - 2] : 1; - uint32_t stride_x = stride.size() >= 1 ? stride[stride.size() - 1] : 1; + uint32_t stride_z = stride.size() >= 3 ? static_cast(stride[stride.size() - 3]) : 1; + uint32_t stride_y = stride.size() >= 2 ? static_cast(stride[stride.size() - 2]) : 1; + uint32_t stride_x = stride.size() >= 1 ? static_cast(stride[stride.size() - 1]) : 1; pp.poolStride = {stride_x, stride_y, stride_z}; - uint32_t dilation_z = dilation.size() >= 3 ? dilation[dilation.size() - 3] : 1; - uint32_t dilation_y = dilation.size() >= 2 ? dilation[dilation.size() - 2] : 1; - uint32_t dilation_x = dilation.size() >= 1 ? dilation[dilation.size() - 1] : 1; + uint32_t dilation_z = dilation.size() >= 3 ? static_cast(dilation[dilation.size() - 3]) : 1; + uint32_t dilation_y = dilation.size() >= 2 ? static_cast(dilation[dilation.size() - 2]) : 1; + uint32_t dilation_x = dilation.size() >= 1 ? 
static_cast(dilation[dilation.size() - 1]) : 1; pp.poolDilation = {dilation_x, dilation_y, dilation_z}; return {params, optional_params}; diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/prior_box.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/prior_box.cpp index aeaf96f98debb0..fd6f6ca72ba77a 100644 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/prior_box.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/prior_box.cpp @@ -71,7 +71,7 @@ struct prior_box_impl : typed_primitive_impl_ocl { params.widths = primitive->widths; params.heights = primitive->heights; const auto output_shape = impl_param.get_output_layout().get_shape(); - params.num_priors_4 = output_shape[1] / (params.width * params.height); + params.num_priors_4 = static_cast(output_shape[1] / (params.width * params.height)); params.inputs.push_back(convert_data_tensor(impl_param.get_input_layout(1))); return {params, {}}; diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/reduce.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/reduce.cpp index f7d8ba3f96c720..7bfda772ac1307 100644 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/reduce.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/reduce.cpp @@ -22,7 +22,7 @@ static std::vector convert_axes(std::vector axes, size_t rank if (axis < 0) axis = axis + rank; - converted_axes.push_back(rank + 1 - axis); + converted_axes.push_back(static_cast(rank + 1 - axis)); } return converted_axes; diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/resample.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/resample.cpp index c01d6bc6f2b84a..ae7cd59eb38980 100644 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/resample.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/resample.cpp @@ -79,7 +79,9 @@ inline std::vector convert_pads(const std::vector& pad, size_t if (pad.empty()) { new_pad = std::vector(rank, 0); } else { - new_pad = std::vector(pad.begin(), pad.end()); + for (auto pad : pad) { + new_pad.push_back(static_cast(pad)); + } if (new_pad.size() > 
2) std::reverse(new_pad.begin() + 2, new_pad.end()); for (size_t i = new_pad.size(); i < rank || i < 4; ++i) diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/slice.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/slice.cpp index 898de26502a894..19dfd79bbcf298 100644 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/slice.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/slice.cpp @@ -48,7 +48,11 @@ std::vector extractIntegerData(const data_node& node, const stream std::vector extractShape(kernel_selector::Tensor::DataTensor& tensor) { auto logical_dims = tensor.LogicalDims(); // LogicalDims method returns dims in reversed order - return {logical_dims.rbegin(), logical_dims.rend()}; + std::vector reverse_logical_dims; + for (auto it = logical_dims.rbegin(); it != logical_dims.rend(); ++it) { + reverse_logical_dims.push_back(static_cast(*it)); + } + return reverse_logical_dims; } } // namespace diff --git a/src/plugins/intel_gpu/src/graph/impls/onednn/fully_connected_onednn.cpp b/src/plugins/intel_gpu/src/graph/impls/onednn/fully_connected_onednn.cpp index b8b1fb70bd5957..7cf62d141e4a1a 100644 --- a/src/plugins/intel_gpu/src/graph/impls/onednn/fully_connected_onednn.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/onednn/fully_connected_onednn.cpp @@ -24,7 +24,8 @@ struct fully_connected_onednn : typed_primitive_onednn_impl { private: static std::vector reshape_to_2d(const ov::PartialShape& shape, int64_t feature) { auto staticShape = shape.to_shape(); - size_t total = std::accumulate(staticShape.begin(), staticShape.end(), 1, std::multiplies()); + size_t total = + std::accumulate(staticShape.begin(), staticShape.end(), static_cast(1), std::multiplies()); std::vector reshapeSize = { static_cast(total) / feature, feature }; return reshapeSize; } diff --git a/src/plugins/intel_gpu/src/graph/pooling.cpp b/src/plugins/intel_gpu/src/graph/pooling.cpp index f92e7ca91afccc..908f1b44384d6c 100644 --- a/src/plugins/intel_gpu/src/graph/pooling.cpp +++ 
b/src/plugins/intel_gpu/src/graph/pooling.cpp @@ -47,13 +47,13 @@ layout pooling_inst::calc_output_layout(parent::typed_node const& node, kernel_i } } - uint32_t stride_z = stride.size() >= 3 ? stride[stride.size() - 3] : 1; - uint32_t stride_y = stride.size() >= 2 ? stride[stride.size() - 2] : 1; - uint32_t stride_x = stride.size() >= 1 ? stride[stride.size() - 1] : 1; + auto stride_z = stride.size() >= 3 ? stride[stride.size() - 3] : 1; + auto stride_y = stride.size() >= 2 ? stride[stride.size() - 2] : 1; + auto stride_x = stride.size() >= 1 ? stride[stride.size() - 1] : 1; - uint32_t kernel_z = window_size.size() >= 3 ? window_size[window_size.size() - 3] : 1; - uint32_t kernel_y = window_size.size() >= 2 ? window_size[window_size.size() - 2] : 1; - uint32_t kernel_x = window_size.size() >= 1 ? window_size[window_size.size() - 1] : 1; + auto kernel_z = window_size.size() >= 3 ? window_size[window_size.size() - 3] : 1; + auto kernel_y = window_size.size() >= 2 ? window_size[window_size.size() - 2] : 1; + auto kernel_x = window_size.size() >= 1 ? window_size[window_size.size() - 1] : 1; // TODO: Consider moving general parameter verification to arguments constructor. CLDNN_ERROR_LESS_OR_EQUAL_THAN(desc->id, @@ -127,7 +127,7 @@ layout pooling_inst::calc_output_layout(parent::typed_node const& node, kernel_i // TODO: Check compatibility of output size calculation (with caffe). 
tensor size(1); for (size_t i = 0; i < window_size.size(); i++) { - size.spatial[i] = window_size[window_size.size() - i - 1]; + size.spatial[i] = static_cast(window_size[window_size.size() - i - 1]); } auto output_range = calc_sliding_window_output_range(input_layout.get_tensor(), size, diff --git a/src/plugins/intel_gpu/src/graph/program.cpp b/src/plugins/intel_gpu/src/graph/program.cpp index d1abe87a7a2a38..3e9f1703e3618a 100644 --- a/src/plugins/intel_gpu/src/graph/program.cpp +++ b/src/plugins/intel_gpu/src/graph/program.cpp @@ -364,7 +364,7 @@ bool program::analyze_output_size_handling_need() { tensor size(1); for (size_t i = 0; i < prim->size.size(); i++) { - size.spatial[i] = prim->size[prim->size.size() - i - 1]; + size.spatial[i] = static_cast(prim->size[prim->size.size() - i - 1]); } // TODO: Check compatibility of output size calculation (with caffe). auto primInputSize = prim_node.input().get_output_layout().get_tensor(); diff --git a/src/plugins/intel_gpu/src/graph/program_node.cpp b/src/plugins/intel_gpu/src/graph/program_node.cpp index 24dc4b5aa3cc3d..6055cd23407f01 100644 --- a/src/plugins/intel_gpu/src/graph/program_node.cpp +++ b/src/plugins/intel_gpu/src/graph/program_node.cpp @@ -936,7 +936,7 @@ void program_node::init_onednn_primitive_attributes() { } else if (fused_desc->activation_function == cldnn::activation_func::hsigmoid) { // hard_sigmoid(x,a,b) = clamp(ax+b, 0, 1) // hsigmoid(x) = clamp(val+3, 0, 6) / 6 = clamp(val/6+0.5, 0, 1) = hard_sigmoid(val, 1/6, 1/2) - post_ops.append_eltwise(dnnl::algorithm::eltwise_hardsigmoid, 1./6, 1./2); + post_ops.append_eltwise(dnnl::algorithm::eltwise_hardsigmoid, 1.f/6, 1.f/2); update_onednn_post_op_list(onednn_post_op_type::eltwise_hardsigmoid, empty_mem); } else if (fused_desc->activation_function == cldnn::activation_func::negative) { post_ops.append_eltwise(dnnl::algorithm::eltwise_linear, -1, 0); diff --git a/src/plugins/intel_gpu/src/graph/reduce.cpp b/src/plugins/intel_gpu/src/graph/reduce.cpp 
index cb5d82c87589e0..5e0ddb723a5ace 100644 --- a/src/plugins/intel_gpu/src/graph/reduce.cpp +++ b/src/plugins/intel_gpu/src/graph/reduce.cpp @@ -25,7 +25,7 @@ static std::vector convert_axes(std::vector axes, size_t rank if (axis < 0) axis = axis + rank; - converted_axes.push_back(rank + 1 - axis); + converted_axes.push_back(static_cast(rank + 1 - axis)); } return converted_axes; diff --git a/src/plugins/intel_gpu/src/graph/strided_slice.cpp b/src/plugins/intel_gpu/src/graph/strided_slice.cpp index 7d13b4e5b42a97..2942e9624dee13 100644 --- a/src/plugins/intel_gpu/src/graph/strided_slice.cpp +++ b/src/plugins/intel_gpu/src/graph/strided_slice.cpp @@ -18,7 +18,10 @@ layout strided_slice_inst::calc_output_layout(strided_slice_node const& node, ke auto input_layout = impl_param.get_input_layout(); auto output_format = format::get_default_format(desc->out_size.size()); auto out_shape = desc->out_size; - std::vector dims_converted(out_shape.begin(), out_shape.end()); + std::vector dims_converted; + for (auto dim : out_shape) { + dims_converted.push_back(static_cast(dim)); + } // extend shape to 4d for (size_t i = dims_converted.size(); i < 4; i++) { dims_converted.push_back(1); diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernel_selector.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernel_selector.cpp index 8ad5edf9b7465a..eb6c04765d943c 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernel_selector.cpp +++ b/src/plugins/intel_gpu/src/kernel_selector/kernel_selector.cpp @@ -89,7 +89,10 @@ KernelsData kernel_selector_base::GetNaiveBestKernel(const KernelList& all_impls if (!params.is_shape_agnostic) { for (size_t k = 0; k < kds[0].kernels.size(); ++k) { auto gws = kds[0].kernels[k].params.workGroups.global; - kernelsData[0].kernels[k].skip_execution = (std::accumulate(gws.begin(), gws.end(), 1, std::multiplies()) == 0); + kernelsData[0].kernels[k].skip_execution = (std::accumulate(gws.begin(), + gws.end(), + static_cast(1), + std::multiplies()) == 
0); } } break; diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/border/border_kernel_base.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/border/border_kernel_base.cpp index 94310677c754a4..fe5fd4180716c2 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/border/border_kernel_base.cpp +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/border/border_kernel_base.cpp @@ -15,7 +15,7 @@ inline std::string GetInputTypeStr(uint32_t idx) { JitConstants BorderKernelBase::GetJitConstants(const border_params& params) const { JitConstants jit = MakeBaseParamsJitConstants(params); - size_t input_offset = 1; + uint32_t input_offset = 1; if (params.begin_type == base_params::ArgType::Input) { jit.AddConstant(MakeJitConstant("BEGIN_TYPE", GetInputTypeStr(input_offset))); input_offset += 1; diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/detection_output/detection_output_kernel_ref.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/detection_output/detection_output_kernel_ref.cpp index 8d5931cc3f00e6..3e82d684d08265 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/detection_output/detection_output_kernel_ref.cpp +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/detection_output/detection_output_kernel_ref.cpp @@ -229,7 +229,7 @@ KernelsData DetectionOutputKernelRef::GetKernelsData(const Params& params, const kd.internalBufferDataType = GetUnitType(detectOutParams); for (size_t i = 0; i < kKernelsNum; i++) { - DispatchData dispatchData = SetDefault(detectOutParams, i); + DispatchData dispatchData = SetDefault(detectOutParams, static_cast(i)); auto cldnnJit = GetJitConstants(detectOutParams); auto entryPoint = GetEntryPoint(kernelName, detectOutParams.layerID, params, options, i); cldnnJit.AddConstant(MakeJitConstant("BUFFER_STRIDE", buffer_stride)); @@ -256,7 +256,7 @@ KernelsData DetectionOutputKernelRef::GetKernelsData(const Params& params, const 
cldnnJit.AddConstant(MakeJitConstant("USE_LOCAL_MEMORY_FOR_STACK", true)); cldnnJit.AddConstants({MakeJitConstant("DO_STAGE_" + std::to_string(i) + "_MXNET", "true"), MakeJitConstant("LOCAL_WORK_NUM", dispatchData.lws[2]), - MakeJitConstant("PARTITION_STEP", GetPartitionStep(dispatchData.lws[2]))}); + MakeJitConstant("PARTITION_STEP", GetPartitionStep(static_cast(dispatchData.lws[2])))}); } else { // Limit local memory usage for two buffers: __range [LWS1 * LWS2 * 2 * 4 (int size) bytes] // stack [LWS1 * LWS2 * 100 (stack_size) * 4 (int size) bytes] @@ -267,7 +267,7 @@ KernelsData DetectionOutputKernelRef::GetKernelsData(const Params& params, const cldnnJit.AddConstants({MakeJitConstant("DO_STAGE_" + std::to_string(i) + "_CAFFE", "true"), MakeJitConstant("LOCAL_CLASS_NUM", dispatchData.lws[1]), MakeJitConstant("LOCAL_WORK_NUM", dispatchData.lws[2]), - MakeJitConstant("PARTITION_STEP", GetPartitionStep(dispatchData.lws[2]))}); + MakeJitConstant("PARTITION_STEP", GetPartitionStep(static_cast(dispatchData.lws[2])))}); } } else if (i == 2) { if (detectOutParams.detectOutParams.decrease_label_id) { diff --git a/src/plugins/intel_gpu/src/plugin/infer_request_legacy.cpp b/src/plugins/intel_gpu/src/plugin/infer_request_legacy.cpp index bee48411b4b98c..032556a06ef311 100644 --- a/src/plugins/intel_gpu/src/plugin/infer_request_legacy.cpp +++ b/src/plugins/intel_gpu/src/plugin/infer_request_legacy.cpp @@ -352,7 +352,7 @@ void InferRequestLegacy::SetBlob(const std::string& name, const Blob::Ptr& data) if (m_graph->GetMaxDynamicBatchSize() > 1) { const auto batch_idx = m_graph->GetInputDynBatchDims()[name].first; if (batch_idx >= 0) - SetBatch(blobDesc.getDims()[batch_idx]); + SetBatch(static_cast(blobDesc.getDims()[batch_idx])); } } else { size_t blobSize = desc.getLayout() != SCALAR @@ -735,9 +735,9 @@ void InferRequestLegacy::enqueue() { bool is_nv12 = nv12_ptr != nullptr; if (is_nv12 || is_batched) { - int num_blobs = is_batched ? 
batched_ptr->size() : 1; + int num_blobs = is_batched ? static_cast(batched_ptr->size()) : 1; int expected_batch = is_batched - ? _networkInputs.at(inputName)->getTensorDesc().getDims()[0] + ? static_cast(_networkInputs.at(inputName)->getTensorDesc().getDims()[0]) : 1; for (auto i = 0; i < expected_batch; i++) { std::string y_name = inputName + "_Y" + std::to_string(i); @@ -890,7 +890,7 @@ void InferRequestLegacy::setup_stream_graph() { auto& streamGraphs = static_cast(_exeNetwork.get())->m_graphs; if (nullptr != streamExecutor) { streamID = streamExecutor->GetStreamId(); - int numGraphs = streamGraphs.size(); + auto numGraphs = streamGraphs.size(); streamID = streamID % numGraphs; } m_graph = streamGraphs[streamID]; @@ -904,7 +904,7 @@ void InferRequestLegacy::setup_stream_graph() { // extract new batch size from blob const auto batch_idx = m_graph->GetInputDynBatchDims()[input.first].first; if (batch_idx >= 0) { - SetBatch(_inputs[input.first]->getTensorDesc().getDims()[batch_idx]); + SetBatch(static_cast(_inputs[input.first]->getTensorDesc().getDims()[batch_idx])); break; } } diff --git a/src/plugins/intel_gpu/src/plugin/ops/experimental_detectron_topk_rois.cpp b/src/plugins/intel_gpu/src/plugin/ops/experimental_detectron_topk_rois.cpp index f6907e0e4bad14..e76d796df473d4 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/experimental_detectron_topk_rois.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/experimental_detectron_topk_rois.cpp @@ -21,7 +21,7 @@ void CreateExperimentalDetectronTopKROIsOp(Program &p, const std::shared_ptr &op) { validate_inputs_count(op, {2}); auto inputs = p.GetInputInfo(op); - auto max_rois = op->get_max_rois(); + auto max_rois = static_cast(op->get_max_rois()); auto layer_name = layer_type_name_ID(op); auto argmax_layer_name = layer_name + "_topk"; auto top_k_indices = arg_max_min(argmax_layer_name, diff --git a/src/plugins/intel_gpu/src/plugin/ops/extract_image_patches.cpp 
b/src/plugins/intel_gpu/src/plugin/ops/extract_image_patches.cpp index da42fb9d83b9b7..7e9dca2fcaf835 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/extract_image_patches.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/extract_image_patches.cpp @@ -28,9 +28,18 @@ static void CreateExtractImagePatchesOp(Program& p, const std::shared_ptr sizes = std::vector(op->get_sizes().begin(), op->get_sizes().end()); - std::vector strides = std::vector(op->get_strides().begin(), op->get_strides().end()); - std::vector rates = std::vector(op->get_rates().begin(), op->get_rates().end()); + std::vector sizes; + std::vector strides; + std::vector rates; + for (auto size : op->get_sizes()) { + sizes.push_back(static_cast(size)); + } + for (auto stride : op->get_strides()) { + strides.push_back(static_cast(stride)); + } + for (auto rate : op->get_rates()) { + rates.push_back(static_cast(rate)); + } std::string auto_pad = PadToString(op->get_auto_pad()); auto extractImagePatchesPrim = cldnn::extract_image_patches(layerName, diff --git a/src/plugins/intel_gpu/src/plugin/ops/gather_nd.cpp b/src/plugins/intel_gpu/src/plugin/ops/gather_nd.cpp index 902db8426b2714..99861fa36c2581 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/gather_nd.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/gather_nd.cpp @@ -18,8 +18,8 @@ static void CreateGatherNDOp(Program& p, const std::shared_ptrget_input_partial_shape(0).size(); - auto indices_rank = op->get_input_partial_shape(1).size(); + auto input_rank = static_cast(op->get_input_partial_shape(0).size()); + auto indices_rank = static_cast(op->get_input_partial_shape(1).size()); auto batch_dims = static_cast(op->get_batch_dims()); auto primitive = cldnn::gather_nd(layerName, @@ -39,9 +39,8 @@ static void CreateGatherNDOp(Program& p, const std::shared_ptrget_input_partial_shape(0).size(); - auto indices_rank = op->get_input_partial_shape(1).size(); - + auto input_rank = static_cast(op->get_input_partial_shape(0).size()); + auto indices_rank = 
static_cast(op->get_input_partial_shape(1).size()); auto batch_dims = static_cast(op->get_batch_dims()); auto primitive = cldnn::gather_nd(layerName, diff --git a/src/plugins/intel_gpu/src/plugin/ops/lrn.cpp b/src/plugins/intel_gpu/src/plugin/ops/lrn.cpp index 2ff91d0b2820a7..71a6ced9da9ef0 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/lrn.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/lrn.cpp @@ -31,7 +31,7 @@ static void CreateLRNOp(Program& p, const std::shared_ptr& IE_THROW() << "Unsupported axes node type in " << op->get_friendly_name() << " (" << op->get_type_name() << ")"; } auto axis_value = axis_const->cast_vector(); - auto localSize = op->get_nsize(); + auto localSize = static_cast(op->get_nsize()); auto lrnPrim = cldnn::lrn(layerName, inputs[0], diff --git a/src/plugins/intel_gpu/src/plugin/ops/roll.cpp b/src/plugins/intel_gpu/src/plugin/ops/roll.cpp index e4ba889ce806c9..f8d20d9f78ae45 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/roll.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/roll.cpp @@ -23,7 +23,7 @@ void CreateRollOp(Program& p, const std::shared_ptr& op) { const auto& input_pshape = op->get_input_partial_shape(0); OPENVINO_ASSERT(input_pshape.is_static(), "Dynamic shapes are not supported for Roll operation yet"); const auto& input_shape = input_pshape.to_shape(); - const auto rank = input_shape.size(); + const auto rank = static_cast(input_shape.size()); const auto format = cldnn::format::get_default_format(rank); const auto default_rank = format.dimension(); @@ -53,7 +53,7 @@ void CreateRollOp(Program& p, const std::shared_ptr& op) { } // Normalize shift - for (size_t s = 0; s < static_cast(rank); ++s) { + for (size_t s = 0; s < rank; ++s) { auto& sh = shift[s]; const auto dim = static_cast(input_shape[s]); sh %= dim; diff --git a/src/plugins/intel_gpu/src/runtime/layout.cpp b/src/plugins/intel_gpu/src/runtime/layout.cpp index fad8eb1bb27151..ee825df8ec5cff 100644 --- a/src/plugins/intel_gpu/src/runtime/layout.cpp +++ 
b/src/plugins/intel_gpu/src/runtime/layout.cpp @@ -197,7 +197,10 @@ std::vector layout::get_dims() const { if (is_dynamic()) throw std::runtime_error("[GPU] get_dims() is called for dynamic shape"); auto shape = size.to_shape(); - std::vector res(shape.begin(), shape.end()); + std::vector res; + for (auto dim : shape) { + res.push_back(static_cast(dim)); + } if (res.size() < format.dimension()) res.insert(res.end(), format.dimension() - res.size(), 1); @@ -333,7 +336,10 @@ tensor layout::get_tensor() const { shape = size.to_shape(); } - std::vector dims(shape.begin(), shape.end()); + std::vector dims; + for (auto dim : shape) { + dims.push_back(static_cast(dim)); + } auto rank = std::max(format.dimension(), dims.size()); auto default_fmt = format::get_default_format(rank, format::is_weights_format(format), format::is_grouped(format)); @@ -513,7 +519,10 @@ ov::PartialShape layout::transform(cldnn::format new_fmt) const { cldnn::tensor::value_type default_size = -1; auto shape = size.to_shape(); - std::vector dims(shape.begin(), shape.end()); + std::vector dims; + for (auto dim : shape) { + dims.push_back(static_cast(dim)); + } const cldnn::format default_fmt = cldnn::format::bfwzyx; auto old_sizes = convert_dimensions(dims, format.order(), default_fmt.internal_order()); // convert to internal order (bfxyzw) From f7458d7521aa5e94c6fc2aa9eadcf3a583e45aeb Mon Sep 17 00:00:00 2001 From: Gorbachev Date: Thu, 9 Mar 2023 18:56:11 +0000 Subject: [PATCH 05/12] fixed linux plugin --- src/plugins/intel_gpu/src/plugin/ops/roll.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/plugins/intel_gpu/src/plugin/ops/roll.cpp b/src/plugins/intel_gpu/src/plugin/ops/roll.cpp index f8d20d9f78ae45..a80cb0222cf12a 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/roll.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/roll.cpp @@ -53,7 +53,7 @@ void CreateRollOp(Program& p, const std::shared_ptr& op) { } // Normalize shift - for (size_t s = 0; s < rank; ++s) { + for (int 
s = 0; s < rank; ++s) { auto& sh = shift[s]; const auto dim = static_cast(input_shape[s]); sh %= dim; From 3ef91fc60dfccc1a53ada407147fb1bce7067553 Mon Sep 17 00:00:00 2001 From: Gorbachev Date: Fri, 10 Mar 2023 00:35:19 +0000 Subject: [PATCH 06/12] fixed a part of tests --- .../intel_gpu/tests/gtest_main_gpu.cpp | 2 +- .../tests/module_tests/device_test.cpp | 2 +- .../tests/module_tests/layout_test.cpp | 11 +++-- .../tests/shape_infer/crop_si_test.cpp | 2 +- .../adaptive_max_pooling_gpu_test.cpp | 4 +- .../tests/test_cases/broadcast_gpu_test.cpp | 5 ++- .../tests/test_cases/convolution_gpu_test.cpp | 21 +++++----- .../tests/test_cases/crop_gpu_test.cpp | 2 +- .../test_cases/deconvolution_gpu_test.cpp | 4 +- .../tests/test_cases/eltwise_gpu_test.cpp | 14 +++---- .../tests/test_cases/gather_gpu_test.cpp | 2 +- .../tests/test_cases/lru_caches_gpu_test.cpp | 6 +-- .../tests/test_cases/pooling_gpu_test.cpp | 40 +++++++++---------- .../test_cases/set_output_memory_gpu_test.cpp | 6 +-- .../intel_gpu/tests/test_cases/slice.cpp | 2 +- 15 files changed, 66 insertions(+), 57 deletions(-) diff --git a/src/plugins/intel_gpu/tests/gtest_main_gpu.cpp b/src/plugins/intel_gpu/tests/gtest_main_gpu.cpp index aa299e0dc6f427..856c541a607df9 100644 --- a/src/plugins/intel_gpu/tests/gtest_main_gpu.cpp +++ b/src/plugins/intel_gpu/tests/gtest_main_gpu.cpp @@ -47,7 +47,7 @@ GTEST_API_ int main(int argc, char** argv) { cldnn::device_query::device_id = FLAGS_device_suffix; //restore cmdline arg for gtest auto varg=gflags::GetArgvs(); - int new_argc=varg.size(); + int new_argc = static_cast(varg.size()); char** new_argv=new char*[new_argc]; for(int i=0;i(device_id); } device_info get_info() const override { return _info; } diff --git a/src/plugins/intel_gpu/tests/module_tests/layout_test.cpp b/src/plugins/intel_gpu/tests/module_tests/layout_test.cpp index 4be438acda5f13..4e110514f115ad 100644 --- a/src/plugins/intel_gpu/tests/module_tests/layout_test.cpp +++ 
b/src/plugins/intel_gpu/tests/module_tests/layout_test.cpp @@ -34,8 +34,8 @@ TEST_P(data_layout_test, size_check) { auto l = layout(p.dt, p.fmt, tensor{default_fmt, p.size}); - size_t expected_count = std::accumulate(p.size.begin(), p.size.end(), 1, std::multiplies()); - size_t expected_bytes_count = std::accumulate(p.expected_aligned_size.begin(), p.expected_aligned_size.end(), 1, std::multiplies()) * + size_t expected_count = std::accumulate(p.size.begin(), p.size.end(), 1, std::multiplies()); + size_t expected_bytes_count = std::accumulate(p.expected_aligned_size.begin(), p.expected_aligned_size.end(), 1, std::multiplies()) * data_type_traits::size_of(p.dt); ASSERT_EQ(l.bytes_count(), expected_bytes_count); @@ -117,8 +117,11 @@ TEST_P(weights_layout_test, size_check) { auto l = layout(p.dt, p.fmt, tensor{default_fmt, p.size}); - size_t expected_count = std::accumulate(p.size.begin(), p.size.end(), 1, std::multiplies()); - size_t expected_bytes_count = std::accumulate(p.expected_aligned_size.begin(), p.expected_aligned_size.end(), 1, std::multiplies()) * + size_t expected_count = std::accumulate(p.size.begin(), p.size.end(), 1, std::multiplies()); + size_t expected_bytes_count = std::accumulate(p.expected_aligned_size.begin(), + p.expected_aligned_size.end(), + 1, + std::multiplies()) * data_type_traits::size_of(p.dt); ASSERT_EQ(l.bytes_count(), expected_bytes_count); diff --git a/src/plugins/intel_gpu/tests/shape_infer/crop_si_test.cpp b/src/plugins/intel_gpu/tests/shape_infer/crop_si_test.cpp index 73c84a8ff80a87..c7db35f5d73a70 100644 --- a/src/plugins/intel_gpu/tests/shape_infer/crop_si_test.cpp +++ b/src/plugins/intel_gpu/tests/shape_infer/crop_si_test.cpp @@ -61,7 +61,7 @@ TEST_P(crop_si_test, shape_infer) { for (size_t output_idx = 0; output_idx < p.expected_layouts.size(); output_idx++) { auto prim_id = "crop.out" + std::to_string(output_idx); - auto crop_prim = std::make_shared(prim_id, input_prim_ids, p.reference_input_size, p.offsets[output_idx], 
op_mode, output_idx, p.param_num_splits); + auto crop_prim = std::make_shared(prim_id, input_prim_ids, p.reference_input_size, p.offsets[output_idx], op_mode, static_cast(output_idx), p.param_num_splits); auto& crop_node = prog.get_or_create(crop_prim); for (auto& prim : input_prims) { diff --git a/src/plugins/intel_gpu/tests/test_cases/adaptive_max_pooling_gpu_test.cpp b/src/plugins/intel_gpu/tests/test_cases/adaptive_max_pooling_gpu_test.cpp index 878761b7b6ca8b..2c582dd4b57b66 100644 --- a/src/plugins/intel_gpu/tests/test_cases/adaptive_max_pooling_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/test_cases/adaptive_max_pooling_gpu_test.cpp @@ -182,8 +182,8 @@ struct adaptive_max_pooling_test return; const auto block_sizes = format::traits(target_layout).block_sizes; - const auto index_offset = std::accumulate(block_sizes.begin(), block_sizes.end(), 1u, - [](size_t total, const std::pair& b) { + const auto index_offset = std::accumulate(block_sizes.begin(), block_sizes.end(), 1, + [](int total, const std::pair& b) { return total * b.second; } ); diff --git a/src/plugins/intel_gpu/tests/test_cases/broadcast_gpu_test.cpp b/src/plugins/intel_gpu/tests/test_cases/broadcast_gpu_test.cpp index 56fb98b79e7aaa..0ed3d621325b36 100644 --- a/src/plugins/intel_gpu/tests/test_cases/broadcast_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/test_cases/broadcast_gpu_test.cpp @@ -133,7 +133,10 @@ void start_broadcast_test_dynamic(format input_format, topology.add( broadcast("broadcast", input_info("reorder"), input_info("target_shape"), ov::AxisSet(broadcast_axes))); topology.add(reorder("output", input_info("broadcast"), fmt, input_data_type)); - std::vector target_shape_data(output_shape.begin(), output_shape.end()); + std::vector target_shape_data; + for (auto out_shape : output_shape) { + target_shape_data.push_back(static_cast(out_shape)); + } set_values(target_shape_mem, target_shape_data); } diff --git a/src/plugins/intel_gpu/tests/test_cases/convolution_gpu_test.cpp 
b/src/plugins/intel_gpu/tests/test_cases/convolution_gpu_test.cpp index fd00fa0b308384..fbc48b98cd3789 100644 --- a/src/plugins/intel_gpu/tests/test_cases/convolution_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/test_cases/convolution_gpu_test.cpp @@ -8732,15 +8732,18 @@ class convolution_test : public tests::generic_test { auto pad = convolution->pad; tensor weights_size = generic_params->input_layouts[1].get_tensor(); - int kernel_extent_y = dilation[dilation.size() - 2] * (weights_size.spatial[1] - 1) + 1; - int kernel_extent_x = dilation[dilation.size() - 1] * (weights_size.spatial[0] - 1) + 1; + auto kernel_extent_y = dilation[dilation.size() - 2] * (weights_size.spatial[1] - 1) + 1; + auto kernel_extent_x = dilation[dilation.size() - 1] * (weights_size.spatial[0] - 1) + 1; // Calculate output size - int output_size_y = 1 + (input_size.spatial[1] - kernel_extent_y + 2 * pad[0]) / stride[0]; - int output_size_x = 1 + (input_size.spatial[0] - kernel_extent_x + 2 * pad[1]) / stride[1]; - int output_features = weights_size.batch[0]; + auto output_size_y = 1 + (input_size.spatial[1] - kernel_extent_y + 2 * pad[0]) / stride[0]; + auto output_size_x = 1 + (input_size.spatial[0] - kernel_extent_x + 2 * pad[1]) / stride[1]; + auto output_features = weights_size.batch[0]; - return cldnn::tensor(input_size.batch[0], output_features, output_size_x, output_size_y); + return cldnn::tensor(input_size.batch[0], + static_cast(output_features), + static_cast(output_size_x), + static_cast(output_size_y)); } void prepare_input_for_test(std::vector& inputs) override { @@ -8841,18 +8844,18 @@ class convolution_test : public tests::generic_test { int output_fi = out_f; int output_yi = y; int output_xi = x; - int output_index = (output_bi * output_buffer_size.feature[0] + output_fi) * output_buffer_size.spatial[1] * output_buffer_size.spatial[0]; + auto output_index = (output_bi * output_buffer_size.feature[0] + output_fi) * output_buffer_size.spatial[1] * 
output_buffer_size.spatial[0]; tensor lower_output_padding = convolution->output_paddings[0].lower_size(); output_index += (lower_output_padding.spatial[1] + output_yi) * output_buffer_size.spatial[0] + lower_output_padding.spatial[0] + output_xi; for (int kernel_y = 0; kernel_y < weights_size.spatial[1]; kernel_y++) { - int input_yi = y * stride[0] - pad[0] + kernel_y * dilation[0]; + auto input_yi = y * stride[0] - pad[0] + kernel_y * dilation[0]; if ((input_yi < 0) || (input_yi >= input_size.spatial[1])) { continue; } for (int kernel_x = 0; kernel_x < weights_size.spatial[0]; kernel_x++) { - int input_xi = x * stride[1] - pad[1] + kernel_x * dilation[1]; + auto input_xi = x * stride[1] - pad[1] + kernel_x * dilation[1]; if ((input_xi < 0) || (input_xi >= input_size.spatial[0])) { continue; } diff --git a/src/plugins/intel_gpu/tests/test_cases/crop_gpu_test.cpp b/src/plugins/intel_gpu/tests/test_cases/crop_gpu_test.cpp index 0c147209c0b241..d0492aa55b19e8 100644 --- a/src/plugins/intel_gpu/tests/test_cases/crop_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/test_cases/crop_gpu_test.cpp @@ -1460,7 +1460,7 @@ TEST(crop_gpu, static_split_batch) { topology.add(crop("crop3", { input_info("input") }, tensor(1, 4, 1, 1), { tensor(2, 0, 0, 0) }, op_mode, 2)); std::vector input_vec(12); - for (size_t i = 0; i < 12; i++) { + for (int32_t i = 0; i < 12; i++) { input_vec[i] = i; } diff --git a/src/plugins/intel_gpu/tests/test_cases/deconvolution_gpu_test.cpp b/src/plugins/intel_gpu/tests/test_cases/deconvolution_gpu_test.cpp index 2a41b952054713..e0aa1d1ff1bf8f 100644 --- a/src/plugins/intel_gpu/tests/test_cases/deconvolution_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/test_cases/deconvolution_gpu_test.cpp @@ -2425,9 +2425,9 @@ struct deconvolution_random_test_params { }; auto print_strides = [&](const ov::Strides& s) { - std::string res = to_string_neg(s[0]); + std::string res = to_string_neg(static_cast(s[0])); for (size_t i = 1; i < s.size(); i++) { - res += "x" + 
to_string_neg(s[i]); + res += "x" + to_string_neg(static_cast(s[i])); } return res; }; diff --git a/src/plugins/intel_gpu/tests/test_cases/eltwise_gpu_test.cpp b/src/plugins/intel_gpu/tests/test_cases/eltwise_gpu_test.cpp index 4a7e3eaa5b7ff6..4bbe8ae9413c96 100644 --- a/src/plugins/intel_gpu/tests/test_cases/eltwise_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/test_cases/eltwise_gpu_test.cpp @@ -2924,10 +2924,10 @@ TEST(eltwise_gpu_f16, fs_b_yx_fsv32_broadcast) return; } - size_t input_b = 2; - size_t input_f = 72; - size_t input1_y = 10, input1_x = 10; - size_t input2_y = 1, input2_x = 1; + tensor::value_type input_b = 2; + tensor::value_type input_f = 72; + tensor::value_type input1_y = 10, input1_x = 10; + tensor::value_type input2_y = 1, input2_x = 1; tensor input1_tensor(input_b, input_f, input1_x, input1_y); tensor input2_tensor(input_b, input_f, input2_x, input2_y); @@ -2989,9 +2989,9 @@ TEST(eltwise_gpu_f16, fs_b_yx_fsv32_broadcast_bfyx) return; } - size_t input_b = 2; - size_t input_f = 72; - size_t input1_y = 10, input1_x = 10; + tensor::value_type input_b = 2; + tensor::value_type input_f = 72; + tensor::value_type input1_y = 10, input1_x = 10; tensor input1_tensor(input_b, input_f, input1_x, input1_y); tensor input2_tensor(1, input_f, 1, 1); diff --git a/src/plugins/intel_gpu/tests/test_cases/gather_gpu_test.cpp b/src/plugins/intel_gpu/tests/test_cases/gather_gpu_test.cpp index fe3d11630e0bb9..9f421837ca46ae 100644 --- a/src/plugins/intel_gpu/tests/test_cases/gather_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/test_cases/gather_gpu_test.cpp @@ -17,7 +17,7 @@ using namespace ::tests; template int get_not_one_dim(const T& a) { - int ret = a.size(); + int ret = static_cast(a.size()); while (ret - 1 >= 0 && a[ret - 1] == 1) ret--; return ret; diff --git a/src/plugins/intel_gpu/tests/test_cases/lru_caches_gpu_test.cpp b/src/plugins/intel_gpu/tests/test_cases/lru_caches_gpu_test.cpp index 8016ccddad6a3e..da952790365313 100644 --- 
a/src/plugins/intel_gpu/tests/test_cases/lru_caches_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/test_cases/lru_caches_gpu_test.cpp @@ -43,11 +43,11 @@ TEST(lru_cache, basic_data_type) std::vector> expected_value; for (size_t i = ca.size(); i > 0; i--) { // 5, 1, 2, 4 - int idx = input_values.size() - i; + int idx = static_cast(input_values.size() - i); expected_value.push_back(input_values[idx]); } - int idx = expected_value.size() - 1; + auto idx = expected_value.size() - 1; for (auto key : ca.get_all_keys()) { ASSERT_EQ(key, expected_value[idx--].first); } @@ -118,7 +118,7 @@ TEST(lru_cache, custom_data_type) { expected_keys.push_back(inputs[inputs.size() - i]->key); } - int idx = expected_keys.size() - 1; + auto idx = expected_keys.size() - 1; for (auto key : ca.get_all_keys()) { ASSERT_EQ(key, expected_keys[idx--]); } diff --git a/src/plugins/intel_gpu/tests/test_cases/pooling_gpu_test.cpp b/src/plugins/intel_gpu/tests/test_cases/pooling_gpu_test.cpp index adb69a1b0023df..1cac6642b0b4ec 100644 --- a/src/plugins/intel_gpu/tests/test_cases/pooling_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/test_cases/pooling_gpu_test.cpp @@ -1216,7 +1216,7 @@ static void generic_average_wo_padding_test(format fmt, tensor output, tensor in tensor off(0); for (size_t i = 0; i < offset.size(); i++) { - off.spatial[i] = offset[offset.size() - i - 1]; + off.spatial[i] = static_cast(offset[offset.size() - i - 1]); } auto pool_in = "in"; @@ -2081,15 +2081,15 @@ class pooling_random_test_base : public pooling_test_base { for (size_t fi = 0; fi < this->input_features(); ++fi) { reference[bi][fi] = reference_pooling( this->_input[bi][fi], - this->pool_x(), - this->pool_y(), - this->pool_z(), - this->stride_x(), - this->stride_y(), - this->stride_z(), - this->offset_x(), - this->offset_y(), - this->offset_z()); + static_cast(this->pool_x()), + static_cast(this->pool_y()), + static_cast(this->pool_z()), + static_cast(this->stride_x()), + static_cast(this->stride_y()), + 
static_cast(this->stride_z()), + static_cast(this->offset_x()), + static_cast(this->offset_y()), + static_cast(this->offset_z())); } } return reference; @@ -3008,14 +3008,14 @@ class pooling_test : public tests::generic_test cldnn::pooling_mode pooling_mode = pooling->mode; - int pad_width = pooling->pads_begin[1]; - int pad_height = pooling->pads_begin[0]; + int pad_width = static_cast(pooling->pads_begin[1]); + int pad_height = static_cast(pooling->pads_begin[0]); - int kernel_width = pooling->size[1]; - int kernel_height = pooling->size[0]; + int kernel_width = static_cast(pooling->size[1]); + int kernel_height = static_cast(pooling->size[0]); - int stride_width = pooling->stride[1]; - int stride_height = pooling->stride[0]; + int stride_width = static_cast(pooling->stride[1]); + int stride_height = static_cast(pooling->stride[0]); auto output_tensor = get_expected_output_tensor(); @@ -3060,9 +3060,9 @@ class pooling_test : public tests::generic_test const size_t output_index = get_linear_index(output->get_layout(), b, f, h, w, output_desc); - for (int y = pad_y_start; y < pad_y_end; y++) + for (auto y = pad_y_start; y < pad_y_end; y++) { - for (int x = pad_x_start; x < pad_x_end; x++) + for (auto x = pad_x_start; x < pad_x_end; x++) { const size_t input_index = get_linear_index(inputs[0]->get_layout(), b, f, y, x, input_desc); @@ -3081,8 +3081,8 @@ class pooling_test : public tests::generic_test case cldnn::pooling_mode::average: case cldnn::pooling_mode::average_no_padding: { - int pool_size_w = pooling->size[1]; - int pool_size_h = pooling->size[0]; + auto pool_size_w = pooling->size[1]; + auto pool_size_h = pooling->size[0]; auto dynamic_mode = (((output_tensor.spatial[0] - 1) * stride_width) + pool_size_w) > -2 * pad_width + width || (((output_tensor.spatial[1] - 1) * stride_height) + pool_size_h) > -2 * pad_height + height; diff --git a/src/plugins/intel_gpu/tests/test_cases/set_output_memory_gpu_test.cpp 
b/src/plugins/intel_gpu/tests/test_cases/set_output_memory_gpu_test.cpp index 19ad6572b81460..2e6c9f55bc7561 100644 --- a/src/plugins/intel_gpu/tests/test_cases/set_output_memory_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/test_cases/set_output_memory_gpu_test.cpp @@ -36,7 +36,7 @@ void test_basic(bool is_caching_test) { auto input_data = engine.allocate_memory({ data_types::f32, format::bfyx, { b, f, x, y } }); auto output_mem = engine.allocate_memory({ data_types::f32, format::bfyx, { b, f, x, y } }); - const int inputSize = input_data->get_layout().count(); + const auto inputSize = input_data->get_layout().count(); auto inputVals = generateVector(inputSize); set_values(input_data, inputVals); @@ -80,7 +80,7 @@ TEST(set_output_memory_gpu, basic_const) { auto output_mem = engine.allocate_memory({ data_types::f32, format::bfyx, { b, f, x, y } }); auto output_const_mem = engine.allocate_memory({ data_types::f32, format::bfyx, { b, f, x, y } }); - const int inputSize = input_data->get_layout().count(); + const int inputSize = static_cast(input_data->get_layout().count()); auto inputVals = generateVector(inputSize); auto constVals = generateVector(inputSize); set_values(input_data, inputVals); @@ -129,7 +129,7 @@ TEST(set_output_memory_gpu, basic_mutable) { auto md = engine.allocate_memory({ data_types::f32, format::bfyx, { b, f, x, y } }); auto output_mem = engine.allocate_memory({ data_types::f32, format::bfyx, { b, f, x, y } }); auto output_mutable_mem = engine.allocate_memory({ data_types::f32, format::bfyx, { b, f, x, y } }); - const int inputSize = input_data->get_layout().count(); + const auto inputSize = input_data->get_layout().count(); auto inputVals = generateVector(inputSize); auto mutableVals = generateVector(inputSize); set_values(input_data, inputVals); diff --git a/src/plugins/intel_gpu/tests/test_cases/slice.cpp b/src/plugins/intel_gpu/tests/test_cases/slice.cpp index ebb6b3f28eac99..9e06a840b5e645 100644 --- 
a/src/plugins/intel_gpu/tests/test_cases/slice.cpp +++ b/src/plugins/intel_gpu/tests/test_cases/slice.cpp @@ -30,7 +30,7 @@ class SliceTest : public ::testing::Test { assert(input_shape_.size() == 4 || input_shape_.size() == 5); format input_format = input_shape_.size() == 4 ? format::bfyx : format::bfzyx; layout data_layout ( input_type_, input_format, tensor{input_shape_} ); - std::vector input_vals = GenInput(data_layout.get_linear_size()); + std::vector input_vals = GenInput(static_cast(data_layout.get_linear_size())); memory::ptr input = engine_.allocate_memory(data_layout); set_values(input, input_vals); topology topology; From b0fc46fcb6ebe8e054e193d7cdebec61bd554c9a Mon Sep 17 00:00:00 2001 From: Gorbachev Date: Fri, 10 Mar 2023 00:47:18 +0000 Subject: [PATCH 07/12] fixed test for linux --- .../intel_gpu/tests/test_cases/convolution_gpu_test.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/plugins/intel_gpu/tests/test_cases/convolution_gpu_test.cpp b/src/plugins/intel_gpu/tests/test_cases/convolution_gpu_test.cpp index fbc48b98cd3789..c8607728374de4 100644 --- a/src/plugins/intel_gpu/tests/test_cases/convolution_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/test_cases/convolution_gpu_test.cpp @@ -8849,14 +8849,14 @@ class convolution_test : public tests::generic_test { output_index += (lower_output_padding.spatial[1] + output_yi) * output_buffer_size.spatial[0] + lower_output_padding.spatial[0] + output_xi; for (int kernel_y = 0; kernel_y < weights_size.spatial[1]; kernel_y++) { - auto input_yi = y * stride[0] - pad[0] + kernel_y * dilation[0]; - if ((input_yi < 0) || (input_yi >= input_size.spatial[1])) { continue; } + int input_yi = static_cast(y * stride[0] - pad[0] + kernel_y * dilation[0]); + if ((input_yi < 0) || (input_yi >= static_cast(input_size.spatial[1]))) { continue; } for (int kernel_x = 0; kernel_x < weights_size.spatial[0]; kernel_x++) { - auto input_xi = x * stride[1] - pad[1] + kernel_x * dilation[1]; - if ((input_xi
< 0) || (input_xi >= input_size.spatial[0])) { continue; } + int input_xi = static_cast(x * stride[1] - pad[1] + kernel_x * dilation[1]); + if ((input_xi < 0) || (input_xi >= static_cast(input_size.spatial[0]))) { continue; } From de6fad7c649dc103e73cc60101c5c96f6400fbfb Mon Sep 17 00:00:00 2001 From: Gorbachev Date: Fri, 10 Mar 2023 00:50:34 +0000 Subject: [PATCH 08/12] fixed pooling_gpu_test for linux --- src/plugins/intel_gpu/tests/test_cases/pooling_gpu_test.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/plugins/intel_gpu/tests/test_cases/pooling_gpu_test.cpp b/src/plugins/intel_gpu/tests/test_cases/pooling_gpu_test.cpp index 1cac6642b0b4ec..c62235ed5d12ae 100644 --- a/src/plugins/intel_gpu/tests/test_cases/pooling_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/test_cases/pooling_gpu_test.cpp @@ -3083,8 +3083,9 @@ class pooling_test : public tests::generic_test { auto pool_size_w = pooling->size[1]; auto pool_size_h = pooling->size[0]; - auto dynamic_mode = (((output_tensor.spatial[0] - 1) * stride_width) + pool_size_w) > -2 * pad_width + width || - (((output_tensor.spatial[1] - 1) * stride_height) + pool_size_h) > -2 * pad_height + height; + auto dynamic_mode = statis_cast(((output_tensor.spatial[0] - 1) * stride_width) + pool_size_w) > -2 * pad_width + width || + statis_cast(((output_tensor.spatial[1] - 1) * stride_height) + pool_size_h) > + -2 * pad_height + height; auto divider = [=](int actual_x, int actual_y) { auto x = kernel_width; From 4461bfa466a04adc289b34e641a9b927acda4f56 Mon Sep 17 00:00:00 2001 From: Gorbachev Date: Fri, 10 Mar 2023 00:53:42 +0000 Subject: [PATCH 09/12] fixed pooling_gpu_test for linux --- src/plugins/intel_gpu/tests/test_cases/pooling_gpu_test.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/plugins/intel_gpu/tests/test_cases/pooling_gpu_test.cpp b/src/plugins/intel_gpu/tests/test_cases/pooling_gpu_test.cpp index c62235ed5d12ae..42312c5bfd91ca 100644 ---
a/src/plugins/intel_gpu/tests/test_cases/pooling_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/test_cases/pooling_gpu_test.cpp @@ -3083,8 +3083,9 @@ class pooling_test : public tests::generic_test { auto pool_size_w = pooling->size[1]; auto pool_size_h = pooling->size[0]; - auto dynamic_mode = statis_cast(((output_tensor.spatial[0] - 1) * stride_width) + pool_size_w) > -2 * pad_width + width || - statis_cast(((output_tensor.spatial[1] - 1) * stride_height) + pool_size_h) > + auto dynamic_mode = static_cast(((output_tensor.spatial[0] - 1) * stride_width) + pool_size_w) > + -2 * pad_width + width || + static_cast(((output_tensor.spatial[1] - 1) * stride_height) + pool_size_h) > -2 * pad_height + height; auto divider = [=](int actual_x, int actual_y) { From 2b75a2c72a096eaf0c3a0b606d1258e6d6e60ac8 Mon Sep 17 00:00:00 2001 From: Gorbachev Date: Sat, 11 Mar 2023 16:34:35 +0000 Subject: [PATCH 10/12] fix after review and enable wd4267 in makefile --- src/inference/dev_api/performance_heuristics.hpp | 4 ++-- src/plugins/intel_gpu/CMakeLists.txt | 2 +- src/plugins/intel_gpu/include/intel_gpu/plugin/graph.hpp | 2 +- src/plugins/intel_gpu/src/graph/layout_optimizer.cpp | 4 ++-- src/plugins/intel_gpu/src/graph/network.cpp | 3 +-- .../kernels/scatter_update/scatter_nd_update_kernel_ref.cpp | 2 +- src/plugins/intel_gpu/src/plugin/infer_request_legacy.cpp | 2 +- .../intel_gpu/tests/test_cases/deconvolution_gpu_test.cpp | 6 +++--- .../intel_gpu/tests/test_cases/lru_caches_gpu_test.cpp | 2 +- 9 files changed, 13 insertions(+), 14 deletions(-) diff --git a/src/inference/dev_api/performance_heuristics.hpp b/src/inference/dev_api/performance_heuristics.hpp index 5111e676e12329..563d7627393701 100644 --- a/src/inference/dev_api/performance_heuristics.hpp +++ b/src/inference/dev_api/performance_heuristics.hpp @@ -29,7 +29,7 @@ static MemBandwidthPressure MemBandwidthPressureTolerance( const float memThresholdAssumeLimited = MemBandwidthPressure::LIMITED) { int total_convs = 0, 
mem_limited_convs = 0, compute_convs = 0, total_gemms = 0, mem_limited_gemms = 0, total_deconvs = 0, compute_deconvs = 0, mem_limited_deconvs = 0; - auto memLimitedFactor = [&](int size_data_moved, int datatype_size = 4) -> float { + auto memLimitedFactor = [&](size_t size_data_moved, int datatype_size = 4) -> float { return (cache_size / (size_data_moved * datatype_size)); }; auto isLowPrecision = [&](ngraph::element::Type type) -> bool { @@ -77,7 +77,7 @@ static MemBandwidthPressure MemBandwidthPressureTolerance( std::accumulate(shapeOutput.begin(), shapeOutput.end(), size_t(1), std::multiplies()); const auto total_data = dataSizeInput0 + non_const * dataSizeInput1 + dataSizeOutput; total_gemms++; - const auto factor = memLimitedFactor(static_cast(total_data), data_type_size); + const auto factor = memLimitedFactor(total_data, data_type_size); mem_limited_gemms += factor < memThresholdAssumeLimited; worst_case = std::min(factor, worst_case); } diff --git a/src/plugins/intel_gpu/CMakeLists.txt b/src/plugins/intel_gpu/CMakeLists.txt index d4420d36c5602b..a76e015346b82a 100644 --- a/src/plugins/intel_gpu/CMakeLists.txt +++ b/src/plugins/intel_gpu/CMakeLists.txt @@ -15,7 +15,7 @@ endif() if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") # 4267 4244 conversion from 'XXX' to 'YYY', possible loss of data ie_add_compiler_flags(/wd4244) - # ie_add_compiler_flags(/wd4267) + ie_add_compiler_flags(/wd4267) # '<': signed/unsigned mismatch ie_add_compiler_flags(/wd4018) endif() diff --git a/src/plugins/intel_gpu/include/intel_gpu/plugin/graph.hpp b/src/plugins/intel_gpu/include/intel_gpu/plugin/graph.hpp index ec3556e684fd8c..39a36443a68fe9 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/plugin/graph.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/plugin/graph.hpp @@ -59,7 +59,7 @@ class Graph { cldnn::engine& get_engine() const { return m_context->get_engine(); } const ExecutionConfig& get_config() const { return m_config; } - int GetMaxDynamicBatchSize() const { return 
static_cast(m_config.get_property(ov::intel_gpu::max_dynamic_batch));} + size_t GetMaxDynamicBatchSize() const { return m_config.get_property(ov::intel_gpu::max_dynamic_batch);} const std::map& GetInputLayouts() const { return m_program->GetInputLayouts(); } const InferenceEngine::InputsDataMap GetNetworkInputs() const { return m_program->GetNetworkInputs(); } const InferenceEngine::OutputsDataMap GetNetworkOutputs() const { return m_program->GetNetworkOutputs(); } diff --git a/src/plugins/intel_gpu/src/graph/layout_optimizer.cpp b/src/plugins/intel_gpu/src/graph/layout_optimizer.cpp index ae0ed7f122359c..c64735c0602872 100644 --- a/src/plugins/intel_gpu/src/graph/layout_optimizer.cpp +++ b/src/plugins/intel_gpu/src/graph/layout_optimizer.cpp @@ -827,10 +827,10 @@ static bool is_node_for_onednn(fully_connected_node const& node) { auto fc_prim = node.get_primitive(); auto ps = node.get_output_layout().get_partial_shape(); int non_spatial_count = 2 + (fc_prim->input_size == 3 ? 1 : 0); - int rank = static_cast(ps.size()); + auto rank = ps.size(); // OneDnn doesn't support spatial dimensions for output - for (int i = non_spatial_count; i < rank; i++) { + for (auto i = non_spatial_count; i < rank; i++) { if (ps[i].is_dynamic() || ps[i] != 1) { return false; } diff --git a/src/plugins/intel_gpu/src/graph/network.cpp b/src/plugins/intel_gpu/src/graph/network.cpp index 13c80f330e307b..cbfcf63c39aac9 100644 --- a/src/plugins/intel_gpu/src/graph/network.cpp +++ b/src/plugins/intel_gpu/src/graph/network.cpp @@ -540,8 +540,7 @@ void network::save(cldnn::BinaryOutputBuffer& ob) { } } - int exec_order_size; - exec_order_size = static_cast(_exec_order.size()); + auto exec_order_size = _exec_order.size(); ob << exec_order_size; for (const auto& p_inst : _exec_order) { diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/scatter_update/scatter_nd_update_kernel_ref.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/scatter_update/scatter_nd_update_kernel_ref.cpp 
index 9ae4663b309a8e..cf7951cc5a0a4e 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/scatter_update/scatter_nd_update_kernel_ref.cpp +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/scatter_update/scatter_nd_update_kernel_ref.cpp @@ -130,7 +130,7 @@ static std::string GetInputBlockND(const scatter_nd_update_params& params, size_ block_nd_s[rank] = "1"; size_t input_offset = num * 6; - for (int32_t idx = static_cast(rank - 1); idx >= 0; --idx) { + for (size_t idx = rank - 1; idx >= 0; --idx) { block_nd[idx] = input_dims[idx] * block_nd[idx + 1]; size_t dim_offset = idx < 2 ? idx : idx + 6 - rank; diff --git a/src/plugins/intel_gpu/src/plugin/infer_request_legacy.cpp b/src/plugins/intel_gpu/src/plugin/infer_request_legacy.cpp index 032556a06ef311..596c0740df6b45 100644 --- a/src/plugins/intel_gpu/src/plugin/infer_request_legacy.cpp +++ b/src/plugins/intel_gpu/src/plugin/infer_request_legacy.cpp @@ -531,7 +531,7 @@ void InferRequestLegacy::SetGraph(std::shared_ptr graph) { } if (m_graph->GetMaxDynamicBatchSize() > 1) { - SetBatch(m_graph->GetMaxDynamicBatchSize()); + SetBatch(static_cast(m_graph->GetMaxDynamicBatchSize())); allocate_inputs_dynamic(); allocate_outputs_dynamic(); } else { diff --git a/src/plugins/intel_gpu/tests/test_cases/deconvolution_gpu_test.cpp b/src/plugins/intel_gpu/tests/test_cases/deconvolution_gpu_test.cpp index e0aa1d1ff1bf8f..4c1a6431488d60 100644 --- a/src/plugins/intel_gpu/tests/test_cases/deconvolution_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/test_cases/deconvolution_gpu_test.cpp @@ -2408,7 +2408,7 @@ struct deconvolution_random_test_params { static std::string print_params(const testing::TestParamInfo& param_info) { auto& param = param_info.param; - auto to_string_neg = [](int v) { + auto to_string_neg = [](int64_t v) { if (v >= 0) { return std::to_string(v); } else { @@ -2425,9 +2425,9 @@ struct deconvolution_random_test_params { }; auto print_strides = [&](const ov::Strides& s) { - std::string res = 
to_string_neg(static_cast(s[0])); + std::string res = to_string_neg(s[0]); for (size_t i = 1; i < s.size(); i++) { - res += "x" + to_string_neg(static_cast(s[i])); + res += "x" + to_string_neg(s[i]); } return res; }; diff --git a/src/plugins/intel_gpu/tests/test_cases/lru_caches_gpu_test.cpp b/src/plugins/intel_gpu/tests/test_cases/lru_caches_gpu_test.cpp index da952790365313..627d7eb499f1b8 100644 --- a/src/plugins/intel_gpu/tests/test_cases/lru_caches_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/test_cases/lru_caches_gpu_test.cpp @@ -43,7 +43,7 @@ TEST(lru_cache, basic_data_type) std::vector> expected_value; for (size_t i = ca.size(); i > 0; i--) { // 5, 1, 2, 4 - int idx = static_cast(input_values.size() - i); + auto idx = input_values.size() - i; expected_value.push_back(input_values[idx]); } From 607ffb62efe3e7e1303905ed3e8e14446342b786 Mon Sep 17 00:00:00 2001 From: Gorbachev Date: Mon, 13 Mar 2023 19:18:00 +0000 Subject: [PATCH 11/12] fix after review --- .../intel_gpu/include/intel_gpu/primitives/arg_max_min.hpp | 4 ++-- src/plugins/intel_gpu/src/graph/broadcast.cpp | 6 +++--- src/plugins/intel_gpu/src/graph/impls/ocl/arg_max_min.cpp | 2 +- src/plugins/intel_gpu/src/graph/layout_optimizer.cpp | 4 ++-- src/plugins/intel_gpu/src/plugin/infer_request_legacy.cpp | 2 +- 5 files changed, 9 insertions(+), 9 deletions(-) diff --git a/src/plugins/intel_gpu/include/intel_gpu/primitives/arg_max_min.hpp b/src/plugins/intel_gpu/include/intel_gpu/primitives/arg_max_min.hpp index afca08f6d3d37c..cabb7d7be363ff 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/primitives/arg_max_min.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/primitives/arg_max_min.hpp @@ -103,8 +103,8 @@ struct arg_max_min : public primitive_base { values_first == rhs_casted.values_first; } - uint32_t get_output_nums() const { - return (input_size() == 3 ? 2 : static_cast(output_size())); + size_t get_output_nums() const { + return (input_size() == 3 ? 
2 : output_size()); } bool has_second_output() const { return get_output_nums() == 2; } bool use_multiple_outputs() const { return input_size() != 3; } diff --git a/src/plugins/intel_gpu/src/graph/broadcast.cpp b/src/plugins/intel_gpu/src/graph/broadcast.cpp index 0b74c0020dd8c1..348bac0081c3c9 100644 --- a/src/plugins/intel_gpu/src/graph/broadcast.cpp +++ b/src/plugins/intel_gpu/src/graph/broadcast.cpp @@ -84,11 +84,11 @@ std::vector broadcast_inst::calc_output_layouts(broadcast_node const& /* ov::op::v3::shape_infer(&op, input_shapes, output_shapes, const_data); } else if (impl_param.input_layouts.size() >= 2) { auto input1 = impl_param.get_input_layout(1); - int output_rank = static_cast(input1.get().size()); + int output_rank = input1.get().size(); if (input1.is_static()) { - output_rank = static_cast(input1.get_dim(0)); // target shape rank is set as second input. + output_rank = input1.get_dim(0); // target shape rank is set as second input. } - output_shapes[0] = ShapeType::dynamic(std::max(output_rank, static_cast(1))); + output_shapes[0] = ShapeType::dynamic(std::max(output_rank, 1)); } format output_format = format::adjust_to_rank(input0_layout.format, output_shapes[0].size()); diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/arg_max_min.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/arg_max_min.cpp index ec6c5c9a83d795..150ae0e61222b9 100644 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/arg_max_min.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/arg_max_min.cpp @@ -66,7 +66,7 @@ struct arg_max_min_impl : typed_primitive_impl_ocl { const auto& mode = primitive->mode; const auto& sort_type = primitive->sort; const auto& values_first = primitive->values_first; - const auto& outputs_num = static_cast(primitive->input_size() == 3 ? 2 : primitive->output_size()); + const auto& outputs_num = primitive->input_size() == 3 ? 
2 : primitive->output_size(); auto argm_params = get_default_params(impl_param); auto argm_optional_params = diff --git a/src/plugins/intel_gpu/src/graph/layout_optimizer.cpp b/src/plugins/intel_gpu/src/graph/layout_optimizer.cpp index c64735c0602872..e2bbf5886a3464 100644 --- a/src/plugins/intel_gpu/src/graph/layout_optimizer.cpp +++ b/src/plugins/intel_gpu/src/graph/layout_optimizer.cpp @@ -826,8 +826,8 @@ static bool is_node_for_onednn(deconvolution_node const& node) { static bool is_node_for_onednn(fully_connected_node const& node) { auto fc_prim = node.get_primitive(); auto ps = node.get_output_layout().get_partial_shape(); - int non_spatial_count = 2 + (fc_prim->input_size == 3 ? 1 : 0); - auto rank = ps.size(); + size_t non_spatial_count = 2 + (fc_prim->input_size == 3 ? 1 : 0); + size_t rank = ps.size(); // OneDnn doesn't support spatial dimensions for output for (auto i = non_spatial_count; i < rank; i++) { diff --git a/src/plugins/intel_gpu/src/plugin/infer_request_legacy.cpp b/src/plugins/intel_gpu/src/plugin/infer_request_legacy.cpp index 596c0740df6b45..45641b1b35bb22 100644 --- a/src/plugins/intel_gpu/src/plugin/infer_request_legacy.cpp +++ b/src/plugins/intel_gpu/src/plugin/infer_request_legacy.cpp @@ -546,7 +546,7 @@ void InferRequestLegacy::SetBatch(int new_batch) { if (m_graph->GetMaxDynamicBatchSize() < 0) IE_THROW() << "Dynamic batch is not enabled."; - if (new_batch < 1 || new_batch > m_graph->GetMaxDynamicBatchSize()) { + if (new_batch < 1 || static_cast(new_batch) > m_graph->GetMaxDynamicBatchSize()) { IE_THROW() << "Invalid dynamic batch size " << new_batch << " for this request. Got: " << new_batch << ". 
Expected value in range [1;" << m_graph->GetMaxDynamicBatchSize() << "]"; } From 698fa6819e68821c4e2ceea8ad438143bff47850 Mon Sep 17 00:00:00 2001 From: andrei-cv Date: Tue, 14 Mar 2023 21:46:31 +0400 Subject: [PATCH 12/12] Fix errors in unit tests --- src/plugins/intel_gpu/src/graph/impls/ocl/resample.cpp | 4 ++-- .../intel_gpu/src/graph/include/sliding_window_utils.hpp | 2 +- src/plugins/intel_gpu/src/graph/network.cpp | 2 +- .../kernels/scatter_update/scatter_nd_update_kernel_ref.cpp | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/resample.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/resample.cpp index ae7cd59eb38980..bd481afb8470fe 100644 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/resample.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/resample.cpp @@ -79,8 +79,8 @@ inline std::vector convert_pads(const std::vector& pad, size_t if (pad.empty()) { new_pad = std::vector(rank, 0); } else { - for (auto pad : pad) { - new_pad.push_back(static_cast(pad)); + for (auto p : pad) { + new_pad.push_back(static_cast(p)); } if (new_pad.size() > 2) std::reverse(new_pad.begin() + 2, new_pad.end()); diff --git a/src/plugins/intel_gpu/src/graph/include/sliding_window_utils.hpp b/src/plugins/intel_gpu/src/graph/include/sliding_window_utils.hpp index d9945e3b42cf08..fd9af3a1d67827 100644 --- a/src/plugins/intel_gpu/src/graph/include/sliding_window_utils.hpp +++ b/src/plugins/intel_gpu/src/graph/include/sliding_window_utils.hpp @@ -105,7 +105,7 @@ inline tensor calc_sliding_window_output_range(const tensor& inp auto stride_x = stride.size() >= 1 ? stride[stride.size() - 1] : 1; tensor::value_type dilation_z = dilation.size() >= 3 ? static_cast(dilation[dilation.size() - 3]) : 1; - tensor::value_type dilation_y = dilation.size() >= 2 ? static_cast(static_cast(dilation[dilation.size() - 2])) : 1; + tensor::value_type dilation_y = dilation.size() >= 2 ? 
static_cast(dilation[dilation.size() - 2]) : 1; tensor::value_type dilation_x = dilation.size() >= 1 ? static_cast(dilation[dilation.size() - 1]) : 1; auto pad_z = pad.size() >= 3 ? pad[pad.size() - 3] : 0; diff --git a/src/plugins/intel_gpu/src/graph/network.cpp b/src/plugins/intel_gpu/src/graph/network.cpp index cbfcf63c39aac9..44d361ac35aafe 100644 --- a/src/plugins/intel_gpu/src/graph/network.cpp +++ b/src/plugins/intel_gpu/src/graph/network.cpp @@ -540,7 +540,7 @@ void network::save(cldnn::BinaryOutputBuffer& ob) { } } - auto exec_order_size = _exec_order.size(); + int exec_order_size = _exec_order.size(); ob << exec_order_size; for (const auto& p_inst : _exec_order) { diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/scatter_update/scatter_nd_update_kernel_ref.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/scatter_update/scatter_nd_update_kernel_ref.cpp index cf7951cc5a0a4e..7a4e7f85fe839b 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/scatter_update/scatter_nd_update_kernel_ref.cpp +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/scatter_update/scatter_nd_update_kernel_ref.cpp @@ -130,7 +130,7 @@ static std::string GetInputBlockND(const scatter_nd_update_params& params, size_ block_nd_s[rank] = "1"; size_t input_offset = num * 6; - for (size_t idx = rank - 1; idx >= 0; --idx) { + for (int32_t idx = rank - 1; idx >= 0; --idx) { block_nd[idx] = input_dims[idx] * block_nd[idx + 1]; size_t dim_offset = idx < 2 ? idx : idx + 6 - rank;