diff --git a/src/plugins/intel_gpu/include/intel_gpu/runtime/layout.hpp b/src/plugins/intel_gpu/include/intel_gpu/runtime/layout.hpp
index 9a8ba67ab3a7b6..a454fc7afdee15 100644
--- a/src/plugins/intel_gpu/include/intel_gpu/runtime/layout.hpp
+++ b/src/plugins/intel_gpu/include/intel_gpu/runtime/layout.hpp
@@ -193,9 +193,9 @@ struct padding {
     }

     static padding max(padding const& lhs, padding const& rhs, float filling_value = 0.0f) {
-        auto lower = tensor::max(lhs.lower_size(), rhs.lower_size());
-        auto upper = tensor::max(lhs.upper_size(), rhs.upper_size());
-        auto dynamic_pad_dims = tensor::max(lhs.get_dynamic_pad_dims(), rhs.get_dynamic_pad_dims());
+        const auto& lower = tensor::max(lhs.lower_size(), rhs.lower_size());
+        const auto& upper = tensor::max(lhs.upper_size(), rhs.upper_size());
+        const auto& dynamic_pad_dims = tensor::max(lhs.get_dynamic_pad_dims(), rhs.get_dynamic_pad_dims());
         return padding{lower.sizes(), upper.sizes(), filling_value, dynamic_pad_dims};
     }

diff --git a/src/plugins/intel_gpu/include/intel_gpu/runtime/tensor.hpp b/src/plugins/intel_gpu/include/intel_gpu/runtime/tensor.hpp
index 20c8bc8052f031..cb74433ec18483 100644
--- a/src/plugins/intel_gpu/include/intel_gpu/runtime/tensor.hpp
+++ b/src/plugins/intel_gpu/include/intel_gpu/runtime/tensor.hpp
@@ -262,8 +262,8 @@ struct tensor {
     tensor(format fmt, const std::vector<value_type>& sizes, value_type default_size = 1)
         : tensor(default_size) {
-        auto in_order = fmt.order();
-        auto out_order = fmt.internal_order();
+        const auto& in_order = fmt.order();
+        const auto& out_order = fmt.internal_order();
         if (in_order.size() != sizes.size())
             throw std::invalid_argument("The count of values passed to initialize tensor does not match passed format.");
@@ -417,8 +417,8 @@ struct tensor {
     /// @brief Returns a vector of tensors values, ordered regarding to @p format.
     std::vector<value_type> sizes(cldnn::format fmt) const {
-        auto output_order = fmt.order();
-        auto internal_order = fmt.internal_order();
+        const auto& output_order = fmt.order();
+        const auto& internal_order = fmt.internal_order();
         std::vector<value_type> sizes(output_order.size(), 0);

         for (size_t i = 0; i < sizes.size(); ++i) {
@@ -472,9 +472,9 @@ struct tensor {
      */
     tensor transform(cldnn::format new_fmt, value_type default_size) const {
         cldnn::format default_fmt = cldnn::format::bfvuwzyx;
-        auto val_order = default_fmt.internal_order();
-        auto new_order = new_fmt.internal_order();
-        std::vector<value_type> old_sizes = sizes();
+        const auto& val_order = default_fmt.internal_order();
+        const auto& new_order = new_fmt.internal_order();
+        const std::vector<value_type>& old_sizes = sizes();
         std::vector<value_type> new_sizes(old_sizes.size(), default_size);
         const auto& new_traits = new_fmt.traits();
         static const std::map<char, char> flatten_mapping = {
diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/kernel_selector_helper.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/kernel_selector_helper.cpp
index 1f492e14c9fc7e..2ebcebd3b0b48f 100644
--- a/src/plugins/intel_gpu/src/graph/impls/ocl/kernel_selector_helper.cpp
+++ b/src/plugins/intel_gpu/src/graph/impls/ocl/kernel_selector_helper.cpp
@@ -822,7 +822,7 @@ kernel_selector::data_tensor convert_data_tensor(const layout& l, const tensor v
     // legacy get_tensor().sizes() impl return dims in external order, so we need to transpose dims
     ov::PartialShape vals_ordered;
-    auto axis_order = l.format.dims_order();
+    const auto& axis_order = l.format.dims_order();
     for (size_t i = 0; i < axis_order.size(); i++) {
         if (axis_order[i] >= vals_original.size())
             vals_ordered.push_back(ov::Dimension(1));
diff --git a/src/plugins/intel_gpu/src/runtime/layout.cpp b/src/plugins/intel_gpu/src/runtime/layout.cpp
index fb4ee3e88841c3..331859167c39bd 100644
--- a/src/plugins/intel_gpu/src/runtime/layout.cpp
+++ b/src/plugins/intel_gpu/src/runtime/layout.cpp
@@ -41,18 +41,18 @@ size_t layout::get_spatial_rank() const {
 }

 tensor::value_type layout::get_dim(size_t idx) const {
-    auto dims = get_dims();
+    const auto& dims = get_dims();
     return dims[idx];
 }

 tensor::value_type layout::batch() const {
-    auto dims = get_dims();
+    const auto& dims = get_dims();
     const size_t dim_idx = 0;
     return dims[dim_idx];
 }

 tensor::value_type layout::feature() const {
-    auto dims = get_dims();
+    const auto& dims = get_dims();
     const size_t dim_idx = 1;
     return dims[dim_idx];
 }
@@ -60,13 +60,13 @@ tensor::value_type layout::feature() const {
 tensor::value_type layout::spatial(size_t spatial_idx) const {
     if (spatial_idx >= format.spatial_num() )
         return 1;
-    auto dims = get_dims();
+    const auto& dims = get_dims();
     const size_t dim_idx = (format::is_grouped(format) ? 3 : 2) + (format.spatial_num() - 1 - spatial_idx);
     return dims[dim_idx];
 }

 tensor::value_type layout::group() const {
-    auto dims = get_dims();
+    const auto& dims = get_dims();
     if (!format::is_weights_format(format)) {
         throw std::logic_error("[GPU] can't get group dimension for data layout");
     }
@@ -81,7 +81,7 @@ tensor::value_type layout::ofm() const {
     if (!format::is_weights_format(format)) {
         throw std::logic_error("[GPU] can't get OFM dimension for data layout");
     }
-    auto dims = get_dims();
+    const auto& dims = get_dims();
     const size_t dim_idx = format::is_grouped(format) ? 1 : 0;

     return dims[dim_idx];
@@ -91,7 +91,7 @@ tensor::value_type layout::ifm() const {
     if (!format::is_weights_format(format)) {
         throw std::logic_error("[GPU] can't get IFM dimension for data layout");
     }
-    auto dims = get_dims();
+    const auto& dims = get_dims();
     const size_t dim_idx = format::is_grouped(format) ? 2 : 1;
     return dims[dim_idx];
 }
@@ -99,10 +99,10 @@ tensor::value_type layout::ifm() const {
 std::vector<tensor::value_type> layout::get_dims() const {
     if (is_dynamic())
         throw std::runtime_error("[GPU] get_dims() is called for dynamic shape");
-    auto shape = size.to_shape();
+
     std::vector<tensor::value_type> res;
-    for (auto dim : shape) {
-        res.push_back(static_cast<tensor::value_type>(dim));
+    for (const auto& dim : size) {
+        res.push_back(static_cast<tensor::value_type>(dim.get_length()));
     }

     if (res.size() < format.dimension())
@@ -116,7 +116,7 @@ std::vector<tensor::value_type> layout::get_padded_dims() const {
         throw std::runtime_error("[GPU] get_padded_dims() is called for dynamic shape");

     auto default_fmt = format::get_default_format(format.dimension(), format::is_weights_format(format), format::is_grouped(format));
-    auto t = get_tensor();
+    const auto& t = get_tensor();
     auto padded_size = t.add(data_padding.lower_size()).add(data_padding.upper_size());
     return padded_size.sizes(default_fmt);
 }
@@ -168,7 +168,7 @@ std::vector<tensor::value_type> layout::get_ordered_dims() const {
     if (is_dynamic())
         throw std::runtime_error("[GPU] get_ordered_dims() is called for dynamic shape");

-    auto t = get_tensor();
+    const auto& t = get_tensor();
     return t.sizes(format);
 }
@@ -245,8 +245,8 @@ tensor layout::get_tensor() const {
     OPENVINO_ASSERT(!is_dynamic() || has_upper_bound(), "[GPU] get_tensor() is called for dynamic shape without upper bound");
     ov::Shape shape;
     if (is_dynamic() && has_upper_bound()) {
-        for (auto dim : size) {
-            shape.push_back(dim.get_max_length());
+        for (const auto& dim : size) {
+            shape.push_back(dim.get_max_length());
         }
     } else {
         shape = size.to_shape();
     }
@@ -295,16 +295,16 @@ void layout::set_partial_shape(const ov::PartialShape& size) {

 tensor layout::get_buffer_size() const {
     if (is_dynamic() && !has_upper_bound()) {
-        throw std::runtime_error("[GPU] get_buffer_size() is called for dynamic shape");
+        throw std::runtime_error("[GPU] get_buffer_size() is called for dynamic shape");
     }

-    auto t = get_tensor();
+    const auto& t = get_tensor();

     return t.add(data_padding.lower_size()).add(data_padding.upper_size());
 }

 tensor layout::get_pitches() const {
-    auto sizes = get_buffer_size().sizes(format);
+    const auto& sizes = get_buffer_size().sizes(format);
     std::vector<tensor::value_type> pitches(sizes.size(), tensor::value_type(1));
     std::partial_sum(sizes.rbegin(), sizes.rend() - 1, pitches.rbegin() + 1, std::multiplies<tensor::value_type>());
     return {format, pitches};
@@ -312,10 +312,10 @@
 }

 size_t layout::get_linear_offset(tensor element) const {
-    auto l_padd = data_padding.lower_size();
-    auto u_padd = data_padding.upper_size();
+    const auto& l_padd = data_padding.lower_size();
+    const auto& u_padd = data_padding.upper_size();

-    auto t = get_tensor();
+    const auto& t = get_tensor();

     if ((element.batch[0] < 0 && -element.batch[0] > l_padd.batch[0]) ||
         (element.feature[0] < 0 && -element.feature[0] > l_padd.feature[0]) ||
@@ -524,12 +524,12 @@ ov::PartialShape layout::transform(const ov::PartialShape& pshape, const cldnn::
     int32_t default_size = -1;
     std::vector<int32_t> dims;
     dims.reserve(pshape.size());
-    for (auto dim : pshape) {
+    for (const auto& dim : pshape) {
         dims.push_back(static_cast<int32_t>(dim.get_length()));
     }

     const cldnn::format default_fmt = cldnn::format::bfvuwzyx;
-    auto old_sizes = convert_dimensions(dims, old_fmt.order(), default_fmt.internal_order()); // convert to internal order (bfxyzwuv)
+    const auto& old_sizes = convert_dimensions(dims, old_fmt.order(), default_fmt.internal_order()); // convert to internal order (bfxyzwuv)
     const auto& val_order = default_fmt.internal_order();
     const auto& new_order = new_fmt.internal_order();
diff --git a/src/plugins/intel_gpu/src/runtime/shape_predictor.cpp b/src/plugins/intel_gpu/src/runtime/shape_predictor.cpp
index feb72d1879df7b..4ed02065da3289 100644
--- a/src/plugins/intel_gpu/src/runtime/shape_predictor.cpp
+++ b/src/plugins/intel_gpu/src/runtime/shape_predictor.cpp
@@ -63,7 +63,7 @@ std::pair<bool, ov::Shape> ShapePredictor::predict_preallocation_shape(const std
     size_t next_iters_prealloc_count = custom_next_iters_prealloc_count > 0
                                            ? static_cast<size_t>(custom_next_iters_prealloc_count)
                                            : _next_iters_preallocation_count;
-    auto current_shape = layout.get_shape();
+    const auto& current_shape = layout.get_shape();
     auto dt_bitwidth = ov::element::Type(layout.data_type).bitwidth();

     add_shape(id, current_shape);
@@ -74,7 +74,7 @@
         return {false, {}};

     // Check if there is enough data for prediction
-    auto& shapes = _shapes_info[id];
+    const auto& shapes = _shapes_info[id];
     const auto shapes_num = shapes.size();

     // Number of shapes used for iterations mode predictions
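
Note (not part of the patch): the safety of these `const auto&` conversions rests on C++ temporary lifetime extension. Binding a const reference to a by-value return such as `get_dims()` is well-defined because the temporary's lifetime is extended to the reference's scope, although it does not skip constructing the vector; the genuine copy savings come from accessors that return references to live members. Below is a minimal standalone sketch of both cases; `make_dims`, `Fmt`, and `order` are hypothetical stand-ins, not the plugin's actual types.

#include <iostream>
#include <string>
#include <vector>

// Hypothetical stand-in for a by-value getter like layout::get_dims().
std::vector<int> make_dims() { return {1, 3, 224, 224}; }

// Hypothetical stand-in for an accessor returning a reference to a member.
struct Fmt {
    std::string order_ = "bfyx";
    const std::string& order() const { return order_; }
};

int main() {
    // Case 1: const-ref to a by-value return. The temporary vector's
    // lifetime is extended to match `dims`, so this is safe; the vector
    // is still constructed exactly once either way.
    const auto& dims = make_dims();

    // Case 2: const-ref to a reference return. No temporary exists and
    // the string is not copied; this is where `const auto&` (versus a
    // plain `auto`) actually saves work.
    Fmt f;
    const auto& order = f.order();

    std::cout << dims.size() << ' ' << order << '\n';

    // Pitfall: extension does not reach through a returned reference.
    // `const auto& bad = Fmt{}.order();` would dangle as soon as the
    // statement ends, so the pattern is only safe while the owning
    // object outlives the reference.
    return 0;
}

One behavioural note on the shape_predictor.cpp hunk: `_shapes_info[id]` still goes through the container's non-const `operator[]`, which default-constructs a missing entry even when the result is bound to a `const` reference; that is presumably harmless here only because `add_shape(id, current_shape)` has just populated the entry.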