[GPU] Added more uses of const references
Lyamin-Roman committed Jul 3, 2024
1 parent b5a66b0 commit 05f83a1
Showing 5 changed files with 35 additions and 35 deletions.
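Every hunk below follows the same pattern: a value that was previously copied out of a getter is now bound to a const reference instead. When the getter returns a reference, this removes a copy; when it returns by value, it is cost-neutral (see the notes after the individual files). A minimal sketch of the copy-avoiding case — Format and order() here are illustrative stand-ins, not the actual plugin API:

#include <string>
#include <vector>

struct Format {
    std::vector<std::string> order_;
    // Getter returning a const reference: `const auto&` at the call site
    // aliases the member, while plain `auto` would copy the whole vector.
    const std::vector<std::string>& order() const { return order_; }
};

int main() {
    Format fmt{{"b", "f", "y", "x"}};
    auto copied = fmt.order();         // copies four strings
    const auto& aliased = fmt.order(); // no copy; aliases fmt.order_
    return static_cast<int>(copied.size() + aliased.size());
}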
6 changes: 3 additions & 3 deletions src/plugins/intel_gpu/include/intel_gpu/runtime/layout.hpp
@@ -193,9 +193,9 @@ struct padding {
}

static padding max(padding const& lhs, padding const& rhs, float filling_value = 0.0f) {
-    auto lower = tensor::max(lhs.lower_size(), rhs.lower_size());
-    auto upper = tensor::max(lhs.upper_size(), rhs.upper_size());
-    auto dynamic_pad_dims = tensor::max(lhs.get_dynamic_pad_dims(), rhs.get_dynamic_pad_dims());
+    const auto& lower = tensor::max(lhs.lower_size(), rhs.lower_size());
+    const auto& upper = tensor::max(lhs.upper_size(), rhs.upper_size());
+    const auto& dynamic_pad_dims = tensor::max(lhs.get_dynamic_pad_dims(), rhs.get_dynamic_pad_dims());
return padding{lower.sizes(), upper.sizes(), filling_value, dynamic_pad_dims};
}

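A note on this hunk: tensor::max builds and returns a new tensor, so if it returns by value the new const references bind to temporaries. That is well defined — binding a temporary to a const reference extends its lifetime to that of the reference — but in that case the change documents intent rather than saving a construction. A self-contained sketch of the lifetime-extension rule (make_vec is a hypothetical by-value factory):

#include <vector>

std::vector<int> make_vec() { return {1, 2, 3}; }  // returns by value

int main() {
    // The temporary returned by make_vec() lives as long as `v` does.
    const auto& v = make_vec();
    // `auto v2 = make_vec();` would cost the same: the copy is elided anyway.
    return static_cast<int>(v.size());
}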
14 changes: 7 additions & 7 deletions src/plugins/intel_gpu/include/intel_gpu/runtime/tensor.hpp
@@ -262,8 +262,8 @@ struct tensor {

tensor(format fmt, const std::vector<value_type>& sizes, value_type default_size = 1)
: tensor(default_size) {
-    auto in_order = fmt.order();
-    auto out_order = fmt.internal_order();
+    const auto& in_order = fmt.order();
+    const auto& out_order = fmt.internal_order();
if (in_order.size() != sizes.size())
throw std::invalid_argument("The count of values passed to initialize tensor does not match passed format.");

@@ -417,8 +417,8 @@ struct tensor {

/// @brief Returns a vector of tensors values, ordered regarding to @p format.
std::vector<value_type> sizes(cldnn::format fmt) const {
-    auto output_order = fmt.order();
-    auto internal_order = fmt.internal_order();
+    const auto& output_order = fmt.order();
+    const auto& internal_order = fmt.internal_order();
std::vector<value_type> sizes(output_order.size(), 0);

for (size_t i = 0; i < sizes.size(); ++i) {
@@ -472,9 +472,9 @@ struct tensor {
*/
tensor transform(cldnn::format new_fmt, value_type default_size) const {
cldnn::format default_fmt = cldnn::format::bfvuwzyx;
-    auto val_order = default_fmt.internal_order();
-    auto new_order = new_fmt.internal_order();
-    std::vector<value_type> old_sizes = sizes();
+    const auto& val_order = default_fmt.internal_order();
+    const auto& new_order = new_fmt.internal_order();
+    const std::vector<value_type>& old_sizes = sizes();
std::vector<value_type> new_sizes(old_sizes.size(), default_size);
const auto& new_traits = new_fmt.traits();
static const std::map<char, char> flatten_mapping = {
@@ -822,7 +822,7 @@ kernel_selector::data_tensor convert_data_tensor(const layout& l, const tensor v

// legacy get_tensor().sizes() impl return dims in external order, so we need to transpose dims
ov::PartialShape vals_ordered;
-    auto axis_order = l.format.dims_order();
+    const auto& axis_order = l.format.dims_order();
for (size_t i = 0; i < axis_order.size(); i++) {
if (axis_order[i] >= vals_original.size())
vals_ordered.push_back(ov::Dimension(1));
44 changes: 22 additions & 22 deletions src/plugins/intel_gpu/src/runtime/layout.cpp
@@ -41,32 +41,32 @@ size_t layout::get_spatial_rank() const {
}

tensor::value_type layout::get_dim(size_t idx) const {
-    auto dims = get_dims();
+    const auto& dims = get_dims();
return dims[idx];
}

tensor::value_type layout::batch() const {
-    auto dims = get_dims();
+    const auto& dims = get_dims();
const size_t dim_idx = 0;
return dims[dim_idx];
}

tensor::value_type layout::feature() const {
-    auto dims = get_dims();
+    const auto& dims = get_dims();
const size_t dim_idx = 1;
return dims[dim_idx];
}

tensor::value_type layout::spatial(size_t spatial_idx) const {
if (spatial_idx >= format.spatial_num() )
return 1;
-    auto dims = get_dims();
+    const auto& dims = get_dims();
const size_t dim_idx = (format::is_grouped(format) ? 3 : 2) + (format.spatial_num() - 1 - spatial_idx);
return dims[dim_idx];
}

tensor::value_type layout::group() const {
-    auto dims = get_dims();
+    const auto& dims = get_dims();
if (!format::is_weights_format(format)) {
throw std::logic_error("[GPU] can't get group dimension for data layout");
}
@@ -81,7 +81,7 @@ tensor::value_type layout::ofm() const {
if (!format::is_weights_format(format)) {
throw std::logic_error("[GPU] can't get OFM dimension for data layout");
}
-    auto dims = get_dims();
+    const auto& dims = get_dims();
const size_t dim_idx = format::is_grouped(format) ? 1 : 0;

return dims[dim_idx];
@@ -91,18 +91,18 @@ tensor::value_type layout::ifm() const {
if (!format::is_weights_format(format)) {
throw std::logic_error("[GPU] can't get IFM dimension for data layout");
}
-    auto dims = get_dims();
+    const auto& dims = get_dims();
const size_t dim_idx = format::is_grouped(format) ? 2 : 1;
return dims[dim_idx];
}

std::vector<tensor::value_type> layout::get_dims() const {
if (is_dynamic())
throw std::runtime_error("[GPU] get_dims() is called for dynamic shape");
-    auto shape = size.to_shape();

std::vector<tensor::value_type> res;
-    for (auto dim : shape) {
-        res.push_back(static_cast<tensor::value_type>(dim));
+    for (const auto& dim : size) {
+        res.push_back(static_cast<tensor::value_type>(dim.get_length()));
}

if (res.size() < format.dimension())
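The get_dims() hunk above is the one change in this file that goes beyond a const-ref substitution: instead of materializing an ov::Shape with size.to_shape() and converting its elements, the loop now iterates the ov::PartialShape member directly and reads each dimension with get_length(), which is valid because the dynamic case was rejected just above. A standalone sketch of that access pattern:

#include <cstdint>
#include <stdexcept>
#include <vector>
#include "openvino/core/partial_shape.hpp"

std::vector<int32_t> static_dims(const ov::PartialShape& shape) {
    if (shape.is_dynamic())
        throw std::runtime_error("static shape expected");
    std::vector<int32_t> res;
    res.reserve(shape.size());
    // Each element is an ov::Dimension; get_length() is safe once the
    // shape is known to be static.
    for (const auto& dim : shape)
        res.push_back(static_cast<int32_t>(dim.get_length()));
    return res;
}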
@@ -116,7 +116,7 @@ std::vector<tensor::value_type> layout::get_padded_dims() const {
throw std::runtime_error("[GPU] get_padded_dims() is called for dynamic shape");

auto default_fmt = format::get_default_format(format.dimension(), format::is_weights_format(format), format::is_grouped(format));
-    auto t = get_tensor();
+    const auto& t = get_tensor();
auto padded_size = t.add(data_padding.lower_size()).add(data_padding.upper_size());
return padded_size.sizes(default_fmt);
}
@@ -168,7 +168,7 @@ std::vector<tensor::value_type> layout::get_ordered_dims() const {
if (is_dynamic())
throw std::runtime_error("[GPU] get_ordered_dims() is called for dynamic shape");

-    auto t = get_tensor();
+    const auto& t = get_tensor();
return t.sizes(format);
}

@@ -245,8 +245,8 @@ tensor layout::get_tensor() const {
OPENVINO_ASSERT(!is_dynamic() || has_upper_bound(), "[GPU] get_tensor() is called for dynamic shape without upper bound");
ov::Shape shape;
if (is_dynamic() && has_upper_bound()) {
-        for (auto dim : size) {
-            shape.push_back(dim.get_max_length());
+        for (const auto& dim : size) {
+            shape.push_back(dim.get_max_length());
}
} else {
shape = size.to_shape();
Expand Down Expand Up @@ -295,27 +295,27 @@ void layout::set_partial_shape(const ov::PartialShape& size) {

tensor layout::get_buffer_size() const {
if (is_dynamic() && !has_upper_bound()) {
-    throw std::runtime_error("[GPU] get_buffer_size() is called for dynamic shape");
+        throw std::runtime_error("[GPU] get_buffer_size() is called for dynamic shape");
}

-    auto t = get_tensor();
+    const auto& t = get_tensor();

return t.add(data_padding.lower_size()).add(data_padding.upper_size());
}

tensor layout::get_pitches() const {
-    auto sizes = get_buffer_size().sizes(format);
+    const auto& sizes = get_buffer_size().sizes(format);

std::vector<tensor::value_type> pitches(sizes.size(), tensor::value_type(1));
std::partial_sum(sizes.rbegin(), sizes.rend() - 1, pitches.rbegin() + 1, std::multiplies<tensor::value_type>());
return {format, pitches};
}

size_t layout::get_linear_offset(tensor element) const {
-    auto l_padd = data_padding.lower_size();
-    auto u_padd = data_padding.upper_size();
+    const auto& l_padd = data_padding.lower_size();
+    const auto& u_padd = data_padding.upper_size();

-    auto t = get_tensor();
+    const auto& t = get_tensor();

if ((element.batch[0] < 0 && -element.batch[0] > l_padd.batch[0]) ||
(element.feature[0] < 0 && -element.feature[0] > l_padd.feature[0]) ||
@@ -524,12 +524,12 @@ ov::PartialShape layout::transform(const ov::PartialShape& pshape, const cldnn::
int32_t default_size = -1;
std::vector<int32_t> dims;
dims.reserve(pshape.size());
-    for (auto dim : pshape) {
+    for (const auto& dim : pshape) {
dims.push_back(static_cast<int32_t>(dim.get_length()));
}

const cldnn::format default_fmt = cldnn::format::bfvuwzyx;
-    auto old_sizes = convert_dimensions(dims, old_fmt.order(), default_fmt.internal_order()); // convert to internal order (bfxyzwuv)
+    const auto& old_sizes = convert_dimensions(dims, old_fmt.order(), default_fmt.internal_order()); // convert to internal order (bfxyzwuv)

const auto& val_order = default_fmt.internal_order();
const auto& new_order = new_fmt.internal_order();
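Unrelated to the const-ref changes but visible in the same diff: get_pitches() derives per-dimension strides from the padded sizes with std::partial_sum over reverse iterators — each pitch is the product of all sizes to its right, and the innermost pitch is 1. A standalone sketch of the idiom:

#include <functional>
#include <numeric>
#include <vector>

int main() {
    std::vector<int> sizes{2, 3, 4, 5};      // outermost .. innermost
    std::vector<int> pitches(sizes.size(), 1);
    // Running product from the right: pitches becomes {60, 20, 5, 1}.
    std::partial_sum(sizes.rbegin(), sizes.rend() - 1,
                     pitches.rbegin() + 1, std::multiplies<int>());
    return pitches[0];  // 60 == 3 * 4 * 5
}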
4 changes: 2 additions & 2 deletions src/plugins/intel_gpu/src/runtime/shape_predictor.cpp
@@ -63,7 +63,7 @@ std::pair<bool, ov::Shape> ShapePredictor::predict_preallocation_shape(const std
size_t next_iters_prealloc_count = custom_next_iters_prealloc_count > 0
? static_cast<size_t>(custom_next_iters_prealloc_count)
: _next_iters_preallocation_count;
-    auto current_shape = layout.get_shape();
+    const auto& current_shape = layout.get_shape();
auto dt_bitwidth = ov::element::Type(layout.data_type).bitwidth();

add_shape(id, current_shape);
@@ -74,7 +74,7 @@ std::pair<bool, ov::Shape> ShapePredictor::predict_preallocation_shape(const std
return {false, {}};

// Check if there is enough data for prediction
-    auto& shapes = _shapes_info[id];
+    const auto& shapes = _shapes_info[id];
const auto shapes_num = shapes.size();

// Number of shapes used for iterations mode predictions
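The second hunk in this file differs slightly from the temporary-binding cases earlier: _shapes_info[id] (a map-style subscript, by the look of it) returns a reference to a stored element, so the earlier auto& was already copy-free and const auto& only adds const-correctness; there is no lifetime concern because the element outlives the statement. A sketch of the distinction, assuming a std::map-like container:

#include <map>
#include <string>
#include <vector>

int main() {
    std::map<std::string, std::vector<int>> info;
    info["a"] = {1, 2, 3};
    // operator[] returns a reference to the stored element: no copy either
    // way, but `const auto&` prevents accidental mutation through the alias.
    const auto& shapes = info["a"];
    return static_cast<int>(shapes.size());
}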
