diff --git a/cpp/src/arrow/sparse_tensor.cc b/cpp/src/arrow/sparse_tensor.cc
index b83d42e5fb219..95d700b711743 100644
--- a/cpp/src/arrow/sparse_tensor.cc
+++ b/cpp/src/arrow/sparse_tensor.cc
@@ -151,27 +151,30 @@ void GetCOOIndexTensorRow(const std::shared_ptr<Tensor>& coords, const int64_t row,
   DCHECK(0 <= row && row < non_zero_length);

   const int64_t ndim = shape[1];
-  out_index->resize(ndim);
+  out_index->resize(static_cast<size_t>(ndim));

   switch (indices_elsize) {
     case 1:  // Int8, UInt8
       for (int64_t i = 0; i < ndim; ++i) {
-        (*out_index)[i] = static_cast<int64_t>(coords->Value({row, i}));
+        (*out_index)[static_cast<size_t>(i)] =
+            static_cast<int64_t>(coords->Value({row, i}));
       }
       break;
     case 2:  // Int16, UInt16
       for (int64_t i = 0; i < ndim; ++i) {
-        (*out_index)[i] = static_cast<int64_t>(coords->Value({row, i}));
+        (*out_index)[static_cast<size_t>(i)] =
+            static_cast<int64_t>(coords->Value({row, i}));
       }
       break;
     case 4:  // Int32, UInt32
       for (int64_t i = 0; i < ndim; ++i) {
-        (*out_index)[i] = static_cast<int64_t>(coords->Value({row, i}));
+        (*out_index)[static_cast<size_t>(i)] =
+            static_cast<int64_t>(coords->Value({row, i}));
       }
       break;
     case 8:  // Int64
       for (int64_t i = 0; i < ndim; ++i) {
-        (*out_index)[i] = coords->Value({row, i});
+        (*out_index)[static_cast<size_t>(i)] = coords->Value({row, i});
       }
       break;
     default:
@@ -187,12 +190,12 @@ bool DetectSparseCOOIndexCanonicality(const std::shared_ptr<Tensor>& coords) {
   const int64_t non_zero_length = shape[0];
   if (non_zero_length <= 1) return true;

-  const int64_t ndim = shape[1];
+  const auto ndim = static_cast<size_t>(shape[1]);
   std::vector<int64_t> last_index, index;
   GetCOOIndexTensorRow(coords, 0, &last_index);
   for (int64_t i = 1; i < non_zero_length; ++i) {
     GetCOOIndexTensorRow(coords, i, &index);
-    int64_t j = 0;
+    size_t j = 0;
     while (j < ndim) {
       if (last_index[j] > index[j]) {
         // last_index > index, so we can detect non-canonical here
@@ -367,14 +370,14 @@ Result<std::shared_ptr<SparseCSFIndex>> SparseCSFIndex::Make(
     const std::vector<int64_t>& indices_shapes, const std::vector<int64_t>& axis_order,
     const std::vector<std::shared_ptr<Buffer>>& indptr_data,
     const std::vector<std::shared_ptr<Buffer>>& indices_data) {
-  int64_t ndim = axis_order.size();
+  auto ndim = axis_order.size();
   std::vector<std::shared_ptr<Tensor>> indptr(ndim - 1);
   std::vector<std::shared_ptr<Tensor>> indices(ndim);

-  for (int64_t i = 0; i < ndim - 1; ++i)
+  for (size_t i = 0; i < ndim - 1; ++i)
     indptr[i] = std::make_shared<Tensor>(indptr_type, indptr_data[i],
                                          std::vector<int64_t>({indices_shapes[i] + 1}));

-  for (int64_t i = 0; i < ndim; ++i)
+  for (size_t i = 0; i < ndim; ++i)
     indices[i] = std::make_shared<Tensor>(indices_type, indices_data[i],
                                           std::vector<int64_t>({indices_shapes[i]}));
@@ -405,10 +408,10 @@ SparseCSFIndex::SparseCSFIndex(const std::vector<std::shared_ptr<Tensor>>& indptr,
 std::string SparseCSFIndex::ToString() const { return std::string("SparseCSFIndex"); }

 bool SparseCSFIndex::Equals(const SparseCSFIndex& other) const {
-  for (int64_t i = 0; i < static_cast<int64_t>(indices().size()); ++i) {
+  for (size_t i = 0; i < indices().size(); ++i) {
     if (!indices()[i]->Equals(*other.indices()[i])) return false;
   }
-  for (int64_t i = 0; i < static_cast<int64_t>(indptr().size()); ++i) {
+  for (size_t i = 0; i < indptr().size(); ++i) {
     if (!indptr()[i]->Equals(*other.indptr()[i])) return false;
   }
   return axis_order() == other.axis_order();
diff --git a/cpp/src/arrow/tensor.h b/cpp/src/arrow/tensor.h
index ff6f3735f9193..b13172e38fb09 100644
--- a/cpp/src/arrow/tensor.h
+++ b/cpp/src/arrow/tensor.h
@@ -155,9 +155,9 @@ class ARROW_EXPORT Tensor {
   /// Return the offset of the given index on the given strides
   static int64_t CalculateValueOffset(const std::vector<int64_t>& strides,
                                       const std::vector<int64_t>& index) {
-    const int64_t n = static_cast<int64_t>(index.size());
+    const auto n = index.size();
     int64_t offset = 0;
-    for (int64_t i = 0; i < n; ++i) {
+    for (size_t i = 0; i < n; ++i) {
       offset += index[i] * strides[i];
     }
     return offset;
diff --git a/cpp/src/arrow/tensor/coo_converter.cc b/cpp/src/arrow/tensor/coo_converter.cc
index 7e29b668f53ec..77d3e0313e305 100644
--- a/cpp/src/arrow/tensor/coo_converter.cc
+++ b/cpp/src/arrow/tensor/coo_converter.cc
@@ -40,10 +40,10 @@ namespace {
 template <typename c_index_type>
 inline void IncrementRowMajorIndex(std::vector<c_index_type>& coord,
                                    const std::vector<int64_t>& shape) {
-  const int64_t ndim = shape.size();
+  const auto ndim = shape.size();
   ++coord[ndim - 1];
   if (coord[ndim - 1] == shape[ndim - 1]) {
-    int64_t d = ndim - 1;
+    auto d = ndim - 1;
     while (d > 0 && coord[d] == shape[d]) {
       coord[d] = 0;
       ++coord[d - 1];
@@ -78,25 +78,25 @@ void ConvertRowMajorTensor(const Tensor& tensor, c_index_type* indices,
 template <typename c_index_type, typename c_value_type>
 void ConvertColumnMajorTensor(const Tensor& tensor, c_index_type* out_indices,
                               c_value_type* out_values, const int64_t size) {
-  const auto ndim = tensor.ndim();
-  std::vector<c_index_type> indices(ndim * size);
-  std::vector<c_value_type> values(size);
+  const auto ndim = static_cast<size_t>(tensor.ndim());
+  std::vector<c_index_type> indices(static_cast<size_t>(ndim * size));
+  std::vector<c_value_type> values(static_cast<size_t>(size));
   ConvertRowMajorTensor(tensor, indices.data(), values.data(), size);

   // transpose indices
-  for (int64_t i = 0; i < size; ++i) {
-    for (int j = 0; j < ndim / 2; ++j) {
+  for (size_t i = 0; i < static_cast<size_t>(size); ++i) {
+    for (size_t j = 0; j < ndim / 2; ++j) {
       std::swap(indices[i * ndim + j], indices[i * ndim + ndim - j - 1]);
     }
   }

   // sort indices
-  std::vector<int64_t> order(size);
+  std::vector<int64_t> order(static_cast<size_t>(size));
   std::iota(order.begin(), order.end(), 0);
   std::sort(order.begin(), order.end(), [&](const int64_t xi, const int64_t yi) {
-    const int64_t x_offset = xi * ndim;
-    const int64_t y_offset = yi * ndim;
-    for (int j = 0; j < ndim; ++j) {
+    const auto x_offset = static_cast<size_t>(xi * ndim);
+    const auto y_offset = static_cast<size_t>(yi * ndim);
+    for (size_t j = 0; j < ndim; ++j) {
       const auto x = indices[x_offset + j];
       const auto y = indices[y_offset + j];
       if (x < y) return true;
@@ -107,7 +107,7 @@ void ConvertColumnMajorTensor(const Tensor& tensor, c_index_type* out_indices,

   // transfer result
   const auto* indices_data = indices.data();
-  for (int64_t i = 0; i < size; ++i) {
+  for (size_t i = 0; i < static_cast<size_t>(size); ++i) {
     out_values[i] = values[i];

     std::copy_n(indices_data, ndim, out_indices);
@@ -121,12 +121,12 @@ void ConvertStridedTensor(const Tensor& tensor, c_index_type* indices,
                           c_value_type* values, const int64_t size) {
   using ValueType = typename CTypeTraits<c_value_type>::ArrowType;
   const auto& shape = tensor.shape();
-  const auto ndim = tensor.ndim();
+  const auto ndim = static_cast<size_t>(tensor.ndim());
   std::vector<int64_t> coord(ndim, 0);

   constexpr c_value_type zero = 0;
   c_value_type x;
-  int64_t i;
+  size_t i;
   for (int64_t n = tensor.size(); n > 0; --n) {
     x = tensor.Value<ValueType>(coord);
     if (ARROW_PREDICT_FALSE(x != zero)) {
diff --git a/cpp/src/arrow/tensor/csf_converter.cc b/cpp/src/arrow/tensor/csf_converter.cc
index 2d925ddbbb01b..893c5e708a926 100644
--- a/cpp/src/arrow/tensor/csf_converter.cc
+++ b/cpp/src/arrow/tensor/csf_converter.cc
@@ -41,14 +41,15 @@ namespace {
 inline void IncrementIndex(std::vector<int64_t>& coord,
                            const std::vector<int64_t>& shape,
                            const std::vector<int64_t>& axis_order) {
-  const int64_t ndim = shape.size();
-  const int64_t last_axis = axis_order[ndim - 1];
+  const auto ndim = shape.size();
+  const auto last_axis = static_cast<size_t>(axis_order[ndim - 1]);
   ++coord[last_axis];
   if (coord[last_axis] == shape[last_axis]) {
-    int64_t d = ndim - 1;
-    while (d > 0 && coord[axis_order[d]] == shape[axis_order[d]]) {
-      coord[axis_order[d]] = 0;
-      ++coord[axis_order[d - 1]];
+    auto d = ndim - 1;
+    while (d > 0 && coord[static_cast<size_t>(axis_order[d])] ==
+                        shape[static_cast<size_t>(axis_order[d])]) {
+      coord[static_cast<size_t>(axis_order[d])] = 0;
+      ++coord[static_cast<size_t>(axis_order[d - 1])];
       --d;
     }
   }
@@ -74,7 +75,7 @@ class SparseCSFTensorConverter : private SparseTensorConverterMixin {
     const int index_elsize = index_value_type_->byte_width();
     const int value_elsize = tensor_.type()->byte_width();

-    const int64_t ndim = tensor_.ndim();
+    const auto ndim = static_cast<size_t>(tensor_.ndim());
     // Axis order as ascending order of dimension size is a good heuristic but is not
     // necessarily optimal.
     std::vector<int64_t> axis_order = internal::ArgSort(tensor_.shape());
@@ -107,8 +108,8 @@ class SparseCSFTensorConverter : private SparseTensorConverterMixin {
       std::copy_n(xp, value_elsize, values);
       values += value_elsize;

-      for (int64_t i = 0; i < ndim; ++i) {
-        int64_t dimension = axis_order[i];
+      for (size_t i = 0; i < ndim; ++i) {
+        auto dimension = static_cast<size_t>(axis_order[i]);
         tree_split = tree_split || (coord[dimension] != previous_coord[dimension]);

         if (tree_split) {
@@ -133,7 +134,7 @@ class SparseCSFTensorConverter : private SparseTensorConverterMixin {
       }
     }

-    for (int64_t column = 0; column < ndim - 1; ++column) {
+    for (size_t column = 0; column < ndim - 1; ++column) {
      AssignIndex(index_buffer, counts[column + 1], index_elsize);
      RETURN_NOT_OK(indptr_buffer_builders[column].Append(index_buffer, index_elsize));
     }
@@ -146,11 +147,11 @@ class SparseCSFTensorConverter : private SparseTensorConverterMixin {
     std::vector<int64_t> indptr_shapes(counts.begin(), counts.end() - 1);
     std::vector<int64_t> indices_shapes = counts;

-    for (int64_t column = 0; column < ndim; ++column) {
+    for (size_t column = 0; column < ndim; ++column) {
       RETURN_NOT_OK(
           indices_buffer_builders[column].Finish(&indices_buffers[column], true));
     }

-    for (int64_t column = 0; column < ndim - 1; ++column) {
+    for (size_t column = 0; column < ndim - 1; ++column) {
       RETURN_NOT_OK(indptr_buffer_builders[column].Finish(&indptr_buffers[column], true));
     }
@@ -228,7 +229,7 @@ class TensorBuilderFromSparseCSFTensor : private SparseTensorConverterMixin {

   void ExpandValues(const int64_t dim, const int64_t dim_offset, const int64_t start,
                     const int64_t stop) {
-    const auto& cur_indices = indices_[dim];
+    const auto& cur_indices = indices_[static_cast<size_t>(dim)];
     const int indices_elsize = ElementSize(cur_indices);
     const auto* indices_data = cur_indices->raw_data() + start * indices_elsize;

@@ -236,21 +237,25 @@ class TensorBuilderFromSparseCSFTensor : private SparseTensorConverterMixin {
       for (auto i = start; i < stop; ++i) {
         const int64_t index =
             SparseTensorConverterMixin::GetIndexValue(indices_data, indices_elsize);
-        const int64_t offset = dim_offset + index * strides_[axis_order_[dim]];
+        const int64_t offset =
+            dim_offset +
+            index * strides_[static_cast<size_t>(axis_order_[static_cast<size_t>(dim)])];

         std::copy_n(raw_data_ + i * value_elsize_, value_elsize_, values_ + offset);
         indices_data += indices_elsize;
       }
     } else {
-      const auto& cur_indptr = indptr_[dim];
+      const auto& cur_indptr = indptr_[static_cast<size_t>(dim)];
       const int indptr_elsize = ElementSize(cur_indptr);
       const auto* indptr_data = cur_indptr->raw_data() + start * indptr_elsize;

       for (int64_t i = start; i < stop; ++i) {
         const int64_t index =
             SparseTensorConverterMixin::GetIndexValue(indices_data, indices_elsize);
-        const int64_t offset = dim_offset + index * strides_[axis_order_[dim]];
+        const int64_t offset =
+            dim_offset +
+            index * strides_[static_cast<size_t>(axis_order_[static_cast<size_t>(dim)])];

         const int64_t next_start = GetIndexValue(indptr_data, indptr_elsize);
         const int64_t next_stop =
             GetIndexValue(indptr_data + indptr_elsize, indptr_elsize);
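Every hunk in this patch applies the same pattern: loop counters and subscripts that index a std::vector become size_t (or get an explicit static_cast<size_t>) so the sparse-tensor code compiles cleanly with sign-conversion warnings enabled, while quantities that feed Arrow's int64_t-based shape/stride APIs stay signed. The following is a minimal standalone sketch of that pattern, not Arrow code; the function names are illustrative only.

// Sketch of the signed/unsigned indexing pattern used throughout this diff.
// Compile with e.g.: g++ -std=c++17 -Wall -Wsign-conversion -c sketch.cc
#include <cstddef>
#include <cstdint>
#include <vector>

// Preferred form: make the counter size_t so vector indexing needs no cast
// (mirrors Tensor::CalculateValueOffset and SparseCSFIndex::Equals above).
int64_t ValueOffset(const std::vector<int64_t>& strides,
                    const std::vector<int64_t>& index) {
  const auto n = index.size();  // size_t, matches vector subscripting
  int64_t offset = 0;
  for (size_t i = 0; i < n; ++i) {
    offset += index[i] * strides[i];
  }
  return offset;
}

// When the counter must stay int64_t (e.g. it is also a tensor coordinate),
// the conversion is made explicit at each indexing site instead
// (mirrors GetCOOIndexTensorRow above).
void CopyRow(const std::vector<int64_t>& src, std::vector<int64_t>* out) {
  const int64_t ndim = static_cast<int64_t>(src.size());
  out->resize(static_cast<size_t>(ndim));
  for (int64_t i = 0; i < ndim; ++i) {
    (*out)[static_cast<size_t>(i)] = src[static_cast<size_t>(i)];
  }
}

Either form silences -Wsign-conversion; the diff picks whichever keeps the surrounding Arrow APIs, which work in int64_t shapes and strides, free of extra casts.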