GH-38090: [C++][Emscripten] compute/kernels/hash_aggregate: Suppress shorten-64-to-32 warnings

On Emscripten, `size_t` is 32-bit while Arrow lengths and group counts are `int64_t`, so implicit `int64_t` to `size_t` conversions trigger shorten-64-to-32 warnings.

This change adds explicit casts (and `size_t` loop indices) at each such conversion.
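For context on the warning itself, here is a minimal sketch of the pattern this commit fixes. The names (`ResizeGroups`, `num_groups`, `values`) are illustrative, not taken from the Arrow source:

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical example, not Arrow code: lengths follow Arrow's int64_t
// convention, but std::vector::resize() takes size_t, which is 32-bit
// on Emscripten's wasm32 target.
void ResizeGroups(std::vector<double>& values, int64_t num_groups) {
  // values.resize(num_groups);
  // The commented-out line above narrows int64_t to a 32-bit size_t
  // implicitly, which Clang flags under -Wshorten-64-to-32.

  // The fix applied throughout this commit: make the narrowing explicit.
  values.resize(static_cast<size_t>(num_groups));
}
```

Compiling with `-Wshorten-64-to-32` (Clang, which Emscripten uses) flags the implicit conversion; the explicit `static_cast<size_t>` documents the narrowing and silences the warning.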
kou committed Oct 6, 2023
1 parent 3697bcd commit a7b829f
Showing 1 changed file with 17 additions and 16 deletions.
cpp/src/arrow/compute/kernels/hash_aggregate.cc (17 additions & 16 deletions)
@@ -893,7 +893,7 @@ struct GroupedVarStdImpl : public GroupedAggregator {
 
     // XXX this uses naive summation; we should switch to pairwise summation as was
     // done for the scalar aggregate kernel in ARROW-11567
-    std::vector<SumType> sums(num_groups_);
+    std::vector<SumType> sums(static_cast<size_t>(num_groups_));
     VisitGroupedValues<Type>(
         batch,
         [&](uint32_t g, typename TypeTraits<Type>::CType value) {
@@ -902,7 +902,7 @@ struct GroupedVarStdImpl : public GroupedAggregator {
         },
         [&](uint32_t g) { bit_util::ClearBit(no_nulls, g); });
 
-    for (int64_t i = 0; i < num_groups_; i++) {
+    for (size_t i = 0; i < static_cast<size_t>(num_groups_); i++) {
       means[i] = ToDouble(sums[i]) / counts[i];
     }
 
@@ -941,7 +941,7 @@ struct GroupedVarStdImpl : public GroupedAggregator {
       return Status::OK();
     }
 
-    std::vector<IntegerVarStd<Type>> var_std(num_groups_);
+    std::vector<IntegerVarStd<Type>> var_std(static_cast<size_t>(num_groups_));
 
     ARROW_ASSIGN_OR_RAISE(auto mapping,
                           AllocateBuffer(num_groups_ * sizeof(uint32_t), pool_));
@@ -956,7 +956,7 @@
 
     // reset state
     var_std.clear();
-    var_std.resize(num_groups_);
+    var_std.resize(static_cast<size_t>(num_groups_));
     GroupedVarStdImpl<Type> state;
     RETURN_NOT_OK(state.InitInternal(ctx_, decimal_scale_, &options_));
     RETURN_NOT_OK(state.Resize(num_groups_));
@@ -999,12 +999,12 @@ struct GroupedVarStdImpl : public GroupedAggregator {
     } else {
       const auto value = UnboxScalar<Type>::Unbox(*batch[0].scalar);
       for (int64_t i = 0; i < std::min(max_length, batch.length - start_index); ++i) {
-        const int64_t index = start_index + i;
+        const auto index = static_cast<size_t>(start_index + i);
         var_std[g[index]].ConsumeOne(value);
       }
     }
 
-    for (int64_t i = 0; i < num_groups_; i++) {
+    for (size_t i = 0; i < static_cast<size_t>(num_groups_); i++) {
       if (var_std[i].count == 0) continue;
 
       other_counts[i] = var_std[i].count;
@@ -1159,7 +1159,7 @@ struct GroupedTDigestImpl : public GroupedAggregator {
 
   Status Resize(int64_t new_num_groups) override {
     const int64_t added_groups = new_num_groups - tdigests_.size();
-    tdigests_.reserve(new_num_groups);
+    tdigests_.reserve(static_cast<size_t>(new_num_groups));
     for (int64_t i = 0; i < added_groups; i++) {
       tdigests_.emplace_back(options_.delta, options_.buffer_size);
     }
@@ -1203,7 +1203,8 @@ struct GroupedTDigestImpl : public GroupedAggregator {
     const uint8_t* other_no_nulls = no_nulls_.mutable_data();
 
     auto g = group_id_mapping.GetValues<uint32_t>(1);
-    for (int64_t other_g = 0; other_g < group_id_mapping.length; ++other_g, ++g) {
+    for (size_t other_g = 0; other_g < static_cast<size_t>(group_id_mapping.length);
+         ++other_g, ++g) {
       tdigests_[*g].Merge(other->tdigests_[other_g]);
       counts[*g] += other_counts[other_g];
       bit_util::SetBitTo(
@@ -1215,7 +1216,7 @@
   }
 
   Result<Datum> Finalize() override {
-    const int64_t slot_length = options_.q.size();
+    const auto slot_length = options_.q.size();
     const int64_t num_values = tdigests_.size() * slot_length;
     const int64_t* counts = counts_.data();
     std::shared_ptr<Buffer> null_bitmap;
@@ -1224,10 +1225,10 @@
     int64_t null_count = 0;
 
     auto* results = values->mutable_data_as<double>();
-    for (int64_t i = 0; static_cast<size_t>(i) < tdigests_.size(); ++i) {
+    for (size_t i = 0; i < tdigests_.size(); ++i) {
       if (!tdigests_[i].is_empty() && counts[i] >= options_.min_count &&
           (options_.skip_nulls || bit_util::GetBit(no_nulls_.data(), i))) {
-        for (int64_t j = 0; j < slot_length; j++) {
+        for (size_t j = 0; j < slot_length; j++) {
           results[i * slot_length + j] = tdigests_[i].Quantile(options_.q[j]);
         }
         continue;
@@ -1490,8 +1491,8 @@ struct GroupedMinMaxImpl<Type,
     auto added_groups = new_num_groups - num_groups_;
     DCHECK_GE(added_groups, 0);
     num_groups_ = new_num_groups;
-    mins_.resize(new_num_groups);
-    maxes_.resize(new_num_groups);
+    mins_.resize(static_cast<size_t>(new_num_groups));
+    maxes_.resize(static_cast<size_t>(new_num_groups));
     RETURN_NOT_OK(has_values_.Append(added_groups, false));
     RETURN_NOT_OK(has_nulls_.Append(added_groups, false));
     return Status::OK();
@@ -1985,8 +1986,8 @@ struct GroupedFirstLastImpl<Type,
     auto added_groups = new_num_groups - num_groups_;
     DCHECK_GE(added_groups, 0);
     num_groups_ = new_num_groups;
-    firsts_.resize(new_num_groups);
-    lasts_.resize(new_num_groups);
+    firsts_.resize(static_cast<size_t>(new_num_groups));
+    lasts_.resize(static_cast<size_t>(new_num_groups));
     RETURN_NOT_OK(has_values_.Append(added_groups, false));
     RETURN_NOT_OK(has_any_values_.Append(added_groups, false));
     RETURN_NOT_OK(first_is_nulls_.Append(added_groups, false));
@@ -2705,7 +2706,7 @@ struct GroupedOneImpl<Type, enable_if_t<is_base_binary_type<Type>::value ||
     auto added_groups = new_num_groups - num_groups_;
     DCHECK_GE(added_groups, 0);
     num_groups_ = new_num_groups;
-    ones_.resize(new_num_groups);
+    ones_.resize(static_cast<size_t>(new_num_groups));
     RETURN_NOT_OK(has_one_.Append(added_groups, false));
     return Status::OK();
   }
