Clean up: use std::size_t, include cstddef and aligned.hpp where missing #852

Merged · 5 commits · Aug 30, 2021
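Background for the change: the C++ standard guarantees only that `<cstddef>` declares `std::size_t`; whether plain `::size_t` is also visible after including a C++ standard header is implementation-defined. Spelling the type `std::size_t` and including `<cstddef>` directly makes each header self-contained. A minimal sketch of the convention (the helper below is a made-up illustration, not code from this PR):

```cpp
#include <cstddef>  // guarantees std::size_t; bare ::size_t is not portable

// Hypothetical example helper, for illustration only.
std::size_t total_bytes(std::size_t count, std::size_t element_size)
{
  return count * element_size;  // std::size_t is the unsigned type of sizeof
}
```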
@@ -31,6 +31,8 @@
 
 #include <cuda_runtime_api.h>
 
+#include <cstddef>
+
 __global__ void compute_bound_kernel(int64_t* out)
 {
   clock_t clock_begin = clock64();
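The kernel body is cut off at the hunk boundary above. For orientation, a minimal sketch of the `clock64()` busy-wait pattern such a compute-bound benchmark kernel uses; the cycle budget below is an assumed placeholder, not the benchmark's actual constant:

```cpp
__global__ void compute_bound_kernel_sketch(int64_t* out)
{
  clock_t clock_begin   = clock64();
  clock_t clock_current = clock_begin;

  // Spin until an assumed cycle budget has elapsed on this SM.
  while (clock_current - clock_begin < 10000) {
    clock_current = clock64();
  }

  // Store a result so the spin loop cannot be optimized away.
  *out = static_cast<int64_t>(clock_current);
}
```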
@@ -56,7 +58,7 @@ static void run_prewarm(rmm::cuda_stream_pool& stream_pool, rmm::mr::device_memory_resource* mr)
   }
 }
 
-static void run_test(size_t num_kernels,
+static void run_test(std::size_t num_kernels,
                      rmm::cuda_stream_pool& stream_pool,
                      rmm::mr::device_memory_resource* mr)
 {
35 changes: 18 additions & 17 deletions benchmarks/random_allocations/random_allocations.cpp
@@ -27,6 +27,7 @@
 #include <benchmark/benchmark.h>
 
 #include <array>
+#include <cstddef>
 #include <cstdlib>
 #include <functional>
 #include <random>
@@ -61,8 +62,8 @@ allocation remove_at(allocation_vector& allocs, std::size_t index)
 template <typename SizeDistribution>
 void random_allocation_free(rmm::mr::device_memory_resource& mr,
                             SizeDistribution size_distribution,
-                            size_t num_allocations,
-                            size_t max_usage,  // in MiB
+                            std::size_t num_allocations,
+                            std::size_t max_usage,  // in MiB
                             rmm::cuda_stream_view stream = {})
 {
   std::default_random_engine generator;
@@ -77,11 +78,11 @@ void random_allocation_free(rmm::mr::device_memory_resource& mr,
   std::size_t allocation_count{0};
 
   allocation_vector allocations{};
-  size_t allocation_size{0};
+  std::size_t allocation_size{0};
 
   for (std::size_t i = 0; i < num_allocations * 2; ++i) {
     bool do_alloc = true;
-    size_t size = static_cast<size_t>(size_distribution(generator));
+    auto size = static_cast<std::size_t>(size_distribution(generator));
 
     if (active_allocations > 0) {
       int chance = op_distribution(generator);
@@ -113,7 +114,7 @@ void random_allocation_free(rmm::mr::device_memory_resource& mr,
 #endif
   } else {  // dealloc, or alloc failed
     if (active_allocations > 0) {
-      size_t index = index_distribution(generator) % active_allocations;
+      std::size_t index = index_distribution(generator) % active_allocations;
       active_allocations--;
       allocation to_free = remove_at(allocations, index);
       mr.deallocate(to_free.p, to_free.size, stream);
@@ -136,9 +137,9 @@ void random_allocation_free(rmm::mr::device_memory_resource& mr,
 }  // namespace
 
 void uniform_random_allocations(rmm::mr::device_memory_resource& mr,
-                                size_t num_allocations,
-                                size_t max_allocation_size,  // in MiB
-                                size_t max_usage,
+                                std::size_t num_allocations,
+                                std::size_t max_allocation_size,  // in MiB
+                                std::size_t max_usage,
                                 rmm::cuda_stream_view stream = {})
 {
   std::uniform_int_distribution<std::size_t> size_distribution(1, max_allocation_size * size_mb);
@@ -147,10 +148,10 @@ void uniform_random_allocations(rmm::mr::device_memory_resource& mr,
 
 // TODO figure out how to map a normal distribution to integers between 1 and max_allocation_size
 /*void normal_random_allocations(rmm::mr::device_memory_resource& mr,
-                               size_t num_allocations = 1000,
-                               size_t mean_allocation_size = 500,  // in MiB
-                               size_t stddev_allocation_size = 500,  // in MiB
-                               size_t max_usage = 8 << 20,
+                               std::size_t num_allocations = 1000,
+                               std::size_t mean_allocation_size = 500,  // in MiB
+                               std::size_t stddev_allocation_size = 500,  // in MiB
+                               std::size_t max_usage = 8 << 20,
                                cuda_stream_view stream) {
   std::normal_distribution<std::size_t> size_distribution(, max_allocation_size * size_mb);
 }*/
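One way to resolve the TODO above, as a sketch: `std::normal_distribution` requires a floating-point type (which is why the commented-out `std::normal_distribution<std::size_t>` would not compile), so sample a `double` and clamp the rounded value into `[1, max_allocation_size]`. The helper name and parameters below are illustrative, not part of the PR:

```cpp
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <random>

// Hypothetical sketch: map normal samples to integers in [1, max_allocation_size].
std::size_t sample_normal_size(std::default_random_engine& generator,
                               double mean,
                               double stddev,
                               std::size_t max_allocation_size)
{
  std::normal_distribution<double> size_distribution(mean, stddev);
  auto const sampled = std::llround(size_distribution(generator));  // long long
  return static_cast<std::size_t>(
    std::clamp(sampled, 1LL, static_cast<long long>(max_allocation_size)));
}
```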
@@ -181,14 +182,14 @@ inline auto make_binning()
 
 using MRFactoryFunc = std::function<std::shared_ptr<rmm::mr::device_memory_resource>()>;
 
-constexpr size_t max_usage = 16000;
+constexpr std::size_t max_usage = 16000;
 
 static void BM_RandomAllocations(benchmark::State& state, MRFactoryFunc factory)
 {
   auto mr = factory();
 
-  size_t num_allocations = state.range(0);
-  size_t max_size = state.range(1);
+  std::size_t num_allocations = state.range(0);
+  std::size_t max_size = state.range(1);
 
   try {
     for (auto _ : state)
@@ -252,8 +253,8 @@ void declare_benchmark(std::string name)
 }
 
 static void profile_random_allocations(MRFactoryFunc factory,
-                                       size_t num_allocations,
-                                       size_t max_size)
+                                       std::size_t num_allocations,
+                                       std::size_t max_size)
 {
   auto mr = factory();
 
5 changes: 3 additions & 2 deletions include/rmm/cuda_stream_pool.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, NVIDIA CORPORATION.
+ * Copyright (c) 2020-2021, NVIDIA CORPORATION.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -21,6 +21,7 @@
 #include <rmm/detail/error.hpp>
 
 #include <atomic>
+#include <cstddef>
 #include <vector>
 
 namespace rmm {
@@ -84,7 +85,7 @@ class cuda_stream_pool {
    *
    * @return the number of streams in the pool
    */
-  size_t get_pool_size() const noexcept { return streams_.size(); }
+  std::size_t get_pool_size() const noexcept { return streams_.size(); }
 
  private:
   std::vector<rmm::cuda_stream> streams_;
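As a usage note, not part of the diff: `get_pool_size()` pairs naturally with `get_stream(i)` to visit every stream in the pool. This assumes `cuda_stream_pool`'s public accessors as of this release:

```cpp
#include <rmm/cuda_stream_pool.hpp>

#include <cstddef>

// Illustrative only: enqueue work on each pooled stream once.
void for_each_stream(rmm::cuda_stream_pool& pool)
{
  for (std::size_t i = 0; i < pool.get_pool_size(); ++i) {
    rmm::cuda_stream_view stream = pool.get_stream(i);
    // ... launch kernels or async copies on `stream` ...
    static_cast<void>(stream);  // placeholder so this sketch compiles cleanly
  }
}
```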
3 changes: 2 additions & 1 deletion include/rmm/detail/stack_trace.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, NVIDIA CORPORATION.
+ * Copyright (c) 2020-2021, NVIDIA CORPORATION.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -28,6 +28,7 @@
 #include <cxxabi.h>
 #include <dlfcn.h>
 #include <execinfo.h>
+#include <cstddef>
 #include <memory>
 #include <vector>
 #endif
2 changes: 2 additions & 0 deletions include/rmm/device_buffer.hpp
@@ -21,7 +21,9 @@
 #include <rmm/mr/device/per_device_resource.hpp>
 
 #include <cuda_runtime_api.h>
+
 #include <cassert>
+#include <cstddef>
 #include <stdexcept>
 #include <utility>
 
1 change: 1 addition & 0 deletions include/rmm/device_uvector.hpp
@@ -23,6 +23,7 @@
 #include <rmm/mr/device/device_memory_resource.hpp>
 #include <rmm/mr/device/per_device_resource.hpp>
 
+#include <cstddef>
 #include <vector>
 
 namespace rmm {
2 changes: 1 addition & 1 deletion include/rmm/exec_policy.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, NVIDIA CORPORATION.
+ * Copyright (c) 2020-2021, NVIDIA CORPORATION.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
12 changes: 7 additions & 5 deletions include/rmm/mr/device/aligned_resource_adaptor.hpp
@@ -15,15 +15,16 @@
  */
 #pragma once
 
-#include <mutex>
-#include <optional>
-#include <unordered_map>
-
 #include <rmm/cuda_stream_view.hpp>
 #include <rmm/detail/aligned.hpp>
 #include <rmm/detail/error.hpp>
 #include <rmm/mr/device/device_memory_resource.hpp>
 
+#include <cstddef>
+#include <mutex>
+#include <optional>
+#include <unordered_map>
+
 namespace rmm::mr {
 /**
  * @brief Resource that adapts `Upstream` memory resource to allocate memory in a specified
Expand Down Expand Up @@ -195,7 +196,8 @@ class aligned_resource_adaptor final : public device_memory_resource {
* @param stream Stream on which to get the mem info.
* @return std::pair containing free_size and total_size of memory
*/
[[nodiscard]] std::pair<size_t, size_t> do_get_mem_info(cuda_stream_view stream) const override
[[nodiscard]] std::pair<std::size_t, std::size_t> do_get_mem_info(
cuda_stream_view stream) const override
{
return upstream_->get_mem_info(stream);
}
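For context, a hedged sketch of constructing and using the adaptor; the alignment (4096) and threshold (65536) below are example parameters chosen for illustration, not defaults from the PR:

```cpp
#include <rmm/mr/device/aligned_resource_adaptor.hpp>
#include <rmm/mr/device/cuda_memory_resource.hpp>

int main()
{
  rmm::mr::cuda_memory_resource upstream;

  // Allocations of at least 64 KiB come back aligned to 4 KiB boundaries;
  // smaller requests pass through to the upstream resource unchanged.
  rmm::mr::aligned_resource_adaptor<rmm::mr::cuda_memory_resource> mr{
    &upstream, 4096, 65536};

  void* ptr = mr.allocate(1 << 20);  // 1 MiB
  mr.deallocate(ptr, 1 << 20);
  return 0;
}
```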
3 changes: 2 additions & 1 deletion include/rmm/mr/device/arena_memory_resource.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, NVIDIA CORPORATION.
+ * Copyright (c) 2020-2021, NVIDIA CORPORATION.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -21,6 +21,7 @@
 
 #include <cuda_runtime_api.h>
 
+#include <cstddef>
 #include <map>
 #include <shared_mutex>
 
8 changes: 5 additions & 3 deletions include/rmm/mr/device/cuda_async_memory_resource.hpp
@@ -15,16 +15,18 @@
  */
 #pragma once
 
-#include <limits>
 #include <rmm/cuda_device.hpp>
 #include <rmm/cuda_stream_view.hpp>
 #include <rmm/detail/cuda_util.hpp>
 #include <rmm/detail/error.hpp>
 #include <rmm/mr/device/device_memory_resource.hpp>
 
+#include <thrust/optional.h>
+
 #include <cuda_runtime_api.h>
 
-#include <thrust/optional.h>
+#include <cstddef>
+#include <limits>
 
 #if CUDART_VERSION >= 11020  // 11.2 introduced cudaMallocAsync
 #define RMM_CUDA_MALLOC_ASYNC_SUPPORT
@@ -194,7 +196,7 @@ class cuda_async_memory_resource final : public device_memory_resource {
    *
    * @return std::pair contaiing free_size and total_size of memory
    */
-  std::pair<size_t, size_t> do_get_mem_info(rmm::cuda_stream_view) const override
+  std::pair<std::size_t, std::size_t> do_get_mem_info(rmm::cuda_stream_view) const override
   {
     return std::make_pair(0, 0);
   }
8 changes: 5 additions & 3 deletions include/rmm/mr/device/cuda_memory_resource.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, NVIDIA CORPORATION.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -15,11 +15,13 @@
  */
 #pragma once
 
-#include "device_memory_resource.hpp"
+#include <rmm/mr/device/device_memory_resource.hpp>
 
 #include <rmm/cuda_stream_view.hpp>
 #include <rmm/detail/error.hpp>
 
+#include <cstddef>
+
 namespace rmm {
 namespace mr {
/**
Expand Down Expand Up @@ -108,7 +110,7 @@ class cuda_memory_resource final : public device_memory_resource {
*
* @return std::pair contaiing free_size and total_size of memory
*/
std::pair<size_t, size_t> do_get_mem_info(cuda_stream_view) const override
std::pair<std::size_t, std::size_t> do_get_mem_info(cuda_stream_view) const override
{
std::size_t free_size;
std::size_t total_size;
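The rest of the function is cut off at the hunk boundary. A plausible completion, assuming the RMM_CUDA_TRY error-checking macro from rmm/detail/error.hpp wraps a cudaMemGetInfo call (a sketch, not the verbatim source):

```cpp
#include <rmm/cuda_stream_view.hpp>
#include <rmm/detail/error.hpp>

#include <cuda_runtime_api.h>

#include <cstddef>
#include <utility>

// Sketch of how such a do_get_mem_info body typically ends.
std::pair<std::size_t, std::size_t> get_mem_info_sketch(rmm::cuda_stream_view)
{
  std::size_t free_size{};
  std::size_t total_size{};
  RMM_CUDA_TRY(cudaMemGetInfo(&free_size, &total_size));  // query active device
  return std::make_pair(free_size, total_size);
}
```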
9 changes: 5 additions & 4 deletions include/rmm/mr/device/detail/arena.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -23,6 +23,7 @@
 #include <cuda_runtime_api.h>
 
 #include <algorithm>
+#include <cstddef>
 #include <limits>
 #include <memory>
 #include <mutex>
@@ -55,21 +56,21 @@ class block {
    * @param pointer The address for the beginning of the block.
    * @param size The size of the block.
    */
-  block(char* pointer, size_t size) : pointer_(pointer), size_(size) {}
+  block(char* pointer, std::size_t size) : pointer_(pointer), size_(size) {}
 
   /**
    * @brief Construct a block given a void pointer and size.
    *
    * @param pointer The address for the beginning of the block.
    * @param size The size of the block.
    */
-  block(void* pointer, size_t size) : pointer_(static_cast<char*>(pointer)), size_(size) {}
+  block(void* pointer, std::size_t size) : pointer_(static_cast<char*>(pointer)), size_(size) {}
 
   /// Returns the underlying pointer.
   void* pointer() const { return pointer_; }
 
   /// Returns the size of the block.
-  size_t size() const { return size_; }
+  std::size_t size() const { return size_; }
 
   /// Returns true if this block is valid (non-null), false otherwise.
   bool is_valid() const { return pointer_ != nullptr; }
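To make the block interface concrete, a small illustrative snippet; `block` is an internal detail class, and the `rmm::mr::detail::arena` namespace below is assumed from this header's layout:

```cpp
#include <rmm/mr/device/detail/arena.hpp>

int main()
{
  char storage[256];

  // Both constructors shown above take a pointer plus a std::size_t size.
  rmm::mr::detail::arena::block b{static_cast<void*>(storage), sizeof(storage)};

  return (b.is_valid() && b.size() == sizeof(storage)) ? 0 : 1;
}
```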