Memory CPU allocator #2596
@@ -0,0 +1,5 @@
---
Language: Cpp
BasedOnStyle: Google
Standard: Cpp11
...
@@ -0,0 +1 @@
add_subdirectory(detail)
@@ -0,0 +1,7 @@
if(${WITH_GPU})
  nv_library(system_allocator SRCS system_allocator.cc DEPS gflags)
  nv_test(system_allocator_test SRCS system_allocator_test.cc DEPS system_allocator gflags)
else(${WITH_GPU})
  cc_library(system_allocator SRCS system_allocator.cc DEPS gflags)
  cc_test(system_allocator_test SRCS system_allocator_test.cc DEPS system_allocator gflags)
endif(${WITH_GPU})

Review comment: It's unnecessary to wrap nv_test in if(${WITH_GPU}).

Review comment: nv_test was designed to handle *.cu files. But here it is a .cc file, because it doesn't contain CUDA code. However, the source code calls cudaMallocHost, which is defined in the CUDA libraries, and we don't have an external/cuda.cmake. I think a complete solution here should be a single line: cc_library(cpu_allocator_test SRCS cpu_allocator_test.cc DEPS cuda cudart). Actually, I tried and succeeded to add

Review comment: I will try this.
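
To illustrate the point in the review discussion above — the test is a plain .cc file, yet it still needs the CUDA runtime because it calls host-side runtime APIs — here is a minimal standalone sketch, not part of this PR. The file name, buffer size, and build command are illustrative; the sketch only assumes cudaMallocHost/cudaFreeHost from the CUDA runtime and linking against cudart.

// pinned_host_alloc.cc -- hypothetical example, compiled by a host C++ compiler.
// Example build (paths are illustrative):
//   g++ pinned_host_alloc.cc -I/usr/local/cuda/include -L/usr/local/cuda/lib64 -lcudart
#include <cuda_runtime.h>
#include <cstdio>

int main() {
  void* p = nullptr;
  // cudaMallocHost is a host-side runtime call; no device code (kernels) is needed.
  cudaError_t err = cudaMallocHost(&p, 1024);
  if (err != cudaSuccess) {
    std::printf("cudaMallocHost failed: %s\n", cudaGetErrorString(err));
    return 1;
  }
  std::printf("allocated 1024 bytes of pinned host memory\n");
  cudaFreeHost(p);
  return 0;
}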
@@ -0,0 +1,35 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include "paddle/memory/detail/buddy_allocator.h"

namespace paddle {
namespace memory {
namespace detail {

BuddyAllocator::BuddyAllocator(size_t pool_size, size_t max_pools,
                               SystemAllocator* system_allocator)
    : pool_size_(pool_size),
      max_num_pools_(max_pools),
      system_allocator_(system_allocator) {
  PADDLE_ASSERT(pool_size > 0);
  PADDLE_ASSERT(max_pools > 0);
  PADDLE_ASSERT(system_allocator != nullptr);
}

}  // namespace detail
}  // namespace memory
}  // namespace paddle
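
For reference, a minimal sketch of how the constructor above is meant to be called, not part of this PR. The pool size and pool count are illustrative, and the sketch assumes the remaining members declared in buddy_allocator.h (Alloc, Free, Used, and the destructor) are eventually implemented.

#include "paddle/memory/detail/buddy_allocator.h"

int main() {
  using paddle::memory::detail::BuddyAllocator;
  using paddle::memory::detail::CPUAllocator;

  const size_t kPoolSize = 1 << 20;  // illustrative: 1 MiB per pool
  const size_t kMaxPools = 8;        // illustrative

  CPUAllocator system_allocator;  // system-level allocator backing the buddy allocator
  // The constructor checks its preconditions with PADDLE_ASSERT:
  // pool_size > 0, max_pools > 0, and a non-null system allocator.
  BuddyAllocator buddy(kPoolSize, kMaxPools, &system_allocator);
  return 0;
}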
@@ -0,0 +1,86 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include "paddle/memory/detail/system_allocator.h"

#include <mutex>
#include <vector>

namespace paddle {
namespace memory {
namespace detail {

class BuddyAllocator {
 public:
  BuddyAllocator(size_t pool_size, size_t max_pools,
                 SystemAllocator* system_allocator);
  ~BuddyAllocator();

  void* Alloc(size_t size);
  void Free(void*);
  size_t Used();

 private:
  struct Block {
    size_t size_;
    Block* left_;   // left buddy
    Block* right_;  // right buddy
  };

  // Initially, there is only one pool. If an Alloc call cannot find
  // enough memory in the existing pools, and fewer than max_num_pools_
  // pools have been created, a new pool is created by calling
  // system_allocator_->Alloc(pool_size_).
  std::vector<void*> pools_;

  size_t pool_size_;      // the size of each pool
  size_t max_num_pools_;  // the maximum number of pools

  SystemAllocator* system_allocator_;

  std::mutex mutex_;

  // Disable copy and assignment.
  BuddyAllocator(const BuddyAllocator&) = delete;
  BuddyAllocator& operator=(const BuddyAllocator&) = delete;
};

BuddyAllocator<CPUAllocator>* GetCPUBuddyAllocator() {
  static BuddyAllocator<CPUAllocator>* a = nullptr;
  if (a == nullptr) {
    a = new BuddyAllocator<CPUAllocator>();
  }
  return a;
}

#ifndef PADDLE_ONLY_CPU  // The following code is for CUDA.

BuddyAllocator<GPUAllocator>* GetGPUBuddyAllocator(int gpu_id) {
  static BuddyAllocator<GPUAllocator>** as = NULL;
  if (as == NULL) {
    int gpu_num = platform::GetDeviceCount();
    as = new BuddyAllocator<GPUAllocator>*[gpu_num];
    for (int gpu = 0; gpu < gpu_num; gpu++) {
      as[gpu] = new BuddyAllocator<GPUAllocator>();
    }
  }
  return as[gpu_id];
}

#endif  // PADDLE_ONLY_CPU

}  // namespace detail
}  // namespace memory
}  // namespace paddle
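
The GetCPUBuddyAllocator/GetGPUBuddyAllocator getters above use a lazy check-then-new initialization, which is not synchronized across threads. For comparison, a self-contained sketch of the same lazy-singleton idea using a function-local static, whose initialization is thread-safe since C++11. DemoAllocator is a hypothetical stand-in; the PR's BuddyAllocator is not default-constructible.

#include <cstddef>
#include <cstdlib>

// Hypothetical stand-in type, default-constructible for the sake of the sketch.
struct DemoAllocator {
  void* Alloc(std::size_t size) { return std::malloc(size); }
  void Free(void* p) { std::free(p); }
};

// A function-local static is initialized exactly once, and since C++11 that
// initialization is guaranteed to be thread-safe ("magic statics"), avoiding
// the unsynchronized nullptr check used in the getters above.
DemoAllocator* GetDemoAllocator() {
  static DemoAllocator instance;
  return &instance;
}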
@@ -0,0 +1,90 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/memory/detail/system_allocator.h"

#include <stdlib.h>    // for malloc and free
#include <sys/mman.h>  // for mlock and munlock

#include "gflags/gflags.h"
#include "paddle/platform/assert.h"
#include "paddle/platform/cuda.h"

// If use_pinned_memory is true, CPUAllocator calls mlock, which pins and
// locks the allocated memory as a staging area for data exchange between
// host and device. Allocating too much pinned memory reduces the amount of
// memory available to the system for paging, so use_pinned_memory defaults
// to false.
DEFINE_bool(use_pinned_memory, false,
            "If set, allocate cpu/gpu pinned memory.");

namespace paddle {
namespace memory {
namespace detail {

void* CPUAllocator::Alloc(size_t size) {
  // According to http://www.cplusplus.com/reference/cstdlib/malloc/,
  // malloc(0) might return a non-null pointer that must not be
  // dereferenced -- so we return nullptr for zero-sized requests.
  if (size <= 0) return nullptr;

  void* p = malloc(size);
  if (p != nullptr && FLAGS_use_pinned_memory) {
    mlock(p, size);
  }
  return p;
}

void CPUAllocator::Free(void* p, size_t size) {
  if (p != nullptr && FLAGS_use_pinned_memory) {
    munlock(p, size);
  }
  free(p);
}

#ifndef PADDLE_ONLY_CPU

void* GPUAllocator::Alloc(size_t size) {
  // The CUDA documentation doesn't say whether cudaMalloc returns nullptr
  // when size is 0, so we make sure it does.
  if (size <= 0) {
    return nullptr;
  }

  void* p = 0;
  cudaError_t result =
      FLAGS_use_pinned_memory ? cudaMallocHost(&p, size) : cudaMalloc(&p, size);
  if (result != cudaSuccess) {
    cudaGetLastError();  // clear the error, if there is any.
  }
  return result == cudaSuccess ? p : nullptr;
}

void GPUAllocator::Free(void* p, size_t size) {
  // Purposefully allow cudaErrorCudartUnloading, because that is returned
  // if you ever call cudaFree after the driver has already shut down. This
  // happens only if the process is terminating, in which case we don't care
  // whether cudaFree succeeds.
  cudaError_t err = FLAGS_use_pinned_memory ? cudaFreeHost(p) : cudaFree(p);
  if (err != cudaErrorCudartUnloading) {
    platform::throw_on_error(err, "cudaFree{Host} failed");
  }
}

#endif  // PADDLE_ONLY_CPU

}  // namespace detail
}  // namespace memory
}  // namespace paddle
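
To see the mlock/munlock pairing used by CPUAllocator in isolation, here is a minimal, self-contained Linux/POSIX sketch, not part of this PR; the 4 KiB size is illustrative.

#include <sys/mman.h>  // mlock, munlock
#include <cstddef>
#include <cstdio>
#include <cstdlib>

int main() {
  const std::size_t size = 4096;
  void* p = std::malloc(size);
  if (p == nullptr) return 1;

  // mlock pins the pages backing [p, p + size) into physical memory so they
  // cannot be paged out; it can fail, e.g. when RLIMIT_MEMLOCK is exceeded.
  if (mlock(p, size) != 0) {
    std::perror("mlock");
  } else {
    std::puts("memory locked");
    munlock(p, size);  // unlock before freeing, mirroring CPUAllocator::Free
  }

  std::free(p);
  return 0;
}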
@@ -0,0 +1,53 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <stddef.h>  // for size_t

namespace paddle {
namespace memory {
namespace detail {

// SystemAllocator is the parent class of CPUAllocator and
// GPUAllocator. A BuddyAllocator object uses a SystemAllocator*
// pointing to the underlying system allocator. An alternative to
// this class hierarchy is to pass a system allocator class to
// BuddyAllocator as a template parameter. That approach would make
// BuddyAllocator a class template, and its rather complicated
// algorithm would make buddy_allocator.h messy.
class SystemAllocator {
 public:
  virtual ~SystemAllocator() {}
  virtual void* Alloc(size_t size) = 0;
  virtual void Free(void* p, size_t size) = 0;
};

class CPUAllocator : public SystemAllocator {
 public:
  virtual void* Alloc(size_t size);
  virtual void Free(void* p, size_t size);
};

#ifndef PADDLE_ONLY_CPU
class GPUAllocator : public SystemAllocator {
 public:
  virtual void* Alloc(size_t size);
  virtual void Free(void* p, size_t size);
};
#endif  // PADDLE_ONLY_CPU

}  // namespace detail
}  // namespace memory
}  // namespace paddle
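
For contrast, a minimal sketch of the alternative rejected in the comment above — passing the system allocator as a template parameter. The class name and the pass-through logic are purely illustrative, not part of this PR; the point is that the whole buddy algorithm would then have to live in the header.

#include <cstddef>

// Sketch of the template-parameter alternative: no virtual dispatch, the
// system allocator type is fixed at compile time, but the whole (fairly
// complicated) buddy algorithm would have to be written in the header.
template <typename SystemAllocatorT>
class TemplatedBuddyAllocator {
 public:
  void* Alloc(std::size_t size) {
    // Real buddy bookkeeping omitted; delegate to the system allocator so
    // the sketch stays short.
    return system_.Alloc(size);
  }
  void Free(void* p, std::size_t size) { system_.Free(p, size); }

 private:
  SystemAllocatorT system_;  // resolved at compile time instead of via virtuals
};

// Usage would look like: TemplatedBuddyAllocator<CPUAllocator> cpu_buddy;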
@@ -0,0 +1,71 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/memory/detail/system_allocator.h"

#include <memory>
#include <vector>

#include "gflags/gflags.h"
#include "gtest/gtest.h"

DECLARE_bool(use_pinned_memory);

void TestAllocator(paddle::memory::detail::SystemAllocator& a, size_t size) {
  bool freed = false;
  {
    void* p = a.Alloc(size);
    if (size > 0) {
      EXPECT_NE(p, nullptr);
    } else {
      EXPECT_EQ(p, nullptr);
    }

    int* i = static_cast<int*>(p);
    std::shared_ptr<int> ptr(i, [&](void* p) {
      freed = true;
      a.Free(p, size);
    });
  }
  EXPECT_TRUE(freed);
}

TEST(CPUAllocator, NoLockMem) {
  FLAGS_use_pinned_memory = false;
  paddle::memory::detail::CPUAllocator a;
  TestAllocator(a, 2048);
  TestAllocator(a, 0);
}

TEST(CPUAllocator, LockMem) {
  FLAGS_use_pinned_memory = true;
  paddle::memory::detail::CPUAllocator a;
  TestAllocator(a, 2048);
  TestAllocator(a, 0);
}

#ifndef PADDLE_ONLY_CPU
TEST(GPUAllocator, NoStaging) {
  FLAGS_use_pinned_memory = false;
  paddle::memory::detail::GPUAllocator a;
  TestAllocator(a, 2048);
  TestAllocator(a, 0);
}
TEST(GPUAllocator, Staging) {
  FLAGS_use_pinned_memory = true;
  paddle::memory::detail::GPUAllocator a;
  TestAllocator(a, 2048);
  TestAllocator(a, 0);
}
#endif  // PADDLE_ONLY_CPU
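
The tests above flip FLAGS_use_pinned_memory directly, which works because of the gflags DEFINE_bool / DECLARE_bool pairing: system_allocator.cc defines the flag, and any other translation unit that declares it reads and writes the same global. A minimal standalone sketch of that pattern, with an illustrative flag name not used in this PR:

// In exactly one .cc file: define the flag (this creates FLAGS_demo_flag).
#include "gflags/gflags.h"
DEFINE_bool(demo_flag, false, "An illustrative boolean flag.");

// In any other file (or a test): declare it and use it directly.
DECLARE_bool(demo_flag);

bool ToggleAndRead() {
  FLAGS_demo_flag = true;  // tests can set the flag directly, as above
  return FLAGS_demo_flag;
}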
Review comment: Different allocations have different malloc and free methods.