From 5d7e4d67826d79ef53f8476313f0b0f6245d2ebb Mon Sep 17 00:00:00 2001
From: qiaolongfei
Date: Fri, 23 Mar 2018 23:42:47 +0800
Subject: [PATCH 1/4] support empty tensor

---
 paddle/fluid/framework/tensor_impl.h     |  8 ++++----
 paddle/fluid/memory/memory_test.cc       |  4 ++--
 .../fluid/tests/unittests/test_tensor.py | 20 ++++++++++++++++++-
 3 files changed, 25 insertions(+), 7 deletions(-)

diff --git a/paddle/fluid/framework/tensor_impl.h b/paddle/fluid/framework/tensor_impl.h
index 638bd0db9d702..7a48390440083 100644
--- a/paddle/fluid/framework/tensor_impl.h
+++ b/paddle/fluid/framework/tensor_impl.h
@@ -117,10 +117,10 @@ inline void* Tensor::mutable_data(platform::Place place, std::type_index type) {
   if (holder_ != nullptr) {
     holder_->set_type(type);
   }
-  PADDLE_ENFORCE_GT(
-      numel(), 0,
-      "When calling this method, the Tensor's numel must be larger than zero. "
-      "Please check Tensor::Resize has been called first.");
+  PADDLE_ENFORCE_GE(numel(), 0,
+                    "When calling this method, the Tensor's numel must be "
+                    "equal or larger than zero. "
+                    "Please check Tensor::Resize has been called first.");
   int64_t size = numel() * SizeOfType(type);
   /* some versions of boost::variant don't have operator!= */
   if (holder_ == nullptr || !(holder_->place() == place) ||
diff --git a/paddle/fluid/memory/memory_test.cc b/paddle/fluid/memory/memory_test.cc
index ae98d0d52542c..eb27a52b254c1 100644
--- a/paddle/fluid/memory/memory_test.cc
+++ b/paddle/fluid/memory/memory_test.cc
@@ -59,7 +59,7 @@ TEST(BuddyAllocator, CPUMultAlloc) {
   EXPECT_EQ(total_size, 0UL);
 
   for (auto size :
-       {128, 256, 1024, 4096, 16384, 65536, 262144, 1048576, 4194304}) {
+       {0, 128, 256, 1024, 4096, 16384, 65536, 262144, 1048576, 4194304}) {
     ps[paddle::memory::Alloc(cpu, size)] = size;
 
     // Buddy Allocator doesn't manage too large memory chunk
@@ -117,7 +117,7 @@ TEST(BuddyAllocator, GPUMultAlloc) {
   EXPECT_EQ(total_size, 0UL);
 
   for (auto size :
-       {128, 256, 1024, 4096, 16384, 65536, 262144, 1048576, 4194304}) {
+       {0, 128, 256, 1024, 4096, 16384, 65536, 262144, 1048576, 4194304}) {
     ps[paddle::memory::Alloc(gpu, size)] = size;
 
     // Buddy Allocator doesn't manage too large memory chunk
diff --git a/python/paddle/fluid/tests/unittests/test_tensor.py b/python/paddle/fluid/tests/unittests/test_tensor.py
index a369783245ae2..379081c3287ce 100644
--- a/python/paddle/fluid/tests/unittests/test_tensor.py
+++ b/python/paddle/fluid/tests/unittests/test_tensor.py
@@ -126,7 +126,6 @@ def test_lod_tensor_init(self):
     def test_lod_tensor_gpu_init(self):
         if not core.is_compiled_with_cuda():
             return
-        scope = core.Scope()
         place = core.CUDAPlace(0)
         lod_py = [[0, 2, 5], [0, 2, 4, 5]]
         lod_tensor = core.LoDTensor()
@@ -144,6 +143,25 @@ def test_lod_tensor_gpu_init(self):
         self.assertAlmostEqual(2.0, lod_v[0, 0, 0, 1])
         self.assertListEqual(lod_py, lod_tensor.lod())
 
+    def test_empty_tensor(self):
+        place = core.CPUPlace()
+        scope = core.Scope()
+        var = scope.var("test_tensor")
+
+        tensor = var.get_tensor()
+
+        tensor.set_dims([0, 1])
+        tensor.alloc_float(place)
+
+        tensor_array = numpy.array(tensor)
+        self.assertEqual((0, 1), tensor_array.shape)
+
+        if core.is_compiled_with_cuda():
+            gpu_place = core.CUDAPlace(0)
+            tensor.alloc_float(gpu_place)
+            tensor_array = numpy.array(tensor)
+            self.assertEqual((0, 1), tensor_array.shape)
+
 
 if __name__ == '__main__':
     unittest.main()

From 043dae6557eeb729840cf53fc21960a053605660 Mon Sep 17 00:00:00 2001
From: qiaolongfei
Date: Sat, 24 Mar 2018 00:32:29 +0800
Subject: [PATCH 2/4] add depends for device_tracer

---
 paddle/fluid/platform/CMakeLists.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/paddle/fluid/platform/CMakeLists.txt b/paddle/fluid/platform/CMakeLists.txt
index 7eec6ab657723..c2432ad6107e1 100644
--- a/paddle/fluid/platform/CMakeLists.txt
+++ b/paddle/fluid/platform/CMakeLists.txt
@@ -49,7 +49,7 @@ nv_test(device_context_test SRCS device_context_test.cu DEPS device_context gpu_
 nv_test(cudnn_helper_test SRCS cudnn_helper_test.cc DEPS dynload_cuda)
 nv_test(transform_test SRCS transform_test.cu DEPS paddle_memory place device_context)
 
-cc_library(device_tracer SRCS device_tracer.cc DEPS profiler_proto ${GPU_CTX_DEPS})
+cc_library(device_tracer SRCS device_tracer.cc DEPS profiler_proto proto_desc ${GPU_CTX_DEPS})
 cc_library(profiler SRCS profiler.cc DEPS device_context device_tracer)
 cc_test(profiler_test SRCS profiler_test.cc DEPS profiler)
 

From 60ba3650048572b009b54401dc121a509f63e608 Mon Sep 17 00:00:00 2001
From: qiaolongfei
Date: Sat, 24 Mar 2018 00:39:35 +0800
Subject: [PATCH 3/4] device_tracer depends on boost

---
 paddle/fluid/platform/CMakeLists.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/paddle/fluid/platform/CMakeLists.txt b/paddle/fluid/platform/CMakeLists.txt
index c2432ad6107e1..b8f2aed9b8c12 100644
--- a/paddle/fluid/platform/CMakeLists.txt
+++ b/paddle/fluid/platform/CMakeLists.txt
@@ -49,7 +49,7 @@ nv_test(device_context_test SRCS device_context_test.cu DEPS device_context gpu_
 nv_test(cudnn_helper_test SRCS cudnn_helper_test.cc DEPS dynload_cuda)
 nv_test(transform_test SRCS transform_test.cu DEPS paddle_memory place device_context)
 
-cc_library(device_tracer SRCS device_tracer.cc DEPS profiler_proto proto_desc ${GPU_CTX_DEPS})
+cc_library(device_tracer SRCS device_tracer.cc DEPS profiler_proto boost ${GPU_CTX_DEPS})
 cc_library(profiler SRCS profiler.cc DEPS device_context device_tracer)
 cc_test(profiler_test SRCS profiler_test.cc DEPS profiler)
 

From 59c47499cc3a6e9b0cc93c3ad5363bcd214ab167 Mon Sep 17 00:00:00 2001
From: qiaolongfei
Date: Mon, 26 Mar 2018 09:26:15 +0800
Subject: [PATCH 4/4] revert

---
 paddle/fluid/platform/CMakeLists.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/paddle/fluid/platform/CMakeLists.txt b/paddle/fluid/platform/CMakeLists.txt
index b8f2aed9b8c12..686c0889140f0 100644
--- a/paddle/fluid/platform/CMakeLists.txt
+++ b/paddle/fluid/platform/CMakeLists.txt
@@ -49,7 +49,7 @@ nv_test(device_context_test SRCS device_context_test.cu DEPS device_context gpu_
 nv_test(cudnn_helper_test SRCS cudnn_helper_test.cc DEPS dynload_cuda)
 nv_test(transform_test SRCS transform_test.cu DEPS paddle_memory place device_context)
 
-cc_library(device_tracer SRCS device_tracer.cc DEPS profiler_proto boost ${GPU_CTX_DEPS})
+cc_library(device_tracer SRCS device_tracer.cc DEPS boost profiler_proto ${GPU_CTX_DEPS})
 cc_library(profiler SRCS profiler.cc DEPS device_context device_tracer)
 cc_test(profiler_test SRCS profiler_test.cc DEPS profiler)
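
Note (illustrative, not part of the patches): a minimal standalone sketch of the
zero-element path that PATCH 1/4 enables, reusing only the paddle.fluid.core calls
already exercised by the new test_empty_tensor unit test; it assumes a Python build
with these patches applied, and the variable name "empty_tensor_demo" is arbitrary.

    import numpy
    import paddle.fluid.core as core

    # Create a zero-element tensor on CPU; with the relaxed
    # PADDLE_ENFORCE_GE(numel(), 0, ...) check this no longer aborts.
    place = core.CPUPlace()
    scope = core.Scope()
    tensor = scope.var("empty_tensor_demo").get_tensor()

    tensor.set_dims([0, 1])      # zero elements along the first axis
    tensor.alloc_float(place)    # allocates a zero-sized buffer

    arr = numpy.array(tensor)
    print(arr.shape)             # expected: (0, 1)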