Skip to content

Commit

Permalink
[cherry-pick] Fix NumPy 2.1 compatibility bug in Python 3.10 (#67866)
Browse files Browse the repository at this point in the history
* fix

* fix

* Update requirements.txt
  • Loading branch information
risemeup1 authored Sep 2, 2024
1 parent 94c6682 commit 64c6937
Show file tree
Hide file tree
Showing 3 changed files with 20 additions and 8 deletions.
12 changes: 10 additions & 2 deletions paddle/fluid/pybind/tensor.cc
Original file line number Diff line number Diff line change
Expand Up @@ -210,8 +210,16 @@ void BindTensor(pybind11::module &m) { // NOLINT
g_framework_tensor_pytype =
reinterpret_cast<PyTypeObject *>(framework_tensor.ptr());
framework_tensor
.def("__array__",
[](phi::DenseTensor &self) { return TensorToPyArray(self); })
.def(
// TODO(risemeup): Modify the logic of
// TensorToPyArray() according to the dtype and copy
// parameters.
"__array__",
[](phi::DenseTensor &self, py::object dtype, py::object copy) {
return TensorToPyArray(self);
},
py::arg("dtype") = py::none(),
py::arg("copy") = py::none())
.def("_ptr",
[](const phi::DenseTensor &self) {
return reinterpret_cast<uintptr_t>(self.data());
Expand Down
10 changes: 5 additions & 5 deletions paddle/fluid/pybind/tensor_py.h
Original file line number Diff line number Diff line change
Expand Up @@ -411,7 +411,7 @@ void SetTensorFromPyArrayT(
static_cast<void *>(dst),
platform::CPUPlace(),
static_cast<const void *>(array.data()),
array.nbytes());
array.size()*sizeof(T));
#else
PADDLE_THROW(platform::errors::PermissionDenied(
"Cannot use XPUPlace in CPU/GPU version, "
Expand Down Expand Up @@ -447,7 +447,7 @@ void SetTensorFromPyArrayT(
phi::DeviceManager::GetDeviceWithPlace(tmp_place)->MemoryCopyH2D(
reinterpret_cast<void *>(dst),
const_cast<void *>(reinterpret_cast<const void *>(array.data())),
array.nbytes());
array.size()*sizeof(T));
platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
auto &ctx = *pool.Get(place);
ctx.Wait();
Expand All @@ -465,10 +465,10 @@ void SetTensorFromPyArrayT(
auto dst = self->mutable_data<T>(place);
#ifdef PADDLE_WITH_HIP
paddle::platform::GpuMemcpySync(
dst, array.data(), array.nbytes(), hipMemcpyHostToDevice);
dst, array.data(),array.size()*sizeof(T), hipMemcpyHostToDevice); //NOLINT
#else
paddle::platform::GpuMemcpySync(
dst, array.data(), array.nbytes(), cudaMemcpyHostToDevice);
dst, array.data(), array.size()*sizeof(T), cudaMemcpyHostToDevice);
#endif

} else if (paddle::platform::is_cuda_pinned_place(place)) {
Expand Down Expand Up @@ -617,7 +617,7 @@ void SetUVATensorFromPyArrayImpl(
cudaHostAlloc(reinterpret_cast<void **>(&data_ptr),
need_allocate_size,
cudaHostAllocWriteCombined | cudaHostAllocMapped);
std::memcpy(data_ptr, array.data(), array.nbytes());
std::memcpy(data_ptr, array.data(), array.size()*sizeof(T));

void *cuda_device_pointer = nullptr;
cudaHostGetDevicePointer(reinterpret_cast<void **>(&cuda_device_pointer),
Expand Down
6 changes: 5 additions & 1 deletion python/paddle/base/dygraph/tensor_patch_methods.py
Original file line number Diff line number Diff line change
Expand Up @@ -848,7 +848,11 @@ def __nonzero__(self):
def __bool__(self):
    # Python 3 truth testing: delegate to the legacy Python-2-style
    # __nonzero__ implementation defined on this class.
    truth_value = self.__nonzero__()
    return truth_value

def __array__(self, dtype=None):
def __array__(
self,
dtype=None,
copy=None,
):
"""
Returns a numpy array shows the value of current Tensor.
Expand Down

0 comments on commit 64c6937

Please sign in to comment.