diff --git a/paddle/fluid/eager/api/generated/eager_generated/backwards/scale_node.cc b/paddle/fluid/eager/api/generated/eager_generated/backwards/scale_node.cc
index 8bd40140f53cc..38f67cb5bdf2a 100644
--- a/paddle/fluid/eager/api/generated/eager_generated/backwards/scale_node.cc
+++ b/paddle/fluid/eager/api/generated/eager_generated/backwards/scale_node.cc
@@ -90,9 +90,7 @@ void ScaleAPI(const paddle::experimental::Tensor& x, float scale, float bias,
   size_t bytes_size =
       phi::product(dense_tensor->dims()) * SizeOf(dense_tensor->dtype());
   auto dense_out = std::make_shared<phi::DenseTensor>(
-      phi::make_intrusive<paddle::experimental::SharedStorage>(
-          paddle::memory::Alloc(place, bytes_size)),
-      std::move(tensor_meta));
+      paddle::memory::Alloc(place, bytes_size), std::move(tensor_meta));
   // Handle Device Context
   const paddle::platform::Place& expected_kernel_place =
       Controller::Instance().GetExpectedPlace();
diff --git a/paddle/fluid/eager/tests/task_tests/fwd_bwd_joint_test.cc b/paddle/fluid/eager/tests/task_tests/fwd_bwd_joint_test.cc
index 2d69380cf78d9..1f8fdb7de0c17 100644
--- a/paddle/fluid/eager/tests/task_tests/fwd_bwd_joint_test.cc
+++ b/paddle/fluid/eager/tests/task_tests/fwd_bwd_joint_test.cc
@@ -50,9 +50,7 @@ paddle::experimental::Tensor hook_function(
   auto place = t_dense->place();
   size_t bytes_size = phi::product(t_dense->dims()) * SizeOf(t_dense->dtype());
   auto ret_dense = std::make_shared<phi::DenseTensor>(
-      phi::make_intrusive<paddle::experimental::SharedStorage>(
-          paddle::memory::Alloc(place, bytes_size)),
-      std::move(ret_meta));
+      paddle::memory::Alloc(place, bytes_size), std::move(ret_meta));
 
   float* t_ptr = t_dense->mutable_data<float>(place);
   float* ret_ptr = ret_dense->mutable_data<float>(place);
diff --git a/paddle/fluid/eager/tests/task_tests/hook_test.cc b/paddle/fluid/eager/tests/task_tests/hook_test.cc
index 855fe526c10c8..d7b887b28bde8 100644
--- a/paddle/fluid/eager/tests/task_tests/hook_test.cc
+++ b/paddle/fluid/eager/tests/task_tests/hook_test.cc
@@ -46,9 +46,7 @@ paddle::experimental::Tensor hook_function(
   auto place = t_dense->place();
   size_t bytes_size = phi::product(t_dense->dims()) * SizeOf(t_dense->dtype());
   auto ret_dense = std::make_shared<phi::DenseTensor>(
-      phi::make_intrusive<paddle::experimental::SharedStorage>(
-          paddle::memory::Alloc(place, bytes_size)),
-      std::move(ret_meta));
+      paddle::memory::Alloc(place, bytes_size), std::move(ret_meta));
 
   float* t_ptr = t_dense->mutable_data<float>(place);
   float* ret_ptr = ret_dense->mutable_data<float>(place);
diff --git a/paddle/fluid/eager/tests/task_tests/hook_test_intermidiate.cc b/paddle/fluid/eager/tests/task_tests/hook_test_intermidiate.cc
index 8524be7800bfd..c4d4ff9110682 100644
--- a/paddle/fluid/eager/tests/task_tests/hook_test_intermidiate.cc
+++ b/paddle/fluid/eager/tests/task_tests/hook_test_intermidiate.cc
@@ -46,9 +46,7 @@ paddle::experimental::Tensor hook_function(
   auto place = t_dense->place();
   size_t bytes_size = phi::product(t_dense->dims()) * SizeOf(t_dense->dtype());
   auto ret_dense = std::make_shared<phi::DenseTensor>(
-      phi::make_intrusive<paddle::experimental::SharedStorage>(
-          paddle::memory::Alloc(place, bytes_size)),
-      std::move(ret_meta));
+      paddle::memory::Alloc(place, bytes_size), std::move(ret_meta));
 
   float* t_ptr = t_dense->mutable_data<float>(place);
   float* ret_ptr = ret_dense->mutable_data<float>(place);
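Note: the four hunks above make the same mechanical change: the buffer returned by paddle::memory::Alloc is handed to the DenseTensor constructor directly, instead of first being wrapped in the SharedStorage adapter that this patch deletes. A minimal sketch of the pattern (the place, bytes_size, and meta names here are illustrative, not taken from any one hunk):

    // Old pattern (removed above): wrap the allocation in SharedStorage first.
    //   auto out = std::make_shared<phi::DenseTensor>(
    //       phi::make_intrusive<paddle::experimental::SharedStorage>(
    //           paddle::memory::Alloc(place, bytes_size)),
    //       std::move(meta));
    // New pattern: pass the phi::Allocation holder straight through.
    auto out = std::make_shared<phi::DenseTensor>(
        paddle::memory::Alloc(place, bytes_size), std::move(meta));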
diff --git a/paddle/fluid/pybind/eager.cc b/paddle/fluid/pybind/eager.cc
index b94a3b0edcabc..530cc6992d391 100644
--- a/paddle/fluid/pybind/eager.cc
+++ b/paddle/fluid/pybind/eager.cc
@@ -84,7 +84,7 @@ void EmptyTensorInitializer(TensorObject* self, const std::string& name,
   } else {
     // TODO(dev): we need enhance check for ddims.
     dense_tensor = std::make_shared<phi::DenseTensor>(
-        phi::make_intrusive<paddle::experimental::SharedStorage>(place),
+        std::make_shared<phi::Allocation>(),
         phi::DenseTensorMeta(paddle::framework::TransToPhiDataType(dtype),
                              ddims));
   }
diff --git a/paddle/phi/api/lib/sparse_api_custom_impl.cc b/paddle/phi/api/lib/sparse_api_custom_impl.cc
index 01e2ee14f4301..c88e2e367feed 100644
--- a/paddle/phi/api/lib/sparse_api_custom_impl.cc
+++ b/paddle/phi/api/lib/sparse_api_custom_impl.cc
@@ -65,14 +65,10 @@ Tensor to_sparse_coo_impl(const Tensor& x, const int64_t sparse_dim) {
 
   // 5. Prepare outputs
   // create empty SparseCooTensor
-  phi::DenseTensor non_zero_indices(
-      phi::make_intrusive<paddle::experimental::SharedStorage>(
-          phi::TransToPhiPlace(kernel_key.backend())),
-      std::move(indices_meta));
-  phi::DenseTensor non_zero_elements(
-      phi::make_intrusive<paddle::experimental::SharedStorage>(
-          phi::TransToPhiPlace(kernel_key.backend())),
-      std::move(elements_meta));
+  phi::DenseTensor non_zero_indices(std::make_shared<phi::Allocation>(),
+                                    std::move(indices_meta));
+  phi::DenseTensor non_zero_elements(std::make_shared<phi::Allocation>(),
+                                     std::move(elements_meta));
   auto coo = std::make_shared<phi::SparseCooTensor>(
       non_zero_indices, non_zero_elements, x.dims());
 
@@ -127,18 +123,12 @@ Tensor to_sparse_csr_impl(const Tensor& x) {
 
   // 5. Prepare outputs
   // create empty SparseCooTensor
-  phi::DenseTensor non_zero_crows(
-      phi::make_intrusive<paddle::experimental::SharedStorage>(
-          phi::TransToPhiPlace(kernel_key.backend())),
-      std::move(crows_meta));
-  phi::DenseTensor non_zero_cols(
-      phi::make_intrusive<paddle::experimental::SharedStorage>(
-          phi::TransToPhiPlace(kernel_key.backend())),
-      std::move(cols_meta));
-  phi::DenseTensor non_zero_elements(
-      phi::make_intrusive<paddle::experimental::SharedStorage>(
-          phi::TransToPhiPlace(kernel_key.backend())),
-      std::move(elements_meta));
+  phi::DenseTensor non_zero_crows(std::make_shared<phi::Allocation>(),
+                                  std::move(crows_meta));
+  phi::DenseTensor non_zero_cols(std::make_shared<phi::Allocation>(),
+                                 std::move(cols_meta));
+  phi::DenseTensor non_zero_elements(std::make_shared<phi::Allocation>(),
+                                     std::move(elements_meta));
   auto csr = std::make_shared<phi::SparseCsrTensor>(
       non_zero_crows, non_zero_cols, non_zero_elements, x.dims());
 
@@ -192,9 +182,7 @@ Tensor to_dense_impl(const Tensor& x) {
 
   // 5. Prepare outputs
   // create empty SparseCooTensor
   auto dense_out = std::make_shared<phi::DenseTensor>(
-      phi::make_intrusive<paddle::experimental::SharedStorage>(
-          phi::TransToPhiPlace(kernel_key.backend())),
-      std::move(dense_meta));
+      std::make_shared<phi::Allocation>(), std::move(dense_meta));
 
   kernel_context.EmplaceBackOutput(dense_out.get());
   Tensor out;
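Note: in eager.cc and the sparse API above there is no buffer to hand over yet, so the uninitialized placeholder SharedStorage(place) is replaced by an empty std::make_shared<phi::Allocation>(): the tensor starts out as metadata only, and real memory is attached when the kernel allocates its output. Sketch of the idiom, assuming a prepared DenseTensorMeta named indices_meta (illustrative):

    // Metadata-only tensor: dims/dtype are known, memory comes later.
    phi::DenseTensor non_zero_indices(std::make_shared<phi::Allocation>(),
                                      std::move(indices_meta));
    // A later dev_ctx.template Alloc<T>(&non_zero_indices), or the kernel's
    // output machinery, attaches the actual buffer on the kernel's place.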
diff --git a/paddle/phi/api/lib/utils/storage.h b/paddle/phi/api/lib/utils/storage.h
index c2eedd0fa63f7..5fe17bc51b68a 100644
--- a/paddle/phi/api/lib/utils/storage.h
+++ b/paddle/phi/api/lib/utils/storage.h
@@ -65,79 +65,6 @@ class ExternalStorage : public phi::Storage {
   int64_t size_{0};
 };
 
-class SharedStorage : public phi::Storage {
- public:
-  explicit SharedStorage(
-      const std::shared_ptr<phi::Allocation>& allocation)
-      : Storage(allocation) {
-    CHECK(allocation);
-    place_ = allocation->place();
-    size_ = allocation->size();
-  }
-
-  // In order to be compatible with the original Tensor design and execution
-  // system, we need to allow the uninitialized SharedStorage to exist,
-  // and it can be removed after the compatibility phase is over in the future
-  explicit SharedStorage(const phi::Place& place) { place_ = place; }
-
-  void Realloc(size_t n) override {
-    this->Clear();
-    data_ = paddle::memory::AllocShared(place(), n);
-    size_ = n;
-  }
-
-  static const char* name() { return "SharedStorage"; }
-
-  void Clear() override {
-    data_ = nullptr;
-    size_ = 0;
-  }
-
-  void set_data_shared(
-      const std::shared_ptr<phi::Allocation>& holder) override {
-    data_ = holder;
-    if (holder) {
-      size_ = holder->size();
-      place_ = holder->place();
-    }
-  }
-
-  std::shared_ptr<phi::Allocation>&& move_data_shared() override {
-    size_ = 0;
-    place_ = phi::Place();
-    return std::move(data_);
-  }
-
-  size_t size() const noexcept override {
-    return data_ ? data_->size() : size_;
-  }
-  const phi::Place& place() const override {
-    return data_ ? data_->place() : place_;
-  }
-  bool OwnsMemory() const noexcept override { return false; }
-
-  const std::shared_ptr<phi::Allocation>& GetAllocation() {
-    return data_;
-  }
-
-  // Temporary method: For compatible with fluid Tensor and improve performance
-  void ResetAllocation(std::shared_ptr<phi::Allocation> allocation) {
-    data_ = allocation;
-    size_ = allocation->size();
-    place_ = allocation->place();
-  }
-
-  // Temporary method: For compatible with fluid Tensor and improve performance
-  void ResetAllocationPlace(const phi::Place& place) { place_ = place; }
-
-  // Temporary method: For compatible with fluid Tensor and improve performance
-  void Reset() { this->Clear(); }
-
- private:
-  phi::Place place_;
-  int64_t size_{0};
-};
-
 class TensorStorage : public paddle::memory::allocation::Allocation {
  public:
   explicit TensorStorage(phi::intrusive_ptr<phi::Storage> storage)
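Note: with every caller migrated, the SharedStorage adapter itself can go. It only cached size_/place_ and forwarded them to the wrapped allocation; DenseTensor now owns a std::shared_ptr<phi::Allocation> holder directly, which covers both the "wrap an existing allocation" and the "uninitialized placeholder" cases above. A much-simplified sketch of the resulting ownership model (hypothetical class for illustration, assuming phi headers; not the real DenseTensor):

    #include <memory>
    #include <utility>

    class HolderOwner {  // stand-in for DenseTensor's holder handling
     public:
      void set_holder(std::shared_ptr<phi::Allocation> holder) {
        holder_ = std::move(holder);  // size()/place() live on the Allocation
      }
      bool initialized() const { return holder_ != nullptr; }
     private:
      std::shared_ptr<phi::Allocation> holder_;  // replaces Storage/SharedStorage
    };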
diff --git a/paddle/phi/kernels/cpu/reduce.h b/paddle/phi/kernels/cpu/reduce.h
index 06a458832d19f..b0e43b6526cdd 100644
--- a/paddle/phi/kernels/cpu/reduce.h
+++ b/paddle/phi/kernels/cpu/reduce.h
@@ -118,7 +118,7 @@ void GetShuffledInput(const DeviceContext& dev_ctx,
   std::vector<int> perm_axis(input.dims().size());
   GetShuffledDim(input.dims(), &shuffled_dims, dims, &perm_axis);
 
-  shuffled_input->ResizeAndAllocate(shuffled_dims);
+  shuffled_input->Resize(shuffled_dims);
   dev_ctx.template Alloc<OutT>(shuffled_input);
 
   phi::funcs::TransposeNormal<DeviceContext, OutT> trans;
@@ -132,10 +132,7 @@ void HandleLargeDim(const DeviceContext& dev_ctx,
                     const std::vector<int64_t>& dims,
                     bool keep_dim) {
   // shuffle the reduced dim to the end
-  phi::DenseTensor shuffled_input = phi::DenseTensor(
-      phi::make_intrusive<paddle::experimental::SharedStorage>(input.place()),
-      input.meta());
-
+  phi::DenseTensor shuffled_input;
   GetShuffledInput<DeviceContext, OutT>(dev_ctx, input, &shuffled_input, dims);
 
   // transpose to 2D tensor whose shape is {unreduced, reduced}.
diff --git a/paddle/phi/kernels/gpu/rnn_functor.h b/paddle/phi/kernels/gpu/rnn_functor.h
index b29b7ed2d8010..68d8b2e5eef0e 100644
--- a/paddle/phi/kernels/gpu/rnn_functor.h
+++ b/paddle/phi/kernels/gpu/rnn_functor.h
@@ -99,7 +99,7 @@ class RNNDescriptors {
 
     // ------------------- cudnn dropout descriptors ---------------------
     size_t state_size;
-    bool is_initialized = dropout_state->IsInitialized();
+    bool is_initialized = dropout_state->initialized();
    if (!is_test_ && !is_initialized) {
 #ifdef PADDLE_WITH_HIP
       PADDLE_ENFORCE_GPU_SUCCESS(
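Note: the reduce.h hunks swap ResizeAndAllocate for the two-step Resize + dev_ctx.Alloc<T>() idiom, so memory always comes from the device context's place, and HandleLargeDim's temporary is now default-constructed, getting its dims and memory inside GetShuffledInput; the rnn_functor.h hunk is the matching swap of the legacy IsInitialized() call for the initialized() accessor. The idiom in isolation (sketch; dev_ctx is any phi device context, shuffled_dims is illustrative):

    phi::DenseTensor tmp;
    tmp.Resize(shuffled_dims);            // sets dims only, no memory yet
    dev_ctx.template Alloc<OutT>(&tmp);   // allocates on dev_ctx's place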
diff --git a/paddle/phi/kernels/sparse/cpu/sparse_utils_kernel.cc b/paddle/phi/kernels/sparse/cpu/sparse_utils_kernel.cc
index 69ac0417f763d..2301d31d7a6c2 100644
--- a/paddle/phi/kernels/sparse/cpu/sparse_utils_kernel.cc
+++ b/paddle/phi/kernels/sparse/cpu/sparse_utils_kernel.cc
@@ -171,24 +171,17 @@ void SparseCooToCsrKernel(const Context& dev_ctx,
   int batchs = x_dims.size() == 2 ? 1 : x_dims[0];
   int rows = x_dims.size() == 2 ? x_dims[0] : x_dims[1];
 
-  const auto place = dev_ctx.GetPlace();
-  DenseTensorMeta crows_meta(
-      DataType::INT64, {batchs * (rows + 1)}, DataLayout::NCHW);
-  DenseTensorMeta cols_meta(DataType::INT64, {non_zero_num}, DataLayout::NCHW);
-  DenseTensorMeta values_meta(
-      x.dtype(), {non_zero_num}, x.non_zero_elements().layout());
-  phi::DenseTensor non_zero_crows(
-      phi::make_intrusive<paddle::experimental::SharedStorage>(place),
-      std::move(crows_meta));
-  phi::DenseTensor non_zero_cols(
-      phi::make_intrusive<paddle::experimental::SharedStorage>(place),
-      std::move(cols_meta));
-  phi::DenseTensor non_zero_elements(
-      phi::make_intrusive<paddle::experimental::SharedStorage>(place),
-      std::move(values_meta));
-  int64_t* csr_crows_data = non_zero_crows.mutable_data<int64_t>(place);
-  int64_t* csr_cols_data = non_zero_cols.mutable_data<int64_t>(place);
-  T* csr_values_data = non_zero_elements.mutable_data<T>(place);
+  phi::DenseTensor non_zero_crows;
+  non_zero_crows.Resize({batchs * (rows + 1)});
+  int64_t* csr_crows_data = dev_ctx.template Alloc<int64_t>(&non_zero_crows);
+
+  phi::DenseTensor non_zero_cols;
+  non_zero_cols.Resize({non_zero_num});
+  int64_t* csr_cols_data = dev_ctx.template Alloc<int64_t>(&non_zero_cols);
+
+  phi::DenseTensor non_zero_elements;
+  non_zero_elements.Resize({non_zero_num});
+  T* csr_values_data = dev_ctx.template Alloc<T>(&non_zero_elements);
 
   const auto& coo_indices = x.non_zero_indices();
   const auto& coo_values = x.non_zero_elements();
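Note: the CPU kernel above still spells out Resize + Alloc per tensor; the GPU hunks below compress the same thing into phi::Empty, which creates the DenseTensor and allocates it on the context's place in one call. Sketch of that idiom (types mirror the hunks below):

    phi::DenseTensor crows = phi::Empty<int64_t>(dev_ctx, {batchs * (rows + 1)});
    int64_t* crows_data = crows.data<int64_t>();  // already allocated; no place argument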
diff --git a/paddle/phi/kernels/sparse/gpu/sparse_utils_kernel.cu b/paddle/phi/kernels/sparse/gpu/sparse_utils_kernel.cu
index 960d7eab26463..ff2647de731d7 100644
--- a/paddle/phi/kernels/sparse/gpu/sparse_utils_kernel.cu
+++ b/paddle/phi/kernels/sparse/gpu/sparse_utils_kernel.cu
@@ -173,20 +173,12 @@ void DenseToSparseCooKernel(const Context& dev_ctx,
 
   const auto values_dims =
       phi::funcs::sparse::InferDenseDims(x_dims, sparse_dim, non_zero_num);
-  DenseTensorMeta indices_meta(DataType::INT64,
-                               {sparse_dim, static_cast<int64_t>(non_zero_num)},
-                               DataLayout::NCHW);
-  DenseTensorMeta values_meta(x.meta().dtype, values_dims, x.meta().layout);
-  phi::DenseTensor indices(
-      phi::make_intrusive<paddle::experimental::SharedStorage>(
-          dev_ctx.GetPlace()),
-      std::move(indices_meta));
-  phi::DenseTensor values(
-      phi::make_intrusive<paddle::experimental::SharedStorage>(
-          dev_ctx.GetPlace()),
-      std::move(values_meta));
-  int64_t* indices_data = indices.mutable_data<int64_t>(place);
-  T* sparse_data = values.mutable_data<T>(place);
+  phi::DenseTensor indices = phi::Empty<int64_t>(
+      dev_ctx, {sparse_dim, static_cast<int64_t>(non_zero_num)});
+  int64_t* indices_data = indices.data<int64_t>();
+  phi::DenseTensor values;
+  values.Resize(values_dims);
+  T* sparse_data = dev_ctx.template Alloc<T>(&values);
 
   // 3. calc indices by indexs and get values by indexs
   config = phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, non_zero_num, 1);
@@ -382,24 +374,13 @@ void SparseCooToCsrKernel(const Context& dev_ctx,
   int batchs = x_dims.size() == 2 ? 1 : x_dims[0];
   int rows = x_dims.size() == 2 ? x_dims[0] : x_dims[1];
 
-  const auto place = dev_ctx.GetPlace();
-  DenseTensorMeta crows_meta(
-      DataType::INT64, {batchs * (rows + 1)}, DataLayout::NCHW);
-  DenseTensorMeta cols_meta(DataType::INT64, {non_zero_num}, DataLayout::NCHW);
-  DenseTensorMeta values_meta(
-      x.dtype(), {non_zero_num}, x.non_zero_elements().layout());
-  phi::DenseTensor non_zero_crows(
-      phi::make_intrusive<paddle::experimental::SharedStorage>(place),
-      std::move(crows_meta));
-  phi::DenseTensor non_zero_cols(
-      phi::make_intrusive<paddle::experimental::SharedStorage>(place),
-      std::move(cols_meta));
-  phi::DenseTensor non_zero_elements(
-      phi::make_intrusive<paddle::experimental::SharedStorage>(place),
-      std::move(values_meta));
-  int64_t* csr_crows_data = non_zero_crows.mutable_data<int64_t>(place);
-  int64_t* csr_cols_data = non_zero_cols.mutable_data<int64_t>(place);
-  T* csr_values_data = non_zero_elements.mutable_data<T>(place);
+  phi::DenseTensor non_zero_crows =
+      phi::Empty<int64_t>(dev_ctx, {batchs * (rows + 1)});
+  phi::DenseTensor non_zero_cols = phi::Empty<int64_t>(dev_ctx, {non_zero_num});
+  phi::DenseTensor non_zero_elements = phi::Empty<T>(dev_ctx, {non_zero_num});
+  int64_t* csr_crows_data = non_zero_crows.data<int64_t>();
+  int64_t* csr_cols_data = non_zero_cols.data<int64_t>();
+  T* csr_values_data = non_zero_elements.data<T>();
 
   const auto& coo_indices = x.non_zero_indices();
   const auto& coo_values = x.non_zero_elements();
@@ -416,10 +397,8 @@ void SparseCooToCsrKernel(const Context& dev_ctx,
   auto config = phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, batchs, 1);
   if (batchs > 1) {
     DenseTensorMeta batchs_meta(DataType::INT64, {batchs}, DataLayout::NCHW);
-    phi::DenseTensor batchs_offset(
-        phi::make_intrusive<paddle::experimental::SharedStorage>(place),
-        std::move(batchs_meta));
-    int64_t* batchs_offset_ptr = batchs_offset.mutable_data<int64_t>(place);
+    phi::DenseTensor batchs_offset = phi::Empty<int64_t>(dev_ctx, {batchs});
+    int64_t* batchs_offset_ptr = batchs_offset.data<int64_t>();
     GetBatchsOffset<<<config.block_per_grid.x,
                       config.thread_per_block.x,
                       0,
diff --git a/paddle/phi/tests/api/scale_api.h b/paddle/phi/tests/api/scale_api.h
--- a/paddle/phi/tests/api/scale_api.h
+++ b/paddle/phi/tests/api/scale_api.h
@@ ... @@ Tensor scale_kernel_context(const Tensor& x,
 
-  auto dense_out = std::make_shared<phi::DenseTensor>(
-      phi::make_intrusive<paddle::experimental::SharedStorage>(
-          phi::TransToPhiPlace(kernel_backend)),
-      phi::DenseTensorMeta());
+  auto dense_out = std::make_shared<phi::DenseTensor>();
   phi::MetaTensor meta_out(dense_out.get());
   phi::UnchangedInferMeta(*dense_x, &meta_out);
   kernel_context.EmplaceBackOutput(dense_out.get());
@@ -236,10 +233,7 @@ Tensor scale_switch_case(const Tensor& x,
 
   auto dense_x = std::dynamic_pointer_cast<phi::DenseTensor>(x.impl());
 
-  auto dense_out = std::make_shared<phi::DenseTensor>(
-      phi::make_intrusive<paddle::experimental::SharedStorage>(
-          phi::TransToPhiPlace(kernel_backend)),
-      phi::DenseTensorMeta());
+  auto dense_out = std::make_shared<phi::DenseTensor>();
   phi::MetaTensor meta_out(dense_out.get());
   phi::UnchangedInferMeta(*dense_x, &meta_out);
diff --git a/paddle/phi/tests/core/test_custom_kernel.cc b/paddle/phi/tests/core/test_custom_kernel.cc
index 2a5b8ec8fa000..634edaec96d29 100644
--- a/paddle/phi/tests/core/test_custom_kernel.cc
+++ b/paddle/phi/tests/core/test_custom_kernel.cc
@@ -264,10 +264,7 @@ TEST(CustomKernel, custom_kernel_dot) {
   kernel_context.EmplaceBackAttr(fake_attr_int64_vec);
   kernel_context.EmplaceBackAttr(fake_attr_int_vec);
 
-  auto dense_out = std::make_shared<phi::DenseTensor>(
-      phi::make_intrusive<paddle::experimental::SharedStorage>(
-          phi::TransToPhiPlace(backend)),
-      phi::DenseTensorMeta());
+  auto dense_out = std::make_shared<phi::DenseTensor>();
   phi::MetaTensor meta_out(dense_out.get());
   phi::DotInferMeta(*dense_x, *dense_y, &meta_out);
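Note: for outputs whose shape is produced by InferMeta, the empty-SharedStorage constructor calls collapse to a plain default-constructed DenseTensor, as in the last two files: InferMeta writes the metadata through MetaTensor, and the kernel allocates on first use. The resulting pattern (taken from the hunks above):

    auto dense_out = std::make_shared<phi::DenseTensor>();
    phi::MetaTensor meta_out(dense_out.get());
    phi::UnchangedInferMeta(*dense_x, &meta_out);        // fills dims/dtype
    kernel_context.EmplaceBackOutput(dense_out.get());   // kernel allocates later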