Commit

Fix
co63oc committed May 22, 2024
1 parent a8e29ef commit 7a68d53
Showing 13 changed files with 391 additions and 307 deletions.
196 changes: 0 additions & 196 deletions paddle/fluid/operators/uniform_random_batch_size_like_op.cc

This file was deleted.

68 changes: 0 additions & 68 deletions paddle/fluid/operators/uniform_random_batch_size_like_op.cu

This file was deleted.

1 change: 0 additions & 1 deletion paddle/fluid/operators/uniform_random_inplace_op_xpu.cc
@@ -16,7 +16,6 @@ limitations under the License. */

#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/operators/uniform_random_op.h"
#include "paddle/phi/common/memory_utils.h"
#include "paddle/phi/core/generator.h"

21 changes: 21 additions & 0 deletions paddle/phi/infermeta/unary.cc
@@ -5381,6 +5381,27 @@ void UniformRandomInplaceInferMeta(const MetaTensor& x,
  out->set_dtype(x.dtype());
}

void UniformRandomBatchSizeLikeInferMeta(const MetaTensor& input,
                                         const std::vector<int>& shape,
                                         int input_dim_idx,
                                         int output_dim_idx,
                                         float min,
                                         float max,
                                         int seed,
                                         int diag_num,
                                         int diag_step,
                                         float diag_val,
                                         DataType dtype,
                                         MetaTensor* out,
                                         MetaConfig config) {
  // const std::vector<int64_t>& shape_data = shape.GetData();
  // std::vector<int> shape_data_new;
  // for (unsigned int i = 0; i < shape_data.size(); i++) {
  //   shape_data_new.push_back(static_cast<int>(shape_data[i]));
  // }
  phi::BatchSizeLikeInferMeta(input, shape, input_dim_idx, output_dim_idx, out);
}

void UniqueConsecutiveInferMeta(const MetaTensor& x,
bool return_inverse,
bool return_counts,
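For readers unfamiliar with the batch_size_like pattern, the sketch below illustrates the shape rule that BatchSizeLikeInferMeta is assumed to apply (an assumption based on the documented behavior of *_batch_size_like ops, not code taken from this commit): the requested shape is used as given, except that the dimension at output_dim_idx is replaced by the input's dimension at input_dim_idx. InferBatchSizeLikeShape is a hypothetical standalone helper for illustration only.

#include <cassert>
#include <cstdint>
#include <iostream>
#include <vector>

// Hypothetical illustration of the shape rule BatchSizeLikeInferMeta is
// assumed to apply: copy `shape`, then overwrite shape[output_dim_idx] with
// input_dims[input_dim_idx] so the output tracks the input's batch size.
std::vector<int64_t> InferBatchSizeLikeShape(
    const std::vector<int64_t>& input_dims,
    const std::vector<int>& shape,
    int input_dim_idx,
    int output_dim_idx) {
  assert(input_dim_idx >= 0 &&
         input_dim_idx < static_cast<int>(input_dims.size()));
  assert(output_dim_idx >= 0 &&
         output_dim_idx < static_cast<int>(shape.size()));
  std::vector<int64_t> out(shape.begin(), shape.end());
  out[output_dim_idx] = input_dims[input_dim_idx];
  return out;
}

int main() {
  // Input of shape [8, 32]; requested shape [-1, 100] with the batch
  // dimension copied from the input: the result is [8, 100].
  auto out = InferBatchSizeLikeShape({8, 32}, {-1, 100},
                                     /*input_dim_idx=*/0,
                                     /*output_dim_idx=*/0);
  for (auto d : out) std::cout << d << ' ';  // prints: 8 100
  std::cout << '\n';
  return 0;
}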
14 changes: 14 additions & 0 deletions paddle/phi/infermeta/unary.h
@@ -833,6 +833,20 @@ void UniformRandomInplaceInferMeta(const MetaTensor& x,
                                   float diag_val,
                                   MetaTensor* out);

void UniformRandomBatchSizeLikeInferMeta(const MetaTensor& input,
                                         const std::vector<int>& shape,
                                         int input_dim_idx,
                                         int output_dim_idx,
                                         float min,
                                         float max,
                                         int seed,
                                         int diag_num,
                                         int diag_step,
                                         float diag_val,
                                         DataType dtype,
                                         MetaTensor* out,
                                         MetaConfig config = MetaConfig());

void UniqueConsecutiveInferMeta(const MetaTensor& x,
bool return_inverse,
bool return_counts,
74 changes: 74 additions & 0 deletions paddle/phi/kernels/cpu/uniform_random_batch_size_like_kernel.cc
@@ -0,0 +1,74 @@
// Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/kernels/funcs/uniform_random_functor.h"

#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"

namespace phi {

// Eigen::Tensor::random appears to SEGFAULT on GPU, so std::random (on CPU)
// and thrust::random (thrust is a standard library shipped with CUDA) are
// used to implement uniform random sampling instead.
template <typename T, typename Context>
void CPUUniformRandomKernel(const Context& dev_ctx,
                            const DenseTensor& input,
                            const std::vector<int>& shape,
                            int input_dim_idx,
                            int output_dim_idx,
                            float min,
                            float max,
                            int seed,
                            int diag_num,
                            int diag_step,
                            float diag_val,
                            DataType dtype,
                            DenseTensor* out) {
  out->Resize(common::make_ddim(shape));
  T* data = dev_ctx.template Alloc<T>(out);
  int64_t size = out->numel();
  phi::funcs::UniformRealDistribution<T>(
      data, size, min, max, static_cast<unsigned int>(seed));

  unsigned int diag_num_tmp = static_cast<unsigned int>(diag_num);
  unsigned int diag_step_tmp = static_cast<unsigned int>(diag_step);
  auto diag_val_tmp = static_cast<T>(diag_val);
  if (diag_num_tmp > 0) {
    PADDLE_ENFORCE_GT(
        size,
        (diag_num_tmp - 1) * (diag_step_tmp + 1),
        phi::errors::InvalidArgument(
            "ShapeInvalid: the index of the last diagonal element, "
            "(diag_num - 1) * (diag_step + 1) with diag_num %d and "
            "diag_step %d, evaluates to %d and must be smaller than "
            "the tensor size, but the size is %d.",
            diag_num_tmp,
            diag_step_tmp,
            (diag_num_tmp - 1) * (diag_step_tmp + 1),
            size));
    for (int64_t i = 0; i < diag_num_tmp; ++i) {
      int64_t pos = i * diag_step_tmp + i;
      data[pos] = diag_val_tmp;
    }
  }
}
}  // namespace phi

PD_REGISTER_KERNEL(uniform_random_batch_size_like,
                   CPU,
                   ALL_LAYOUT,
                   phi::CPUUniformRandomKernel,
                   float,
                   double,
                   phi::dtype::bfloat16) {}
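To make the diag_num / diag_step / diag_val handling in the kernel above concrete: after the tensor is filled with uniform samples, the loop overwrites diag_num entries at positions i * diag_step + i, i.e. i * (diag_step + 1), with diag_val, and the PADDLE_ENFORCE_GT check guarantees that the last such position, (diag_num - 1) * (diag_step + 1), stays strictly below the tensor size. The snippet below is a minimal standalone sketch of that indexing; DiagPositions is a hypothetical helper, not part of the commit.

#include <cstdint>
#include <iostream>
#include <vector>

// Hypothetical helper mirroring the kernel's loop: the indices that receive
// diag_val are i * diag_step + i == i * (diag_step + 1).
std::vector<int64_t> DiagPositions(int diag_num, int diag_step) {
  std::vector<int64_t> pos;
  for (int64_t i = 0; i < diag_num; ++i) {
    pos.push_back(i * diag_step + i);
  }
  return pos;
}

int main() {
  // With diag_num = 3 and diag_step = 4 the overwritten indices are 0, 5, 10,
  // so the tensor must have more than (3 - 1) * (4 + 1) = 10 elements.
  for (auto p : DiagPositions(/*diag_num=*/3, /*diag_step=*/4)) {
    std::cout << p << ' ';  // prints: 0 5 10
  }
  std::cout << '\n';
  return 0;
}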