remove some [-Wunused-parameter] warnings #53681

Merged · 10 commits · May 15, 2023
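The changes below annotate intentionally unused parameters with Paddle's UNUSED macro so that builds with -Wunused-parameter stop flagging them. As a sketch of what such a macro typically expands to (an assumption for illustration; the authoritative definition lives in Paddle's own macros header):

// Sketch only: a GCC/Clang-style unused-parameter annotation.
// Paddle's real UNUSED macro may differ in detail.
#if defined(__GNUC__) || defined(__clang__)
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif

On C++17 toolchains the standard [[maybe_unused]] attribute serves the same purpose; a macro keeps the annotation portable across the compilers and CUDA toolchains the codebase targets.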
2 changes: 1 addition & 1 deletion paddle/fluid/operators/collective/send_v2_op.h
@@ -28,7 +28,7 @@ namespace operators {
template <typename T, typename DeviceContext>
class SendOpV2CPUKernel : public framework::OpKernel<T> {
public:
-void Compute(const framework::ExecutionContext& ctx) const override {
+void Compute(const framework::ExecutionContext& ctx UNUSED) const override {
PADDLE_THROW(platform::errors::Unavailable(
"Do not support send for cpu kernel now."));
}
4 changes: 2 additions & 2 deletions paddle/fluid/operators/fused/mkldnn/fusion_gru_mkldnn_op.cc
@@ -33,7 +33,7 @@ class GRUMKLDNNHandler : public RNNMKLDNNHandler<T, dnnl::gru_forward, T_out> {
GRUMKLDNNHandler(const paddle::framework::ExecutionContext& ctx,
const OneDNNContext& dev_ctx,
const dnnl::engine onednn_engine,
-platform::Place cpu_place,
+platform::Place cpu_place UNUSED,
const phi::DenseTensor* input,
const phi::DenseTensor* weight_h,
const phi::DenseTensor* h0,
@@ -42,7 +42,7 @@ class GRUMKLDNNHandler : public RNNMKLDNNHandler<T, dnnl::gru_forward, T_out> {
const int64_t Ti,
const int64_t IC,
const int64_t OC,
-const std::string& unique_name)
+const std::string& unique_name UNUSED)
: RNNMKLDNNHandler<T, dnnl::gru_forward, T_out>(
ctx,
dev_ctx,
6 changes: 3 additions & 3 deletions paddle/fluid/operators/fused/mkldnn/fusion_lstm_mkldnn_op.cc
@@ -33,17 +33,17 @@ class LSTMMKLDNNHandler
LSTMMKLDNNHandler(const paddle::framework::ExecutionContext& ctx,
const OneDNNContext& dev_ctx,
const dnnl::engine onednn_engine,
-platform::Place cpu_place,
+platform::Place cpu_place UNUSED,
const phi::DenseTensor* input,
const phi::DenseTensor* weight_h,
const phi::DenseTensor* h0,
-const phi::DenseTensor* c0,
+const phi::DenseTensor* c0 UNUSED,
const bool is_reverse,
const int64_t N,
const int64_t Ti,
const int64_t IC,
const int64_t OC,
-const std::string& unique_name)
+const std::string& unique_name UNUSED)
: RNNMKLDNNHandler<T, dnnl::lstm_forward, T_out>(
ctx,
dev_ctx,
2 changes: 1 addition & 1 deletion paddle/fluid/operators/memcpy_h2d_op.h
@@ -66,7 +66,7 @@ class MemcpyH2DFunctor {
}

template <typename T>
-void operator()(const T &v) const {
+void operator()(const T &v UNUSED) const {
PADDLE_ENFORCE_EQ(
true,
false,
2 changes: 1 addition & 1 deletion paddle/fluid/operators/memcpy_op.h
@@ -81,7 +81,7 @@ class MemcpyFunctor {
}

template <typename T>
-void operator()(const T &v) const {
+void operator()(const T &v UNUSED) const {
PADDLE_ENFORCE_EQ(
true,
false,
10 changes: 5 additions & 5 deletions paddle/phi/core/tensor_utils.cc
@@ -318,11 +318,11 @@ void Copy(const Context& dev_ctx,
}

template <typename Context>
-void Copy(const Context& dev_ctx,
-const TensorArray& src,
-Place dst_place,
-bool blocking,
-TensorArray* dst) {
+void Copy(const Context& dev_ctx UNUSED,
+const TensorArray& src UNUSED,
+Place dst_place UNUSED,
+bool blocking UNUSED,
+TensorArray* dst UNUSED) {
// NOTE(Ruibiao): implements Copy() for TensorArray when needed.
PADDLE_THROW(errors::Unimplemented("Copy for TensorArray is unimplemented."));
}
2 changes: 1 addition & 1 deletion paddle/phi/kernels/cpu/diag_grad_kernel.cc
@@ -23,7 +23,7 @@ namespace phi {

template <typename T, typename Context>
void DiagGradKernel(const Context& dev_ctx,
-const DenseTensor& x,
+const DenseTensor& x UNUSED,
const DenseTensor& out_grad,
int offset,
DenseTensor* x_grad) {
2 changes: 1 addition & 1 deletion paddle/phi/kernels/cpu/fill_diagonal_grad_kernel.cc
@@ -22,7 +22,7 @@ namespace phi {
template <typename T, typename Context>
void FillDiagonalGradKernel(const Context& ctx,
const DenseTensor& out_grad,
-float value,
+float value UNUSED,
int offset,
bool wrap,
DenseTensor* x_grad) {
2 changes: 1 addition & 1 deletion paddle/phi/kernels/cpu/gather_nd_grad_kernel.cc
@@ -23,7 +23,7 @@ namespace phi {

template <typename T, typename Context>
void GatherNdGradKernel(const Context &ctx,
-const DenseTensor &x,
+const DenseTensor &x UNUSED,
const DenseTensor &index,
const DenseTensor &out_grad,
DenseTensor *x_grad) {
2 changes: 1 addition & 1 deletion paddle/phi/kernels/cpu/index_add_grad_kernel.cc
@@ -23,7 +23,7 @@ namespace phi {
template <typename T, typename Context>
void IndexAddGradKernel(const Context& ctx,
const DenseTensor& index,
-const DenseTensor& add_value,
+const DenseTensor& add_value UNUSED,
const DenseTensor& out_grad,
int axis,
DenseTensor* x_grad,
2 changes: 1 addition & 1 deletion paddle/phi/kernels/cpu/index_select_grad_kernel.cc
@@ -22,7 +22,7 @@ namespace phi {

template <typename T, typename Context>
void IndexSelectGradKernel(const Context& ctx,
-const DenseTensor& x,
+const DenseTensor& x UNUSED,
const DenseTensor& index,
const DenseTensor& out_grad,
int dim,
2 changes: 1 addition & 1 deletion paddle/phi/kernels/cpu/mean_all_grad_kernel.cc
@@ -21,7 +21,7 @@ namespace phi {

template <typename T, typename Context>
void MeanAllGradKernel(const Context& dev_ctx,
-const DenseTensor& x,
+const DenseTensor& x UNUSED,
const DenseTensor& out_grad,
DenseTensor* x_grad) {
PADDLE_ENFORCE_EQ(out_grad.numel(),
4 changes: 2 additions & 2 deletions paddle/phi/kernels/cpu/put_along_axis_grad_kernel.cc
@@ -25,11 +25,11 @@ namespace phi {

template <typename T, typename Context>
void PutAlongAxisGradKernel(const Context& dev_ctx,
-const DenseTensor& x,
+const DenseTensor& x UNUSED,
const DenseTensor& index,
const DenseTensor& out_grad,
int axis,
-const std::string& reduce,
+const std::string& reduce UNUSED,
DenseTensor* x_grad,
DenseTensor* value_grad) {
PADDLE_ENFORCE_EQ(
2 changes: 1 addition & 1 deletion paddle/phi/kernels/cpu/scatter_nd_add_grad_kernel.cc
@@ -24,7 +24,7 @@ namespace phi {
template <typename T, typename Context>
void ScatterNdAddGradKernel(const Context &ctx,
const DenseTensor &index,
-const DenseTensor &updates,
+const DenseTensor &updates UNUSED,
const DenseTensor &out_grad,
DenseTensor *x_grad,
DenseTensor *updates_grad) {
30 changes: 20 additions & 10 deletions paddle/phi/kernels/impl/elementwise_grad_kernel_impl.h
@@ -362,13 +362,18 @@ void ElementwiseFMinGradKernel(const Context& dev_ctx,

template <typename T>
struct MulGradDX {
-HOSTDEVICE T operator()(T x, T y, T out, T dout) const { return dout * y; }
+HOSTDEVICE T operator()(T x UNUSED, T y, T out UNUSED, T dout) const {
+  return dout * y;
+}
};

// avoid [-Wint-in-bool-context] warning
template <>
struct MulGradDX<bool> {
-HOSTDEVICE bool operator()(bool x, bool y, bool out, bool dout) const {
+HOSTDEVICE bool operator()(bool x UNUSED,
+                           bool y,
+                           bool out UNUSED,
+                           bool dout) const {
return dout && y;
}
};
@@ -393,13 +398,18 @@ struct MulGradDX<phi::dtype::complex<T>> {

template <typename T>
struct MulGradDY {
-HOSTDEVICE T operator()(T x, T y, T out, T dout) const { return dout * x; }
+HOSTDEVICE T operator()(T x, T y UNUSED, T out UNUSED, T dout) const {
+  return dout * x;
+}
};

// avoid [-Wint-in-bool-context] warning
template <>
struct MulGradDY<bool> {
-HOSTDEVICE bool operator()(bool x, bool y, bool out, bool dout) const {
+HOSTDEVICE bool operator()(bool x,
+                           bool y UNUSED,
+                           bool out UNUSED,
+                           bool dout) const {
return dout && x;
}
};
@@ -824,14 +834,14 @@ void MultiplyTripleGradKernel(const Context& dev_ctx,

template <typename T>
struct MaxGradDx {
-HOSTDEVICE T operator()(T x, T y, T out, T dout) const {
+HOSTDEVICE T operator()(T x, T y, T out UNUSED, T dout) const {
return dout * static_cast<T>(x > y);
}
};

template <typename T>
struct MaxGradDy {
-HOSTDEVICE T operator()(T x, T y, T out, T dout) const {
+HOSTDEVICE T operator()(T x, T y, T out UNUSED, T dout) const {
return dout * static_cast<T>(x <= y);
}
};
@@ -843,14 +853,14 @@ struct MaxGradDy {
*/
template <typename T>
struct MinGradDx {
-HOSTDEVICE T operator()(T x, T y, T out, T dout) const {
+HOSTDEVICE T operator()(T x, T y, T out UNUSED, T dout) const {
return dout * static_cast<T>(x < y);
}
};

template <typename T>
struct MinGradDy {
-HOSTDEVICE T operator()(T x, T y, T out, T dout) const {
+HOSTDEVICE T operator()(T x, T y, T out UNUSED, T dout) const {
return dout * static_cast<T>(x >= y);
}
};
@@ -922,14 +932,14 @@ compute_pow_grad_dy(T x, T y, T out, T dout) {
}
#else
template <typename T, typename MPType>
-HOSTDEVICE T compute_pow_grad_dx(T x, T y, T out, T dout) {
+HOSTDEVICE T compute_pow_grad_dx(T x, T y, T out UNUSED, T dout) {
MPType x_val = static_cast<MPType>(x);
MPType y_val = static_cast<MPType>(y);
return static_cast<T>(static_cast<MPType>(dout) * y_val *
std::pow(x_val, y_val - 1));
}
template <typename T, typename MPType>
-HOSTDEVICE T compute_pow_grad_dy(T x, T y, T out, T dout) {
+HOSTDEVICE T compute_pow_grad_dy(T x, T y, T out UNUSED, T dout) {
MPType x_val = static_cast<MPType>(x);
MPType y_val = static_cast<MPType>(y);
return static_cast<T>(static_cast<MPType>(dout) * std::log(x_val) *
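For reference, a minimal standalone reproduction of the two warnings addressed in this file (a hypothetical repro.cc, not part of the patch; compile with g++ -Wunused-parameter -Wint-in-bool-context -c repro.cc):

// Stand-in for Paddle's macro, as assumed in the sketch above.
#define UNUSED __attribute__((unused))

template <typename T>
struct MaxGradDx {
  // Without UNUSED on 'out', -Wunused-parameter fires here.
  T operator()(T x, T y, T out UNUSED, T dout) const {
    return dout * static_cast<T>(x > y);
  }
};

// For bool, 'dout * y' would trip -Wint-in-bool-context,
// which is why the bool specializations use logical '&&'.
struct MulGradDXBool {  // hypothetical name, mirrors MulGradDX<bool>
  bool operator()(bool x UNUSED, bool y, bool out UNUSED, bool dout) const {
    return dout && y;
  }
};

// Explicit instantiation so the template body is actually compiled.
template struct MaxGradDx<float>;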
2 changes: 1 addition & 1 deletion paddle/phi/kernels/impl/eye_kernel_impl.h
@@ -37,7 +37,7 @@ template <typename T, typename Context>
void EyeKernel(const Context& ctx,
const Scalar& num_rows,
const Scalar& num_columns,
-DataType dtype,
+DataType dtype UNUSED,
DenseTensor* out) {
auto columns = num_columns.to<int64_t>();
auto rows = num_rows.to<int64_t>();
2 changes: 1 addition & 1 deletion paddle/phi/kernels/impl/fill_kernel_impl.h
@@ -24,7 +24,7 @@ namespace phi {

template <typename T, typename Context>
void FillKernel(const Context& dev_ctx,
-const DenseTensor& x,
+const DenseTensor& x UNUSED,
const Scalar& value,
DenseTensor* out) {
T fill_var = value.to<T>();
4 changes: 2 additions & 2 deletions paddle/phi/kernels/onednn/interpolate_kernel.cc
@@ -239,8 +239,8 @@ void NearestInterpKernel(
int out_w,
const std::vector<float>& scale,
const std::string& interp_method,
-bool align_corners,
-int align_mode,
+bool align_corners UNUSED,
+int align_mode UNUSED,
DenseTensor* output) {
InterpolateKernel<T, Context>(ctx,
x,
2 changes: 1 addition & 1 deletion paddle/phi/kernels/unsqueeze_kernel.h
@@ -39,7 +39,7 @@ void Unsqueeze(const Context& dev_ctx,
const DenseTensor& x,
const IntArray& axes,
DenseTensor* out,
-DenseTensor* xshape) {
+DenseTensor* xshape UNUSED) {
MetaTensor meta_out(out);
UnsqueezeInferMeta(x, axes, &meta_out);
UnsqueezeInferKernel<T, Context>(dev_ctx, x, axes, out);