diff --git a/paddle/fluid/operators/collective/send_v2_op.h b/paddle/fluid/operators/collective/send_v2_op.h
index 047796dfe2430..7f51861008942 100644
--- a/paddle/fluid/operators/collective/send_v2_op.h
+++ b/paddle/fluid/operators/collective/send_v2_op.h
@@ -28,7 +28,7 @@ namespace operators {
 template <typename T>
 class SendOpV2CPUKernel : public framework::OpKernel<T> {
  public:
-  void Compute(const framework::ExecutionContext& ctx) const override {
+  void Compute(const framework::ExecutionContext& ctx UNUSED) const override {
     PADDLE_THROW(platform::errors::Unavailable(
         "Do not support send for cpu kernel now."));
   }
diff --git a/paddle/fluid/operators/fused/mkldnn/fusion_gru_mkldnn_op.cc b/paddle/fluid/operators/fused/mkldnn/fusion_gru_mkldnn_op.cc
index 806c883228035..05d1e64f92ae7 100644
--- a/paddle/fluid/operators/fused/mkldnn/fusion_gru_mkldnn_op.cc
+++ b/paddle/fluid/operators/fused/mkldnn/fusion_gru_mkldnn_op.cc
@@ -33,7 +33,7 @@ class GRUMKLDNNHandler : public RNNMKLDNNHandler {
   GRUMKLDNNHandler(const paddle::framework::ExecutionContext& ctx,
                    const OneDNNContext& dev_ctx,
                    const dnnl::engine onednn_engine,
-                   platform::Place cpu_place,
+                   platform::Place cpu_place UNUSED,
                    const phi::DenseTensor* input,
                    const phi::DenseTensor* weight_h,
                    const phi::DenseTensor* h0,
@@ -42,7 +42,7 @@ class GRUMKLDNNHandler : public RNNMKLDNNHandler {
                    const int64_t Ti,
                    const int64_t IC,
                    const int64_t OC,
-                   const std::string& unique_name)
+                   const std::string& unique_name UNUSED)
       : RNNMKLDNNHandler(
             ctx,
             dev_ctx,
diff --git a/paddle/fluid/operators/fused/mkldnn/fusion_lstm_mkldnn_op.cc b/paddle/fluid/operators/fused/mkldnn/fusion_lstm_mkldnn_op.cc
index bda5eab2d725f..d973c5e89a626 100644
--- a/paddle/fluid/operators/fused/mkldnn/fusion_lstm_mkldnn_op.cc
+++ b/paddle/fluid/operators/fused/mkldnn/fusion_lstm_mkldnn_op.cc
@@ -33,17 +33,17 @@ class LSTMMKLDNNHandler
   LSTMMKLDNNHandler(const paddle::framework::ExecutionContext& ctx,
                     const OneDNNContext& dev_ctx,
                     const dnnl::engine onednn_engine,
-                    platform::Place cpu_place,
+                    platform::Place cpu_place UNUSED,
                     const phi::DenseTensor* input,
                     const phi::DenseTensor* weight_h,
                     const phi::DenseTensor* h0,
-                    const phi::DenseTensor* c0,
+                    const phi::DenseTensor* c0 UNUSED,
                     const bool is_reverse,
                     const int64_t N,
                     const int64_t Ti,
                     const int64_t IC,
                     const int64_t OC,
-                    const std::string& unique_name)
+                    const std::string& unique_name UNUSED)
       : RNNMKLDNNHandler(
             ctx,
             dev_ctx,
diff --git a/paddle/fluid/operators/memcpy_h2d_op.h b/paddle/fluid/operators/memcpy_h2d_op.h
index 9946e80bbc30f..5f480461d77cd 100644
--- a/paddle/fluid/operators/memcpy_h2d_op.h
+++ b/paddle/fluid/operators/memcpy_h2d_op.h
@@ -66,7 +66,7 @@ class MemcpyH2DFunctor {
   }

   template <typename T>
-  void operator()(const T &v) const {
+  void operator()(const T &v UNUSED) const {
     PADDLE_ENFORCE_EQ(
         true,
         false,
diff --git a/paddle/fluid/operators/memcpy_op.h b/paddle/fluid/operators/memcpy_op.h
index 3c989b151a315..1cb077ee8a4b9 100644
--- a/paddle/fluid/operators/memcpy_op.h
+++ b/paddle/fluid/operators/memcpy_op.h
@@ -81,7 +81,7 @@ class MemcpyFunctor {
   }

   template <typename T>
-  void operator()(const T &v) const {
+  void operator()(const T &v UNUSED) const {
     PADDLE_ENFORCE_EQ(
         true,
         false,
diff --git a/paddle/phi/core/tensor_utils.cc b/paddle/phi/core/tensor_utils.cc
index c73d6dc369bb8..abe44d3e2550b 100644
--- a/paddle/phi/core/tensor_utils.cc
+++ b/paddle/phi/core/tensor_utils.cc
@@ -318,11 +318,11 @@ void Copy(const Context& dev_ctx,
 }

 template <typename Context>
-void Copy(const Context& dev_ctx,
-          const TensorArray& src,
-          Place dst_place,
-          bool blocking,
-          TensorArray* dst) {
+void Copy(const Context& dev_ctx UNUSED,
+          const TensorArray& src UNUSED,
+          Place dst_place UNUSED,
+          bool blocking UNUSED,
+          TensorArray* dst UNUSED) {
   // NOTE(Ruibiao): implements Copy() for TensorArray when needed.
   PADDLE_THROW(errors::Unimplemented("Copy for TensorArray is unimplemented."));
 }
diff --git a/paddle/phi/kernels/cpu/diag_grad_kernel.cc b/paddle/phi/kernels/cpu/diag_grad_kernel.cc
index 13d3d679ff006..5a2f15d11428a 100644
--- a/paddle/phi/kernels/cpu/diag_grad_kernel.cc
+++ b/paddle/phi/kernels/cpu/diag_grad_kernel.cc
@@ -23,7 +23,7 @@ namespace phi {

 template <typename T, typename Context>
 void DiagGradKernel(const Context& dev_ctx,
-                    const DenseTensor& x,
+                    const DenseTensor& x UNUSED,
                     const DenseTensor& out_grad,
                     int offset,
                     DenseTensor* x_grad) {
diff --git a/paddle/phi/kernels/cpu/fill_diagonal_grad_kernel.cc b/paddle/phi/kernels/cpu/fill_diagonal_grad_kernel.cc
index 0fe9c50dc15e8..204c544e2d95f 100644
--- a/paddle/phi/kernels/cpu/fill_diagonal_grad_kernel.cc
+++ b/paddle/phi/kernels/cpu/fill_diagonal_grad_kernel.cc
@@ -22,7 +22,7 @@ namespace phi {
 template <typename T, typename Context>
 void FillDiagonalGradKernel(const Context& ctx,
                             const DenseTensor& out_grad,
-                            float value,
+                            float value UNUSED,
                             int offset,
                             bool wrap,
                             DenseTensor* x_grad) {
diff --git a/paddle/phi/kernels/cpu/gather_nd_grad_kernel.cc b/paddle/phi/kernels/cpu/gather_nd_grad_kernel.cc
index 88a288afd318e..5aaec6f6139e5 100644
--- a/paddle/phi/kernels/cpu/gather_nd_grad_kernel.cc
+++ b/paddle/phi/kernels/cpu/gather_nd_grad_kernel.cc
@@ -23,7 +23,7 @@ namespace phi {

 template <typename T, typename Context>
 void GatherNdGradKernel(const Context &ctx,
-                        const DenseTensor &x,
+                        const DenseTensor &x UNUSED,
                         const DenseTensor &index,
                         const DenseTensor &out_grad,
                         DenseTensor *x_grad) {
diff --git a/paddle/phi/kernels/cpu/index_add_grad_kernel.cc b/paddle/phi/kernels/cpu/index_add_grad_kernel.cc
index 007d8927377f8..a60d52f2005a4 100644
--- a/paddle/phi/kernels/cpu/index_add_grad_kernel.cc
+++ b/paddle/phi/kernels/cpu/index_add_grad_kernel.cc
@@ -23,7 +23,7 @@ namespace phi {
 template <typename T, typename Context>
 void IndexAddGradKernel(const Context& ctx,
                         const DenseTensor& index,
-                        const DenseTensor& add_value,
+                        const DenseTensor& add_value UNUSED,
                         const DenseTensor& out_grad,
                         int axis,
                         DenseTensor* x_grad,
diff --git a/paddle/phi/kernels/cpu/index_select_grad_kernel.cc b/paddle/phi/kernels/cpu/index_select_grad_kernel.cc
index cf8176687eab2..fc2e88b2f9906 100644
--- a/paddle/phi/kernels/cpu/index_select_grad_kernel.cc
+++ b/paddle/phi/kernels/cpu/index_select_grad_kernel.cc
@@ -22,7 +22,7 @@ namespace phi {

 template <typename T, typename Context>
 void IndexSelectGradKernel(const Context& ctx,
-                           const DenseTensor& x,
+                           const DenseTensor& x UNUSED,
                            const DenseTensor& index,
                            const DenseTensor& out_grad,
                            int dim,
diff --git a/paddle/phi/kernels/cpu/mean_all_grad_kernel.cc b/paddle/phi/kernels/cpu/mean_all_grad_kernel.cc
index 00321b010598f..81556643284f0 100644
--- a/paddle/phi/kernels/cpu/mean_all_grad_kernel.cc
+++ b/paddle/phi/kernels/cpu/mean_all_grad_kernel.cc
@@ -21,7 +21,7 @@ namespace phi {

 template <typename T, typename Context>
 void MeanAllGradKernel(const Context& dev_ctx,
-                       const DenseTensor& x,
+                       const DenseTensor& x UNUSED,
                        const DenseTensor& out_grad,
                        DenseTensor* x_grad) {
   PADDLE_ENFORCE_EQ(out_grad.numel(),
diff --git a/paddle/phi/kernels/cpu/put_along_axis_grad_kernel.cc b/paddle/phi/kernels/cpu/put_along_axis_grad_kernel.cc
index 5dce970f32ef0..d44af05357a9a 100644
--- a/paddle/phi/kernels/cpu/put_along_axis_grad_kernel.cc
+++ b/paddle/phi/kernels/cpu/put_along_axis_grad_kernel.cc
@@ -25,11 +25,11 @@ namespace phi {

 template <typename T, typename Context>
 void PutAlongAxisGradKernel(const Context& dev_ctx,
-                            const DenseTensor& x,
+                            const DenseTensor& x UNUSED,
                             const DenseTensor& index,
                             const DenseTensor& out_grad,
                             int axis,
-                            const std::string& reduce,
+                            const std::string& reduce UNUSED,
                             DenseTensor* x_grad,
                             DenseTensor* value_grad) {
   PADDLE_ENFORCE_EQ(
diff --git a/paddle/phi/kernels/cpu/scatter_nd_add_grad_kernel.cc b/paddle/phi/kernels/cpu/scatter_nd_add_grad_kernel.cc
index 844e6370caf73..d8009bc85881c 100644
--- a/paddle/phi/kernels/cpu/scatter_nd_add_grad_kernel.cc
+++ b/paddle/phi/kernels/cpu/scatter_nd_add_grad_kernel.cc
@@ -24,7 +24,7 @@ namespace phi {
 template <typename T, typename Context>
 void ScatterNdAddGradKernel(const Context &ctx,
                             const DenseTensor &index,
-                            const DenseTensor &updates,
+                            const DenseTensor &updates UNUSED,
                             const DenseTensor &out_grad,
                             DenseTensor *x_grad,
                             DenseTensor *updates_grad) {
diff --git a/paddle/phi/kernels/impl/elementwise_grad_kernel_impl.h b/paddle/phi/kernels/impl/elementwise_grad_kernel_impl.h
index 15f99a58fa5a5..e4a29716e7db4 100644
--- a/paddle/phi/kernels/impl/elementwise_grad_kernel_impl.h
+++ b/paddle/phi/kernels/impl/elementwise_grad_kernel_impl.h
@@ -362,13 +362,18 @@ void ElementwiseFMinGradKernel(const Context& dev_ctx,

 template <typename T>
 struct MulGradDX {
-  HOSTDEVICE T operator()(T x, T y, T out, T dout) const { return dout * y; }
+  HOSTDEVICE T operator()(T x UNUSED, T y, T out UNUSED, T dout) const {
+    return dout * y;
+  }
 };

 // avoid [-Wint-in-bool-context] warning
 template <>
 struct MulGradDX<bool> {
-  HOSTDEVICE bool operator()(bool x, bool y, bool out, bool dout) const {
+  HOSTDEVICE bool operator()(bool x UNUSED,
+                             bool y,
+                             bool out UNUSED,
+                             bool dout) const {
     return dout && y;
   }
 };
@@ -393,13 +398,18 @@ struct MulGradDX<phi::dtype::complex<T>> {

 template <typename T>
 struct MulGradDY {
-  HOSTDEVICE T operator()(T x, T y, T out, T dout) const { return dout * x; }
+  HOSTDEVICE T operator()(T x, T y UNUSED, T out UNUSED, T dout) const {
+    return dout * x;
+  }
 };

 // avoid [-Wint-in-bool-context] warning
 template <>
 struct MulGradDY<bool> {
-  HOSTDEVICE bool operator()(bool x, bool y, bool out, bool dout) const {
+  HOSTDEVICE bool operator()(bool x,
+                             bool y UNUSED,
+                             bool out UNUSED,
+                             bool dout) const {
     return dout && x;
   }
 };
@@ -824,14 +834,14 @@ void MultiplyTripleGradKernel(const Context& dev_ctx,

 template <typename T>
 struct MaxGradDx {
-  HOSTDEVICE T operator()(T x, T y, T out, T dout) const {
+  HOSTDEVICE T operator()(T x, T y, T out UNUSED, T dout) const {
     return dout * static_cast<T>(x > y);
   }
 };

 template <typename T>
 struct MaxGradDy {
-  HOSTDEVICE T operator()(T x, T y, T out, T dout) const {
+  HOSTDEVICE T operator()(T x, T y, T out UNUSED, T dout) const {
     return dout * static_cast<T>(x <= y);
   }
 };
@@ -843,14 +853,14 @@ struct MaxGradDy {
 */
 template <typename T>
 struct MinGradDx {
-  HOSTDEVICE T operator()(T x, T y, T out, T dout) const {
+  HOSTDEVICE T operator()(T x, T y, T out UNUSED, T dout) const {
     return dout * static_cast<T>(x < y);
   }
 };

 template <typename T>
 struct MinGradDy {
-  HOSTDEVICE T operator()(T x, T y, T out, T dout) const {
+  HOSTDEVICE T operator()(T x, T y, T out UNUSED, T dout) const {
     return dout * static_cast<T>(x >= y);
   }
 };
@@ -922,14 +932,14 @@ compute_pow_grad_dy(T x, T y, T out, T dout) {
 }
 #else
 template <typename T, typename MPType>
-HOSTDEVICE T compute_pow_grad_dx(T x, T y, T out, T dout) {
+HOSTDEVICE T compute_pow_grad_dx(T x, T y, T out UNUSED, T dout) {
   MPType x_val = static_cast<MPType>(x);
   MPType y_val = static_cast<MPType>(y);
   return static_cast<T>(static_cast<MPType>(dout) * y_val *
                         std::pow(x_val, y_val - 1));
 }
 template <typename T, typename MPType>
-HOSTDEVICE T compute_pow_grad_dy(T x, T y, T out, T dout) {
+HOSTDEVICE T compute_pow_grad_dy(T x, T y, T out UNUSED, T dout) {
   MPType x_val = static_cast<MPType>(x);
   MPType y_val = static_cast<MPType>(y);
   return static_cast<T>(static_cast<MPType>(dout) * std::log(x_val) *
diff --git a/paddle/phi/kernels/impl/eye_kernel_impl.h b/paddle/phi/kernels/impl/eye_kernel_impl.h
index 2d373f99a277a..c93ac260dd874 100644
--- a/paddle/phi/kernels/impl/eye_kernel_impl.h
+++ b/paddle/phi/kernels/impl/eye_kernel_impl.h
@@ -37,7 +37,7 @@ template <typename T, typename Context>
 void EyeKernel(const Context& ctx,
                const Scalar& num_rows,
                const Scalar& num_columns,
-               DataType dtype,
+               DataType dtype UNUSED,
                DenseTensor* out) {
   auto columns = num_columns.to<int64_t>();
   auto rows = num_rows.to<int64_t>();
diff --git a/paddle/phi/kernels/impl/fill_kernel_impl.h b/paddle/phi/kernels/impl/fill_kernel_impl.h
index 7d10ea42bd6b6..6894204cd06a4 100644
--- a/paddle/phi/kernels/impl/fill_kernel_impl.h
+++ b/paddle/phi/kernels/impl/fill_kernel_impl.h
@@ -24,7 +24,7 @@ namespace phi {

 template <typename T, typename Context>
 void FillKernel(const Context& dev_ctx,
-                const DenseTensor& x,
+                const DenseTensor& x UNUSED,
                 const Scalar& value,
                 DenseTensor* out) {
   T fill_var = value.to<T>();
diff --git a/paddle/phi/kernels/onednn/interpolate_kernel.cc b/paddle/phi/kernels/onednn/interpolate_kernel.cc
index b9a8672d792ce..508cdcc329a2e 100644
--- a/paddle/phi/kernels/onednn/interpolate_kernel.cc
+++ b/paddle/phi/kernels/onednn/interpolate_kernel.cc
@@ -239,8 +239,8 @@ void NearestInterpKernel(
     int out_w,
     const std::vector<float>& scale,
     const std::string& interp_method,
-    bool align_corners,
-    int align_mode,
+    bool align_corners UNUSED,
+    int align_mode UNUSED,
     DenseTensor* output) {
   InterpolateKernel<T, Context>(ctx,
                                 x,
diff --git a/paddle/phi/kernels/unsqueeze_kernel.h b/paddle/phi/kernels/unsqueeze_kernel.h
index bb190e14179c0..e6ad4dc75d3d5 100644
--- a/paddle/phi/kernels/unsqueeze_kernel.h
+++ b/paddle/phi/kernels/unsqueeze_kernel.h
@@ -39,7 +39,7 @@ void Unsqueeze(const Context& dev_ctx,
                const DenseTensor& x,
                const IntArray& axes,
                DenseTensor* out,
-               DenseTensor* xshape) {
+               DenseTensor* xshape UNUSED) {
   MetaTensor meta_out(out);
   UnsqueezeInferMeta(x, axes, &meta_out);
   UnsqueezeInferKernel<T, Context>(dev_ctx, x, axes, out);
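Reviewer note: this is a pure annotation pass; no parameter is removed, so every signature stays source-compatible for callers. For context, below is a minimal sketch of how an `UNUSED` parameter macro of this kind is commonly defined. It is an assumption for illustration only, not the definition Paddle actually ships, which lives in a shared macros header; the function name `Example` is likewise hypothetical.

// Hypothetical sketch: silences -Wunused-parameter on GCC/Clang and
// expands to nothing on other compilers. Paddle's real definition may differ.
#if defined(__GNUC__) || defined(__clang__)
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif

// Usage mirrors the pattern in the diff above: the attribute follows the
// parameter name that is intentionally ignored by the function body.
void Example(int value UNUSED) { /* value is deliberately not read */ }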