From c6559bd95aa1d8d3efc8aef9e780771fc5627ce5 Mon Sep 17 00:00:00 2001
From: Yangrl <2535184404@qq.com>
Date: Fri, 5 May 2023 11:09:16 +0000
Subject: [PATCH 1/6] test,test=develop

---
 paddle/fluid/framework/CMakeLists.txt | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/paddle/fluid/framework/CMakeLists.txt b/paddle/fluid/framework/CMakeLists.txt
index a96cb146f6833..b95d996b88a94 100755
--- a/paddle/fluid/framework/CMakeLists.txt
+++ b/paddle/fluid/framework/CMakeLists.txt
@@ -928,9 +928,7 @@ if(WITH_DISTRIBUTE)
       fleet_executor)
   endif()
 elseif(WITH_PSLIB)
-  set(DISTRIBUTE_COMPILE_FLAGS
-      "-Wno-non-virtual-dtor -Wno-error=non-virtual-dtor -Wno-error=delete-non-virtual-dtor"
-  )
+  set(DISTRIBUTE_COMPILE_FLAGS "")
   if(CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 7.0)
     set(DISTRIBUTE_COMPILE_FLAGS "${DISTRIBUTE_COMPILE_FLAGS} -faligned-new")
   endif()

From f06f0fd69262b7079d722cb3edec1d08cdf011db Mon Sep 17 00:00:00 2001
From: Yangrl <2535184404@qq.com>
Date: Fri, 5 May 2023 11:42:07 +0000
Subject: [PATCH 2/6] test,test=develop

---
 .../fluid/distributed/ps/table/common_graph_table.h | 11 ++++++-----
 paddle/phi/backends/onednn/onednn_reuse.h | 2 +-
 paddle/phi/core/utils/unroll_array_ops.h | 5 +++--
 paddle/phi/kernels/complex_kernel.h | 2 +-
 paddle/phi/kernels/cpu/cross_entropy_grad_kernel.cc | 2 +-
 paddle/phi/kernels/cpu/full_kernel.cc | 6 +++---
 paddle/phi/kernels/cpu/graph_send_recv_funcs.h | 2 +-
 paddle/phi/kernels/full_kernel.cc | 2 +-
 paddle/phi/kernels/funcs/gather_scatter_functor.cc | 4 ++--
 paddle/phi/kernels/funcs/math_function.cc | 2 +-
 paddle/phi/kernels/funcs/reduce_functor.h | 2 +-
 paddle/phi/kernels/funcs/segment_pooling.cc | 4 ++--
 paddle/phi/kernels/funcs/unique_functor.h | 2 +-
 paddle/phi/kernels/impl/gumbel_softmax_kernel_impl.h | 2 +-
 paddle/phi/kernels/impl/lerp_grad_kernel_impl.h | 4 ++--
 paddle/phi/kernels/reverse_kernel.cc | 2 +-
 paddle/phi/kernels/sparse/cpu/full_kernel.cc | 2 +-
 paddle/phi/kernels/sparse/sparse_utils_kernel.h | 4 ++--
 18 files changed, 31 insertions(+), 29 deletions(-)

diff --git a/paddle/fluid/distributed/ps/table/common_graph_table.h b/paddle/fluid/distributed/ps/table/common_graph_table.h
index f5288a3f90b12..142327368281a 100644
--- a/paddle/fluid/distributed/ps/table/common_graph_table.h
+++ b/paddle/fluid/distributed/ps/table/common_graph_table.h
@@ -511,7 +511,7 @@ class GraphTable : public Table {
   }
   virtual ~GraphTable();
 
-  virtual void *GetShard(size_t shard_idx) { return 0; }
+  virtual void *GetShard(size_t shard_idx UNUSED) { return 0; }
 
   static int32_t sparse_local_shard_num(uint32_t shard_num,
                                         uint32_t server_num) {
@@ -624,15 +624,16 @@ class GraphTable : public Table {
   Node *find_node(GraphTableType table_type, int idx, uint64_t id);
   Node *find_node(GraphTableType table_type, uint64_t id);
 
-  virtual int32_t Pull(TableContext &context) { return 0; }  // NOLINT
-  virtual int32_t Push(TableContext &context) { return 0; }  // NOLINT
+  virtual int32_t Pull(TableContext &context UNUSED) { return 0; }  // NOLINT
+  virtual int32_t Push(TableContext &context UNUSED) { return 0; }  // NOLINT
 
   virtual int32_t clear_nodes(GraphTableType table_type, int idx);
   virtual void Clear() {}
   virtual int32_t Flush() { return 0; }
-  virtual int32_t Shrink(const std::string &param) { return 0; }
+  virtual int32_t Shrink(const std::string &param UNUSED) { return 0; }
   // 指定保存路径
-  virtual int32_t Save(const std::string &path, const std::string &converter) {
+  virtual int32_t Save(const std::string &path UNUSED,
+                       const std::string &converter UNUSED) {
     return 0;
   }
virtual int32_t InitializeShard() { return 0; } diff --git a/paddle/phi/backends/onednn/onednn_reuse.h b/paddle/phi/backends/onednn/onednn_reuse.h index c7dcdea0e487c..463c55a7c5317 100644 --- a/paddle/phi/backends/onednn/onednn_reuse.h +++ b/paddle/phi/backends/onednn/onednn_reuse.h @@ -1178,7 +1178,7 @@ class ReductionOneDNNHandler const dnnl::engine engine, Place cpu_place, const DenseTensor* x, - const DenseTensor* out, + const DenseTensor* out UNUSED, std::vector out_tz, const dnnl::primitive_attr& attrs = NULL) : OneDNNHandlerNoCachingT(engine, cpu_place) { diff --git a/paddle/phi/core/utils/unroll_array_ops.h b/paddle/phi/core/utils/unroll_array_ops.h index 2e1d84080af48..665c2ae512829 100644 --- a/paddle/phi/core/utils/unroll_array_ops.h +++ b/paddle/phi/core/utils/unroll_array_ops.h @@ -87,7 +87,8 @@ struct UnrollCompare { template struct UnrollCompare { template - HOSTDEVICE inline constexpr static bool Run(const T *d1, const T *d2) { + HOSTDEVICE inline constexpr static bool Run(const T *d1 UNUSED, + const T *d2 UNUSED) { return true; } }; @@ -104,7 +105,7 @@ struct UnrollProduct { template struct UnrollProduct { template - HOSTDEVICE inline constexpr static T Run(const T *d) { + HOSTDEVICE inline constexpr static T Run(const T *d UNUSED) { return 1; } }; diff --git a/paddle/phi/kernels/complex_kernel.h b/paddle/phi/kernels/complex_kernel.h index ad66b890b3d5a..4f1a1c40402de 100644 --- a/paddle/phi/kernels/complex_kernel.h +++ b/paddle/phi/kernels/complex_kernel.h @@ -58,7 +58,7 @@ template < std::enable_if_t>::value && !std::is_same>::value, bool> = true> -DenseTensor Conj(const Context& dev_ctx, const DenseTensor& x) { +DenseTensor Conj(const Context& dev_ctx UNUSED, const DenseTensor& x) { return x; } diff --git a/paddle/phi/kernels/cpu/cross_entropy_grad_kernel.cc b/paddle/phi/kernels/cpu/cross_entropy_grad_kernel.cc index 1499f01002b6b..eacf1ad0cf3ad 100644 --- a/paddle/phi/kernels/cpu/cross_entropy_grad_kernel.cc +++ b/paddle/phi/kernels/cpu/cross_entropy_grad_kernel.cc @@ -30,7 +30,7 @@ void CrossEntropyWithSoftmaxGradCPUKernel(const CPUContext& dev_ctx, const DenseTensor& loss_grad, bool soft_label, bool use_softmax, - bool numeric_stable_mode, + bool numeric_stable_mode UNUSED, int ignore_index, int axis, DenseTensor* logits_grad) { diff --git a/paddle/phi/kernels/cpu/full_kernel.cc b/paddle/phi/kernels/cpu/full_kernel.cc index d9ab771664a8f..a295bfcc20c3a 100644 --- a/paddle/phi/kernels/cpu/full_kernel.cc +++ b/paddle/phi/kernels/cpu/full_kernel.cc @@ -32,7 +32,7 @@ template void FullKernel(const Context& dev_ctx, const IntArray& shape, const Scalar& val, - DataType dtype, + DataType dtype UNUSED, DenseTensor* out) { out->Resize(phi::make_ddim(shape.GetData())); FullValue(dev_ctx, out, val.to()); @@ -40,9 +40,9 @@ void FullKernel(const Context& dev_ctx, template void FullLikeKernel(const Context& dev_ctx, - const DenseTensor& x, + const DenseTensor& x UNUSED, const Scalar& val, - DataType dtype, + DataType dtype UNUSED, DenseTensor* out) { auto value = val.to(); using CommonType = typename std::common_type< diff --git a/paddle/phi/kernels/cpu/graph_send_recv_funcs.h b/paddle/phi/kernels/cpu/graph_send_recv_funcs.h index df6d9c87be0ed..c67480cc9e33e 100644 --- a/paddle/phi/kernels/cpu/graph_send_recv_funcs.h +++ b/paddle/phi/kernels/cpu/graph_send_recv_funcs.h @@ -25,7 +25,7 @@ namespace phi { template struct GraphSendRecvSumFunctor { - void operator()(const bool& first_flag, + void operator()(const bool& first_flag UNUSED, const DenseTensor& src_slice, DenseTensor* 
dst_slice) { auto eigen_src = phi::EigenVector::Flatten(src_slice); diff --git a/paddle/phi/kernels/full_kernel.cc b/paddle/phi/kernels/full_kernel.cc index ce898210633b7..38beafbfa51b9 100644 --- a/paddle/phi/kernels/full_kernel.cc +++ b/paddle/phi/kernels/full_kernel.cc @@ -21,7 +21,7 @@ namespace phi { template void FullBatchSizeLikeKernel(const Context& dev_ctx, const DenseTensor& x, - const std::vector& shape, + const std::vector& shape UNUSED, const Scalar& val, DataType dtype, int x_batch_size_dim, diff --git a/paddle/phi/kernels/funcs/gather_scatter_functor.cc b/paddle/phi/kernels/funcs/gather_scatter_functor.cc index 57a8d679f346f..e88dbf0f7ccdb 100644 --- a/paddle/phi/kernels/funcs/gather_scatter_functor.cc +++ b/paddle/phi/kernels/funcs/gather_scatter_functor.cc @@ -181,11 +181,11 @@ void cpu_scatter_mul_kernel(phi::DenseTensor self, } template -void cpu_scatter_input_grad_kernel(phi::DenseTensor self, +void cpu_scatter_input_grad_kernel(phi::DenseTensor self UNUSED, int dim, const phi::DenseTensor& index, phi::DenseTensor output, - const phi::DeviceContext& ctx) { + const phi::DeviceContext& ctx UNUSED) { auto* index_data = index.data(); auto* output_data = output.data(); diff --git a/paddle/phi/kernels/funcs/math_function.cc b/paddle/phi/kernels/funcs/math_function.cc index 6e54718afb542..c9d0f17e6d8dc 100644 --- a/paddle/phi/kernels/funcs/math_function.cc +++ b/paddle/phi/kernels/funcs/math_function.cc @@ -92,7 +92,7 @@ DEFINE_CPU_TRANS(6); template void TransposeNormal::operator()( - const DeviceContext& context, + const DeviceContext& context UNUSED, const phi::DenseTensor& in, phi::DenseTensor* out, const std::vector& axis) { diff --git a/paddle/phi/kernels/funcs/reduce_functor.h b/paddle/phi/kernels/funcs/reduce_functor.h index 80217520b126f..e5e440d64d541 100644 --- a/paddle/phi/kernels/funcs/reduce_functor.h +++ b/paddle/phi/kernels/funcs/reduce_functor.h @@ -41,7 +41,7 @@ struct FrobeniusNormGradFunctor { DX* dx, DY* dy, const Dim& dim, - int size) { + int size UNUSED) { dx->device(place) = y->broadcast(dim); dx->device(place) = *dx + dx->constant(1e-12f); dx->device(place) = (*x / *dx) * (dy->broadcast(dim)); diff --git a/paddle/phi/kernels/funcs/segment_pooling.cc b/paddle/phi/kernels/funcs/segment_pooling.cc index c5d0944d8c190..9dd800aba1a7b 100644 --- a/paddle/phi/kernels/funcs/segment_pooling.cc +++ b/paddle/phi/kernels/funcs/segment_pooling.cc @@ -31,7 +31,7 @@ class SegmentPoolFunctor { const DenseTensor& input, const DenseTensor& segments, DenseTensor* output, - DenseTensor* index, + DenseTensor* index UNUSED, const std::string pooltype = "SUM") { const IndexT* segment_ids = segments.data(); auto curent_id = segment_ids[0]; @@ -90,7 +90,7 @@ class SegmentPoolGradFunctor { const DenseTensor& out_grad, const DenseTensor& segments, DenseTensor* in_grad, - const paddle::optional& index, + const paddle::optional& index UNUSED, const std::string pooltype = "SUM") { const IndexT* segment_ids = segments.data(); auto& place = *dev_ctx.eigen_device(); diff --git a/paddle/phi/kernels/funcs/unique_functor.h b/paddle/phi/kernels/funcs/unique_functor.h index d704d2d60fa8d..806d7cca84851 100644 --- a/paddle/phi/kernels/funcs/unique_functor.h +++ b/paddle/phi/kernels/funcs/unique_functor.h @@ -190,7 +190,7 @@ static void UniqueFlattendTensor(const Context& context, } template -static ForwardIt UniqueDimImpl(const Context& context, +static ForwardIt UniqueDimImpl(const Context& context UNUSED, ForwardIt first, ForwardIt last, const std::vector& sorted_indices_vec, diff --git 
a/paddle/phi/kernels/impl/gumbel_softmax_kernel_impl.h b/paddle/phi/kernels/impl/gumbel_softmax_kernel_impl.h index c2229b50deee1..184d149110e9b 100644 --- a/paddle/phi/kernels/impl/gumbel_softmax_kernel_impl.h +++ b/paddle/phi/kernels/impl/gumbel_softmax_kernel_impl.h @@ -27,7 +27,7 @@ namespace phi { template struct ArgMaxFunctor { - void operator()(const Context& ctx, + void operator()(const Context& ctx UNUSED, const DenseTensor& in, DenseTensor* index_tensor, const int64_t& axis) { diff --git a/paddle/phi/kernels/impl/lerp_grad_kernel_impl.h b/paddle/phi/kernels/impl/lerp_grad_kernel_impl.h index 541de0cc162cc..e9c54c7aca81a 100644 --- a/paddle/phi/kernels/impl/lerp_grad_kernel_impl.h +++ b/paddle/phi/kernels/impl/lerp_grad_kernel_impl.h @@ -21,8 +21,8 @@ namespace phi { template static void LerpGradFunction(const Context& ctx, - const DenseTensor& x, - const DenseTensor& y, + const DenseTensor& x UNUSED, + const DenseTensor& y UNUSED, const DenseTensor& weight, const DenseTensor& out, const DenseTensor& out_grad, diff --git a/paddle/phi/kernels/reverse_kernel.cc b/paddle/phi/kernels/reverse_kernel.cc index b2fe61ad41fc6..771acacedf024 100644 --- a/paddle/phi/kernels/reverse_kernel.cc +++ b/paddle/phi/kernels/reverse_kernel.cc @@ -23,7 +23,7 @@ namespace phi { template void ReverseArrayKernel(const Context& dev_ctx, const TensorArray& x, - const IntArray& axis, + const IntArray& axis UNUSED, TensorArray* out) { PADDLE_ENFORCE_EQ( x.size(), diff --git a/paddle/phi/kernels/sparse/cpu/full_kernel.cc b/paddle/phi/kernels/sparse/cpu/full_kernel.cc index ac13327caeeaa..5659bcf2159da 100644 --- a/paddle/phi/kernels/sparse/cpu/full_kernel.cc +++ b/paddle/phi/kernels/sparse/cpu/full_kernel.cc @@ -34,7 +34,7 @@ template void FullLikeCooKernel(const Context& dev_ctx, const SparseCooTensor& x, const Scalar& val, - DataType dtype, + DataType dtype UNUSED, SparseCooTensor* out) { phi::Copy(dev_ctx, x.non_zero_indices(), diff --git a/paddle/phi/kernels/sparse/sparse_utils_kernel.h b/paddle/phi/kernels/sparse/sparse_utils_kernel.h index 8639f91469454..241f3d8b0670a 100644 --- a/paddle/phi/kernels/sparse/sparse_utils_kernel.h +++ b/paddle/phi/kernels/sparse/sparse_utils_kernel.h @@ -144,14 +144,14 @@ DenseTensor CsrToDense(const Context& dev_ctx, const SparseCsrTensor& x) { } template -void ValuesCooKernel(const Context& dev_ctx, +void ValuesCooKernel(const Context& dev_ctx UNUSED, const SparseCooTensor& x, DenseTensor* out) { *out = x.non_zero_elements(); } template -void ValuesCsrKernel(const Context& dev_ctx, +void ValuesCsrKernel(const Context& dev_ctx UNUSED, const SparseCsrTensor& x, DenseTensor* out) { *out = x.non_zero_elements(); From b11513498fd903040da4eadc1e18cf28be345f66 Mon Sep 17 00:00:00 2001 From: Yangrl <2535184404@qq.com> Date: Tue, 9 May 2023 02:29:43 +0000 Subject: [PATCH 3/6] test,test=develop --- .../collective/process_group_custom.cc | 2 +- .../distributed/ps/service/brpc_ps_server.h | 6 ++++-- .../ps/service/communicator/communicator.h | 20 +++++++++---------- .../ps/service/coordinator_client.h | 4 ++-- paddle/fluid/framework/ir/generate_pass.cc | 2 +- paddle/fluid/framework/trainer.h | 2 +- paddle/fluid/inference/api/paddle_api.h | 11 +++++----- .../fused/mkldnn/fusion_rnn_mkldnn.h | 10 +++++----- paddle/fluid/pybind/process_group_utils.h | 4 ++-- paddle/phi/backends/onednn/onednn_helper.h | 3 ++- paddle/phi/backends/onednn/onednn_reuse.h | 2 +- paddle/phi/kernels/autotune/cache_base.h | 2 +- .../phi/kernels/cpu/viterbi_decode_kernel.cc | 2 +- 
.../kernels/impl/logsumexp_grad_kernel_impl.h | 2 +- paddle/phi/kernels/impl/matmul_kernel_impl.h | 2 +- .../kernels/impl/searchsorted_kernel_impl.h | 8 ++++---- paddle/phi/kernels/impl/split_kernel_impl.h | 2 +- paddle/phi/kernels/sparse/cpu/full_kernel.cc | 2 +- .../phi/kernels/sparse/sparse_utils_kernel.h | 2 +- 19 files changed, 46 insertions(+), 42 deletions(-) diff --git a/paddle/fluid/distributed/collective/process_group_custom.cc b/paddle/fluid/distributed/collective/process_group_custom.cc index f0222b007c53e..b6c7063fd6fb7 100644 --- a/paddle/fluid/distributed/collective/process_group_custom.cc +++ b/paddle/fluid/distributed/collective/process_group_custom.cc @@ -189,7 +189,7 @@ std::shared_ptr ProcessGroupCustom::Collective( std::vector& outputs, Fn fn, CommType op_type, - bool sync_op, + bool sync_op UNUSED, bool use_calc_stream) { const auto places = GetPlaceList(inputs); const auto key = GetKeyFromPlaces(places); diff --git a/paddle/fluid/distributed/ps/service/brpc_ps_server.h b/paddle/fluid/distributed/ps/service/brpc_ps_server.h index 0343b3f8c58a7..321adf156c4d8 100644 --- a/paddle/fluid/distributed/ps/service/brpc_ps_server.h +++ b/paddle/fluid/distributed/ps/service/brpc_ps_server.h @@ -227,8 +227,10 @@ class DownpourPServerBrpcClosure : public PServerClosure { PsRequestMessage *request(size_t i) { return &_requests[i]; } PsResponseMessage *response(size_t i) { return &_responses[i]; } brpc::Controller *cntl(size_t i) { return _cntls[i].get(); } - int check_response(size_t request_idx, int cmd_id) { return 1; } - int check_save_response(size_t request_idx, int cmd_id) { return 1; } + int check_response(size_t request_idx UNUSED, int cmd_id UNUSED) { return 1; } + int check_save_response(size_t request_idx UNUSED, int cmd_id UNUSED) { + return 1; + } private: std::atomic _waiting_num; diff --git a/paddle/fluid/distributed/ps/service/communicator/communicator.h b/paddle/fluid/distributed/ps/service/communicator/communicator.h index f3aa23a77826d..643c91b5b05e9 100644 --- a/paddle/fluid/distributed/ps/service/communicator/communicator.h +++ b/paddle/fluid/distributed/ps/service/communicator/communicator.h @@ -287,10 +287,10 @@ class Communicator { return {}; } virtual void SaveFLStrategy( - const std::unordered_map &fl_strategy) {} + const std::unordered_map &fl_strategy UNUSED) {} virtual void StartCoordinator( - const std::string &self_endpoint, - const std::vector &trainer_endpoints) {} + const std::string &self_endpoint UNUSED, + const std::vector &trainer_endpoints UNUSED) {} virtual ~Communicator() {} virtual void RpcProfilerControl(); @@ -337,13 +337,13 @@ class Communicator { virtual void BarrierTriggerDecrement() {} - virtual void BarrierTriggerReset(int init_counter) {} + virtual void BarrierTriggerReset(int init_counter UNUSED) {} virtual void InitEnvs() = 0; - virtual void InitImpl(const RpcCtxMap &send_varname_to_ctx, - const RecvCtxMap &recv_varname_to_ctx, - Scope *recv_scope) {} + virtual void InitImpl(const RpcCtxMap &send_varname_to_ctx UNUSED, + const RecvCtxMap &recv_varname_to_ctx UNUSED, + Scope *recv_scope UNUSED) {} static Communicator *GetInstance() { return communicator_.get(); } @@ -682,9 +682,9 @@ class FLCommunicator : public GeoCommunicator { virtual void InitBrpcClient(const std::string &dist_desc, const std::vector &host_sign_list); - void InitImpl(const RpcCtxMap &send_varname_to_ctx, - const RecvCtxMap &recv_varname_to_ctx, - Scope *recv_scope) {} + void InitImpl(const RpcCtxMap &send_varname_to_ctx UNUSED, + const RecvCtxMap 
&recv_varname_to_ctx UNUSED, + Scope *recv_scope UNUSED) {} void StartCoordinatorClient( const std::vector &trainer_endpoints); diff --git a/paddle/fluid/distributed/ps/service/coordinator_client.h b/paddle/fluid/distributed/ps/service/coordinator_client.h index 883799fe50038..bd1f0f7754d8c 100644 --- a/paddle/fluid/distributed/ps/service/coordinator_client.h +++ b/paddle/fluid/distributed/ps/service/coordinator_client.h @@ -151,8 +151,8 @@ class CoordinatorService : public PsService { ::google::protobuf::Closure* done); int32_t SaveFLClientInfo(const CoordinatorReqMessage& request, - CoordinatorResMessage* response, - brpc::Controller* cntl) { + CoordinatorResMessage* response UNUSED, + brpc::Controller* cntl UNUSED) { _coordinator_service_handle->SaveFLClientInfo(request); return 0; } diff --git a/paddle/fluid/framework/ir/generate_pass.cc b/paddle/fluid/framework/ir/generate_pass.cc index dd58b090764cb..61c6ce5757aa1 100644 --- a/paddle/fluid/framework/ir/generate_pass.cc +++ b/paddle/fluid/framework/ir/generate_pass.cc @@ -26,7 +26,7 @@ class element_visitor { explicit element_visitor(int index) : index_(index) {} template - Attribute operator()(const T& attr) const { + Attribute operator()(const T& attr UNUSED) const { PADDLE_THROW(platform::errors::Unimplemented("Unimplemented operand.")); } diff --git a/paddle/fluid/framework/trainer.h b/paddle/fluid/framework/trainer.h index 455487541abb9..bf69bed9d4851 100644 --- a/paddle/fluid/framework/trainer.h +++ b/paddle/fluid/framework/trainer.h @@ -70,7 +70,7 @@ class TrainerBase { virtual Scope* GetWorkerScope(int thread_id) = 0; virtual void InitDumpEnv() = 0; virtual void DumpWork(int tid); - virtual void ResetDataset(Dataset* dataset_ptr) {} + virtual void ResetDataset(Dataset* dataset_ptr UNUSED) {} protected: virtual std::string GetDumpPath(int tid) = 0; diff --git a/paddle/fluid/inference/api/paddle_api.h b/paddle/fluid/inference/api/paddle_api.h index 3a51f91b3afc2..572643b0ed0f4 100644 --- a/paddle/fluid/inference/api/paddle_api.h +++ b/paddle/fluid/inference/api/paddle_api.h @@ -29,6 +29,7 @@ #include #include "crypto/cipher.h" +#include "paddle/phi/core/macros.h" #include "paddle_infer_declare.h" // NOLINT #include "paddle_tensor.h" // NOLINT /*! \namespace paddle @@ -226,8 +227,8 @@ class PD_INFER_DECL PaddlePredictor { /// \param[out] output_data Pointer to the tensor list, which holds the output /// Tensor /// \return Whether the run is successful - virtual bool Run(const std::vector& inputs, - std::vector* outputs) { + virtual bool Run(const std::vector& inputs UNUSED, + std::vector* outputs UNUSED) { return false; } @@ -272,7 +273,7 @@ class PD_INFER_DECL PaddlePredictor { /// \param name The input tensor name. /// \return Return the corresponding input ZeroCopyTensor. virtual std::unique_ptr GetInputTensor( - const std::string& name) { + const std::string& name UNUSED) { return nullptr; } @@ -282,7 +283,7 @@ class PD_INFER_DECL PaddlePredictor { /// \param name The output tensor name. /// \return Return the corresponding output ZeroCopyTensor. virtual std::unique_ptr GetOutputTensor( - const std::string& name) { + const std::string& name UNUSED) { return nullptr; } /// \brief Run the network with zero-copied inputs and outputs. @@ -321,7 +322,7 @@ class PD_INFER_DECL PaddlePredictor { /// type, the second param is output var name of the op, and the third /// parameter is output tensor with the var name. 
/// - virtual void RegisterOutputHook(const Exp_OutputHookFunc& hookfunc) {} + virtual void RegisterOutputHook(const Exp_OutputHookFunc& hookfunc UNUSED) {} /// \brief Clone an existing predictor /// When using clone, the same network will be created, diff --git a/paddle/fluid/operators/fused/mkldnn/fusion_rnn_mkldnn.h b/paddle/fluid/operators/fused/mkldnn/fusion_rnn_mkldnn.h index f8854d3d7b489..db3967ceddfd2 100644 --- a/paddle/fluid/operators/fused/mkldnn/fusion_rnn_mkldnn.h +++ b/paddle/fluid/operators/fused/mkldnn/fusion_rnn_mkldnn.h @@ -30,12 +30,12 @@ class RNNMKLDNNHandler : public phi::funcs::OneDNNHandlerT { public: RNNMKLDNNHandler(const paddle::framework::ExecutionContext& ctx, const phi::OneDNNContext& dev_ctx, - const dnnl::engine onednn_engine, + const dnnl::engine onednn_engine UNUSED, platform::Place cpu_place, - const phi::DenseTensor* input, - const phi::DenseTensor* weight_h, - const phi::DenseTensor* h0, - const bool is_reverse, + const phi::DenseTensor* input UNUSED, + const phi::DenseTensor* weight_h UNUSED, + const phi::DenseTensor* h0 UNUSED, + const bool is_reverse UNUSED, const int64_t N, const int64_t Ti, const int64_t IC, diff --git a/paddle/fluid/pybind/process_group_utils.h b/paddle/fluid/pybind/process_group_utils.h index a35962ce84133..1a6b640b3a3cf 100644 --- a/paddle/fluid/pybind/process_group_utils.h +++ b/paddle/fluid/pybind/process_group_utils.h @@ -56,7 +56,7 @@ struct ConcatDenseTensor { void operator()(const platform::CustomDeviceContext &context, const std::vector &in, phi::DenseTensor *out, - int axis = 0) { + int axis UNUSED = 0) { auto *out_data = out->data(); auto *device = phi::DeviceManager::GetDeviceWithPlace(context.GetPlace()); size_t offset = 0; @@ -80,7 +80,7 @@ struct SplitDenseTensor { void operator()(const platform::CustomDeviceContext &context, const phi::DenseTensor &in, std::vector *out, - int axis = 0) { + int axis UNUSED = 0) { auto *in_data = in.data(); auto *device = phi::DeviceManager::GetDeviceWithPlace(context.GetPlace()); size_t offset = 0; diff --git a/paddle/phi/backends/onednn/onednn_helper.h b/paddle/phi/backends/onednn/onednn_helper.h index 9b5aa167a6276..84e36a26ca487 100644 --- a/paddle/phi/backends/onednn/onednn_helper.h +++ b/paddle/phi/backends/onednn/onednn_helper.h @@ -191,7 +191,8 @@ inline void AppendKey(std::string* key, const std::vector& dims) { } template -inline std::string CreateKey(const OneDNNContext& dev_ctx, ArgTypes&&... args) { +inline std::string CreateKey(const OneDNNContext& dev_ctx UNUSED, + ArgTypes&&... 
args) { std::string key; key.reserve(64); using expand_type = int[]; diff --git a/paddle/phi/backends/onednn/onednn_reuse.h b/paddle/phi/backends/onednn/onednn_reuse.h index 463c55a7c5317..330f0318a6e09 100644 --- a/paddle/phi/backends/onednn/onednn_reuse.h +++ b/paddle/phi/backends/onednn/onednn_reuse.h @@ -676,7 +676,7 @@ class OneDNNHandlerNoCachingT { const dnnl::memory::desc& user_md, const dnnl::memory::desc& target_md, void* ptr, - bool is_persistent = false, + bool is_persistent UNUSED = false, std::function(const F*)> custom_reorder_func = {}) { std::shared_ptr target_memory_p; if (custom_reorder_func) { diff --git a/paddle/phi/kernels/autotune/cache_base.h b/paddle/phi/kernels/autotune/cache_base.h index 267c8ef3f6859..798898f4dd7d1 100644 --- a/paddle/phi/kernels/autotune/cache_base.h +++ b/paddle/phi/kernels/autotune/cache_base.h @@ -24,7 +24,7 @@ DECLARE_int32(search_cache_max_number); -inline void HashCombine(std::size_t* seed) {} +inline void HashCombine(std::size_t* seed UNUSED) {} // combine hash value // https://stackoverflow.com/questions/2590677/how-do-i-combine-hash-values-in-c0x diff --git a/paddle/phi/kernels/cpu/viterbi_decode_kernel.cc b/paddle/phi/kernels/cpu/viterbi_decode_kernel.cc index 6342bfee4dfe9..42fdd78c6205f 100644 --- a/paddle/phi/kernels/cpu/viterbi_decode_kernel.cc +++ b/paddle/phi/kernels/cpu/viterbi_decode_kernel.cc @@ -127,7 +127,7 @@ template struct BinaryOperation { - void operator()(const Context& dev_ctx, + void operator()(const Context& dev_ctx UNUSED, const DenseTensor& lhs, const DenseTensor& rhs, DenseTensor* output) { diff --git a/paddle/phi/kernels/impl/logsumexp_grad_kernel_impl.h b/paddle/phi/kernels/impl/logsumexp_grad_kernel_impl.h index 0db6c12d4a07c..ab65769ee6931 100644 --- a/paddle/phi/kernels/impl/logsumexp_grad_kernel_impl.h +++ b/paddle/phi/kernels/impl/logsumexp_grad_kernel_impl.h @@ -38,7 +38,7 @@ struct LogsumexpGradFunctor { DX* dx, DY* dy, const Dim& dim, - int size) { + int size UNUSED) { using MT = typename phi::dtype::MPTypeTrait::Type; auto x_mt = (*x).template cast(); auto y_mt = (*y).template cast(); diff --git a/paddle/phi/kernels/impl/matmul_kernel_impl.h b/paddle/phi/kernels/impl/matmul_kernel_impl.h index acc7affc00e26..14f50786d0c86 100644 --- a/paddle/phi/kernels/impl/matmul_kernel_impl.h +++ b/paddle/phi/kernels/impl/matmul_kernel_impl.h @@ -103,7 +103,7 @@ void MatMulFunctionImplWithBlas( bool trans_x, bool trans_y, bool flag = false, - phi::funcs::MatmulPlanner* matmul_planner = nullptr) { + phi::funcs::MatmulPlanner* matmul_planner UNUSED = nullptr) { const int x_ndim = x_dims.size(); const int y_ndim = y_dims.size(); diff --git a/paddle/phi/kernels/impl/searchsorted_kernel_impl.h b/paddle/phi/kernels/impl/searchsorted_kernel_impl.h index 6c0891e59bcb9..2b28e3fed49a2 100644 --- a/paddle/phi/kernels/impl/searchsorted_kernel_impl.h +++ b/paddle/phi/kernels/impl/searchsorted_kernel_impl.h @@ -39,8 +39,8 @@ class GpuAndCpuSearchSortedCompute { return std::isnan(x); #endif } - static HOSTDEVICE bool IsNan(int x) { return false; } - static HOSTDEVICE bool IsNan(int64_t x) { return false; } + static HOSTDEVICE bool IsNan(int x UNUSED) { return false; } + static HOSTDEVICE bool IsNan(int64_t x UNUSED) { return false; } static HOSTDEVICE bool IsInf(float x) { #ifdef __NVCC__ @@ -56,8 +56,8 @@ class GpuAndCpuSearchSortedCompute { return std::isinf(x); #endif } - static HOSTDEVICE bool IsInf(int x) { return false; } - static HOSTDEVICE bool IsInf(int64_t x) { return false; } + static HOSTDEVICE bool IsInf(int x 
UNUSED) { return false; } + static HOSTDEVICE bool IsInf(int64_t x UNUSED) { return false; } HOSTDEVICE GpuAndCpuSearchSortedCompute(const T1* sequence_data, const T2* value_data, diff --git a/paddle/phi/kernels/impl/split_kernel_impl.h b/paddle/phi/kernels/impl/split_kernel_impl.h index 83968d913feb4..2df379fcdb3b8 100644 --- a/paddle/phi/kernels/impl/split_kernel_impl.h +++ b/paddle/phi/kernels/impl/split_kernel_impl.h @@ -25,7 +25,7 @@ namespace phi { template void SplitKernel(const Context& dev_ctx, const DenseTensor& x, - const IntArray& sections, + const IntArray& sections UNUSED, const Scalar& axis_scalar, std::vector outs) { std::vector shape_refer; diff --git a/paddle/phi/kernels/sparse/cpu/full_kernel.cc b/paddle/phi/kernels/sparse/cpu/full_kernel.cc index 5659bcf2159da..d9209544ec7b9 100644 --- a/paddle/phi/kernels/sparse/cpu/full_kernel.cc +++ b/paddle/phi/kernels/sparse/cpu/full_kernel.cc @@ -54,7 +54,7 @@ template void FullLikeCsrKernel(const Context& dev_ctx, const SparseCsrTensor& x, const Scalar& val, - DataType dtype, + DataType dtype UNUSED, SparseCsrTensor* out) { phi::Copy(dev_ctx, x.non_zero_crows(), diff --git a/paddle/phi/kernels/sparse/sparse_utils_kernel.h b/paddle/phi/kernels/sparse/sparse_utils_kernel.h index 241f3d8b0670a..e06391d9404ef 100644 --- a/paddle/phi/kernels/sparse/sparse_utils_kernel.h +++ b/paddle/phi/kernels/sparse/sparse_utils_kernel.h @@ -158,7 +158,7 @@ void ValuesCsrKernel(const Context& dev_ctx UNUSED, } template -void IndicesCooKernel(const Context& dev_ctx, +void IndicesCooKernel(const Context& dev_ctx UNUSED, const SparseCooTensor& x, DenseTensor* out) { *out = x.indices(); From 638313d43aa308a683555e7f5c292d8f00226a9b Mon Sep 17 00:00:00 2001 From: Yangrl <2535184404@qq.com> Date: Tue, 9 May 2023 04:58:27 +0000 Subject: [PATCH 4/6] test,test=develop --- paddle/fluid/inference/api/paddle_api.h | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/paddle/fluid/inference/api/paddle_api.h b/paddle/fluid/inference/api/paddle_api.h index 572643b0ed0f4..ee15468c9b81e 100644 --- a/paddle/fluid/inference/api/paddle_api.h +++ b/paddle/fluid/inference/api/paddle_api.h @@ -29,7 +29,6 @@ #include #include "crypto/cipher.h" -#include "paddle/phi/core/macros.h" #include "paddle_infer_declare.h" // NOLINT #include "paddle_tensor.h" // NOLINT /*! \namespace paddle @@ -227,8 +226,8 @@ class PD_INFER_DECL PaddlePredictor { /// \param[out] output_data Pointer to the tensor list, which holds the output /// Tensor /// \return Whether the run is successful - virtual bool Run(const std::vector& inputs UNUSED, - std::vector* outputs UNUSED) { + virtual bool Run(const std::vector& inputs, + std::vector* outputs) { return false; } @@ -273,7 +272,7 @@ class PD_INFER_DECL PaddlePredictor { /// \param name The input tensor name. /// \return Return the corresponding input ZeroCopyTensor. virtual std::unique_ptr GetInputTensor( - const std::string& name UNUSED) { + const std::string& name) { return nullptr; } @@ -283,7 +282,7 @@ class PD_INFER_DECL PaddlePredictor { /// \param name The output tensor name. /// \return Return the corresponding output ZeroCopyTensor. virtual std::unique_ptr GetOutputTensor( - const std::string& name UNUSED) { + const std::string& name) { return nullptr; } /// \brief Run the network with zero-copied inputs and outputs. @@ -322,7 +321,7 @@ class PD_INFER_DECL PaddlePredictor { /// type, the second param is output var name of the op, and the third /// parameter is output tensor with the var name. 
/// - virtual void RegisterOutputHook(const Exp_OutputHookFunc& hookfunc UNUSED) {} + virtual void RegisterOutputHook(const Exp_OutputHookFunc& hookfunc) {} /// \brief Clone an existing predictor /// When using clone, the same network will be created, @@ -481,7 +480,8 @@ class PD_INFER_DECL InternalUtils { cudaStream_t stream); static bool RunWithExternalStream(paddle_infer::Predictor* pred, hipStream_t stream); - + static bool RunWithExternalStream(paddle_infer::Predictor* pred, + void* stream); static void UpdateConfigInterleaved(paddle_infer::Config* c, bool with_interleaved); From 07712a822f9c9fd882c9082acee6abbb6251683a Mon Sep 17 00:00:00 2001 From: Yangrl <2535184404@qq.com> Date: Tue, 9 May 2023 11:08:48 +0000 Subject: [PATCH 5/6] test,test=develop --- paddle/fluid/framework/CMakeLists.txt | 8 +--- .../fluid/framework/details/nccl_op_handle.h | 4 +- paddle/fluid/operators/save_combine_op.h | 4 +- paddle/phi/common/cpstring_impl.h | 8 +++- paddle/phi/kernels/cpu/p_recv_kernel.cc | 20 ++++---- paddle/phi/kernels/cpu/p_send_kernel.cc | 18 +++---- paddle/phi/kernels/cpu/rnn_grad_kernel.cc | 48 +++++++++---------- paddle/phi/kernels/cpu/rnn_kernel.cc | 40 ++++++++-------- .../kernels/cpu/send_ue_recv_grad_kernel.cc | 8 ++-- paddle/phi/kernels/cpu/send_uv_grad_kernel.cc | 2 +- .../kernels/cpu/unique_consecutive_functor.h | 2 +- .../phi/kernels/cpu/viterbi_decode_kernel.cc | 2 +- paddle/phi/kernels/funcs/activation_functor.h | 4 +- paddle/phi/kernels/funcs/compound_functors.h | 2 +- paddle/phi/kernels/funcs/cpu_vec.h | 2 +- .../phi/kernels/funcs/detail/strided_memcpy.h | 10 ++-- paddle/phi/kernels/funcs/jit/more/mkl/mkl.cc | 8 ++-- paddle/phi/kernels/funcs/scatter.h | 4 +- paddle/phi/kernels/sparse/cpu/conv.h | 2 +- 19 files changed, 98 insertions(+), 98 deletions(-) diff --git a/paddle/fluid/framework/CMakeLists.txt b/paddle/fluid/framework/CMakeLists.txt index b95d996b88a94..3cab75f0971dd 100755 --- a/paddle/fluid/framework/CMakeLists.txt +++ b/paddle/fluid/framework/CMakeLists.txt @@ -771,9 +771,7 @@ if(WITH_DISTRIBUTE) heter_service_proto fleet_executor ${BRPC_DEP}) - set(DISTRIBUTE_COMPILE_FLAGS - "-Wno-non-virtual-dtor -Wno-error=non-virtual-dtor -Wno-error=delete-non-virtual-dtor -Wno-error=parentheses" - ) + set(DISTRIBUTE_COMPILE_FLAGS "") if(CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 7.0) set(DISTRIBUTE_COMPILE_FLAGS "${DISTRIBUTE_COMPILE_FLAGS} -faligned-new") endif() @@ -855,9 +853,7 @@ if(WITH_DISTRIBUTE) brpc fleet_executor flags) - set(DISTRIBUTE_COMPILE_FLAGS - "-Wno-non-virtual-dtor -Wno-error=non-virtual-dtor -Wno-error=delete-non-virtual-dtor -Wno-error=parentheses" - ) + set(DISTRIBUTE_COMPILE_FLAGS "") if(CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 7.0) set(DISTRIBUTE_COMPILE_FLAGS "${DISTRIBUTE_COMPILE_FLAGS} -faligned-new") endif() diff --git a/paddle/fluid/framework/details/nccl_op_handle.h b/paddle/fluid/framework/details/nccl_op_handle.h index db7fc45b246c6..e4472e8d989dd 100644 --- a/paddle/fluid/framework/details/nccl_op_handle.h +++ b/paddle/fluid/framework/details/nccl_op_handle.h @@ -225,7 +225,7 @@ class NCCLOpHandleBase : public OpHandleBase { void* recvbuff, size_t count, ncclDataType_t datatype, - ncclRedOp_t op) { + ncclRedOp_t op UNUSED) { auto nccl_ctxs = nccl_ctxs_->GetHierarchicalInterCtx(run_order_); int dev_id = place.device; auto& nccl_ctx = nccl_ctxs->at(dev_id); @@ -297,7 +297,7 @@ class NCCLOpHandleBase : public OpHandleBase { void* sendbuff, size_t count, ncclDataType_t datatype, - ncclRedOp_t op) { + ncclRedOp_t op UNUSED) { auto nccl_ctxs = 
nccl_ctxs_->GetHierarchicalInterCtx(run_order_); int dev_id = place.device; auto& nccl_ctx = nccl_ctxs->at(dev_id); diff --git a/paddle/fluid/operators/save_combine_op.h b/paddle/fluid/operators/save_combine_op.h index 817e18791f9a3..1888ce5b57493 100644 --- a/paddle/fluid/operators/save_combine_op.h +++ b/paddle/fluid/operators/save_combine_op.h @@ -121,11 +121,11 @@ void SaveCombineTensorKernel(const Context& dev_ctx, template void SaveCombineVocabKernel( - const Context& dev_ctx, + const Context& dev_ctx UNUSED, const std::vector& inputs, const std::string& file_path, bool overwrite, - bool save_as_fp16, + bool save_as_fp16 UNUSED, bool save_to_memory, phi::ExtendedTensor* out) { std::string* y = nullptr; diff --git a/paddle/phi/common/cpstring_impl.h b/paddle/phi/common/cpstring_impl.h index 99a04e7ce4924..6783799026d44 100644 --- a/paddle/phi/common/cpstring_impl.h +++ b/paddle/phi/common/cpstring_impl.h @@ -24,6 +24,8 @@ limitations under the License. */ #include #include +#include "paddle/phi/core/macros.h" + #if (defined(__NVCC__) || defined(__HIPCC__)) #define HOSTDEVICE __host__ __device__ #define DEVICE __device__ @@ -181,7 +183,9 @@ HOSTDEVICE static inline size_t PD_PString_ToInternalSizeT( /* * Need to implement in other source file. */ -HOSTDEVICE static inline void PD_Free(void *ptr, size_t size) { free(ptr); } +HOSTDEVICE static inline void PD_Free(void *ptr, size_t size UNUSED) { + free(ptr); +} HOSTDEVICE static inline void *PD_Memset(void *src, int ch, size_t size) { char *dst = (char *)src; // NOLINT @@ -203,7 +207,7 @@ HOSTDEVICE static inline void *PD_Memcpy(void *dst, HOSTDEVICE static inline void *PD_Malloc(size_t size) { return malloc(size); } HOSTDEVICE static inline void *PD_Realloc(void *ptr, - size_t old_size, + size_t old_size UNUSED, size_t new_size) { #if (defined(__NVCC__) || defined(__HIPCC__)) if (old_size >= new_size) { diff --git a/paddle/phi/kernels/cpu/p_recv_kernel.cc b/paddle/phi/kernels/cpu/p_recv_kernel.cc index 72d312e71ca37..10526e6935e1e 100644 --- a/paddle/phi/kernels/cpu/p_recv_kernel.cc +++ b/paddle/phi/kernels/cpu/p_recv_kernel.cc @@ -24,20 +24,20 @@ namespace phi { template -void PRecvKernel(const Context& dev_ctx, - int peer, - DataType dtype, - bool dynamic_shape, - DenseTensor* out) { +void PRecvKernel(const Context& dev_ctx UNUSED, + int peer UNUSED, + DataType dtype UNUSED, + bool dynamic_shape UNUSED, + DenseTensor* out UNUSED) { PADDLE_THROW(errors::Unavailable("Do not support recv for cpu kernel now.")); } template -void PRecvArrayKernel(const Context& dev_ctx, - int peer, - DataType dtype, - const std::vector& out_shape, - TensorArray* out_array) { +void PRecvArrayKernel(const Context& dev_ctx UNUSED, + int peer UNUSED, + DataType dtype UNUSED, + const std::vector& out_shape UNUSED, + TensorArray* out_array UNUSED) { PADDLE_THROW( errors::Unavailable("Do not support recv array for cpu kernel now.")); } diff --git a/paddle/phi/kernels/cpu/p_send_kernel.cc b/paddle/phi/kernels/cpu/p_send_kernel.cc index 60f9d458ce6e3..a786de7ecaf3b 100644 --- a/paddle/phi/kernels/cpu/p_send_kernel.cc +++ b/paddle/phi/kernels/cpu/p_send_kernel.cc @@ -24,19 +24,19 @@ namespace phi { template -void PSendKernel(const Context& dev_ctx, - const DenseTensor& x, - int peer, - bool dynamic_shape) { +void PSendKernel(const Context& dev_ctx UNUSED, + const DenseTensor& x UNUSED, + int peer UNUSED, + bool dynamic_shape UNUSED) { PADDLE_THROW(errors::Unavailable("Do not support send for cpu kernel now.")); } template -void PSendArrayKernel(const Context& 
dev_ctx, - const TensorArray& x, - int peer, - bool dynamic_shape, - DenseTensor* out) { +void PSendArrayKernel(const Context& dev_ctx UNUSED, + const TensorArray& x UNUSED, + int peer UNUSED, + bool dynamic_shape UNUSED, + DenseTensor* out UNUSED) { PADDLE_THROW( errors::Unavailable("Do not support send array for cpu kernel now.")); } diff --git a/paddle/phi/kernels/cpu/rnn_grad_kernel.cc b/paddle/phi/kernels/cpu/rnn_grad_kernel.cc index b4ec6652eb975..cb479cb755cfa 100644 --- a/paddle/phi/kernels/cpu/rnn_grad_kernel.cc +++ b/paddle/phi/kernels/cpu/rnn_grad_kernel.cc @@ -553,28 +553,28 @@ struct GradLayer { } virtual void operator()( - const CPUContext& dev_ctx, - const DenseTensor* input, - const DenseTensor* output, - const std::vector& init_h_unbind, - const std::vector& init_c_unbind, - const std::vector& last_h_grad_unbind, - const std::vector& last_c_grad_unbind, - const std::vector& gate_tensor_unbind, - const std::vector& state_tensor_unbind, - const std::vector& act_state_tensor_unbind, - const DenseTensor* output_grad, - const std::vector>& parameter_lists, - const DenseTensor* sequence_length, - DenseTensor* input_grad, - std::vector* init_h_grad_unbind, - std::vector* init_c_grad_unbind, - const std::vector>& weight_list_grad, - int layer_idx, - bool is_bidirec, - int hidden_size, - const std::string& mode, - int gate_num) {} + const CPUContext& dev_ctx UNUSED, + const DenseTensor* input UNUSED, + const DenseTensor* output UNUSED, + const std::vector& init_h_unbind UNUSED, + const std::vector& init_c_unbind UNUSED, + const std::vector& last_h_grad_unbind UNUSED, + const std::vector& last_c_grad_unbind UNUSED, + const std::vector& gate_tensor_unbind UNUSED, + const std::vector& state_tensor_unbind UNUSED, + const std::vector& act_state_tensor_unbind UNUSED, + const DenseTensor* output_grad UNUSED, + const std::vector>& parameter_lists UNUSED, + const DenseTensor* sequence_length UNUSED, + DenseTensor* input_grad UNUSED, + std::vector* init_h_grad_unbind UNUSED, + std::vector* init_c_grad_unbind UNUSED, + const std::vector>& weight_list_grad UNUSED, + int layer_idx UNUSED, + bool is_bidirec UNUSED, + int hidden_size UNUSED, + const std::string& mode UNUSED, + int gate_num UNUSED) {} void preprocess(const CPUContext& dev_ctx, const DenseTensor* grad_output, @@ -978,11 +978,11 @@ void RnnGradFunc(const CPUContext& dev_ctx, const std::vector& state_grad, float dropout_prob, bool is_bidirec, - int input_size, + int input_size UNUSED, int hidden_size, int num_layers, const std::string& mode, - int seed, + int seed UNUSED, bool is_test, int gate_num, DenseTensor* x_grad, diff --git a/paddle/phi/kernels/cpu/rnn_kernel.cc b/paddle/phi/kernels/cpu/rnn_kernel.cc index ae3a4402f5138..de1caf16f621e 100644 --- a/paddle/phi/kernels/cpu/rnn_kernel.cc +++ b/paddle/phi/kernels/cpu/rnn_kernel.cc @@ -292,22 +292,22 @@ struct Layer { } } - virtual void operator()(const CPUContext& dev_ctx, - const DenseTensor* input, - const std::vector& vec, - const std::vector& init_h, - const std::vector& init_c, - const DenseTensor* sequence_length, - std::vector last_h, - std::vector last_c, - DenseTensor* output, - const int& layer_idx, - const int& gate_num, - DenseTensor* gate_value, - DenseTensor* cell_value, - DenseTensor* cell_act_value, - const std::string& mode, - bool is_test) {} + virtual void operator()(const CPUContext& dev_ctx UNUSED, + const DenseTensor* input UNUSED, + const std::vector& vec UNUSED, + const std::vector& init_h UNUSED, + const std::vector& init_c UNUSED, + const DenseTensor* 
sequence_length UNUSED, + std::vector last_h UNUSED, + std::vector last_c UNUSED, + DenseTensor* output UNUSED, + const int& layer_idx UNUSED, + const int& gate_num UNUSED, + DenseTensor* gate_value UNUSED, + DenseTensor* cell_value UNUSED, + DenseTensor* cell_act_value UNUSED, + const std::string& mode UNUSED, + bool is_test UNUSED) {} void RunTestIter(const CPUContext& dev_ctx, const DenseTensor* input, @@ -320,8 +320,8 @@ struct Layer { DenseTensor* output, int layer_idx, DenseTensor* gate_value, - DenseTensor* cell_value, - DenseTensor* cell_act_value, + DenseTensor* cell_value UNUSED, + DenseTensor* cell_act_value UNUSED, bool is_bidirect, int offset, const std::string& mode) { @@ -701,7 +701,7 @@ struct SingleLayer : public Layer { std::vector last_c, DenseTensor* output, const int& layer_idx, - const int& gate_num, + const int& gate_num UNUSED, DenseTensor* gate_value, DenseTensor* cell_value, DenseTensor* cell_act_value, @@ -740,7 +740,7 @@ struct BidirLayer : public Layer { std::vector last_c, DenseTensor* output, const int& layer_idx, - const int& gate_num, + const int& gate_num UNUSED, DenseTensor* gate_value, DenseTensor* cell_value, DenseTensor* cell_act_value, diff --git a/paddle/phi/kernels/cpu/send_ue_recv_grad_kernel.cc b/paddle/phi/kernels/cpu/send_ue_recv_grad_kernel.cc index 659c6502803f0..0d06245e068fc 100644 --- a/paddle/phi/kernels/cpu/send_ue_recv_grad_kernel.cc +++ b/paddle/phi/kernels/cpu/send_ue_recv_grad_kernel.cc @@ -32,7 +32,7 @@ namespace phi { template void CalculateXGrad(const Context& ctx, const T* out_grad, - const T* x_data, + const T* x_data UNUSED, const T* e_data, const phi::DDim& out_grad_dims, const phi::DDim& x_dims, @@ -46,7 +46,7 @@ void CalculateXGrad(const Context& ctx, const DenseTensor& out_grad_tensor, DenseTensor* x_grad_tensor, const DenseTensor* dst_count = nullptr, - const DenseTensor* out = nullptr) { + const DenseTensor* out UNUSED = nullptr) { std::vector reduce_idx; bool reduce = ReduceGrad(out_grad_dims, x_dims, reduce_idx); @@ -232,7 +232,7 @@ void CalculateXGrad(const Context& ctx, template void CalculateEGrad(const T* out_grad_data, const T* x_data, - const T* e_data, + const T* e_data UNUSED, const phi::DDim& x_dims, const phi::DDim& e_dims, const IndexT* s_index, @@ -308,7 +308,7 @@ void CalculateXEGradForMinMax(const T* out_grad, const IndexT* s_index, const IndexT* d_index, const std::string& message_op, - const std::string& reduce_op, + const std::string& reduce_op UNUSED, int64_t index_size, T* x_grad, T* e_grad, diff --git a/paddle/phi/kernels/cpu/send_uv_grad_kernel.cc b/paddle/phi/kernels/cpu/send_uv_grad_kernel.cc index 8e5873ac01769..c04bdaec0177e 100644 --- a/paddle/phi/kernels/cpu/send_uv_grad_kernel.cc +++ b/paddle/phi/kernels/cpu/send_uv_grad_kernel.cc @@ -35,7 +35,7 @@ void CalculateGrad(const Context& ctx, int64_t index_size, int64_t slice_size, T* x_grad, - const DenseTensor& out_grad_tensor, + const DenseTensor& out_grad_tensor UNUSED, const DenseTensor& y) { std::vector reduce_idx; bool reduce = ReduceGrad(out_grad_dims, x_grad_dims, reduce_idx); diff --git a/paddle/phi/kernels/cpu/unique_consecutive_functor.h b/paddle/phi/kernels/cpu/unique_consecutive_functor.h index 73d196bbb98d9..2daee69eed92d 100644 --- a/paddle/phi/kernels/cpu/unique_consecutive_functor.h +++ b/paddle/phi/kernels/cpu/unique_consecutive_functor.h @@ -111,7 +111,7 @@ struct UniqueConsecutiveFlattenedTensorFunctor { template static ForwardIt UniqueConsecutiveDimImpl( - const Context& context, + const Context& context UNUSED, ForwardIt 
first, ForwardIt last, const std::vector& sorted_indices_vec, diff --git a/paddle/phi/kernels/cpu/viterbi_decode_kernel.cc b/paddle/phi/kernels/cpu/viterbi_decode_kernel.cc index 42fdd78c6205f..f129000e0a97a 100644 --- a/paddle/phi/kernels/cpu/viterbi_decode_kernel.cc +++ b/paddle/phi/kernels/cpu/viterbi_decode_kernel.cc @@ -113,7 +113,7 @@ template struct GetMask { - void operator()(const Context& dev_ctx, + void operator()(const Context& dev_ctx UNUSED, const DenseTensor& lhs, const DenseTensor& rhs, DenseTensor* mask) { diff --git a/paddle/phi/kernels/funcs/activation_functor.h b/paddle/phi/kernels/funcs/activation_functor.h index bc1b21c1c8cce..ccc2c474499d7 100644 --- a/paddle/phi/kernels/funcs/activation_functor.h +++ b/paddle/phi/kernels/funcs/activation_functor.h @@ -597,7 +597,7 @@ struct SquareGradFunctor : public BaseActivationFunctor { typename Out, typename dOut, typename dX> - void operator()(Device d, X x, Out out, dOut dout, dX dx) const { + void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const { dx.device(d) = dout * static_cast(2) * x; } @@ -1087,7 +1087,7 @@ struct ExpGradFunctor : public BaseActivationFunctor { typename Out, typename dOut, typename dX> - void operator()(Device d, X x, Out out, dOut dout, dX dx) const { + void operator()(Device d, X x UNUSED, Out out, dOut dout, dX dx) const { dx.device(d) = dout * out; } diff --git a/paddle/phi/kernels/funcs/compound_functors.h b/paddle/phi/kernels/funcs/compound_functors.h index d51c13950aa33..121597bca6873 100644 --- a/paddle/phi/kernels/funcs/compound_functors.h +++ b/paddle/phi/kernels/funcs/compound_functors.h @@ -47,7 +47,7 @@ struct UnaryCompoundFunctor { inline HOSTDEVICE T GetOut(T x, T y) { return func1_(func2_(x, y)); } - inline HOSTDEVICE T GetOutUseIntermediateOut(T x, T intermediat_out) { + inline HOSTDEVICE T GetOutUseIntermediateOut(T x UNUSED, T intermediat_out) { return func1_(intermediat_out); } diff --git a/paddle/phi/kernels/funcs/cpu_vec.h b/paddle/phi/kernels/funcs/cpu_vec.h index e7dc6535c1547..6774cd391dd5d 100644 --- a/paddle/phi/kernels/funcs/cpu_vec.h +++ b/paddle/phi/kernels/funcs/cpu_vec.h @@ -475,7 +475,7 @@ inline void vec_add_bias(const int n, } template -inline void vec_identity(const int n, const T* x, T* y) { +inline void vec_identity(const int n UNUSED, const T* x UNUSED, T* y UNUSED) { // do nothing return; } diff --git a/paddle/phi/kernels/funcs/detail/strided_memcpy.h b/paddle/phi/kernels/funcs/detail/strided_memcpy.h index 209d463065b97..0cd07fdfd0e1a 100644 --- a/paddle/phi/kernels/funcs/detail/strided_memcpy.h +++ b/paddle/phi/kernels/funcs/detail/strided_memcpy.h @@ -32,9 +32,9 @@ template struct StridedMemcpyFunctor { void operator()(const phi::DeviceContext& dev_ctx, const T* src, - const int64_t* src_stride, - const int64_t* dst_dim, - const int64_t* dst_stride, + const int64_t* src_stride UNUSED, + const int64_t* dst_dim UNUSED, + const int64_t* dst_stride UNUSED, T* dst) const { auto place = dev_ctx.GetPlace(); if (place.GetType() == phi::AllocationType::CPU) { @@ -58,9 +58,9 @@ template struct StridedMemcpyFunctor { void operator()(const phi::DeviceContext& dev_ctx, const T* src, - const int64_t* src_stride, + const int64_t* src_stride UNUSED, const int64_t* dst_dim, - const int64_t* dst_stride, + const int64_t* dst_stride UNUSED, T* dst) const { auto place = dev_ctx.GetPlace(); if (place.GetType() == phi::AllocationType::CPU) { diff --git a/paddle/phi/kernels/funcs/jit/more/mkl/mkl.cc b/paddle/phi/kernels/funcs/jit/more/mkl/mkl.cc index 
daf9eac988a16..deaeba0224fb9 100644 --- a/paddle/phi/kernels/funcs/jit/more/mkl/mkl.cc +++ b/paddle/phi/kernels/funcs/jit/more/mkl/mkl.cc @@ -246,10 +246,10 @@ bool MatMulKernel::CanBeUsed(const matmul_attr_t& attr) const { return true; } -#define AWALYS_USE_ME_WITH_DOUBLE(func) \ - template <> \ - bool func##Kernel::CanBeUsed(const int& d) const { \ - return true; \ +#define AWALYS_USE_ME_WITH_DOUBLE(func) \ + template <> \ + bool func##Kernel::CanBeUsed(const int& d UNUSED) const { \ + return true; \ } AWALYS_USE_ME_WITH_DOUBLE(VMul); diff --git a/paddle/phi/kernels/funcs/scatter.h b/paddle/phi/kernels/funcs/scatter.h index d430588541837..64bca648251b9 100644 --- a/paddle/phi/kernels/funcs/scatter.h +++ b/paddle/phi/kernels/funcs/scatter.h @@ -72,7 +72,7 @@ elementwise_inner_add(const phi::CPUContext& ctx UNUSED, * return: output tensor */ template -void ScatterAssign(const phi::CPUContext& ctx, +void ScatterAssign(const phi::CPUContext& ctx UNUSED, const DenseTensor& src, const DenseTensor& index, DenseTensor* output) { @@ -241,7 +241,7 @@ void ScatterAssignAdd(const phi::CPUContext& ctx, // The function is only for scatter grad x, // however update grad use gather template -void CPUScatterGradForX(const phi::CPUContext& ctx, +void CPUScatterGradForX(const phi::CPUContext& ctx UNUSED, const DenseTensor& index, DenseTensor* output) { int64_t index_size = index.dims()[0]; diff --git a/paddle/phi/kernels/sparse/cpu/conv.h b/paddle/phi/kernels/sparse/cpu/conv.h index bbbfc625c127a..abbedd6b491cb 100644 --- a/paddle/phi/kernels/sparse/cpu/conv.h +++ b/paddle/phi/kernels/sparse/cpu/conv.h @@ -136,7 +136,7 @@ void ProductRuleBook(const Context& dev_ctx, template void UpdateRulebookAndOutIndex(const Context& dev_ctx, const SparseCooTensor& x, - const int kernel_size, + const int kernel_size UNUSED, const int out_channels, const DDim& out_dims, DenseTensor* rulebook, From 98637169a45f9d0a88d5719e636d0939a2ac3c00 Mon Sep 17 00:00:00 2001 From: Yangrl <2535184404@qq.com> Date: Wed, 10 May 2023 06:51:00 +0000 Subject: [PATCH 6/6] test,test=develop --- .../fluid/operators/collective/send_v2_op.h | 2 +- .../fused/mkldnn/fusion_gru_mkldnn_op.cc | 4 +-- .../fused/mkldnn/fusion_lstm_mkldnn_op.cc | 6 ++-- paddle/fluid/operators/memcpy_h2d_op.h | 2 +- paddle/fluid/operators/memcpy_op.h | 2 +- paddle/phi/core/tensor_utils.cc | 10 +++---- paddle/phi/kernels/cpu/diag_grad_kernel.cc | 2 +- .../kernels/cpu/fill_diagonal_grad_kernel.cc | 2 +- .../phi/kernels/cpu/gather_nd_grad_kernel.cc | 2 +- .../phi/kernels/cpu/index_add_grad_kernel.cc | 2 +- .../kernels/cpu/index_select_grad_kernel.cc | 2 +- .../phi/kernels/cpu/mean_all_grad_kernel.cc | 2 +- .../kernels/cpu/put_along_axis_grad_kernel.cc | 4 +-- .../kernels/cpu/scatter_nd_add_grad_kernel.cc | 2 +- .../impl/elementwise_grad_kernel_impl.h | 30 ++++++++++++------- paddle/phi/kernels/impl/eye_kernel_impl.h | 2 +- paddle/phi/kernels/impl/fill_kernel_impl.h | 2 +- .../phi/kernels/onednn/interpolate_kernel.cc | 4 +-- paddle/phi/kernels/unsqueeze_kernel.h | 2 +- 19 files changed, 47 insertions(+), 37 deletions(-) diff --git a/paddle/fluid/operators/collective/send_v2_op.h b/paddle/fluid/operators/collective/send_v2_op.h index 047796dfe2430..7f51861008942 100644 --- a/paddle/fluid/operators/collective/send_v2_op.h +++ b/paddle/fluid/operators/collective/send_v2_op.h @@ -28,7 +28,7 @@ namespace operators { template class SendOpV2CPUKernel : public framework::OpKernel { public: - void Compute(const framework::ExecutionContext& ctx) const override { + void Compute(const 
framework::ExecutionContext& ctx UNUSED) const override { PADDLE_THROW(platform::errors::Unavailable( "Do not support send for cpu kernel now.")); } diff --git a/paddle/fluid/operators/fused/mkldnn/fusion_gru_mkldnn_op.cc b/paddle/fluid/operators/fused/mkldnn/fusion_gru_mkldnn_op.cc index 806c883228035..05d1e64f92ae7 100644 --- a/paddle/fluid/operators/fused/mkldnn/fusion_gru_mkldnn_op.cc +++ b/paddle/fluid/operators/fused/mkldnn/fusion_gru_mkldnn_op.cc @@ -33,7 +33,7 @@ class GRUMKLDNNHandler : public RNNMKLDNNHandler { GRUMKLDNNHandler(const paddle::framework::ExecutionContext& ctx, const OneDNNContext& dev_ctx, const dnnl::engine onednn_engine, - platform::Place cpu_place, + platform::Place cpu_place UNUSED, const phi::DenseTensor* input, const phi::DenseTensor* weight_h, const phi::DenseTensor* h0, @@ -42,7 +42,7 @@ class GRUMKLDNNHandler : public RNNMKLDNNHandler { const int64_t Ti, const int64_t IC, const int64_t OC, - const std::string& unique_name) + const std::string& unique_name UNUSED) : RNNMKLDNNHandler( ctx, dev_ctx, diff --git a/paddle/fluid/operators/fused/mkldnn/fusion_lstm_mkldnn_op.cc b/paddle/fluid/operators/fused/mkldnn/fusion_lstm_mkldnn_op.cc index bda5eab2d725f..d973c5e89a626 100644 --- a/paddle/fluid/operators/fused/mkldnn/fusion_lstm_mkldnn_op.cc +++ b/paddle/fluid/operators/fused/mkldnn/fusion_lstm_mkldnn_op.cc @@ -33,17 +33,17 @@ class LSTMMKLDNNHandler LSTMMKLDNNHandler(const paddle::framework::ExecutionContext& ctx, const OneDNNContext& dev_ctx, const dnnl::engine onednn_engine, - platform::Place cpu_place, + platform::Place cpu_place UNUSED, const phi::DenseTensor* input, const phi::DenseTensor* weight_h, const phi::DenseTensor* h0, - const phi::DenseTensor* c0, + const phi::DenseTensor* c0 UNUSED, const bool is_reverse, const int64_t N, const int64_t Ti, const int64_t IC, const int64_t OC, - const std::string& unique_name) + const std::string& unique_name UNUSED) : RNNMKLDNNHandler( ctx, dev_ctx, diff --git a/paddle/fluid/operators/memcpy_h2d_op.h b/paddle/fluid/operators/memcpy_h2d_op.h index 9946e80bbc30f..5f480461d77cd 100644 --- a/paddle/fluid/operators/memcpy_h2d_op.h +++ b/paddle/fluid/operators/memcpy_h2d_op.h @@ -66,7 +66,7 @@ class MemcpyH2DFunctor { } template - void operator()(const T &v) const { + void operator()(const T &v UNUSED) const { PADDLE_ENFORCE_EQ( true, false, diff --git a/paddle/fluid/operators/memcpy_op.h b/paddle/fluid/operators/memcpy_op.h index 3c989b151a315..1cb077ee8a4b9 100644 --- a/paddle/fluid/operators/memcpy_op.h +++ b/paddle/fluid/operators/memcpy_op.h @@ -81,7 +81,7 @@ class MemcpyFunctor { } template - void operator()(const T &v) const { + void operator()(const T &v UNUSED) const { PADDLE_ENFORCE_EQ( true, false, diff --git a/paddle/phi/core/tensor_utils.cc b/paddle/phi/core/tensor_utils.cc index c73d6dc369bb8..abe44d3e2550b 100644 --- a/paddle/phi/core/tensor_utils.cc +++ b/paddle/phi/core/tensor_utils.cc @@ -318,11 +318,11 @@ void Copy(const Context& dev_ctx, } template -void Copy(const Context& dev_ctx, - const TensorArray& src, - Place dst_place, - bool blocking, - TensorArray* dst) { +void Copy(const Context& dev_ctx UNUSED, + const TensorArray& src UNUSED, + Place dst_place UNUSED, + bool blocking UNUSED, + TensorArray* dst UNUSED) { // NOTE(Ruibiao): implements Copy() for TensorArray when needed. 
PADDLE_THROW(errors::Unimplemented("Copy for TensorArray is unimplemented.")); } diff --git a/paddle/phi/kernels/cpu/diag_grad_kernel.cc b/paddle/phi/kernels/cpu/diag_grad_kernel.cc index 13d3d679ff006..5a2f15d11428a 100644 --- a/paddle/phi/kernels/cpu/diag_grad_kernel.cc +++ b/paddle/phi/kernels/cpu/diag_grad_kernel.cc @@ -23,7 +23,7 @@ namespace phi { template void DiagGradKernel(const Context& dev_ctx, - const DenseTensor& x, + const DenseTensor& x UNUSED, const DenseTensor& out_grad, int offset, DenseTensor* x_grad) { diff --git a/paddle/phi/kernels/cpu/fill_diagonal_grad_kernel.cc b/paddle/phi/kernels/cpu/fill_diagonal_grad_kernel.cc index 0fe9c50dc15e8..204c544e2d95f 100644 --- a/paddle/phi/kernels/cpu/fill_diagonal_grad_kernel.cc +++ b/paddle/phi/kernels/cpu/fill_diagonal_grad_kernel.cc @@ -22,7 +22,7 @@ namespace phi { template void FillDiagonalGradKernel(const Context& ctx, const DenseTensor& out_grad, - float value, + float value UNUSED, int offset, bool wrap, DenseTensor* x_grad) { diff --git a/paddle/phi/kernels/cpu/gather_nd_grad_kernel.cc b/paddle/phi/kernels/cpu/gather_nd_grad_kernel.cc index 88a288afd318e..5aaec6f6139e5 100644 --- a/paddle/phi/kernels/cpu/gather_nd_grad_kernel.cc +++ b/paddle/phi/kernels/cpu/gather_nd_grad_kernel.cc @@ -23,7 +23,7 @@ namespace phi { template void GatherNdGradKernel(const Context &ctx, - const DenseTensor &x, + const DenseTensor &x UNUSED, const DenseTensor &index, const DenseTensor &out_grad, DenseTensor *x_grad) { diff --git a/paddle/phi/kernels/cpu/index_add_grad_kernel.cc b/paddle/phi/kernels/cpu/index_add_grad_kernel.cc index 007d8927377f8..a60d52f2005a4 100644 --- a/paddle/phi/kernels/cpu/index_add_grad_kernel.cc +++ b/paddle/phi/kernels/cpu/index_add_grad_kernel.cc @@ -23,7 +23,7 @@ namespace phi { template void IndexAddGradKernel(const Context& ctx, const DenseTensor& index, - const DenseTensor& add_value, + const DenseTensor& add_value UNUSED, const DenseTensor& out_grad, int axis, DenseTensor* x_grad, diff --git a/paddle/phi/kernels/cpu/index_select_grad_kernel.cc b/paddle/phi/kernels/cpu/index_select_grad_kernel.cc index cf8176687eab2..fc2e88b2f9906 100644 --- a/paddle/phi/kernels/cpu/index_select_grad_kernel.cc +++ b/paddle/phi/kernels/cpu/index_select_grad_kernel.cc @@ -22,7 +22,7 @@ namespace phi { template void IndexSelectGradKernel(const Context& ctx, - const DenseTensor& x, + const DenseTensor& x UNUSED, const DenseTensor& index, const DenseTensor& out_grad, int dim, diff --git a/paddle/phi/kernels/cpu/mean_all_grad_kernel.cc b/paddle/phi/kernels/cpu/mean_all_grad_kernel.cc index 00321b010598f..81556643284f0 100644 --- a/paddle/phi/kernels/cpu/mean_all_grad_kernel.cc +++ b/paddle/phi/kernels/cpu/mean_all_grad_kernel.cc @@ -21,7 +21,7 @@ namespace phi { template void MeanAllGradKernel(const Context& dev_ctx, - const DenseTensor& x, + const DenseTensor& x UNUSED, const DenseTensor& out_grad, DenseTensor* x_grad) { PADDLE_ENFORCE_EQ(out_grad.numel(), diff --git a/paddle/phi/kernels/cpu/put_along_axis_grad_kernel.cc b/paddle/phi/kernels/cpu/put_along_axis_grad_kernel.cc index 5dce970f32ef0..d44af05357a9a 100644 --- a/paddle/phi/kernels/cpu/put_along_axis_grad_kernel.cc +++ b/paddle/phi/kernels/cpu/put_along_axis_grad_kernel.cc @@ -25,11 +25,11 @@ namespace phi { template void PutAlongAxisGradKernel(const Context& dev_ctx, - const DenseTensor& x, + const DenseTensor& x UNUSED, const DenseTensor& index, const DenseTensor& out_grad, int axis, - const std::string& reduce, + const std::string& reduce UNUSED, DenseTensor* x_grad, 
                             DenseTensor* value_grad) {
   PADDLE_ENFORCE_EQ(
diff --git a/paddle/phi/kernels/cpu/scatter_nd_add_grad_kernel.cc b/paddle/phi/kernels/cpu/scatter_nd_add_grad_kernel.cc
index 844e6370caf73..d8009bc85881c 100644
--- a/paddle/phi/kernels/cpu/scatter_nd_add_grad_kernel.cc
+++ b/paddle/phi/kernels/cpu/scatter_nd_add_grad_kernel.cc
@@ -24,7 +24,7 @@ namespace phi {
 template <typename T, typename Context>
 void ScatterNdAddGradKernel(const Context &ctx,
                             const DenseTensor &index,
-                            const DenseTensor &updates,
+                            const DenseTensor &updates UNUSED,
                             const DenseTensor &out_grad,
                             DenseTensor *x_grad,
                             DenseTensor *updates_grad) {
diff --git a/paddle/phi/kernels/impl/elementwise_grad_kernel_impl.h b/paddle/phi/kernels/impl/elementwise_grad_kernel_impl.h
index 15f99a58fa5a5..e4a29716e7db4 100644
--- a/paddle/phi/kernels/impl/elementwise_grad_kernel_impl.h
+++ b/paddle/phi/kernels/impl/elementwise_grad_kernel_impl.h
@@ -362,13 +362,18 @@ void ElementwiseFMinGradKernel(const Context& dev_ctx,
 
 template <typename T>
 struct MulGradDX {
-  HOSTDEVICE T operator()(T x, T y, T out, T dout) const { return dout * y; }
+  HOSTDEVICE T operator()(T x UNUSED, T y, T out UNUSED, T dout) const {
+    return dout * y;
+  }
 };
 
 // avoid [-Wint-in-bool-context] warning
 template <>
 struct MulGradDX<bool> {
-  HOSTDEVICE bool operator()(bool x, bool y, bool out, bool dout) const {
+  HOSTDEVICE bool operator()(bool x UNUSED,
+                             bool y,
+                             bool out UNUSED,
+                             bool dout) const {
     return dout && y;
   }
 };
@@ -393,13 +398,18 @@ struct MulGradDX> {
 
 template <typename T>
 struct MulGradDY {
-  HOSTDEVICE T operator()(T x, T y, T out, T dout) const { return dout * x; }
+  HOSTDEVICE T operator()(T x, T y UNUSED, T out UNUSED, T dout) const {
+    return dout * x;
+  }
 };
 
 // avoid [-Wint-in-bool-context] warning
 template <>
 struct MulGradDY<bool> {
-  HOSTDEVICE bool operator()(bool x, bool y, bool out, bool dout) const {
+  HOSTDEVICE bool operator()(bool x,
+                             bool y UNUSED,
+                             bool out UNUSED,
+                             bool dout) const {
     return dout && x;
   }
 };
@@ -824,14 +834,14 @@ void MultiplyTripleGradKernel(const Context& dev_ctx,
 
 template <typename T>
 struct MaxGradDx {
-  HOSTDEVICE T operator()(T x, T y, T out, T dout) const {
+  HOSTDEVICE T operator()(T x, T y, T out UNUSED, T dout) const {
     return dout * static_cast<T>(x > y);
   }
 };
 
 template <typename T>
 struct MaxGradDy {
-  HOSTDEVICE T operator()(T x, T y, T out, T dout) const {
+  HOSTDEVICE T operator()(T x, T y, T out UNUSED, T dout) const {
     return dout * static_cast<T>(x <= y);
   }
 };
@@ -843,14 +853,14 @@ struct MaxGradDy {
 */
 template <typename T>
 struct MinGradDx {
-  HOSTDEVICE T operator()(T x, T y, T out, T dout) const {
+  HOSTDEVICE T operator()(T x, T y, T out UNUSED, T dout) const {
     return dout * static_cast<T>(x < y);
  }
 };
 
 template <typename T>
 struct MinGradDy {
-  HOSTDEVICE T operator()(T x, T y, T out, T dout) const {
+  HOSTDEVICE T operator()(T x, T y, T out UNUSED, T dout) const {
     return dout * static_cast<T>(x >= y);
   }
 };
@@ -922,14 +932,14 @@ compute_pow_grad_dy(T x, T y, T out, T dout) {
 }
 #else
 template
-HOSTDEVICE T compute_pow_grad_dx(T x, T y, T out, T dout) {
+HOSTDEVICE T compute_pow_grad_dx(T x, T y, T out UNUSED, T dout) {
   MPType x_val = static_cast<MPType>(x);
   MPType y_val = static_cast<MPType>(y);
   return static_cast<T>(static_cast<MPType>(dout) * y_val *
                         std::pow(x_val, y_val - 1));
 }
 template
-HOSTDEVICE T compute_pow_grad_dy(T x, T y, T out, T dout) {
+HOSTDEVICE T compute_pow_grad_dy(T x, T y, T out UNUSED, T dout) {
   MPType x_val = static_cast<MPType>(x);
   MPType y_val = static_cast<MPType>(y);
   return static_cast<T>(static_cast<MPType>(dout) * std::log(x_val) *
diff --git a/paddle/phi/kernels/impl/eye_kernel_impl.h b/paddle/phi/kernels/impl/eye_kernel_impl.h
index 2d373f99a277a..c93ac260dd874 100644
--- a/paddle/phi/kernels/impl/eye_kernel_impl.h
+++ b/paddle/phi/kernels/impl/eye_kernel_impl.h
@@ -37,7 +37,7 @@ template
 void EyeKernel(const Context& ctx,
                const Scalar& num_rows,
                const Scalar& num_columns,
-               DataType dtype,
+               DataType dtype UNUSED,
                DenseTensor* out) {
   auto columns = num_columns.to();
   auto rows = num_rows.to();
diff --git a/paddle/phi/kernels/impl/fill_kernel_impl.h b/paddle/phi/kernels/impl/fill_kernel_impl.h
index 7d10ea42bd6b6..6894204cd06a4 100644
--- a/paddle/phi/kernels/impl/fill_kernel_impl.h
+++ b/paddle/phi/kernels/impl/fill_kernel_impl.h
@@ -24,7 +24,7 @@ namespace phi {
 
 template <typename T, typename Context>
 void FillKernel(const Context& dev_ctx,
-                const DenseTensor& x,
+                const DenseTensor& x UNUSED,
                 const Scalar& value,
                 DenseTensor* out) {
   T fill_var = value.to<T>();
diff --git a/paddle/phi/kernels/onednn/interpolate_kernel.cc b/paddle/phi/kernels/onednn/interpolate_kernel.cc
index b9a8672d792ce..508cdcc329a2e 100644
--- a/paddle/phi/kernels/onednn/interpolate_kernel.cc
+++ b/paddle/phi/kernels/onednn/interpolate_kernel.cc
@@ -239,8 +239,8 @@ void NearestInterpKernel(
     int out_w,
     const std::vector& scale,
     const std::string& interp_method,
-    bool align_corners,
-    int align_mode,
+    bool align_corners UNUSED,
+    int align_mode UNUSED,
     DenseTensor* output) {
   InterpolateKernel(ctx,
                     x,
diff --git a/paddle/phi/kernels/unsqueeze_kernel.h b/paddle/phi/kernels/unsqueeze_kernel.h
index bb190e14179c0..e6ad4dc75d3d5 100644
--- a/paddle/phi/kernels/unsqueeze_kernel.h
+++ b/paddle/phi/kernels/unsqueeze_kernel.h
@@ -39,7 +39,7 @@ void Unsqueeze(const Context& dev_ctx,
                const DenseTensor& x,
                const IntArray& axes,
                DenseTensor* out,
-               DenseTensor* xshape) {
+               DenseTensor* xshape UNUSED) {
   MetaTensor meta_out(out);
   UnsqueezeInferMeta(x, axes, &meta_out);
   UnsqueezeInferKernel(dev_ctx, x, axes, out);