[cherry-pick] rename WITH_INFERENCE_NVTX to WITH_NVTX and fix compile error (#55220)
yuanlehome authored Jul 7, 2023
commit 2817610 (1 parent: 32bbf74)
Showing 5 changed files with 32 additions and 42 deletions.
CMakeLists.txt (5 additions, 1 deletion)
@@ -269,7 +269,7 @@ option(WITH_PSCORE "Compile with parameter server support" ${WITH_DISTRIBUTE})
option(WITH_HETERPS "Compile with heterps" OFF)
option(WITH_INFERENCE_API_TEST
"Test fluid inference C++ high-level api interface" OFF)
- option(WITH_INFERENCE_NVTX "Paddle inference with nvtx for profiler" OFF)
+ option(WITH_NVTX "Paddle with nvtx for profiler" OFF)
option(PY_VERSION "Compile PaddlePaddle with python3 support" ${PY_VERSION})
option(WITH_DGC "Use DGC(Deep Gradient Compression) or not" ${WITH_DISTRIBUTE})
option(
@@ -623,6 +623,10 @@ if(WITH_MIPS)
add_definitions(-DPADDLE_WITH_MIPS)
endif()

+ if(WITH_NVTX AND NOT WIN32)
+   add_definitions(-DPADDLE_WITH_NVTX)
+ endif()
+
if(WITH_LOONGARCH)
set(WITH_XBYAK
OFF
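For context, configuring a non-Windows build with WITH_NVTX=ON now defines PADDLE_WITH_NVTX for the whole build, so profiling markers compile out by default. A minimal sketch of how such a compile-time guard is typically consumed, assuming the plain NVTX C API (nvtxRangePushA / nvtxRangePop from nvToolsExt.h) rather than Paddle's own wrappers; RunModelOnce and the include path are illustrative assumptions, not Paddle code:

    #ifdef PADDLE_WITH_NVTX
    #include <nvToolsExt.h>  // NVTX C API: nvtxRangePushA / nvtxRangePop
    #endif

    void RunModelOnce() {
    #ifdef PADDLE_WITH_NVTX
      nvtxRangePushA("model");  // open a named range visible in Nsight timelines
    #endif
      // ... run the operators ...
    #ifdef PADDLE_WITH_NVTX
      nvtxRangePop();  // close the "model" range
    #endif
    }

The naive_executor.cc hunks below follow the same pattern through Paddle's CudaNvtxRangePush / CudaNvtxRangePop wrappers.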
cmake/inference_lib.cmake (0 additions, 4 deletions)
@@ -366,10 +366,6 @@ else()
)
endif()

- if(WITH_INFERENCE_NVTX AND NOT WIN32)
-   add_definitions(-DPADDLE_WITH_INFERENCE_NVTX)
- endif()
-
copy(
inference_lib_dist
SRCS ${src_dir}/inference/capi_exp/pd_*.h ${paddle_inference_c_lib}
paddle/fluid/framework/CMakeLists.txt (22 additions, 28 deletions)
@@ -661,37 +661,31 @@ cc_library(
SRCS variable_helper.cc
DEPS lod_tensor)

+ set(NAIVE_EXECUTOR_DEPS
+     op_registry
+     denormal
+     device_context
+     scope
+     framework_proto
+     glog
+     lod_rank_table
+     feed_fetch_method
+     graph_to_program_pass
+     variable_helper)
+
if(TENSORRT_FOUND)
-   cc_library(
-     naive_executor
-     SRCS naive_executor.cc
-     DEPS op_registry
-          denormal
-          device_context
-          scope
-          framework_proto
-          glog
-          lod_rank_table
-          feed_fetch_method
-          graph_to_program_pass
-          variable_helper
-          tensorrt_engine_op)
- else()
-   cc_library(
-     naive_executor
-     SRCS naive_executor.cc
-     DEPS op_registry
-          denormal
-          device_context
-          scope
-          framework_proto
-          glog
-          lod_rank_table
-          feed_fetch_method
-          graph_to_program_pass
-          variable_helper)
+   set(NAIVE_EXECUTOR_DEPS ${NAIVE_EXECUTOR_DEPS} tensorrt_engine_op)
endif()

+ if(WITH_NVTX AND NOT WIN32)
+   set(NAIVE_EXECUTOR_DEPS ${NAIVE_EXECUTOR_DEPS} cuda_profiler)
+ endif()
+
+ cc_library(
+   naive_executor
+   SRCS naive_executor.cc
+   DEPS ${NAIVE_EXECUTOR_DEPS})
+
cc_library(
executor_gc_helper
SRCS executor_gc_helper.cc
paddle/fluid/framework/naive_executor.cc (5 additions, 5 deletions)
@@ -28,7 +28,7 @@
#ifdef PADDLE_WITH_TENSORRT
#include "paddle/fluid/operators/tensorrt/tensorrt_engine_op.h"
#endif
- #ifdef PADDLE_WITH_INFERENCE_NVTX
+ #ifdef PADDLE_WITH_NVTX
#include "paddle/fluid/platform/device/gpu/cuda/cuda_profiler.h"
#endif

@@ -54,14 +54,14 @@ void NaiveExecutor::Run() {
platform::RegisterModelLayout(ops_, place_);
#endif
platform::ScopedFlushDenormal flush;
- #ifdef PADDLE_WITH_INFERENCE_NVTX
+ #ifdef PADDLE_WITH_NVTX
platform::CudaNvtxRangePush("model", platform::NvtxRangeColor::Yellow);
#endif
for (auto &op : ops_) {
VLOG(4) << std::this_thread::get_id() << " run "
<< op->DebugStringEx(scope_) << " on scope " << scope_;
op->SetIsCalledByExecutor(false);
- #ifdef PADDLE_WITH_INFERENCE_NVTX
+ #ifdef PADDLE_WITH_NVTX
platform::CudaNvtxRangePush(op->Type() + "|" + op->OutputVars(true).front(),
platform::NvtxRangeColor::Green);
#endif
@@ -98,14 +98,14 @@
}
}

- #ifdef PADDLE_WITH_INFERENCE_NVTX
+ #ifdef PADDLE_WITH_NVTX
platform::CudaNvtxRangePop();
#endif
for (auto &func : hookfuncs_) {
func(op.get(), scope_);
}
}
- #ifdef PADDLE_WITH_INFERENCE_NVTX
+ #ifdef PADDLE_WITH_NVTX
platform::CudaNvtxRangePop();
#endif
}
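The Run() loop above pairs every CudaNvtxRangePush with a matching CudaNvtxRangePop by hand. As an illustrative sketch only (not part of this commit), an RAII guard can make that pairing automatic; it assumes the plain NVTX C API and a hypothetical ScopedNvtxRange class name rather than Paddle's wrappers, which also take a color argument:

    #include <string>
    #ifdef PADDLE_WITH_NVTX
    #include <nvToolsExt.h>
    #endif

    // Hypothetical helper: opens an NVTX range on construction and closes it on
    // destruction, so early returns cannot leak an open range.
    class ScopedNvtxRange {
     public:
      explicit ScopedNvtxRange(const std::string &name) {
    #ifdef PADDLE_WITH_NVTX
        nvtxRangePushA(name.c_str());
    #else
        (void)name;  // avoid an unused-parameter warning in non-NVTX builds
    #endif
      }
      ~ScopedNvtxRange() {
    #ifdef PADDLE_WITH_NVTX
        nvtxRangePop();
    #endif
      }
      ScopedNvtxRange(const ScopedNvtxRange &) = delete;
      ScopedNvtxRange &operator=(const ScopedNvtxRange &) = delete;
    };

    // Usage sketch inside a per-op loop:
    //   ScopedNvtxRange range(op->Type() + "|" + op->OutputVars(true).front());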
paddle/fluid/inference/CMakeLists.txt (0 additions, 4 deletions)
@@ -107,10 +107,6 @@ if(WITH_PSCORE)
set(SHARED_INFERENCE_DEPS ${SHARED_INFERENCE_DEPS} fleet ps_service)
endif()

- if(WITH_INFERENCE_NVTX AND NOT WIN32)
-   set(SHARED_INFERENCE_DEPS ${SHARED_INFERENCE_DEPS} cuda_profiler)
- endif()
-
if(WITH_ONNXRUNTIME)
set(SHARED_INFERENCE_SRCS
${SHARED_INFERENCE_SRCS}
