Add RConfigure preprocessor flags for pymva (R__HAS_PYMVA) and rmva (R__HAS_RMVA) (root-project#5630)

* Add preprocessor flags in RConfigure.h for pymva and rmva

* Enable RMVA when R is found and tmva is on

* Update the CNN and RNN tutorials so they also work when ROOT is built without PyMVA
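For context, a downstream macro can test the new flags the same way the updated tutorials do: include RConfigure.h and guard the optional backends with the preprocessor. A minimal sketch, assuming a ROOT build that contains this commit; the macro name and printouts are illustrative only, not part of the diff:

#include "RConfigure.h" // carries R__HAS_PYMVA / R__HAS_RMVA after this change
#include "TMVA/Tools.h"
#ifdef R__HAS_PYMVA
#include "TMVA/PyMethodBase.h"
#endif
#include <iostream>

void sketch()
{
   TMVA::Tools::Instance();
#ifdef R__HAS_PYMVA
   // Python-based TMVA methods (e.g. PyKeras) are available in this build
   TMVA::PyMethodBase::PyInitialize();
#else
   std::cout << "PyMVA not available in this ROOT build, skipping Keras-based methods\n";
#endif
#ifdef R__HAS_RMVA
   std::cout << "RMVA (R-based methods) is available in this ROOT build\n";
#endif
}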
lmoneta authored May 18, 2020
1 parent 9daf978 commit 83502c8
Showing 5 changed files with 45 additions and 7 deletions.
10 changes: 10 additions & 0 deletions cmake/modules/RootConfiguration.cmake
@@ -486,6 +486,16 @@ if (tmva-cudnn)
else()
  set(hastmvacudnn undef)
endif()
if (tmva-pymva)
  set(haspymva define)
else()
  set(haspymva undef)
endif()
if (tmva-rmva)
  set(hasrmva define)
else()
  set(hasrmva undef)
endif()

# clear cache to allow reconfiguring
# with a different CMAKE_CXX_STANDARD
4 changes: 4 additions & 0 deletions cmake/modules/SearchInstalledSoftware.cmake
@@ -1519,6 +1519,10 @@ if(tmva)
      set(tmva-pymva OFF CACHE BOOL "Disabled because Numpy or Python development package were not found (${tmva-pymva_description})" FORCE)
    endif()
  endif()
  if (R_FOUND)
    # RMVA is enabled when R is found and tmva is on
    set(tmva-rmva ON)
  endif()
  if(tmva-rmva AND NOT R_FOUND)
    set(tmva-rmva OFF CACHE BOOL "Disabled because R was not found (${tmva-rmva_description})" FORCE)
  endif()
2 changes: 2 additions & 0 deletions config/RConfigure.in
@@ -64,6 +64,8 @@
#@hastmvacpu@ R__HAS_TMVACPU /**/
#@hastmvagpu@ R__HAS_TMVAGPU /**/
#@hastmvacudnn@ R__HAS_CUDNN /**/
#@haspymva@ R__HAS_PYMVA /**/
#@hasrmva@ R__HAS_RMVA /**/


#endif
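At configure time the @haspymva@ and @hasrmva@ tokens above are substituted with the define/undef values set in RootConfiguration.cmake, so the installed RConfigure.h ends up with one preprocessor line per flag. An illustrative excerpt, assuming a build configured with tmva-pymva=ON and tmva-rmva=OFF:

#define R__HAS_PYMVA /**/
#undef R__HAS_RMVA /**/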
19 changes: 15 additions & 4 deletions tutorials/tmva/TMVA_CNN_Classification.C
@@ -125,16 +125,27 @@ void TMVA_CNN_Classification(std::vector<bool> opt = {1, 1, 1, 1})

   bool writeOutputFile = true;

   int num_threads = 0; // use default threads

   TMVA::Tools::Instance();

   // do enable MT running
   ROOT::EnableImplicitMT();
   if (num_threads >= 0) {
      ROOT::EnableImplicitMT(num_threads);
      if (num_threads > 0) gSystem->Setenv("OMP_NUM_THREADS", TString::Format("%d",num_threads));
   }
   else
      gSystem->Setenv("OMP_NUM_THREADS", "1");

   // for using Keras
   std::cout << "Running with nthreads = " << ROOT::GetThreadPoolSize() << std::endl;

#ifdef R__HAS_PYMVA
   gSystem->Setenv("KERAS_BACKEND", "tensorflow");
   // for setting openblas in single thread on SWAN
   gSystem->Setenv("OMP_NUM_THREADS", "1");
   // for using Keras
   TMVA::PyMethodBase::PyInitialize();
#else
   useKerasCNN = false;
#endif

   TFile *outputFile = nullptr;
   if (writeOutputFile)
17 changes: 14 additions & 3 deletions tutorials/tmva/TMVA_RNN_Classification.C
@@ -184,15 +184,26 @@ void TMVA_RNN_Classification(int use_type = 1)

const char *rnn_type = "RNN";

#ifdef R__HAS_PYMVA
TMVA::PyMethodBase::PyInitialize();
#else
useKeras = false;
#endif

int num_threads = 0; // use by default all threads
// do enable MT running
if (num_threads >= 0) {
ROOT::EnableImplicitMT(num_threads);
if (num_threads > 0) gSystem->Setenv("OMP_NUM_THREADS", TString::Format("%d",num_threads));
}
else
gSystem->Setenv("OMP_NUM_THREADS", "1");

ROOT::EnableImplicitMT();
TMVA::Config::Instance();

std::cout << "nthreads = " << ROOT::GetThreadPoolSize() << std::endl;
std::cout << "Running with nthreads = " << ROOT::GetThreadPoolSize() << std::endl;

TString inputFileName = "time_data_t10_d30.root";
// TString inputFileName = "/home/moneta/data/sample_images_32x32.gsoc.root";

bool fileExist = !gSystem->AccessPathName(inputFileName);

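With useKeras forced to false on builds without PyMVA, the rest of the macro can keep a single code path and simply skip the Keras booking. A hypothetical illustration of how such a flag typically gates the booking further down in a TMVA macro; the factory/dataloader objects and the option string are placeholders and not part of this diff:

   if (useKeras) {
      // book the Python/Keras-based method only when PyMVA is available in the build
      factory->BookMethod(dataloader, TMVA::Types::kPyKeras, "PyKeras_RNN",
                          "H:!V:VarTransform=None:FilenameModel=model_rnn.h5:"
                          "NumEpochs=20:BatchSize=100");
   }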
