
merge conflicts #2
vlnistor committed Mar 23, 2024
1 parent d5bde4e commit 9e322bb
Showing 1 changed file with 7 additions and 13 deletions.
src/kbmod/search/stack_search.cpp: 20 changes (7 additions & 13 deletions)
@@ -15,12 +15,6 @@ extern "C" void evaluateTrajectory(PsiPhiArrayMeta psi_phi_meta, void* psi_phi_v
 // I'd imaging...
 auto rs_logger = logging::getLogger("kbmod.search.run_search");
 
-// This logger is often used in this module so we might as well declare it
-// global, but this would generally be a one-liner like:
-// logging::getLogger("kbmod.search.run_search") -> level(msg)
-// I'd imaging...
-auto rs_logger = logging::getLogger("kbmod.search.run_search");
-
 StackSearch::StackSearch(ImageStack& imstack) : stack(imstack), results(0), gpu_search_list(0) {
     debug_info = false;
     psi_phi_generated = false;
@@ -167,16 +161,16 @@ void StackSearch::finish_search(){
 }
 
 void StackSearch::prepare_batch_search(std::vector<Trajectory>& search_list, int min_observations){
-    DebugTimer psi_phi_timer = DebugTimer("Creating psi/phi buffers", debug_info);
+    DebugTimer psi_phi_timer = DebugTimer("Creating psi/phi buffers", rs_logger);
     prepare_psi_phi();
     psi_phi_array.move_to_gpu();
     psi_phi_timer.stop();
 
 
     int num_to_search = search_list.size();
     if (debug_info) std::cout << "Preparing to search " << num_to_search << " trajectories... \n" << std::flush;
-    gpu_search_list = TrajectoryList(search_list);
-    gpu_search_list.move_to_gpu();
+    // gpu_search_list = TrajectoryList(search_list);
+    // gpu_search_list.move_to_gpu();
 
     params.min_observations = min_observations;
 }
@@ -244,7 +238,7 @@ std::vector<Trajectory> StackSearch::search_batch(){
         throw std::runtime_error("PsiPhiArray array not allocated on GPU. Did you forget to call prepare_search?");
     }
 
-    DebugTimer core_timer = DebugTimer("Running batch search", debug_info);
+    DebugTimer core_timer = DebugTimer("Running batch search", rs_logger);
     // Allocate a vector for the results and move it onto the GPU.
     int search_width = params.x_start_max - params.x_start_min;
     int search_height = params.y_start_max - params.y_start_min;
@@ -256,11 +250,11 @@
                   << " Y=[" << params.y_start_min << ", " << params.y_start_max << "]\n";
         std::cout << "Allocating space for " << max_results << " results.\n";
     }
-    results = TrajectoryList(max_results);
+    results.resize(max_results);
     results.move_to_gpu();
 
     // Do the actual search on the GPU.
-    DebugTimer search_timer = DebugTimer("Running search", debug_info);
+    DebugTimer search_timer = DebugTimer("Running search", rs_logger);
 #ifdef HAVE_CUDA
     deviceSearchFilter(psi_phi_array, params, gpu_search_list, results);
 #else
@@ -269,7 +263,7 @@
     search_timer.stop();
 
     results.move_to_cpu();
-    DebugTimer sort_timer = DebugTimer("Sorting results", debug_info);
+    DebugTimer sort_timer = DebugTimer("Sorting results", rs_logger);
     results.sort_by_likelihood();
     sort_timer.stop();
     core_timer.stop();
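The recurring change in this diff is that each DebugTimer now reports through the module-level rs_logger instead of taking the debug_info flag. The snippet below is a minimal, self-contained sketch of that pattern only; the Logger and DebugTimer classes here are illustrative stand-ins, not the actual kbmod implementations.

#include <chrono>
#include <iostream>
#include <string>

// Stand-in for a named logger (the real code uses logging::getLogger(...)).
class Logger {
public:
    explicit Logger(std::string name) : name_(std::move(name)) {}
    void debug(const std::string& msg) const { std::cout << "[" << name_ << "] " << msg << "\n"; }

private:
    std::string name_;
};

// Stand-in timer: logs when it starts and when stop() is called, so call
// sites no longer need their own `if (debug_info)` checks.
class DebugTimer {
public:
    DebugTimer(std::string task, const Logger& logger)
            : task_(std::move(task)), logger_(logger), start_(std::chrono::steady_clock::now()) {
        logger_.debug("Starting: " + task_);
    }

    void stop() {
        auto elapsed_ms = std::chrono::duration_cast<std::chrono::milliseconds>(
                                  std::chrono::steady_clock::now() - start_)
                                  .count();
        logger_.debug("Finished: " + task_ + " in " + std::to_string(elapsed_ms) + " ms");
    }

private:
    std::string task_;
    const Logger& logger_;
    std::chrono::steady_clock::time_point start_;
};

int main() {
    // Mirrors the call sites in the diff: one named logger, timers report through it.
    Logger rs_logger("kbmod.search.run_search");

    DebugTimer core_timer = DebugTimer("Running batch search", rs_logger);
    // ... work that was previously bracketed by if (debug_info) prints ...
    core_timer.stop();
    return 0;
}

In the committed file itself, the equivalent calls are the DebugTimer(..., rs_logger) constructions shown in the hunks above.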
