From 9e322bb730418e6d881a4eb33bdea3651ddd498c Mon Sep 17 00:00:00 2001
From: Vlad
Date: Sat, 23 Mar 2024 20:45:34 +0000
Subject: [PATCH] merge conflicts #2

---
 src/kbmod/search/stack_search.cpp | 20 +++++++-------------
 1 file changed, 7 insertions(+), 13 deletions(-)

diff --git a/src/kbmod/search/stack_search.cpp b/src/kbmod/search/stack_search.cpp
index dc998cf12..198170f8f 100644
--- a/src/kbmod/search/stack_search.cpp
+++ b/src/kbmod/search/stack_search.cpp
@@ -15,12 +15,6 @@ extern "C" void evaluateTrajectory(PsiPhiArrayMeta psi_phi_meta, void* psi_phi_v
 // I'd imaging...
 auto rs_logger = logging::getLogger("kbmod.search.run_search");
 
-// This logger is often used in this module so we might as well declare it
-// global, but this would generally be a one-liner like:
-// logging::getLogger("kbmod.search.run_search") -> level(msg)
-// I'd imaging...
-auto rs_logger = logging::getLogger("kbmod.search.run_search");
-
 StackSearch::StackSearch(ImageStack& imstack) : stack(imstack), results(0), gpu_search_list(0) {
     debug_info = false;
     psi_phi_generated = false;
@@ -167,7 +161,7 @@ void StackSearch::finish_search(){
 }
 
 void StackSearch::prepare_batch_search(std::vector<Trajectory>& search_list, int min_observations){
-    DebugTimer psi_phi_timer = DebugTimer("Creating psi/phi buffers", debug_info);
+    DebugTimer psi_phi_timer = DebugTimer("Creating psi/phi buffers", rs_logger);
     prepare_psi_phi();
     psi_phi_array.move_to_gpu();
     psi_phi_timer.stop();
@@ -175,8 +169,8 @@ void StackSearch::prepare_batch_search(std::vector<Trajectory>& search_list, int
     int num_to_search = search_list.size();
     if (debug_info) std::cout << "Preparing to search " << num_to_search << " trajectories... \n" << std::flush;
 
-    gpu_search_list = TrajectoryList(search_list);
-    gpu_search_list.move_to_gpu();
+    // gpu_search_list = TrajectoryList(search_list);
+    // gpu_search_list.move_to_gpu();
 
     params.min_observations = min_observations;
 }
@@ -244,7 +238,7 @@ std::vector<Trajectory> StackSearch::search_batch(){
         throw std::runtime_error("PsiPhiArray array not allocated on GPU. Did you forget to call prepare_search?");
     }
 
-    DebugTimer core_timer = DebugTimer("Running batch search", debug_info);
+    DebugTimer core_timer = DebugTimer("Running batch search", rs_logger);
     // Allocate a vector for the results and move it onto the GPU.
     int search_width = params.x_start_max - params.x_start_min;
     int search_height = params.y_start_max - params.y_start_min;
@@ -256,11 +250,11 @@ std::vector<Trajectory> StackSearch::search_batch(){
                   << " Y=[" << params.y_start_min << ", " << params.y_start_max << "]\n";
         std::cout << "Allocating space for " << max_results << " results.\n";
     }
-    results = TrajectoryList(max_results);
+    results.resize(max_results);
     results.move_to_gpu();
 
     // Do the actual search on the GPU.
-    DebugTimer search_timer = DebugTimer("Running search", debug_info);
+    DebugTimer search_timer = DebugTimer("Running search", rs_logger);
 #ifdef HAVE_CUDA
     deviceSearchFilter(psi_phi_array, params, gpu_search_list, results);
 #else
@@ -269,7 +263,7 @@ std::vector<Trajectory> StackSearch::search_batch(){
     search_timer.stop();
 
     results.move_to_cpu();
-    DebugTimer sort_timer = DebugTimer("Sorting results", debug_info);
+    DebugTimer sort_timer = DebugTimer("Sorting results", rs_logger);
     results.sort_by_likelihood();
     sort_timer.stop();
     core_timer.stop();
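
Side note, not part of the patch itself: the recurring change in the hunks above swaps the `debug_info` boolean passed to `DebugTimer` for the module-level `rs_logger` handle, so timing messages go through the `kbmod.search.run_search` logger rather than ad hoc stdout prints. Below is a minimal, self-contained sketch of that "inject a logger, not a debug flag" pattern; `SimpleLogger` and `ScopedDebugTimer` are hypothetical stand-ins and assume nothing about kbmod's real `DebugTimer` or logging API beyond what the diff shows.

    // Hypothetical illustration only: SimpleLogger and ScopedDebugTimer are
    // NOT kbmod classes; they sketch passing a logger handle into a timer.
    #include <chrono>
    #include <iostream>
    #include <string>

    struct SimpleLogger {
        std::string name;
        void debug(const std::string& msg) const {
            std::cout << "[" << name << "] " << msg << "\n";
        }
    };

    class ScopedDebugTimer {
    public:
        // The timer reports through the supplied logger instead of a bool flag.
        ScopedDebugTimer(std::string msg, const SimpleLogger& logger)
                : msg_(std::move(msg)),
                  logger_(logger),
                  start_(std::chrono::steady_clock::now()) {
            logger_.debug("Starting: " + msg_);
        }
        void stop() {
            auto elapsed = std::chrono::duration<double>(
                    std::chrono::steady_clock::now() - start_).count();
            logger_.debug("Finished: " + msg_ + " in " + std::to_string(elapsed) + "s");
        }
    private:
        std::string msg_;
        const SimpleLogger& logger_;
        std::chrono::steady_clock::time_point start_;
    };

    int main() {
        SimpleLogger rs_logger{"kbmod.search.run_search"};
        ScopedDebugTimer timer("Creating psi/phi buffers", rs_logger);
        // ... work being timed would happen here ...
        timer.stop();
        return 0;
    }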