diff --git a/RecoTracker/LSTCore/interface/Constants.h b/RecoTracker/LSTCore/interface/Constants.h
index 8fe8d99aa1b29..6280ea8670540 100644
--- a/RecoTracker/LSTCore/interface/Constants.h
+++ b/RecoTracker/LSTCore/interface/Constants.h
@@ -111,6 +111,7 @@ namespace lst {
     using ArrayUxHits = edm::StdArray<unsigned int, kHits>;
   };
 
+  using ArrayIx2 = edm::StdArray<int, 2>;
   using ArrayUx2 = edm::StdArray<unsigned int, 2>;
 
 }  //namespace lst
diff --git a/RecoTracker/LSTCore/interface/ObjectRangesSoA.h b/RecoTracker/LSTCore/interface/ObjectRangesSoA.h
new file mode 100644
index 0000000000000..c0a2681859467
--- /dev/null
+++ b/RecoTracker/LSTCore/interface/ObjectRangesSoA.h
@@ -0,0 +1,50 @@
+#ifndef RecoTracker_LSTCore_interface_ObjectRangesSoA_h
+#define RecoTracker_LSTCore_interface_ObjectRangesSoA_h
+
+#include "DataFormats/SoATemplate/interface/SoALayout.h"
+#include "DataFormats/Portable/interface/PortableCollection.h"
+
+namespace lst {
+
+  GENERATE_SOA_LAYOUT(ObjectRangesSoALayout,
+                      SOA_COLUMN(ArrayIx2, hitRanges),
+                      SOA_COLUMN(int, hitRangesLower),
+                      SOA_COLUMN(int, hitRangesUpper),
+                      SOA_COLUMN(int8_t, hitRangesnLower),
+                      SOA_COLUMN(int8_t, hitRangesnUpper),
+                      SOA_COLUMN(ArrayIx2, mdRanges),
+                      SOA_COLUMN(ArrayIx2, segmentRanges),
+                      SOA_COLUMN(ArrayIx2, trackletRanges),
+                      SOA_COLUMN(ArrayIx2, tripletRanges),
+                      SOA_COLUMN(ArrayIx2, quintupletRanges))
+
+  // triplets and quintuplets end up with an unused pixel entry at the end
+  GENERATE_SOA_LAYOUT(ObjectOccupancySoALayout,
+                      SOA_COLUMN(int, miniDoubletModuleIndices),
+                      SOA_COLUMN(int, miniDoubletModuleOccupancy),
+                      SOA_COLUMN(int, segmentModuleIndices),
+                      SOA_COLUMN(int, segmentModuleOccupancy),
+                      SOA_COLUMN(int, tripletModuleIndices),
+                      SOA_COLUMN(int, tripletModuleOccupancy),
+                      SOA_COLUMN(int, quintupletModuleIndices),
+                      SOA_COLUMN(int, quintupletModuleOccupancy),
+                      SOA_COLUMN(uint16_t, indicesOfEligibleT5Modules),
+                      SOA_SCALAR(unsigned int, nTotalMDs),
+                      SOA_SCALAR(unsigned int, nTotalSegs),
+                      SOA_SCALAR(unsigned int, nTotalTrips),
+                      SOA_SCALAR(unsigned int, nTotalQuints),
+                      SOA_SCALAR(uint16_t, nEligibleT5Modules))
+
+  using ObjectRangesSoA = ObjectRangesSoALayout<>;
+  using ObjectOccupancySoA = ObjectOccupancySoALayout<>;
+
+  using ObjectRanges = ObjectRangesSoA::View;
+  using ObjectRangesConst = ObjectRangesSoA::ConstView;
+  using ObjectOccupancy = ObjectOccupancySoA::View;
+  using ObjectOccupancyConst = ObjectOccupancySoA::ConstView;
+
+  using ObjectRangesHostCollection = PortableHostMultiCollection<ObjectRangesSoA, ObjectOccupancySoA>;
+
+}  // namespace lst
+
+#endif
diff --git a/RecoTracker/LSTCore/interface/alpaka/ObjectRangesDeviceCollection.h b/RecoTracker/LSTCore/interface/alpaka/ObjectRangesDeviceCollection.h
new file mode 100644
index 0000000000000..f923ed13827eb
--- /dev/null
+++ b/RecoTracker/LSTCore/interface/alpaka/ObjectRangesDeviceCollection.h
@@ -0,0 +1,12 @@
+#ifndef RecoTracker_LSTCore_interface_alpaka_ObjectRangesDeviceCollection_h
+#define RecoTracker_LSTCore_interface_alpaka_ObjectRangesDeviceCollection_h
+
+#include "DataFormats/Portable/interface/alpaka/PortableCollection.h"
+
+#include "RecoTracker/LSTCore/interface/ObjectRangesSoA.h"
+
+namespace ALPAKA_ACCELERATOR_NAMESPACE::lst {
+  using ObjectRangesDeviceCollection = PortableCollection2<ObjectRangesSoA, ObjectOccupancySoA>;
+}  // namespace ALPAKA_ACCELERATOR_NAMESPACE::lst
+
+#endif
diff --git a/RecoTracker/LSTCore/src/alpaka/Event.dev.cc b/RecoTracker/LSTCore/src/alpaka/Event.dev.cc
index 09f2b2ed8c9ee..0f76fdb2de153 100644
--- a/RecoTracker/LSTCore/src/alpaka/Event.dev.cc
+++ b/RecoTracker/LSTCore/src/alpaka/Event.dev.cc
@@ -54,8 +54,7 @@ void Event::resetEventSync() {
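For illustration (not part of the patch): the two layouts above generate lightweight `View`/`ConstView` types, so the former flat `int*` arrays of the old `ObjectRanges` struct become accessor methods, and an `ArrayIx2` column is addressed as a per-module `[first, last]` pair. A minimal sketch of that access pattern, mirroring what the `AddMiniDoubletRangesToEventExplicit` kernel does per module in the `MiniDoublet.h` hunk further down; the free-function form and its name are illustrative only.

```cpp
#include <cstdint>

#include "RecoTracker/LSTCore/interface/ObjectRangesSoA.h"

// Hypothetical helper: record the [first, last] mini-doublet range of one module,
// reading the per-module offset from the occupancy view and writing the ArrayIx2
// pair into the ranges view. -1 is the "empty" sentinel used throughout the patch.
inline void fillMDRangeForModule(lst::ObjectRanges ranges,
                                 lst::ObjectOccupancyConst occupancy,
                                 uint16_t module,
                                 unsigned int nMDsInModule) {
  if (nMDsInModule == 0) {
    ranges.mdRanges()[module][0] = -1;
    ranges.mdRanges()[module][1] = -1;
  } else {
    int first = occupancy.miniDoubletModuleIndices()[module];
    ranges.mdRanges()[module][0] = first;
    ranges.mdRanges()[module][1] = first + static_cast<int>(nMDsInModule) - 1;
  }
}
```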
hitsInGPU_.reset(); hitsBuffers_.reset(); miniDoubletsDC_.reset(); - rangesInGPU_.reset(); - rangesBuffers_.reset(); + rangesDC_.reset(); segmentsDC_.reset(); tripletsInGPU_.reset(); tripletsBuffers_.reset(); @@ -68,7 +67,7 @@ void Event::resetEventSync() { pixelQuintupletsBuffers_.reset(); hitsInCPU_.reset(); - rangesInCPU_.reset(); + rangesHC_.reset(); miniDoubletsHC_.reset(); segmentsHC_.reset(); tripletsInCPU_.reset(); @@ -94,10 +93,11 @@ void Event::addHitToEvent(std::vector const& x, hitsInGPU_->setData(*hitsBuffers_); } - if (!rangesInGPU_) { - rangesInGPU_.emplace(); - rangesBuffers_.emplace(nModules_, nLowerModules_, devAcc_, queue_); - rangesInGPU_->setData(*rangesBuffers_); + if (!rangesDC_) { + std::array const ranges_sizes{{static_cast(nModules_), static_cast(nLowerModules_ + 1)}}; + rangesDC_.emplace(ranges_sizes, queue_); + auto buf = rangesDC_->buffer(); + alpaka::memset(queue_, buf, 0xff); } // Need a view here before transferring to the device. @@ -170,9 +170,12 @@ void Event::addPixelSegmentToEvent(std::vector const& hitIndices0, uint16_t pixelModuleIndex = pixelMapping_.pixelModuleIndex; if (!miniDoubletsDC_) { - // Create a view for the element nLowerModules_ inside rangesBuffers_->miniDoubletModuleOccupancy + // Create a view for the element nLowerModules_ inside rangesOccupancy->miniDoubletModuleOccupancy + auto rangesOccupancy = rangesDC_->view(); + auto miniDoubletModuleOccupancy_view = alpaka::createView( + devAcc_, rangesOccupancy.miniDoubletModuleOccupancy(), (Idx)rangesOccupancy.metadata().size()); auto dst_view_miniDoubletModuleOccupancy = - alpaka::createSubView(rangesBuffers_->miniDoubletModuleOccupancy_buf, (Idx)1u, (Idx)nLowerModules_); + alpaka::createSubView(miniDoubletModuleOccupancy_view, (Idx)1u, (Idx)nLowerModules_); // Create a host buffer for a value to be passed to the device auto pixelMaxMDs_buf_h = cms::alpakatools::make_host_buffer(queue_, (Idx)1u); @@ -182,11 +185,15 @@ void Event::addPixelSegmentToEvent(std::vector const& hitIndices0, WorkDiv1D const createMDArrayRangesGPU_workDiv = createWorkDiv({1}, {1024}, {1}); - alpaka::exec( - queue_, createMDArrayRangesGPU_workDiv, CreateMDArrayRangesGPU{}, *modulesBuffers_.data(), *rangesInGPU_); + alpaka::exec(queue_, + createMDArrayRangesGPU_workDiv, + CreateMDArrayRangesGPU{}, + *modulesBuffers_.data(), + rangesDC_->view()); auto nTotalMDs_buf_h = cms::alpakatools::make_host_buffer(queue_, (Idx)1u); - alpaka::memcpy(queue_, nTotalMDs_buf_h, rangesBuffers_->device_nTotalMDs_buf); + auto nTotalMDs_buf_d = alpaka::createView(devAcc_, &rangesOccupancy.nTotalMDs(), (Idx)1u); + alpaka::memcpy(queue_, nTotalMDs_buf_h, nTotalMDs_buf_d); alpaka::wait(queue_); // wait to get the data before manipulation *nTotalMDs_buf_h.data() += n_max_pixel_md_per_modules; @@ -212,12 +219,13 @@ void Event::addPixelSegmentToEvent(std::vector const& hitIndices0, createSegmentArrayRanges_workDiv, CreateSegmentArrayRanges{}, *modulesBuffers_.data(), - *rangesInGPU_, + rangesDC_->view(), miniDoubletsDC_->const_view()); - auto nTotalSegments_view = alpaka::createView(cms::alpakatools::host(), &nTotalSegments_, (Idx)1u); - - alpaka::memcpy(queue_, nTotalSegments_view, rangesBuffers_->device_nTotalSegs_buf); + auto rangesOccupancy = rangesDC_->view(); + auto nTotalSegments_view_h = alpaka::createView(cms::alpakatools::host(), &nTotalSegments_, (Idx)1u); + auto nTotalSegments_view_d = alpaka::createView(devAcc_, &rangesOccupancy.nTotalSegs(), (Idx)1u); + alpaka::memcpy(queue_, nTotalSegments_view_h, nTotalSegments_view_d); 
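The two changes above (collection construction in `addHitToEvent`, scalar read-back in `addPixelSegmentToEvent`) are the recurring pattern that replaces the old `ObjectRangesBuffer`: one multi-layout collection sized per layout and memset to 0xff, plus device-resident `SOA_SCALAR`s pulled to the host through one-element alpaka views. A condensed sketch of both steps under the same interfaces used in this patch; the function name, the `view<ObjectOccupancySoA>()` spelling (template arguments were stripped from this diff by extraction), and the include set are assumptions.

```cpp
#include <array>

#include <alpaka/alpaka.hpp>

#include "HeterogeneousCore/AlpakaInterface/interface/config.h"
#include "HeterogeneousCore/AlpakaInterface/interface/host.h"
#include "RecoTracker/LSTCore/interface/alpaka/ObjectRangesDeviceCollection.h"

namespace ALPAKA_ACCELERATOR_NAMESPACE::lst {

  // Hypothetical helper: allocate the two-layout ranges collection and read one
  // device-resident scalar (nTotalMDs) back to the host.
  inline unsigned int allocateRangesAndReadTotalMDs(Queue& queue, Device const& dev, int nModules, int nLowerModules) {
    // One backing buffer, two layouts with different sizes; the +1 is the extra pixel entry.
    std::array<int, 2> const sizes{{nModules, nLowerModules + 1}};
    ObjectRangesDeviceCollection rangesDC(sizes, queue);
    auto buf = rangesDC.buffer();
    alpaka::memset(queue, buf, 0xff);  // every int entry now reads as -1

    // ... kernels such as CreateMDArrayRangesGPU would fill the occupancy columns here ...

    auto occupancy = rangesDC.view<ObjectOccupancySoA>();  // assumed per-layout accessor
    unsigned int nTotalMDs = 0;
    auto dst = alpaka::createView(cms::alpakatools::host(), &nTotalMDs, 1u);
    auto src = alpaka::createView(dev, &occupancy.nTotalMDs(), 1u);
    alpaka::memcpy(queue, dst, src);
    alpaka::wait(queue);  // the host value is valid only after the copy completes
    return nTotalMDs;
  }

}  // namespace ALPAKA_ACCELERATOR_NAMESPACE::lst
```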
alpaka::wait(queue_); // wait to get the value before manipulation nTotalSegments_ += n_max_pixel_segments_per_module; @@ -298,7 +306,7 @@ void Event::addPixelSegmentToEvent(std::vector const& hitIndices0, addPixelSegmentToEvent_workdiv, AddPixelSegmentToEventKernel{}, *modulesBuffers_.data(), - *rangesInGPU_, + rangesDC_->const_view(), *hitsInGPU_, miniDoubletsDC_->view(), segmentsDC_->view(), @@ -313,9 +321,12 @@ void Event::addPixelSegmentToEvent(std::vector const& hitIndices0, } void Event::createMiniDoublets() { - // Create a view for the element nLowerModules_ inside rangesBuffers_->miniDoubletModuleOccupancy + // Create a view for the element nLowerModules_ inside rangesOccupancy->miniDoubletModuleOccupancy + auto rangesOccupancy = rangesDC_->view(); + auto miniDoubletModuleOccupancy_view = + alpaka::createView(devAcc_, rangesOccupancy.miniDoubletModuleOccupancy(), (Idx)rangesOccupancy.metadata().size()); auto dst_view_miniDoubletModuleOccupancy = - alpaka::createSubView(rangesBuffers_->miniDoubletModuleOccupancy_buf, (Idx)1u, (Idx)nLowerModules_); + alpaka::createSubView(miniDoubletModuleOccupancy_view, (Idx)1u, (Idx)nLowerModules_); // Create a host buffer for a value to be passed to the device auto pixelMaxMDs_buf_h = cms::alpakatools::make_host_buffer(queue_, (Idx)1u); @@ -325,11 +336,15 @@ void Event::createMiniDoublets() { WorkDiv1D const createMDArrayRangesGPU_workDiv = createWorkDiv({1}, {1024}, {1}); - alpaka::exec( - queue_, createMDArrayRangesGPU_workDiv, CreateMDArrayRangesGPU{}, *modulesBuffers_.data(), *rangesInGPU_); + alpaka::exec(queue_, + createMDArrayRangesGPU_workDiv, + CreateMDArrayRangesGPU{}, + *modulesBuffers_.data(), + rangesDC_->view()); auto nTotalMDs_buf_h = cms::alpakatools::make_host_buffer(queue_, (Idx)1u); - alpaka::memcpy(queue_, nTotalMDs_buf_h, rangesBuffers_->device_nTotalMDs_buf); + auto nTotalMDs_buf_d = alpaka::createView(devAcc_, &rangesOccupancy.nTotalMDs(), (Idx)1u); + alpaka::memcpy(queue_, nTotalMDs_buf_h, nTotalMDs_buf_d); alpaka::wait(queue_); // wait to get the data before manipulation *nTotalMDs_buf_h.data() += n_max_pixel_md_per_modules; @@ -359,7 +374,7 @@ void Event::createMiniDoublets() { *hitsInGPU_, miniDoubletsDC_->view(), miniDoubletsDC_->view(), - *rangesInGPU_); + rangesDC_->const_view()); WorkDiv1D const addMiniDoubletRangesToEventExplicit_workDiv = createWorkDiv({1}, {1024}, {1}); @@ -368,7 +383,8 @@ void Event::createMiniDoublets() { AddMiniDoubletRangesToEventExplicit{}, *modulesBuffers_.data(), miniDoubletsDC_->view(), - *rangesInGPU_, + rangesDC_->view(), + rangesDC_->const_view(), *hitsInGPU_); if (addObjects_) { @@ -405,7 +421,8 @@ void Event::createSegmentsWithModuleMap() { miniDoubletsDC_->const_view(), segmentsDC_->view(), segmentsDC_->view(), - *rangesInGPU_); + rangesDC_->const_view(), + rangesDC_->const_view()); WorkDiv1D const addSegmentRangesToEventExplicit_workDiv = createWorkDiv({1}, {1024}, {1}); @@ -414,7 +431,8 @@ void Event::createSegmentsWithModuleMap() { AddSegmentRangesToEventExplicit{}, *modulesBuffers_.data(), segmentsDC_->view(), - *rangesInGPU_); + rangesDC_->view(), + rangesDC_->const_view()); if (addObjects_) { addSegmentsToEventExplicit(); @@ -429,13 +447,14 @@ void Event::createTriplets() { createTripletArrayRanges_workDiv, CreateTripletArrayRanges{}, *modulesBuffers_.data(), - *rangesInGPU_, + rangesDC_->view(), segmentsDC_->const_view()); // TODO: Why are we pulling this back down only to put it back on the device in a new struct? 
+ auto rangesOccupancy = rangesDC_->view(); auto maxTriplets_buf_h = cms::alpakatools::make_host_buffer(queue_, (Idx)1u); - - alpaka::memcpy(queue_, maxTriplets_buf_h, rangesBuffers_->device_nTotalTrips_buf); + auto maxTriplets_buf_d = alpaka::createView(devAcc_, &rangesOccupancy.nTotalTrips(), (Idx)1u); + alpaka::memcpy(queue_, maxTriplets_buf_h, maxTriplets_buf_d); alpaka::wait(queue_); // wait to get the value before using it tripletsInGPU_.emplace(); @@ -495,7 +514,8 @@ void Event::createTriplets() { segmentsDC_->const_view(), segmentsDC_->const_view(), *tripletsInGPU_, - *rangesInGPU_, + rangesDC_->const_view(), + rangesDC_->const_view(), index_gpu_buf.data(), nonZeroModules); @@ -506,7 +526,8 @@ void Event::createTriplets() { AddTripletRangesToEventExplicit{}, *modulesBuffers_.data(), *tripletsInGPU_, - *rangesInGPU_); + rangesDC_->view(), + rangesDC_->const_view()); if (addObjects_) { addTripletsToEventExplicit(); @@ -529,7 +550,7 @@ void Event::createTrackCandidates(bool no_pls_dupclean, bool tc_pls_triplets) { crossCleanpT3_workDiv, CrossCleanpT3{}, *modulesBuffers_.data(), - *rangesInGPU_, + rangesDC_->const_view(), *pixelTripletsInGPU_, segmentsDC_->const_view(), *pixelQuintupletsInGPU_); @@ -543,11 +564,13 @@ void Event::createTrackCandidates(bool no_pls_dupclean, bool tc_pls_triplets) { *pixelTripletsInGPU_, trackCandidatesDC_->view(), segmentsDC_->const_view(), - *rangesInGPU_); + rangesDC_->const_view()); // Pull nEligibleT5Modules from the device. + auto rangesOccupancy = rangesDC_->view(); auto nEligibleModules_buf_h = cms::alpakatools::make_host_buffer(queue_, 1u); - alpaka::memcpy(queue_, nEligibleModules_buf_h, rangesBuffers_->nEligibleT5Modules_buf); + auto nEligibleModules_buf_d = alpaka::createView(devAcc_, &rangesOccupancy.nEligibleT5Modules(), (Idx)1u); + alpaka::memcpy(queue_, nEligibleModules_buf_h, nEligibleModules_buf_d); alpaka::wait(queue_); // wait to get the value before using auto const nEligibleModules = *nEligibleModules_buf_h.data(); @@ -560,7 +583,7 @@ void Event::createTrackCandidates(bool no_pls_dupclean, bool tc_pls_triplets) { removeDupQuintupletsInGPUBeforeTC_workDiv, RemoveDupQuintupletsInGPUBeforeTC{}, *quintupletsInGPU_, - *rangesInGPU_); + rangesDC_->const_view()); Vec3D const threadsPerBlock_crossCleanT5{32, 1, 32}; Vec3D const blocksPerGrid_crossCleanT5{(13296 / 32) + 1, 1, max_blocks}; @@ -574,7 +597,7 @@ void Event::createTrackCandidates(bool no_pls_dupclean, bool tc_pls_triplets) { *quintupletsInGPU_, *pixelQuintupletsInGPU_, *pixelTripletsInGPU_, - *rangesInGPU_); + rangesDC_->const_view()); Vec3D const threadsPerBlock_addT5asTrackCandidateInGPU{1, 8, 128}; Vec3D const blocksPerGrid_addT5asTrackCandidateInGPU{1, 8, 10}; @@ -587,7 +610,7 @@ void Event::createTrackCandidates(bool no_pls_dupclean, bool tc_pls_triplets) { nLowerModules_, *quintupletsInGPU_, trackCandidatesDC_->view(), - *rangesInGPU_); + rangesDC_->const_view()); if (!no_pls_dupclean) { Vec3D const threadsPerBlockCheckHitspLS{1, 16, 16}; @@ -613,7 +636,7 @@ void Event::createTrackCandidates(bool no_pls_dupclean, bool tc_pls_triplets) { crossCleanpLS_workDiv, CrossCleanpLS{}, *modulesBuffers_.data(), - *rangesInGPU_, + rangesDC_->const_view(), *pixelTripletsInGPU_, trackCandidatesDC_->view(), segmentsDC_->const_view(), @@ -760,7 +783,7 @@ void Event::createPixelTriplets() { createPixelTripletsInGPUFromMapv2_workDiv, CreatePixelTripletsInGPUFromMapv2{}, *modulesBuffers_.data(), - *rangesInGPU_, + rangesDC_->const_view(), miniDoubletsDC_->const_view(), 
segmentsDC_->const_view(), segmentsDC_->const_view(), @@ -798,13 +821,15 @@ void Event::createQuintuplets() { CreateEligibleModulesListForQuintupletsGPU{}, *modulesBuffers_.data(), *tripletsInGPU_, - *rangesInGPU_); + rangesDC_->view()); auto nEligibleT5Modules_buf = allocBufWrapper(cms::alpakatools::host(), 1, queue_); auto nTotalQuintuplets_buf = allocBufWrapper(cms::alpakatools::host(), 1, queue_); - - alpaka::memcpy(queue_, nEligibleT5Modules_buf, rangesBuffers_->nEligibleT5Modules_buf); - alpaka::memcpy(queue_, nTotalQuintuplets_buf, rangesBuffers_->device_nTotalQuints_buf); + auto rangesOccupancy = rangesDC_->view(); + auto nEligibleT5Modules_view_d = alpaka::createView(devAcc_, &rangesOccupancy.nEligibleT5Modules(), (Idx)1u); + auto nTotalQuintuplets_view_d = alpaka::createView(devAcc_, &rangesOccupancy.nTotalQuints(), (Idx)1u); + alpaka::memcpy(queue_, nEligibleT5Modules_buf, nEligibleT5Modules_view_d); + alpaka::memcpy(queue_, nTotalQuintuplets_buf, nTotalQuintuplets_view_d); alpaka::wait(queue_); // wait for the values before using them auto nEligibleT5Modules = *nEligibleT5Modules_buf.data(); @@ -831,7 +856,7 @@ void Event::createQuintuplets() { segmentsDC_->const_view(), *tripletsInGPU_, *quintupletsInGPU_, - *rangesInGPU_, + rangesDC_->const_view(), nEligibleT5Modules); Vec3D const threadsPerBlockDupQuint{1, 16, 16}; @@ -844,7 +869,7 @@ void Event::createQuintuplets() { RemoveDupQuintupletsInGPUAfterBuild{}, *modulesBuffers_.data(), *quintupletsInGPU_, - *rangesInGPU_); + rangesDC_->const_view()); WorkDiv1D const addQuintupletRangesToEventExplicit_workDiv = createWorkDiv({1}, {1024}, {1}); @@ -853,7 +878,8 @@ void Event::createQuintuplets() { AddQuintupletRangesToEventExplicit{}, *modulesBuffers_.data(), *quintupletsInGPU_, - *rangesInGPU_); + rangesDC_->view(), + rangesDC_->const_view()); if (addObjects_) { addQuintupletsToEventExplicit(); @@ -983,7 +1009,7 @@ void Event::createPixelQuintuplets() { connectedPixelSize_dev_buf.data(), connectedPixelIndex_dev_buf.data(), nInnerSegments, - *rangesInGPU_); + rangesDC_->const_view()); Vec3D const threadsPerBlockDupPix{1, 16, 16}; Vec3D const blocksPerGridDupPix{1, max_blocks, 1}; @@ -1004,7 +1030,7 @@ void Event::createPixelQuintuplets() { *pixelQuintupletsInGPU_, trackCandidatesDC_->view(), segmentsDC_->const_view(), - *rangesInGPU_); + rangesDC_->const_view()); #ifdef WARNINGS auto nPixelQuintuplets_buf = allocBufWrapper(cms::alpakatools::host(), 1, queue_); @@ -1092,7 +1118,10 @@ void Event::addQuintupletsToEventExplicit() { alpaka::memcpy(queue_, module_layers_buf, modulesBuffers_.layers_buf, nLowerModules_); auto module_quintupletModuleIndices_buf = allocBufWrapper(cms::alpakatools::host(), nLowerModules_, queue_); - alpaka::memcpy(queue_, module_quintupletModuleIndices_buf, rangesBuffers_->quintupletModuleIndices_buf); + auto rangesOccupancy = rangesDC_->view(); + auto quintupletModuleIndices_view_d = + alpaka::createView(devAcc_, rangesOccupancy.quintupletModuleIndices(), nLowerModules_); + alpaka::memcpy(queue_, module_quintupletModuleIndices_buf, quintupletModuleIndices_view_d); alpaka::wait(queue_); // wait for inputs before using them @@ -1389,21 +1418,23 @@ HitsBuffer& Event::getHitsInCMSSW(bool sync) { return hitsInCPU_.value(); } -ObjectRangesBuffer& Event::getRanges(bool sync) { - if (!rangesInCPU_) { - rangesInCPU_.emplace(nModules_, nLowerModules_, cms::alpakatools::host(), queue_); - rangesInCPU_->setData(*rangesInCPU_); - - alpaka::memcpy(queue_, rangesInCPU_->hitRanges_buf, rangesBuffers_->hitRanges_buf); - 
alpaka::memcpy(queue_, rangesInCPU_->quintupletModuleIndices_buf, rangesBuffers_->quintupletModuleIndices_buf); - alpaka::memcpy(queue_, rangesInCPU_->miniDoubletModuleIndices_buf, rangesBuffers_->miniDoubletModuleIndices_buf); - alpaka::memcpy(queue_, rangesInCPU_->segmentModuleIndices_buf, rangesBuffers_->segmentModuleIndices_buf); - alpaka::memcpy(queue_, rangesInCPU_->tripletModuleIndices_buf, rangesBuffers_->tripletModuleIndices_buf); - if (sync) - alpaka::wait(queue_); // wait to get completed host data +template +typename TSoA::ConstView Event::getRanges(bool sync) { + if constexpr (std::is_same_v) { + return rangesDC_->const_view(); + } else { + if (!rangesHC_) { + rangesHC_.emplace( + cms::alpakatools::CopyToHost>::copyAsync( + queue_, *rangesDC_)); + if (sync) + alpaka::wait(queue_); // host consumers expect filled data + } + return rangesHC_->const_view(); } - return rangesInCPU_.value(); } +template ObjectRangesConst Event::getRanges(bool); +template ObjectOccupancyConst Event::getRanges(bool); template typename TSoA::ConstView Event::getMiniDoublets(bool sync) { diff --git a/RecoTracker/LSTCore/src/alpaka/Event.h b/RecoTracker/LSTCore/src/alpaka/Event.h index a3c3a21f09e2c..122b6fdadd98b 100644 --- a/RecoTracker/LSTCore/src/alpaka/Event.h +++ b/RecoTracker/LSTCore/src/alpaka/Event.h @@ -7,6 +7,7 @@ #include "RecoTracker/LSTCore/interface/alpaka/Constants.h" #include "RecoTracker/LSTCore/interface/alpaka/LST.h" #include "RecoTracker/LSTCore/interface/Module.h" +#include "RecoTracker/LSTCore/interface/alpaka/ObjectRangesDeviceCollection.h" #include "Hit.h" #include "Segment.h" @@ -43,8 +44,7 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst { unsigned int nTotalSegments_; //Device stuff - std::optional rangesInGPU_; - std::optional> rangesBuffers_; + std::optional rangesDC_; std::optional hitsInGPU_; std::optional> hitsBuffers_; std::optional miniDoubletsDC_; @@ -60,7 +60,7 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst { std::optional> pixelQuintupletsBuffers_; //CPU interface stuff - std::optional> rangesInCPU_; + std::optional rangesHC_; std::optional> hitsInCPU_; std::optional miniDoubletsHC_; std::optional segmentsHC_; @@ -180,7 +180,8 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst { // HANDLE WITH CARE HitsBuffer& getHits(bool sync = true); HitsBuffer& getHitsInCMSSW(bool sync = true); - ObjectRangesBuffer& getRanges(bool sync = true); + template + typename TSoA::ConstView getRanges(bool sync = true); template typename TSoA::ConstView getMiniDoublets(bool sync = true); template diff --git a/RecoTracker/LSTCore/src/alpaka/Kernels.h b/RecoTracker/LSTCore/src/alpaka/Kernels.h index b4fecca8f90cf..839fb42b23fb2 100644 --- a/RecoTracker/LSTCore/src/alpaka/Kernels.h +++ b/RecoTracker/LSTCore/src/alpaka/Kernels.h @@ -3,10 +3,10 @@ #include "RecoTracker/LSTCore/interface/alpaka/Constants.h" #include "RecoTracker/LSTCore/interface/Module.h" +#include "RecoTracker/LSTCore/interface/ObjectRangesSoA.h" #include "Hit.h" #include "MiniDoublet.h" -#include "ObjectRanges.h" #include "Segment.h" #include "Triplet.h" #include "Quintuplet.h" @@ -147,14 +147,14 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst { ALPAKA_FN_ACC void operator()(TAcc const& acc, Modules modulesInGPU, Quintuplets quintupletsInGPU, - ObjectRanges rangesInGPU) const { + ObjectOccupancyConst objectOccupancy) const { auto const globalThreadIdx = alpaka::getIdx(acc); auto const gridThreadExtent = alpaka::getWorkDiv(acc); for (unsigned int lowmod = globalThreadIdx[0]; lowmod < *modulesInGPU.nLowerModules; lowmod += 
gridThreadExtent[0]) { unsigned int nQuintuplets_lowmod = quintupletsInGPU.nQuintuplets[lowmod]; - int quintupletModuleIndices_lowmod = rangesInGPU.quintupletModuleIndices[lowmod]; + int quintupletModuleIndices_lowmod = objectOccupancy.quintupletModuleIndices()[lowmod]; for (unsigned int ix1 = globalThreadIdx[1]; ix1 < nQuintuplets_lowmod; ix1 += gridThreadExtent[1]) { unsigned int ix = quintupletModuleIndices_lowmod + ix1; @@ -194,27 +194,30 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst { struct RemoveDupQuintupletsInGPUBeforeTC { template - ALPAKA_FN_ACC void operator()(TAcc const& acc, Quintuplets quintupletsInGPU, ObjectRanges rangesInGPU) const { + ALPAKA_FN_ACC void operator()(TAcc const& acc, + Quintuplets quintupletsInGPU, + ObjectOccupancyConst objectOccupancy) const { auto const globalThreadIdx = alpaka::getIdx(acc); auto const gridThreadExtent = alpaka::getWorkDiv(acc); - for (unsigned int lowmodIdx1 = globalThreadIdx[1]; lowmodIdx1 < *(rangesInGPU.nEligibleT5Modules); + for (unsigned int lowmodIdx1 = globalThreadIdx[1]; lowmodIdx1 < objectOccupancy.nEligibleT5Modules(); lowmodIdx1 += gridThreadExtent[1]) { - uint16_t lowmod1 = rangesInGPU.indicesOfEligibleT5Modules[lowmodIdx1]; + uint16_t lowmod1 = objectOccupancy.indicesOfEligibleT5Modules()[lowmodIdx1]; unsigned int nQuintuplets_lowmod1 = quintupletsInGPU.nQuintuplets[lowmod1]; if (nQuintuplets_lowmod1 == 0) continue; - unsigned int quintupletModuleIndices_lowmod1 = rangesInGPU.quintupletModuleIndices[lowmod1]; + unsigned int quintupletModuleIndices_lowmod1 = objectOccupancy.quintupletModuleIndices()[lowmod1]; - for (unsigned int lowmodIdx2 = globalThreadIdx[2] + lowmodIdx1; lowmodIdx2 < *(rangesInGPU.nEligibleT5Modules); + for (unsigned int lowmodIdx2 = globalThreadIdx[2] + lowmodIdx1; + lowmodIdx2 < objectOccupancy.nEligibleT5Modules(); lowmodIdx2 += gridThreadExtent[2]) { - uint16_t lowmod2 = rangesInGPU.indicesOfEligibleT5Modules[lowmodIdx2]; + uint16_t lowmod2 = objectOccupancy.indicesOfEligibleT5Modules()[lowmodIdx2]; unsigned int nQuintuplets_lowmod2 = quintupletsInGPU.nQuintuplets[lowmod2]; if (nQuintuplets_lowmod2 == 0) continue; - unsigned int quintupletModuleIndices_lowmod2 = rangesInGPU.quintupletModuleIndices[lowmod2]; + unsigned int quintupletModuleIndices_lowmod2 = objectOccupancy.quintupletModuleIndices()[lowmod2]; for (unsigned int ix1 = 0; ix1 < nQuintuplets_lowmod1; ix1 += 1) { unsigned int ix = quintupletModuleIndices_lowmod1 + ix1; diff --git a/RecoTracker/LSTCore/src/alpaka/MiniDoublet.h b/RecoTracker/LSTCore/src/alpaka/MiniDoublet.h index e81ca10469b35..1b24f561189bb 100644 --- a/RecoTracker/LSTCore/src/alpaka/MiniDoublet.h +++ b/RecoTracker/LSTCore/src/alpaka/MiniDoublet.h @@ -8,9 +8,9 @@ #include "RecoTracker/LSTCore/interface/alpaka/MiniDoubletsDeviceCollection.h" #include "RecoTracker/LSTCore/interface/Module.h" #include "RecoTracker/LSTCore/interface/EndcapGeometry.h" +#include "RecoTracker/LSTCore/interface/ObjectRangesSoA.h" #include "Hit.h" -#include "ObjectRanges.h" namespace ALPAKA_ACCELERATOR_NAMESPACE::lst { template @@ -698,7 +698,7 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst { Hits hitsInGPU, MiniDoublets mds, MiniDoubletsOccupancy mdsOccupancy, - ObjectRanges rangesInGPU) const { + ObjectOccupancyConst objectOccupancy) const { auto const globalThreadIdx = alpaka::getIdx(acc); auto const gridThreadExtent = alpaka::getWorkDiv(acc); @@ -757,14 +757,14 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst { if (success) { int totOccupancyMDs = alpaka::atomicAdd( acc, 
&mdsOccupancy.totOccupancyMDs()[lowerModuleIndex], 1u, alpaka::hierarchy::Threads{}); - if (totOccupancyMDs >= (rangesInGPU.miniDoubletModuleOccupancy[lowerModuleIndex])) { + if (totOccupancyMDs >= (objectOccupancy.miniDoubletModuleOccupancy()[lowerModuleIndex])) { #ifdef WARNINGS printf("Mini-doublet excess alert! Module index = %d\n", lowerModuleIndex); #endif } else { int mdModuleIndex = alpaka::atomicAdd(acc, &mdsOccupancy.nMDs()[lowerModuleIndex], 1u, alpaka::hierarchy::Threads{}); - unsigned int mdIndex = rangesInGPU.miniDoubletModuleIndices[lowerModuleIndex] + mdModuleIndex; + unsigned int mdIndex = objectOccupancy.miniDoubletModuleIndices()[lowerModuleIndex] + mdModuleIndex; addMDToMemory(acc, mds, @@ -791,7 +791,7 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst { struct CreateMDArrayRangesGPU { template - ALPAKA_FN_ACC void operator()(TAcc const& acc, Modules modulesInGPU, ObjectRanges rangesInGPU) const { + ALPAKA_FN_ACC void operator()(TAcc const& acc, Modules modulesInGPU, ObjectOccupancy objectOccupancy) const { // implementation is 1D with a single block static_assert(std::is_same_v, "Should be Acc1D"); ALPAKA_ASSERT_ACC((alpaka::getWorkDiv(acc)[0] == 1)); @@ -870,15 +870,15 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst { unsigned int nTotMDs = alpaka::atomicAdd(acc, &nTotalMDs, occupancy, alpaka::hierarchy::Threads{}); - rangesInGPU.miniDoubletModuleIndices[i] = nTotMDs; - rangesInGPU.miniDoubletModuleOccupancy[i] = occupancy; + objectOccupancy.miniDoubletModuleIndices()[i] = nTotMDs; + objectOccupancy.miniDoubletModuleOccupancy()[i] = occupancy; } // Wait for all threads to finish before reporting final values alpaka::syncBlockThreads(acc); if (cms::alpakatools::once_per_block(acc)) { - rangesInGPU.miniDoubletModuleIndices[*modulesInGPU.nLowerModules] = nTotalMDs; - *rangesInGPU.device_nTotalMDs = nTotalMDs; + objectOccupancy.miniDoubletModuleIndices()[*modulesInGPU.nLowerModules] = nTotalMDs; + objectOccupancy.nTotalMDs() = nTotalMDs; } } }; @@ -888,7 +888,8 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst { ALPAKA_FN_ACC void operator()(TAcc const& acc, Modules modulesInGPU, MiniDoubletsOccupancy mdsOccupancy, - ObjectRanges rangesInGPU, + ObjectRanges ranges, + ObjectOccupancyConst objectOccupancy, Hits hitsInGPU) const { // implementation is 1D with a single block static_assert(std::is_same_v, "Should be Acc1D"); @@ -899,11 +900,11 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst { for (uint16_t i = globalThreadIdx[0]; i < *modulesInGPU.nLowerModules; i += gridThreadExtent[0]) { if (mdsOccupancy.nMDs()[i] == 0 or hitsInGPU.hitRanges[i * 2] == -1) { - rangesInGPU.mdRanges[i * 2] = -1; - rangesInGPU.mdRanges[i * 2 + 1] = -1; + ranges.mdRanges()[i][0] = -1; + ranges.mdRanges()[i][1] = -1; } else { - rangesInGPU.mdRanges[i * 2] = rangesInGPU.miniDoubletModuleIndices[i]; - rangesInGPU.mdRanges[i * 2 + 1] = rangesInGPU.miniDoubletModuleIndices[i] + mdsOccupancy.nMDs()[i] - 1; + ranges.mdRanges()[i][0] = objectOccupancy.miniDoubletModuleIndices()[i]; + ranges.mdRanges()[i][1] = objectOccupancy.miniDoubletModuleIndices()[i] + mdsOccupancy.nMDs()[i] - 1; } } } diff --git a/RecoTracker/LSTCore/src/alpaka/ObjectRanges.h b/RecoTracker/LSTCore/src/alpaka/ObjectRanges.h deleted file mode 100644 index 81e4358ab30d6..0000000000000 --- a/RecoTracker/LSTCore/src/alpaka/ObjectRanges.h +++ /dev/null @@ -1,154 +0,0 @@ -#ifndef RecoTracker_LSTCore_interface_ObjectRanges_h -#define RecoTracker_LSTCore_interface_ObjectRanges_h - -#include "RecoTracker/LSTCore/interface/Constants.h" - -namespace 
ALPAKA_ACCELERATOR_NAMESPACE::lst { - - struct ObjectRanges { - int* hitRanges; - int* hitRangesLower; - int* hitRangesUpper; - int8_t* hitRangesnLower; - int8_t* hitRangesnUpper; - int* mdRanges; - int* segmentRanges; - int* trackletRanges; - int* tripletRanges; - int* trackCandidateRanges; - // Others will be added later - int* quintupletRanges; - - // This number is just nEligibleModules - 1, but still we want this to be independent of the TC kernel - uint16_t* nEligibleT5Modules; - // Will be allocated in createQuintuplets kernel! - uint16_t* indicesOfEligibleT5Modules; - // To store different starting points for variable occupancy stuff - int* quintupletModuleIndices; - int* quintupletModuleOccupancy; - int* miniDoubletModuleIndices; - int* miniDoubletModuleOccupancy; - int* segmentModuleIndices; - int* segmentModuleOccupancy; - int* tripletModuleIndices; - int* tripletModuleOccupancy; - - unsigned int* device_nTotalMDs; - unsigned int* device_nTotalSegs; - unsigned int* device_nTotalTrips; - unsigned int* device_nTotalQuints; - - template - void setData(TBuff& buf) { - hitRanges = buf.hitRanges_buf.data(); - hitRangesLower = buf.hitRangesLower_buf.data(); - hitRangesUpper = buf.hitRangesUpper_buf.data(); - hitRangesnLower = buf.hitRangesnLower_buf.data(); - hitRangesnUpper = buf.hitRangesnUpper_buf.data(); - mdRanges = buf.mdRanges_buf.data(); - segmentRanges = buf.segmentRanges_buf.data(); - trackletRanges = buf.trackletRanges_buf.data(); - tripletRanges = buf.tripletRanges_buf.data(); - trackCandidateRanges = buf.trackCandidateRanges_buf.data(); - quintupletRanges = buf.quintupletRanges_buf.data(); - - nEligibleT5Modules = buf.nEligibleT5Modules_buf.data(); - indicesOfEligibleT5Modules = buf.indicesOfEligibleT5Modules_buf.data(); - - quintupletModuleIndices = buf.quintupletModuleIndices_buf.data(); - quintupletModuleOccupancy = buf.quintupletModuleOccupancy_buf.data(); - miniDoubletModuleIndices = buf.miniDoubletModuleIndices_buf.data(); - miniDoubletModuleOccupancy = buf.miniDoubletModuleOccupancy_buf.data(); - segmentModuleIndices = buf.segmentModuleIndices_buf.data(); - segmentModuleOccupancy = buf.segmentModuleOccupancy_buf.data(); - tripletModuleIndices = buf.tripletModuleIndices_buf.data(); - tripletModuleOccupancy = buf.tripletModuleOccupancy_buf.data(); - - device_nTotalMDs = buf.device_nTotalMDs_buf.data(); - device_nTotalSegs = buf.device_nTotalSegs_buf.data(); - device_nTotalTrips = buf.device_nTotalTrips_buf.data(); - device_nTotalQuints = buf.device_nTotalQuints_buf.data(); - } - }; - - template - struct ObjectRangesBuffer { - Buf hitRanges_buf; - Buf hitRangesLower_buf; - Buf hitRangesUpper_buf; - Buf hitRangesnLower_buf; - Buf hitRangesnUpper_buf; - Buf mdRanges_buf; - Buf segmentRanges_buf; - Buf trackletRanges_buf; - Buf tripletRanges_buf; - Buf trackCandidateRanges_buf; - Buf quintupletRanges_buf; - - Buf nEligibleT5Modules_buf; - Buf indicesOfEligibleT5Modules_buf; - - Buf quintupletModuleIndices_buf; - Buf quintupletModuleOccupancy_buf; - Buf miniDoubletModuleIndices_buf; - Buf miniDoubletModuleOccupancy_buf; - Buf segmentModuleIndices_buf; - Buf segmentModuleOccupancy_buf; - Buf tripletModuleIndices_buf; - Buf tripletModuleOccupancy_buf; - - Buf device_nTotalMDs_buf; - Buf device_nTotalSegs_buf; - Buf device_nTotalTrips_buf; - Buf device_nTotalQuints_buf; - - ObjectRanges data_; - - template - ObjectRangesBuffer(unsigned int nMod, unsigned int nLowerMod, TDevAcc const& devAccIn, TQueue& queue) - : hitRanges_buf(allocBufWrapper(devAccIn, nMod * 2, queue)), - 
hitRangesLower_buf(allocBufWrapper(devAccIn, nMod, queue)), - hitRangesUpper_buf(allocBufWrapper(devAccIn, nMod, queue)), - hitRangesnLower_buf(allocBufWrapper(devAccIn, nMod, queue)), - hitRangesnUpper_buf(allocBufWrapper(devAccIn, nMod, queue)), - mdRanges_buf(allocBufWrapper(devAccIn, nMod * 2, queue)), - segmentRanges_buf(allocBufWrapper(devAccIn, nMod * 2, queue)), - trackletRanges_buf(allocBufWrapper(devAccIn, nMod * 2, queue)), - tripletRanges_buf(allocBufWrapper(devAccIn, nMod * 2, queue)), - trackCandidateRanges_buf(allocBufWrapper(devAccIn, nMod * 2, queue)), - quintupletRanges_buf(allocBufWrapper(devAccIn, nMod * 2, queue)), - nEligibleT5Modules_buf(allocBufWrapper(devAccIn, 1, queue)), - indicesOfEligibleT5Modules_buf(allocBufWrapper(devAccIn, nLowerMod, queue)), - quintupletModuleIndices_buf(allocBufWrapper(devAccIn, nLowerMod, queue)), - quintupletModuleOccupancy_buf(allocBufWrapper(devAccIn, nLowerMod, queue)), - miniDoubletModuleIndices_buf(allocBufWrapper(devAccIn, nLowerMod + 1, queue)), - miniDoubletModuleOccupancy_buf(allocBufWrapper(devAccIn, nLowerMod + 1, queue)), - segmentModuleIndices_buf(allocBufWrapper(devAccIn, nLowerMod + 1, queue)), - segmentModuleOccupancy_buf(allocBufWrapper(devAccIn, nLowerMod + 1, queue)), - tripletModuleIndices_buf(allocBufWrapper(devAccIn, nLowerMod, queue)), - tripletModuleOccupancy_buf(allocBufWrapper(devAccIn, nLowerMod, queue)), - device_nTotalMDs_buf(allocBufWrapper(devAccIn, 1, queue)), - device_nTotalSegs_buf(allocBufWrapper(devAccIn, 1, queue)), - device_nTotalTrips_buf(allocBufWrapper(devAccIn, 1, queue)), - device_nTotalQuints_buf(allocBufWrapper(devAccIn, 1, queue)) { - alpaka::memset(queue, hitRanges_buf, 0xff); - alpaka::memset(queue, hitRangesLower_buf, 0xff); - alpaka::memset(queue, hitRangesUpper_buf, 0xff); - alpaka::memset(queue, hitRangesnLower_buf, 0xff); - alpaka::memset(queue, hitRangesnUpper_buf, 0xff); - alpaka::memset(queue, mdRanges_buf, 0xff); - alpaka::memset(queue, segmentRanges_buf, 0xff); - alpaka::memset(queue, trackletRanges_buf, 0xff); - alpaka::memset(queue, tripletRanges_buf, 0xff); - alpaka::memset(queue, trackCandidateRanges_buf, 0xff); - alpaka::memset(queue, quintupletRanges_buf, 0xff); - alpaka::memset(queue, quintupletModuleIndices_buf, 0xff); - data_.setData(*this); - } - - inline ObjectRanges const* data() const { return &data_; } - void setData(ObjectRangesBuffer& buf) { data_.setData(buf); } - }; - -} // namespace ALPAKA_ACCELERATOR_NAMESPACE::lst -#endif diff --git a/RecoTracker/LSTCore/src/alpaka/PixelQuintuplet.h b/RecoTracker/LSTCore/src/alpaka/PixelQuintuplet.h index d33022cd112b1..61d043b6a9c87 100644 --- a/RecoTracker/LSTCore/src/alpaka/PixelQuintuplet.h +++ b/RecoTracker/LSTCore/src/alpaka/PixelQuintuplet.h @@ -3,6 +3,7 @@ #include "RecoTracker/LSTCore/interface/alpaka/Constants.h" #include "RecoTracker/LSTCore/interface/Module.h" +#include "RecoTracker/LSTCore/interface/ObjectRangesSoA.h" #include "Segment.h" #include "MiniDoublet.h" @@ -673,7 +674,7 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst { template ALPAKA_FN_ACC ALPAKA_FN_INLINE bool runPixelQuintupletDefaultAlgo(TAcc const& acc, Modules const& modulesInGPU, - ObjectRanges const& rangesInGPU, + ObjectOccupancyConst objectOccupancy, MiniDoubletsConst mds, SegmentsConst segments, SegmentsPixelConst segmentsPixel, @@ -697,7 +698,7 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst { if (not runPixelTripletDefaultAlgo(acc, modulesInGPU, - rangesInGPU, + objectOccupancy, mds, segments, segmentsPixel, @@ -831,7 +832,7 @@ namespace 
ALPAKA_ACCELERATOR_NAMESPACE::lst { unsigned int* connectedPixelSize, unsigned int* connectedPixelIndex, unsigned int nPixelSegments, - ObjectRanges rangesInGPU) const { + ObjectOccupancyConst objectOccupancy) const { auto const globalBlockIdx = alpaka::getIdx(acc); auto const globalThreadIdx = alpaka::getIdx(acc); auto const gridBlockExtent = alpaka::getWorkDiv(acc); @@ -855,14 +856,14 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst { if (nOuterQuintuplets == 0) continue; - unsigned int pixelSegmentIndex = rangesInGPU.segmentModuleIndices[pixelModuleIndex] + i_pLS; + unsigned int pixelSegmentIndex = objectOccupancy.segmentModuleIndices()[pixelModuleIndex] + i_pLS; //fetch the quintuplet for (unsigned int outerQuintupletArrayIndex = globalThreadIdx[2]; outerQuintupletArrayIndex < nOuterQuintuplets; outerQuintupletArrayIndex += gridThreadExtent[2]) { unsigned int quintupletIndex = - rangesInGPU.quintupletModuleIndices[quintupletLowerModuleIndex] + outerQuintupletArrayIndex; + objectOccupancy.quintupletModuleIndices()[quintupletLowerModuleIndex] + outerQuintupletArrayIndex; if (quintupletsInGPU.isDup[quintupletIndex]) continue; @@ -871,7 +872,7 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst { bool success = runPixelQuintupletDefaultAlgo(acc, modulesInGPU, - rangesInGPU, + objectOccupancy, mds, segments, segmentsPixel, diff --git a/RecoTracker/LSTCore/src/alpaka/PixelTriplet.h b/RecoTracker/LSTCore/src/alpaka/PixelTriplet.h index 1401aefdf797d..442a00d14f274 100644 --- a/RecoTracker/LSTCore/src/alpaka/PixelTriplet.h +++ b/RecoTracker/LSTCore/src/alpaka/PixelTriplet.h @@ -3,15 +3,49 @@ #include "RecoTracker/LSTCore/interface/alpaka/Constants.h" #include "RecoTracker/LSTCore/interface/Module.h" +#include "RecoTracker/LSTCore/interface/ObjectRangesSoA.h" #include "Triplet.h" #include "Segment.h" #include "MiniDoublet.h" #include "Hit.h" -#include "ObjectRanges.h" #include "Quintuplet.h" namespace ALPAKA_ACCELERATOR_NAMESPACE::lst { + + template + ALPAKA_FN_ACC ALPAKA_FN_INLINE bool runTripletDefaultAlgoPPBB(TAcc const& acc, + Modules const& modulesInGPU, + ObjectOccupancyConst objectOccupancy, + MiniDoubletsConst mds, + SegmentsConst segments, + SegmentsPixelConst segmentsPixel, + uint16_t pixelModuleIndex, + uint16_t outerInnerLowerModuleIndex, + uint16_t outerOuterLowerModuleIndex, + unsigned int innerSegmentIndex, + unsigned int outerSegmentIndex, + unsigned int firstMDIndex, + unsigned int secondMDIndex, + unsigned int thirdMDIndex, + unsigned int fourthMDIndex); + template + ALPAKA_FN_ACC ALPAKA_FN_INLINE bool runTripletDefaultAlgoPPEE(TAcc const& acc, + Modules const& modulesInGPU, + ObjectOccupancyConst objectOccupancy, + MiniDoubletsConst mds, + SegmentsConst segments, + SegmentsPixelConst segmentsPixel, + uint16_t pixelModuleIndex, + uint16_t outerInnerLowerModuleIndex, + uint16_t outerOuterLowerModuleIndex, + unsigned int innerSegmentIndex, + unsigned int outerSegmentIndex, + unsigned int firstMDIndex, + unsigned int secondMDIndex, + unsigned int thirdMDIndex, + unsigned int fourthMDIndex); + // One pixel segment, one outer tracker triplet! 
struct PixelTriplets { unsigned int* pixelSegmentIndices; @@ -211,7 +245,7 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst { template ALPAKA_FN_ACC ALPAKA_FN_INLINE bool runPixelTrackletDefaultAlgopT3(TAcc const& acc, Modules const& modulesInGPU, - ObjectRanges const& rangesInGPU, + ObjectOccupancyConst objectOccupancy, MiniDoubletsConst mds, SegmentsConst segments, SegmentsPixelConst segmentsPixel, @@ -233,7 +267,7 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst { (outerOuterLowerModuleSubdet == Barrel or outerOuterLowerModuleSubdet == Endcap)) { return runTripletDefaultAlgoPPBB(acc, modulesInGPU, - rangesInGPU, + objectOccupancy, mds, segments, segmentsPixel, @@ -249,7 +283,7 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst { } else if (outerInnerLowerModuleSubdet == Endcap and outerOuterLowerModuleSubdet == Endcap) { return runTripletDefaultAlgoPPEE(acc, modulesInGPU, - rangesInGPU, + objectOccupancy, mds, segments, segmentsPixel, @@ -767,7 +801,7 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst { template ALPAKA_FN_ACC ALPAKA_FN_INLINE bool runPixelTripletDefaultAlgo(TAcc const& acc, Modules const& modulesInGPU, - ObjectRanges const& rangesInGPU, + ObjectOccupancyConst objectOccupancy, MiniDoubletsConst mds, SegmentsConst segments, SegmentsPixelConst segmentsPixel, @@ -793,7 +827,7 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst { // pixel segment vs inner segment of the triplet if (not runPixelTrackletDefaultAlgopT3(acc, modulesInGPU, - rangesInGPU, + objectOccupancy, mds, segments, segmentsPixel, @@ -807,7 +841,7 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst { //pixel segment vs outer segment of triplet if (not runPixelTrackletDefaultAlgopT3(acc, modulesInGPU, - rangesInGPU, + objectOccupancy, mds, segments, segmentsPixel, @@ -820,7 +854,7 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst { } //pt matching between the pixel ptin and the triplet circle pt - unsigned int pixelSegmentArrayIndex = pixelSegmentIndex - rangesInGPU.segmentModuleIndices[pixelModuleIndex]; + unsigned int pixelSegmentArrayIndex = pixelSegmentIndex - objectOccupancy.segmentModuleIndices()[pixelModuleIndex]; float pixelSegmentPt = segmentsPixel.ptIn()[pixelSegmentArrayIndex]; float pixelSegmentPtError = segmentsPixel.ptErr()[pixelSegmentArrayIndex]; float pixelSegmentPx = segmentsPixel.px()[pixelSegmentArrayIndex]; @@ -926,7 +960,7 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst { template ALPAKA_FN_ACC void operator()(TAcc const& acc, Modules modulesInGPU, - ObjectRanges rangesInGPU, + ObjectOccupancyConst objectOccupancy, MiniDoubletsConst mds, SegmentsConst segments, SegmentsPixelConst segmentsPixel, @@ -965,7 +999,7 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst { if (nOuterTriplets == 0) continue; - unsigned int pixelSegmentIndex = rangesInGPU.segmentModuleIndices[pixelModuleIndex] + i_pLS; + unsigned int pixelSegmentIndex = objectOccupancy.segmentModuleIndices()[pixelModuleIndex] + i_pLS; if (segmentsPixel.isDup()[i_pLS]) continue; @@ -987,7 +1021,7 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst { for (unsigned int outerTripletArrayIndex = globalThreadIdx[2]; outerTripletArrayIndex < nOuterTriplets; outerTripletArrayIndex += gridThreadExtent[2]) { unsigned int outerTripletIndex = - rangesInGPU.tripletModuleIndices[tripletLowerModuleIndex] + outerTripletArrayIndex; + objectOccupancy.tripletModuleIndices()[tripletLowerModuleIndex] + outerTripletArrayIndex; if (modulesInGPU.moduleType[tripletsInGPU.lowerModuleIndices[3 * outerTripletIndex + 1]] == TwoS) continue; //REMOVES PS-2S @@ -997,7 +1031,7 @@ namespace 
ALPAKA_ACCELERATOR_NAMESPACE::lst { float pixelRadius, tripletRadius, rPhiChiSquared, rzChiSquared, rPhiChiSquaredInwards, centerX, centerY; bool success = runPixelTripletDefaultAlgo(acc, modulesInGPU, - rangesInGPU, + objectOccupancy, mds, segments, segmentsPixel, @@ -1161,7 +1195,7 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst { template ALPAKA_FN_ACC ALPAKA_FN_INLINE bool runTripletDefaultAlgoPPBB(TAcc const& acc, Modules const& modulesInGPU, - ObjectRanges const& rangesInGPU, + ObjectOccupancyConst objectOccupancy, MiniDoubletsConst mds, SegmentsConst segments, SegmentsPixelConst segmentsPixel, @@ -1201,7 +1235,7 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst { if (alpaka::math::abs(acc, deltaPhi(acc, x_InUp, y_InUp, x_OutLo, y_OutLo)) > 0.5f * float(M_PI)) return false; - unsigned int pixelSegmentArrayIndex = innerSegmentIndex - rangesInGPU.segmentModuleIndices[pixelModuleIndex]; + unsigned int pixelSegmentArrayIndex = innerSegmentIndex - objectOccupancy.segmentModuleIndices()[pixelModuleIndex]; float ptIn = segmentsPixel.ptIn()[pixelSegmentArrayIndex]; float ptSLo = ptIn; float px = segmentsPixel.px()[pixelSegmentArrayIndex]; @@ -1419,7 +1453,7 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst { template ALPAKA_FN_ACC ALPAKA_FN_INLINE bool runTripletDefaultAlgoPPEE(TAcc const& acc, Modules const& modulesInGPU, - ObjectRanges const& rangesInGPU, + ObjectOccupancyConst objectOccupancy, MiniDoubletsConst mds, SegmentsConst segments, SegmentsPixelConst segmentsPixel, @@ -1457,7 +1491,7 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst { float y_OutLo = mds.anchorY()[thirdMDIndex]; float y_OutUp = mds.anchorY()[fourthMDIndex]; - unsigned int pixelSegmentArrayIndex = innerSegmentIndex - rangesInGPU.segmentModuleIndices[pixelModuleIndex]; + unsigned int pixelSegmentArrayIndex = innerSegmentIndex - objectOccupancy.segmentModuleIndices()[pixelModuleIndex]; float ptIn = segmentsPixel.ptIn()[pixelSegmentArrayIndex]; float ptSLo = ptIn; diff --git a/RecoTracker/LSTCore/src/alpaka/Quintuplet.h b/RecoTracker/LSTCore/src/alpaka/Quintuplet.h index 1d506c11c3d63..ec8f7bc339d20 100644 --- a/RecoTracker/LSTCore/src/alpaka/Quintuplet.h +++ b/RecoTracker/LSTCore/src/alpaka/Quintuplet.h @@ -6,12 +6,12 @@ #include "RecoTracker/LSTCore/interface/alpaka/Constants.h" #include "RecoTracker/LSTCore/interface/Module.h" #include "RecoTracker/LSTCore/interface/EndcapGeometry.h" +#include "RecoTracker/LSTCore/interface/ObjectRangesSoA.h" #include "NeuralNetwork.h" #include "Segment.h" #include "MiniDoublet.h" #include "Hit.h" -#include "ObjectRanges.h" #include "Triplet.h" namespace ALPAKA_ACCELERATOR_NAMESPACE::lst { @@ -2506,13 +2506,13 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst { SegmentsConst segments, Triplets tripletsInGPU, Quintuplets quintupletsInGPU, - ObjectRanges rangesInGPU, + ObjectOccupancyConst objectOccupancy, uint16_t nEligibleT5Modules) const { auto const globalThreadIdx = alpaka::getIdx(acc); auto const gridThreadExtent = alpaka::getWorkDiv(acc); for (int iter = globalThreadIdx[0]; iter < nEligibleT5Modules; iter += gridThreadExtent[0]) { - uint16_t lowerModule1 = rangesInGPU.indicesOfEligibleT5Modules[iter]; + uint16_t lowerModule1 = objectOccupancy.indicesOfEligibleT5Modules()[iter]; short layer2_adjustment; int layer = modulesInGPU.layers[lowerModule1]; if (layer == 1) { @@ -2527,13 +2527,15 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst { unsigned int nInnerTriplets = tripletsInGPU.nTriplets[lowerModule1]; for (unsigned int innerTripletArrayIndex = globalThreadIdx[1]; innerTripletArrayIndex < 
nInnerTriplets; innerTripletArrayIndex += gridThreadExtent[1]) { - unsigned int innerTripletIndex = rangesInGPU.tripletModuleIndices[lowerModule1] + innerTripletArrayIndex; + unsigned int innerTripletIndex = + objectOccupancy.tripletModuleIndices()[lowerModule1] + innerTripletArrayIndex; uint16_t lowerModule2 = tripletsInGPU.lowerModuleIndices[Params_T3::kLayers * innerTripletIndex + 1]; uint16_t lowerModule3 = tripletsInGPU.lowerModuleIndices[Params_T3::kLayers * innerTripletIndex + 2]; unsigned int nOuterTriplets = tripletsInGPU.nTriplets[lowerModule3]; for (unsigned int outerTripletArrayIndex = globalThreadIdx[2]; outerTripletArrayIndex < nOuterTriplets; outerTripletArrayIndex += gridThreadExtent[2]) { - unsigned int outerTripletIndex = rangesInGPU.tripletModuleIndices[lowerModule3] + outerTripletArrayIndex; + unsigned int outerTripletIndex = + objectOccupancy.tripletModuleIndices()[lowerModule3] + outerTripletArrayIndex; uint16_t lowerModule4 = tripletsInGPU.lowerModuleIndices[Params_T3::kLayers * outerTripletIndex + 1]; uint16_t lowerModule5 = tripletsInGPU.lowerModuleIndices[Params_T3::kLayers * outerTripletIndex + 2]; @@ -2567,7 +2569,7 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst { if (success) { int totOccupancyQuintuplets = alpaka::atomicAdd( acc, &quintupletsInGPU.totOccupancyQuintuplets[lowerModule1], 1u, alpaka::hierarchy::Threads{}); - if (totOccupancyQuintuplets >= rangesInGPU.quintupletModuleOccupancy[lowerModule1]) { + if (totOccupancyQuintuplets >= objectOccupancy.quintupletModuleOccupancy()[lowerModule1]) { #ifdef WARNINGS printf("Quintuplet excess alert! Module index = %d\n", lowerModule1); #endif @@ -2575,13 +2577,13 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst { int quintupletModuleIndex = alpaka::atomicAdd( acc, &quintupletsInGPU.nQuintuplets[lowerModule1], 1u, alpaka::hierarchy::Threads{}); //this if statement should never get executed! 
- if (rangesInGPU.quintupletModuleIndices[lowerModule1] == -1) { + if (objectOccupancy.quintupletModuleIndices()[lowerModule1] == -1) { #ifdef WARNINGS printf("Quintuplets : no memory for module at module index = %d\n", lowerModule1); #endif } else { unsigned int quintupletIndex = - rangesInGPU.quintupletModuleIndices[lowerModule1] + quintupletModuleIndex; + objectOccupancy.quintupletModuleIndices()[lowerModule1] + quintupletModuleIndex; float phi = mds.anchorPhi()[segments.mdIndices()[tripletsInGPU.segmentIndices[2 * innerTripletIndex]] [layer2_adjustment]]; float eta = mds.anchorEta()[segments.mdIndices()[tripletsInGPU.segmentIndices[2 * innerTripletIndex]] @@ -2630,7 +2632,7 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst { ALPAKA_FN_ACC void operator()(TAcc const& acc, Modules modulesInGPU, Triplets tripletsInGPU, - ObjectRanges rangesInGPU) const { + ObjectOccupancy objectOccupancy) const { // implementation is 1D with a single block static_assert(std::is_same_v, "Should be Acc1D"); ALPAKA_ASSERT_ACC((alpaka::getWorkDiv(acc)[0] == 1)); @@ -2715,16 +2717,16 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst { } int nTotQ = alpaka::atomicAdd(acc, &nTotalQuintupletsx, occupancy, alpaka::hierarchy::Threads{}); - rangesInGPU.quintupletModuleIndices[i] = nTotQ; - rangesInGPU.indicesOfEligibleT5Modules[nEligibleT5Modules] = i; - rangesInGPU.quintupletModuleOccupancy[i] = occupancy; + objectOccupancy.quintupletModuleIndices()[i] = nTotQ; + objectOccupancy.indicesOfEligibleT5Modules()[nEligibleT5Modules] = i; + objectOccupancy.quintupletModuleOccupancy()[i] = occupancy; } // Wait for all threads to finish before reporting final values alpaka::syncBlockThreads(acc); if (cms::alpakatools::once_per_block(acc)) { - *rangesInGPU.nEligibleT5Modules = static_cast(nEligibleT5Modulesx); - *rangesInGPU.device_nTotalQuints = static_cast(nTotalQuintupletsx); + objectOccupancy.nEligibleT5Modules() = static_cast(nEligibleT5Modulesx); + objectOccupancy.nTotalQuints() = static_cast(nTotalQuintupletsx); } } }; @@ -2734,7 +2736,8 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst { ALPAKA_FN_ACC void operator()(TAcc const& acc, Modules modulesInGPU, Quintuplets quintupletsInGPU, - ObjectRanges rangesInGPU) const { + ObjectRanges ranges, + ObjectOccupancyConst objectOccupancy) const { // implementation is 1D with a single block static_assert(std::is_same_v, "Should be Acc1D"); ALPAKA_ASSERT_ACC((alpaka::getWorkDiv(acc)[0] == 1)); @@ -2743,13 +2746,13 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst { auto const gridThreadExtent = alpaka::getWorkDiv(acc); for (uint16_t i = globalThreadIdx[0]; i < *modulesInGPU.nLowerModules; i += gridThreadExtent[0]) { - if (quintupletsInGPU.nQuintuplets[i] == 0 or rangesInGPU.quintupletModuleIndices[i] == -1) { - rangesInGPU.quintupletRanges[i * 2] = -1; - rangesInGPU.quintupletRanges[i * 2 + 1] = -1; + if (quintupletsInGPU.nQuintuplets[i] == 0 or objectOccupancy.quintupletModuleIndices()[i] == -1) { + ranges.quintupletRanges()[i][0] = -1; + ranges.quintupletRanges()[i][1] = -1; } else { - rangesInGPU.quintupletRanges[i * 2] = rangesInGPU.quintupletModuleIndices[i]; - rangesInGPU.quintupletRanges[i * 2 + 1] = - rangesInGPU.quintupletModuleIndices[i] + quintupletsInGPU.nQuintuplets[i] - 1; + ranges.quintupletRanges()[i][0] = objectOccupancy.quintupletModuleIndices()[i]; + ranges.quintupletRanges()[i][1] = + objectOccupancy.quintupletModuleIndices()[i] + quintupletsInGPU.nQuintuplets[i] - 1; } } } diff --git a/RecoTracker/LSTCore/src/alpaka/Segment.h 
b/RecoTracker/LSTCore/src/alpaka/Segment.h index b795edbcbeb36..0fcfe0cd7c2bb 100644 --- a/RecoTracker/LSTCore/src/alpaka/Segment.h +++ b/RecoTracker/LSTCore/src/alpaka/Segment.h @@ -8,10 +8,10 @@ #include "RecoTracker/LSTCore/interface/alpaka/SegmentsDeviceCollection.h" #include "RecoTracker/LSTCore/interface/Module.h" #include "RecoTracker/LSTCore/interface/EndcapGeometry.h" +#include "RecoTracker/LSTCore/interface/ObjectRangesSoA.h" #include "MiniDoublet.h" #include "Hit.h" -#include "ObjectRanges.h" namespace ALPAKA_ACCELERATOR_NAMESPACE::lst { @@ -545,7 +545,8 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst { MiniDoubletsOccupancyConst mdsOccupancy, Segments segments, SegmentsOccupancy segmentsOccupancy, - ObjectRanges rangesInGPU) const { + ObjectRangesConst ranges, + ObjectOccupancyConst objectOccupancy) const { auto const globalBlockIdx = alpaka::getIdx(acc); auto const blockThreadIdx = alpaka::getIdx(acc); auto const gridBlockExtent = alpaka::getWorkDiv(acc); @@ -576,8 +577,8 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst { if (outerMDArrayIdx >= nOuterMDs) continue; - unsigned int innerMDIndex = rangesInGPU.mdRanges[innerLowerModuleIndex * 2] + innerMDArrayIdx; - unsigned int outerMDIndex = rangesInGPU.mdRanges[outerLowerModuleIndex * 2] + outerMDArrayIdx; + unsigned int innerMDIndex = ranges.mdRanges()[innerLowerModuleIndex][0] + innerMDArrayIdx; + unsigned int outerMDIndex = ranges.mdRanges()[outerLowerModuleIndex][0] + outerMDArrayIdx; float dPhi, dPhiMin, dPhiMax, dPhiChange, dPhiChangeMin, dPhiChangeMax; @@ -605,14 +606,16 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst { &segmentsOccupancy.totOccupancySegments()[innerLowerModuleIndex], 1u, alpaka::hierarchy::Threads{}); - if (static_cast(totOccupancySegments) >= rangesInGPU.segmentModuleOccupancy[innerLowerModuleIndex]) { + if (static_cast(totOccupancySegments) >= + objectOccupancy.segmentModuleOccupancy()[innerLowerModuleIndex]) { #ifdef WARNINGS printf("Segment excess alert! 
 Module index = %d\n", innerLowerModuleIndex);
 #endif
         } else {
           unsigned int segmentModuleIdx = alpaka::atomicAdd(
               acc, &segmentsOccupancy.nSegments()[innerLowerModuleIndex], 1u, alpaka::hierarchy::Threads{});
-          unsigned int segmentIdx = rangesInGPU.segmentModuleIndices[innerLowerModuleIndex] + segmentModuleIdx;
+          unsigned int segmentIdx =
+              objectOccupancy.segmentModuleIndices()[innerLowerModuleIndex] + segmentModuleIdx;
 
           addSegmentToMemory(segments,
                              innerMDIndex,
@@ -640,7 +643,7 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst {
     template
     ALPAKA_FN_ACC void operator()(TAcc const& acc,
                                   Modules modulesInGPU,
-                                  ObjectRanges rangesInGPU,
+                                  ObjectOccupancy objectOccupancy,
                                   MiniDoubletsConst mds) const {
       // implementation is 1D with a single block
       static_assert(std::is_same_v, "Should be Acc1D");
@@ -661,8 +664,8 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst {
       for (uint16_t i = globalThreadIdx[0]; i < *modulesInGPU.nLowerModules; i += gridThreadExtent[0]) {
         if (modulesInGPU.nConnectedModules[i] == 0) {
-          rangesInGPU.segmentModuleIndices[i] = nTotalSegments;
-          rangesInGPU.segmentModuleOccupancy[i] = 0;
+          objectOccupancy.segmentModuleIndices()[i] = nTotalSegments;
+          objectOccupancy.segmentModuleOccupancy()[i] = 0;
           continue;
         }
@@ -727,15 +730,15 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst {
         }
         int nTotSegs = alpaka::atomicAdd(acc, &nTotalSegments, occupancy, alpaka::hierarchy::Threads{});
-        rangesInGPU.segmentModuleIndices[i] = nTotSegs;
-        rangesInGPU.segmentModuleOccupancy[i] = occupancy;
+        objectOccupancy.segmentModuleIndices()[i] = nTotSegs;
+        objectOccupancy.segmentModuleOccupancy()[i] = occupancy;
       }
 
       // Wait for all threads to finish before reporting final values
       alpaka::syncBlockThreads(acc);
       if (cms::alpakatools::once_per_block(acc)) {
-        rangesInGPU.segmentModuleIndices[*modulesInGPU.nLowerModules] = nTotalSegments;
-        *rangesInGPU.device_nTotalSegs = nTotalSegments;
+        objectOccupancy.segmentModuleIndices()[*modulesInGPU.nLowerModules] = nTotalSegments;
+        objectOccupancy.nTotalSegs() = nTotalSegments;
       }
     }
   };
@@ -745,7 +748,8 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst {
     ALPAKA_FN_ACC void operator()(TAcc const& acc,
                                   Modules modulesInGPU,
                                   SegmentsOccupancyConst segmentsOccupancy,
-                                  ObjectRanges rangesInGPU) const {
+                                  ObjectRanges ranges,
+                                  ObjectOccupancyConst objectOccupancy) const {
       // implementation is 1D with a single block
       static_assert(std::is_same_v, "Should be Acc1D");
       ALPAKA_ASSERT_ACC((alpaka::getWorkDiv(acc)[0] == 1));
@@ -755,12 +759,12 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst {
       for (uint16_t i = globalThreadIdx[0]; i < *modulesInGPU.nLowerModules; i += gridThreadExtent[0]) {
         if (segmentsOccupancy.nSegments()[i] == 0) {
-          rangesInGPU.segmentRanges[i * 2] = -1;
-          rangesInGPU.segmentRanges[i * 2 + 1] = -1;
+          ranges.segmentRanges()[i][0] = -1;
+          ranges.segmentRanges()[i][1] = -1;
         } else {
-          rangesInGPU.segmentRanges[i * 2] = rangesInGPU.segmentModuleIndices[i];
-          rangesInGPU.segmentRanges[i * 2 + 1] =
-              rangesInGPU.segmentModuleIndices[i] + segmentsOccupancy.nSegments()[i] - 1;
+          ranges.segmentRanges()[i][0] = objectOccupancy.segmentModuleIndices()[i];
+          ranges.segmentRanges()[i][1] =
+              objectOccupancy.segmentModuleIndices()[i] + segmentsOccupancy.nSegments()[i] - 1;
         }
       }
     }
@@ -770,7 +774,7 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst {
     template
     ALPAKA_FN_ACC void operator()(TAcc const& acc,
                                   Modules modulesInGPU,
-                                  ObjectRanges rangesInGPU,
+                                  ObjectOccupancyConst objectOccupancy,
                                   Hits hitsInGPU,
                                   MiniDoublets mds,
                                   Segments segments,
@@ -786,9 +790,9 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst {
       auto const gridThreadExtent = alpaka::getWorkDiv(acc);
 
       for (int tid = globalThreadIdx[2]; tid < size; tid += gridThreadExtent[2]) {
-        unsigned int innerMDIndex = rangesInGPU.miniDoubletModuleIndices[pixelModuleIndex] + 2 * (tid);
-        unsigned int outerMDIndex = rangesInGPU.miniDoubletModuleIndices[pixelModuleIndex] + 2 * (tid) + 1;
-        unsigned int pixelSegmentIndex = rangesInGPU.segmentModuleIndices[pixelModuleIndex] + tid;
+        unsigned int innerMDIndex = objectOccupancy.miniDoubletModuleIndices()[pixelModuleIndex] + 2 * (tid);
+        unsigned int outerMDIndex = objectOccupancy.miniDoubletModuleIndices()[pixelModuleIndex] + 2 * (tid) + 1;
+        unsigned int pixelSegmentIndex = objectOccupancy.segmentModuleIndices()[pixelModuleIndex] + tid;
 
         addMDToMemory(acc,
                       mds,
diff --git a/RecoTracker/LSTCore/src/alpaka/TrackCandidate.h b/RecoTracker/LSTCore/src/alpaka/TrackCandidate.h
index 5ff4b7ad478cf..c5ecfdb369540 100644
--- a/RecoTracker/LSTCore/src/alpaka/TrackCandidate.h
+++ b/RecoTracker/LSTCore/src/alpaka/TrackCandidate.h
@@ -6,6 +6,7 @@
 #include "RecoTracker/LSTCore/interface/TrackCandidatesHostCollection.h"
 #include "RecoTracker/LSTCore/interface/TrackCandidatesSoA.h"
 #include "RecoTracker/LSTCore/interface/alpaka/TrackCandidatesDeviceCollection.h"
+#include "RecoTracker/LSTCore/interface/ObjectRangesSoA.h"
 
 #include "Triplet.h"
 #include "Segment.h"
@@ -13,7 +14,6 @@
 #include "PixelTriplet.h"
 #include "Quintuplet.h"
 #include "Hit.h"
-#include "ObjectRanges.h"
 
 namespace ALPAKA_ACCELERATOR_NAMESPACE::lst {
   ALPAKA_FN_ACC ALPAKA_FN_INLINE void addpLSTrackCandidateToMemory(TrackCandidates& cands,
@@ -113,7 +113,7 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst {
     template
     ALPAKA_FN_ACC void operator()(TAcc const& acc,
                                   Modules modulesInGPU,
-                                  ObjectRanges rangesInGPU,
+                                  ObjectOccupancyConst objectOccupancy,
                                   PixelTriplets pixelTripletsInGPU,
                                   SegmentsPixelConst segmentsPixel,
                                   PixelQuintuplets pixelQuintupletsInGPU) const {
@@ -131,7 +131,7 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst {
         float phi1 = __H2F(pixelTripletsInGPU.phi_pix[pixelTripletIndex]);
 
         int pixelModuleIndex = *modulesInGPU.nLowerModules;
-        unsigned int prefix = rangesInGPU.segmentModuleIndices[pixelModuleIndex];
+        unsigned int prefix = objectOccupancy.segmentModuleIndices()[pixelModuleIndex];
 
         unsigned int nPixelQuintuplets = *pixelQuintupletsInGPU.nPixelQuintuplets;
         for (unsigned int pixelQuintupletIndex = globalThreadIdx[1]; pixelQuintupletIndex < nPixelQuintuplets;
@@ -157,21 +157,21 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst {
                                   Quintuplets quintupletsInGPU,
                                   PixelQuintuplets pixelQuintupletsInGPU,
                                   PixelTriplets pixelTripletsInGPU,
-                                  ObjectRanges rangesInGPU) const {
+                                  ObjectOccupancyConst objectOccupancy) const {
       auto const globalThreadIdx = alpaka::getIdx(acc);
       auto const gridThreadExtent = alpaka::getWorkDiv(acc);
 
       for (int innerInnerInnerLowerModuleArrayIndex = globalThreadIdx[0];
           innerInnerInnerLowerModuleArrayIndex < *(modulesInGPU.nLowerModules);
           innerInnerInnerLowerModuleArrayIndex += gridThreadExtent[0]) {
-        if (rangesInGPU.quintupletModuleIndices[innerInnerInnerLowerModuleArrayIndex] == -1)
+        if (objectOccupancy.quintupletModuleIndices()[innerInnerInnerLowerModuleArrayIndex] == -1)
           continue;
 
         unsigned int nQuints = quintupletsInGPU.nQuintuplets[innerInnerInnerLowerModuleArrayIndex];
         for (unsigned int innerObjectArrayIndex = globalThreadIdx[1]; innerObjectArrayIndex < nQuints;
             innerObjectArrayIndex += gridThreadExtent[1]) {
           unsigned int quintupletIndex =
-              rangesInGPU.quintupletModuleIndices[innerInnerInnerLowerModuleArrayIndex] + innerObjectArrayIndex;
+              objectOccupancy.quintupletModuleIndices()[innerInnerInnerLowerModuleArrayIndex] + innerObjectArrayIndex;
 
           // Don't add duplicate T5s or T5s that are accounted in pT5s
           if (quintupletsInGPU.isDup[quintupletIndex] or quintupletsInGPU.partOfPT5[quintupletIndex])
@@ -209,7 +209,7 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst {
     template
     ALPAKA_FN_ACC void operator()(TAcc const& acc,
                                   Modules modulesInGPU,
-                                  ObjectRanges rangesInGPU,
+                                  ObjectOccupancyConst objectOccupancy,
                                   PixelTriplets pixelTripletsInGPU,
                                   TrackCandidates cands,
                                   SegmentsConst segments,
@@ -230,7 +230,7 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst {
         float eta1 = segmentsPixel.eta()[pixelArrayIndex];
         float phi1 = segmentsPixel.phi()[pixelArrayIndex];
 
-        unsigned int prefix = rangesInGPU.segmentModuleIndices[pixelModuleIndex];
+        unsigned int prefix = objectOccupancy.segmentModuleIndices()[pixelModuleIndex];
 
         unsigned int nTrackCandidates = cands.nTrackCandidates();
         for (unsigned int trackCandidateIndex = globalThreadIdx[1]; trackCandidateIndex < nTrackCandidates;
@@ -295,7 +295,7 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst {
                                   PixelTriplets pixelTripletsInGPU,
                                   TrackCandidates cands,
                                   SegmentsPixelConst segmentsPixel,
-                                  ObjectRanges rangesInGPU) const {
+                                  ObjectOccupancyConst objectOccupancy) const {
      // implementation is 1D with a single block
      static_assert(std::is_same_v, "Should be Acc1D");
      ALPAKA_ASSERT_ACC((alpaka::getWorkDiv(acc)[0] == 1));
@@ -304,7 +304,7 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst {
       auto const gridThreadExtent = alpaka::getWorkDiv(acc);
 
       unsigned int nPixelTriplets = *pixelTripletsInGPU.nPixelTriplets;
-      unsigned int pLS_offset = rangesInGPU.segmentModuleIndices[nLowerModules];
+      unsigned int pLS_offset = objectOccupancy.segmentModuleIndices()[nLowerModules];
       for (unsigned int pixelTripletIndex = globalThreadIdx[0]; pixelTripletIndex < nPixelTriplets;
           pixelTripletIndex += gridThreadExtent[0]) {
         if ((pixelTripletsInGPU.isDup[pixelTripletIndex]))
@@ -350,17 +350,17 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst {
                                   uint16_t nLowerModules,
                                   Quintuplets quintupletsInGPU,
                                   TrackCandidates cands,
-                                  ObjectRanges rangesInGPU) const {
+                                  ObjectOccupancyConst objectOccupancy) const {
       auto const globalThreadIdx = alpaka::getIdx(acc);
       auto const gridThreadExtent = alpaka::getWorkDiv(acc);
 
       for (int idx = globalThreadIdx[1]; idx < nLowerModules; idx += gridThreadExtent[1]) {
-        if (rangesInGPU.quintupletModuleIndices[idx] == -1)
+        if (objectOccupancy.quintupletModuleIndices()[idx] == -1)
           continue;
 
         unsigned int nQuints = quintupletsInGPU.nQuintuplets[idx];
         for (unsigned int jdx = globalThreadIdx[2]; jdx < nQuints; jdx += gridThreadExtent[2]) {
-          unsigned int quintupletIndex = rangesInGPU.quintupletModuleIndices[idx] + jdx;
+          unsigned int quintupletIndex = objectOccupancy.quintupletModuleIndices()[idx] + jdx;
           if (quintupletsInGPU.isDup[quintupletIndex] or quintupletsInGPU.partOfPT5[quintupletIndex])
             continue;
           if (!(quintupletsInGPU.TightCutFlag[quintupletIndex]))
@@ -445,7 +445,7 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst {
                                   PixelQuintuplets pixelQuintupletsInGPU,
                                   TrackCandidates cands,
                                   SegmentsPixelConst segmentsPixel,
-                                  ObjectRanges rangesInGPU) const {
+                                  ObjectOccupancyConst objectOccupancy) const {
      // implementation is 1D with a single block
      static_assert(std::is_same_v, "Should be Acc1D");
      ALPAKA_ASSERT_ACC((alpaka::getWorkDiv(acc)[0] == 1));
@@ -454,7 +454,7 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst {
       auto const gridThreadExtent = alpaka::getWorkDiv(acc);
 
       int nPixelQuintuplets = *pixelQuintupletsInGPU.nPixelQuintuplets;
-      unsigned int pLS_offset = rangesInGPU.segmentModuleIndices[nLowerModules];
+      unsigned int pLS_offset = objectOccupancy.segmentModuleIndices()[nLowerModules];
       for (int pixelQuintupletIndex = globalThreadIdx[0]; pixelQuintupletIndex < nPixelQuintuplets;
           pixelQuintupletIndex += gridThreadExtent[0]) {
         if (pixelQuintupletsInGPU.isDup[pixelQuintupletIndex])
diff --git a/RecoTracker/LSTCore/src/alpaka/Triplet.h b/RecoTracker/LSTCore/src/alpaka/Triplet.h
index 3c8b4cddbe4ab..ba1fd4d1387c8 100644
--- a/RecoTracker/LSTCore/src/alpaka/Triplet.h
+++ b/RecoTracker/LSTCore/src/alpaka/Triplet.h
@@ -5,11 +5,11 @@
 
 #include "RecoTracker/LSTCore/interface/alpaka/Constants.h"
 #include "RecoTracker/LSTCore/interface/Module.h"
+#include "RecoTracker/LSTCore/interface/ObjectRangesSoA.h"
 
 #include "Segment.h"
 #include "MiniDoublet.h"
 #include "Hit.h"
-#include "ObjectRanges.h"
 
 namespace ALPAKA_ACCELERATOR_NAMESPACE::lst {
   struct Triplets {
@@ -806,7 +806,8 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst {
                                   SegmentsConst segments,
                                   SegmentsOccupancyConst segmentsOccupancy,
                                   Triplets tripletsInGPU,
-                                  ObjectRanges rangesInGPU,
+                                  ObjectRangesConst ranges,
+                                  ObjectOccupancyConst objectOccupancy,
                                   uint16_t* index_gpu,
                                   uint16_t nonZeroModules) const {
       auto const globalThreadIdx = alpaka::getIdx(acc);
@@ -826,7 +827,7 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst {
       for (unsigned int innerSegmentArrayIndex = globalThreadIdx[1]; innerSegmentArrayIndex < nInnerSegments;
           innerSegmentArrayIndex += gridThreadExtent[1]) {
         unsigned int innerSegmentIndex =
-            rangesInGPU.segmentRanges[innerInnerLowerModuleIndex * 2] + innerSegmentArrayIndex;
+            ranges.segmentRanges()[innerInnerLowerModuleIndex][0] + innerSegmentArrayIndex;
 
         // middle lower module - outer lower module of inner segment
         uint16_t middleLowerModuleIndex = segments.outerLowerModuleIndices()[innerSegmentIndex];
@@ -834,8 +835,7 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst {
         unsigned int nOuterSegments = segmentsOccupancy.nSegments()[middleLowerModuleIndex];
         for (unsigned int outerSegmentArrayIndex = globalThreadIdx[2]; outerSegmentArrayIndex < nOuterSegments;
             outerSegmentArrayIndex += gridThreadExtent[2]) {
-          unsigned int outerSegmentIndex =
-              rangesInGPU.segmentRanges[2 * middleLowerModuleIndex] + outerSegmentArrayIndex;
+          unsigned int outerSegmentIndex = ranges.segmentRanges()[middleLowerModuleIndex][0] + outerSegmentArrayIndex;
 
           uint16_t outerOuterLowerModuleIndex = segments.outerLowerModuleIndices()[outerSegmentIndex];
@@ -865,7 +865,7 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst {
                                                             1u, alpaka::hierarchy::Threads{});
             if (static_cast(totOccupancyTriplets) >=
-                rangesInGPU.tripletModuleOccupancy[innerInnerLowerModuleIndex]) {
+                objectOccupancy.tripletModuleOccupancy()[innerInnerLowerModuleIndex]) {
 #ifdef WARNINGS
               printf("Triplet excess alert! Module index = %d\n", innerInnerLowerModuleIndex);
 #endif
@@ -873,7 +873,7 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst {
             unsigned int tripletModuleIndex = alpaka::atomicAdd(
                 acc, &tripletsInGPU.nTriplets[innerInnerLowerModuleIndex], 1u, alpaka::hierarchy::Threads{});
             unsigned int tripletIndex =
-                rangesInGPU.tripletModuleIndices[innerInnerLowerModuleIndex] + tripletModuleIndex;
+                objectOccupancy.tripletModuleIndices()[innerInnerLowerModuleIndex] + tripletModuleIndex;
 #ifdef CUT_VALUE_DEBUG
             addTripletToMemory(modulesInGPU,
                                mds,
@@ -920,7 +920,7 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst {
     template
     ALPAKA_FN_ACC void operator()(TAcc const& acc,
                                   Modules modulesInGPU,
-                                  ObjectRanges rangesInGPU,
+                                  ObjectOccupancy objectOccupancy,
                                   SegmentsOccupancyConst segmentsOccupancy) const {
       // implementation is 1D with a single block
       static_assert(std::is_same_v, "Should be Acc1D");
@@ -941,8 +941,8 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst {
       for (uint16_t i = globalThreadIdx[0]; i < *modulesInGPU.nLowerModules; i += gridThreadExtent[0]) {
         if (segmentsOccupancy.nSegments()[i] == 0) {
-          rangesInGPU.tripletModuleIndices[i] = nTotalTriplets;
-          rangesInGPU.tripletModuleOccupancy[i] = 0;
+          objectOccupancy.tripletModuleIndices()[i] = nTotalTriplets;
+          objectOccupancy.tripletModuleOccupancy()[i] = 0;
           continue;
         }
@@ -1006,15 +1006,15 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst {
 #endif
         }
 
-        rangesInGPU.tripletModuleOccupancy[i] = occupancy;
+        objectOccupancy.tripletModuleOccupancy()[i] = occupancy;
         unsigned int nTotT = alpaka::atomicAdd(acc, &nTotalTriplets, occupancy, alpaka::hierarchy::Threads{});
-        rangesInGPU.tripletModuleIndices[i] = nTotT;
+        objectOccupancy.tripletModuleIndices()[i] = nTotT;
       }
 
       // Wait for all threads to finish before reporting final values
       alpaka::syncBlockThreads(acc);
       if (cms::alpakatools::once_per_block(acc)) {
-        *rangesInGPU.device_nTotalTrips = nTotalTriplets;
+        objectOccupancy.nTotalTrips() = nTotalTriplets;
      }
     }
   };
@@ -1024,7 +1024,8 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst {
     ALPAKA_FN_ACC void operator()(TAcc const& acc,
                                   Modules modulesInGPU,
                                   Triplets tripletsInGPU,
-                                  ObjectRanges rangesInGPU) const {
+                                  ObjectRanges ranges,
+                                  ObjectOccupancyConst objectOccupancy) const {
       // implementation is 1D with a single block
       static_assert(std::is_same_v, "Should be Acc1D");
       ALPAKA_ASSERT_ACC((alpaka::getWorkDiv(acc)[0] == 1));
@@ -1034,11 +1035,11 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::lst {
       for (uint16_t i = globalThreadIdx[0]; i < *modulesInGPU.nLowerModules; i += gridThreadExtent[0]) {
         if (tripletsInGPU.nTriplets[i] == 0) {
-          rangesInGPU.tripletRanges[i * 2] = -1;
-          rangesInGPU.tripletRanges[i * 2 + 1] = -1;
+          ranges.tripletRanges()[i][0] = -1;
+          ranges.tripletRanges()[i][1] = -1;
         } else {
-          rangesInGPU.tripletRanges[i * 2] = rangesInGPU.tripletModuleIndices[i];
-          rangesInGPU.tripletRanges[i * 2 + 1] = rangesInGPU.tripletModuleIndices[i] + tripletsInGPU.nTriplets[i] - 1;
+          ranges.tripletRanges()[i][0] = objectOccupancy.tripletModuleIndices()[i];
+          ranges.tripletRanges()[i][1] = objectOccupancy.tripletModuleIndices()[i] + tripletsInGPU.nTriplets[i] - 1;
         }
       }
     }
diff --git a/RecoTracker/LSTCore/standalone/code/core/AccessHelper.cc b/RecoTracker/LSTCore/standalone/code/core/AccessHelper.cc
index 4c325a7be72e4..9fcba6d7d6910 100644
--- a/RecoTracker/LSTCore/standalone/code/core/AccessHelper.cc
+++ b/RecoTracker/LSTCore/standalone/code/core/AccessHelper.cc
@@ -30,9 +30,9 @@ std::tuple, std::vector> convertHitsToHi
 std::vector getPixelHitsFrompLS(Event* event, unsigned int pLS) {
   SegmentsConst segments = event->getSegments();
   MiniDoubletsConst miniDoublets = event->getMiniDoublets();
-  ObjectRanges const* rangesEvt = event->getRanges().data();
+  auto objectOccupancy = event->getRanges();
   Modules const* modulesEvt = event->getModules().data();
-  const unsigned int pLS_offset = rangesEvt->segmentModuleIndices[*(modulesEvt->nLowerModules)];
+  const unsigned int pLS_offset = objectOccupancy.segmentModuleIndices()[*(modulesEvt->nLowerModules)];
   unsigned int MD_1 = segments.mdIndices()[pLS + pLS_offset][0];
   unsigned int MD_2 = segments.mdIndices()[pLS + pLS_offset][1];
   unsigned int hit_1 = miniDoublets.anchorHitIndices()[MD_1];
@@ -226,9 +226,9 @@ std::tuple, std::vector> getHitIdxsAndHi
 //____________________________________________________________________________________________
 unsigned int getPixelLSFrompT3(Event* event, unsigned int pT3) {
   PixelTriplets const* pixelTriplets = event->getPixelTriplets().data();
-  ObjectRanges const* rangesEvt = event->getRanges().data();
+  auto objectOccupancy = event->getRanges();
   Modules const* modulesEvt = event->getModules().data();
-  const unsigned int pLS_offset = rangesEvt->segmentModuleIndices[*(modulesEvt->nLowerModules)];
+  const unsigned int pLS_offset = objectOccupancy.segmentModuleIndices()[*(modulesEvt->nLowerModules)];
   return pixelTriplets->pixelSegmentIndices[pT3] - pLS_offset;
 }
 
@@ -315,9 +315,9 @@ std::tuple, std::vector> getHitIdxsAndHi
 //____________________________________________________________________________________________
 unsigned int getPixelLSFrompT5(Event* event, unsigned int pT5) {
   PixelQuintuplets const* pixelQuintuplets = event->getPixelQuintuplets().data();
-  ObjectRanges const* rangesEvt = event->getRanges().data();
+  auto objectOccupancy = event->getRanges();
   Modules const* modulesEvt = event->getModules().data();
-  const unsigned int pLS_offset = rangesEvt->segmentModuleIndices[*(modulesEvt->nLowerModules)];
+  const unsigned int pLS_offset = objectOccupancy.segmentModuleIndices()[*(modulesEvt->nLowerModules)];
   return pixelQuintuplets->pixelIndices[pT5] - pLS_offset;
 }
 
diff --git a/RecoTracker/LSTCore/standalone/code/core/write_lst_ntuple.cc b/RecoTracker/LSTCore/standalone/code/core/write_lst_ntuple.cc
index abcb324be0dc9..f4802ccdb0868 100644
--- a/RecoTracker/LSTCore/standalone/code/core/write_lst_ntuple.cc
+++ b/RecoTracker/LSTCore/standalone/code/core/write_lst_ntuple.cc
@@ -367,7 +367,7 @@ void setPixelQuintupletOutputBranches(Event* event) {
 //________________________________________________________________________________________________________________________________
 void setQuintupletOutputBranches(Event* event) {
   Quintuplets const* quintuplets = event->getQuintuplets().data();
-  ObjectRanges const* ranges = event->getRanges().data();
+  auto objectOccupancy = event->getRanges();
   Modules const* modules = event->getModules().data();
 
   int n_accepted_simtrk = ana.tx->getBranch>("sim_TC_matched").size();
@@ -377,7 +377,7 @@ void setQuintupletOutputBranches(Event* event) {
   for (unsigned int lowerModuleIdx = 0; lowerModuleIdx < *(modules->nLowerModules); ++lowerModuleIdx) {
     int nQuintuplets = quintuplets->nQuintuplets[lowerModuleIdx];
     for (unsigned int idx = 0; idx < nQuintuplets; idx++) {
-      unsigned int quintupletIndex = ranges->quintupletModuleIndices[lowerModuleIdx] + idx;
+      unsigned int quintupletIndex = objectOccupancy.quintupletModuleIndices()[lowerModuleIdx] + idx;
       float pt = __H2F(quintuplets->innerRadius[quintupletIndex]) * k2Rinv1GeVf * 2;
       float eta = __H2F(quintuplets->eta[quintupletIndex]);
       float phi = __H2F(quintuplets->phi[quintupletIndex]);
@@ -505,7 +505,7 @@ void setGnnNtupleBranches(Event* event) {
   MiniDoubletsOccupancyConst miniDoublets = event->getMiniDoublets();
   Hits const* hitsEvt = event->getHits().data();
   Modules const* modules = event->getModules().data();
-  ObjectRanges const* ranges = event->getRanges().data();
+  auto objectOccupancy = event->getRanges();
   auto const& trackCandidates = event->getTrackCandidates().const_view();
 
   std::set mds_used_in_sg;
@@ -558,7 +558,7 @@ void setGnnNtupleBranches(Event* event) {
     // Loop over segments
     for (unsigned int jdx = 0; jdx < segmentsOccupancy.nSegments()[idx]; jdx++) {
       // Get the actual index to the segments using ranges
-      unsigned int sgIdx = ranges->segmentModuleIndices[idx] + jdx;
+      unsigned int sgIdx = objectOccupancy.segmentModuleIndices()[idx] + jdx;
 
       // Get the hit indices
       std::vector MDs = getMDsFromLS(event, sgIdx);
@@ -945,14 +945,14 @@ std::tuple, std::vector
   Modules const* modules = event->getModules().data();
-  ObjectRanges const* ranges = event->getRanges().data();
+  auto ranges = event->getRanges();
 
   int nHits = 0;
   for (unsigned int idx = 0; idx <= *(modules->nLowerModules);
       idx++)  // "<=" because cheating to include pixel track candidate lower module
   {
-    nHits += ranges->hitRanges[4 * idx + 1] - ranges->hitRanges[4 * idx] + 1;
-    nHits += ranges->hitRanges[4 * idx + 3] - ranges->hitRanges[4 * idx + 2] + 1;
+    nHits += ranges.hitRanges()[2 * idx][1] - ranges.hitRanges()[2 * idx][0] + 1;
+    nHits += ranges.hitRanges()[2 * idx + 1][1] - ranges.hitRanges()[2 * idx + 1][0] + 1;
   }
   std::cout << " nHits: " << nHits << std::endl;
 }
@@ -990,12 +990,12 @@ void printMDs(Event* event) {
   MiniDoubletsOccupancyConst miniDoubletsOccupancy = event->getMiniDoublets();
   Hits const* hitsEvt = event->getHits().data();
   Modules const* modules = event->getModules().data();
-  ObjectRanges const* ranges = event->getRanges().data();
+  auto objectOccupancy = event->getRanges();
 
   // Then obtain the lower module index
   for (unsigned int idx = 0; idx <= *(modules->nLowerModules); ++idx) {
     for (unsigned int iMD = 0; iMD < miniDoubletsOccupancy.nMDs()[idx]; iMD++) {
-      unsigned int mdIdx = ranges->miniDoubletModuleIndices[idx] + iMD;
+      unsigned int mdIdx = objectOccupancy.miniDoubletModuleIndices()[idx] + iMD;
       unsigned int LowerHitIndex = miniDoublets.anchorHitIndices()[mdIdx];
       unsigned int UpperHitIndex = miniDoublets.outerHitIndices()[mdIdx];
       unsigned int hit0 = hitsEvt->idxs[LowerHitIndex];
@@ -1014,14 +1014,14 @@ void printLSs(Event* event) {
   MiniDoubletsConst miniDoublets = event->getMiniDoublets();
   Hits const* hitsEvt = event->getHits().data();
   Modules const* modules = event->getModules().data();
-  ObjectRanges const* ranges = event->getRanges().data();
+  auto objectOccupancy = event->getRanges();
 
   int nSegments = 0;
   for (unsigned int i = 0; i < *(modules->nLowerModules); ++i) {
     unsigned int idx = i;  //modules->lowerModuleIndices[i];
     nSegments += segmentsOccupancy.nSegments()[idx];
     for (unsigned int jdx = 0; jdx < segmentsOccupancy.nSegments()[idx]; jdx++) {
-      unsigned int sgIdx = ranges->segmentModuleIndices[idx] + jdx;
+      unsigned int sgIdx = objectOccupancy.segmentModuleIndices()[idx] + jdx;
       unsigned int InnerMiniDoubletIndex = segments.mdIndices()[sgIdx][0];
       unsigned int OuterMiniDoubletIndex = segments.mdIndices()[sgIdx][1];
       unsigned int InnerMiniDoubletLowerHitIndex = miniDoublets.anchorHitIndices()[InnerMiniDoubletIndex];
@@ -1047,13 +1047,13 @@ void printpLSs(Event* event) {
   MiniDoubletsConst miniDoublets = event->getMiniDoublets();
   Hits const* hitsEvt = event->getHits().data();
   Modules const* modules = event->getModules().data();
-  ObjectRanges const* ranges = event->getRanges().data();
+  auto objectOccupancy = event->getRanges();
 
   unsigned int i = *(modules->nLowerModules);
   unsigned int idx = i;  //modules->lowerModuleIndices[i];
   int npLS = segmentsOccupancy.nSegments()[idx];
   for (unsigned int jdx = 0; jdx < segmentsOccupancy.nSegments()[idx]; jdx++) {
-    unsigned int sgIdx = ranges->segmentModuleIndices[idx] + jdx;
+    unsigned int sgIdx = objectOccupancy.segmentModuleIndices()[idx] + jdx;
     unsigned int InnerMiniDoubletIndex = segments.mdIndices()[sgIdx][0];
     unsigned int OuterMiniDoubletIndex = segments.mdIndices()[sgIdx][1];
     unsigned int InnerMiniDoubletLowerHitIndex = miniDoublets.anchorHitIndices()[InnerMiniDoubletIndex];