From 30c2ca1d960babae7e930afbb414093a1ccb5e34 Mon Sep 17 00:00:00 2001
From: jeff washington
Date: Tue, 28 Feb 2023 17:34:28 -0600
Subject: [PATCH] rework uid to use bits better

---
 bucket_map/src/bucket_storage.rs | 59 ++++++++++++++++++++++++++++++--
 1 file changed, 57 insertions(+), 2 deletions(-)
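Note: the diff below moves toward a layout where a single u64 header sits in
front of each group of 64 cells (items_per_header = u64::BITS), rather than a
full Header per cell. The standalone sketch here mirrors the arithmetic of
calculate_header_offset so it can be checked in isolation; HEADER_SIZE and the
CELL_SIZE value are illustrative assumptions, not constants from this crate.

    // One u64 header precedes each group of 64 cells; a cell's byte offset is
    // its group's header offset, plus the header itself, plus earlier cells.
    const HEADER_SIZE: u64 = std::mem::size_of::<u64>() as u64; // 8 bytes
    const ITEMS_PER_HEADER: u64 = u64::BITS as u64; // 64 cells per header
    const CELL_SIZE: u64 = 80; // hypothetical element size in bytes

    /// (header offset, cell offset, bit index) for entry `ix`
    fn offsets(ix: u64) -> (u64, u64, u64) {
        let group = ix / ITEMS_PER_HEADER; // full header groups before ix
        let bit = ix % ITEMS_PER_HEADER; // ix's slot within its group
        let header = group * (HEADER_SIZE + ITEMS_PER_HEADER * CELL_SIZE);
        let cell = header + HEADER_SIZE + bit * CELL_SIZE;
        (header, cell, bit)
    }

    fn main() {
        // entry 0 sits immediately after the first header
        assert_eq!(offsets(0), (0, HEADER_SIZE, 0));
        // entry 64 opens the second group: its header follows all of group 0
        assert_eq!(offsets(64).0, HEADER_SIZE + 64 * CELL_SIZE);
        // neighbors within a group are exactly one cell apart
        assert_eq!(offsets(65).1, offsets(64).1 + CELL_SIZE);
    }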
diff --git a/bucket_map/src/bucket_storage.rs b/bucket_map/src/bucket_storage.rs
index 69e3eecd23cb38..27c45f0461bd23 100644
--- a/bucket_map/src/bucket_storage.rs
+++ b/bucket_map/src/bucket_storage.rs
@@ -61,7 +61,7 @@ impl Header {
         self.lock = UID_UNLOCKED;
     }
     /// uid that has locked this entry or None if unlocked
-    fn uid(&self) -> Option<Uid> {
+    fn uid2(&self) -> Option<Uid> {
         if self.lock == UID_UNLOCKED {
             None
         } else {
@@ -74,6 +74,13 @@ impl Header {
             Some(self.lock)
         }
     }
 }
 
+struct HeaderOffsets {
+    offset_of_this_header: usize,
+    offset_of_this_cell: usize,
+    /// index of this entry in the current header
+    index_of_cell_within_header_group: usize,
+}
+
 pub struct BucketStorage {
     path: PathBuf,
     mmap: MmapMut,
@@ -96,6 +103,27 @@ impl Drop for BucketStorage {
 }
 
 impl BucketStorage {
+    /// try to lock the entry at index 'ix' with 'uid'
+    /// return true if it could be locked
+    fn try_lock(&mut self, ix: u64, uid: Uid) -> bool {
+        self.header_mut_ptr(ix).try_lock(uid)
+    }
+
+    /// mark the entry at index 'ix' as unlocked
+    fn unlock(&mut self, ix: u64, expected: Uid) {
+        self.header_mut_ptr(ix).unlock(expected);
+    }
+
+    /// true if the entry at index 'ix' is unlocked
+    fn is_unlocked(&self, ix: u64) -> bool {
+        self.header_ptr(ix).is_unlocked()
+    }
+
     pub fn new_with_capacity(
         drives: Arc<Vec<PathBuf>>,
         num_elems: u64,
@@ -140,6 +168,22 @@ impl BucketStorage {
             count,
         )
     }
+
+    fn calculate_header_offset(&self, ix: u64) -> HeaderOffsets {
+        // the header bits are always before the entries they correspond to
+        let size_header = std::mem::size_of::<Header>() as u64;
+        let items_per_header = u64::BITS as u64;
+        let earlier_headers = ix / items_per_header;
+        let index_of_cell_within_header_group = ix % items_per_header;
+        let all_entries_with_earlier_header = earlier_headers * items_per_header;
+        let size_of_previous_headers = earlier_headers * size_header;
+        // TODO: cell size should be a constant
+        let size_of_previous_entries = all_entries_with_earlier_header * self.cell_size;
+        let offset_of_this_header = size_of_previous_headers.saturating_add(size_of_previous_entries);
+        let offset_of_this_cell =
+            offset_of_this_header + size_header + index_of_cell_within_header_group * self.cell_size;
+        HeaderOffsets {
+            offset_of_this_header: offset_of_this_header as usize,
+            offset_of_this_cell: offset_of_this_cell as usize,
+            index_of_cell_within_header_group: index_of_cell_within_header_group as usize,
+        }
+    }
 
     /// return ref to header of item 'ix' in mmapped file
     fn header_ptr(&self, ix: u64) -> &Header {
@@ -165,7 +209,18 @@ impl BucketStorage {
     /// return uid allocated at index 'ix' or None if vacant
     pub fn uid(&self, ix: u64) -> Option<Uid> {
         assert!(ix < self.capacity(), "bad index size");
-        self.header_ptr(ix).uid()
+        self.header_ptr(ix).uid2()
     }
 
     /// true if the entry at index 'ix' is free (as opposed to being allocated)
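Note: the lock word doubles as the occupancy marker, so a uid of UID_UNLOCKED
means "free". The self-contained sketch below replays that protocol end to
end; Uid, UID_UNLOCKED = 0, and the method bodies mirror this patch, but the
types here are stand-ins for illustration, not the crate's own.

    type Uid = u64;
    const UID_UNLOCKED: Uid = 0;

    struct Header {
        lock: Uid,
    }

    impl Header {
        /// claim the slot for `uid` if it is free; report whether we won it
        fn try_lock(&mut self, uid: Uid) -> bool {
            if self.lock == UID_UNLOCKED {
                self.lock = uid;
                true
            } else {
                false
            }
        }
        /// release the slot, asserting the caller actually owns it
        fn unlock(&mut self, expected: Uid) {
            assert_eq!(expected, self.lock);
            self.lock = UID_UNLOCKED;
        }
        /// uid that has locked this entry or None if unlocked
        fn uid2(&self) -> Option<Uid> {
            (self.lock != UID_UNLOCKED).then_some(self.lock)
        }
    }

    fn main() {
        let mut h = Header { lock: UID_UNLOCKED };
        assert!(h.try_lock(42)); // first claim wins the slot
        assert!(!h.try_lock(7)); // a second claim is refused
        assert_eq!(h.uid2(), Some(42));
        h.unlock(42); // the owner releases it
        assert_eq!(h.uid2(), None); // the slot reads as vacant again
    }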