diff --git a/src/atomic.rs b/src/atomic.rs index be12e6d..a94b897 100644 --- a/src/atomic.rs +++ b/src/atomic.rs @@ -134,7 +134,7 @@ impl Atomic { #[cfg(not(feature = "nightly"))] pub fn null() -> Self { Atomic { - data: AtomicUsize::new(0), + data: ATOMIC_USIZE_INIT, _marker: PhantomData, } } @@ -179,8 +179,7 @@ impl Atomic { /// let a = Atomic::from_owned(Owned::new(1234)); /// ``` pub fn from_owned(owned: Owned) -> Self { - let data = owned.data; - mem::forget(owned); + let data = owned.into_data(); Self::from_data(data) } @@ -189,12 +188,12 @@ impl Atomic { /// # Examples /// /// ``` - /// use crossbeam_epoch::{Atomic, Ptr}; + /// use crossbeam_epoch::{Atomic, Shared}; /// - /// let a = Atomic::from_ptr(Ptr::::null()); + /// let a = Atomic::from_ptr(Shared::::null()); /// ``` - pub fn from_ptr(ptr: Ptr) -> Self { - Self::from_data(ptr.data) + pub fn from_ptr(ptr: Shared) -> Self { + Self::from_data(ptr.into_data()) } /// Returns a new atomic pointer pointing to `raw`. @@ -203,7 +202,7 @@ impl Atomic { /// /// ``` /// use std::ptr; - /// use crossbeam_epoch::{Atomic, Ptr}; + /// use crossbeam_epoch::{Atomic, Shared}; /// /// let a = Atomic::from_raw(ptr::null::()); /// ``` @@ -211,7 +210,7 @@ impl Atomic { Self::from_data(raw as usize) } - /// Loads a `Ptr` from the atomic pointer. + /// Loads a `Shared` from the atomic pointer. /// /// This method takes an [`Ordering`] argument which describes the memory ordering of this /// operation. @@ -228,11 +227,11 @@ impl Atomic { /// let guard = &epoch::pin(); /// let p = a.load(SeqCst, guard); /// ``` - pub fn load<'g>(&self, ord: Ordering, _: &'g Guard) -> Ptr<'g, T> { - Ptr::from_data(self.data.load(ord)) + pub fn load<'g>(&self, ord: Ordering, _: &'g Guard) -> Shared<'g, T> { + unsafe { Shared::from_data(self.data.load(ord)) } } - /// Stores a `Ptr` into the atomic pointer. + /// Stores a `Shared` or `Owned` pointer into the atomic pointer. /// /// This method takes an [`Ordering`] argument which describes the memory ordering of this /// operation. @@ -242,39 +241,19 @@ impl Atomic { /// # Examples /// /// ``` - /// use crossbeam_epoch::{self as epoch, Atomic, Ptr}; + /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared}; /// use std::sync::atomic::Ordering::SeqCst; /// /// let a = Atomic::new(1234); - /// a.store(Ptr::null(), SeqCst); - /// ``` - pub fn store(&self, new: Ptr, ord: Ordering) { - self.data.store(new.data, ord); - } - - /// Stores an `Owned` into the atomic pointer. - /// - /// This method takes an [`Ordering`] argument which describes the memory ordering of this - /// operation. - /// - /// [`Ordering`]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html - /// - /// # Examples - /// - /// ``` - /// use crossbeam_epoch::{self as epoch, Atomic, Owned}; - /// use std::sync::atomic::Ordering::SeqCst; - /// - /// let a = Atomic::null(); - /// a.store_owned(Owned::new(1234), SeqCst); + /// a.store(Shared::null(), SeqCst); + /// a.store(Owned::new(1234), SeqCst); /// ``` - pub fn store_owned(&self, new: Owned, ord: Ordering) { - let data = new.data; - mem::forget(new); - self.data.store(data, ord); + pub fn store<'g, P: Pointer>(&self, new: P, ord: Ordering) { + self.data.store(new.into_data(), ord); } - /// Stores a `Ptr` into the atomic pointer, returning the previous `Ptr`. + /// Stores a `Shared` or `Owned` pointer into the atomic pointer, returning the previous + /// `Shared`. /// /// This method takes an [`Ordering`] argument which describes the memory ordering of this /// operation. 
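Call-site note (not part of the patch): the former `store`/`store_owned` pair collapses into one method that is generic over the new `Pointer` trait, so either pointer kind can be written. A minimal sketch under the signatures shown above; the values are illustrative and replaced objects are simply leaked for brevity.

```rust
use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
use std::sync::atomic::Ordering::SeqCst;

fn main() {
    let a = Atomic::new(1234);
    let guard = &epoch::pin();

    // Both a null `Shared` and a fresh `Owned` go through the same method now;
    // the separate `store_owned` entry point is gone.
    a.store(Shared::null(), SeqCst);
    a.store(Owned::new(5678), SeqCst);

    assert_eq!(unsafe { a.load(SeqCst, guard).as_ref() }, Some(&5678));
}
```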
@@ -284,109 +263,22 @@ impl Atomic { /// # Examples /// /// ``` - /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Ptr}; + /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared}; /// use std::sync::atomic::Ordering::SeqCst; /// /// let a = Atomic::new(1234); /// let guard = &epoch::pin(); - /// let p = a.swap(Ptr::null(), SeqCst, guard); + /// let p = a.swap(Shared::null(), SeqCst, guard); /// ``` - pub fn swap<'g>(&self, new: Ptr, ord: Ordering, _: &'g Guard) -> Ptr<'g, T> { - Ptr::from_data(self.data.swap(new.data, ord)) + pub fn swap<'g, P: Pointer>(&self, new: P, ord: Ordering, _: &'g Guard) -> Shared<'g, T> { + unsafe { Shared::from_data(self.data.swap(new.into_data(), ord)) } } - /// Stores `new` into the atomic pointer if the current value is the same as `current`. - /// - /// The return value is a result indicating whether the new pointer was written. On failure the - /// actual current value is returned. - /// - /// This method takes a [`CompareAndSetOrdering`] argument which describes the memory - /// ordering of this operation. - /// - /// [`CompareAndSetOrdering`]: trait.CompareAndSetOrdering.html - /// - /// # Examples - /// - /// ``` - /// use crossbeam_epoch::{self as epoch, Atomic, Ptr}; - /// use std::sync::atomic::Ordering::SeqCst; - /// - /// let a = Atomic::new(1234); - /// - /// let guard = &epoch::pin(); - /// let mut curr = a.load(SeqCst, guard); - /// let res = a.compare_and_set(curr, Ptr::null(), SeqCst, guard); - /// ``` - pub fn compare_and_set<'g, O>( - &self, - current: Ptr, - new: Ptr, - ord: O, - _: &'g Guard, - ) -> Result<(), Ptr<'g, T>> - where - O: CompareAndSetOrdering, - { - match self.data - .compare_exchange(current.data, new.data, ord.success(), ord.failure()) - { - Ok(_) => Ok(()), - Err(previous) => Err(Ptr::from_data(previous)), - } - } - - /// Stores `new` into the atomic pointer if the current value is the same as `current`. - /// - /// Unlike [`compare_and_set`], this method is allowed to spuriously fail even when - /// comparison succeeds, which can result in more efficient code on some platforms. - /// The return value is a result indicating whether the new pointer was written. On failure the - /// actual current value is returned. - /// - /// This method takes a [`CompareAndSetOrdering`] argument which describes the memory - /// ordering of this operation. - /// - /// [`compare_and_set`]: struct.Atomic.html#method.compare_and_set - /// [`CompareAndSetOrdering`]: trait.CompareAndSetOrdering.html - /// - /// # Examples - /// - /// ``` - /// use crossbeam_epoch::{self as epoch, Atomic, Ptr}; - /// use std::sync::atomic::Ordering::SeqCst; - /// - /// let a = Atomic::new(1234); - /// - /// let guard = &epoch::pin(); - /// let mut curr = a.load(SeqCst, guard); - /// loop { - /// match a.compare_and_set_weak(curr, Ptr::null(), SeqCst, guard) { - /// Ok(()) => break, - /// Err(c) => curr = c, - /// } - /// } - /// ``` - pub fn compare_and_set_weak<'g, O>( - &self, - current: Ptr, - new: Ptr, - ord: O, - _: &'g Guard, - ) -> Result<(), Ptr<'g, T>> - where - O: CompareAndSetOrdering, - { - match self.data - .compare_exchange_weak(current.data, new.data, ord.success(), ord.failure()) - { - Ok(_) => Ok(()), - Err(previous) => Err(Ptr::from_data(previous)), - } - } - - /// Stores `new` into the atomic pointer if the current value is the same as `current`. + /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current + /// value is the same as `current`. 
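`swap` gets the same treatment: it accepts any `P: Pointer<T>` and hands the previous value back as a `Shared`. A short sketch assuming the signature above (reclamation of the swapped-out nodes is skipped for brevity):

```rust
use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
use std::sync::atomic::Ordering::SeqCst;

fn main() {
    let a = Atomic::new(1234);
    let guard = &epoch::pin();

    // Swap an owned allocation in; the previous pointer comes back as a `Shared`.
    let prev = a.swap(Owned::new(5678), SeqCst, guard);
    assert_eq!(unsafe { prev.as_ref() }, Some(&1234));

    // Swapping a null `Shared` in empties the atomic and returns the 5678 node.
    let prev = a.swap(Shared::null(), SeqCst, guard);
    assert_eq!(unsafe { prev.as_ref() }, Some(&5678));
    assert!(a.load(SeqCst, guard).is_null());
}
```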
/// /// The return value is a result indicating whether the new pointer was written. On success the - /// pointer that was written is returned. On failure `new` and the actual current value are + /// pointer that was written is returned. On failure the actual current value and `new` are /// returned. /// /// This method takes a [`CompareAndSetOrdering`] argument which describes the memory @@ -397,64 +289,63 @@ impl Atomic { /// # Examples /// /// ``` - /// use crossbeam_epoch::{self as epoch, Atomic, Owned}; + /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared}; /// use std::sync::atomic::Ordering::SeqCst; /// /// let a = Atomic::new(1234); /// /// let guard = &epoch::pin(); /// let mut curr = a.load(SeqCst, guard); - /// let res = a.compare_and_set_owned(curr, Owned::new(5678), SeqCst, guard); + /// let res1 = a.compare_and_set(curr, Shared::null(), SeqCst, guard); + /// let res2 = a.compare_and_set(curr, Owned::new(5678), SeqCst, guard); /// ``` - pub fn compare_and_set_owned<'g, O>( + pub fn compare_and_set<'g, O, P>( &self, - current: Ptr, - new: Owned, + current: Shared, + new: P, ord: O, _: &'g Guard, - ) -> Result, (Ptr<'g, T>, Owned)> + ) -> Result, (Shared<'g, T>, P)> where O: CompareAndSetOrdering, + P: Pointer, { - match self.data - .compare_exchange(current.data, new.data, ord.success(), ord.failure()) - { - Ok(_) => { - let data = new.data; - mem::forget(new); - Ok(Ptr::from_data(data)) - } - Err(previous) => Err((Ptr::from_data(previous), new)), - } + let new = new.into_data(); + self.data + .compare_exchange(current.into_data(), new, ord.success(), ord.failure()) + .map(|_| unsafe { Shared::from_data(new) }) + .map_err(|previous| unsafe { + (Shared::from_data(previous), P::from_data(new)) + }) } - /// Stores `new` into the atomic pointer if the current value is the same as `current`. + /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current + /// value is the same as `current`. /// - /// Unlike [`compare_and_set_owned`], this method is allowed to spuriously fail even when - /// comparison succeeds, which can result in more efficient code on some platforms. - /// The return value is a result indicating whether the new pointer was written. On success the - /// pointer that was written is returned. On failure `new` and the actual current value are - /// returned. + /// Unlike [`compare_and_set`], this method is allowed to spuriously fail even when comparison + /// succeeds, which can result in more efficient code on some platforms. The return value is a + /// result indicating whether the new pointer was written. On success the pointer that was + /// written is returned. On failure the actual current value and `new` are returned. /// /// This method takes a [`CompareAndSetOrdering`] argument which describes the memory /// ordering of this operation. 
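One consequence of the `Result<Shared, (Shared, P)>` shape is that a failed CAS hands the not-yet-installed pointer back to the caller, so an `Owned` is neither leaked nor dropped behind the caller's back. A sketch of that failure path, assuming the signatures above (the stale-snapshot setup is contrived for illustration):

```rust
use crossbeam_epoch::{self as epoch, Atomic, Owned};
use std::sync::atomic::Ordering::SeqCst;

fn main() {
    let a = Atomic::new(1);
    let guard = &epoch::pin();

    // Take a snapshot, then invalidate it so the CAS below must fail.
    let stale = a.load(SeqCst, guard);
    a.store(Owned::new(2), SeqCst);

    match a.compare_and_set(stale, Owned::new(3), SeqCst, guard) {
        Ok(_) => unreachable!("the snapshot is stale, so this CAS cannot succeed"),
        Err((current, not_installed)) => {
            // `current` is what the atomic really holds; `not_installed` is our
            // original `Owned::new(3)`, still owned here and free to reuse or drop.
            assert_eq!(unsafe { current.as_ref() }, Some(&2));
            drop(not_installed);
        }
    }
}
```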
/// - /// [`compare_and_set_owned`]: struct.Atomic.html#method.compare_and_set_owned + /// [`compare_and_set`]: struct.Atomic.html#method.compare_and_set /// [`CompareAndSetOrdering`]: trait.CompareAndSetOrdering.html /// /// # Examples /// /// ``` - /// use crossbeam_epoch::{self as epoch, Atomic, Owned}; + /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared}; /// use std::sync::atomic::Ordering::SeqCst; /// /// let a = Atomic::new(1234); - /// /// let guard = &epoch::pin(); + /// /// let mut new = Owned::new(5678); /// let mut ptr = a.load(SeqCst, guard); /// loop { - /// match a.compare_and_set_weak_owned(ptr, new, SeqCst, guard) { + /// match a.compare_and_set_weak(ptr, new, SeqCst, guard) { /// Ok(p) => { /// ptr = p; /// break; @@ -465,27 +356,33 @@ impl Atomic { /// } /// } /// } + /// + /// let mut curr = a.load(SeqCst, guard); + /// loop { + /// match a.compare_and_set_weak(curr, Shared::null(), SeqCst, guard) { + /// Ok(_) => break, + /// Err((c, _)) => curr = c, + /// } + /// } /// ``` - pub fn compare_and_set_weak_owned<'g, O>( + pub fn compare_and_set_weak<'g, O, P>( &self, - current: Ptr, - new: Owned, + current: Shared, + new: P, ord: O, _: &'g Guard, - ) -> Result, (Ptr<'g, T>, Owned)> + ) -> Result, (Shared<'g, T>, P)> where O: CompareAndSetOrdering, + P: Pointer, { - match self.data - .compare_exchange_weak(current.data, new.data, ord.success(), ord.failure()) - { - Ok(_) => { - let data = new.data; - mem::forget(new); - Ok(Ptr::from_data(data)) - } - Err(previous) => Err((Ptr::from_data(previous), new)), - } + let new = new.into_data(); + self.data + .compare_exchange_weak(current.into_data(), new, ord.success(), ord.failure()) + .map(|_| unsafe { Shared::from_data(new) }) + .map_err(|previous| unsafe { + (Shared::from_data(previous), P::from_data(new)) + }) } /// Bitwise "and" with the current tag. @@ -501,16 +398,16 @@ impl Atomic { /// # Examples /// /// ``` - /// use crossbeam_epoch::{self as epoch, Atomic, Ptr}; + /// use crossbeam_epoch::{self as epoch, Atomic, Shared}; /// use std::sync::atomic::Ordering::SeqCst; /// - /// let a = Atomic::::from_ptr(Ptr::null().with_tag(3)); + /// let a = Atomic::::from_ptr(Shared::null().with_tag(3)); /// let guard = &epoch::pin(); /// assert_eq!(a.fetch_and(2, SeqCst, guard).tag(), 3); /// assert_eq!(a.load(SeqCst, guard).tag(), 2); /// ``` - pub fn fetch_and<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Ptr<'g, T> { - Ptr::from_data(self.data.fetch_and(val | !low_bits::(), ord)) + pub fn fetch_and<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> { + unsafe { Shared::from_data(self.data.fetch_and(val | !low_bits::(), ord)) } } /// Bitwise "or" with the current tag. 
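The masking in `fetch_and` (and, below, `fetch_or`) confines the operation to the tag bits: the bits guaranteed free by `T`'s alignment are the only ones a tag operation may touch, so the address bits are never disturbed. A small stand-in sketch of the same arithmetic on plain integers, assuming an 8-byte-aligned `T` (the `low_bits` constant here is a local stand-in for the crate's helper of the same name):

```rust
fn main() {
    // For an 8-byte-aligned T the three low bits of an address are always zero,
    // so they are free to hold a tag: low_bits = 0b111.
    let low_bits: usize = 0b111;
    let addr: usize = 0x1000;   // an aligned address (illustrative)
    let data = addr | 0b001;    // pointer word with tag 1
    let val: usize = 0b010;

    // fetch_or(val): only `val & low_bits` is OR-ed in, so the address survives.
    let after_or = data | (val & low_bits);
    assert_eq!(after_or & low_bits, 0b011); // tag went 1 -> 3
    assert_eq!(after_or & !low_bits, addr); // address untouched

    // fetch_and(val): AND-ing with `val | !low_bits` can only clear tag bits.
    let after_and = after_or & (val | !low_bits);
    assert_eq!(after_and & low_bits, 0b010); // tag 3 & 2 = 2
    assert_eq!(after_and & !low_bits, addr); // address still untouched
}
```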
@@ -526,16 +423,16 @@ impl Atomic { /// # Examples /// /// ``` - /// use crossbeam_epoch::{self as epoch, Atomic, Ptr}; + /// use crossbeam_epoch::{self as epoch, Atomic, Shared}; /// use std::sync::atomic::Ordering::SeqCst; /// - /// let a = Atomic::::from_ptr(Ptr::null().with_tag(1)); + /// let a = Atomic::::from_ptr(Shared::null().with_tag(1)); /// let guard = &epoch::pin(); /// assert_eq!(a.fetch_or(2, SeqCst, guard).tag(), 1); /// assert_eq!(a.load(SeqCst, guard).tag(), 3); /// ``` - pub fn fetch_or<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Ptr<'g, T> { - Ptr::from_data(self.data.fetch_or(val & low_bits::(), ord)) + pub fn fetch_or<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> { + unsafe { Shared::from_data(self.data.fetch_or(val & low_bits::(), ord)) } } /// Bitwise "xor" with the current tag. @@ -551,16 +448,16 @@ impl Atomic { /// # Examples /// /// ``` - /// use crossbeam_epoch::{self as epoch, Atomic, Ptr}; + /// use crossbeam_epoch::{self as epoch, Atomic, Shared}; /// use std::sync::atomic::Ordering::SeqCst; /// - /// let a = Atomic::::from_ptr(Ptr::null().with_tag(1)); + /// let a = Atomic::::from_ptr(Shared::null().with_tag(1)); /// let guard = &epoch::pin(); /// assert_eq!(a.fetch_xor(3, SeqCst, guard).tag(), 1); /// assert_eq!(a.load(SeqCst, guard).tag(), 2); /// ``` - pub fn fetch_xor<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Ptr<'g, T> { - Ptr::from_data(self.data.fetch_xor(val & low_bits::(), ord)) + pub fn fetch_xor<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> { + unsafe { Shared::from_data(self.data.fetch_xor(val & low_bits::(), ord)) } } } @@ -619,12 +516,20 @@ impl From> for Atomic { } } -impl<'g, T> From> for Atomic { - fn from(ptr: Ptr) -> Self { +impl<'g, T> From> for Atomic { + fn from(ptr: Shared) -> Self { Atomic::from_ptr(ptr) } } +/// A trait for either `Owned` or `Shared` pointers. +pub trait Pointer { + /// Returns the machine representation of the pointer. + fn into_data(self) -> usize; + /// Returns a new pointer pointing to the tagged pointer `data`. + unsafe fn from_data(data: usize) -> Self; +} + /// An owned heap-allocated object. /// /// This type is very similar to `Box`. @@ -636,15 +541,30 @@ pub struct Owned { _marker: PhantomData>, } -impl Owned { - /// Returns a new owned pointer pointing to the tagged pointer `data`. +impl Pointer for Owned { + #[inline] + fn into_data(self) -> usize { + let data = self.data; + mem::forget(self); + data + } + + /// Returns a new pointer pointing to the tagged pointer `data`. + /// + /// # Panics + /// + /// Panics if the data is zero in debug mode. + #[inline] unsafe fn from_data(data: usize) -> Self { + debug_assert!(data != 0, "converting zero into `Owned`"); Owned { data: data, _marker: PhantomData, } } +} +impl Owned { /// Allocates `value` on the heap and returns a new owned pointer pointing to it. /// /// # Examples @@ -697,7 +617,7 @@ impl Owned { Self::from_data(raw as usize) } - /// Converts the owned pointer into a [`Ptr`]. + /// Converts the owned pointer into a [`Shared`]. /// /// # Examples /// @@ -709,11 +629,9 @@ impl Owned { /// let p = o.into_ptr(guard); /// ``` /// - /// [`Ptr`]: struct.Ptr.html - pub fn into_ptr<'g>(self, _: &'g Guard) -> Ptr<'g, T> { - let data = self.data; - mem::forget(self); - Ptr::from_data(data) + /// [`Shared`]: struct.Shared.html + pub fn into_ptr<'g>(self, _: &'g Guard) -> Shared<'g, T> { + unsafe { Shared::from_data(self.into_data()) } } /// Converts the owned pointer into a `Box`. 
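`Owned::into_ptr` is now just `into_data` followed by `Shared::from_data`: ownership is given up and the same machine word is re-tagged as an epoch-protected `Shared`. A sketch of the round trip, assuming the signatures above (the object is intentionally left unreclaimed to keep the example short):

```rust
use crossbeam_epoch::{self as epoch, Atomic, Owned};
use std::sync::atomic::Ordering::SeqCst;

fn main() {
    let guard = &epoch::pin();

    // Exclusive ownership first...
    let o = Owned::new(1234);

    // ...then hand it to the epoch world as a `Shared` tied to the guard's
    // lifetime; the heap allocation itself is untouched.
    let p = o.into_ptr(guard);
    assert_eq!(unsafe { p.as_ref() }, Some(&1234));

    // The resulting `Shared` can be published like any other pointer.
    let a = Atomic::null();
    a.store(p, SeqCst);
    assert!(!a.load(SeqCst, guard).is_null());
}
```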
@@ -728,8 +646,7 @@ impl Owned { /// assert_eq!(*b, 1234); /// ``` pub fn into_box(self) -> Box { - let (raw, _) = decompose_data::(self.data); - mem::forget(self); + let (raw, _) = decompose_data::(self.into_data()); unsafe { Box::from_raw(raw) } } @@ -761,8 +678,7 @@ impl Owned { /// assert_eq!(o.tag(), 5); /// ``` pub fn with_tag(self, tag: usize) -> Self { - let data = self.data; - mem::forget(self); + let data = self.into_data(); unsafe { Self::from_data(data_with_tag::(data, tag)) } } } @@ -851,45 +767,52 @@ impl AsMut for Owned { /// /// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused /// least significant bits of the address. -pub struct Ptr<'g, T: 'g> { +pub struct Shared<'g, T: 'g> { data: usize, _marker: PhantomData<(&'g (), *const T)>, } -unsafe impl<'g, T: Send> Send for Ptr<'g, T> {} +unsafe impl<'g, T: Send> Send for Shared<'g, T> {} -impl<'g, T> Clone for Ptr<'g, T> { +impl<'g, T> Clone for Shared<'g, T> { fn clone(&self) -> Self { - Ptr { + Shared { data: self.data, _marker: PhantomData, } } } -impl<'g, T> Copy for Ptr<'g, T> {} +impl<'g, T> Copy for Shared<'g, T> {} -impl<'g, T> Ptr<'g, T> { - /// Returns a new pointer pointing to the tagged pointer `data`. - fn from_data(data: usize) -> Self { - Ptr { +impl<'g, T> Pointer for Shared<'g, T> { + #[inline] + fn into_data(self) -> usize { + self.data + } + + #[inline] + unsafe fn from_data(data: usize) -> Self { + Shared { data: data, _marker: PhantomData, } } +} +impl<'g, T> Shared<'g, T> { /// Returns a new null pointer. /// /// # Examples /// /// ``` - /// use crossbeam_epoch::Ptr; + /// use crossbeam_epoch::Shared; /// - /// let p = Ptr::::null(); + /// let p = Shared::::null(); /// assert!(p.is_null()); /// ``` pub fn null() -> Self { - Ptr { + Shared { data: 0, _marker: PhantomData, } @@ -904,14 +827,14 @@ impl<'g, T> Ptr<'g, T> { /// # Examples /// /// ``` - /// use crossbeam_epoch::Ptr; + /// use crossbeam_epoch::Shared; /// - /// let p = unsafe { Ptr::from_raw(Box::into_raw(Box::new(1234))) }; + /// let p = unsafe { Shared::from_raw(Box::into_raw(Box::new(1234))) }; /// assert!(!p.is_null()); /// ``` pub fn from_raw(raw: *const T) -> Self { ensure_aligned(raw); - Ptr { + Shared { data: raw as usize, _marker: PhantomData, } @@ -928,7 +851,7 @@ impl<'g, T> Ptr<'g, T> { /// let a = Atomic::null(); /// let guard = &epoch::pin(); /// assert!(a.load(SeqCst, guard).is_null()); - /// a.store_owned(Owned::new(1234), SeqCst); + /// a.store(Owned::new(1234), SeqCst); /// assert!(!a.load(SeqCst, guard).is_null()); /// ``` pub fn is_null(&self) -> bool { @@ -967,7 +890,7 @@ impl<'g, T> Ptr<'g, T> { /// Another concern is the possiblity of data races due to lack of proper synchronization. /// For example, consider the following scenario: /// - /// 1. A thread creates a new object: `a.store_owned(Owned::new(10), Relaxed)` + /// 1. A thread creates a new object: `a.store(Owned::new(10), Relaxed)` /// 2. Another thread reads it: `*a.load(Relaxed, guard).as_ref().unwrap()` /// /// The problem is that relaxed orderings don't synchronize initialization of the object with @@ -1002,7 +925,7 @@ impl<'g, T> Ptr<'g, T> { /// Another concern is the possiblity of data races due to lack of proper synchronization. /// For example, consider the following scenario: /// - /// 1. A thread creates a new object: `a.store_owned(Owned::new(10), Relaxed)` + /// 1. A thread creates a new object: `a.store(Owned::new(10), Relaxed)` /// 2. 
Another thread reads it: `*a.load(Relaxed, guard).as_ref().unwrap()` /// /// The problem is that relaxed orderings don't synchronize initialization of the object with @@ -1092,64 +1015,64 @@ impl<'g, T> Ptr<'g, T> { /// assert_eq!(p1.as_raw(), p2.as_raw()); /// ``` pub fn with_tag(&self, tag: usize) -> Self { - Self::from_data(data_with_tag::(self.data, tag)) + unsafe { Self::from_data(data_with_tag::(self.data, tag)) } } } -impl<'g, T> PartialEq> for Ptr<'g, T> { +impl<'g, T> PartialEq> for Shared<'g, T> { fn eq(&self, other: &Self) -> bool { self.data == other.data } } -impl<'g, T> Eq for Ptr<'g, T> {} +impl<'g, T> Eq for Shared<'g, T> {} -impl<'g, T> PartialOrd> for Ptr<'g, T> { +impl<'g, T> PartialOrd> for Shared<'g, T> { fn partial_cmp(&self, other: &Self) -> Option { self.data.partial_cmp(&other.data) } } -impl<'g, T> Ord for Ptr<'g, T> { +impl<'g, T> Ord for Shared<'g, T> { fn cmp(&self, other: &Self) -> cmp::Ordering { self.data.cmp(&other.data) } } -impl<'g, T> fmt::Debug for Ptr<'g, T> { +impl<'g, T> fmt::Debug for Shared<'g, T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let (raw, tag) = decompose_data::(self.data); - f.debug_struct("Ptr") + f.debug_struct("Shared") .field("raw", &raw) .field("tag", &tag) .finish() } } -impl<'g, T> fmt::Pointer for Ptr<'g, T> { +impl<'g, T> fmt::Pointer for Shared<'g, T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Pointer::fmt(&self.as_raw(), f) } } -impl<'g, T> Default for Ptr<'g, T> { +impl<'g, T> Default for Shared<'g, T> { fn default() -> Self { - Ptr::null() + Shared::null() } } #[cfg(test)] mod tests { - use super::Ptr; + use super::Shared; #[test] fn valid_tag_i8() { - Ptr::::null().with_tag(0); + Shared::::null().with_tag(0); } #[test] fn valid_tag_i64() { - Ptr::::null().with_tag(7); + Shared::::null().with_tag(7); } } diff --git a/src/collector.rs b/src/collector.rs index 53a9995..acfcedd 100644 --- a/src/collector.rs +++ b/src/collector.rs @@ -29,9 +29,7 @@ unsafe impl Sync for Collector {} impl Collector { /// Creates a new collector. pub fn new() -> Self { - Collector { - global: Arc::new(Global::new()), - } + Collector { global: Arc::new(Global::new()) } } /// Creates a new handle for the collector. @@ -43,9 +41,7 @@ impl Collector { impl Clone for Collector { /// Creates another reference to the same garbage collector. fn clone(&self) -> Self { - Collector { - global: self.global.clone(), - } + Collector { global: self.global.clone() } } } diff --git a/src/epoch.rs b/src/epoch.rs index 9100e1c..5a1c61d 100644 --- a/src/epoch.rs +++ b/src/epoch.rs @@ -48,17 +48,13 @@ impl Epoch { /// Returns the same epoch, but marked as pinned. #[inline] pub fn pinned(&self) -> Epoch { - Epoch { - data: self.data | 1, - } + Epoch { data: self.data | 1 } } /// Returns the same epoch, but marked as unpinned. #[inline] pub fn unpinned(&self) -> Epoch { - Epoch { - data: self.data & !1, - } + Epoch { data: self.data & !1 } } /// Returns the successor epoch. @@ -66,9 +62,7 @@ impl Epoch { /// The returned epoch will be marked as pinned only if the previous one was as well. #[inline] pub fn successor(&self) -> Epoch { - Epoch { - data: self.data.wrapping_add(2), - } + Epoch { data: self.data.wrapping_add(2) } } /// Decomposes the internal data into the epoch and the pin state. @@ -97,9 +91,7 @@ impl AtomicEpoch { /// Loads a value from the atomic epoch. 
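`Epoch` packs the pin flag into bit 0 and keeps the epoch counter in the remaining bits, which is why `successor` adds 2 while `pinned`/`unpinned` only set or clear the low bit. A small integer sketch of that encoding (the layout is read off the methods above; values are illustrative):

```rust
fn main() {
    // Same representation as `Epoch { data: usize }`: bit 0 = "is pinned",
    // bits 1.. = the epoch number.
    let starting: usize = 0;

    let pinned = starting | 1;                 // Epoch::pinned
    let unpinned = pinned & !1;                // Epoch::unpinned
    let successor = starting.wrapping_add(2);  // Epoch::successor leaves bit 0 alone

    assert_eq!(pinned & 1, 1);
    assert_eq!(unpinned, starting);
    assert_eq!(successor >> 1, (starting >> 1) + 1); // epoch number advanced by one
    assert_eq!(successor & 1, starting & 1);         // pin state carried over
}
```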
#[inline] pub fn load(&self, ord: Ordering) -> Epoch { - Epoch { - data: self.data.load(ord), - } + Epoch { data: self.data.load(ord) } } /// Stores a value into the atomic epoch. diff --git a/src/garbage.rs b/src/garbage.rs index abc7641..dc7ae43 100644 --- a/src/garbage.rs +++ b/src/garbage.rs @@ -51,9 +51,7 @@ impl fmt::Debug for Garbage { impl Garbage { /// Make a closure that will later be called. pub fn new(f: F) -> Self { - Garbage { - func: Deferred::new(move || f()), - } + Garbage { func: Deferred::new(move || f()) } } } diff --git a/src/guard.rs b/src/guard.rs index c6e4d22..ca58777 100644 --- a/src/guard.rs +++ b/src/guard.rs @@ -154,7 +154,7 @@ impl Guard { /// [`unprotected`]: fn.unprotected.html pub unsafe fn defer(&self, f: F) where - F: FnOnce() -> R + Send + F: FnOnce() -> R + Send, { let garbage = Garbage::new(|| drop(f())); diff --git a/src/internal.rs b/src/internal.rs index 384f96e..9232b76 100644 --- a/src/internal.rs +++ b/src/internal.rs @@ -256,11 +256,7 @@ impl Local { // that the second one makes pinning faster in this particular case. let current = Epoch::starting(); let previous = self.epoch.compare_and_swap(current, new_epoch, SeqCst); - debug_assert_eq!( - current, - previous, - "participant was expected to be unpinned" - ); + debug_assert_eq!(current, previous, "participant was expected to be unpinned"); } else { self.epoch.store(new_epoch, Relaxed); atomic::fence(SeqCst); diff --git a/src/lib.rs b/src/lib.rs index 2866efc..d7c484a 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -23,7 +23,8 @@ //! //! Concurrent collections are built using atomic pointers. This module provides [`Atomic`], which //! is just a shared atomic pointer to a heap-allocated object. Loading an [`Atomic`] yields a -//! [`Ptr`], which is an epoch-protected pointer through which the loaded object can be safely read. +//! [`Shared`], which is an epoch-protected pointer through which the loaded object can be safely +//! read. //! //! # Pinning //! @@ -49,7 +50,7 @@ //! //! [`Atomic`]: struct.Atomic.html //! [`Collector`]: struct.Collector.html -//! [`Ptr`]: struct.Ptr.html +//! [`Shared`]: struct.Shared.html //! [`pin`]: fn.pin.html //! [`defer`]: fn.defer.html @@ -72,7 +73,7 @@ mod guard; mod internal; mod sync; -pub use self::atomic::{Atomic, CompareAndSetOrdering, Owned, Ptr}; +pub use self::atomic::{Atomic, CompareAndSetOrdering, Owned, Shared}; pub use self::guard::{unprotected, Guard}; pub use self::default::{default_handle, is_pinned, pin}; pub use self::collector::{Collector, Handle}; diff --git a/src/sync/list.rs b/src/sync/list.rs index 220b62f..e4b9b2a 100644 --- a/src/sync/list.rs +++ b/src/sync/list.rs @@ -6,7 +6,7 @@ use std::marker::PhantomData; use std::sync::atomic::Ordering::{Acquire, Relaxed, Release}; -use {Atomic, Ptr, Guard, unprotected}; +use {Atomic, Shared, Guard, unprotected}; /// An entry in a linked list. /// @@ -92,7 +92,7 @@ pub struct Iter<'g, T: 'g, C: Container> { pred: &'g Atomic, /// The current entry. - curr: Ptr<'g, Entry>, + curr: Shared<'g, Entry>, /// The phantom data for container. _marker: PhantomData<(&'g T, C)>, @@ -146,17 +146,17 @@ impl> List { /// - `container` is immovable, e.g. inside a `Box`; /// - An entry is not inserted twice; and /// - The inserted object will be removed before the list is dropped. 
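`Guard::defer` is what ties the renamed pointer types back to reclamation: an unlinked object is not freed immediately but handed to the collector as a closure. A minimal sketch of retiring a swapped-out node, assuming only the `FnOnce() -> R + Send` bound shown above (the detour through `usize` keeps the captured pointer `Send`):

```rust
use crossbeam_epoch::{self as epoch, Atomic, Shared};
use std::sync::atomic::Ordering::SeqCst;

fn main() {
    let a = Atomic::new(1234);
    let guard = &epoch::pin();

    // Unlink the node; concurrently pinned threads may still be reading it.
    let removed = a.swap(Shared::null(), SeqCst, guard);
    let raw = removed.as_raw() as usize;

    // Hand destruction to the collector; it runs once no pinned thread can
    // still observe `removed`.
    let free = move || unsafe { drop(Box::from_raw(raw as *mut i32)) };
    unsafe { guard.defer(free) };
}
```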
- pub unsafe fn insert<'g>(&'g self, container: Ptr<'g, T>, guard: &'g Guard) { + pub unsafe fn insert<'g>(&'g self, container: Shared<'g, T>, guard: &'g Guard) { let to = &self.head; let entry = &*C::entry_of(container.as_raw()); - let entry_ptr = Ptr::from_raw(entry); + let entry_ptr = Shared::from_raw(entry); let mut next = to.load(Relaxed, guard); loop { entry.next.store(next, Relaxed); match to.compare_and_set_weak(next, entry_ptr, Release, guard) { Ok(_) => break, - Err(n) => next = n, + Err((_, n)) => next = n, } } } @@ -227,7 +227,7 @@ impl<'g, T: 'g, C: Container> Iterator for Iter<'g, T, C> { } self.curr = succ; } - Err(succ) => { + Err((_, succ)) => { // We lost the race to delete the entry by a concurrent iterator. Set // `self.curr` to the updated pointer, and report that we are stalled. self.curr = succ; @@ -273,9 +273,7 @@ mod tests { } fn finalize(entry: *const Entry) { - unsafe { - drop(Box::from_raw(entry as *mut Entry)) - } + unsafe { drop(Box::from_raw(entry as *mut Entry)) } } } @@ -297,7 +295,9 @@ mod tests { assert!(iter.next().is_some()); assert!(iter.next().is_none()); - unsafe { n2.as_ref().unwrap().delete(&guard); } + unsafe { + n2.as_ref().unwrap().delete(&guard); + } let mut iter = l.iter(&guard); assert!(iter.next().is_some()); diff --git a/src/sync/queue.rs b/src/sync/queue.rs index 25ae8d3..67b6268 100644 --- a/src/sync/queue.rs +++ b/src/sync/queue.rs @@ -11,7 +11,7 @@ use std::sync::atomic::Ordering::{Acquire, Relaxed, Release}; use crossbeam_utils::cache_padded::CachePadded; -use {unprotected, Atomic, Guard, Owned, Ptr}; +use {unprotected, Atomic, Guard, Owned, Shared}; // The representation here is a singly-linked list, with a sentinel node at the front. In general // the `tail` pointer may lag behind the actual tail. Non-sentinel nodes are either all `Data` or @@ -65,7 +65,7 @@ impl Queue { /// Attempts to atomically place `n` into the `next` pointer of `onto`, and returns `true` on /// success. The queue's `tail` pointer may be updated. #[inline(always)] - fn push_internal(&self, onto: Ptr>, new: Ptr>, guard: &Guard) -> bool { + fn push_internal(&self, onto: Shared>, new: Shared>, guard: &Guard) -> bool { // is `onto` the actual tail? let o = unsafe { onto.deref() }; let next = o.next.load(Acquire, guard); @@ -76,7 +76,7 @@ impl Queue { } else { // looks like the actual tail; attempt to link in `n` let result = o.next - .compare_and_set(Ptr::null(), new, Release, guard) + .compare_and_set(Shared::null(), new, Release, guard) .is_ok(); if result { // try to move the tail pointer forward @@ -205,9 +205,7 @@ mod test { impl Queue { pub fn new() -> Queue { - Queue { - queue: super::Queue::new(), - } + Queue { queue: super::Queue::new() } } pub fn push(&self, t: T) {
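Retry loops such as the ones in `list.rs` and `queue.rs` above now destructure the error as `(current, new)` instead of receiving only the current pointer, hence the `Err((_, n))` and `Err((_, succ))` arms. A condensed sketch of the same retry shape on a plain `Atomic`, assuming the `compare_and_set_weak` signature from `src/atomic.rs`:

```rust
use crossbeam_epoch::{self as epoch, Atomic, Shared};
use std::sync::atomic::Ordering::SeqCst;

fn main() {
    let a = Atomic::new(1234);
    let guard = &epoch::pin();

    let mut curr = a.load(SeqCst, guard);
    loop {
        // On failure the tuple carries (actual current value, the pointer we tried
        // to install); for a copyable `Shared` the second half can be ignored.
        match a.compare_and_set_weak(curr, Shared::null(), SeqCst, guard) {
            Ok(_) => break,
            Err((c, _)) => curr = c,
        }
    }
    assert!(a.load(SeqCst, guard).is_null());
}
```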