diff --git a/src/map.rs b/src/map.rs
index d305bc7f8..15b3c023c 100644
--- a/src/map.rs
+++ b/src/map.rs
@@ -6653,7 +6653,7 @@ mod test_map {
     use super::Entry::{Occupied, Vacant};
     use super::EntryRef;
     use super::{HashMap, RawEntryMut};
-    use alloc::string::ToString;
+    use alloc::string::{String, ToString};
     use alloc::sync::Arc;
     use allocator_api2::alloc::{AllocError, Allocator, Global};
     use core::alloc::Layout;
@@ -8519,6 +8519,14 @@ mod test_map {
         _inner: Arc<MyAllocInner>,
     }
 
+    impl MyAlloc {
+        fn new(drop_count: Arc<AtomicI8>) -> Self {
+            MyAlloc {
+                _inner: Arc::new(MyAllocInner { drop_count }),
+            }
+        }
+    }
+
     impl Drop for MyAllocInner {
         fn drop(&mut self) {
             println!("MyAlloc freed.");
@@ -8543,14 +8551,7 @@ mod test_map {
         let dropped: Arc<AtomicI8> = Arc::new(AtomicI8::new(1));
 
         {
-            let mut map = crate::HashMap::with_capacity_in(
-                10,
-                MyAlloc {
-                    _inner: Arc::new(MyAllocInner {
-                        drop_count: dropped.clone(),
-                    }),
-                },
-            );
+            let mut map = HashMap::with_capacity_in(10, MyAlloc::new(dropped.clone()));
             for i in 0..10 {
                 map.entry(i).or_insert_with(|| "i".to_string());
             }
@@ -8564,92 +8565,340 @@ mod test_map {
         assert_eq!(dropped.load(Ordering::SeqCst), 0);
     }
 
-    #[test]
-    #[should_panic = "panic in clone"]
-    fn test_clone_memory_leaks_and_double_drop() {
-        let dropped: Arc<AtomicI8> = Arc::new(AtomicI8::new(2));
+    #[derive(Debug)]
+    struct CheckedCloneDrop<T> {
+        panic_in_clone: bool,
+        panic_in_drop: bool,
+        dropped: bool,
+        data: T,
+    }
 
-        {
-            let mut map = crate::HashMap::with_capacity_in(
-                10,
-                MyAlloc {
-                    _inner: Arc::new(MyAllocInner {
-                        drop_count: dropped.clone(),
-                    }),
-                },
-            );
-
-            const DISARMED: bool = false;
-            const ARMED: bool = true;
-
-            struct CheckedCloneDrop {
-                panic_in_clone: bool,
-                dropped: bool,
-                need_drop: Vec<u64>,
+    impl<T> CheckedCloneDrop<T> {
+        fn new(panic_in_clone: bool, panic_in_drop: bool, data: T) -> Self {
+            CheckedCloneDrop {
+                panic_in_clone,
+                panic_in_drop,
+                dropped: false,
+                data,
             }
+        }
+    }
 
-            impl Clone for CheckedCloneDrop {
-                fn clone(&self) -> Self {
-                    if self.panic_in_clone {
-                        panic!("panic in clone")
-                    }
-                    Self {
-                        panic_in_clone: self.panic_in_clone,
-                        dropped: self.dropped,
-                        need_drop: self.need_drop.clone(),
-                    }
-                }
+    impl<T: Clone> Clone for CheckedCloneDrop<T> {
+        fn clone(&self) -> Self {
+            if self.panic_in_clone {
+                panic!("panic in clone")
             }
+            Self {
+                panic_in_clone: self.panic_in_clone,
+                panic_in_drop: self.panic_in_drop,
+                dropped: self.dropped,
+                data: self.data.clone(),
+            }
+        }
+    }
 
-            impl Drop for CheckedCloneDrop {
-                fn drop(&mut self) {
-                    if self.dropped {
-                        panic!("double drop");
-                    }
-                    self.dropped = true;
-                }
+    impl<T> Drop for CheckedCloneDrop<T> {
+        fn drop(&mut self) {
+            if self.panic_in_drop {
+                self.dropped = true;
+                panic!("panic in drop");
             }
+            if self.dropped {
+                panic!("double drop");
+            }
+            self.dropped = true;
+        }
+    }
 
-            let armed_flags = [
-                DISARMED, DISARMED, ARMED, DISARMED, DISARMED, DISARMED, DISARMED,
-            ];
+    /// Returns a hashmap with a predefined distribution of elements.
+    /// All elements will be located in the same order as the elements
+    /// returned by the iterator.
+    ///
+    /// This function does not panic, but returns an error as a `String`
+    /// to distinguish between a test panic and an error in the input data.
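+    ///
+    /// Each item of `iter` is a `(panic_in_clone, panic_in_drop)` pair of
+    /// flags for one element, and `fun` builds that element's payload from
+    /// its key.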
+    fn get_test_map<I, T, A>(
+        iter: I,
+        mut fun: impl FnMut(u64) -> T,
+        alloc: A,
+    ) -> Result<HashMap<u64, CheckedCloneDrop<T>, DefaultHashBuilder, A>, String>
+    where
+        I: Iterator<Item = (bool, bool)> + Clone + ExactSizeIterator,
+        A: Allocator + Clone,
+        T: PartialEq + core::fmt::Debug,
+    {
+        use crate::scopeguard::guard;
 
-            // Hash and Key must be equal to each other for controlling the elements placement
-            // so that we can be sure that we first clone elements that don't panic during cloning.
-            for (idx, &panic_in_clone) in armed_flags.iter().enumerate() {
-                let idx = idx as u64;
-                map.table.insert(
-                    idx,
+        let mut map: HashMap<u64, CheckedCloneDrop<T>, _, A> =
+            HashMap::with_capacity_in(iter.size_hint().0, alloc);
+        {
+            let mut guard = guard(&mut map, |map| {
+                for (_, value) in map.iter_mut() {
+                    value.panic_in_drop = false
+                }
+            });
+
+            let mut count = 0;
+            // Hash and key must be equal to each other to control the elements' placement.
+            for (panic_in_clone, panic_in_drop) in iter.clone() {
+                if core::mem::needs_drop::<T>() && panic_in_drop {
+                    return Err(String::from(
+                        "panic_in_drop can only be set for a type that doesn't need to be dropped",
+                    ));
+                }
+                guard.table.insert(
+                    count,
                     (
-                        idx,
-                        CheckedCloneDrop {
-                            panic_in_clone,
-                            dropped: false,
-                            need_drop: vec![0, 1, 2, 3],
-                        },
+                        count,
+                        CheckedCloneDrop::new(panic_in_clone, panic_in_drop, fun(count)),
                     ),
                     |(k, _)| *k,
                 );
+                count += 1;
             }
 
-            let mut count = 0;
             // Let's check that all elements are located as we wanted
-            //
-            // SAFETY: We know for sure that `RawTable` will outlive
-            // the returned `RawIter` iterator.
-            for ((key, value), panic_in_clone) in map.iter().zip(armed_flags) {
-                assert_eq!(*key, count);
-                assert_eq!(value.panic_in_clone, panic_in_clone);
-                count += 1;
+            let mut check_count = 0;
+            for ((key, value), (panic_in_clone, panic_in_drop)) in guard.iter().zip(iter) {
+                if *key != check_count {
+                    return Err(format!(
+                        "key != check_count,\nkey: `{}`,\ncheck_count: `{}`",
+                        key, check_count
+                    ));
+                }
+                if value.dropped
+                    || value.panic_in_clone != panic_in_clone
+                    || value.panic_in_drop != panic_in_drop
+                    || value.data != fun(check_count)
+                {
+                    return Err(format!(
+                        "Value is not equal to expected,\nvalue: `{:?}`,\nexpected: \
+                        `CheckedCloneDrop {{ panic_in_clone: {}, panic_in_drop: {}, dropped: {}, data: {:?} }}`",
+                        value, panic_in_clone, panic_in_drop, false, fun(check_count)
+                    ));
+                }
+                check_count += 1;
             }
 
+            if guard.len() != check_count as usize {
+                return Err(format!(
+                    "map.len() != check_count,\nmap.len(): `{}`,\ncheck_count: `{}`",
+                    guard.len(),
+                    check_count
+                ));
+            }
+
+            if count != check_count {
+                return Err(format!(
+                    "count != check_count,\ncount: `{}`,\ncheck_count: `{}`",
+                    count, check_count
+                ));
+            }
-            assert_eq!(map.len(), 7);
-            assert_eq!(count, 7);
+
+            core::mem::forget(guard);
+        }
+        Ok(map)
+    }
+
+    const DISARMED: bool = false;
+    const ARMED: bool = true;
+
+    const ARMED_FLAGS: [bool; 8] = [
+        DISARMED, DISARMED, DISARMED, ARMED, DISARMED, DISARMED, DISARMED, DISARMED,
+    ];
+
+    const DISARMED_FLAGS: [bool; 8] = [
+        DISARMED, DISARMED, DISARMED, DISARMED, DISARMED, DISARMED, DISARMED, DISARMED,
+    ];
+
+    #[test]
+    #[should_panic = "panic in clone"]
+    fn test_clone_memory_leaks_and_double_drop_one() {
+        let dropped: Arc<AtomicI8> = Arc::new(AtomicI8::new(2));
+
+        {
+            assert_eq!(ARMED_FLAGS.len(), DISARMED_FLAGS.len());
+
+            let map: HashMap<u64, CheckedCloneDrop<Vec<u64>>, DefaultHashBuilder, MyAlloc> =
+                match get_test_map(
+                    ARMED_FLAGS.into_iter().zip(DISARMED_FLAGS),
+                    |n| vec![n],
+                    MyAlloc::new(dropped.clone()),
+                ) {
+                    Ok(map) => map,
+                    Err(msg) => panic!("{msg}"),
+                };
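+
+            // `map` now holds 8 elements; the element at index 3 is armed to
+            // panic in `clone` (see `ARMED_FLAGS` above).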
 
             // Clone should normally clone a few elements, and then (when the
             // clone function panics), deallocate both its own memory, memory
             // of `dropped: Arc<AtomicI8>` and the memory of already cloned
             // elements (Vec<u64> memory inside CheckedCloneDrop).
-            let _table2 = map.clone();
+            let _map2 = map.clone();
         }
     }
+
+    #[test]
+    #[should_panic = "panic in drop"]
+    fn test_clone_memory_leaks_and_double_drop_two() {
+        let dropped: Arc<AtomicI8> = Arc::new(AtomicI8::new(2));
+
+        {
+            assert_eq!(ARMED_FLAGS.len(), DISARMED_FLAGS.len());
+
+            let map: HashMap<u64, CheckedCloneDrop<u64>, DefaultHashBuilder, _> = match get_test_map(
+                DISARMED_FLAGS.into_iter().zip(DISARMED_FLAGS),
+                |n| n,
+                MyAlloc::new(dropped.clone()),
+            ) {
+                Ok(map) => map,
+                Err(msg) => panic!("{msg}"),
+            };
+
+            let mut map2 = match get_test_map(
+                DISARMED_FLAGS.into_iter().zip(ARMED_FLAGS),
+                |n| n,
+                MyAlloc::new(dropped.clone()),
+            ) {
+                Ok(map) => map,
+                Err(msg) => panic!("{msg}"),
+            };
+
+            // `clone_from` should drop the elements of `map2` without double
+            // drops and without leaking the allocator. Elements that have not
+            // been dropped leak their memory.
+            map2.clone_from(&map);
+        }
+    }
+
+    /// We check that we still have a working table if the clone operation from
+    /// another thread ended in a panic (when the buckets of the maps are equal
+    /// to each other).
+    #[test]
+    fn test_catch_panic_clone_from_when_len_is_equal() {
+        use std::thread;
+
+        let dropped: Arc<AtomicI8> = Arc::new(AtomicI8::new(2));
+
+        {
+            assert_eq!(ARMED_FLAGS.len(), DISARMED_FLAGS.len());
+
+            let mut map = match get_test_map(
+                DISARMED_FLAGS.into_iter().zip(DISARMED_FLAGS),
+                |n| vec![n],
+                MyAlloc::new(dropped.clone()),
+            ) {
+                Ok(map) => map,
+                Err(msg) => panic!("{msg}"),
+            };
+
+            thread::scope(|s| {
+                let result: thread::ScopedJoinHandle<'_, String> = s.spawn(|| {
+                    let scope_map = match get_test_map(
+                        ARMED_FLAGS.into_iter().zip(DISARMED_FLAGS),
+                        |n| vec![n * 2],
+                        MyAlloc::new(dropped.clone()),
+                    ) {
+                        Ok(map) => map,
+                        Err(msg) => return msg,
+                    };
+                    if map.table.buckets() != scope_map.table.buckets() {
+                        return format!(
+                            "map.table.buckets() != scope_map.table.buckets(),\nleft: `{}`,\nright: `{}`",
+                            map.table.buckets(),
+                            scope_map.table.buckets()
+                        );
+                    }
+                    map.clone_from(&scope_map);
+                    "We must fail the cloning!!!".to_owned()
+                });
+                if let Ok(msg) = result.join() {
+                    panic!("{msg}")
+                }
+            });
+
+            // Let's check that all iterators work fine and do not return elements
+            // (especially `RawIterRange`, which does not depend on the number of
+            // elements in the table, but looks directly at the control bytes).
+            //
+            // SAFETY: We know for sure that `RawTable` will outlive
+            // the returned `RawIter / RawIterRange` iterator.
+            assert_eq!(map.len(), 0);
+            assert_eq!(map.iter().count(), 0);
+            assert_eq!(unsafe { map.table.iter().count() }, 0);
+            assert_eq!(unsafe { map.table.iter().iter.count() }, 0);
+
+            for idx in 0..map.table.buckets() {
+                let idx = idx as u64;
+                assert!(
+                    map.table.find(idx, |(k, _)| *k == idx).is_none(),
+                    "Index: {idx}"
+                );
+            }
+        }
+
+        // All allocator clones should already be dropped.
+        assert_eq!(dropped.load(Ordering::SeqCst), 0);
+    }
+
+    /// We check that we still have a working table if the clone operation from
+    /// another thread ended in a panic (when the buckets of the maps are not
+    /// equal to each other).
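+    ///
+    /// The destination map starts with a single entry, so its bucket count
+    /// differs from that of the 8-element source map built in the spawned
+    /// thread.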
+    #[test]
+    fn test_catch_panic_clone_from_when_len_is_not_equal() {
+        use std::thread;
+
+        let dropped: Arc<AtomicI8> = Arc::new(AtomicI8::new(2));
+
+        {
+            assert_eq!(ARMED_FLAGS.len(), DISARMED_FLAGS.len());
+
+            let mut map = match get_test_map(
+                [DISARMED].into_iter().zip([DISARMED]),
+                |n| vec![n],
+                MyAlloc::new(dropped.clone()),
+            ) {
+                Ok(map) => map,
+                Err(msg) => panic!("{msg}"),
+            };
+
+            thread::scope(|s| {
+                let result: thread::ScopedJoinHandle<'_, String> = s.spawn(|| {
+                    let scope_map = match get_test_map(
+                        ARMED_FLAGS.into_iter().zip(DISARMED_FLAGS),
+                        |n| vec![n * 2],
+                        MyAlloc::new(dropped.clone()),
+                    ) {
+                        Ok(map) => map,
+                        Err(msg) => return msg,
+                    };
+                    if map.table.buckets() == scope_map.table.buckets() {
+                        return format!(
+                            "map.table.buckets() == scope_map.table.buckets(): `{}`",
+                            map.table.buckets()
+                        );
+                    }
+                    map.clone_from(&scope_map);
+                    "We must fail the cloning!!!".to_owned()
+                });
+                if let Ok(msg) = result.join() {
+                    panic!("{msg}")
+                }
+            });
+
+            // Let's check that all iterators work fine and do not return elements
+            // (especially `RawIterRange`, which does not depend on the number of
+            // elements in the table, but looks directly at the control bytes).
+            //
+            // SAFETY: We know for sure that `RawTable` will outlive
+            // the returned `RawIter / RawIterRange` iterator.
+            assert_eq!(map.len(), 0);
+            assert_eq!(map.iter().count(), 0);
+            assert_eq!(unsafe { map.table.iter().count() }, 0);
+            assert_eq!(unsafe { map.table.iter().iter.count() }, 0);
+
+            for idx in 0..map.table.buckets() {
+                let idx = idx as u64;
+                assert!(
+                    map.table.find(idx, |(k, _)| *k == idx).is_none(),
+                    "Index: {idx}"
+                );
+            }
+        }
+
+        // All allocator clones should already be dropped.
+        assert_eq!(dropped.load(Ordering::SeqCst), 0);
+    }
 }
diff --git a/src/raw/mod.rs b/src/raw/mod.rs
index 6c77c1f07..945705965 100644
--- a/src/raw/mod.rs
+++ b/src/raw/mod.rs
@@ -1,5 +1,5 @@
 use crate::alloc::alloc::{handle_alloc_error, Layout};
-use crate::scopeguard::guard;
+use crate::scopeguard::{guard, ScopeGuard};
 use crate::TryReserveError;
 use core::iter::FusedIterator;
 use core::marker::PhantomData;
@@ -2770,7 +2770,7 @@ impl<T: Clone, A: Allocator + Clone> Clone for RawTable<T, A> {
         unsafe {
             // Make sure that if any panics occurs, we clear the table and
             // leave it in an empty state.
-            let mut guard = guard(&mut *self, |self_| {
+            let mut self_ = guard(self, |self_| {
                 self_.clear_no_drop();
             });
@@ -2781,31 +2781,34 @@ impl<T: Clone, A: Allocator + Clone> Clone for RawTable<T, A> {
             // This leak is unavoidable: we can't try dropping more elements
             // since this could lead to another panic and abort the process.
             //
-            // SAFETY: We clear our table right after dropping the elements,
-            // so there is no double drop, since `items` will be equal to zero.
-            guard.drop_elements();
-
-            // Okay, we've successfully dropped all elements, so we'll just set
-            // `items` to zero (so that the `Drop` of `RawTable` doesn't try to
-            // drop all elements twice) and just forget about the guard.
-            guard.table.items = 0;
-            mem::forget(guard);
+            // SAFETY: If something goes wrong, we clear our table right after
+            // dropping the elements, so there is no double drop, since `items`
+            // will be equal to zero.
+            self_.drop_elements();
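+
+            // Note: `self_` is the `ScopeGuard` wrapping `&mut self`; it derefs
+            // to the table, and its closure clears the table if anything below
+            // unwinds before the guard is disarmed.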
 
             // If necessary, resize our table to match the source.
-            if self.buckets() != source.buckets() {
+            if self_.buckets() != source.buckets() {
                 // Skip our drop by using ptr::write.
-                if !self.table.is_empty_singleton() {
+                if !self_.table.is_empty_singleton() {
                     // SAFETY: We have verified that the table is allocated.
-                    self.free_buckets();
+                    self_.free_buckets();
                 }
-                (self as *mut Self).write(
+                // Let's read `alloc` so we can reuse it for the new table.
+                // SAFETY:
+                // * `&mut self_.table.alloc` is valid for reads, properly
+                //   aligned, and points to a properly initialized value, as
+                //   it is derived from a reference.
+                //
+                // * We want to overwrite our own table.
+                let alloc = ptr::read(&self_.table.alloc);
+                (&mut **self_ as *mut Self).write(
                     // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
                     //
                     // SAFETY: This is safe as we are taking the size of an already allocated table
                     // and therefore capacity overflow cannot occur, `self.table.buckets()` is power
                     // of two and all allocator errors will be caught inside `RawTableInner::new_uninitialized`.
                     match Self::new_uninitialized(
-                        self.table.alloc.clone(),
+                        alloc,
                         source.buckets(),
                         Fallibility::Infallible,
                     ) {
@@ -2817,9 +2820,11 @@ impl<T: Clone, A: Allocator + Clone> Clone for RawTable<T, A> {
 
             // Cloning elements may fail (the clone function may panic), but the `ScopeGuard`
             // inside the `clone_from_impl` function will take care of that, dropping all
-            // cloned elements if necessary. The `Drop` of `RawTable` takes care of the rest
-            // by freeing up the allocated memory.
-            self.clone_from_spec(source);
+            // cloned elements if necessary. Our `ScopeGuard` will clear the table.
+            self_.clone_from_spec(source);
+
+            // Disarm the scope guard if cloning was successful.
+            ScopeGuard::into_inner(self_);
         }
     }
 }
@@ -3815,4 +3820,151 @@ mod test_map {
             drop(table);
         }
     }
+
+    /// Checks that we don't try to drop data if `items` is zero, even if
+    /// we have `FULL` control bytes.
+    #[test]
+    fn test_catch_panic_clone_from() {
+        use ::alloc::sync::Arc;
+        use ::alloc::vec::Vec;
+        use allocator_api2::alloc::{AllocError, Allocator, Global};
+        use core::sync::atomic::{AtomicI8, Ordering};
+        use std::thread;
+
+        struct MyAllocInner {
+            drop_count: Arc<AtomicI8>,
+        }
+
+        #[derive(Clone)]
+        struct MyAlloc {
+            _inner: Arc<MyAllocInner>,
+        }
+
+        impl Drop for MyAllocInner {
+            fn drop(&mut self) {
+                println!("MyAlloc freed.");
+                self.drop_count.fetch_sub(1, Ordering::SeqCst);
+            }
+        }
+
+        unsafe impl Allocator for MyAlloc {
+            fn allocate(&self, layout: Layout) -> std::result::Result<NonNull<[u8]>, AllocError> {
+                let g = Global;
+                g.allocate(layout)
+            }
+
+            unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
+                let g = Global;
+                g.deallocate(ptr, layout)
+            }
+        }
+
+        const DISARMED: bool = false;
+        const ARMED: bool = true;
+
+        struct CheckedCloneDrop {
+            panic_in_clone: bool,
+            dropped: bool,
+            need_drop: Vec<u64>,
+        }
+
+        impl Clone for CheckedCloneDrop {
+            fn clone(&self) -> Self {
+                if self.panic_in_clone {
+                    panic!("panic in clone")
+                }
+                Self {
+                    panic_in_clone: self.panic_in_clone,
+                    dropped: self.dropped,
+                    need_drop: self.need_drop.clone(),
+                }
+            }
+        }
+
+        impl Drop for CheckedCloneDrop {
+            fn drop(&mut self) {
+                if self.dropped {
+                    panic!("double drop");
+                }
+                self.dropped = true;
+            }
+        }
+
+        let dropped: Arc<AtomicI8> = Arc::new(AtomicI8::new(2));
+
+        let mut table = RawTable::new_in(MyAlloc {
+            _inner: Arc::new(MyAllocInner {
+                drop_count: dropped.clone(),
+            }),
+        });
+
+        for (idx, panic_in_clone) in core::iter::repeat(DISARMED).take(7).enumerate() {
+            let idx = idx as u64;
+            table.insert(
+                idx,
+                (
+                    idx,
+                    CheckedCloneDrop {
+                        panic_in_clone,
+                        dropped: false,
+                        need_drop: vec![idx],
+                    },
+                ),
+                |(k, _)| *k,
+            );
+        }
+
+        assert_eq!(table.len(), 7);
+
+        thread::scope(|s| {
+            let result = s.spawn(|| {
+                let armed_flags = [
+                    DISARMED, DISARMED, ARMED, DISARMED, DISARMED, DISARMED, DISARMED,
+                ];
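+                // The third element (index 2) is armed, so cloning it panics
+                // and `clone_from` below unwinds inside this scoped thread.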
+                let mut scope_table = RawTable::new_in(MyAlloc {
+                    _inner: Arc::new(MyAllocInner {
+                        drop_count: dropped.clone(),
+                    }),
+                });
+                for (idx, &panic_in_clone) in armed_flags.iter().enumerate() {
+                    let idx = idx as u64;
+                    scope_table.insert(
+                        idx,
+                        (
+                            idx,
+                            CheckedCloneDrop {
+                                panic_in_clone,
+                                dropped: false,
+                                need_drop: vec![idx + 100],
+                            },
+                        ),
+                        |(k, _)| *k,
+                    );
+                }
+                table.clone_from(&scope_table);
+            });
+            assert!(result.join().is_err());
+        });
+
+        // Let's check that all iterators work fine and do not return elements
+        // (especially `RawIterRange`, which does not depend on the number of
+        // elements in the table, but looks directly at the control bytes).
+        //
+        // SAFETY: We know for sure that `RawTable` will outlive
+        // the returned `RawIter / RawIterRange` iterator.
+        assert_eq!(table.len(), 0);
+        assert_eq!(unsafe { table.iter().count() }, 0);
+        assert_eq!(unsafe { table.iter().iter.count() }, 0);
+
+        for idx in 0..table.buckets() {
+            let idx = idx as u64;
+            assert!(
+                table.find(idx, |(k, _)| *k == idx).is_none(),
+                "Index: {idx}"
+            );
+        }
+
+        // All allocator clones should already be dropped.
+        assert_eq!(dropped.load(Ordering::SeqCst), 1);
+    }
 }
diff --git a/src/scopeguard.rs b/src/scopeguard.rs
index 47965a845..382d06043 100644
--- a/src/scopeguard.rs
+++ b/src/scopeguard.rs
@@ -25,7 +25,6 @@ impl<T, F> ScopeGuard<T, F>
 where
     F: FnMut(&mut T),
 {
-    #[allow(dead_code)]
     #[inline]
     pub fn into_inner(guard: Self) -> T {
         // Cannot move out of Drop-implementing types, so