Fix VecDeque::shrink_to UB when handle_alloc_error unwinds. #123803

Merged 2 commits on May 26, 2024
4 changes: 4 additions & 0 deletions library/alloc/Cargo.toml
@@ -20,6 +20,10 @@ rand_xorshift = "0.3.0"
name = "alloctests"
path = "tests/lib.rs"

[[test]]
name = "vec_deque_alloc_error"
path = "tests/vec_deque_alloc_error.rs"

[[bench]]
name = "allocbenches"
path = "benches/lib.rs"
66 changes: 65 additions & 1 deletion library/alloc/src/collections/vec_deque/mod.rs
@@ -982,6 +982,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
        // `head` and `len` are at most `isize::MAX` and `target_cap < self.capacity()`, so nothing can
        // overflow.
        let tail_outside = (target_cap + 1..=self.capacity()).contains(&(self.head + self.len));
        // Used in the drop guard below.
        let old_head = self.head;

        if self.len == 0 {
            self.head = 0;
@@ -1034,12 +1036,74 @@
            }
            self.head = new_head;
        }
        self.buf.shrink_to_fit(target_cap);

        struct Guard<'a, T, A: Allocator> {
            deque: &'a mut VecDeque<T, A>,
            old_head: usize,
            target_cap: usize,
        }

        impl<T, A: Allocator> Drop for Guard<'_, T, A> {
            #[cold]
            fn drop(&mut self) {
                unsafe {
                    // SAFETY: This is only called if `buf.shrink_to_fit` unwinds,
                    // which is the only time it's safe to call `abort_shrink`.
                    self.deque.abort_shrink(self.old_head, self.target_cap)
                }
            }
        }

        let guard = Guard { deque: self, old_head, target_cap };

        guard.deque.buf.shrink_to_fit(target_cap);

        // Don't drop the guard if we didn't unwind.
        mem::forget(guard);

        debug_assert!(self.head < self.capacity() || self.capacity() == 0);
        debug_assert!(self.len <= self.capacity());
    }

    /// Reverts the deque back into a consistent state in case `shrink_to` failed.
    /// This is necessary to prevent UB if the backing allocator returns an error
    /// from `shrink` and `handle_alloc_error` subsequently unwinds (see #123369).
    ///
    /// `old_head` refers to the head index before `shrink_to` was called. `target_cap`
    /// is the capacity that it was trying to shrink to.
    unsafe fn abort_shrink(&mut self, old_head: usize, target_cap: usize) {
        // Moral equivalent of self.head + self.len <= target_cap. Won't overflow
        // because `self.len <= target_cap`.
        if self.head <= target_cap - self.len {
            // The deque's buffer is contiguous, so no need to copy anything around.
            return;
        }

        // `shrink_to` already copied the head to fit into the new capacity, so this won't overflow.
        let head_len = target_cap - self.head;
        // `self.head > target_cap - self.len` => `self.len > target_cap - self.head =: head_len` so this must be positive.
        let tail_len = self.len - head_len;

        if tail_len <= cmp::min(head_len, self.capacity() - target_cap) {
            // There's enough spare capacity to copy the tail to the back (because `tail_len < self.capacity() - target_cap`),
            // and copying the tail should be cheaper than copying the head (because `tail_len <= head_len`).

            unsafe {
                // The old tail and the new tail can't overlap because the head slice lies between them. The
                // head slice ends at `target_cap`, so that's where we copy to.
                self.copy_nonoverlapping(0, target_cap, tail_len);
            }
        } else {
            // Either there's not enough spare capacity to make the deque contiguous, or the head is shorter than the tail
            // (and therefore hopefully cheaper to copy).
            unsafe {
                // The old and the new head slice can overlap, so we can't use `copy_nonoverlapping` here.
                self.copy(self.head, old_head, head_len);
                self.head = old_head;
            }
        }
    }
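
To make the index arithmetic in `abort_shrink` concrete, here is a small standalone check with hypothetical numbers (an illustration, not part of the PR): a 16-slot buffer being shrunk to a target capacity of 8, with the head already moved to index 5 and 6 elements stored.

fn main() {
    // Hypothetical state right after `buf.shrink_to_fit` unwound: the buffer
    // still has its old capacity, but `head` was already adjusted for the
    // smaller target capacity.
    let (capacity, target_cap, head, len) = (16usize, 8, 5, 6);

    // Not contiguous: head > target_cap - len (5 > 2), so `abort_shrink`
    // cannot simply return.
    assert!(head > target_cap - len);

    // Split sizes as computed by `abort_shrink`.
    let head_len = target_cap - head; // 3 elements at indices [5, 8)
    let tail_len = len - head_len; // 3 elements wrapped around to [0, 3)

    // tail_len <= min(head_len, spare capacity), so the cheaper branch applies:
    // the wrapped tail is copied from index 0 to index `target_cap` (8),
    // leaving the deque contiguous at [5, 11) inside the still-unshrunk buffer.
    assert!(tail_len <= head_len.min(capacity - target_cap));
}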

    /// Shortens the deque, keeping the first `len` elements and dropping
    /// the rest.
    ///
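The fix relies on the drop-guard idiom: arm a guard whose `Drop` impl restores the invariant, run the fallible operation, then `mem::forget` the guard on the success path so the rollback never runs. A minimal self-contained sketch of the same idiom outside `VecDeque`, with hypothetical names (`RestoreGuard`, `guarded_extend`) and a plain `Vec` standing in for the deque:

use std::mem;
use std::panic::{catch_unwind, AssertUnwindSafe};

// Rolls the Vec back to its previous length if the guarded operation unwinds.
struct RestoreGuard<'a> {
    vec: &'a mut Vec<u8>,
    old_len: usize,
}

impl Drop for RestoreGuard<'_> {
    fn drop(&mut self) {
        // Only reached on the unwind path; the success path forgets the guard.
        self.vec.truncate(self.old_len);
    }
}

fn guarded_extend(vec: &mut Vec<u8>, extra: &[u8], fallible_step: impl FnOnce(&mut Vec<u8>)) {
    let old_len = vec.len();
    let guard = RestoreGuard { vec, old_len };
    guard.vec.extend_from_slice(extra);
    // If this unwinds, `guard` is dropped and the vector is rolled back.
    fallible_step(&mut *guard.vec);
    // Success: disarm the guard so the rollback never runs.
    mem::forget(guard);
}

fn main() {
    let mut v = vec![1, 2, 3];
    let result = catch_unwind(AssertUnwindSafe(|| {
        guarded_extend(&mut v, &[4, 5], |_| panic!("simulated allocation failure"));
    }));
    assert!(result.is_err());
    // The guard restored the pre-call state despite the unwind.
    assert_eq!(v, [1, 2, 3]);
}
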
49 changes: 49 additions & 0 deletions library/alloc/tests/vec_deque_alloc_error.rs
@@ -0,0 +1,49 @@
#![feature(alloc_error_hook, allocator_api)]

use std::{
    alloc::{set_alloc_error_hook, AllocError, Allocator, Layout, System},
    collections::VecDeque,
    panic::{catch_unwind, AssertUnwindSafe},
    ptr::NonNull,
};

#[test]
fn test_shrink_to_unwind() {
    // This tests that `shrink_to` leaves the deque in a consistent state when
    // the call to `RawVec::shrink_to_fit` unwinds. The code is adapted from #123369
    // but changed to hopefully not have any UB even if the test fails.

    struct BadAlloc;

    unsafe impl Allocator for BadAlloc {
        fn allocate(&self, l: Layout) -> Result<NonNull<[u8]>, AllocError> {
            // We allocate zeroed here so that the whole buffer of the deque
            // is always initialized. That way, even if the deque is left in
            // an inconsistent state, no uninitialized memory should be accessed.
            System.allocate_zeroed(l)
        }

        unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
            unsafe { System.deallocate(ptr, layout) }
        }

        unsafe fn shrink(
            &self,
            _ptr: NonNull<u8>,
            _old_layout: Layout,
            _new_layout: Layout,
        ) -> Result<NonNull<[u8]>, AllocError> {
            Err(AllocError)
        }
    }

    set_alloc_error_hook(|_| panic!("alloc error"));

    let mut v = VecDeque::with_capacity_in(15, BadAlloc);
    v.push_back(1);
    v.push_front(2);
    // This should unwind because it calls `BadAlloc::shrink` and then `handle_alloc_error` which unwinds.
    assert!(catch_unwind(AssertUnwindSafe(|| v.shrink_to_fit())).is_err());
    // This should only pass if the deque is left in a consistent state.
    assert_eq!(v, [2, 1]);
}
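
To exercise the new test target in a rust-lang/rust checkout, the usual bootstrap invocation for the alloc test suites should pick it up (this is an assumption about the standard x.py workflow, not something stated in the PR):

./x.py test library/alloc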