Make TypedArena::alloc_from_iter like DroplessArena::alloc_from_iter. #116327

Closed
231 changes: 81 additions & 150 deletions compiler/rustc_arena/src/lib.rs
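For orientation before the diff: both arena types expose `alloc_from_iter` with the same call-site shape, and this PR only changes the machinery behind it. A minimal usage sketch, assuming the in-tree `rustc_arena` crate (it requires `rustc_private`, so it will not build as an ordinary dependency):

```rust
use rustc_arena::{DroplessArena, TypedArena};

fn demo() {
    // TypedArena owns its values and drops them when the arena is dropped.
    let typed: TypedArena<String> = TypedArena::default();
    let names = typed.alloc_from_iter((0..3).map(|i| format!("name{i}")));
    assert_eq!(names.len(), 3);

    // DroplessArena is restricted to types that need no drop glue.
    let dropless = DroplessArena::default();
    let nums = dropless.alloc_from_iter((0..5u32).map(|i| i * 2));
    assert_eq!(nums, &[0, 2, 4, 6, 8]);
}
```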
@@ -44,23 +44,6 @@ fn outline<F: FnOnce() -> R, R>(f: F) -> R {
     f()
 }
 
-/// An arena that can hold objects of only one type.
-pub struct TypedArena<T> {
-    /// A pointer to the next object to be allocated.
-    ptr: Cell<*mut T>,
-
-    /// A pointer to the end of the allocated area. When this pointer is
-    /// reached, a new chunk is allocated.
-    end: Cell<*mut T>,
-
-    /// A vector of arena chunks.
-    chunks: RefCell<Vec<ArenaChunk<T>>>,
-
-    /// Marker indicating that dropping the arena causes its owned
-    /// instances of `T` to be dropped.
-    _own: PhantomData<T>,
-}
-
 struct ArenaChunk<T = u8> {
     /// The raw storage for the arena chunk.
     storage: NonNull<[MaybeUninit<T>]>,
Expand Down Expand Up @@ -130,6 +113,23 @@ impl<T> ArenaChunk<T> {
const PAGE: usize = 4096;
const HUGE_PAGE: usize = 2 * 1024 * 1024;

/// An arena that can hold objects of only one type.
pub struct TypedArena<T> {
/// A pointer to the next object to be allocated.
ptr: Cell<*mut T>,

/// A pointer to the end of the allocated area. When this pointer is
/// reached, a new chunk is allocated.
end: Cell<*mut T>,

/// A vector of arena chunks.
chunks: RefCell<Vec<ArenaChunk<T>>>,

/// Marker indicating that dropping the arena causes its owned
/// instances of `T` to be dropped.
_own: PhantomData<T>,
}

impl<T> Default for TypedArena<T> {
/// Creates a new `TypedArena`.
fn default() -> TypedArena<T> {
@@ -144,77 +144,6 @@ impl<T> Default for TypedArena<T> {
     }
 }
 
-trait IterExt<T> {
-    fn alloc_from_iter(self, arena: &TypedArena<T>) -> &mut [T];
-}
-
-impl<I, T> IterExt<T> for I
-where
-    I: IntoIterator<Item = T>,
-{
-    // This default collects into a `SmallVec` and then allocates by copying
-    // from it. The specializations below for types like `Vec` are more
-    // efficient, copying directly without the intermediate collecting step.
-    // This default could be made more efficient, like
-    // `DroplessArena::alloc_from_iter`, but it's not hot enough to bother.
-    #[inline]
-    default fn alloc_from_iter(self, arena: &TypedArena<T>) -> &mut [T] {
-        let vec: SmallVec<[_; 8]> = self.into_iter().collect();
-        vec.alloc_from_iter(arena)
-    }
-}
-
-impl<T, const N: usize> IterExt<T> for std::array::IntoIter<T, N> {
-    #[inline]
-    fn alloc_from_iter(self, arena: &TypedArena<T>) -> &mut [T] {
-        let len = self.len();
-        if len == 0 {
-            return &mut [];
-        }
-        // Move the content to the arena by copying and then forgetting it.
-        let start_ptr = arena.alloc_raw_slice(len);
-        unsafe {
-            self.as_slice().as_ptr().copy_to_nonoverlapping(start_ptr, len);
-            mem::forget(self);
-            slice::from_raw_parts_mut(start_ptr, len)
-        }
-    }
-}
-
-impl<T> IterExt<T> for Vec<T> {
-    #[inline]
-    fn alloc_from_iter(mut self, arena: &TypedArena<T>) -> &mut [T] {
-        let len = self.len();
-        if len == 0 {
-            return &mut [];
-        }
-        // Move the content to the arena by copying and then forgetting it.
-        let start_ptr = arena.alloc_raw_slice(len);
-        unsafe {
-            self.as_ptr().copy_to_nonoverlapping(start_ptr, len);
-            self.set_len(0);
-            slice::from_raw_parts_mut(start_ptr, len)
-        }
-    }
-}
-
-impl<A: smallvec::Array> IterExt<A::Item> for SmallVec<A> {
-    #[inline]
-    fn alloc_from_iter(mut self, arena: &TypedArena<A::Item>) -> &mut [A::Item] {
-        let len = self.len();
-        if len == 0 {
-            return &mut [];
-        }
-        // Move the content to the arena by copying and then forgetting it.
-        let start_ptr = arena.alloc_raw_slice(len);
-        unsafe {
-            self.as_ptr().copy_to_nonoverlapping(start_ptr, len);
-            self.set_len(0);
-            slice::from_raw_parts_mut(start_ptr, len)
-        }
-    }
-}
-
 impl<T> TypedArena<T> {
     /// Allocates an object in the `TypedArena`, returning a reference to it.
     #[inline]
@@ -270,8 +199,7 @@ impl<T> TypedArena<T> {
 
     #[inline]
     pub fn alloc_from_iter<I: IntoIterator<Item = T>>(&self, iter: I) -> &mut [T] {
-        assert!(mem::size_of::<T>() != 0);
-        iter.alloc_from_iter(self)
+        alloc_from_iter(iter, |len| self.alloc_raw_slice(len))
     }
 
     /// Grows the arena.
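The one-line body added above is the heart of the PR: each arena now passes its own reservation step as a closure, and the element-writing logic lives in one shared place. A self-contained sketch of that shape (hypothetical names; the global allocator stands in for an arena so it runs outside rustc):

```rust
use std::alloc::{alloc, Layout};

// Hypothetical helper mirroring the pattern: the caller supplies how to
// reserve space for `len` elements, the helper writes the elements into it.
fn fill_from_exact_iter<T, I, F>(iter: I, alloc_raw: F) -> *mut T
where
    I: ExactSizeIterator<Item = T>,
    F: FnOnce(usize) -> *mut T,
{
    let len = iter.len();
    let mem = alloc_raw(len);
    for (i, value) in iter.enumerate() {
        // SAFETY: `alloc_raw` reserved room for `len` elements.
        unsafe { mem.add(i).write(value) };
    }
    mem
}

fn main() {
    // The "arena" here is just the global allocator; the memory is leaked,
    // which is acceptable for a short demo.
    let ptr = fill_from_exact_iter(0u32..4, |len| unsafe {
        alloc(Layout::array::<u32>(len).unwrap()) as *mut u32
    });
    let written = unsafe { std::slice::from_raw_parts(ptr, 4) };
    assert_eq!(written, &[0, 1, 2, 3]);
}
```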
@@ -527,76 +455,79 @@ impl DroplessArena {
         }
     }
 
-    /// # Safety
-    ///
-    /// The caller must ensure that `mem` is valid for writes up to
-    /// `size_of::<T>() * len`.
-    #[inline]
-    unsafe fn write_from_iter<T, I: Iterator<Item = T>>(
-        &self,
-        mut iter: I,
-        len: usize,
-        mem: *mut T,
-    ) -> &mut [T] {
-        let mut i = 0;
-        // Use a manual loop since LLVM manages to optimize it better for
-        // slice iterators
-        loop {
-            // SAFETY: The caller must ensure that `mem` is valid for writes up to
-            // `size_of::<T>() * len`.
-            unsafe {
-                match iter.next() {
-                    Some(value) if i < len => mem.add(i).write(value),
-                    Some(_) | None => {
-                        // We only return as many items as the iterator gave us, even
-                        // though it was supposed to give us `len`
-                        return slice::from_raw_parts_mut(mem, i);
-                    }
-                }
-            }
-            i += 1;
-        }
-    }
-
     #[inline]
     pub fn alloc_from_iter<T, I: IntoIterator<Item = T>>(&self, iter: I) -> &mut [T] {
-        let iter = iter.into_iter();
-        assert!(mem::size_of::<T>() != 0);
-        assert!(!mem::needs_drop::<T>());
-
-        let size_hint = iter.size_hint();
-
-        match size_hint {
-            (min, Some(max)) if min == max => {
-                // We know the exact number of elements the iterator will produce here
-                let len = min;
-
-                if len == 0 {
-                    return &mut [];
-                }
-
-                let mem = self.alloc_raw(Layout::array::<T>(len).unwrap()) as *mut T;
-                unsafe { self.write_from_iter(iter, len, mem) }
-            }
-            (_, _) => {
-                outline(move || -> &mut [T] {
-                    let mut vec: SmallVec<[_; 8]> = iter.collect();
-                    if vec.is_empty() {
-                        return &mut [];
-                    }
-                    // Move the content to the arena by copying it and then forgetting
-                    // the content of the SmallVec
-                    unsafe {
-                        let len = vec.len();
-                        let start_ptr =
-                            self.alloc_raw(Layout::for_value::<[T]>(vec.as_slice())) as *mut T;
-                        vec.as_ptr().copy_to_nonoverlapping(start_ptr, len);
-                        vec.set_len(0);
-                        slice::from_raw_parts_mut(start_ptr, len)
-                    }
-                })
-            }
-        }
+        alloc_from_iter(iter, |len| self.alloc_raw(Layout::array::<T>(len).unwrap()) as *mut T)
     }
 }
 
+#[inline]
+pub fn alloc_from_iter<'a, T, I, F>(iter: I, alloc_raw: F) -> &'a mut [T]
+where
+    I: IntoIterator<Item = T>,
+    F: FnOnce(usize) -> *mut T,
+{
+    let iter = iter.into_iter();
+    assert!(mem::size_of::<T>() != 0);
+
+    match iter.size_hint() {
+        (min, Some(max)) if min == max => {
+            // We know the exact number of elements the iterator will produce here
+            let len = min;
+
+            if len == 0 {
+                return &mut [];
+            }
+
+            let mem = alloc_raw(len);
+            unsafe { write_from_iter(iter, len, mem) }
+        }
+        (_, _) => {
+            outline(move || -> &mut [T] {
+                let mut vec: SmallVec<[_; 8]> = iter.collect();
+                if vec.is_empty() {
+                    return &mut [];
+                }
+                // Move the content to the arena by copying it and then forgetting
+                // the content of the SmallVec
+                unsafe {
+                    let len = vec.len();
+                    let start_ptr = alloc_raw(len);
+                    vec.as_ptr().copy_to_nonoverlapping(start_ptr, len);
+                    vec.set_len(0);
+                    slice::from_raw_parts_mut(start_ptr, len)
+                }
+            })
+        }
+    }
+}
+
+/// # Safety
+///
+/// The caller must ensure that `mem` is valid for writes up to
+/// `size_of::<T>() * len`.
+#[inline]
+unsafe fn write_from_iter<'a, T, I: Iterator<Item = T>>(
+    mut iter: I,
+    len: usize,
+    mem: *mut T,
+) -> &'a mut [T] {
+    let mut i = 0;
+    // Use a manual loop since LLVM manages to optimize it better for
+    // slice iterators
+    loop {
+        // SAFETY: The caller must ensure that `mem` is valid for writes up to
+        // `size_of::<T>() * len`.
+        unsafe {
+            match iter.next() {
+                Some(value) if i < len => mem.add(i).write(value),
+                Some(_) | None => {
+                    // We only return as many items as the iterator gave us, even
+                    // though it was supposed to give us `len`
+                    return slice::from_raw_parts_mut(mem, i);
+                }
+            }
+        }
+        i += 1;
+    }
+}
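A note on `write_from_iter`'s contract, since it underpins the fast path: it trusts `size_hint` only as an upper bound, writing at most `len` items and shrinking the returned slice if the iterator under-delivers. A standalone sketch of that behavior (simplified: a stack buffer stands in for the arena chunk, and the function is kept safe-to-call for brevity where the real one is an `unsafe fn`):

```rust
use std::mem::MaybeUninit;

// Simplified version of the helper: write at most `len` items from `iter`
// into `mem`, returning a slice over only the items actually written.
fn write_from_iter<'a, T, I: Iterator<Item = T>>(
    mut iter: I,
    len: usize,
    mem: *mut T,
) -> &'a mut [T] {
    let mut i = 0;
    loop {
        // SAFETY (a caller obligation in the real code): `mem` must be
        // valid for writes of `len` elements of `T`.
        unsafe {
            match iter.next() {
                Some(value) if i < len => mem.add(i).write(value),
                Some(_) | None => return std::slice::from_raw_parts_mut(mem, i),
            }
        }
        i += 1;
    }
}

fn main() {
    // A stack buffer standing in for an arena chunk of 8 slots.
    let mut buf: [MaybeUninit<u32>; 8] = unsafe { MaybeUninit::uninit().assume_init() };
    let mem = buf.as_mut_ptr() as *mut u32;

    // Reserve room for 8 items but feed an iterator that yields only 3:
    // the returned slice is truncated to the 3 items actually produced.
    let written = write_from_iter((0..3u32).map(|x| x * 10), 8, mem);
    assert_eq!(written, &[0, 10, 20]);
}
```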
