From d6710b8fc8523ea502cf7b267c49f4a7dfa2c942 Mon Sep 17 00:00:00 2001 From: Conrad Ludgate Date: Sun, 27 Feb 2022 12:45:29 +0000 Subject: [PATCH 01/14] delete raw_vec --- library/alloc/src/boxed.rs | 334 ++++++++++- .../alloc/src/collections/vec_deque/mod.rs | 68 ++- library/alloc/src/lib.rs | 2 - library/alloc/src/raw_vec.rs | 519 ------------------ library/alloc/src/vec/into_iter.rs | 6 +- library/alloc/src/vec/mod.rs | 36 +- library/alloc/src/vec/spec_from_elem.rs | 8 +- .../alloc/src/vec/spec_from_iter_nested.rs | 5 +- library/core/src/ptr/unique.rs | 20 + 9 files changed, 411 insertions(+), 587 deletions(-) delete mode 100644 library/alloc/src/raw_vec.rs diff --git a/library/alloc/src/boxed.rs b/library/alloc/src/boxed.rs index 639e7f213eaae..d0782dfae3a2e 100644 --- a/library/alloc/src/boxed.rs +++ b/library/alloc/src/boxed.rs @@ -132,6 +132,7 @@ #![stable(feature = "rust1", since = "1.0.0")] +use core::alloc::LayoutError; use core::any::Any; use core::async_iter::AsyncIterator; use core::borrow; @@ -157,7 +158,9 @@ use crate::alloc::{handle_alloc_error, WriteCloneIntoRaw}; use crate::alloc::{AllocError, Allocator, Global, Layout}; #[cfg(not(no_global_oom_handling))] use crate::borrow::Cow; -use crate::raw_vec::RawVec; +// use crate::raw_vec::RawVec; +use crate::collections::TryReserveError; +use crate::collections::TryReserveErrorKind; #[cfg(not(no_global_oom_handling))] use crate::str::from_boxed_utf8_unchecked; #[cfg(not(no_global_oom_handling))] @@ -586,6 +589,14 @@ impl Box { } impl Box<[T]> { + pub(crate) const MIN_NON_ZERO_CAP: usize = if mem::size_of::() == 1 { + 8 + } else if mem::size_of::() <= 1024 { + 4 + } else { + 1 + }; + /// Constructs a new boxed slice with uninitialized contents. /// /// # Examples @@ -610,7 +621,7 @@ impl Box<[T]> { #[unstable(feature = "new_uninit", issue = "63291")] #[must_use] pub fn new_uninit_slice(len: usize) -> Box<[mem::MaybeUninit]> { - unsafe { RawVec::with_capacity(len).into_box(len) } + Self::new_uninit_slice_in(len, Global) } /// Constructs a new boxed slice with uninitialized contents, with the memory @@ -635,7 +646,7 @@ impl Box<[T]> { #[unstable(feature = "new_uninit", issue = "63291")] #[must_use] pub fn new_zeroed_slice(len: usize) -> Box<[mem::MaybeUninit]> { - unsafe { RawVec::with_capacity_zeroed(len).into_box(len) } + Self::new_zeroed_slice_in(len, Global) } /// Constructs a new boxed slice with uninitialized contents. Returns an error if @@ -667,7 +678,7 @@ impl Box<[T]> { Err(_) => return Err(AllocError), }; let ptr = Global.allocate(layout)?; - Ok(RawVec::from_raw_parts_in(ptr.as_mut_ptr() as *mut _, len, Global).into_box(len)) + Ok(Box::from_raw_slice_parts_in(ptr.as_mut_ptr() as *mut _, len, Global)) } } @@ -699,12 +710,60 @@ impl Box<[T]> { Err(_) => return Err(AllocError), }; let ptr = Global.allocate_zeroed(layout)?; - Ok(RawVec::from_raw_parts_in(ptr.as_mut_ptr() as *mut _, len, Global).into_box(len)) + Ok(Box::from_raw_slice_parts_in(ptr.as_mut_ptr() as *mut _, len, Global)) } } + + pub const fn empty() -> Self { + Self::empty_in(Global) + } +} + +#[cfg(not(no_global_oom_handling))] +enum AllocInit { + /// The contents of the new memory are uninitialized. + Uninitialized, + /// The new memory is guaranteed to be zeroed. 
+ Zeroed, } impl Box<[T], A> { + pub const fn empty_in(alloc: A) -> Self { + Box(Unique::dangling_slice(), alloc) + } + + #[cfg(not(no_global_oom_handling))] + fn allocate_in(capacity: usize, init: AllocInit, alloc: A) -> Box<[mem::MaybeUninit], A> { + // Don't allocate here because `Drop` will not deallocate when `capacity` is 0. + if mem::size_of::() == 0 || capacity == 0 { + Box::empty_in(alloc) + } else { + // We avoid `unwrap_or_else` here because it bloats the amount of + // LLVM IR generated. + let layout = match Layout::array::(capacity) { + Ok(layout) => layout, + Err(_) => capacity_overflow(), + }; + match alloc_guard(layout.size()) { + Ok(_) => {} + Err(_) => capacity_overflow(), + } + let result = match init { + AllocInit::Uninitialized => alloc.allocate(layout), + AllocInit::Zeroed => alloc.allocate_zeroed(layout), + }; + let ptr = match result { + Ok(ptr) => ptr, + Err(_) => handle_alloc_error(layout), + }; + + // Allocators currently return a `NonNull<[u8]>` whose length + // matches the size requested. If that ever changes, the capacity + // here should change to `ptr.len() / mem::size_of::()`. + unsafe { Box::from_raw_slice_parts_in(ptr.as_ptr().cast(), capacity, alloc) } + } + } + /// Constructs a new boxed slice with uninitialized contents in the provided allocator. /// /// # Examples @@ -732,7 +791,7 @@ impl Box<[T], A> { // #[unstable(feature = "new_uninit", issue = "63291")] #[must_use] pub fn new_uninit_slice_in(len: usize, alloc: A) -> Box<[mem::MaybeUninit], A> { - unsafe { RawVec::with_capacity_in(len, alloc).into_box(len) } + Self::allocate_in(len, AllocInit::Uninitialized, alloc) } /// Constructs a new boxed slice with uninitialized contents in the provided allocator, @@ -760,7 +819,16 @@ impl Box<[T], A> { // #[unstable(feature = "new_uninit", issue = "63291")] #[must_use] pub fn new_zeroed_slice_in(len: usize, alloc: A) -> Box<[mem::MaybeUninit], A> { - unsafe { RawVec::with_capacity_zeroed_in(len, alloc).into_box(len) } + Self::allocate_in(len, AllocInit::Zeroed, alloc) + } + + #[unstable(feature = "allocator_api", issue = "32838")] + #[inline] + pub unsafe fn from_raw_slice_parts_in(ptr: *mut T, len: usize, alloc: A) -> Self { + unsafe { + let raw = core::slice::from_raw_parts_mut(ptr, len); + Self::from_raw_in(raw, alloc) + } } } @@ -876,6 +944,181 @@ impl Box<[mem::MaybeUninit], A> { let (raw, alloc) = Box::into_raw_with_allocator(self); unsafe { Box::from_raw_in(raw as *mut [T], alloc) } } + + /// Ensures that the buffer contains at least enough space to hold `len + + /// additional` elements. If it doesn't already have enough capacity, will + /// reallocate enough space plus comfortable slack space to get amortized + /// *O*(1) behavior. Will limit this behavior if it would needlessly cause + /// itself to panic. + /// + /// If `len` exceeds `self.capacity()`, this may fail to actually allocate + /// the requested space. This is not really unsafe, but the unsafe + /// code *you* write that relies on the behavior of this function may break. + /// + /// This is ideal for implementing a bulk-push operation like `extend`. + /// + /// # Panics + /// + /// Panics if the new capacity exceeds `isize::MAX` bytes. + /// + /// # Aborts + /// + /// Aborts on OOM. + #[cfg(not(no_global_oom_handling))] + #[inline] + pub fn reserve(&mut self, len: usize, additional: usize) { + handle_reserve(self.grow_exact(len, additional)) + } + + /// A specialized version of `reserve()` used only by the hot and + /// oft-instantiated `Vec::push()`, which does its own capacity check. 
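    ///
    /// The intended call site is the capacity check this series gives
    /// `Vec::push` (see the `vec/mod.rs` hunk below), which stays inline while
    /// the cold grow path does not:
    ///
    /// ```ignore (illustrative — these methods are crate-internal)
    /// if self.len == self.capacity() {
    ///     self.buf.reserve_for_push(self.len);
    /// }
    /// ```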
+ #[cfg(not(no_global_oom_handling))] + #[inline(never)] + pub fn reserve_for_push(&mut self, len: usize) { + handle_reserve(self.grow_amortized(len, 1)); + } + + /// Shrinks the buffer down to the specified capacity. If the given amount + /// is 0, actually completely deallocates. + /// + /// # Panics + /// + /// Panics if the given amount is *larger* than the current capacity. + /// + /// # Aborts + /// + /// Aborts on OOM. + #[cfg(not(no_global_oom_handling))] + pub fn shrink_to_fit(&mut self, cap: usize) { + handle_reserve(self.shrink(cap)); + } + + /// The same as `reserve`, but returns on errors instead of panicking or aborting. + pub fn try_reserve(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> { + self.grow_amortized(len, additional) + } + + /// Ensures that the buffer contains at least enough space to hold `len + + /// additional` elements. If it doesn't already, will reallocate the + /// minimum possible amount of memory necessary. Generally this will be + /// exactly the amount of memory necessary, but in principle the allocator + /// is free to give back more than we asked for. + /// + /// If `len` exceeds `self.capacity()`, this may fail to actually allocate + /// the requested space. This is not really unsafe, but the unsafe code + /// *you* write that relies on the behavior of this function may break. + /// + /// # Panics + /// + /// Panics if the new capacity exceeds `isize::MAX` bytes. + /// + /// # Aborts + /// + /// Aborts on OOM. + #[cfg(not(no_global_oom_handling))] + pub fn reserve_exact(&mut self, len: usize, additional: usize) { + handle_reserve(self.try_reserve_exact(len, additional)); + } + + /// The same as `reserve_exact`, but returns on errors instead of panicking or aborting. + pub fn try_reserve_exact( + &mut self, + len: usize, + additional: usize, + ) -> Result<(), TryReserveError> { + self.grow_exact(len, additional) + } + + fn set_ptr_and_cap(&mut self, ptr: core::ptr::NonNull<[u8]>, cap: usize) { + // Allocators currently return a `NonNull<[u8]>` whose length matches + // the size requested. If that ever changes, the capacity here should + // change to `ptr.len() / mem::size_of::()`. + self.0 = unsafe { + Unique::new_unchecked(core::slice::from_raw_parts_mut(ptr.cast().as_ptr(), cap)) + } + } + + // This method is usually instantiated many times. So we want it to be as + // small as possible, to improve compile times. But we also want as much of + // its contents to be statically computable as possible, to make the + // generated code run faster. Therefore, this method is carefully written + // so that all of the code that depends on `T` is within it, while as much + // of the code that doesn't depend on `T` as possible is in functions that + // are non-generic over `T`. + fn grow_amortized(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> { + // This is ensured by the calling contexts. + debug_assert!(additional > 0); + + if mem::size_of::() == 0 { + // Since we return a capacity of `usize::MAX` when `elem_size` is + // 0, getting to here necessarily means the `RawVec` is overfull. + return Err(TryReserveErrorKind::CapacityOverflow.into()); + } + + // Nothing we can really do about these checks, sadly. + let required_cap = + len.checked_add(additional).ok_or(TryReserveErrorKind::CapacityOverflow)?; + + // This guarantees exponential growth. The doubling cannot overflow + // because `cap <= isize::MAX` and the type of `cap` is `usize`. 
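        // For example, with `T = u32` (`MIN_NON_ZERO_CAP == 4`), a buffer whose
        // current capacity `self.len()` is 4 and whose caller needs
        // `len + additional == 5` gets `max(4, max(4 * 2, 5)) == 8` slots.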
+ let cap = core::cmp::max(self.len() * 2, required_cap); + let cap = core::cmp::max(Box::<[T]>::MIN_NON_ZERO_CAP, cap); + + let new_layout = Layout::array::(cap); + + // `finish_grow` is non-generic over `T`. + let ptr = finish_grow(new_layout, self.current_memory(), &mut self.1)?; + self.set_ptr_and_cap(ptr, cap); + Ok(()) + } + + // The constraints on this method are much the same as those on + // `grow_amortized`, but this method is usually instantiated less often so + // it's less critical. + fn grow_exact(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> { + if mem::size_of::() == 0 { + // Since we return a capacity of `usize::MAX` when `elem_size` is + // 0, getting to here necessarily means the `RawVec` is overfull. + return Err(TryReserveErrorKind::CapacityOverflow.into()); + } + + let cap = len.checked_add(additional).ok_or(TryReserveErrorKind::CapacityOverflow)?; + let new_layout = Layout::array::(cap); + + // `finish_grow` is non-generic over `T`. + let ptr = finish_grow(new_layout, self.current_memory(), &mut self.1)?; + self.set_ptr_and_cap(ptr, cap); + Ok(()) + } + + fn current_memory(&self) -> Option<(core::ptr::NonNull, Layout)> { + if mem::size_of::() == 0 || self.len() == 0 { + None + } else { + // We have an allocated chunk of memory, so we can bypass runtime + // checks to get our current layout. + unsafe { + let layout = Layout::array::(self.len()).unwrap_unchecked(); + Some((self.0.cast().into(), layout)) + } + } + } + + fn shrink(&mut self, cap: usize) -> Result<(), TryReserveError> { + let (ptr, layout) = if let Some(mem) = self.current_memory() { mem } else { return Ok(()) }; + + let ptr = unsafe { + // `Layout::array` cannot overflow here because it would have + // overflowed earlier when capacity was larger. + let new_layout = Layout::array::(cap).unwrap_unchecked(); + self.1.shrink(ptr, layout, new_layout).map_err(|_| TryReserveErrorKind::AllocError { + layout: new_layout, + non_exhaustive: (), + })? + }; + self.set_ptr_and_cap(ptr, cap); + Ok(()) + } } impl Box { @@ -1424,10 +1667,10 @@ impl From<&[T]> for Box<[T]> { /// ``` fn from(slice: &[T]) -> Box<[T]> { let len = slice.len(); - let buf = RawVec::with_capacity(len); + let buf = Box::new_uninit_slice(len); unsafe { - ptr::copy_nonoverlapping(slice.as_ptr(), buf.ptr(), len); - buf.into_box(slice.len()).assume_init() + ptr::copy_nonoverlapping(slice.as_ptr(), buf.0.as_ptr().cast(), len); + buf.assume_init() } } } @@ -2016,3 +2259,74 @@ impl AsyncIterator for Box { (**self).size_hint() } } + +// This function is outside `Box` to minimize compile times. See the comment +// above `RawVec::grow_amortized` for details. (The `A` parameter isn't +// significant, because the number of different `A` types seen in practice is +// much smaller than the number of `T` types.) +#[inline(never)] +fn finish_grow( + new_layout: Result, + current_memory: Option<(core::ptr::NonNull, Layout)>, + alloc: &mut A, +) -> Result, TryReserveError> +where + A: Allocator, +{ + // Check for the error here to minimize the size of `RawVec::grow_*`. 
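    // `new_layout` is the `Result` produced by `Layout::array::<T>(cap)` at the
    // call sites above; `Err` means the total byte size overflowed, which is
    // reported as `CapacityOverflow` rather than as an allocator failure.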
+ let new_layout = new_layout.map_err(|_| TryReserveErrorKind::CapacityOverflow)?; + + alloc_guard(new_layout.size())?; + + let memory = if let Some((ptr, old_layout)) = current_memory { + debug_assert_eq!(old_layout.align(), new_layout.align()); + unsafe { + // The allocator checks for alignment equality + core::intrinsics::assume(old_layout.align() == new_layout.align()); + alloc.grow(ptr, old_layout, new_layout) + } + } else { + alloc.allocate(new_layout) + }; + + memory.map_err(|_| { + TryReserveErrorKind::AllocError { layout: new_layout, non_exhaustive: () }.into() + }) +} + +// Central function for reserve error handling. +#[cfg(not(no_global_oom_handling))] +#[inline] +fn handle_reserve(result: Result<(), TryReserveError>) { + match result.map_err(|e| e.kind()) { + Err(CapacityOverflow) => capacity_overflow(), + Err(TryReserveErrorKind::AllocError { layout, .. }) => handle_alloc_error(layout), + Ok(()) => { /* yay */ } + } +} + +// We need to guarantee the following: +// * We don't ever allocate `> isize::MAX` byte-size objects. +// * We don't overflow `usize::MAX` and actually allocate too little. +// +// On 64-bit we just need to check for overflow since trying to allocate +// `> isize::MAX` bytes will surely fail. On 32-bit and 16-bit we need to add +// an extra guard for this in case we're running on a platform which can use +// all 4GB in user-space, e.g., PAE or x32. + +#[inline] +fn alloc_guard(alloc_size: usize) -> Result<(), TryReserveError> { + if usize::BITS < 64 && alloc_size > isize::MAX as usize { + Err(TryReserveErrorKind::CapacityOverflow.into()) + } else { + Ok(()) + } +} + +// One central function responsible for reporting capacity overflows. This'll +// ensure that the code generation related to these panics is minimal as there's +// only one location which panics rather than a bunch throughout the module. +#[cfg(not(no_global_oom_handling))] +fn capacity_overflow() -> ! { + panic!("capacity overflow"); +} diff --git a/library/alloc/src/collections/vec_deque/mod.rs b/library/alloc/src/collections/vec_deque/mod.rs index 5f1a6848ae62a..a60f3b7771c60 100644 --- a/library/alloc/src/collections/vec_deque/mod.rs +++ b/library/alloc/src/collections/vec_deque/mod.rs @@ -18,9 +18,9 @@ use core::ptr::{self, NonNull}; use core::slice; use crate::alloc::{Allocator, Global}; +use crate::boxed::Box; use crate::collections::TryReserveError; use crate::collections::TryReserveErrorKind; -use crate::raw_vec::RawVec; use crate::vec::Vec; #[macro_use] @@ -106,7 +106,7 @@ pub struct VecDeque< // is defined as the distance between the two. 
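    // For example, with `cap() == 8`, `tail == 6`, and `head == 2`, the deque
    // holds 4 elements in slots 6, 7, 0, 1: indices wrap around the end of the
    // buffer, which is why the capacity is kept at a power of two.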
tail: usize, head: usize, - buf: RawVec, + buf: Box<[MaybeUninit], A>, } #[stable(feature = "rust1", since = "1.0.0")] @@ -170,8 +170,14 @@ impl Default for VecDeque { impl VecDeque { /// Marginally more convenient #[inline] - fn ptr(&self) -> *mut T { - self.buf.ptr() + fn ptr(&self) -> *const T { + self.buf.as_ptr().cast() + } + + /// Marginally more convenient + #[inline] + fn mut_ptr(&mut self) -> *mut T { + self.buf.as_mut_ptr().cast() } /// Marginally more convenient @@ -181,7 +187,7 @@ impl VecDeque { // For zero sized types, we are always at maximum capacity MAXIMUM_ZST_CAPACITY } else { - self.buf.capacity() + self.buf.len() } } @@ -206,7 +212,7 @@ impl VecDeque { /// [zeroed]: mem::MaybeUninit::zeroed #[inline] unsafe fn buffer_as_mut_slice(&mut self) -> &mut [MaybeUninit] { - unsafe { slice::from_raw_parts_mut(self.ptr() as *mut MaybeUninit, self.cap()) } + unsafe { slice::from_raw_parts_mut(self.mut_ptr() as *mut MaybeUninit, self.cap()) } } /// Moves an element out of the buffer @@ -219,7 +225,7 @@ impl VecDeque { #[inline] unsafe fn buffer_write(&mut self, off: usize, value: T) { unsafe { - ptr::write(self.ptr().add(off), value); + ptr::write(self.mut_ptr().add(off), value); } } @@ -252,7 +258,7 @@ impl VecDeque { /// Copies a contiguous block of memory len long from src to dst #[inline] - unsafe fn copy(&self, dst: usize, src: usize, len: usize) { + unsafe fn copy(&mut self, dst: usize, src: usize, len: usize) { debug_assert!( dst + len <= self.cap(), "cpy dst={} src={} len={} cap={}", @@ -270,13 +276,13 @@ impl VecDeque { self.cap() ); unsafe { - ptr::copy(self.ptr().add(src), self.ptr().add(dst), len); + ptr::copy(self.ptr().add(src), self.mut_ptr().add(dst), len); } } /// Copies a contiguous block of memory len long from src to dst #[inline] - unsafe fn copy_nonoverlapping(&self, dst: usize, src: usize, len: usize) { + unsafe fn copy_nonoverlapping(&mut self, dst: usize, src: usize, len: usize) { debug_assert!( dst + len <= self.cap(), "cno dst={} src={} len={} cap={}", @@ -294,14 +300,14 @@ impl VecDeque { self.cap() ); unsafe { - ptr::copy_nonoverlapping(self.ptr().add(src), self.ptr().add(dst), len); + ptr::copy_nonoverlapping(self.ptr().add(src), self.mut_ptr().add(dst), len); } } /// Copies a potentially wrapping block of memory len long from src to dest. /// (abs(dst - src) + len) must be no larger than cap() (There must be at /// most one continuous overlapping region between src and dest). 
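    /// For example, with `cap() == 8`, copying `len == 3` from `src == 6` to
    /// `dst == 1` reads the wrapped region `{6, 7, 0}` and writes `{1, 2, 3}`
    /// (here `abs(dst - src) + len == 8 <= cap()`, so the precondition holds).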
- unsafe fn wrap_copy(&self, dst: usize, src: usize, len: usize) { + unsafe fn wrap_copy(&mut self, dst: usize, src: usize, len: usize) { #[allow(dead_code)] fn diff(a: usize, b: usize) -> usize { if a <= b { b - a } else { a - b } @@ -442,13 +448,13 @@ impl VecDeque { let head_room = self.cap() - dst; if src.len() <= head_room { unsafe { - ptr::copy_nonoverlapping(src.as_ptr(), self.ptr().add(dst), src.len()); + ptr::copy_nonoverlapping(src.as_ptr(), self.mut_ptr().add(dst), src.len()); } } else { let (left, right) = src.split_at(head_room); unsafe { - ptr::copy_nonoverlapping(left.as_ptr(), self.ptr().add(dst), left.len()); - ptr::copy_nonoverlapping(right.as_ptr(), self.ptr(), right.len()); + ptr::copy_nonoverlapping(left.as_ptr(), self.mut_ptr().add(dst), left.len()); + ptr::copy_nonoverlapping(right.as_ptr(), self.mut_ptr(), right.len()); } } } @@ -563,7 +569,7 @@ impl VecDeque { // +1 since the ringbuffer always leaves one space empty let cap = cmp::max(capacity + 1, MINIMUM_CAPACITY + 1).next_power_of_two(); - VecDeque { tail: 0, head: 0, buf: RawVec::with_capacity_in(cap, alloc) } + VecDeque { tail: 0, head: 0, buf: Box::new_uninit_slice_in(cap, alloc) } } /// Provides a reference to the element at the given index. @@ -614,7 +620,7 @@ impl VecDeque { pub fn get_mut(&mut self, index: usize) -> Option<&mut T> { if index < self.len() { let idx = self.wrap_add(self.tail, index); - unsafe { Some(&mut *self.ptr().add(idx)) } + unsafe { Some(&mut *self.mut_ptr().add(idx)) } } else { None } @@ -649,7 +655,7 @@ impl VecDeque { assert!(j < self.len()); let ri = self.wrap_add(self.tail, i); let rj = self.wrap_add(self.tail, j); - unsafe { ptr::swap(self.ptr().add(ri), self.ptr().add(rj)) } + unsafe { ptr::swap(self.mut_ptr().add(ri), self.mut_ptr().add(rj)) } } /// Returns the number of elements the deque can hold without @@ -993,7 +999,7 @@ impl VecDeque { #[unstable(feature = "allocator_api", issue = "32838")] #[inline] pub fn allocator(&self) -> &A { - self.buf.allocator() + Box::allocator(&self.buf) } /// Returns a front-to-back iterator. @@ -1037,7 +1043,7 @@ impl VecDeque { pub fn iter_mut(&mut self) -> IterMut<'_, T> { // SAFETY: The internal `IterMut` safety invariant is established because the // `ring` we create is a dereferenceable slice for lifetime '_. - let ring = ptr::slice_from_raw_parts_mut(self.ptr(), self.cap()); + let ring = ptr::slice_from_raw_parts_mut(self.mut_ptr(), self.cap()); unsafe { IterMut::new(ring, self.tail, self.head, PhantomData) } } @@ -1234,7 +1240,7 @@ impl VecDeque { // SAFETY: The internal `IterMut` safety invariant is established because the // `ring` we create is a dereferenceable slice for lifetime '_. - let ring = ptr::slice_from_raw_parts_mut(self.ptr(), self.cap()); + let ring = ptr::slice_from_raw_parts_mut(self.mut_ptr(), self.cap()); unsafe { IterMut::new(ring, tail, head, PhantomData) } } @@ -2086,12 +2092,16 @@ impl VecDeque { // `at` lies in the first half. let amount_in_first = first_len - at; - ptr::copy_nonoverlapping(first_half.as_ptr().add(at), other.ptr(), amount_in_first); + ptr::copy_nonoverlapping( + first_half.as_ptr().add(at), + other.mut_ptr(), + amount_in_first, + ); // just take all of the second half. 
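                // Nonoverlapping is sound here: `other` was freshly allocated
                // to hold the split-off elements, so its buffer cannot alias
                // `self`'s.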
ptr::copy_nonoverlapping( second_half.as_ptr(), - other.ptr().add(amount_in_first), + other.mut_ptr().add(amount_in_first), second_len, ); } else { @@ -2101,7 +2111,7 @@ impl VecDeque { let amount_in_second = second_len - offset; ptr::copy_nonoverlapping( second_half.as_ptr().add(offset), - other.ptr(), + other.mut_ptr(), amount_in_second, ); } @@ -2365,7 +2375,7 @@ impl VecDeque { }; } - let buf = self.buf.ptr(); + let buf = self.buf.as_mut_ptr().cast::(); let cap = self.cap(); let len = self.len(); @@ -3041,7 +3051,7 @@ impl From> for VecDeque { unsafe { let (other_buf, len, capacity, alloc) = other.into_raw_parts_with_alloc(); - let buf = RawVec::from_raw_parts_in(other_buf, capacity, alloc); + let buf = Box::from_raw_slice_parts_in(other_buf.cast(), capacity, alloc); VecDeque { tail: 0, head: len, buf } } } @@ -3082,8 +3092,8 @@ impl From> for Vec { other.make_contiguous(); unsafe { - let other = ManuallyDrop::new(other); - let buf = other.buf.ptr(); + let mut other = ManuallyDrop::new(other); + let buf = other.buf.as_mut_ptr().cast::(); let len = other.len(); let cap = other.cap(); let alloc = ptr::read(other.allocator()); @@ -3113,7 +3123,7 @@ impl From<[T; N]> for VecDeque { if mem::size_of::() != 0 { // SAFETY: VecDeque::with_capacity ensures that there is enough capacity. unsafe { - ptr::copy_nonoverlapping(arr.as_ptr(), deq.ptr(), N); + ptr::copy_nonoverlapping(arr.as_ptr(), deq.mut_ptr(), N); } } deq.tail = 0; diff --git a/library/alloc/src/lib.rs b/library/alloc/src/lib.rs index 4d2dc4ecee0b8..e487b478e888e 100644 --- a/library/alloc/src/lib.rs +++ b/library/alloc/src/lib.rs @@ -193,8 +193,6 @@ extern crate test; #[macro_use] mod macros; -mod raw_vec; - // Heaps provided for low-level allocation strategies pub mod alloc; diff --git a/library/alloc/src/raw_vec.rs b/library/alloc/src/raw_vec.rs deleted file mode 100644 index 9dbac3c36ffb2..0000000000000 --- a/library/alloc/src/raw_vec.rs +++ /dev/null @@ -1,519 +0,0 @@ -#![unstable(feature = "raw_vec_internals", reason = "unstable const warnings", issue = "none")] - -use core::alloc::LayoutError; -use core::cmp; -use core::intrinsics; -use core::mem::{self, ManuallyDrop, MaybeUninit}; -use core::ops::Drop; -use core::ptr::{self, NonNull, Unique}; -use core::slice; - -#[cfg(not(no_global_oom_handling))] -use crate::alloc::handle_alloc_error; -use crate::alloc::{Allocator, Global, Layout}; -use crate::boxed::Box; -use crate::collections::TryReserveError; -use crate::collections::TryReserveErrorKind::*; - -#[cfg(test)] -mod tests; - -#[cfg(not(no_global_oom_handling))] -enum AllocInit { - /// The contents of the new memory are uninitialized. - Uninitialized, - /// The new memory is guaranteed to be zeroed. - Zeroed, -} - -/// A low-level utility for more ergonomically allocating, reallocating, and deallocating -/// a buffer of memory on the heap without having to worry about all the corner cases -/// involved. This type is excellent for building your own data structures like Vec and VecDeque. -/// In particular: -/// -/// * Produces `Unique::dangling()` on zero-sized types. -/// * Produces `Unique::dangling()` on zero-length allocations. -/// * Avoids freeing `Unique::dangling()`. -/// * Catches all overflows in capacity computations (promotes them to "capacity overflow" panics). -/// * Guards against 32-bit systems allocating more than isize::MAX bytes. -/// * Guards against overflowing your length. -/// * Calls `handle_alloc_error` for fallible allocations. 
-/// * Contains a `ptr::Unique` and thus endows the user with all related benefits. -/// * Uses the excess returned from the allocator to use the largest available capacity. -/// -/// This type does not in anyway inspect the memory that it manages. When dropped it *will* -/// free its memory, but it *won't* try to drop its contents. It is up to the user of `RawVec` -/// to handle the actual things *stored* inside of a `RawVec`. -/// -/// Note that the excess of a zero-sized types is always infinite, so `capacity()` always returns -/// `usize::MAX`. This means that you need to be careful when round-tripping this type with a -/// `Box<[T]>`, since `capacity()` won't yield the length. -#[allow(missing_debug_implementations)] -pub(crate) struct RawVec { - ptr: Unique, - cap: usize, - alloc: A, -} - -impl RawVec { - /// HACK(Centril): This exists because stable `const fn` can only call stable `const fn`, so - /// they cannot call `Self::new()`. - /// - /// If you change `RawVec::new` or dependencies, please take care to not introduce anything - /// that would truly const-call something unstable. - pub const NEW: Self = Self::new(); - - /// Creates the biggest possible `RawVec` (on the system heap) - /// without allocating. If `T` has positive size, then this makes a - /// `RawVec` with capacity `0`. If `T` is zero-sized, then it makes a - /// `RawVec` with capacity `usize::MAX`. Useful for implementing - /// delayed allocation. - #[must_use] - pub const fn new() -> Self { - Self::new_in(Global) - } - - /// Creates a `RawVec` (on the system heap) with exactly the - /// capacity and alignment requirements for a `[T; capacity]`. This is - /// equivalent to calling `RawVec::new` when `capacity` is `0` or `T` is - /// zero-sized. Note that if `T` is zero-sized this means you will - /// *not* get a `RawVec` with the requested capacity. - /// - /// # Panics - /// - /// Panics if the requested capacity exceeds `isize::MAX` bytes. - /// - /// # Aborts - /// - /// Aborts on OOM. - #[cfg(not(any(no_global_oom_handling, test)))] - #[must_use] - #[inline] - pub fn with_capacity(capacity: usize) -> Self { - Self::with_capacity_in(capacity, Global) - } - - /// Like `with_capacity`, but guarantees the buffer is zeroed. - #[cfg(not(any(no_global_oom_handling, test)))] - #[must_use] - #[inline] - pub fn with_capacity_zeroed(capacity: usize) -> Self { - Self::with_capacity_zeroed_in(capacity, Global) - } -} - -impl RawVec { - // Tiny Vecs are dumb. Skip to: - // - 8 if the element size is 1, because any heap allocators is likely - // to round up a request of less than 8 bytes to at least 8 bytes. - // - 4 if elements are moderate-sized (<= 1 KiB). - // - 1 otherwise, to avoid wasting too much space for very short Vecs. - pub(crate) const MIN_NON_ZERO_CAP: usize = if mem::size_of::() == 1 { - 8 - } else if mem::size_of::() <= 1024 { - 4 - } else { - 1 - }; - - /// Like `new`, but parameterized over the choice of allocator for - /// the returned `RawVec`. - #[rustc_allow_const_fn_unstable(const_fn)] - pub const fn new_in(alloc: A) -> Self { - // `cap: 0` means "unallocated". zero-sized types are ignored. - Self { ptr: Unique::dangling(), cap: 0, alloc } - } - - /// Like `with_capacity`, but parameterized over the choice of - /// allocator for the returned `RawVec`. 
- #[cfg(not(no_global_oom_handling))] - #[inline] - pub fn with_capacity_in(capacity: usize, alloc: A) -> Self { - Self::allocate_in(capacity, AllocInit::Uninitialized, alloc) - } - - /// Like `with_capacity_zeroed`, but parameterized over the choice - /// of allocator for the returned `RawVec`. - #[cfg(not(no_global_oom_handling))] - #[inline] - pub fn with_capacity_zeroed_in(capacity: usize, alloc: A) -> Self { - Self::allocate_in(capacity, AllocInit::Zeroed, alloc) - } - - /// Converts the entire buffer into `Box<[MaybeUninit]>` with the specified `len`. - /// - /// Note that this will correctly reconstitute any `cap` changes - /// that may have been performed. (See description of type for details.) - /// - /// # Safety - /// - /// * `len` must be greater than or equal to the most recently requested capacity, and - /// * `len` must be less than or equal to `self.capacity()`. - /// - /// Note, that the requested capacity and `self.capacity()` could differ, as - /// an allocator could overallocate and return a greater memory block than requested. - pub unsafe fn into_box(self, len: usize) -> Box<[MaybeUninit], A> { - // Sanity-check one half of the safety requirement (we cannot check the other half). - debug_assert!( - len <= self.capacity(), - "`len` must be smaller than or equal to `self.capacity()`" - ); - - let me = ManuallyDrop::new(self); - unsafe { - let slice = slice::from_raw_parts_mut(me.ptr() as *mut MaybeUninit, len); - Box::from_raw_in(slice, ptr::read(&me.alloc)) - } - } - - #[cfg(not(no_global_oom_handling))] - fn allocate_in(capacity: usize, init: AllocInit, alloc: A) -> Self { - // Don't allocate here because `Drop` will not deallocate when `capacity` is 0. - if mem::size_of::() == 0 || capacity == 0 { - Self::new_in(alloc) - } else { - // We avoid `unwrap_or_else` here because it bloats the amount of - // LLVM IR generated. - let layout = match Layout::array::(capacity) { - Ok(layout) => layout, - Err(_) => capacity_overflow(), - }; - match alloc_guard(layout.size()) { - Ok(_) => {} - Err(_) => capacity_overflow(), - } - let result = match init { - AllocInit::Uninitialized => alloc.allocate(layout), - AllocInit::Zeroed => alloc.allocate_zeroed(layout), - }; - let ptr = match result { - Ok(ptr) => ptr, - Err(_) => handle_alloc_error(layout), - }; - - // Allocators currently return a `NonNull<[u8]>` whose length - // matches the size requested. If that ever changes, the capacity - // here should change to `ptr.len() / mem::size_of::()`. - Self { - ptr: unsafe { Unique::new_unchecked(ptr.cast().as_ptr()) }, - cap: capacity, - alloc, - } - } - } - - /// Reconstitutes a `RawVec` from a pointer, capacity, and allocator. - /// - /// # Safety - /// - /// The `ptr` must be allocated (via the given allocator `alloc`), and with the given - /// `capacity`. - /// The `capacity` cannot exceed `isize::MAX` for sized types. (only a concern on 32-bit - /// systems). ZST vectors may have a capacity up to `usize::MAX`. - /// If the `ptr` and `capacity` come from a `RawVec` created via `alloc`, then this is - /// guaranteed. - #[inline] - pub unsafe fn from_raw_parts_in(ptr: *mut T, capacity: usize, alloc: A) -> Self { - Self { ptr: unsafe { Unique::new_unchecked(ptr) }, cap: capacity, alloc } - } - - /// Gets a raw pointer to the start of the allocation. Note that this is - /// `Unique::dangling()` if `capacity == 0` or `T` is zero-sized. In the former case, you must - /// be careful. 
- #[inline] - pub fn ptr(&self) -> *mut T { - self.ptr.as_ptr() - } - - /// Gets the capacity of the allocation. - /// - /// This will always be `usize::MAX` if `T` is zero-sized. - #[inline(always)] - pub fn capacity(&self) -> usize { - if mem::size_of::() == 0 { usize::MAX } else { self.cap } - } - - /// Returns a shared reference to the allocator backing this `RawVec`. - pub fn allocator(&self) -> &A { - &self.alloc - } - - fn current_memory(&self) -> Option<(NonNull, Layout)> { - if mem::size_of::() == 0 || self.cap == 0 { - None - } else { - // We have an allocated chunk of memory, so we can bypass runtime - // checks to get our current layout. - unsafe { - let layout = Layout::array::(self.cap).unwrap_unchecked(); - Some((self.ptr.cast().into(), layout)) - } - } - } - - /// Ensures that the buffer contains at least enough space to hold `len + - /// additional` elements. If it doesn't already have enough capacity, will - /// reallocate enough space plus comfortable slack space to get amortized - /// *O*(1) behavior. Will limit this behavior if it would needlessly cause - /// itself to panic. - /// - /// If `len` exceeds `self.capacity()`, this may fail to actually allocate - /// the requested space. This is not really unsafe, but the unsafe - /// code *you* write that relies on the behavior of this function may break. - /// - /// This is ideal for implementing a bulk-push operation like `extend`. - /// - /// # Panics - /// - /// Panics if the new capacity exceeds `isize::MAX` bytes. - /// - /// # Aborts - /// - /// Aborts on OOM. - #[cfg(not(no_global_oom_handling))] - #[inline] - pub fn reserve(&mut self, len: usize, additional: usize) { - // Callers expect this function to be very cheap when there is already sufficient capacity. - // Therefore, we move all the resizing and error-handling logic from grow_amortized and - // handle_reserve behind a call, while making sure that this function is likely to be - // inlined as just a comparison and a call if the comparison fails. - #[cold] - fn do_reserve_and_handle( - slf: &mut RawVec, - len: usize, - additional: usize, - ) { - handle_reserve(slf.grow_amortized(len, additional)); - } - - if self.needs_to_grow(len, additional) { - do_reserve_and_handle(self, len, additional); - } - } - - /// A specialized version of `reserve()` used only by the hot and - /// oft-instantiated `Vec::push()`, which does its own capacity check. - #[cfg(not(no_global_oom_handling))] - #[inline(never)] - pub fn reserve_for_push(&mut self, len: usize) { - handle_reserve(self.grow_amortized(len, 1)); - } - - /// The same as `reserve`, but returns on errors instead of panicking or aborting. - pub fn try_reserve(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> { - if self.needs_to_grow(len, additional) { - self.grow_amortized(len, additional) - } else { - Ok(()) - } - } - - /// Ensures that the buffer contains at least enough space to hold `len + - /// additional` elements. If it doesn't already, will reallocate the - /// minimum possible amount of memory necessary. Generally this will be - /// exactly the amount of memory necessary, but in principle the allocator - /// is free to give back more than we asked for. - /// - /// If `len` exceeds `self.capacity()`, this may fail to actually allocate - /// the requested space. This is not really unsafe, but the unsafe code - /// *you* write that relies on the behavior of this function may break. - /// - /// # Panics - /// - /// Panics if the new capacity exceeds `isize::MAX` bytes. 
- /// - /// # Aborts - /// - /// Aborts on OOM. - #[cfg(not(no_global_oom_handling))] - pub fn reserve_exact(&mut self, len: usize, additional: usize) { - handle_reserve(self.try_reserve_exact(len, additional)); - } - - /// The same as `reserve_exact`, but returns on errors instead of panicking or aborting. - pub fn try_reserve_exact( - &mut self, - len: usize, - additional: usize, - ) -> Result<(), TryReserveError> { - if self.needs_to_grow(len, additional) { self.grow_exact(len, additional) } else { Ok(()) } - } - - /// Shrinks the buffer down to the specified capacity. If the given amount - /// is 0, actually completely deallocates. - /// - /// # Panics - /// - /// Panics if the given amount is *larger* than the current capacity. - /// - /// # Aborts - /// - /// Aborts on OOM. - #[cfg(not(no_global_oom_handling))] - pub fn shrink_to_fit(&mut self, cap: usize) { - handle_reserve(self.shrink(cap)); - } -} - -impl RawVec { - /// Returns if the buffer needs to grow to fulfill the needed extra capacity. - /// Mainly used to make inlining reserve-calls possible without inlining `grow`. - fn needs_to_grow(&self, len: usize, additional: usize) -> bool { - additional > self.capacity().wrapping_sub(len) - } - - fn set_ptr_and_cap(&mut self, ptr: NonNull<[u8]>, cap: usize) { - // Allocators currently return a `NonNull<[u8]>` whose length matches - // the size requested. If that ever changes, the capacity here should - // change to `ptr.len() / mem::size_of::()`. - self.ptr = unsafe { Unique::new_unchecked(ptr.cast().as_ptr()) }; - self.cap = cap; - } - - // This method is usually instantiated many times. So we want it to be as - // small as possible, to improve compile times. But we also want as much of - // its contents to be statically computable as possible, to make the - // generated code run faster. Therefore, this method is carefully written - // so that all of the code that depends on `T` is within it, while as much - // of the code that doesn't depend on `T` as possible is in functions that - // are non-generic over `T`. - fn grow_amortized(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> { - // This is ensured by the calling contexts. - debug_assert!(additional > 0); - - if mem::size_of::() == 0 { - // Since we return a capacity of `usize::MAX` when `elem_size` is - // 0, getting to here necessarily means the `RawVec` is overfull. - return Err(CapacityOverflow.into()); - } - - // Nothing we can really do about these checks, sadly. - let required_cap = len.checked_add(additional).ok_or(CapacityOverflow)?; - - // This guarantees exponential growth. The doubling cannot overflow - // because `cap <= isize::MAX` and the type of `cap` is `usize`. - let cap = cmp::max(self.cap * 2, required_cap); - let cap = cmp::max(Self::MIN_NON_ZERO_CAP, cap); - - let new_layout = Layout::array::(cap); - - // `finish_grow` is non-generic over `T`. - let ptr = finish_grow(new_layout, self.current_memory(), &mut self.alloc)?; - self.set_ptr_and_cap(ptr, cap); - Ok(()) - } - - // The constraints on this method are much the same as those on - // `grow_amortized`, but this method is usually instantiated less often so - // it's less critical. - fn grow_exact(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> { - if mem::size_of::() == 0 { - // Since we return a capacity of `usize::MAX` when the type size is - // 0, getting to here necessarily means the `RawVec` is overfull. 
- return Err(CapacityOverflow.into()); - } - - let cap = len.checked_add(additional).ok_or(CapacityOverflow)?; - let new_layout = Layout::array::(cap); - - // `finish_grow` is non-generic over `T`. - let ptr = finish_grow(new_layout, self.current_memory(), &mut self.alloc)?; - self.set_ptr_and_cap(ptr, cap); - Ok(()) - } - - fn shrink(&mut self, cap: usize) -> Result<(), TryReserveError> { - assert!(cap <= self.capacity(), "Tried to shrink to a larger capacity"); - - let (ptr, layout) = if let Some(mem) = self.current_memory() { mem } else { return Ok(()) }; - - let ptr = unsafe { - // `Layout::array` cannot overflow here because it would have - // overflowed earlier when capacity was larger. - let new_layout = Layout::array::(cap).unwrap_unchecked(); - self.alloc - .shrink(ptr, layout, new_layout) - .map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })? - }; - self.set_ptr_and_cap(ptr, cap); - Ok(()) - } -} - -// This function is outside `RawVec` to minimize compile times. See the comment -// above `RawVec::grow_amortized` for details. (The `A` parameter isn't -// significant, because the number of different `A` types seen in practice is -// much smaller than the number of `T` types.) -#[inline(never)] -fn finish_grow( - new_layout: Result, - current_memory: Option<(NonNull, Layout)>, - alloc: &mut A, -) -> Result, TryReserveError> -where - A: Allocator, -{ - // Check for the error here to minimize the size of `RawVec::grow_*`. - let new_layout = new_layout.map_err(|_| CapacityOverflow)?; - - alloc_guard(new_layout.size())?; - - let memory = if let Some((ptr, old_layout)) = current_memory { - debug_assert_eq!(old_layout.align(), new_layout.align()); - unsafe { - // The allocator checks for alignment equality - intrinsics::assume(old_layout.align() == new_layout.align()); - alloc.grow(ptr, old_layout, new_layout) - } - } else { - alloc.allocate(new_layout) - }; - - memory.map_err(|_| AllocError { layout: new_layout, non_exhaustive: () }.into()) -} - -unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawVec { - /// Frees the memory owned by the `RawVec` *without* trying to drop its contents. - fn drop(&mut self) { - if let Some((ptr, layout)) = self.current_memory() { - unsafe { self.alloc.deallocate(ptr, layout) } - } - } -} - -// Central function for reserve error handling. -#[cfg(not(no_global_oom_handling))] -#[inline] -fn handle_reserve(result: Result<(), TryReserveError>) { - match result.map_err(|e| e.kind()) { - Err(CapacityOverflow) => capacity_overflow(), - Err(AllocError { layout, .. }) => handle_alloc_error(layout), - Ok(()) => { /* yay */ } - } -} - -// We need to guarantee the following: -// * We don't ever allocate `> isize::MAX` byte-size objects. -// * We don't overflow `usize::MAX` and actually allocate too little. -// -// On 64-bit we just need to check for overflow since trying to allocate -// `> isize::MAX` bytes will surely fail. On 32-bit and 16-bit we need to add -// an extra guard for this in case we're running on a platform which can use -// all 4GB in user-space, e.g., PAE or x32. - -#[inline] -fn alloc_guard(alloc_size: usize) -> Result<(), TryReserveError> { - if usize::BITS < 64 && alloc_size > isize::MAX as usize { - Err(CapacityOverflow.into()) - } else { - Ok(()) - } -} - -// One central function responsible for reporting capacity overflows. This'll -// ensure that the code generation related to these panics is minimal as there's -// only one location which panics rather than a bunch throughout the module. 
-#[cfg(not(no_global_oom_handling))] -fn capacity_overflow() -> ! { - panic!("capacity overflow"); -} diff --git a/library/alloc/src/vec/into_iter.rs b/library/alloc/src/vec/into_iter.rs index 8134eea570ad7..9ab805d4ade92 100644 --- a/library/alloc/src/vec/into_iter.rs +++ b/library/alloc/src/vec/into_iter.rs @@ -1,7 +1,7 @@ #[cfg(not(no_global_oom_handling))] use super::AsVecIntoIter; use crate::alloc::{Allocator, Global}; -use crate::raw_vec::RawVec; +use crate::boxed::Box; use core::fmt; use core::intrinsics::arith_offset; use core::iter::{ @@ -113,7 +113,7 @@ impl IntoIter { // struct and then overwriting &mut self. // this creates less assembly self.cap = 0; - self.buf = unsafe { NonNull::new_unchecked(RawVec::NEW.ptr()) }; + self.buf = NonNull::dangling(); self.ptr = self.buf.as_ptr(); self.end = self.buf.as_ptr(); @@ -322,7 +322,7 @@ unsafe impl<#[may_dangle] T, A: Allocator> Drop for IntoIter { // `IntoIter::alloc` is not used anymore after this and will be dropped by RawVec let alloc = ManuallyDrop::take(&mut self.0.alloc); // RawVec handles deallocation - let _ = RawVec::from_raw_parts_in(self.0.buf.as_ptr(), self.0.cap, alloc); + let _ = Box::from_raw_slice_parts_in(self.0.buf.as_ptr(), self.0.cap, alloc); } } } diff --git a/library/alloc/src/vec/mod.rs b/library/alloc/src/vec/mod.rs index cbb5b0627b77d..8b50fc0725ed2 100644 --- a/library/alloc/src/vec/mod.rs +++ b/library/alloc/src/vec/mod.rs @@ -73,7 +73,6 @@ use crate::alloc::{Allocator, Global}; use crate::borrow::{Cow, ToOwned}; use crate::boxed::Box; use crate::collections::TryReserveError; -use crate::raw_vec::RawVec; #[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")] pub use self::drain_filter::DrainFilter; @@ -398,7 +397,7 @@ mod spec_extend; #[cfg_attr(not(test), rustc_diagnostic_item = "Vec")] #[rustc_insignificant_dtor] pub struct Vec { - buf: RawVec, + buf: Box<[MaybeUninit], A>, len: usize, } @@ -422,7 +421,7 @@ impl Vec { #[stable(feature = "rust1", since = "1.0.0")] #[must_use] pub const fn new() -> Self { - Vec { buf: RawVec::NEW, len: 0 } + Vec { buf: Box::<[MaybeUninit]>::empty(), len: 0 } } /// Constructs a new, empty `Vec` with the specified capacity. @@ -561,7 +560,7 @@ impl Vec { #[inline] #[unstable(feature = "allocator_api", issue = "32838")] pub const fn new_in(alloc: A) -> Self { - Vec { buf: RawVec::new_in(alloc), len: 0 } + Vec { buf: Box::empty_in(alloc), len: 0 } } /// Constructs a new, empty `Vec` with the specified capacity with the provided @@ -610,7 +609,7 @@ impl Vec { #[inline] #[unstable(feature = "allocator_api", issue = "32838")] pub fn with_capacity_in(capacity: usize, alloc: A) -> Self { - Vec { buf: RawVec::with_capacity_in(capacity, alloc), len: 0 } + Vec { buf: Box::new_uninit_slice_in(capacity, alloc), len: 0 } } /// Creates a `Vec` directly from the raw components of another vector. @@ -686,7 +685,9 @@ impl Vec { #[inline] #[unstable(feature = "allocator_api", issue = "32838")] pub unsafe fn from_raw_parts_in(ptr: *mut T, length: usize, capacity: usize, alloc: A) -> Self { - unsafe { Vec { buf: RawVec::from_raw_parts_in(ptr, capacity, alloc), len: length } } + unsafe { + Vec { buf: Box::from_raw_slice_parts_in(ptr.cast(), capacity, alloc), len: length } + } } /// Decomposes a `Vec` into its raw components. 
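These `Vec` hunks all rest on one property of the new representation: dropping a `Box<[MaybeUninit<T>], A>` frees the allocation without running any `T` destructors, which is exactly the `Drop` behavior `RawVec` provided. A minimal standalone sketch of that property (assuming the nightly `new_uninit` feature, which this series already uses):

    #![feature(new_uninit)] // assumed: nightly-only at the time of this series

    use std::mem::MaybeUninit;

    fn main() {
        // Space for four `String`s; none of them is ever initialized.
        let buf: Box<[MaybeUninit<String>]> = Box::new_uninit_slice(4);

        // Dropping the box only deallocates: `MaybeUninit` never runs
        // `String`'s destructor, so no uninitialized memory is touched.
        drop(buf);
    }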
@@ -787,7 +788,7 @@ impl Vec { #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn capacity(&self) -> usize { - self.buf.capacity() + self.buf.len() } /// Reserves capacity for at least `additional` more elements to be inserted @@ -1001,8 +1002,7 @@ impl Vec { self.shrink_to_fit(); let me = ManuallyDrop::new(self); let buf = ptr::read(&me.buf); - let len = me.len(); - buf.into_box(len).assume_init() + buf.assume_init() } } @@ -1135,7 +1135,7 @@ impl Vec { pub fn as_ptr(&self) -> *const T { // We shadow the slice method of the same name to avoid going through // `deref`, which creates an intermediate reference. - let ptr = self.buf.ptr(); + let ptr = self.buf.as_ptr().cast::(); unsafe { assume(!ptr.is_null()); } @@ -1171,7 +1171,7 @@ impl Vec { pub fn as_mut_ptr(&mut self) -> *mut T { // We shadow the slice method of the same name to avoid going through // `deref_mut`, which creates an intermediate reference. - let ptr = self.buf.ptr(); + let ptr = self.buf.as_mut_ptr().cast::(); unsafe { assume(!ptr.is_null()); } @@ -1182,7 +1182,7 @@ impl Vec { #[unstable(feature = "allocator_api", issue = "32838")] #[inline] pub fn allocator(&self) -> &A { - self.buf.allocator() + Box::allocator(&self.buf) } /// Forces the length of the vector to `new_len`. @@ -1351,7 +1351,7 @@ impl Vec { } // space for the new element - if len == self.buf.capacity() { + if len == self.capacity() { self.reserve(1); } @@ -1727,7 +1727,7 @@ impl Vec { pub fn push(&mut self, value: T) { // This will panic or abort if we would allocate > isize::MAX bytes // or if the length increment would overflow for zero-sized types. - if self.len == self.buf.capacity() { + if self.len == self.capacity() { self.buf.reserve_for_push(self.len); } unsafe { @@ -2094,7 +2094,7 @@ impl Vec { unsafe { slice::from_raw_parts_mut( self.as_mut_ptr().add(self.len) as *mut MaybeUninit, - self.buf.capacity() - self.len, + self.capacity() - self.len, ) } } @@ -2168,11 +2168,11 @@ impl Vec { let ptr = self.as_mut_ptr(); // SAFETY: // - `ptr` is guaranteed to be valid for `self.len` elements - // - but the allocation extends out to `self.buf.capacity()` elements, possibly + // - but the allocation extends out to `self.capacity()` elements, possibly // uninitialized let spare_ptr = unsafe { ptr.add(self.len) }; let spare_ptr = spare_ptr.cast::>(); - let spare_len = self.buf.capacity() - self.len; + let spare_len = self.capacity() - self.len; // SAFETY: // - `ptr` is guaranteed to be valid for `self.len` elements @@ -2640,7 +2640,7 @@ impl IntoIterator for Vec { } else { begin.add(me.len()) as *const T }; - let cap = me.buf.capacity(); + let cap = me.capacity(); IntoIter { buf: NonNull::new_unchecked(begin), phantom: PhantomData, diff --git a/library/alloc/src/vec/spec_from_elem.rs b/library/alloc/src/vec/spec_from_elem.rs index de610174783c4..8f0dfec758434 100644 --- a/library/alloc/src/vec/spec_from_elem.rs +++ b/library/alloc/src/vec/spec_from_elem.rs @@ -1,5 +1,5 @@ use crate::alloc::Allocator; -use crate::raw_vec::RawVec; +use crate::boxed::Box; use core::ptr::{self}; use super::{ExtendElement, IsZero, Vec}; @@ -21,7 +21,7 @@ impl SpecFromElem for i8 { #[inline] fn from_elem(elem: i8, n: usize, alloc: A) -> Vec { if elem == 0 { - return Vec { buf: RawVec::with_capacity_zeroed_in(n, alloc), len: n }; + return Vec { buf: Box::new_zeroed_slice_in(n, alloc), len: n }; } unsafe { let mut v = Vec::with_capacity_in(n, alloc); @@ -36,7 +36,7 @@ impl SpecFromElem for u8 { #[inline] fn from_elem(elem: u8, n: usize, alloc: A) -> Vec { if elem == 0 { - 
return Vec { buf: RawVec::with_capacity_zeroed_in(n, alloc), len: n }; + return Vec { buf: Box::new_zeroed_slice_in(n, alloc), len: n }; } unsafe { let mut v = Vec::with_capacity_in(n, alloc); @@ -51,7 +51,7 @@ impl SpecFromElem for T { #[inline] fn from_elem(elem: T, n: usize, alloc: A) -> Vec { if elem.is_zero() { - return Vec { buf: RawVec::with_capacity_zeroed_in(n, alloc), len: n }; + return Vec { buf: Box::new_zeroed_slice_in(n, alloc), len: n }; } let mut v = Vec::with_capacity_in(n, alloc); v.extend_with(n, ExtendElement(elem)); diff --git a/library/alloc/src/vec/spec_from_iter_nested.rs b/library/alloc/src/vec/spec_from_iter_nested.rs index f915ebb86e5a5..37fd0721dfff6 100644 --- a/library/alloc/src/vec/spec_from_iter_nested.rs +++ b/library/alloc/src/vec/spec_from_iter_nested.rs @@ -2,7 +2,8 @@ use core::cmp; use core::iter::TrustedLen; use core::ptr; -use crate::raw_vec::RawVec; +// use crate::raw_vec::RawVec; +use crate::boxed::Box; use super::{SpecExtend, Vec}; @@ -28,7 +29,7 @@ where Some(element) => { let (lower, _) = iterator.size_hint(); let initial_capacity = - cmp::max(RawVec::::MIN_NON_ZERO_CAP, lower.saturating_add(1)); + cmp::max(Box::<[T]>::MIN_NON_ZERO_CAP, lower.saturating_add(1)); let mut vector = Vec::with_capacity(initial_capacity); unsafe { // SAFETY: We requested capacity at least 1 diff --git a/library/core/src/ptr/unique.rs b/library/core/src/ptr/unique.rs index 64616142b4188..82065b4b4b065 100644 --- a/library/core/src/ptr/unique.rs +++ b/library/core/src/ptr/unique.rs @@ -72,6 +72,26 @@ impl Unique { pub const fn dangling() -> Self { Self::from(NonNull::dangling()) } + + /// Creates a new `Unique` that is dangling, but well-aligned. + /// + /// This is useful for initializing types which lazily allocate, like + /// `Vec::new` does. + /// + /// Note that the pointer value may potentially represent a valid pointer to + /// a `T`, which means this must not be used as a "not yet initialized" + /// sentinel value. Types that lazily allocate must track initialization by + /// some other means. + #[must_use] + #[inline] + pub const fn dangling_slice() -> Unique<[T]> { + // SAFETY: mem::align_of() returns a valid, non-null pointer. The + // conditions to call new_unchecked() are thus respected. + unsafe { + let slice = core::slice::from_raw_parts_mut(mem::align_of::() as *mut T, 0); + Unique::new_unchecked(slice) + } + } } #[unstable(feature = "ptr_internals", issue = "none")] From 8be258501fc624533d4309bd37be7a625be2a928 Mon Sep 17 00:00:00 2001 From: Conrad Ludgate Date: Sun, 27 Feb 2022 14:20:32 +0000 Subject: [PATCH 02/14] fix issue --- library/alloc/src/boxed.rs | 35 +++++++++++++++++++++++++---------- library/alloc/src/vec/mod.rs | 2 +- 2 files changed, 26 insertions(+), 11 deletions(-) diff --git a/library/alloc/src/boxed.rs b/library/alloc/src/boxed.rs index d0782dfae3a2e..5f44a6d427023 100644 --- a/library/alloc/src/boxed.rs +++ b/library/alloc/src/boxed.rs @@ -714,6 +714,16 @@ impl Box<[T]> { } } + /// HACK(conradludgate): This exists because stable `const fn` can only call stable `const fn`, so + /// they cannot call `Self::empty()`. + /// + /// If you change `Box<[T]>::empty` or dependencies, please take care to not introduce anything + /// that would truly const-call something unstable. 
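    ///
    /// The one consumer is the stable `const fn Vec::new`, which this patch
    /// changes to read `Vec { buf: Box::<[MaybeUninit<T>]>::EMPTY, len: 0 }`.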
+ pub(crate) const EMPTY: Self = Self::empty(); + + /// Constructs a new empty boxed slice + #[unstable(feature = "allocator_api", issue = "32838")] + #[inline] pub const fn empty() -> Self { Self::empty_in(Global) } @@ -728,6 +738,9 @@ enum AllocInit { } impl Box<[T], A> { + /// Constructs a new empty boxed slice + #[unstable(feature = "allocator_api", issue = "32838")] + #[inline] pub const fn empty_in(alloc: A) -> Self { Box(Unique::dangling_slice(), alloc) } @@ -822,9 +835,7 @@ impl Box<[T], A> { Self::allocate_in(len, AllocInit::Zeroed, alloc) } - #[unstable(feature = "allocator_api", issue = "32838")] - #[inline] - pub unsafe fn from_raw_slice_parts_in(ptr: *mut T, len: usize, alloc: A) -> Self { + pub(crate) unsafe fn from_raw_slice_parts_in(ptr: *mut T, len: usize, alloc: A) -> Self { unsafe { let raw = core::slice::from_raw_parts_mut(ptr, len); Self::from_raw_in(raw, alloc) @@ -966,7 +977,7 @@ impl Box<[mem::MaybeUninit], A> { /// Aborts on OOM. #[cfg(not(no_global_oom_handling))] #[inline] - pub fn reserve(&mut self, len: usize, additional: usize) { + pub(crate) fn reserve(&mut self, len: usize, additional: usize) { handle_reserve(self.grow_exact(len, additional)) } @@ -974,7 +985,7 @@ impl Box<[mem::MaybeUninit], A> { /// oft-instantiated `Vec::push()`, which does its own capacity check. #[cfg(not(no_global_oom_handling))] #[inline(never)] - pub fn reserve_for_push(&mut self, len: usize) { + pub(crate) fn reserve_for_push(&mut self, len: usize) { handle_reserve(self.grow_amortized(len, 1)); } @@ -989,12 +1000,16 @@ impl Box<[mem::MaybeUninit], A> { /// /// Aborts on OOM. #[cfg(not(no_global_oom_handling))] - pub fn shrink_to_fit(&mut self, cap: usize) { + pub(crate) fn shrink_to_fit(&mut self, cap: usize) { handle_reserve(self.shrink(cap)); } /// The same as `reserve`, but returns on errors instead of panicking or aborting. - pub fn try_reserve(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> { + pub(crate) fn try_reserve( + &mut self, + len: usize, + additional: usize, + ) -> Result<(), TryReserveError> { self.grow_amortized(len, additional) } @@ -1016,12 +1031,12 @@ impl Box<[mem::MaybeUninit], A> { /// /// Aborts on OOM. #[cfg(not(no_global_oom_handling))] - pub fn reserve_exact(&mut self, len: usize, additional: usize) { + pub(crate) fn reserve_exact(&mut self, len: usize, additional: usize) { handle_reserve(self.try_reserve_exact(len, additional)); } /// The same as `reserve_exact`, but returns on errors instead of panicking or aborting. - pub fn try_reserve_exact( + pub(crate) fn try_reserve_exact( &mut self, len: usize, additional: usize, @@ -2299,7 +2314,7 @@ where #[inline] fn handle_reserve(result: Result<(), TryReserveError>) { match result.map_err(|e| e.kind()) { - Err(CapacityOverflow) => capacity_overflow(), + Err(TryReserveErrorKind::CapacityOverflow) => capacity_overflow(), Err(TryReserveErrorKind::AllocError { layout, .. }) => handle_alloc_error(layout), Ok(()) => { /* yay */ } } diff --git a/library/alloc/src/vec/mod.rs b/library/alloc/src/vec/mod.rs index 8b50fc0725ed2..858da44fffb0e 100644 --- a/library/alloc/src/vec/mod.rs +++ b/library/alloc/src/vec/mod.rs @@ -421,7 +421,7 @@ impl Vec { #[stable(feature = "rust1", since = "1.0.0")] #[must_use] pub const fn new() -> Self { - Vec { buf: Box::<[MaybeUninit]>::empty(), len: 0 } + Vec { buf: Box::<[MaybeUninit]>::EMPTY, len: 0 } } /// Constructs a new, empty `Vec` with the specified capacity. 
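The `EMPTY` associated const exists so that a stable `const fn` never textually calls an unstable one; evaluating a const item is allowed. A self-contained sketch of the pattern, with a hypothetical `Buf` standing in for the real `Box<[MaybeUninit<T>]>` internals:

    // Stand-in for `Box<[MaybeUninit<T>]>`; pretend `empty()` carries an
    // #[unstable] attribute that stable const fns may not call through.
    struct Buf;

    impl Buf {
        const fn empty() -> Buf {
            Buf
        }

        // Evaluating a const item is not a const-fn call, which is the
        // loophole the `EMPTY` hack relies on.
        const EMPTY: Buf = Buf::empty();
    }

    // Models the stable `const fn Vec::new`.
    const fn vec_new() -> Buf {
        Buf::EMPTY
    }

    fn main() {
        const _V: Buf = vec_new();
    }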
From 7a7e7d74d396c2c1770dd0a53b1241b296dfb342 Mon Sep 17 00:00:00 2001 From: Conrad Ludgate Date: Mon, 28 Feb 2022 08:37:40 +0000 Subject: [PATCH 03/14] refactor --- library/alloc/src/box_storage.rs | 293 ++++++++++++++++++ library/alloc/src/boxed.rs | 286 +---------------- .../alloc/src/collections/vec_deque/mod.rs | 4 +- library/alloc/src/lib.rs | 1 + library/alloc/src/vec/into_iter.rs | 7 +- library/alloc/src/vec/mod.rs | 6 +- .../alloc/src/vec/spec_from_iter_nested.rs | 8 +- library/alloc/src/vec/splice.rs | 1 + 8 files changed, 323 insertions(+), 283 deletions(-) create mode 100644 library/alloc/src/box_storage.rs diff --git a/library/alloc/src/box_storage.rs b/library/alloc/src/box_storage.rs new file mode 100644 index 0000000000000..f1956e5f97903 --- /dev/null +++ b/library/alloc/src/box_storage.rs @@ -0,0 +1,293 @@ +use crate::alloc::{handle_alloc_error, Allocator, Layout, LayoutError}; +use crate::boxed::Box; +use crate::collections::{TryReserveError, TryReserveErrorKind}; +use core::mem::{self}; + +pub(crate) unsafe fn from_raw_slice_parts_in( + ptr: *mut T, + len: usize, + alloc: A, +) -> Box<[T], A> { + unsafe { + let raw = core::slice::from_raw_parts_mut(ptr, len); + Box::from_raw_in(raw, alloc) + } +} + +pub(crate) fn box_into_raw_slice_parts( + mut this: Box<[T], A>, +) -> (*mut T, usize, A) { + let len = this.len(); + let ptr = this.as_mut_ptr(); + let (_, alloc) = Box::into_raw_with_allocator(this); + (ptr, len, alloc) +} + +pub(crate) trait BoxStorage { + const MIN_NON_ZERO_CAP: usize; + + /// Ensures that the buffer contains at least enough space to hold `len + + /// additional` elements. If it doesn't already have enough capacity, will + /// reallocate enough space plus comfortable slack space to get amortized + /// *O*(1) behavior. Will limit this behavior if it would needlessly cause + /// itself to panic. + /// + /// If `len` exceeds `self.capacity()`, this may fail to actually allocate + /// the requested space. This is not really unsafe, but the unsafe + /// code *you* write that relies on the behavior of this function may break. + /// + /// This is ideal for implementing a bulk-push operation like `extend`. + /// + /// # Panics + /// + /// Panics if the new capacity exceeds `isize::MAX` bytes. + /// + /// # Aborts + /// + /// Aborts on OOM. + #[cfg(not(no_global_oom_handling))] + #[inline] + fn reserve(&mut self, len: usize, additional: usize) { + handle_reserve(self.grow_exact(len, additional)) + } + + /// A specialized version of `reserve()` used only by the hot and + /// oft-instantiated `Vec::push()`, which does its own capacity check. + #[cfg(not(no_global_oom_handling))] + #[inline(never)] + fn reserve_for_push(&mut self, len: usize) { + handle_reserve(self.grow_amortized(len, 1)); + } + + /// Shrinks the buffer down to the specified capacity. If the given amount + /// is 0, actually completely deallocates. + /// + /// # Panics + /// + /// Panics if the given amount is *larger* than the current capacity. + /// + /// # Aborts + /// + /// Aborts on OOM. + #[cfg(not(no_global_oom_handling))] + fn shrink_to_fit(&mut self, cap: usize) { + handle_reserve(self.shrink(cap)); + } + + /// The same as `reserve`, but returns on errors instead of panicking or aborting. + fn try_reserve(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> { + self.grow_amortized(len, additional) + } + + /// Ensures that the buffer contains at least enough space to hold `len + + /// additional` elements. 
If it doesn't already, will reallocate the + /// minimum possible amount of memory necessary. Generally this will be + /// exactly the amount of memory necessary, but in principle the allocator + /// is free to give back more than we asked for. + /// + /// If `len` exceeds `self.capacity()`, this may fail to actually allocate + /// the requested space. This is not really unsafe, but the unsafe code + /// *you* write that relies on the behavior of this function may break. + /// + /// # Panics + /// + /// Panics if the new capacity exceeds `isize::MAX` bytes. + /// + /// # Aborts + /// + /// Aborts on OOM. + #[cfg(not(no_global_oom_handling))] + fn reserve_exact(&mut self, len: usize, additional: usize) { + handle_reserve(self.try_reserve_exact(len, additional)); + } + + /// The same as `reserve_exact`, but returns on errors instead of panicking or aborting. + fn try_reserve_exact(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> { + self.grow_exact(len, additional) + } + + fn grow_amortized(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError>; + fn grow_exact(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError>; + fn shrink(&mut self, cap: usize) -> Result<(), TryReserveError>; +} + +impl BoxStorage for Box<[mem::MaybeUninit], A> { + const MIN_NON_ZERO_CAP: usize = if mem::size_of::() == 1 { + 8 + } else if mem::size_of::() <= 1024 { + 4 + } else { + 1 + }; + + // This method is usually instantiated many times. So we want it to be as + // small as possible, to improve compile times. But we also want as much of + // its contents to be statically computable as possible, to make the + // generated code run faster. Therefore, this method is carefully written + // so that all of the code that depends on `T` is within it, while as much + // of the code that doesn't depend on `T` as possible is in functions that + // are non-generic over `T`. + fn grow_amortized(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> { + // This is ensured by the calling contexts. + debug_assert!(additional > 0); + + if mem::size_of::() == 0 { + // Since we return a capacity of `usize::MAX` when `elem_size` is + // 0, getting to here necessarily means the `RawVec` is overfull. + return Err(TryReserveErrorKind::CapacityOverflow.into()); + } + + // Nothing we can really do about these checks, sadly. + let required_cap = + len.checked_add(additional).ok_or(TryReserveErrorKind::CapacityOverflow)?; + + // This guarantees exponential growth. The doubling cannot overflow + // because `cap <= isize::MAX` and the type of `cap` is `usize`. + let cap = core::cmp::max(self.len() * 2, required_cap); + let cap = core::cmp::max(Self::MIN_NON_ZERO_CAP, cap); + + let new_layout = Layout::array::(cap); + + replace(self, |ptr, len, alloc| { + Ok((finish_grow(new_layout, slice_layout(ptr, len), alloc)?.as_mut_ptr().cast(), cap)) + }) + } + + // The constraints on this method are much the same as those on + // `grow_amortized`, but this method is usually instantiated less often so + // it's less critical. + fn grow_exact(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> { + if mem::size_of::() == 0 { + // Since we return a capacity of `usize::MAX` when `elem_size` is + // 0, getting to here necessarily means the `RawVec` is overfull. 
+ return Err(TryReserveErrorKind::CapacityOverflow.into()); + } + + let cap = len.checked_add(additional).ok_or(TryReserveErrorKind::CapacityOverflow)?; + let new_layout = Layout::array::(cap); + + replace(self, |ptr, len, alloc| { + Ok((finish_grow(new_layout, slice_layout(ptr, len), alloc)?.as_mut_ptr().cast(), cap)) + }) + } + + fn shrink(&mut self, cap: usize) -> Result<(), TryReserveError> { + replace(self, |ptr, len, alloc| { + let (ptr, layout) = + if let Some(mem) = slice_layout(ptr, len) { mem } else { return Ok((ptr, len)) }; + + let ptr = unsafe { + // `Layout::array` cannot overflow here because it would have + // overflowed earlier when capacity was larger. + let new_layout = Layout::array::(cap).unwrap_unchecked(); + alloc.shrink(ptr, layout, new_layout).map_err(|_| { + TryReserveErrorKind::AllocError { layout: new_layout, non_exhaustive: () } + })? + }; + Ok((ptr.as_mut_ptr().cast(), cap)) + }) + } +} + +fn replace( + dst: &mut Box<[mem::MaybeUninit], A>, + f: impl FnOnce(*mut T, usize, &mut A) -> Result<(*mut T, usize), TryReserveError>, +) -> Result<(), TryReserveError> { + unsafe { + let this = core::ptr::read(dst); + let (ptr, len, mut alloc) = box_into_raw_slice_parts(this); + match f(ptr.cast(), len, &mut alloc) { + Ok((ptr, len)) => { + Ok(core::ptr::write(dst, from_raw_slice_parts_in(ptr.cast(), len, alloc))) + } + Err(err) => { + core::ptr::write(dst, from_raw_slice_parts_in(ptr, len, alloc)); + Err(err) + } + } + } +} + +fn slice_layout(ptr: *mut T, len: usize) -> Option<(core::ptr::NonNull, Layout)> { + if mem::size_of::() == 0 || len == 0 { + None + } else { + // We have an allocated chunk of memory, so we can bypass runtime + // checks to get our current layout. + unsafe { + let layout = Layout::array::(len).unwrap_unchecked(); + Some((core::ptr::NonNull::new_unchecked(ptr.cast()), layout)) + } + } +} + +// This function is outside `Box` to minimize compile times. See the comment +// above `RawVec::grow_amortized` for details. (The `A` parameter isn't +// significant, because the number of different `A` types seen in practice is +// much smaller than the number of `T` types.) +#[inline(never)] +fn finish_grow( + new_layout: Result, + current_memory: Option<(core::ptr::NonNull, Layout)>, + alloc: &mut A, +) -> Result, TryReserveError> +where + A: Allocator, +{ + // Check for the error here to minimize the size of `RawVec::grow_*`. + let new_layout = new_layout.map_err(|_| TryReserveErrorKind::CapacityOverflow)?; + + alloc_guard(new_layout.size())?; + + let memory = if let Some((ptr, old_layout)) = current_memory { + debug_assert_eq!(old_layout.align(), new_layout.align()); + unsafe { + // The allocator checks for alignment equality + core::intrinsics::assume(old_layout.align() == new_layout.align()); + alloc.grow(ptr, old_layout, new_layout) + } + } else { + alloc.allocate(new_layout) + }; + + memory.map_err(|_| { + TryReserveErrorKind::AllocError { layout: new_layout, non_exhaustive: () }.into() + }) +} + +// Central function for reserve error handling. +#[cfg(not(no_global_oom_handling))] +#[inline] +fn handle_reserve(result: Result<(), TryReserveError>) { + match result.map_err(|e| e.kind()) { + Err(TryReserveErrorKind::CapacityOverflow) => capacity_overflow(), + Err(TryReserveErrorKind::AllocError { layout, .. }) => handle_alloc_error(layout), + Ok(()) => { /* yay */ } + } +} + +// We need to guarantee the following: +// * We don't ever allocate `> isize::MAX` byte-size objects. +// * We don't overflow `usize::MAX` and actually allocate too little. 
+// +// On 64-bit we just need to check for overflow since trying to allocate +// `> isize::MAX` bytes will surely fail. On 32-bit and 16-bit we need to add +// an extra guard for this in case we're running on a platform which can use +// all 4GB in user-space, e.g., PAE or x32. + +#[inline] +pub(crate) fn alloc_guard(alloc_size: usize) -> Result<(), TryReserveError> { + if usize::BITS < 64 && alloc_size > isize::MAX as usize { + Err(TryReserveErrorKind::CapacityOverflow.into()) + } else { + Ok(()) + } +} + +// One central function responsible for reporting capacity overflows. This'll +// ensure that the code generation related to these panics is minimal as there's +// only one location which panics rather than a bunch throughout the module. +#[cfg(not(no_global_oom_handling))] +pub(crate) fn capacity_overflow() -> ! { + panic!("capacity overflow"); +} diff --git a/library/alloc/src/boxed.rs b/library/alloc/src/boxed.rs index 5f44a6d427023..c1ca51fae7210 100644 --- a/library/alloc/src/boxed.rs +++ b/library/alloc/src/boxed.rs @@ -132,7 +132,6 @@ #![stable(feature = "rust1", since = "1.0.0")] -use core::alloc::LayoutError; use core::any::Any; use core::async_iter::AsyncIterator; use core::borrow; @@ -158,9 +157,6 @@ use crate::alloc::{handle_alloc_error, WriteCloneIntoRaw}; use crate::alloc::{AllocError, Allocator, Global, Layout}; #[cfg(not(no_global_oom_handling))] use crate::borrow::Cow; -// use crate::raw_vec::RawVec; -use crate::collections::TryReserveError; -use crate::collections::TryReserveErrorKind; #[cfg(not(no_global_oom_handling))] use crate::str::from_boxed_utf8_unchecked; #[cfg(not(no_global_oom_handling))] @@ -589,14 +585,6 @@ impl Box { } impl Box<[T]> { - pub(crate) const MIN_NON_ZERO_CAP: usize = if mem::size_of::() == 1 { - 8 - } else if mem::size_of::() <= 1024 { - 4 - } else { - 1 - }; - /// Constructs a new boxed slice with uninitialized contents. /// /// # Examples @@ -678,7 +666,7 @@ impl Box<[T]> { Err(_) => return Err(AllocError), }; let ptr = Global.allocate(layout)?; - Ok(Box::from_raw_slice_parts_in(ptr.as_mut_ptr() as *mut _, len, Global)) + Ok(crate::box_storage::from_raw_slice_parts_in(ptr.as_mut_ptr() as *mut _, len, Global)) } } @@ -710,7 +698,7 @@ impl Box<[T]> { Err(_) => return Err(AllocError), }; let ptr = Global.allocate_zeroed(layout)?; - Ok(Box::from_raw_slice_parts_in(ptr.as_mut_ptr() as *mut _, len, Global)) + Ok(crate::box_storage::from_raw_slice_parts_in(ptr.as_mut_ptr() as *mut _, len, Global)) } } @@ -719,7 +707,8 @@ impl Box<[T]> { /// /// If you change `Box<[T]>::empty` or dependencies, please take care to not introduce anything /// that would truly const-call something unstable. - pub(crate) const EMPTY: Self = Self::empty(); + #[unstable(feature = "allocator_api", issue = "32838")] + pub const EMPTY: Self = Self::empty(); /// Constructs a new empty boxed slice #[unstable(feature = "allocator_api", issue = "32838")] @@ -755,11 +744,11 @@ impl Box<[T], A> { // LLVM IR generated. let layout = match Layout::array::(capacity) { Ok(layout) => layout, - Err(_) => capacity_overflow(), + Err(_) => crate::box_storage::capacity_overflow(), }; - match alloc_guard(layout.size()) { + match crate::box_storage::alloc_guard(layout.size()) { Ok(_) => {} - Err(_) => capacity_overflow(), + Err(_) => crate::box_storage::capacity_overflow(), } let result = match init { AllocInit::Uninitialized => alloc.allocate(layout), @@ -773,7 +762,9 @@ impl Box<[T], A> { // Allocators currently return a `NonNull<[u8]>` whose length // matches the size requested. 
If that ever changes, the capacity // here should change to `ptr.len() / mem::size_of::()`. - unsafe { Box::from_raw_slice_parts_in(ptr.as_ptr().cast(), capacity, alloc) } + unsafe { + crate::box_storage::from_raw_slice_parts_in(ptr.as_ptr().cast(), capacity, alloc) + } } } @@ -834,13 +825,6 @@ impl Box<[T], A> { pub fn new_zeroed_slice_in(len: usize, alloc: A) -> Box<[mem::MaybeUninit], A> { Self::allocate_in(len, AllocInit::Zeroed, alloc) } - - pub(crate) unsafe fn from_raw_slice_parts_in(ptr: *mut T, len: usize, alloc: A) -> Self { - unsafe { - let raw = core::slice::from_raw_parts_mut(ptr, len); - Self::from_raw_in(raw, alloc) - } - } } impl Box, A> { @@ -955,185 +939,6 @@ impl Box<[mem::MaybeUninit], A> { let (raw, alloc) = Box::into_raw_with_allocator(self); unsafe { Box::from_raw_in(raw as *mut [T], alloc) } } - - /// Ensures that the buffer contains at least enough space to hold `len + - /// additional` elements. If it doesn't already have enough capacity, will - /// reallocate enough space plus comfortable slack space to get amortized - /// *O*(1) behavior. Will limit this behavior if it would needlessly cause - /// itself to panic. - /// - /// If `len` exceeds `self.capacity()`, this may fail to actually allocate - /// the requested space. This is not really unsafe, but the unsafe - /// code *you* write that relies on the behavior of this function may break. - /// - /// This is ideal for implementing a bulk-push operation like `extend`. - /// - /// # Panics - /// - /// Panics if the new capacity exceeds `isize::MAX` bytes. - /// - /// # Aborts - /// - /// Aborts on OOM. - #[cfg(not(no_global_oom_handling))] - #[inline] - pub(crate) fn reserve(&mut self, len: usize, additional: usize) { - handle_reserve(self.grow_exact(len, additional)) - } - - /// A specialized version of `reserve()` used only by the hot and - /// oft-instantiated `Vec::push()`, which does its own capacity check. - #[cfg(not(no_global_oom_handling))] - #[inline(never)] - pub(crate) fn reserve_for_push(&mut self, len: usize) { - handle_reserve(self.grow_amortized(len, 1)); - } - - /// Shrinks the buffer down to the specified capacity. If the given amount - /// is 0, actually completely deallocates. - /// - /// # Panics - /// - /// Panics if the given amount is *larger* than the current capacity. - /// - /// # Aborts - /// - /// Aborts on OOM. - #[cfg(not(no_global_oom_handling))] - pub(crate) fn shrink_to_fit(&mut self, cap: usize) { - handle_reserve(self.shrink(cap)); - } - - /// The same as `reserve`, but returns on errors instead of panicking or aborting. - pub(crate) fn try_reserve( - &mut self, - len: usize, - additional: usize, - ) -> Result<(), TryReserveError> { - self.grow_amortized(len, additional) - } - - /// Ensures that the buffer contains at least enough space to hold `len + - /// additional` elements. If it doesn't already, will reallocate the - /// minimum possible amount of memory necessary. Generally this will be - /// exactly the amount of memory necessary, but in principle the allocator - /// is free to give back more than we asked for. - /// - /// If `len` exceeds `self.capacity()`, this may fail to actually allocate - /// the requested space. This is not really unsafe, but the unsafe code - /// *you* write that relies on the behavior of this function may break. - /// - /// # Panics - /// - /// Panics if the new capacity exceeds `isize::MAX` bytes. - /// - /// # Aborts - /// - /// Aborts on OOM. 
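The methods being moved out of `boxed.rs` here preserve the usual `reserve` vs `reserve_exact` contract. A minimal stable-Rust sketch of the observable difference (only the `>=` bounds are guaranteed; the exact capacities are typical behavior, not a promise):

fn main() {
    let mut a: Vec<u8> = Vec::new();
    a.reserve_exact(10);
    assert!(a.capacity() >= 10); // in practice exactly 10

    let mut b: Vec<u8> = vec![0; 8];
    b.reserve(1); // amortized growth: typically doubles to 16 rather than 9
    assert!(b.capacity() >= 9);
}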
- #[cfg(not(no_global_oom_handling))] - pub(crate) fn reserve_exact(&mut self, len: usize, additional: usize) { - handle_reserve(self.try_reserve_exact(len, additional)); - } - - /// The same as `reserve_exact`, but returns on errors instead of panicking or aborting. - pub(crate) fn try_reserve_exact( - &mut self, - len: usize, - additional: usize, - ) -> Result<(), TryReserveError> { - self.grow_exact(len, additional) - } - - fn set_ptr_and_cap(&mut self, ptr: core::ptr::NonNull<[u8]>, cap: usize) { - // Allocators currently return a `NonNull<[u8]>` whose length matches - // the size requested. If that ever changes, the capacity here should - // change to `ptr.len() / mem::size_of::()`. - self.0 = unsafe { - Unique::new_unchecked(core::slice::from_raw_parts_mut(ptr.cast().as_ptr(), cap)) - } - } - - // This method is usually instantiated many times. So we want it to be as - // small as possible, to improve compile times. But we also want as much of - // its contents to be statically computable as possible, to make the - // generated code run faster. Therefore, this method is carefully written - // so that all of the code that depends on `T` is within it, while as much - // of the code that doesn't depend on `T` as possible is in functions that - // are non-generic over `T`. - fn grow_amortized(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> { - // This is ensured by the calling contexts. - debug_assert!(additional > 0); - - if mem::size_of::() == 0 { - // Since we return a capacity of `usize::MAX` when `elem_size` is - // 0, getting to here necessarily means the `RawVec` is overfull. - return Err(TryReserveErrorKind::CapacityOverflow.into()); - } - - // Nothing we can really do about these checks, sadly. - let required_cap = - len.checked_add(additional).ok_or(TryReserveErrorKind::CapacityOverflow)?; - - // This guarantees exponential growth. The doubling cannot overflow - // because `cap <= isize::MAX` and the type of `cap` is `usize`. - let cap = core::cmp::max(self.len() * 2, required_cap); - let cap = core::cmp::max(Box::<[T]>::MIN_NON_ZERO_CAP, cap); - - let new_layout = Layout::array::(cap); - - // `finish_grow` is non-generic over `T`. - let ptr = finish_grow(new_layout, self.current_memory(), &mut self.1)?; - self.set_ptr_and_cap(ptr, cap); - Ok(()) - } - - // The constraints on this method are much the same as those on - // `grow_amortized`, but this method is usually instantiated less often so - // it's less critical. - fn grow_exact(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> { - if mem::size_of::() == 0 { - // Since we return a capacity of `usize::MAX` when `elem_size` is - // 0, getting to here necessarily means the `RawVec` is overfull. - return Err(TryReserveErrorKind::CapacityOverflow.into()); - } - - let cap = len.checked_add(additional).ok_or(TryReserveErrorKind::CapacityOverflow)?; - let new_layout = Layout::array::(cap); - - // `finish_grow` is non-generic over `T`. - let ptr = finish_grow(new_layout, self.current_memory(), &mut self.1)?; - self.set_ptr_and_cap(ptr, cap); - Ok(()) - } - - fn current_memory(&self) -> Option<(core::ptr::NonNull, Layout)> { - if mem::size_of::() == 0 || self.len() == 0 { - None - } else { - // We have an allocated chunk of memory, so we can bypass runtime - // checks to get our current layout. 
- unsafe { - let layout = Layout::array::(self.len()).unwrap_unchecked(); - Some((self.0.cast().into(), layout)) - } - } - } - - fn shrink(&mut self, cap: usize) -> Result<(), TryReserveError> { - let (ptr, layout) = if let Some(mem) = self.current_memory() { mem } else { return Ok(()) }; - - let ptr = unsafe { - // `Layout::array` cannot overflow here because it would have - // overflowed earlier when capacity was larger. - let new_layout = Layout::array::(cap).unwrap_unchecked(); - self.1.shrink(ptr, layout, new_layout).map_err(|_| TryReserveErrorKind::AllocError { - layout: new_layout, - non_exhaustive: (), - })? - }; - self.set_ptr_and_cap(ptr, cap); - Ok(()) - } } impl Box { @@ -2274,74 +2079,3 @@ impl AsyncIterator for Box { (**self).size_hint() } } - -// This function is outside `Box` to minimize compile times. See the comment -// above `RawVec::grow_amortized` for details. (The `A` parameter isn't -// significant, because the number of different `A` types seen in practice is -// much smaller than the number of `T` types.) -#[inline(never)] -fn finish_grow( - new_layout: Result, - current_memory: Option<(core::ptr::NonNull, Layout)>, - alloc: &mut A, -) -> Result, TryReserveError> -where - A: Allocator, -{ - // Check for the error here to minimize the size of `RawVec::grow_*`. - let new_layout = new_layout.map_err(|_| TryReserveErrorKind::CapacityOverflow)?; - - alloc_guard(new_layout.size())?; - - let memory = if let Some((ptr, old_layout)) = current_memory { - debug_assert_eq!(old_layout.align(), new_layout.align()); - unsafe { - // The allocator checks for alignment equality - core::intrinsics::assume(old_layout.align() == new_layout.align()); - alloc.grow(ptr, old_layout, new_layout) - } - } else { - alloc.allocate(new_layout) - }; - - memory.map_err(|_| { - TryReserveErrorKind::AllocError { layout: new_layout, non_exhaustive: () }.into() - }) -} - -// Central function for reserve error handling. -#[cfg(not(no_global_oom_handling))] -#[inline] -fn handle_reserve(result: Result<(), TryReserveError>) { - match result.map_err(|e| e.kind()) { - Err(TryReserveErrorKind::CapacityOverflow) => capacity_overflow(), - Err(TryReserveErrorKind::AllocError { layout, .. }) => handle_alloc_error(layout), - Ok(()) => { /* yay */ } - } -} - -// We need to guarantee the following: -// * We don't ever allocate `> isize::MAX` byte-size objects. -// * We don't overflow `usize::MAX` and actually allocate too little. -// -// On 64-bit we just need to check for overflow since trying to allocate -// `> isize::MAX` bytes will surely fail. On 32-bit and 16-bit we need to add -// an extra guard for this in case we're running on a platform which can use -// all 4GB in user-space, e.g., PAE or x32. - -#[inline] -fn alloc_guard(alloc_size: usize) -> Result<(), TryReserveError> { - if usize::BITS < 64 && alloc_size > isize::MAX as usize { - Err(TryReserveErrorKind::CapacityOverflow.into()) - } else { - Ok(()) - } -} - -// One central function responsible for reporting capacity overflows. This'll -// ensure that the code generation related to these panics is minimal as there's -// only one location which panics rather than a bunch throughout the module. -#[cfg(not(no_global_oom_handling))] -fn capacity_overflow() -> ! 
{ - panic!("capacity overflow"); -} diff --git a/library/alloc/src/collections/vec_deque/mod.rs b/library/alloc/src/collections/vec_deque/mod.rs index a60f3b7771c60..350c677370406 100644 --- a/library/alloc/src/collections/vec_deque/mod.rs +++ b/library/alloc/src/collections/vec_deque/mod.rs @@ -18,6 +18,7 @@ use core::ptr::{self, NonNull}; use core::slice; use crate::alloc::{Allocator, Global}; +use crate::box_storage::BoxStorage; use crate::boxed::Box; use crate::collections::TryReserveError; use crate::collections::TryReserveErrorKind; @@ -3051,7 +3052,8 @@ impl From> for VecDeque { unsafe { let (other_buf, len, capacity, alloc) = other.into_raw_parts_with_alloc(); - let buf = Box::from_raw_slice_parts_in(other_buf.cast(), capacity, alloc); + let buf = + crate::box_storage::from_raw_slice_parts_in(other_buf.cast(), capacity, alloc); VecDeque { tail: 0, head: len, buf } } } diff --git a/library/alloc/src/lib.rs b/library/alloc/src/lib.rs index e487b478e888e..71a929a2ab49a 100644 --- a/library/alloc/src/lib.rs +++ b/library/alloc/src/lib.rs @@ -209,6 +209,7 @@ mod boxed { pub use std::boxed::Box; } pub mod borrow; +mod box_storage; pub mod collections; #[cfg(not(no_global_oom_handling))] pub mod ffi; diff --git a/library/alloc/src/vec/into_iter.rs b/library/alloc/src/vec/into_iter.rs index 9ab805d4ade92..2759c217bf3a1 100644 --- a/library/alloc/src/vec/into_iter.rs +++ b/library/alloc/src/vec/into_iter.rs @@ -1,7 +1,6 @@ #[cfg(not(no_global_oom_handling))] use super::AsVecIntoIter; use crate::alloc::{Allocator, Global}; -use crate::boxed::Box; use core::fmt; use core::intrinsics::arith_offset; use core::iter::{ @@ -322,7 +321,11 @@ unsafe impl<#[may_dangle] T, A: Allocator> Drop for IntoIter { // `IntoIter::alloc` is not used anymore after this and will be dropped by RawVec let alloc = ManuallyDrop::take(&mut self.0.alloc); // RawVec handles deallocation - let _ = Box::from_raw_slice_parts_in(self.0.buf.as_ptr(), self.0.cap, alloc); + let _ = crate::box_storage::from_raw_slice_parts_in( + self.0.buf.as_ptr(), + self.0.cap, + alloc, + ); } } } diff --git a/library/alloc/src/vec/mod.rs b/library/alloc/src/vec/mod.rs index 858da44fffb0e..74e830c3b6a0a 100644 --- a/library/alloc/src/vec/mod.rs +++ b/library/alloc/src/vec/mod.rs @@ -71,6 +71,7 @@ use core::slice::{self, SliceIndex}; use crate::alloc::{Allocator, Global}; use crate::borrow::{Cow, ToOwned}; +use crate::box_storage::BoxStorage; use crate::boxed::Box; use crate::collections::TryReserveError; @@ -686,7 +687,10 @@ impl Vec { #[unstable(feature = "allocator_api", issue = "32838")] pub unsafe fn from_raw_parts_in(ptr: *mut T, length: usize, capacity: usize, alloc: A) -> Self { unsafe { - Vec { buf: Box::from_raw_slice_parts_in(ptr.cast(), capacity, alloc), len: length } + Vec { + buf: crate::box_storage::from_raw_slice_parts_in(ptr.cast(), capacity, alloc), + len: length, + } } } diff --git a/library/alloc/src/vec/spec_from_iter_nested.rs b/library/alloc/src/vec/spec_from_iter_nested.rs index 37fd0721dfff6..7a0872ee564ac 100644 --- a/library/alloc/src/vec/spec_from_iter_nested.rs +++ b/library/alloc/src/vec/spec_from_iter_nested.rs @@ -3,7 +3,7 @@ use core::iter::TrustedLen; use core::ptr; // use crate::raw_vec::RawVec; -use crate::boxed::Box; +use crate::{box_storage::BoxStorage, boxed::Box}; use super::{SpecExtend, Vec}; @@ -28,8 +28,10 @@ where None => return Vec::new(), Some(element) => { let (lower, _) = iterator.size_hint(); - let initial_capacity = - cmp::max(Box::<[T]>::MIN_NON_ZERO_CAP, lower.saturating_add(1)); + let 
initial_capacity = cmp::max( + Box::<[core::mem::MaybeUninit]>::MIN_NON_ZERO_CAP, + lower.saturating_add(1), + ); let mut vector = Vec::with_capacity(initial_capacity); unsafe { // SAFETY: We requested capacity at least 1 diff --git a/library/alloc/src/vec/splice.rs b/library/alloc/src/vec/splice.rs index bad765c7f51fa..c6092c865093c 100644 --- a/library/alloc/src/vec/splice.rs +++ b/library/alloc/src/vec/splice.rs @@ -1,4 +1,5 @@ use crate::alloc::{Allocator, Global}; +use crate::box_storage::BoxStorage; use core::ptr::{self}; use core::slice::{self}; From cd588ebbf72196a89d6bbe2c71d3d0d29e809b6b Mon Sep 17 00:00:00 2001 From: Conrad Ludgate Date: Mon, 28 Feb 2022 09:05:24 +0000 Subject: [PATCH 04/14] fix --- library/alloc/src/box_storage.rs | 61 +++++++++++++++++++++----------- 1 file changed, 41 insertions(+), 20 deletions(-) diff --git a/library/alloc/src/box_storage.rs b/library/alloc/src/box_storage.rs index f1956e5f97903..197b9a7cc2213 100644 --- a/library/alloc/src/box_storage.rs +++ b/library/alloc/src/box_storage.rs @@ -1,7 +1,8 @@ use crate::alloc::{handle_alloc_error, Allocator, Layout, LayoutError}; use crate::boxed::Box; -use crate::collections::{TryReserveError, TryReserveErrorKind}; -use core::mem::{self}; +use crate::collections::TryReserveError; +use crate::collections::TryReserveErrorKind::*; +use core::mem; pub(crate) unsafe fn from_raw_slice_parts_in( ptr: *mut T, @@ -48,7 +49,22 @@ pub(crate) trait BoxStorage { #[cfg(not(no_global_oom_handling))] #[inline] fn reserve(&mut self, len: usize, additional: usize) { - handle_reserve(self.grow_exact(len, additional)) + // Callers expect this function to be very cheap when there is already sufficient capacity. + // Therefore, we move all the resizing and error-handling logic from grow_amortized and + // handle_reserve behind a call, while making sure that this function is likely to be + // inlined as just a comparison and a call if the comparison fails. + if self.needs_to_grow(len, additional) { + self.do_reserve_and_handle(len, additional); + } + } + + /// Returns if the buffer needs to grow to fulfill the needed extra capacity. + /// Mainly used to make inlining reserve-calls possible without inlining `grow`. + fn needs_to_grow(&self, len: usize, additional: usize) -> bool; + + #[cold] + fn do_reserve_and_handle(&mut self, len: usize, additional: usize) { + handle_reserve(self.grow_amortized(len, additional)); } /// A specialized version of `reserve()` used only by the hot and @@ -120,6 +136,12 @@ impl BoxStorage for Box<[mem::MaybeUninit], A> { 1 }; + /// Returns if the buffer needs to grow to fulfill the needed extra capacity. + /// Mainly used to make inlining reserve-calls possible without inlining `grow`. + fn needs_to_grow(&self, len: usize, additional: usize) -> bool { + additional > self.len().wrapping_sub(len) + } + // This method is usually instantiated many times. So we want it to be as // small as possible, to improve compile times. But we also want as much of // its contents to be statically computable as possible, to make the @@ -134,16 +156,16 @@ impl BoxStorage for Box<[mem::MaybeUninit], A> { if mem::size_of::() == 0 { // Since we return a capacity of `usize::MAX` when `elem_size` is // 0, getting to here necessarily means the `RawVec` is overfull. - return Err(TryReserveErrorKind::CapacityOverflow.into()); + return Err(CapacityOverflow.into()); } // Nothing we can really do about these checks, sadly. 
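As an aside, the `checked_add` in the hunk just below is what turns `len + additional` overflow into a reportable error instead of a wrapping bug. A tiny stand-alone sketch of the same pattern (hypothetical helper name; the real code maps the `None` case to `CapacityOverflow`):

fn required_cap(len: usize, additional: usize) -> Result<usize, &'static str> {
    len.checked_add(additional).ok_or("capacity overflow")
}

fn main() {
    assert_eq!(required_cap(10, 5), Ok(15));
    assert!(required_cap(usize::MAX, 1).is_err()); // would wrap without the check
}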
- let required_cap = - len.checked_add(additional).ok_or(TryReserveErrorKind::CapacityOverflow)?; + let required_cap = len.checked_add(additional).ok_or(CapacityOverflow)?; // This guarantees exponential growth. The doubling cannot overflow // because `cap <= isize::MAX` and the type of `cap` is `usize`. - let cap = core::cmp::max(self.len() * 2, required_cap); + let cap = self.len(); + let cap = core::cmp::max(cap * 2, required_cap); let cap = core::cmp::max(Self::MIN_NON_ZERO_CAP, cap); let new_layout = Layout::array::(cap); @@ -158,12 +180,12 @@ impl BoxStorage for Box<[mem::MaybeUninit], A> { // it's less critical. fn grow_exact(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> { if mem::size_of::() == 0 { - // Since we return a capacity of `usize::MAX` when `elem_size` is + // Since we return a capacity of `usize::MAX` when the type size is // 0, getting to here necessarily means the `RawVec` is overfull. - return Err(TryReserveErrorKind::CapacityOverflow.into()); + return Err(CapacityOverflow.into()); } - let cap = len.checked_add(additional).ok_or(TryReserveErrorKind::CapacityOverflow)?; + let cap = len.checked_add(additional).ok_or(CapacityOverflow)?; let new_layout = Layout::array::(cap); replace(self, |ptr, len, alloc| { @@ -172,6 +194,7 @@ impl BoxStorage for Box<[mem::MaybeUninit], A> { } fn shrink(&mut self, cap: usize) -> Result<(), TryReserveError> { + assert!(cap <= self.len(), "Tried to shrink to a larger capacity"); replace(self, |ptr, len, alloc| { let (ptr, layout) = if let Some(mem) = slice_layout(ptr, len) { mem } else { return Ok((ptr, len)) }; @@ -180,9 +203,9 @@ impl BoxStorage for Box<[mem::MaybeUninit], A> { // `Layout::array` cannot overflow here because it would have // overflowed earlier when capacity was larger. let new_layout = Layout::array::(cap).unwrap_unchecked(); - alloc.shrink(ptr, layout, new_layout).map_err(|_| { - TryReserveErrorKind::AllocError { layout: new_layout, non_exhaustive: () } - })? + alloc + .shrink(ptr, layout, new_layout) + .map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })? }; Ok((ptr.as_mut_ptr().cast(), cap)) }) @@ -235,7 +258,7 @@ where A: Allocator, { // Check for the error here to minimize the size of `RawVec::grow_*`. - let new_layout = new_layout.map_err(|_| TryReserveErrorKind::CapacityOverflow)?; + let new_layout = new_layout.map_err(|_| CapacityOverflow)?; alloc_guard(new_layout.size())?; @@ -250,9 +273,7 @@ where alloc.allocate(new_layout) }; - memory.map_err(|_| { - TryReserveErrorKind::AllocError { layout: new_layout, non_exhaustive: () }.into() - }) + memory.map_err(|_| AllocError { layout: new_layout, non_exhaustive: () }.into()) } // Central function for reserve error handling. @@ -260,8 +281,8 @@ where #[inline] fn handle_reserve(result: Result<(), TryReserveError>) { match result.map_err(|e| e.kind()) { - Err(TryReserveErrorKind::CapacityOverflow) => capacity_overflow(), - Err(TryReserveErrorKind::AllocError { layout, .. }) => handle_alloc_error(layout), + Err(CapacityOverflow) => capacity_overflow(), + Err(AllocError { layout, .. 
}) => handle_alloc_error(layout), Ok(()) => { /* yay */ } } } @@ -278,7 +299,7 @@ fn handle_reserve(result: Result<(), TryReserveError>) { #[inline] pub(crate) fn alloc_guard(alloc_size: usize) -> Result<(), TryReserveError> { if usize::BITS < 64 && alloc_size > isize::MAX as usize { - Err(TryReserveErrorKind::CapacityOverflow.into()) + Err(CapacityOverflow.into()) } else { Ok(()) } From e6ff4b7e50bb05ef303cc6058ee34f83751b36ab Mon Sep 17 00:00:00 2001 From: Conrad Ludgate Date: Tue, 8 Mar 2022 09:20:38 +0000 Subject: [PATCH 05/14] fix tests --- library/alloc/src/box_storage.rs | 158 +++++++++++++----- .../src/{raw_vec => box_storage}/tests.rs | 36 ++-- library/alloc/src/boxed.rs | 46 +---- .../alloc/src/collections/vec_deque/mod.rs | 56 +++---- 4 files changed, 160 insertions(+), 136 deletions(-) rename library/alloc/src/{raw_vec => box_storage}/tests.rs (78%) diff --git a/library/alloc/src/box_storage.rs b/library/alloc/src/box_storage.rs index 197b9a7cc2213..e00fd74bc1cf3 100644 --- a/library/alloc/src/box_storage.rs +++ b/library/alloc/src/box_storage.rs @@ -1,32 +1,42 @@ -use crate::alloc::{handle_alloc_error, Allocator, Layout, LayoutError}; +#![unstable(feature = "raw_vec_internals", reason = "unstable const warnings", issue = "none")] + +use core::alloc::LayoutError; +use core::cmp; +use core::intrinsics; +use core::mem; +use core::ptr::NonNull; + +#[cfg(not(no_global_oom_handling))] +use crate::alloc::handle_alloc_error; +use crate::alloc::{Allocator, Layout}; use crate::boxed::Box; use crate::collections::TryReserveError; use crate::collections::TryReserveErrorKind::*; -use core::mem; -pub(crate) unsafe fn from_raw_slice_parts_in( - ptr: *mut T, - len: usize, - alloc: A, -) -> Box<[T], A> { - unsafe { - let raw = core::slice::from_raw_parts_mut(ptr, len); - Box::from_raw_in(raw, alloc) - } -} +#[cfg(test)] +mod tests; -pub(crate) fn box_into_raw_slice_parts( - mut this: Box<[T], A>, -) -> (*mut T, usize, A) { - let len = this.len(); - let ptr = this.as_mut_ptr(); - let (_, alloc) = Box::into_raw_with_allocator(this); - (ptr, len, alloc) +#[cfg(not(no_global_oom_handling))] +pub(crate) enum AllocInit { + /// The contents of the new memory are uninitialized. + Uninitialized, + /// The new memory is guaranteed to be zeroed. + Zeroed, } -pub(crate) trait BoxStorage { +pub(crate) trait BoxStorage: Sized { + // Tiny Vecs are dumb. Skip to: + // - 8 if the element size is 1, because any heap allocators is likely + // to round up a request of less than 8 bytes to at least 8 bytes. + // - 4 if elements are moderate-sized (<= 1 KiB). + // - 1 otherwise, to avoid wasting too much space for very short Vecs. const MIN_NON_ZERO_CAP: usize; + /// Gets the capacity of the allocation. + /// + /// This will always be `usize::MAX` if `T` is zero-sized. + fn capacity(&self) -> usize; + /// Ensures that the buffer contains at least enough space to hold `len + /// additional` elements. If it doesn't already have enough capacity, will /// reallocate enough space plus comfortable slack space to get amortized @@ -53,18 +63,20 @@ pub(crate) trait BoxStorage { // Therefore, we move all the resizing and error-handling logic from grow_amortized and // handle_reserve behind a call, while making sure that this function is likely to be // inlined as just a comparison and a call if the comparison fails. 
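The comment above describes an inline-fast-path / cold-slow-path split. A self-contained sketch of that shape, with hypothetical names rather than the trait's actual items:

struct Buf {
    cap: usize,
}

impl Buf {
    #[inline]
    fn reserve(&mut self, len: usize, additional: usize) {
        // Fast path: one comparison, cheap enough for callers to inline.
        if additional > self.cap.wrapping_sub(len) {
            self.grow_cold(len, additional);
        }
    }

    #[cold]
    #[inline(never)]
    fn grow_cold(&mut self, len: usize, additional: usize) {
        // Slow path kept out of line; a real implementation would report
        // errors instead of unwrapping.
        self.cap = len.checked_add(additional).unwrap().max(self.cap * 2);
    }
}

fn main() {
    let mut b = Buf { cap: 4 };
    b.reserve(4, 1);
    assert_eq!(b.cap, 8); // doubled instead of growing to exactly 5
}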
+ #[cold] + fn do_reserve_and_handle(slf: &mut T, len: usize, additional: usize) { + handle_reserve(slf.grow_amortized(len, additional)); + } + if self.needs_to_grow(len, additional) { - self.do_reserve_and_handle(len, additional); + do_reserve_and_handle(self, len, additional); } } /// Returns if the buffer needs to grow to fulfill the needed extra capacity. /// Mainly used to make inlining reserve-calls possible without inlining `grow`. - fn needs_to_grow(&self, len: usize, additional: usize) -> bool; - - #[cold] - fn do_reserve_and_handle(&mut self, len: usize, additional: usize) { - handle_reserve(self.grow_amortized(len, additional)); + fn needs_to_grow(&self, len: usize, additional: usize) -> bool { + additional > self.capacity().wrapping_sub(len) } /// A specialized version of `reserve()` used only by the hot and @@ -92,7 +104,11 @@ pub(crate) trait BoxStorage { /// The same as `reserve`, but returns on errors instead of panicking or aborting. fn try_reserve(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> { - self.grow_amortized(len, additional) + if self.needs_to_grow(len, additional) { + self.grow_amortized(len, additional) + } else { + Ok(()) + } } /// Ensures that the buffer contains at least enough space to hold `len + @@ -119,7 +135,7 @@ pub(crate) trait BoxStorage { /// The same as `reserve_exact`, but returns on errors instead of panicking or aborting. fn try_reserve_exact(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> { - self.grow_exact(len, additional) + if self.needs_to_grow(len, additional) { self.grow_exact(len, additional) } else { Ok(()) } } fn grow_amortized(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError>; @@ -136,10 +152,9 @@ impl BoxStorage for Box<[mem::MaybeUninit], A> { 1 }; - /// Returns if the buffer needs to grow to fulfill the needed extra capacity. - /// Mainly used to make inlining reserve-calls possible without inlining `grow`. - fn needs_to_grow(&self, len: usize, additional: usize) -> bool { - additional > self.len().wrapping_sub(len) + #[inline(always)] + fn capacity(&self) -> usize { + if mem::size_of::() == 0 { usize::MAX } else { self.len() } } // This method is usually instantiated many times. So we want it to be as @@ -165,13 +180,15 @@ impl BoxStorage for Box<[mem::MaybeUninit], A> { // This guarantees exponential growth. The doubling cannot overflow // because `cap <= isize::MAX` and the type of `cap` is `usize`. let cap = self.len(); - let cap = core::cmp::max(cap * 2, required_cap); - let cap = core::cmp::max(Self::MIN_NON_ZERO_CAP, cap); + let cap = cmp::max(cap * 2, required_cap); + let cap = cmp::max(Self::MIN_NON_ZERO_CAP, cap); let new_layout = Layout::array::(cap); replace(self, |ptr, len, alloc| { - Ok((finish_grow(new_layout, slice_layout(ptr, len), alloc)?.as_mut_ptr().cast(), cap)) + // `finish_grow` is non-generic over `T`. + let ptr = finish_grow(new_layout, slice_layout(ptr, len), alloc)?; + Ok((ptr.as_mut_ptr().cast(), cap)) }) } @@ -184,17 +201,18 @@ impl BoxStorage for Box<[mem::MaybeUninit], A> { // 0, getting to here necessarily means the `RawVec` is overfull. return Err(CapacityOverflow.into()); } - let cap = len.checked_add(additional).ok_or(CapacityOverflow)?; let new_layout = Layout::array::(cap); replace(self, |ptr, len, alloc| { - Ok((finish_grow(new_layout, slice_layout(ptr, len), alloc)?.as_mut_ptr().cast(), cap)) + // `finish_grow` is non-generic over `T`. 
+ let ptr = finish_grow(new_layout, slice_layout(ptr, len), alloc)?; + Ok((ptr.as_mut_ptr().cast(), cap)) }) } fn shrink(&mut self, cap: usize) -> Result<(), TryReserveError> { - assert!(cap <= self.len(), "Tried to shrink to a larger capacity"); + assert!(cap <= self.capacity(), "Tried to shrink to a larger capacity"); replace(self, |ptr, len, alloc| { let (ptr, layout) = if let Some(mem) = slice_layout(ptr, len) { mem } else { return Ok((ptr, len)) }; @@ -212,6 +230,26 @@ impl BoxStorage for Box<[mem::MaybeUninit], A> { } } +pub(crate) unsafe fn from_raw_slice_parts_in( + ptr: *mut T, + len: usize, + alloc: A, +) -> Box<[T], A> { + unsafe { + let raw = core::slice::from_raw_parts_mut(ptr, len); + Box::from_raw_in(raw, alloc) + } +} + +pub(crate) fn box_into_raw_slice_parts( + mut this: Box<[T], A>, +) -> (*mut T, usize, A) { + let len = this.len(); + let ptr = this.as_mut_ptr(); + let (_, alloc) = Box::into_raw_with_allocator(this); + (ptr, len, alloc) +} + fn replace( dst: &mut Box<[mem::MaybeUninit], A>, f: impl FnOnce(*mut T, usize, &mut A) -> Result<(*mut T, usize), TryReserveError>, @@ -244,16 +282,16 @@ fn slice_layout(ptr: *mut T, len: usize) -> Option<(core::ptr::NonNull, L } } -// This function is outside `Box` to minimize compile times. See the comment +// This function is outside `RawVec` to minimize compile times. See the comment // above `RawVec::grow_amortized` for details. (The `A` parameter isn't // significant, because the number of different `A` types seen in practice is // much smaller than the number of `T` types.) #[inline(never)] fn finish_grow( new_layout: Result, - current_memory: Option<(core::ptr::NonNull, Layout)>, + current_memory: Option<(NonNull, Layout)>, alloc: &mut A, -) -> Result, TryReserveError> +) -> Result, TryReserveError> where A: Allocator, { @@ -266,7 +304,7 @@ where debug_assert_eq!(old_layout.align(), new_layout.align()); unsafe { // The allocator checks for alignment equality - core::intrinsics::assume(old_layout.align() == new_layout.align()); + intrinsics::assume(old_layout.align() == new_layout.align()); alloc.grow(ptr, old_layout, new_layout) } } else { @@ -312,3 +350,39 @@ pub(crate) fn alloc_guard(alloc_size: usize) -> Result<(), TryReserveError> { pub(crate) fn capacity_overflow() -> ! { panic!("capacity overflow"); } + +#[cfg(not(no_global_oom_handling))] +pub(crate) fn allocate_in( + capacity: usize, + init: AllocInit, + alloc: A, +) -> Box<[mem::MaybeUninit], A> { + // Don't allocate here because `Drop` will not deallocate when `capacity` is 0. + if mem::size_of::() == 0 || capacity == 0 { + Box::empty_in(alloc) + } else { + // We avoid `unwrap_or_else` here because it bloats the amount of + // LLVM IR generated. + let layout = match Layout::array::(capacity) { + Ok(layout) => layout, + Err(_) => crate::box_storage::capacity_overflow(), + }; + match crate::box_storage::alloc_guard(layout.size()) { + Ok(_) => {} + Err(_) => crate::box_storage::capacity_overflow(), + } + let result = match init { + AllocInit::Uninitialized => alloc.allocate(layout), + AllocInit::Zeroed => alloc.allocate_zeroed(layout), + }; + let ptr = match result { + Ok(ptr) => ptr, + Err(_) => handle_alloc_error(layout), + }; + + // Allocators currently return a `NonNull<[u8]>` whose length + // matches the size requested. If that ever changes, the capacity + // here should change to `ptr.len() / mem::size_of::()`. 
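For context on the raw `Allocator` calls that `allocate_in` and `finish_grow` make, a manual array allocation looks roughly like this (nightly `allocator_api`; illustrative only, not the patch's code):

#![feature(allocator_api)]
use std::alloc::{Allocator, Global, Layout};

fn main() {
    // `Layout::array` fails on overflow -- the same condition `allocate_in`
    // turns into a "capacity overflow" panic.
    let layout = Layout::array::<u32>(16).unwrap();
    let ptr = Global.allocate(layout).expect("allocation failure");
    // `allocate` hands back a NonNull<[u8]> covering at least `layout.size()` bytes.
    unsafe { Global.deallocate(ptr.cast(), layout) };
}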
+ unsafe { crate::box_storage::from_raw_slice_parts_in(ptr.as_ptr().cast(), capacity, alloc) } + } +} diff --git a/library/alloc/src/raw_vec/tests.rs b/library/alloc/src/box_storage/tests.rs similarity index 78% rename from library/alloc/src/raw_vec/tests.rs rename to library/alloc/src/box_storage/tests.rs index ff322f0da97c6..c3e28471a7d15 100644 --- a/library/alloc/src/raw_vec/tests.rs +++ b/library/alloc/src/box_storage/tests.rs @@ -1,4 +1,6 @@ use super::*; +use crate::alloc::Global; +use core::mem::MaybeUninit; use std::cell::Cell; #[test] @@ -40,23 +42,23 @@ fn allocator_param() { } let a = BoundedAlloc { fuel: Cell::new(500) }; - let mut v: RawVec = RawVec::with_capacity_in(50, a); - assert_eq!(v.alloc.fuel.get(), 450); + let mut v: Box<[MaybeUninit], _> = Box::new_uninit_slice_in(50, a); + assert_eq!(Box::allocator(&v).fuel.get(), 450); v.reserve(50, 150); // (causes a realloc, thus using 50 + 150 = 200 units of fuel) - assert_eq!(v.alloc.fuel.get(), 250); + assert_eq!(Box::allocator(&v).fuel.get(), 250); } #[test] fn reserve_does_not_overallocate() { { - let mut v: RawVec = RawVec::new(); + let mut v: Box<[MaybeUninit]> = Box::empty(); // First, `reserve` allocates like `reserve_exact`. v.reserve(0, 9); assert_eq!(9, v.capacity()); } { - let mut v: RawVec = RawVec::new(); + let mut v: Box<[MaybeUninit]> = Box::empty(); v.reserve(0, 7); assert_eq!(7, v.capacity()); // 97 is more than double of 7, so `reserve` should work @@ -66,7 +68,7 @@ fn reserve_does_not_overallocate() { } { - let mut v: RawVec = RawVec::new(); + let mut v: Box<[MaybeUninit]> = Box::empty(); v.reserve(0, 12); assert_eq!(12, v.capacity()); v.reserve(12, 3); @@ -81,10 +83,10 @@ fn reserve_does_not_overallocate() { struct ZST; // A `RawVec` holding zero-sized elements should always look like this. -fn zst_sanity(v: &RawVec) { +fn zst_sanity(v: &Box<[MaybeUninit]>) { assert_eq!(v.capacity(), usize::MAX); - assert_eq!(v.ptr(), core::ptr::Unique::::dangling().as_ptr()); - assert_eq!(v.current_memory(), None); + assert_eq!(v.as_ptr(), core::ptr::Unique::::dangling().as_ptr()); + // assert_eq!(v.current_memory(), None); } #[test] @@ -95,22 +97,22 @@ fn zst() { // All these different ways of creating the RawVec produce the same thing. - let v: RawVec = RawVec::new(); + let v: Box<[MaybeUninit]> = Box::empty(); zst_sanity(&v); - let v: RawVec = RawVec::with_capacity_in(100, Global); + let v: Box<[MaybeUninit]> = Box::new_uninit_slice_in(100, Global); zst_sanity(&v); - let v: RawVec = RawVec::with_capacity_in(100, Global); + let v: Box<[MaybeUninit]> = Box::new_uninit_slice_in(100, Global); zst_sanity(&v); - let v: RawVec = RawVec::allocate_in(0, AllocInit::Uninitialized, Global); + let v: Box<[MaybeUninit]> = allocate_in(0, AllocInit::Uninitialized, Global); zst_sanity(&v); - let v: RawVec = RawVec::allocate_in(100, AllocInit::Uninitialized, Global); + let v: Box<[MaybeUninit]> = allocate_in(100, AllocInit::Uninitialized, Global); zst_sanity(&v); - let mut v: RawVec = RawVec::allocate_in(usize::MAX, AllocInit::Uninitialized, Global); + let mut v: Box<[MaybeUninit]> = allocate_in(usize::MAX, AllocInit::Uninitialized, Global); zst_sanity(&v); // Check all these operations work as expected with zero-sized elements. 
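The `usize::MAX` invariant these zero-sized-type tests encode is also visible through the public `Vec` API on stable Rust:

fn main() {
    let mut v: Vec<()> = Vec::new();
    assert_eq!(v.capacity(), usize::MAX); // ZSTs never allocate
    v.extend(std::iter::repeat(()).take(1000));
    assert_eq!(v.len(), 1000); // still no heap allocation involved
}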
@@ -147,7 +149,7 @@ fn zst() { #[test] #[should_panic(expected = "capacity overflow")] fn zst_reserve_panic() { - let mut v: RawVec = RawVec::new(); + let mut v: Box<[MaybeUninit]> = Box::empty(); zst_sanity(&v); v.reserve(101, usize::MAX - 100); @@ -156,7 +158,7 @@ fn zst_reserve_panic() { #[test] #[should_panic(expected = "capacity overflow")] fn zst_reserve_exact_panic() { - let mut v: RawVec = RawVec::new(); + let mut v: Box<[MaybeUninit]> = Box::empty(); zst_sanity(&v); v.reserve_exact(101, usize::MAX - 100); diff --git a/library/alloc/src/boxed.rs b/library/alloc/src/boxed.rs index c1ca51fae7210..0fd5e17fe7cbf 100644 --- a/library/alloc/src/boxed.rs +++ b/library/alloc/src/boxed.rs @@ -718,14 +718,6 @@ impl Box<[T]> { } } -#[cfg(not(no_global_oom_handling))] -enum AllocInit { - /// The contents of the new memory are uninitialized. - Uninitialized, - /// The new memory is guaranteed to be zeroed. - Zeroed, -} - impl Box<[T], A> { /// Constructs a new empty boxed slice #[unstable(feature = "allocator_api", issue = "32838")] @@ -734,40 +726,6 @@ impl Box<[T], A> { Box(Unique::dangling_slice(), alloc) } - #[cfg(not(no_global_oom_handling))] - fn allocate_in(capacity: usize, init: AllocInit, alloc: A) -> Box<[mem::MaybeUninit], A> { - // Don't allocate here because `Drop` will not deallocate when `capacity` is 0. - if mem::size_of::() == 0 || capacity == 0 { - Box::empty_in(alloc) - } else { - // We avoid `unwrap_or_else` here because it bloats the amount of - // LLVM IR generated. - let layout = match Layout::array::(capacity) { - Ok(layout) => layout, - Err(_) => crate::box_storage::capacity_overflow(), - }; - match crate::box_storage::alloc_guard(layout.size()) { - Ok(_) => {} - Err(_) => crate::box_storage::capacity_overflow(), - } - let result = match init { - AllocInit::Uninitialized => alloc.allocate(layout), - AllocInit::Zeroed => alloc.allocate_zeroed(layout), - }; - let ptr = match result { - Ok(ptr) => ptr, - Err(_) => handle_alloc_error(layout), - }; - - // Allocators currently return a `NonNull<[u8]>` whose length - // matches the size requested. If that ever changes, the capacity - // here should change to `ptr.len() / mem::size_of::()`. - unsafe { - crate::box_storage::from_raw_slice_parts_in(ptr.as_ptr().cast(), capacity, alloc) - } - } - } - /// Constructs a new boxed slice with uninitialized contents in the provided allocator. 
/// /// # Examples @@ -795,7 +753,7 @@ impl Box<[T], A> { // #[unstable(feature = "new_uninit", issue = "63291")] #[must_use] pub fn new_uninit_slice_in(len: usize, alloc: A) -> Box<[mem::MaybeUninit], A> { - Self::allocate_in(len, AllocInit::Uninitialized, alloc) + crate::box_storage::allocate_in(len, crate::box_storage::AllocInit::Uninitialized, alloc) } /// Constructs a new boxed slice with uninitialized contents in the provided allocator, @@ -823,7 +781,7 @@ impl Box<[T], A> { // #[unstable(feature = "new_uninit", issue = "63291")] #[must_use] pub fn new_zeroed_slice_in(len: usize, alloc: A) -> Box<[mem::MaybeUninit], A> { - Self::allocate_in(len, AllocInit::Zeroed, alloc) + crate::box_storage::allocate_in(len, crate::box_storage::AllocInit::Zeroed, alloc) } } diff --git a/library/alloc/src/collections/vec_deque/mod.rs b/library/alloc/src/collections/vec_deque/mod.rs index 350c677370406..6b1a9f25f7466 100644 --- a/library/alloc/src/collections/vec_deque/mod.rs +++ b/library/alloc/src/collections/vec_deque/mod.rs @@ -171,14 +171,8 @@ impl Default for VecDeque { impl VecDeque { /// Marginally more convenient #[inline] - fn ptr(&self) -> *const T { - self.buf.as_ptr().cast() - } - - /// Marginally more convenient - #[inline] - fn mut_ptr(&mut self) -> *mut T { - self.buf.as_mut_ptr().cast() + fn ptr(&self) -> *mut T { + self.buf.as_ptr() as *const T as *mut T } /// Marginally more convenient @@ -213,7 +207,7 @@ impl VecDeque { /// [zeroed]: mem::MaybeUninit::zeroed #[inline] unsafe fn buffer_as_mut_slice(&mut self) -> &mut [MaybeUninit] { - unsafe { slice::from_raw_parts_mut(self.mut_ptr() as *mut MaybeUninit, self.cap()) } + unsafe { slice::from_raw_parts_mut(self.ptr() as *mut MaybeUninit, self.cap()) } } /// Moves an element out of the buffer @@ -226,7 +220,7 @@ impl VecDeque { #[inline] unsafe fn buffer_write(&mut self, off: usize, value: T) { unsafe { - ptr::write(self.mut_ptr().add(off), value); + ptr::write(self.ptr().add(off), value); } } @@ -259,7 +253,7 @@ impl VecDeque { /// Copies a contiguous block of memory len long from src to dst #[inline] - unsafe fn copy(&mut self, dst: usize, src: usize, len: usize) { + unsafe fn copy(&self, dst: usize, src: usize, len: usize) { debug_assert!( dst + len <= self.cap(), "cpy dst={} src={} len={} cap={}", @@ -277,13 +271,13 @@ impl VecDeque { self.cap() ); unsafe { - ptr::copy(self.ptr().add(src), self.mut_ptr().add(dst), len); + ptr::copy(self.ptr().add(src), self.ptr().add(dst), len); } } /// Copies a contiguous block of memory len long from src to dst #[inline] - unsafe fn copy_nonoverlapping(&mut self, dst: usize, src: usize, len: usize) { + unsafe fn copy_nonoverlapping(&self, dst: usize, src: usize, len: usize) { debug_assert!( dst + len <= self.cap(), "cno dst={} src={} len={} cap={}", @@ -301,14 +295,14 @@ impl VecDeque { self.cap() ); unsafe { - ptr::copy_nonoverlapping(self.ptr().add(src), self.mut_ptr().add(dst), len); + ptr::copy_nonoverlapping(self.ptr().add(src), self.ptr().add(dst), len); } } /// Copies a potentially wrapping block of memory len long from src to dest. /// (abs(dst - src) + len) must be no larger than cap() (There must be at /// most one continuous overlapping region between src and dest). 
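The wrapping arithmetic behind `wrap_copy` and its neighbors assumes the head/tail `VecDeque` of this era, which kept its capacity a power of two so a logical index wraps with a mask instead of a modulo. A stand-alone sketch of that indexing scheme:

fn main() {
    let cap: usize = 8; // power of two, as this VecDeque maintains
    let wrap = |logical: usize| logical & (cap - 1);
    assert_eq!(wrap(6 + 3), 1); // logical index 9 wraps to slot 1
    assert_eq!(wrap(7 + 1), 0); // one past the end wraps to slot 0
}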
- unsafe fn wrap_copy(&mut self, dst: usize, src: usize, len: usize) { + unsafe fn wrap_copy(&self, dst: usize, src: usize, len: usize) { #[allow(dead_code)] fn diff(a: usize, b: usize) -> usize { if a <= b { b - a } else { a - b } @@ -449,13 +443,13 @@ impl VecDeque { let head_room = self.cap() - dst; if src.len() <= head_room { unsafe { - ptr::copy_nonoverlapping(src.as_ptr(), self.mut_ptr().add(dst), src.len()); + ptr::copy_nonoverlapping(src.as_ptr(), self.ptr().add(dst), src.len()); } } else { let (left, right) = src.split_at(head_room); unsafe { - ptr::copy_nonoverlapping(left.as_ptr(), self.mut_ptr().add(dst), left.len()); - ptr::copy_nonoverlapping(right.as_ptr(), self.mut_ptr(), right.len()); + ptr::copy_nonoverlapping(left.as_ptr(), self.ptr().add(dst), left.len()); + ptr::copy_nonoverlapping(right.as_ptr(), self.ptr(), right.len()); } } } @@ -621,7 +615,7 @@ impl VecDeque { pub fn get_mut(&mut self, index: usize) -> Option<&mut T> { if index < self.len() { let idx = self.wrap_add(self.tail, index); - unsafe { Some(&mut *self.mut_ptr().add(idx)) } + unsafe { Some(&mut *self.ptr().add(idx)) } } else { None } @@ -656,7 +650,7 @@ impl VecDeque { assert!(j < self.len()); let ri = self.wrap_add(self.tail, i); let rj = self.wrap_add(self.tail, j); - unsafe { ptr::swap(self.mut_ptr().add(ri), self.mut_ptr().add(rj)) } + unsafe { ptr::swap(self.ptr().add(ri), self.ptr().add(rj)) } } /// Returns the number of elements the deque can hold without @@ -1044,7 +1038,7 @@ impl VecDeque { pub fn iter_mut(&mut self) -> IterMut<'_, T> { // SAFETY: The internal `IterMut` safety invariant is established because the // `ring` we create is a dereferenceable slice for lifetime '_. - let ring = ptr::slice_from_raw_parts_mut(self.mut_ptr(), self.cap()); + let ring = ptr::slice_from_raw_parts_mut(self.ptr(), self.cap()); unsafe { IterMut::new(ring, self.tail, self.head, PhantomData) } } @@ -1241,7 +1235,7 @@ impl VecDeque { // SAFETY: The internal `IterMut` safety invariant is established because the // `ring` we create is a dereferenceable slice for lifetime '_. - let ring = ptr::slice_from_raw_parts_mut(self.mut_ptr(), self.cap()); + let ring = ptr::slice_from_raw_parts_mut(self.ptr(), self.cap()); unsafe { IterMut::new(ring, tail, head, PhantomData) } } @@ -2093,16 +2087,12 @@ impl VecDeque { // `at` lies in the first half. let amount_in_first = first_len - at; - ptr::copy_nonoverlapping( - first_half.as_ptr().add(at), - other.mut_ptr(), - amount_in_first, - ); + ptr::copy_nonoverlapping(first_half.as_ptr().add(at), other.ptr(), amount_in_first); // just take all of the second half. 
ptr::copy_nonoverlapping( second_half.as_ptr(), - other.mut_ptr().add(amount_in_first), + other.ptr().add(amount_in_first), second_len, ); } else { @@ -2112,7 +2102,7 @@ impl VecDeque { let amount_in_second = second_len - offset; ptr::copy_nonoverlapping( second_half.as_ptr().add(offset), - other.mut_ptr(), + other.ptr(), amount_in_second, ); } @@ -2376,7 +2366,7 @@ impl VecDeque { }; } - let buf = self.buf.as_mut_ptr().cast::(); + let buf = self.ptr(); let cap = self.cap(); let len = self.len(); @@ -3094,8 +3084,8 @@ impl From> for Vec { other.make_contiguous(); unsafe { - let mut other = ManuallyDrop::new(other); - let buf = other.buf.as_mut_ptr().cast::(); + let other = ManuallyDrop::new(other); + let buf = other.ptr(); let len = other.len(); let cap = other.cap(); let alloc = ptr::read(other.allocator()); @@ -3125,7 +3115,7 @@ impl From<[T; N]> for VecDeque { if mem::size_of::() != 0 { // SAFETY: VecDeque::with_capacity ensures that there is enough capacity. unsafe { - ptr::copy_nonoverlapping(arr.as_ptr(), deq.mut_ptr(), N); + ptr::copy_nonoverlapping(arr.as_ptr(), deq.ptr(), N); } } deq.tail = 0; From d1df4669ef8d8ee1aa8cd88fc2f395b61098bdde Mon Sep 17 00:00:00 2001 From: Conrad Ludgate Date: Tue, 8 Mar 2022 09:38:07 +0000 Subject: [PATCH 06/14] fix test --- library/alloc/src/box_storage/tests.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/alloc/src/box_storage/tests.rs b/library/alloc/src/box_storage/tests.rs index c3e28471a7d15..f64623ee896fc 100644 --- a/library/alloc/src/box_storage/tests.rs +++ b/library/alloc/src/box_storage/tests.rs @@ -85,7 +85,7 @@ struct ZST; // A `RawVec` holding zero-sized elements should always look like this. fn zst_sanity(v: &Box<[MaybeUninit]>) { assert_eq!(v.capacity(), usize::MAX); - assert_eq!(v.as_ptr(), core::ptr::Unique::::dangling().as_ptr()); + assert_eq!(v.as_ptr().cast::(), core::ptr::Unique::::dangling().as_ptr() as *const T); // assert_eq!(v.current_memory(), None); } From f2d3aa97a18b2c259ea5cdf28b2b124f4ab2e7b8 Mon Sep 17 00:00:00 2001 From: Conrad Ludgate Date: Tue, 8 Mar 2022 09:56:33 +0000 Subject: [PATCH 07/14] trick linter to know that Zeroed is used --- library/alloc/src/box_storage/tests.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/library/alloc/src/box_storage/tests.rs b/library/alloc/src/box_storage/tests.rs index f64623ee896fc..fe115b6f36b87 100644 --- a/library/alloc/src/box_storage/tests.rs +++ b/library/alloc/src/box_storage/tests.rs @@ -106,6 +106,15 @@ fn zst() { let v: Box<[MaybeUninit]> = Box::new_uninit_slice_in(100, Global); zst_sanity(&v); + let v: Box<[MaybeUninit]> = allocate_in(0, AllocInit::Zeroed, Global); + zst_sanity(&v); + + let v: Box<[MaybeUninit]> = allocate_in(100, AllocInit::Zeroed, Global); + zst_sanity(&v); + + let v: Box<[MaybeUninit]> = allocate_in(usize::MAX, AllocInit::Zeroed, Global); + zst_sanity(&v); + let v: Box<[MaybeUninit]> = allocate_in(0, AllocInit::Uninitialized, Global); zst_sanity(&v); From 4b5406ee6c4daba994c733a40532ea210d98cd3f Mon Sep 17 00:00:00 2001 From: Conrad Ludgate Date: Sun, 13 Mar 2022 07:35:58 +0000 Subject: [PATCH 08/14] small touch ups --- library/alloc/src/box_storage.rs | 86 +++++++++---------- library/alloc/src/box_storage/tests.rs | 1 - library/alloc/src/boxed.rs | 10 ++- .../alloc/src/collections/vec_deque/mod.rs | 5 +- library/alloc/src/lib.rs | 1 + library/alloc/src/vec/into_iter.rs | 7 +- library/alloc/src/vec/mod.rs | 13 +-- .../alloc/src/vec/spec_from_iter_nested.rs | 9 +- library/core/src/ptr/unique.rs | 20 ----- 9 
files changed, 61 insertions(+), 91 deletions(-) diff --git a/library/alloc/src/box_storage.rs b/library/alloc/src/box_storage.rs index e00fd74bc1cf3..8c654e33b8420 100644 --- a/library/alloc/src/box_storage.rs +++ b/library/alloc/src/box_storage.rs @@ -4,6 +4,7 @@ use core::alloc::LayoutError; use core::cmp; use core::intrinsics; use core::mem; +use core::mem::MaybeUninit; use core::ptr::NonNull; #[cfg(not(no_global_oom_handling))] @@ -170,7 +171,7 @@ impl BoxStorage for Box<[mem::MaybeUninit], A> { if mem::size_of::() == 0 { // Since we return a capacity of `usize::MAX` when `elem_size` is - // 0, getting to here necessarily means the `RawVec` is overfull. + // 0, getting to here necessarily means the boxed-slice is overfull. return Err(CapacityOverflow.into()); } @@ -183,12 +184,11 @@ impl BoxStorage for Box<[mem::MaybeUninit], A> { let cap = cmp::max(cap * 2, required_cap); let cap = cmp::max(Self::MIN_NON_ZERO_CAP, cap); - let new_layout = Layout::array::(cap); - - replace(self, |ptr, len, alloc| { + replace(self, |current_memory, alloc| { + let new_layout = Layout::array::(cap); // `finish_grow` is non-generic over `T`. - let ptr = finish_grow(new_layout, slice_layout(ptr, len), alloc)?; - Ok((ptr.as_mut_ptr().cast(), cap)) + let ptr = finish_grow(new_layout, current_memory, alloc)?; + Ok(Some((ptr, cap))) }) } @@ -198,24 +198,23 @@ impl BoxStorage for Box<[mem::MaybeUninit], A> { fn grow_exact(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> { if mem::size_of::() == 0 { // Since we return a capacity of `usize::MAX` when the type size is - // 0, getting to here necessarily means the `RawVec` is overfull. + // 0, getting to here necessarily means the boxed-slice is overfull. return Err(CapacityOverflow.into()); } let cap = len.checked_add(additional).ok_or(CapacityOverflow)?; - let new_layout = Layout::array::(cap); - replace(self, |ptr, len, alloc| { + replace(self, |current_memory, alloc| { + let new_layout = Layout::array::(cap); // `finish_grow` is non-generic over `T`. - let ptr = finish_grow(new_layout, slice_layout(ptr, len), alloc)?; - Ok((ptr.as_mut_ptr().cast(), cap)) + let ptr = finish_grow(new_layout, current_memory, alloc)?; + Ok(Some((ptr, cap))) }) } fn shrink(&mut self, cap: usize) -> Result<(), TryReserveError> { assert!(cap <= self.capacity(), "Tried to shrink to a larger capacity"); - replace(self, |ptr, len, alloc| { - let (ptr, layout) = - if let Some(mem) = slice_layout(ptr, len) { mem } else { return Ok((ptr, len)) }; + replace(self, |current_memory, alloc| { + let (ptr, layout) = if let Some(mem) = current_memory { mem } else { return Ok(None) }; let ptr = unsafe { // `Layout::array` cannot overflow here because it would have @@ -225,12 +224,12 @@ impl BoxStorage for Box<[mem::MaybeUninit], A> { .shrink(ptr, layout, new_layout) .map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })? 
fn shrink(&mut self, cap: usize) -> Result<(), TryReserveError> { assert!(cap <= self.capacity(), "Tried to shrink to a larger capacity"); - replace(self, |ptr, len, alloc| { - let (ptr, layout) = - if let Some(mem) = slice_layout(ptr, len) { mem } else { return Ok((ptr, len)) }; + replace(self, |current_memory, alloc| { + let (ptr, layout) = if let Some(mem) = current_memory { mem } else { return Ok(None) }; let ptr = unsafe { // `Layout::array` cannot overflow here because it would have // overflowed earlier when capacity was larger. let new_layout = Layout::array::<T>(cap).unwrap_unchecked(); alloc .shrink(ptr, layout, new_layout) .map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })? }; - Ok((ptr.as_mut_ptr().cast(), cap)) + Ok(Some((ptr, cap))) }) } } -pub(crate) unsafe fn from_raw_slice_parts_in<T, A: Allocator>( +pub(crate) unsafe fn storage_from_raw_parts_in<T, A: Allocator>( ptr: *mut T, len: usize, alloc: A, @@ -241,43 +240,42 @@ pub(crate) unsafe fn from_raw_slice_parts_in<T, A: Allocator>( } } -pub(crate) fn box_into_raw_slice_parts<T, A: Allocator>( - mut this: Box<[T], A>, -) -> (*mut T, usize, A) { - let len = this.len(); - let ptr = this.as_mut_ptr(); - let (_, alloc) = Box::into_raw_with_allocator(this); - (ptr, len, alloc) -} - fn replace<T, A: Allocator>( dst: &mut Box<[mem::MaybeUninit<T>], A>, - f: impl FnOnce(*mut T, usize, &mut A) -> Result<(*mut T, usize), TryReserveError>, + f: impl FnOnce( + Option<(NonNull<u8>, Layout)>, + &A, + ) -> Result<Option<(NonNull<[u8]>, usize)>, TryReserveError>, ) -> Result<(), TryReserveError> { unsafe { - let this = core::ptr::read(dst); - let (ptr, len, mut alloc) = box_into_raw_slice_parts(this); - match f(ptr.cast(), len, &mut alloc) { - Ok((ptr, len)) => { - Ok(core::ptr::write(dst, from_raw_slice_parts_in(ptr.cast(), len, alloc))) - } - Err(err) => { - core::ptr::write(dst, from_raw_slice_parts_in(ptr, len, alloc)); - Err(err) + let current_memory = slice_layout(&mut *dst); + let alloc = Box::allocator(dst); + match f(current_memory, &alloc) { + Ok(None) => Ok(()), + Ok(Some((ptr, len))) => { + // hack because we don't have access to Box internals here :( + let raw = + core::slice::from_raw_parts_mut(ptr.as_ptr().cast::<MaybeUninit<T>>(), len); + let this = core::ptr::read(dst); + let (_, alloc) = Box::into_raw_with_allocator(this); + let this = Box::from_raw_in(raw, alloc); + core::ptr::write(dst, this); + Ok(()) } + Err(err) => Err(err), } } }
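The `replace` helper is the heart of the storage trait: every grow and shrink funnels through it. Reduced to its essentials (a sketch, not the real signature — the real code also threads the raw allocation through `f` and steals the allocator out of the old box before rebuilding), the control flow is:

fn replace_with<T, E>(
    dst: &mut Box<T>,
    f: impl FnOnce(&T) -> Result<Option<Box<T>>, E>,
) -> Result<(), E> {
    match f(dst)? {
        // `f` decided nothing needs to change (e.g. shrinking a ZST buffer).
        None => Ok(()),
        Some(new) => {
            // Only after `f` succeeded is the old box replaced; an Err return
            // above leaves `*dst` untouched and still safely droppable.
            *dst = new;
            Ok(())
        }
    }
}

fn main() {
    let mut b = Box::new(4usize);
    replace_with(&mut b, |&v| Ok::<_, ()>(Some(Box::new(v * 2)))).unwrap();
    assert_eq!(*b, 8);
}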
-fn slice_layout<T>(ptr: *mut T, len: usize) -> Option<(core::ptr::NonNull<u8>, Layout)> { - if mem::size_of::<T>() == 0 || len == 0 { +fn slice_layout<T>(slice: &mut [MaybeUninit<T>]) -> Option<(NonNull<u8>, Layout)> { + if mem::size_of::<T>() == 0 || slice.len() == 0 { None } else { // We have an allocated chunk of memory, so we can bypass runtime // checks to get our current layout. unsafe { - let layout = Layout::array::<T>(len).unwrap_unchecked(); - Some((core::ptr::NonNull::new_unchecked(ptr.cast()), layout)) + let layout = Layout::array::<T>(slice.len()).unwrap_unchecked(); + Some((NonNull::new_unchecked(slice.as_mut_ptr().cast()), layout)) } } } @@ -290,7 +288,7 @@ fn slice_layout<T>(ptr: *mut T, len: usize) -> Option<(core::ptr::NonNull<u8>, L fn finish_grow<A>( new_layout: Result<Layout, LayoutError>, current_memory: Option<(NonNull<u8>, Layout)>, - alloc: &mut A, + alloc: &A, ) -> Result<NonNull<[u8]>, TryReserveError> where A: Allocator, { @@ -365,11 +363,11 @@ pub(crate) fn allocate_in<T, A: Allocator>( // LLVM IR generated. let layout = match Layout::array::<T>(capacity) { Ok(layout) => layout, - Err(_) => crate::box_storage::capacity_overflow(), + Err(_) => capacity_overflow(), }; - match crate::box_storage::alloc_guard(layout.size()) { + match alloc_guard(layout.size()) { Ok(_) => {} - Err(_) => crate::box_storage::capacity_overflow(), + Err(_) => capacity_overflow(), } let result = match init { AllocInit::Uninitialized => alloc.allocate(layout), AllocInit::Zeroed => alloc.allocate_zeroed(layout), }; let ptr = match result { Ok(ptr) => ptr, Err(_) => handle_alloc_error(layout), }; // Allocators currently return a `NonNull<[u8]>` whose length // matches the size requested. If that ever changes, the capacity // here should change to `ptr.len() / mem::size_of::<T>()`. - unsafe { crate::box_storage::from_raw_slice_parts_in(ptr.as_ptr().cast(), capacity, alloc) } + unsafe { storage_from_raw_parts_in(ptr.as_ptr().cast(), capacity, alloc) } } }
diff --git a/library/alloc/src/box_storage/tests.rs b/library/alloc/src/box_storage/tests.rs index fe115b6f36b87..d9f4e12bfea39 100644 --- a/library/alloc/src/box_storage/tests.rs +++ b/library/alloc/src/box_storage/tests.rs @@ -86,7 +86,6 @@ struct ZST; fn zst_sanity<T>(v: &Box<[MaybeUninit<T>]>) { assert_eq!(v.capacity(), usize::MAX); assert_eq!(v.as_ptr().cast::<T>(), core::ptr::Unique::<T>::dangling().as_ptr() as *const T); - // assert_eq!(v.current_memory(), None); } #[test]
diff --git a/library/alloc/src/boxed.rs b/library/alloc/src/boxed.rs index 0fd5e17fe7cbf..a2a6b5f26e121 100644 --- a/library/alloc/src/boxed.rs +++ b/library/alloc/src/boxed.rs @@ -157,6 +157,7 @@ use crate::alloc::{handle_alloc_error, WriteCloneIntoRaw}; use crate::alloc::{AllocError, Allocator, Global, Layout}; #[cfg(not(no_global_oom_handling))] use crate::borrow::Cow; +use crate::box_storage::storage_from_raw_parts_in; #[cfg(not(no_global_oom_handling))] use crate::str::from_boxed_utf8_unchecked; #[cfg(not(no_global_oom_handling))] @@ -666,7 +667,7 @@ impl<T> Box<[T]> { Err(_) => return Err(AllocError), }; let ptr = Global.allocate(layout)?; - Ok(crate::box_storage::from_raw_slice_parts_in(ptr.as_mut_ptr() as *mut _, len, Global)) + Ok(storage_from_raw_parts_in(ptr.as_mut_ptr() as *mut _, len, Global)) } } @@ -698,7 +699,7 @@ impl<T> Box<[T]> { Err(_) => return Err(AllocError), }; let ptr = Global.allocate_zeroed(layout)?; - Ok(crate::box_storage::from_raw_slice_parts_in(ptr.as_mut_ptr() as *mut _, len, Global)) + Ok(storage_from_raw_parts_in(ptr.as_mut_ptr() as *mut _, len, Global)) } } @@ -723,7 +724,10 @@ impl<T, A: Allocator> Box<[T], A> { #[unstable(feature = "allocator_api", issue = "32838")] #[inline] pub const fn empty_in(alloc: A) -> Self { - Box(Unique::dangling_slice(), alloc) + unsafe { + let slice = core::slice::from_raw_parts_mut(mem::align_of::<T>() as *mut T, 0); + Self::from_raw_in(slice, alloc) + } } /// Constructs a new boxed slice with uninitialized contents in the provided allocator.
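`empty_in` now inlines the soon-to-be-deleted `Unique::dangling_slice`: a zero-length boxed slice needs no allocation, only a non-null, well-aligned address, and `align_of::<T>()` itself is always such an address. The trick in isolation (illustrative names):

use core::{mem, ptr::NonNull};

fn dangling_ptr<T>() -> NonNull<T> {
    // SAFETY: align_of::<T>() is never zero, so the address is non-null.
    unsafe { NonNull::new_unchecked(mem::align_of::<T>() as *mut T) }
}

fn main() {
    let p = dangling_ptr::<u64>();
    assert_eq!(p.as_ptr() as usize, mem::align_of::<u64>());
    // A zero-length slice at that address is valid: no bytes are ever read.
    let s: &[u64] = unsafe { core::slice::from_raw_parts(p.as_ptr(), 0) };
    assert!(s.is_empty());
}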
diff --git a/library/alloc/src/collections/vec_deque/mod.rs b/library/alloc/src/collections/vec_deque/mod.rs index 6b1a9f25f7466..858b2643c099c 100644 --- a/library/alloc/src/collections/vec_deque/mod.rs +++ b/library/alloc/src/collections/vec_deque/mod.rs @@ -18,7 +18,7 @@ use core::ptr::{self, NonNull}; use core::slice; use crate::alloc::{Allocator, Global}; -use crate::box_storage::BoxStorage; +use crate::box_storage::{storage_from_raw_parts_in, BoxStorage}; use crate::boxed::Box; use crate::collections::TryReserveError; use crate::collections::TryReserveErrorKind; @@ -3042,8 +3042,7 @@ impl<T, A: Allocator> From<Vec<T, A>> for VecDeque<T, A> { unsafe { let (other_buf, len, capacity, alloc) = other.into_raw_parts_with_alloc(); - let buf = - crate::box_storage::from_raw_slice_parts_in(other_buf.cast(), capacity, alloc); + let buf = storage_from_raw_parts_in(other_buf.cast(), capacity, alloc); VecDeque { tail: 0, head: len, buf } } }
diff --git a/library/alloc/src/lib.rs b/library/alloc/src/lib.rs index 71a929a2ab49a..e8d00af42e750 100644 --- a/library/alloc/src/lib.rs +++ b/library/alloc/src/lib.rs @@ -107,6 +107,7 @@ #![feature(const_maybe_uninit_as_mut_ptr)] #![feature(const_refs_to_cell)] #![feature(core_c_str)] +#![feature(const_slice_from_raw_parts)] #![feature(core_intrinsics)] #![feature(core_ffi_c)] #![feature(const_eval_select)]
diff --git a/library/alloc/src/vec/into_iter.rs b/library/alloc/src/vec/into_iter.rs index 2759c217bf3a1..534953691cb53 100644 --- a/library/alloc/src/vec/into_iter.rs +++ b/library/alloc/src/vec/into_iter.rs @@ -1,6 +1,7 @@ #[cfg(not(no_global_oom_handling))] use super::AsVecIntoIter; use crate::alloc::{Allocator, Global}; +use crate::box_storage::storage_from_raw_parts_in; use core::fmt; use core::intrinsics::arith_offset; use core::iter::{ FusedIterator, InPlaceIterable, SourceIter, TrustedLen, TrustedRandomAccessNoCoerce, }; @@ -321,11 +322,7 @@ unsafe impl<#[may_dangle] T, A: Allocator> Drop for IntoIter<T, A> { // `IntoIter::alloc` is not used anymore after this and will be dropped by RawVec let alloc = ManuallyDrop::take(&mut self.0.alloc); // RawVec handles deallocation - let _ = crate::box_storage::from_raw_slice_parts_in( - self.0.buf.as_ptr(), - self.0.cap, - alloc, - ); + let _ = storage_from_raw_parts_in(self.0.buf.as_ptr(), self.0.cap, alloc); } } }
diff --git a/library/alloc/src/vec/mod.rs b/library/alloc/src/vec/mod.rs index 74e830c3b6a0a..ab0c4a041c881 100644 --- a/library/alloc/src/vec/mod.rs +++ b/library/alloc/src/vec/mod.rs @@ -71,7 +71,7 @@ use core::slice::{self, SliceIndex}; use crate::alloc::{Allocator, Global}; use crate::borrow::{Cow, ToOwned}; -use crate::box_storage::BoxStorage; +use crate::box_storage::{storage_from_raw_parts_in, BoxStorage}; use crate::boxed::Box; use crate::collections::TryReserveError; @@ -686,12 +686,7 @@ impl<T, A: Allocator> Vec<T, A> { #[inline] #[unstable(feature = "allocator_api", issue = "32838")] pub unsafe fn from_raw_parts_in(ptr: *mut T, length: usize, capacity: usize, alloc: A) -> Self { - unsafe { - Vec { - buf: crate::box_storage::from_raw_slice_parts_in(ptr.cast(), capacity, alloc), - len: length, - } - } + unsafe { Vec { buf: storage_from_raw_parts_in(ptr.cast(), capacity, alloc), len: length } } } /// Decomposes a `Vec<T, A>` into its raw components.
@@ -792,7 +787,7 @@ impl<T, A: Allocator> Vec<T, A> { #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn capacity(&self) -> usize { - self.buf.len() + self.buf.capacity() } /// Reserves capacity for at least `additional` more elements to be inserted @@ -1731,7 +1726,7 @@ impl<T, A: Allocator> Vec<T, A> { pub fn push(&mut self, value: T) { // This will panic or abort if we would allocate > isize::MAX bytes // or if the length increment would overflow for zero-sized types. - if self.len == self.capacity() { + if self.len == self.buf.capacity() { self.buf.reserve_for_push(self.len); } unsafe {
diff --git a/library/alloc/src/vec/spec_from_iter_nested.rs b/library/alloc/src/vec/spec_from_iter_nested.rs index 7a0872ee564ac..744447376256a 100644 --- a/library/alloc/src/vec/spec_from_iter_nested.rs +++ b/library/alloc/src/vec/spec_from_iter_nested.rs @@ -1,8 +1,7 @@ -use core::cmp; use core::iter::TrustedLen; use core::ptr; +use core::{cmp, mem::MaybeUninit}; -// use crate::raw_vec::RawVec; use crate::{box_storage::BoxStorage, boxed::Box}; use super::{SpecExtend, Vec}; @@ -28,10 +27,8 @@ where None => return Vec::new(), Some(element) => { let (lower, _) = iterator.size_hint(); - let initial_capacity = cmp::max( - Box::<[core::mem::MaybeUninit<T>]>::MIN_NON_ZERO_CAP, - lower.saturating_add(1), - ); + let initial_capacity = + cmp::max(Box::<[MaybeUninit<T>]>::MIN_NON_ZERO_CAP, lower.saturating_add(1)); let mut vector = Vec::with_capacity(initial_capacity); unsafe { // SAFETY: We requested capacity at least 1
diff --git a/library/core/src/ptr/unique.rs b/library/core/src/ptr/unique.rs index 82065b4b4b065..64616142b4188 100644 --- a/library/core/src/ptr/unique.rs +++ b/library/core/src/ptr/unique.rs @@ -72,26 +72,6 @@ impl<T: Sized> Unique<T> { pub const fn dangling() -> Self { Self::from(NonNull::dangling()) } - - /// Creates a new `Unique` that is dangling, but well-aligned. - /// - /// This is useful for initializing types which lazily allocate, like - /// `Vec::new` does. - /// - /// Note that the pointer value may potentially represent a valid pointer to - /// a `T`, which means this must not be used as a "not yet initialized" - /// sentinel value. Types that lazily allocate must track initialization by - /// some other means. - #[must_use] - #[inline] - pub const fn dangling_slice() -> Unique<[T]> { - // SAFETY: mem::align_of() returns a valid, non-null pointer. The - // conditions to call new_unchecked() are thus respected.
- unsafe { - let slice = core::slice::from_raw_parts_mut(mem::align_of::<T>() as *mut T, 0); - Unique::new_unchecked(slice) - } - } } #[unstable(feature = "ptr_internals", issue = "none")]
From 1029081cd1b31455d47f28f13e35ac14aeea5885 Mon Sep 17 00:00:00 2001 From: Conrad Ludgate Date: Sun, 10 Apr 2022 08:39:52 +0100 Subject: [PATCH 09/14] fix double-free --- library/alloc/src/box_storage.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/library/alloc/src/box_storage.rs b/library/alloc/src/box_storage.rs index 8c654e33b8420..f9ee26be6aa0d 100644 --- a/library/alloc/src/box_storage.rs +++ b/library/alloc/src/box_storage.rs @@ -233,9 +233,9 @@ pub(crate) unsafe fn storage_from_raw_parts_in( ptr: *mut T, len: usize, alloc: A, -) -> Box<[T], A> { +) -> Box<[MaybeUninit<T>], A> { unsafe { - let raw = core::slice::from_raw_parts_mut(ptr, len); + let raw = core::slice::from_raw_parts_mut(ptr.cast(), len); Box::from_raw_in(raw, alloc) } }
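Patch 09 is the crux of the storage type: `storage_from_raw_parts_in` must hand back a `Box<[MaybeUninit<T>], A>` rather than a `Box<[T], A>`, because dropping a boxed slice of `T` runs `T`'s destructor for every element. A sketch of the distinction (illustrative; `new_uninit_slice` is stable in current Rust but was still feature-gated when this series was written):

use std::mem::MaybeUninit;

fn main() {
    // Four uninitialized slots; no String has ever been written to them.
    let b: Box<[MaybeUninit<String>]> = Box::new_uninit_slice(4);
    // Dropping `b` only frees the allocation. Had this been a Box<[String]>,
    // drop glue would run four bogus String destructors, which is exactly the
    // double-free this patch title refers to.
    drop(b);
}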
From b8b39bd185466dd859a6fb91e3aae885d26d2d7a Mon Sep 17 00:00:00 2001 From: Conrad Ludgate Date: Sun, 10 Apr 2022 09:27:32 +0100 Subject: [PATCH 10/14] fix ownership detection --- .../alloc/src/collections/vec_deque/mod.rs | 10 +++- library/alloc/src/vec/in_place_collect.rs | 2 +- library/alloc/src/vec/into_iter.rs | 50 +++++++------------ library/alloc/src/vec/mod.rs | 27 +++++----- library/alloc/src/vec/spec_from_elem.rs | 11 ++-- library/alloc/src/vec/spec_from_iter.rs | 8 +-- 6 files changed, 53 insertions(+), 55 deletions(-)
diff --git a/library/alloc/src/collections/vec_deque/mod.rs b/library/alloc/src/collections/vec_deque/mod.rs index 858b2643c099c..551171907153d 100644 --- a/library/alloc/src/collections/vec_deque/mod.rs +++ b/library/alloc/src/collections/vec_deque/mod.rs @@ -108,6 +108,7 @@ pub struct VecDeque< T, #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global, > { tail: usize, head: usize, buf: Box<[MaybeUninit<T>], A>, + phantom: PhantomData<T>, } #[stable(feature = "rust1", since = "1.0.0")] @@ -564,7 +565,12 @@ impl<T, A: Allocator> VecDeque<T, A> { // +1 since the ringbuffer always leaves one space empty let cap = cmp::max(capacity + 1, MINIMUM_CAPACITY + 1).next_power_of_two(); - VecDeque { tail: 0, head: 0, buf: Box::new_uninit_slice_in(cap, alloc) } + VecDeque { + tail: 0, + head: 0, + buf: Box::new_uninit_slice_in(cap, alloc), + phantom: PhantomData, + } } /// Provides a reference to the element at the given index. @@ -3043,7 +3049,7 @@ impl<T, A: Allocator> From<Vec<T, A>> for VecDeque<T, A> { unsafe { let (other_buf, len, capacity, alloc) = other.into_raw_parts_with_alloc(); let buf = storage_from_raw_parts_in(other_buf.cast(), capacity, alloc); - VecDeque { tail: 0, head: len, buf } + VecDeque { tail: 0, head: len, buf, phantom: PhantomData } } } }
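The `phantom: PhantomData<T>` field added here restores what was lost when `RawVec<T>` went away: `Box<[MaybeUninit<T>]>` owns memory but, as far as the drop checker and auto traits are concerned, owns no `T`s. A container that really does own its elements has to say so explicitly. The shape, with hypothetical names:

use core::marker::PhantomData;
use std::mem::MaybeUninit;

struct OwnsTs<T> {
    storage: Box<[MaybeUninit<T>]>, // owns the memory, not the T's
    len: usize,                     // number of initialized slots
    phantom: PhantomData<T>,        // tells dropck that dropping this may drop T's
}

fn main() {
    let v: OwnsTs<u8> = OwnsTs { storage: Box::new_uninit_slice(2), len: 0, phantom: PhantomData };
    assert_eq!(v.storage.len(), 2);
    assert_eq!(v.len, 0);
}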
diff --git a/library/alloc/src/vec/in_place_collect.rs b/library/alloc/src/vec/in_place_collect.rs index 282af8cc33fdd..df49febe1e981 100644 --- a/library/alloc/src/vec/in_place_collect.rs +++ b/library/alloc/src/vec/in_place_collect.rs @@ -171,7 +171,7 @@ where inner.ptr, inner.buf.as_ptr() as *mut T, inner.end as *const T, - inner.cap, + inner.buf.len(), ) };
diff --git a/library/alloc/src/vec/into_iter.rs b/library/alloc/src/vec/into_iter.rs index 534953691cb53..39728d2bf4ebd 100644 --- a/library/alloc/src/vec/into_iter.rs +++ b/library/alloc/src/vec/into_iter.rs @@ -1,15 +1,14 @@ #[cfg(not(no_global_oom_handling))] use super::AsVecIntoIter; use crate::alloc::{Allocator, Global}; -use crate::box_storage::storage_from_raw_parts_in; +use crate::boxed::Box; use core::fmt; use core::intrinsics::arith_offset; use core::iter::{ FusedIterator, InPlaceIterable, SourceIter, TrustedLen, TrustedRandomAccessNoCoerce, }; use core::marker::PhantomData; -use core::mem::{self, ManuallyDrop}; -use core::ops::Deref; +use core::mem::{self, ManuallyDrop, MaybeUninit}; use core::ptr::{self, NonNull}; use core::slice::{self}; @@ -30,12 +29,8 @@ pub struct IntoIter< T, #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global, > { - pub(super) buf: NonNull<T>, + pub(super) buf: Box<[MaybeUninit<T>], A>, pub(super) phantom: PhantomData<T>, - pub(super) cap: usize, - // the drop impl reconstructs a RawVec from buf, cap and alloc - // to avoid dropping the allocator twice we need to wrap it into ManuallyDrop - pub(super) alloc: ManuallyDrop<A>, pub(super) ptr: *const T, pub(super) end: *const T, } @@ -86,7 +81,7 @@ impl<T, A: Allocator> IntoIter<T, A> { #[unstable(feature = "allocator_api", issue = "32838")] #[inline] pub fn allocator(&self) -> &A { - &self.alloc + Box::allocator(&self.buf) } fn as_raw_mut_slice(&mut self) -> *mut [T] { @@ -112,10 +107,18 @@ impl<T, A: Allocator> IntoIter<T, A> { // overwrite the individual fields instead of creating a new // struct and then overwriting &mut self. // this creates less assembly - self.cap = 0; - self.buf = NonNull::dangling(); - self.ptr = self.buf.as_ptr(); - self.end = self.buf.as_ptr(); + + // Safety: + // the allocator is being copied from the existing buf, but we're forgetting that allocation + // directly afterwards. + unsafe { + let alloc = core::ptr::read(self.allocator()); + let buf = core::mem::replace(&mut self.buf, Box::empty_in(alloc)); + let _ = ManuallyDrop::new(buf); + } + + self.ptr = NonNull::dangling().as_ptr(); + self.end = NonNull::dangling().as_ptr(); unsafe { ptr::drop_in_place(remaining); @@ -303,36 +306,21 @@ where impl<T: Clone, A: Allocator + Clone> Clone for IntoIter<T, A> { #[cfg(not(test))] fn clone(&self) -> Self { - self.as_slice().to_vec_in(self.alloc.deref().clone()).into_iter() + self.as_slice().to_vec_in(self.allocator().clone()).into_iter() } #[cfg(test)] fn clone(&self) -> Self { - crate::slice::to_vec(self.as_slice(), self.alloc.deref().clone()).into_iter() + crate::slice::to_vec(self.as_slice(), self.allocator().clone()).into_iter() } } #[stable(feature = "rust1", since = "1.0.0")] unsafe impl<#[may_dangle] T, A: Allocator> Drop for IntoIter<T, A> { fn drop(&mut self) { - struct DropGuard<'a, T, A: Allocator>(&'a mut IntoIter<T, A>); - - impl<T, A: Allocator> Drop for DropGuard<'_, T, A> { - fn drop(&mut self) { - unsafe { - // `IntoIter::alloc` is not used anymore after this and will be dropped by RawVec - let alloc = ManuallyDrop::take(&mut self.0.alloc); - // RawVec handles deallocation - let _ = storage_from_raw_parts_in(self.0.buf.as_ptr(), self.0.cap, alloc); - } - } - } - - let guard = DropGuard(self); // destroy the remaining elements unsafe { - ptr::drop_in_place(guard.0.as_raw_mut_slice()); + ptr::drop_in_place(self.as_raw_mut_slice()); } - // now `guard` will be dropped and do the rest } }
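`forget_allocation_drop_remaining` above wants to leak the buffer while leaving a usable, empty `IntoIter` behind. The swap-in-an-empty-box move, in miniature (a sketch using stable APIs, not the patch's allocator-generic code):

use std::mem::{self, ManuallyDrop, MaybeUninit};

fn leak_storage<T>(buf: &mut Box<[MaybeUninit<T>]>) {
    // Swap in a zero-length box (which owns no real allocation), then wrap
    // the original in ManuallyDrop so its allocation is deliberately leaked.
    let real = mem::replace(buf, Box::new_uninit_slice(0));
    let _ = ManuallyDrop::new(real);
}

fn main() {
    let mut b: Box<[MaybeUninit<u32>]> = Box::new_uninit_slice(16);
    leak_storage(&mut b);
    assert_eq!(b.len(), 0);
}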
diff --git a/library/alloc/src/vec/mod.rs b/library/alloc/src/vec/mod.rs index ab0c4a041c881..34cf5649086de 100644 --- a/library/alloc/src/vec/mod.rs +++ b/library/alloc/src/vec/mod.rs @@ -399,6 +399,7 @@ mod spec_extend; #[rustc_insignificant_dtor] pub struct Vec<T, #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global> { buf: Box<[MaybeUninit<T>], A>, + phantom: PhantomData<T>, len: usize, } @@ -422,7 +423,7 @@ impl<T> Vec<T> { #[stable(feature = "rust1", since = "1.0.0")] #[must_use] pub const fn new() -> Self { - Vec { buf: Box::<[MaybeUninit<T>]>::EMPTY, len: 0 } + Vec { buf: Box::<[MaybeUninit<T>]>::EMPTY, phantom: PhantomData, len: 0 } } /// Constructs a new, empty `Vec<T>` with the specified capacity. @@ -561,7 +562,7 @@ impl<T, A: Allocator> Vec<T, A> { #[inline] #[unstable(feature = "allocator_api", issue = "32838")] pub const fn new_in(alloc: A) -> Self { - Vec { buf: Box::empty_in(alloc), len: 0 } + Vec { buf: Box::empty_in(alloc), phantom: PhantomData, len: 0 } } /// Constructs a new, empty `Vec<T, A>` with the specified capacity with the provided @@ -610,7 +611,7 @@ impl<T, A: Allocator> Vec<T, A> { #[inline] #[unstable(feature = "allocator_api", issue = "32838")] pub fn with_capacity_in(capacity: usize, alloc: A) -> Self { - Vec { buf: Box::new_uninit_slice_in(capacity, alloc), len: 0 } + Vec { buf: Box::new_uninit_slice_in(capacity, alloc), phantom: PhantomData, len: 0 } } /// Creates a `Vec<T, A>` directly from the raw components of another vector. @@ -686,7 +687,13 @@ impl<T, A: Allocator> Vec<T, A> { #[inline] #[unstable(feature = "allocator_api", issue = "32838")] pub unsafe fn from_raw_parts_in(ptr: *mut T, length: usize, capacity: usize, alloc: A) -> Self { - unsafe { Vec { buf: storage_from_raw_parts_in(ptr.cast(), capacity, alloc), len: length } } + unsafe { + Vec { + buf: storage_from_raw_parts_in(ptr.cast(), capacity, alloc), + phantom: PhantomData, + len: length, + } + } } /// Decomposes a `Vec<T, A>` into its raw components. @@ -2632,22 +2639,14 @@ impl<T, A: Allocator> IntoIterator for Vec<T, A> { fn into_iter(self) -> IntoIter<T, A> { unsafe { let mut me = ManuallyDrop::new(self); - let alloc = ManuallyDrop::new(ptr::read(me.allocator())); + let buf = core::ptr::read(&me.buf); let begin = me.as_mut_ptr(); let end = if mem::size_of::<T>() == 0 { arith_offset(begin as *const i8, me.len() as isize) as *const T } else { begin.add(me.len()) as *const T }; - let cap = me.capacity(); - IntoIter { - buf: NonNull::new_unchecked(begin), - phantom: PhantomData, - cap, - alloc, - ptr: begin, - end, - } + IntoIter { buf, phantom: PhantomData, ptr: begin, end } } } }
diff --git a/library/alloc/src/vec/spec_from_elem.rs b/library/alloc/src/vec/spec_from_elem.rs index 8f0dfec758434..e84cd4b90be23 100644 --- a/library/alloc/src/vec/spec_from_elem.rs +++ b/library/alloc/src/vec/spec_from_elem.rs @@ -1,6 +1,9 @@ use crate::alloc::Allocator; use crate::boxed::Box; -use core::ptr::{self}; +use core::{ + marker::PhantomData, + ptr::{self}, +}; use super::{ExtendElement, IsZero, Vec}; @@ -21,7 +24,7 @@ impl SpecFromElem for i8 { #[inline] fn from_elem<A: Allocator>(elem: i8, n: usize, alloc: A) -> Vec<i8, A> { if elem == 0 { - return Vec { buf: Box::new_zeroed_slice_in(n, alloc), len: n }; + return Vec { buf: Box::new_zeroed_slice_in(n, alloc), phantom: PhantomData, len: n }; } unsafe { let mut v = Vec::with_capacity_in(n, alloc); @@ -36,7 +39,7 @@ impl SpecFromElem for u8 { #[inline] fn from_elem<A: Allocator>(elem: u8, n: usize, alloc: A) -> Vec<u8, A> { if elem == 0 { - return Vec { buf: Box::new_zeroed_slice_in(n, alloc), len: n }; + return Vec { buf: Box::new_zeroed_slice_in(n, alloc), phantom: PhantomData, len: n }; } unsafe { let mut v = Vec::with_capacity_in(n, alloc); @@ -51,7 +54,7 @@ impl<T: Clone + IsZero> SpecFromElem for T { #[inline] fn from_elem<A: Allocator>(elem: T, n: usize, alloc: A) -> Vec<T, A> { if elem.is_zero() { - return Vec { buf: Box::new_zeroed_slice_in(n, alloc), len: n }; + return Vec { buf: Box::new_zeroed_slice_in(n, alloc), phantom: PhantomData, len: n }; } let mut v = Vec::with_capacity_in(n, alloc); v.extend_with(n, ExtendElement(elem));
diff --git a/library/alloc/src/vec/spec_from_iter.rs b/library/alloc/src/vec/spec_from_iter.rs index efa6868473e49..360cb7a1549b6 100644 --- a/library/alloc/src/vec/spec_from_iter.rs +++ b/library/alloc/src/vec/spec_from_iter.rs @@ -1,3 +1,4 @@ +use core::marker::PhantomData; use core::mem::ManuallyDrop; use core::ptr::{self}; @@ -45,13 +46,14 @@ impl<T> SpecFromIter<T, IntoIter<T>> for Vec<T> { // is not strictly necessary as Vec's allocation behavior is intentionally unspecified. // But it is a conservative choice. let has_advanced = iterator.buf.as_ptr() as *const _ != iterator.ptr; - if !has_advanced || iterator.len() >= iterator.cap / 2 { + if !has_advanced || iterator.len() >= iterator.buf.len() / 2 { unsafe { let it = ManuallyDrop::new(iterator); + let buf = core::ptr::read(&it.buf); if has_advanced { - ptr::copy(it.ptr, it.buf.as_ptr(), it.len()); + ptr::copy(it.ptr, buf.as_ptr().cast::<T>() as *mut T, it.len()); } - return Vec::from_raw_parts(it.buf.as_ptr(), it.len(), it.cap); + return Vec { buf, phantom: PhantomData, len: it.len() }; } }
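That closes patch 10. The condition rewritten above (`iterator.buf.len()` standing in for the old `iterator.cap`) decides whether `from_iter` may recycle the iterator's own allocation. As a standalone predicate (illustrative names):

fn should_reuse(has_advanced: bool, len: usize, cap: usize) -> bool {
    // Reuse when the elements still sit at the front of the buffer, or when
    // at least half the capacity would stay occupied after copying back.
    !has_advanced || len >= cap / 2
}

fn main() {
    assert!(should_reuse(false, 0, 64)); // untouched iterator: always reuse
    assert!(should_reuse(true, 40, 64)); // still half full: reuse
    assert!(!should_reuse(true, 3, 64)); // mostly drained: reallocate instead
}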
From f667257cadc9570070736f7b901cff8a4e7577ca Mon Sep 17 00:00:00 2001 From: Conrad Ludgate Date: Sun, 10 Apr 2022 09:55:18 +0100 Subject: [PATCH 11/14] fix mir tests --- .../inline/inline_into_box_place.main.Inline.32bit.diff | 6 +++--- .../inline/inline_into_box_place.main.Inline.64bit.diff | 6 +++--- ...n_place.Vec_i32_.AddMovesForPackedDrops.before.32bit.mir | 4 ++-- ...n_place.Vec_i32_.AddMovesForPackedDrops.before.64bit.mir | 4 ++-- 4 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/src/test/mir-opt/inline/inline_into_box_place.main.Inline.32bit.diff b/src/test/mir-opt/inline/inline_into_box_place.main.Inline.32bit.diff index 074ad067ff899..2b56075b5d476 100644 --- a/src/test/mir-opt/inline/inline_into_box_place.main.Inline.32bit.diff +++ b/src/test/mir-opt/inline/inline_into_box_place.main.Inline.32bit.diff @@ -35,7 +35,7 @@ + StorageLive(_7); // scope 0 at $DIR/inline-into-box-place.rs:8:33: 8:43 + _7 = &mut (*_5); // scope 0 at $DIR/inline-into-box-place.rs:8:33: 8:43 + Deinit((*_7)); // scope 3 at $SRC_DIR/alloc/src/vec/mod.rs:LL:COL -+ ((*_7).0: alloc::raw_vec::RawVec<u32>) = const alloc::raw_vec::RawVec::<u32> { ptr: Unique::<u32> { pointer: NonNull::<u32> { pointer: {0x4 as *const u32} }, _marker: PhantomData::<u32> }, cap: 0_usize, alloc: std::alloc::Global }; // scope 3 at $SRC_DIR/alloc/src/vec/mod.rs:LL:COL ++ ((*_7).0: std::boxed::Box<[std::mem::MaybeUninit<u32>]>) = const Box::<[MaybeUninit<u32>]>(Unique::<[MaybeUninit<u32>]> { pointer: ByRef { alloc: Allocation { bytes: [4, 0, 0, 0, 0, 0, 0, 0], relocations: Relocations(SortedMap { data: [] }), init_mask: InitMask { blocks: [255], len: Size { raw: 8 } }, align: Align { pow2: 2 }, mutability: Not, extra: () }, offset: Size { raw: 0 } }: *const [MaybeUninit::<u32>], _marker: PhantomData::<[MaybeUninit<u32>]> }, std::alloc::Global); // scope 3 at $SRC_DIR/alloc/src/vec/mod.rs:LL:COL // mir::Constant - // + span: $DIR/inline-into-box-place.rs:8:33: 8:41 - // + user_ty: UserType(1) @@ -45,8 +45,8 @@ - bb2: { + // + span: $SRC_DIR/alloc/src/vec/mod.rs:LL:COL + // + user_ty: UserType(0) -+ // + literal: Const { ty: alloc::raw_vec::RawVec<u32>, val: Value(ByRef { alloc: Allocation { bytes: [4, 0, 0, 0, 0, 0, 0, 0], relocations: Relocations(SortedMap { data: [] }), init_mask: InitMask { blocks: [255], len: Size { raw: 8 } }, align: Align { pow2: 2 }, mutability: Not, extra: () }, offset: Size { raw: 0 } }) } -+ ((*_7).1: usize) = const 0_usize; // scope 3 at $SRC_DIR/alloc/src/vec/mod.rs:LL:COL ++ // + literal: Const { ty: Box<[MaybeUninit<u32>]>, val: Value(ByRef { alloc: Allocation { bytes: [4, 0, 0, 0, 0, 0, 0, 0], relocations: Relocations(SortedMap { data: [] }), init_mask: InitMask { blocks: [255], len: Size { raw: 8 } }, align: Align { pow2: 2 }, mutability: Not, extra: () }, offset: Size { raw: 0 } }) } ++ ((*_7).2: usize) = const 0_usize; // scope 3 at $SRC_DIR/alloc/src/vec/mod.rs:LL:COL + StorageDead(_7); // scope 0 at $DIR/inline-into-box-place.rs:8:33: 8:43 _1 = move _5; // scope 0 at $DIR/inline-into-box-place.rs:8:29: 8:43 StorageDead(_5); // scope 0 at $DIR/inline-into-box-place.rs:8:42: 8:43
diff --git a/src/test/mir-opt/inline/inline_into_box_place.main.Inline.64bit.diff b/src/test/mir-opt/inline/inline_into_box_place.main.Inline.64bit.diff index a055ae9864f5f..c83d6894ac7e7 100644 --- a/src/test/mir-opt/inline/inline_into_box_place.main.Inline.64bit.diff +++ b/src/test/mir-opt/inline/inline_into_box_place.main.Inline.64bit.diff @@ -35,7 +35,7 @@ + StorageLive(_7); // scope 0 at $DIR/inline-into-box-place.rs:8:33: 8:43 + _7 = &mut (*_5); // scope 0 at $DIR/inline-into-box-place.rs:8:33: 8:43 + Deinit((*_7)); // scope 3 at $SRC_DIR/alloc/src/vec/mod.rs:LL:COL -+ ((*_7).0: alloc::raw_vec::RawVec<u32>) = const alloc::raw_vec::RawVec::<u32> { ptr: Unique::<u32> { pointer: NonNull::<u32> { pointer: {0x4 as *const u32} }, _marker: PhantomData::<u32> }, cap: 0_usize, alloc: std::alloc::Global }; // scope 3 at $SRC_DIR/alloc/src/vec/mod.rs:LL:COL ++ ((*_7).0: std::boxed::Box<[std::mem::MaybeUninit<u32>]>) = const Box::<[MaybeUninit<u32>]>(Unique::<[MaybeUninit<u32>]> { pointer: ByRef { alloc: Allocation { bytes: [4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], relocations: Relocations(SortedMap { data: [] }), init_mask: InitMask { blocks: [65535], len: Size { raw: 16 } }, align: Align { pow2: 3 }, mutability: Not, extra: () }, offset: Size { raw: 0 } }: *const [MaybeUninit::<u32>], _marker: PhantomData::<[MaybeUninit<u32>]> }, std::alloc::Global); // scope 3 at $SRC_DIR/alloc/src/vec/mod.rs:LL:COL // mir::Constant - // + span: $DIR/inline-into-box-place.rs:8:33: 8:41 - // + user_ty: UserType(1) @@ -45,8 +45,8 @@ - bb2: { + // + span: $SRC_DIR/alloc/src/vec/mod.rs:LL:COL + // + user_ty: UserType(0) -+ // + literal: Const { ty: alloc::raw_vec::RawVec<u32>, val: Value(ByRef { alloc: Allocation { bytes: [4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], relocations: Relocations(SortedMap { data: [] }), init_mask: InitMask { blocks: [65535], len: Size { raw: 16 } }, align: Align { pow2: 3 }, mutability: Not, extra: () }, offset: Size { raw: 0 } }) } -+ ((*_7).1: usize) = const 0_usize; // scope 3 at $SRC_DIR/alloc/src/vec/mod.rs:LL:COL ++ // + literal: Const { ty: Box<[MaybeUninit<u32>]>, val: Value(ByRef { alloc: Allocation { bytes: [4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], relocations: Relocations(SortedMap { data: [] }), init_mask: InitMask { blocks: [65535], len: Size { raw: 16 } }, align: Align { pow2: 3 }, mutability: Not, extra: () }, offset: Size { raw: 0 } }) } ++ ((*_7).2: usize) = const 0_usize; // scope 3 at $SRC_DIR/alloc/src/vec/mod.rs:LL:COL + StorageDead(_7); // scope 0 at $DIR/inline-into-box-place.rs:8:33: 8:43 _1 = move _5; // scope 0 at $DIR/inline-into-box-place.rs:8:29: 8:43 StorageDead(_5); // scope 0 at $DIR/inline-into-box-place.rs:8:42: 8:43
diff --git a/src/test/mir-opt/unusual_item_types.core.ptr-drop_in_place.Vec_i32_.AddMovesForPackedDrops.before.32bit.mir b/src/test/mir-opt/unusual_item_types.core.ptr-drop_in_place.Vec_i32_.AddMovesForPackedDrops.before.32bit.mir index 5dc81b787a9fa..d3deecdf497c3 100644 --- a/src/test/mir-opt/unusual_item_types.core.ptr-drop_in_place.Vec_i32_.AddMovesForPackedDrops.before.32bit.mir +++ b/src/test/mir-opt/unusual_item_types.core.ptr-drop_in_place.Vec_i32_.AddMovesForPackedDrops.before.32bit.mir @@ -22,11 +22,11 @@ fn std::ptr::drop_in_place(_1: *mut Vec<i32>) -> () { } bb4 (cleanup): { - drop(((*_1).0: alloc::raw_vec::RawVec<i32>)) -> bb2; // scope 0 at $SRC_DIR/core/src/ptr/mod.rs:LL:COL + drop(((*_1).0: std::boxed::Box<[std::mem::MaybeUninit<i32>]>)) -> bb2; // scope 0 at $SRC_DIR/core/src/ptr/mod.rs:LL:COL } bb5: {
- drop(((*_1).0: alloc::raw_vec::RawVec<i32>)) -> [return: bb3, unwind: bb2]; // scope 0 at $SRC_DIR/core/src/ptr/mod.rs:LL:COL + drop(((*_1).0: std::boxed::Box<[std::mem::MaybeUninit<i32>]>)) -> [return: bb3, unwind: bb2]; // scope 0 at $SRC_DIR/core/src/ptr/mod.rs:LL:COL } bb6: {
diff --git a/src/test/mir-opt/unusual_item_types.core.ptr-drop_in_place.Vec_i32_.AddMovesForPackedDrops.before.64bit.mir b/src/test/mir-opt/unusual_item_types.core.ptr-drop_in_place.Vec_i32_.AddMovesForPackedDrops.before.64bit.mir index 5dc81b787a9fa..d3deecdf497c3 100644 --- a/src/test/mir-opt/unusual_item_types.core.ptr-drop_in_place.Vec_i32_.AddMovesForPackedDrops.before.64bit.mir +++ b/src/test/mir-opt/unusual_item_types.core.ptr-drop_in_place.Vec_i32_.AddMovesForPackedDrops.before.64bit.mir @@ -22,11 +22,11 @@ fn std::ptr::drop_in_place(_1: *mut Vec<i32>) -> () { } bb4 (cleanup): { - drop(((*_1).0: alloc::raw_vec::RawVec<i32>)) -> bb2; // scope 0 at $SRC_DIR/core/src/ptr/mod.rs:LL:COL + drop(((*_1).0: std::boxed::Box<[std::mem::MaybeUninit<i32>]>)) -> bb2; // scope 0 at $SRC_DIR/core/src/ptr/mod.rs:LL:COL } bb5: { - drop(((*_1).0: alloc::raw_vec::RawVec<i32>)) -> [return: bb3, unwind: bb2]; // scope 0 at $SRC_DIR/core/src/ptr/mod.rs:LL:COL + drop(((*_1).0: std::boxed::Box<[std::mem::MaybeUninit<i32>]>)) -> [return: bb3, unwind: bb2]; // scope 0 at $SRC_DIR/core/src/ptr/mod.rs:LL:COL } bb6: {
From 0c5fbff26440d06ce5abd188d4d635aa0279fe52 Mon Sep 17 00:00:00 2001 From: Conrad Ludgate Date: Sun, 10 Apr 2022 13:28:12 +0100 Subject: [PATCH 12/14] fix gdb? --- src/etc/gdb_providers.py | 28 +++++++++++++++++++++------- 1 file changed, 21 insertions(+), 7 deletions(-)
diff --git a/src/etc/gdb_providers.py b/src/etc/gdb_providers.py index 0a52b8c976f6a..61f5931ac8a02 100644 --- a/src/etc/gdb_providers.py +++ b/src/etc/gdb_providers.py @@ -47,7 +47,7 @@ def __init__(self, valobj): self.valobj = valobj vec = valobj["vec"] self.length = int(vec["len"]) - self.data_ptr = unwrap_unique_or_non_null(vec["buf"]["ptr"]) + self.data_ptr = vec["buf"]["data_ptr"] def to_string(self): return self.data_ptr.lazy_string(encoding="utf-8", length=self.length) @@ -65,7 +65,7 @@ def __init__(self, valobj): vec = buf[ZERO_FIELD] if is_windows else buf self.length = int(vec["len"]) - self.data_ptr = unwrap_unique_or_non_null(vec["buf"]["ptr"]) + self.data_ptr = vec["buf"]["data_ptr"] def to_string(self): return self.data_ptr.lazy_string(encoding="utf-8", length=self.length) @@ -103,6 +103,20 @@ def _enumerate_array_elements(element_ptrs): yield key, element +def _enumerate_mu_array_elements(element_ptrs): + for (i, element_ptr) in enumerate(element_ptrs): + key = "[{}]".format(i) + element = element_ptr.dereference()["value"]["value"] + + try: + str(element) + except RuntimeError: + yield key, "inaccessible" + + break + + yield key, element + class StdSliceProvider: def __init__(self, valobj): self.valobj = valobj @@ -125,13 +139,13 @@ class StdVecProvider: def __init__(self, valobj): self.valobj = valobj self.length = int(valobj["len"]) - self.data_ptr = unwrap_unique_or_non_null(valobj["buf"]["ptr"]) + self.data_ptr = valobj["buf"]["data_ptr"] def to_string(self): return "Vec(size={})".format(self.length) def children(self): - return _enumerate_array_elements( + return _enumerate_mu_array_elements( self.data_ptr + index for index in xrange(self.length) ) @@ -145,8 +159,8 @@ def __init__(self, valobj): self.valobj = valobj self.head = int(valobj["head"]) self.tail = int(valobj["tail"])
- self.cap = int(valobj["buf"]["cap"]) - self.data_ptr = unwrap_unique_or_non_null(valobj["buf"]["ptr"]) + self.cap = int(valobj["buf"]["length"]) + self.data_ptr = valobj["buf"]["data_ptr"] if self.head >= self.tail: self.size = self.head - self.tail else: @@ -156,7 +170,7 @@ def to_string(self): return "VecDeque(size={})".format(self.size) def children(self): - return _enumerate_array_elements( + return _enumerate_mu_array_elements( (self.data_ptr + ((self.tail + index) % self.cap)) for index in xrange(self.size) )
From de18f2acadb5c6ee4620a65f01b1dafeb32e3296 Mon Sep 17 00:00:00 2001 From: Conrad Ludgate Date: Sun, 10 Apr 2022 23:48:33 +0100 Subject: [PATCH 13/14] fix allocation of ZSTs --- library/alloc/src/box_storage.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/library/alloc/src/box_storage.rs b/library/alloc/src/box_storage.rs index f9ee26be6aa0d..e0e3d549c6119 100644 --- a/library/alloc/src/box_storage.rs +++ b/library/alloc/src/box_storage.rs @@ -356,8 +356,12 @@ pub(crate) fn allocate_in<T, A: Allocator>( alloc: A, ) -> Box<[mem::MaybeUninit<T>], A> { // Don't allocate here because `Drop` will not deallocate when `capacity` is 0. - if mem::size_of::<T>() == 0 || capacity == 0 { + if capacity == 0 { Box::empty_in(alloc) + } else if mem::size_of::<T>() == 0 { + unsafe { + storage_from_raw_parts_in(core::ptr::Unique::dangling().as_ptr(), capacity, alloc) + } } else { // We avoid `unwrap_or_else` here because it bloats the amount of // LLVM IR generated.
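Patch 13 splits the old `size_of::<T>() == 0 || capacity == 0` test in two: an empty request still yields the empty box, but a nonzero-capacity request for a ZST must yield a box whose slice length is the requested capacity, pointing at a dangling well-aligned address. The same behavior is observable on current std (the address assertion reflects today's implementation, not a documented guarantee):

use std::mem::MaybeUninit;

fn main() {
    let b: Box<[MaybeUninit<()>]> = Box::new_uninit_slice(1000);
    assert_eq!(b.len(), 1000); // the fat pointer carries the capacity
    assert_eq!(b.as_ptr() as usize, 1); // align_of::<()>(): dangling, nothing allocated
}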
From dacece5ed2e613f350d1d60fb24a3acdba61e35f Mon Sep 17 00:00:00 2001 From: Conrad Ludgate Date: Sat, 30 Apr 2022 08:01:47 +0100 Subject: [PATCH 14/14] fix mir tests from rebase --- .../mir-opt/inline/inline_into_box_place.main.Inline.32bit.diff | 2 +- .../mir-opt/inline/inline_into_box_place.main.Inline.64bit.diff | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/src/test/mir-opt/inline/inline_into_box_place.main.Inline.32bit.diff b/src/test/mir-opt/inline/inline_into_box_place.main.Inline.32bit.diff index 2b56075b5d476..9aeedde7ae84a 100644 --- a/src/test/mir-opt/inline/inline_into_box_place.main.Inline.32bit.diff +++ b/src/test/mir-opt/inline/inline_into_box_place.main.Inline.32bit.diff @@ -35,7 +35,7 @@ + StorageLive(_7); // scope 0 at $DIR/inline-into-box-place.rs:8:33: 8:43 + _7 = &mut (*_5); // scope 0 at $DIR/inline-into-box-place.rs:8:33: 8:43 + Deinit((*_7)); // scope 3 at $SRC_DIR/alloc/src/vec/mod.rs:LL:COL -+ ((*_7).0: std::boxed::Box<[std::mem::MaybeUninit<u32>]>) = const Box::<[MaybeUninit<u32>]>(Unique::<[MaybeUninit<u32>]> { pointer: ByRef { alloc: Allocation { bytes: [4, 0, 0, 0, 0, 0, 0, 0], relocations: Relocations(SortedMap { data: [] }), init_mask: InitMask { blocks: [255], len: Size { raw: 8 } }, align: Align { pow2: 2 }, mutability: Not, extra: () }, offset: Size { raw: 0 } }: *const [MaybeUninit::<u32>], _marker: PhantomData::<[MaybeUninit<u32>]> }, std::alloc::Global); // scope 3 at $SRC_DIR/alloc/src/vec/mod.rs:LL:COL ++ ((*_7).0: std::boxed::Box<[std::mem::MaybeUninit<u32>]>) = const Box::<[MaybeUninit<u32>]>(Unique::<[MaybeUninit<u32>]> { pointer: NonNull::<[MaybeUninit<u32>]> { pointer: ByRef { alloc: Allocation { bytes: [4, 0, 0, 0, 0, 0, 0, 0], relocations: Relocations(SortedMap { data: [] }), init_mask: InitMask { blocks: [255], len: Size { raw: 8 } }, align: Align { pow2: 2 }, mutability: Not, extra: () }, offset: Size { raw: 0 } }: *const [MaybeUninit::<u32>] }, _marker: PhantomData::<[MaybeUninit<u32>]> }, std::alloc::Global); // scope 3 at $SRC_DIR/alloc/src/vec/mod.rs:LL:COL // mir::Constant - // + span: $DIR/inline-into-box-place.rs:8:33: 8:41 - // + user_ty: UserType(1)
diff --git a/src/test/mir-opt/inline/inline_into_box_place.main.Inline.64bit.diff b/src/test/mir-opt/inline/inline_into_box_place.main.Inline.64bit.diff index c83d6894ac7e7..007fe633b89e6 100644 --- a/src/test/mir-opt/inline/inline_into_box_place.main.Inline.64bit.diff +++ b/src/test/mir-opt/inline/inline_into_box_place.main.Inline.64bit.diff @@ -35,7 +35,7 @@ + StorageLive(_7); // scope 0 at $DIR/inline-into-box-place.rs:8:33: 8:43 + _7 = &mut (*_5); // scope 0 at $DIR/inline-into-box-place.rs:8:33: 8:43 + Deinit((*_7)); // scope 3 at $SRC_DIR/alloc/src/vec/mod.rs:LL:COL -+ ((*_7).0: std::boxed::Box<[std::mem::MaybeUninit<u32>]>) = const Box::<[MaybeUninit<u32>]>(Unique::<[MaybeUninit<u32>]> { pointer: ByRef { alloc: Allocation { bytes: [4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], relocations: Relocations(SortedMap { data: [] }), init_mask: InitMask { blocks: [65535], len: Size { raw: 16 } }, align: Align { pow2: 3 }, mutability: Not, extra: () }, offset: Size { raw: 0 } }: *const [MaybeUninit::<u32>], _marker: PhantomData::<[MaybeUninit<u32>]> }, std::alloc::Global); // scope 3 at $SRC_DIR/alloc/src/vec/mod.rs:LL:COL ++ ((*_7).0: std::boxed::Box<[std::mem::MaybeUninit<u32>]>) = const Box::<[MaybeUninit<u32>]>(Unique::<[MaybeUninit<u32>]> { pointer: NonNull::<[MaybeUninit<u32>]> { pointer: ByRef { alloc: Allocation { bytes: [4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], relocations: Relocations(SortedMap { data: [] }), init_mask: InitMask { blocks: [65535], len: Size { raw: 16 } }, align: Align { pow2: 3 }, mutability: Not, extra: () }, offset: Size { raw: 0 } }: *const [MaybeUninit::<u32>] }, _marker: PhantomData::<[MaybeUninit<u32>]> }, std::alloc::Global); // scope 3 at $SRC_DIR/alloc/src/vec/mod.rs:LL:COL // mir::Constant - // + span: $DIR/inline-into-box-place.rs:8:33: 8:41 - // + user_ty: UserType(1)
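Taken together, the series leaves `Vec` in roughly this shape (a paraphrase for orientation, not the verbatim source): the boxed slice supplies the pointer, the allocator, and the capacity, while `PhantomData` reasserts element ownership.

use core::marker::PhantomData;
use std::mem::MaybeUninit;

struct SketchVec<T> {
    buf: Box<[MaybeUninit<T>]>, // allocation + capacity (buf.len())
    phantom: PhantomData<T>,    // drop-check marker: this type owns T's
    len: usize,                 // length of the initialized prefix of `buf`
}

fn main() {
    let v: SketchVec<u32> = SketchVec { buf: Box::new_uninit_slice(8), phantom: PhantomData, len: 0 };
    assert_eq!(v.buf.len(), 8);
    assert_eq!(v.len, 0);
}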