diff --git a/library/alloc/src/box_storage.rs b/library/alloc/src/box_storage.rs new file mode 100644 index 0000000000000..e0e3d549c6119 --- /dev/null +++ b/library/alloc/src/box_storage.rs @@ -0,0 +1,390 @@ +#![unstable(feature = "raw_vec_internals", reason = "unstable const warnings", issue = "none")] + +use core::alloc::LayoutError; +use core::cmp; +use core::intrinsics; +use core::mem; +use core::mem::MaybeUninit; +use core::ptr::NonNull; + +#[cfg(not(no_global_oom_handling))] +use crate::alloc::handle_alloc_error; +use crate::alloc::{Allocator, Layout}; +use crate::boxed::Box; +use crate::collections::TryReserveError; +use crate::collections::TryReserveErrorKind::*; + +#[cfg(test)] +mod tests; + +#[cfg(not(no_global_oom_handling))] +pub(crate) enum AllocInit { + /// The contents of the new memory are uninitialized. + Uninitialized, + /// The new memory is guaranteed to be zeroed. + Zeroed, +} + +pub(crate) trait BoxStorage: Sized { + // Tiny Vecs are dumb. Skip to: + // - 8 if the element size is 1, because any heap allocators is likely + // to round up a request of less than 8 bytes to at least 8 bytes. + // - 4 if elements are moderate-sized (<= 1 KiB). + // - 1 otherwise, to avoid wasting too much space for very short Vecs. + const MIN_NON_ZERO_CAP: usize; + + /// Gets the capacity of the allocation. + /// + /// This will always be `usize::MAX` if `T` is zero-sized. + fn capacity(&self) -> usize; + + /// Ensures that the buffer contains at least enough space to hold `len + + /// additional` elements. If it doesn't already have enough capacity, will + /// reallocate enough space plus comfortable slack space to get amortized + /// *O*(1) behavior. Will limit this behavior if it would needlessly cause + /// itself to panic. + /// + /// If `len` exceeds `self.capacity()`, this may fail to actually allocate + /// the requested space. This is not really unsafe, but the unsafe + /// code *you* write that relies on the behavior of this function may break. + /// + /// This is ideal for implementing a bulk-push operation like `extend`. + /// + /// # Panics + /// + /// Panics if the new capacity exceeds `isize::MAX` bytes. + /// + /// # Aborts + /// + /// Aborts on OOM. + #[cfg(not(no_global_oom_handling))] + #[inline] + fn reserve(&mut self, len: usize, additional: usize) { + // Callers expect this function to be very cheap when there is already sufficient capacity. + // Therefore, we move all the resizing and error-handling logic from grow_amortized and + // handle_reserve behind a call, while making sure that this function is likely to be + // inlined as just a comparison and a call if the comparison fails. + #[cold] + fn do_reserve_and_handle(slf: &mut T, len: usize, additional: usize) { + handle_reserve(slf.grow_amortized(len, additional)); + } + + if self.needs_to_grow(len, additional) { + do_reserve_and_handle(self, len, additional); + } + } + + /// Returns if the buffer needs to grow to fulfill the needed extra capacity. + /// Mainly used to make inlining reserve-calls possible without inlining `grow`. + fn needs_to_grow(&self, len: usize, additional: usize) -> bool { + additional > self.capacity().wrapping_sub(len) + } + + /// A specialized version of `reserve()` used only by the hot and + /// oft-instantiated `Vec::push()`, which does its own capacity check. + #[cfg(not(no_global_oom_handling))] + #[inline(never)] + fn reserve_for_push(&mut self, len: usize) { + handle_reserve(self.grow_amortized(len, 1)); + } + + /// Shrinks the buffer down to the specified capacity. 
If the given amount + /// is 0, actually completely deallocates. + /// + /// # Panics + /// + /// Panics if the given amount is *larger* than the current capacity. + /// + /// # Aborts + /// + /// Aborts on OOM. + #[cfg(not(no_global_oom_handling))] + fn shrink_to_fit(&mut self, cap: usize) { + handle_reserve(self.shrink(cap)); + } + + /// The same as `reserve`, but returns on errors instead of panicking or aborting. + fn try_reserve(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> { + if self.needs_to_grow(len, additional) { + self.grow_amortized(len, additional) + } else { + Ok(()) + } + } + + /// Ensures that the buffer contains at least enough space to hold `len + + /// additional` elements. If it doesn't already, will reallocate the + /// minimum possible amount of memory necessary. Generally this will be + /// exactly the amount of memory necessary, but in principle the allocator + /// is free to give back more than we asked for. + /// + /// If `len` exceeds `self.capacity()`, this may fail to actually allocate + /// the requested space. This is not really unsafe, but the unsafe code + /// *you* write that relies on the behavior of this function may break. + /// + /// # Panics + /// + /// Panics if the new capacity exceeds `isize::MAX` bytes. + /// + /// # Aborts + /// + /// Aborts on OOM. + #[cfg(not(no_global_oom_handling))] + fn reserve_exact(&mut self, len: usize, additional: usize) { + handle_reserve(self.try_reserve_exact(len, additional)); + } + + /// The same as `reserve_exact`, but returns on errors instead of panicking or aborting. + fn try_reserve_exact(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> { + if self.needs_to_grow(len, additional) { self.grow_exact(len, additional) } else { Ok(()) } + } + + fn grow_amortized(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError>; + fn grow_exact(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError>; + fn shrink(&mut self, cap: usize) -> Result<(), TryReserveError>; +} + +impl BoxStorage for Box<[mem::MaybeUninit], A> { + const MIN_NON_ZERO_CAP: usize = if mem::size_of::() == 1 { + 8 + } else if mem::size_of::() <= 1024 { + 4 + } else { + 1 + }; + + #[inline(always)] + fn capacity(&self) -> usize { + if mem::size_of::() == 0 { usize::MAX } else { self.len() } + } + + // This method is usually instantiated many times. So we want it to be as + // small as possible, to improve compile times. But we also want as much of + // its contents to be statically computable as possible, to make the + // generated code run faster. Therefore, this method is carefully written + // so that all of the code that depends on `T` is within it, while as much + // of the code that doesn't depend on `T` as possible is in functions that + // are non-generic over `T`. + fn grow_amortized(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> { + // This is ensured by the calling contexts. + debug_assert!(additional > 0); + + if mem::size_of::() == 0 { + // Since we return a capacity of `usize::MAX` when `elem_size` is + // 0, getting to here necessarily means the boxed-slice is overfull. + return Err(CapacityOverflow.into()); + } + + // Nothing we can really do about these checks, sadly. + let required_cap = len.checked_add(additional).ok_or(CapacityOverflow)?; + + // This guarantees exponential growth. The doubling cannot overflow + // because `cap <= isize::MAX` and the type of `cap` is `usize`. 
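+        // For example, a `Vec<u8>` grown one push at a time goes 0 -> 8 -> 16
+        // -> 32 -> ..., so `n` pushes trigger only O(log n) reallocations and
+        // amortized O(1) work per push (the 8 comes from `MIN_NON_ZERO_CAP`).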
+ let cap = self.len(); + let cap = cmp::max(cap * 2, required_cap); + let cap = cmp::max(Self::MIN_NON_ZERO_CAP, cap); + + replace(self, |current_memory, alloc| { + let new_layout = Layout::array::(cap); + // `finish_grow` is non-generic over `T`. + let ptr = finish_grow(new_layout, current_memory, alloc)?; + Ok(Some((ptr, cap))) + }) + } + + // The constraints on this method are much the same as those on + // `grow_amortized`, but this method is usually instantiated less often so + // it's less critical. + fn grow_exact(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> { + if mem::size_of::() == 0 { + // Since we return a capacity of `usize::MAX` when the type size is + // 0, getting to here necessarily means the boxed-slice is overfull. + return Err(CapacityOverflow.into()); + } + let cap = len.checked_add(additional).ok_or(CapacityOverflow)?; + + replace(self, |current_memory, alloc| { + let new_layout = Layout::array::(cap); + // `finish_grow` is non-generic over `T`. + let ptr = finish_grow(new_layout, current_memory, alloc)?; + Ok(Some((ptr, cap))) + }) + } + + fn shrink(&mut self, cap: usize) -> Result<(), TryReserveError> { + assert!(cap <= self.capacity(), "Tried to shrink to a larger capacity"); + replace(self, |current_memory, alloc| { + let (ptr, layout) = if let Some(mem) = current_memory { mem } else { return Ok(None) }; + + let ptr = unsafe { + // `Layout::array` cannot overflow here because it would have + // overflowed earlier when capacity was larger. + let new_layout = Layout::array::(cap).unwrap_unchecked(); + alloc + .shrink(ptr, layout, new_layout) + .map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })? + }; + Ok(Some((ptr, cap))) + }) + } +} + +pub(crate) unsafe fn storage_from_raw_parts_in( + ptr: *mut T, + len: usize, + alloc: A, +) -> Box<[MaybeUninit], A> { + unsafe { + let raw = core::slice::from_raw_parts_mut(ptr.cast(), len); + Box::from_raw_in(raw, alloc) + } +} + +fn replace( + dst: &mut Box<[mem::MaybeUninit], A>, + f: impl FnOnce( + Option<(NonNull, Layout)>, + &A, + ) -> Result, usize)>, TryReserveError>, +) -> Result<(), TryReserveError> { + unsafe { + let current_memory = slice_layout(&mut *dst); + let alloc = Box::allocator(dst); + match f(current_memory, &alloc) { + Ok(None) => Ok(()), + Ok(Some((ptr, len))) => { + // hack because we don't have access to box here :() + let raw = + core::slice::from_raw_parts_mut(ptr.as_ptr().cast::>(), len); + let this = core::ptr::read(dst); + let (_, alloc) = Box::into_raw_with_allocator(this); + let this = Box::from_raw_in(raw, alloc); + core::ptr::write(dst, this); + Ok(()) + } + Err(err) => Err(err), + } + } +} + +fn slice_layout(slice: &mut [MaybeUninit]) -> Option<(NonNull, Layout)> { + if mem::size_of::() == 0 || slice.len() == 0 { + None + } else { + // We have an allocated chunk of memory, so we can bypass runtime + // checks to get our current layout. + unsafe { + let layout = Layout::array::(slice.len()).unwrap_unchecked(); + Some((NonNull::new_unchecked(slice.as_mut_ptr().cast()), layout)) + } + } +} + +// This function is outside `RawVec` to minimize compile times. See the comment +// above `RawVec::grow_amortized` for details. (The `A` parameter isn't +// significant, because the number of different `A` types seen in practice is +// much smaller than the number of `T` types.) 
+#[inline(never)] +fn finish_grow( + new_layout: Result, + current_memory: Option<(NonNull, Layout)>, + alloc: &A, +) -> Result, TryReserveError> +where + A: Allocator, +{ + // Check for the error here to minimize the size of `RawVec::grow_*`. + let new_layout = new_layout.map_err(|_| CapacityOverflow)?; + + alloc_guard(new_layout.size())?; + + let memory = if let Some((ptr, old_layout)) = current_memory { + debug_assert_eq!(old_layout.align(), new_layout.align()); + unsafe { + // The allocator checks for alignment equality + intrinsics::assume(old_layout.align() == new_layout.align()); + alloc.grow(ptr, old_layout, new_layout) + } + } else { + alloc.allocate(new_layout) + }; + + memory.map_err(|_| AllocError { layout: new_layout, non_exhaustive: () }.into()) +} + +// Central function for reserve error handling. +#[cfg(not(no_global_oom_handling))] +#[inline] +fn handle_reserve(result: Result<(), TryReserveError>) { + match result.map_err(|e| e.kind()) { + Err(CapacityOverflow) => capacity_overflow(), + Err(AllocError { layout, .. }) => handle_alloc_error(layout), + Ok(()) => { /* yay */ } + } +} + +// We need to guarantee the following: +// * We don't ever allocate `> isize::MAX` byte-size objects. +// * We don't overflow `usize::MAX` and actually allocate too little. +// +// On 64-bit we just need to check for overflow since trying to allocate +// `> isize::MAX` bytes will surely fail. On 32-bit and 16-bit we need to add +// an extra guard for this in case we're running on a platform which can use +// all 4GB in user-space, e.g., PAE or x32. + +#[inline] +pub(crate) fn alloc_guard(alloc_size: usize) -> Result<(), TryReserveError> { + if usize::BITS < 64 && alloc_size > isize::MAX as usize { + Err(CapacityOverflow.into()) + } else { + Ok(()) + } +} + +// One central function responsible for reporting capacity overflows. This'll +// ensure that the code generation related to these panics is minimal as there's +// only one location which panics rather than a bunch throughout the module. +#[cfg(not(no_global_oom_handling))] +pub(crate) fn capacity_overflow() -> ! { + panic!("capacity overflow"); +} + +#[cfg(not(no_global_oom_handling))] +pub(crate) fn allocate_in( + capacity: usize, + init: AllocInit, + alloc: A, +) -> Box<[mem::MaybeUninit], A> { + // Don't allocate here because `Drop` will not deallocate when `capacity` is 0. + if capacity == 0 { + Box::empty_in(alloc) + } else if mem::size_of::() == 0 { + unsafe { + storage_from_raw_parts_in(core::ptr::Unique::dangling().as_ptr(), capacity, alloc) + } + } else { + // We avoid `unwrap_or_else` here because it bloats the amount of + // LLVM IR generated. + let layout = match Layout::array::(capacity) { + Ok(layout) => layout, + Err(_) => capacity_overflow(), + }; + match alloc_guard(layout.size()) { + Ok(_) => {} + Err(_) => capacity_overflow(), + } + let result = match init { + AllocInit::Uninitialized => alloc.allocate(layout), + AllocInit::Zeroed => alloc.allocate_zeroed(layout), + }; + let ptr = match result { + Ok(ptr) => ptr, + Err(_) => handle_alloc_error(layout), + }; + + // Allocators currently return a `NonNull<[u8]>` whose length + // matches the size requested. If that ever changes, the capacity + // here should change to `ptr.len() / mem::size_of::()`. 
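+        // SAFETY: `ptr` was just allocated by `alloc` with a layout fitting
+        // `capacity` elements of `T`, so it may be treated as a slice of
+        // `capacity` uninitialized `T`s owned by that allocator.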
+ unsafe { storage_from_raw_parts_in(ptr.as_ptr().cast(), capacity, alloc) } + } +} diff --git a/library/alloc/src/raw_vec/tests.rs b/library/alloc/src/box_storage/tests.rs similarity index 74% rename from library/alloc/src/raw_vec/tests.rs rename to library/alloc/src/box_storage/tests.rs index ff322f0da97c6..d9f4e12bfea39 100644 --- a/library/alloc/src/raw_vec/tests.rs +++ b/library/alloc/src/box_storage/tests.rs @@ -1,4 +1,6 @@ use super::*; +use crate::alloc::Global; +use core::mem::MaybeUninit; use std::cell::Cell; #[test] @@ -40,23 +42,23 @@ fn allocator_param() { } let a = BoundedAlloc { fuel: Cell::new(500) }; - let mut v: RawVec = RawVec::with_capacity_in(50, a); - assert_eq!(v.alloc.fuel.get(), 450); + let mut v: Box<[MaybeUninit], _> = Box::new_uninit_slice_in(50, a); + assert_eq!(Box::allocator(&v).fuel.get(), 450); v.reserve(50, 150); // (causes a realloc, thus using 50 + 150 = 200 units of fuel) - assert_eq!(v.alloc.fuel.get(), 250); + assert_eq!(Box::allocator(&v).fuel.get(), 250); } #[test] fn reserve_does_not_overallocate() { { - let mut v: RawVec = RawVec::new(); + let mut v: Box<[MaybeUninit]> = Box::empty(); // First, `reserve` allocates like `reserve_exact`. v.reserve(0, 9); assert_eq!(9, v.capacity()); } { - let mut v: RawVec = RawVec::new(); + let mut v: Box<[MaybeUninit]> = Box::empty(); v.reserve(0, 7); assert_eq!(7, v.capacity()); // 97 is more than double of 7, so `reserve` should work @@ -66,7 +68,7 @@ fn reserve_does_not_overallocate() { } { - let mut v: RawVec = RawVec::new(); + let mut v: Box<[MaybeUninit]> = Box::empty(); v.reserve(0, 12); assert_eq!(12, v.capacity()); v.reserve(12, 3); @@ -81,10 +83,9 @@ fn reserve_does_not_overallocate() { struct ZST; // A `RawVec` holding zero-sized elements should always look like this. -fn zst_sanity(v: &RawVec) { +fn zst_sanity(v: &Box<[MaybeUninit]>) { assert_eq!(v.capacity(), usize::MAX); - assert_eq!(v.ptr(), core::ptr::Unique::::dangling().as_ptr()); - assert_eq!(v.current_memory(), None); + assert_eq!(v.as_ptr().cast::(), core::ptr::Unique::::dangling().as_ptr() as *const T); } #[test] @@ -95,22 +96,31 @@ fn zst() { // All these different ways of creating the RawVec produce the same thing. - let v: RawVec = RawVec::new(); + let v: Box<[MaybeUninit]> = Box::empty(); zst_sanity(&v); - let v: RawVec = RawVec::with_capacity_in(100, Global); + let v: Box<[MaybeUninit]> = Box::new_uninit_slice_in(100, Global); zst_sanity(&v); - let v: RawVec = RawVec::with_capacity_in(100, Global); + let v: Box<[MaybeUninit]> = Box::new_uninit_slice_in(100, Global); zst_sanity(&v); - let v: RawVec = RawVec::allocate_in(0, AllocInit::Uninitialized, Global); + let v: Box<[MaybeUninit]> = allocate_in(0, AllocInit::Zeroed, Global); zst_sanity(&v); - let v: RawVec = RawVec::allocate_in(100, AllocInit::Uninitialized, Global); + let v: Box<[MaybeUninit]> = allocate_in(100, AllocInit::Zeroed, Global); zst_sanity(&v); - let mut v: RawVec = RawVec::allocate_in(usize::MAX, AllocInit::Uninitialized, Global); + let v: Box<[MaybeUninit]> = allocate_in(usize::MAX, AllocInit::Zeroed, Global); + zst_sanity(&v); + + let v: Box<[MaybeUninit]> = allocate_in(0, AllocInit::Uninitialized, Global); + zst_sanity(&v); + + let v: Box<[MaybeUninit]> = allocate_in(100, AllocInit::Uninitialized, Global); + zst_sanity(&v); + + let mut v: Box<[MaybeUninit]> = allocate_in(usize::MAX, AllocInit::Uninitialized, Global); zst_sanity(&v); // Check all these operations work as expected with zero-sized elements. 
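// Illustrative sketch, not part of the patch: the capacity rule the new
// `BoxStorage` impl applies, extracted into a free function so the ZST
// assertions in these tests are easy to reason about in isolation.
// `model_capacity` is a hypothetical name used only here.
fn model_capacity<T>(buf_len: usize) -> usize {
    if core::mem::size_of::<T>() == 0 { usize::MAX } else { buf_len }
}
// e.g. model_capacity::<()>(100) == usize::MAX, while model_capacity::<u32>(100) == 100.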
@@ -147,7 +157,7 @@ fn zst() { #[test] #[should_panic(expected = "capacity overflow")] fn zst_reserve_panic() { - let mut v: RawVec = RawVec::new(); + let mut v: Box<[MaybeUninit]> = Box::empty(); zst_sanity(&v); v.reserve(101, usize::MAX - 100); @@ -156,7 +166,7 @@ fn zst_reserve_panic() { #[test] #[should_panic(expected = "capacity overflow")] fn zst_reserve_exact_panic() { - let mut v: RawVec = RawVec::new(); + let mut v: Box<[MaybeUninit]> = Box::empty(); zst_sanity(&v); v.reserve_exact(101, usize::MAX - 100); diff --git a/library/alloc/src/boxed.rs b/library/alloc/src/boxed.rs index 639e7f213eaae..a2a6b5f26e121 100644 --- a/library/alloc/src/boxed.rs +++ b/library/alloc/src/boxed.rs @@ -157,7 +157,7 @@ use crate::alloc::{handle_alloc_error, WriteCloneIntoRaw}; use crate::alloc::{AllocError, Allocator, Global, Layout}; #[cfg(not(no_global_oom_handling))] use crate::borrow::Cow; -use crate::raw_vec::RawVec; +use crate::box_storage::storage_from_raw_parts_in; #[cfg(not(no_global_oom_handling))] use crate::str::from_boxed_utf8_unchecked; #[cfg(not(no_global_oom_handling))] @@ -610,7 +610,7 @@ impl Box<[T]> { #[unstable(feature = "new_uninit", issue = "63291")] #[must_use] pub fn new_uninit_slice(len: usize) -> Box<[mem::MaybeUninit]> { - unsafe { RawVec::with_capacity(len).into_box(len) } + Self::new_uninit_slice_in(len, Global) } /// Constructs a new boxed slice with uninitialized contents, with the memory @@ -635,7 +635,7 @@ impl Box<[T]> { #[unstable(feature = "new_uninit", issue = "63291")] #[must_use] pub fn new_zeroed_slice(len: usize) -> Box<[mem::MaybeUninit]> { - unsafe { RawVec::with_capacity_zeroed(len).into_box(len) } + Self::new_zeroed_slice_in(len, Global) } /// Constructs a new boxed slice with uninitialized contents. Returns an error if @@ -667,7 +667,7 @@ impl Box<[T]> { Err(_) => return Err(AllocError), }; let ptr = Global.allocate(layout)?; - Ok(RawVec::from_raw_parts_in(ptr.as_mut_ptr() as *mut _, len, Global).into_box(len)) + Ok(storage_from_raw_parts_in(ptr.as_mut_ptr() as *mut _, len, Global)) } } @@ -699,12 +699,37 @@ impl Box<[T]> { Err(_) => return Err(AllocError), }; let ptr = Global.allocate_zeroed(layout)?; - Ok(RawVec::from_raw_parts_in(ptr.as_mut_ptr() as *mut _, len, Global).into_box(len)) + Ok(storage_from_raw_parts_in(ptr.as_mut_ptr() as *mut _, len, Global)) } } + + /// HACK(conradludgate): This exists because stable `const fn` can only call stable `const fn`, so + /// they cannot call `Self::empty()`. + /// + /// If you change `Box<[T]>::empty` or dependencies, please take care to not introduce anything + /// that would truly const-call something unstable. + #[unstable(feature = "allocator_api", issue = "32838")] + pub const EMPTY: Self = Self::empty(); + + /// Constructs a new empty boxed slice + #[unstable(feature = "allocator_api", issue = "32838")] + #[inline] + pub const fn empty() -> Self { + Self::empty_in(Global) + } } impl Box<[T], A> { + /// Constructs a new empty boxed slice + #[unstable(feature = "allocator_api", issue = "32838")] + #[inline] + pub const fn empty_in(alloc: A) -> Self { + unsafe { + let slice = core::slice::from_raw_parts_mut(mem::align_of::() as *mut T, 0); + Self::from_raw_in(slice, alloc) + } + } + /// Constructs a new boxed slice with uninitialized contents in the provided allocator. 
/// /// # Examples @@ -732,7 +757,7 @@ impl Box<[T], A> { // #[unstable(feature = "new_uninit", issue = "63291")] #[must_use] pub fn new_uninit_slice_in(len: usize, alloc: A) -> Box<[mem::MaybeUninit], A> { - unsafe { RawVec::with_capacity_in(len, alloc).into_box(len) } + crate::box_storage::allocate_in(len, crate::box_storage::AllocInit::Uninitialized, alloc) } /// Constructs a new boxed slice with uninitialized contents in the provided allocator, @@ -760,7 +785,7 @@ impl Box<[T], A> { // #[unstable(feature = "new_uninit", issue = "63291")] #[must_use] pub fn new_zeroed_slice_in(len: usize, alloc: A) -> Box<[mem::MaybeUninit], A> { - unsafe { RawVec::with_capacity_zeroed_in(len, alloc).into_box(len) } + crate::box_storage::allocate_in(len, crate::box_storage::AllocInit::Zeroed, alloc) } } @@ -1424,10 +1449,10 @@ impl From<&[T]> for Box<[T]> { /// ``` fn from(slice: &[T]) -> Box<[T]> { let len = slice.len(); - let buf = RawVec::with_capacity(len); + let buf = Box::new_uninit_slice(len); unsafe { - ptr::copy_nonoverlapping(slice.as_ptr(), buf.ptr(), len); - buf.into_box(slice.len()).assume_init() + ptr::copy_nonoverlapping(slice.as_ptr(), buf.0.as_ptr().cast(), len); + buf.assume_init() } } } diff --git a/library/alloc/src/collections/vec_deque/mod.rs b/library/alloc/src/collections/vec_deque/mod.rs index 5f1a6848ae62a..551171907153d 100644 --- a/library/alloc/src/collections/vec_deque/mod.rs +++ b/library/alloc/src/collections/vec_deque/mod.rs @@ -18,9 +18,10 @@ use core::ptr::{self, NonNull}; use core::slice; use crate::alloc::{Allocator, Global}; +use crate::box_storage::{storage_from_raw_parts_in, BoxStorage}; +use crate::boxed::Box; use crate::collections::TryReserveError; use crate::collections::TryReserveErrorKind; -use crate::raw_vec::RawVec; use crate::vec::Vec; #[macro_use] @@ -106,7 +107,8 @@ pub struct VecDeque< // is defined as the distance between the two. tail: usize, head: usize, - buf: RawVec, + buf: Box<[MaybeUninit], A>, + phantom: PhantomData, } #[stable(feature = "rust1", since = "1.0.0")] @@ -171,7 +173,7 @@ impl VecDeque { /// Marginally more convenient #[inline] fn ptr(&self) -> *mut T { - self.buf.ptr() + self.buf.as_ptr() as *const T as *mut T } /// Marginally more convenient @@ -181,7 +183,7 @@ impl VecDeque { // For zero sized types, we are always at maximum capacity MAXIMUM_ZST_CAPACITY } else { - self.buf.capacity() + self.buf.len() } } @@ -563,7 +565,12 @@ impl VecDeque { // +1 since the ringbuffer always leaves one space empty let cap = cmp::max(capacity + 1, MINIMUM_CAPACITY + 1).next_power_of_two(); - VecDeque { tail: 0, head: 0, buf: RawVec::with_capacity_in(cap, alloc) } + VecDeque { + tail: 0, + head: 0, + buf: Box::new_uninit_slice_in(cap, alloc), + phantom: PhantomData, + } } /// Provides a reference to the element at the given index. @@ -993,7 +1000,7 @@ impl VecDeque { #[unstable(feature = "allocator_api", issue = "32838")] #[inline] pub fn allocator(&self) -> &A { - self.buf.allocator() + Box::allocator(&self.buf) } /// Returns a front-to-back iterator. 
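// Illustrative sketch, not part of the patch: why `Box::empty_in` can exist
// without allocating. A zero-length slice only needs a non-null, well-aligned
// data pointer, and dropping a `Box` over a zero-size layout never calls the
// allocator, so a dangling pointer is fine. The free function below is a
// hypothetical stable-Rust analogue of `empty_in(Global)`.
use core::ptr::NonNull;

fn empty_boxed_slice<T>() -> Box<[T]> {
    let data = NonNull::<T>::dangling().as_ptr();
    let raw: *mut [T] = core::ptr::slice_from_raw_parts_mut(data, 0);
    // SAFETY: a zero-length slice is valid for any aligned, non-null pointer,
    // and `Box` will not deallocate a zero-size layout on drop.
    unsafe { Box::from_raw(raw) }
}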
@@ -2365,7 +2372,7 @@ impl VecDeque { }; } - let buf = self.buf.ptr(); + let buf = self.ptr(); let cap = self.cap(); let len = self.len(); @@ -3041,8 +3048,8 @@ impl From> for VecDeque { unsafe { let (other_buf, len, capacity, alloc) = other.into_raw_parts_with_alloc(); - let buf = RawVec::from_raw_parts_in(other_buf, capacity, alloc); - VecDeque { tail: 0, head: len, buf } + let buf = storage_from_raw_parts_in(other_buf.cast(), capacity, alloc); + VecDeque { tail: 0, head: len, buf, phantom: PhantomData } } } } @@ -3083,7 +3090,7 @@ impl From> for Vec { unsafe { let other = ManuallyDrop::new(other); - let buf = other.buf.ptr(); + let buf = other.ptr(); let len = other.len(); let cap = other.cap(); let alloc = ptr::read(other.allocator()); diff --git a/library/alloc/src/lib.rs b/library/alloc/src/lib.rs index 4d2dc4ecee0b8..e8d00af42e750 100644 --- a/library/alloc/src/lib.rs +++ b/library/alloc/src/lib.rs @@ -107,6 +107,7 @@ #![feature(const_maybe_uninit_as_mut_ptr)] #![feature(const_refs_to_cell)] #![feature(core_c_str)] +#![feature(const_slice_from_raw_parts)] #![feature(core_intrinsics)] #![feature(core_ffi_c)] #![feature(const_eval_select)] @@ -193,8 +194,6 @@ extern crate test; #[macro_use] mod macros; -mod raw_vec; - // Heaps provided for low-level allocation strategies pub mod alloc; @@ -211,6 +210,7 @@ mod boxed { pub use std::boxed::Box; } pub mod borrow; +mod box_storage; pub mod collections; #[cfg(not(no_global_oom_handling))] pub mod ffi; diff --git a/library/alloc/src/raw_vec.rs b/library/alloc/src/raw_vec.rs deleted file mode 100644 index 9dbac3c36ffb2..0000000000000 --- a/library/alloc/src/raw_vec.rs +++ /dev/null @@ -1,519 +0,0 @@ -#![unstable(feature = "raw_vec_internals", reason = "unstable const warnings", issue = "none")] - -use core::alloc::LayoutError; -use core::cmp; -use core::intrinsics; -use core::mem::{self, ManuallyDrop, MaybeUninit}; -use core::ops::Drop; -use core::ptr::{self, NonNull, Unique}; -use core::slice; - -#[cfg(not(no_global_oom_handling))] -use crate::alloc::handle_alloc_error; -use crate::alloc::{Allocator, Global, Layout}; -use crate::boxed::Box; -use crate::collections::TryReserveError; -use crate::collections::TryReserveErrorKind::*; - -#[cfg(test)] -mod tests; - -#[cfg(not(no_global_oom_handling))] -enum AllocInit { - /// The contents of the new memory are uninitialized. - Uninitialized, - /// The new memory is guaranteed to be zeroed. - Zeroed, -} - -/// A low-level utility for more ergonomically allocating, reallocating, and deallocating -/// a buffer of memory on the heap without having to worry about all the corner cases -/// involved. This type is excellent for building your own data structures like Vec and VecDeque. -/// In particular: -/// -/// * Produces `Unique::dangling()` on zero-sized types. -/// * Produces `Unique::dangling()` on zero-length allocations. -/// * Avoids freeing `Unique::dangling()`. -/// * Catches all overflows in capacity computations (promotes them to "capacity overflow" panics). -/// * Guards against 32-bit systems allocating more than isize::MAX bytes. -/// * Guards against overflowing your length. -/// * Calls `handle_alloc_error` for fallible allocations. -/// * Contains a `ptr::Unique` and thus endows the user with all related benefits. -/// * Uses the excess returned from the allocator to use the largest available capacity. -/// -/// This type does not in anyway inspect the memory that it manages. When dropped it *will* -/// free its memory, but it *won't* try to drop its contents. 
It is up to the user of `RawVec` -/// to handle the actual things *stored* inside of a `RawVec`. -/// -/// Note that the excess of a zero-sized types is always infinite, so `capacity()` always returns -/// `usize::MAX`. This means that you need to be careful when round-tripping this type with a -/// `Box<[T]>`, since `capacity()` won't yield the length. -#[allow(missing_debug_implementations)] -pub(crate) struct RawVec { - ptr: Unique, - cap: usize, - alloc: A, -} - -impl RawVec { - /// HACK(Centril): This exists because stable `const fn` can only call stable `const fn`, so - /// they cannot call `Self::new()`. - /// - /// If you change `RawVec::new` or dependencies, please take care to not introduce anything - /// that would truly const-call something unstable. - pub const NEW: Self = Self::new(); - - /// Creates the biggest possible `RawVec` (on the system heap) - /// without allocating. If `T` has positive size, then this makes a - /// `RawVec` with capacity `0`. If `T` is zero-sized, then it makes a - /// `RawVec` with capacity `usize::MAX`. Useful for implementing - /// delayed allocation. - #[must_use] - pub const fn new() -> Self { - Self::new_in(Global) - } - - /// Creates a `RawVec` (on the system heap) with exactly the - /// capacity and alignment requirements for a `[T; capacity]`. This is - /// equivalent to calling `RawVec::new` when `capacity` is `0` or `T` is - /// zero-sized. Note that if `T` is zero-sized this means you will - /// *not* get a `RawVec` with the requested capacity. - /// - /// # Panics - /// - /// Panics if the requested capacity exceeds `isize::MAX` bytes. - /// - /// # Aborts - /// - /// Aborts on OOM. - #[cfg(not(any(no_global_oom_handling, test)))] - #[must_use] - #[inline] - pub fn with_capacity(capacity: usize) -> Self { - Self::with_capacity_in(capacity, Global) - } - - /// Like `with_capacity`, but guarantees the buffer is zeroed. - #[cfg(not(any(no_global_oom_handling, test)))] - #[must_use] - #[inline] - pub fn with_capacity_zeroed(capacity: usize) -> Self { - Self::with_capacity_zeroed_in(capacity, Global) - } -} - -impl RawVec { - // Tiny Vecs are dumb. Skip to: - // - 8 if the element size is 1, because any heap allocators is likely - // to round up a request of less than 8 bytes to at least 8 bytes. - // - 4 if elements are moderate-sized (<= 1 KiB). - // - 1 otherwise, to avoid wasting too much space for very short Vecs. - pub(crate) const MIN_NON_ZERO_CAP: usize = if mem::size_of::() == 1 { - 8 - } else if mem::size_of::() <= 1024 { - 4 - } else { - 1 - }; - - /// Like `new`, but parameterized over the choice of allocator for - /// the returned `RawVec`. - #[rustc_allow_const_fn_unstable(const_fn)] - pub const fn new_in(alloc: A) -> Self { - // `cap: 0` means "unallocated". zero-sized types are ignored. - Self { ptr: Unique::dangling(), cap: 0, alloc } - } - - /// Like `with_capacity`, but parameterized over the choice of - /// allocator for the returned `RawVec`. - #[cfg(not(no_global_oom_handling))] - #[inline] - pub fn with_capacity_in(capacity: usize, alloc: A) -> Self { - Self::allocate_in(capacity, AllocInit::Uninitialized, alloc) - } - - /// Like `with_capacity_zeroed`, but parameterized over the choice - /// of allocator for the returned `RawVec`. - #[cfg(not(no_global_oom_handling))] - #[inline] - pub fn with_capacity_zeroed_in(capacity: usize, alloc: A) -> Self { - Self::allocate_in(capacity, AllocInit::Zeroed, alloc) - } - - /// Converts the entire buffer into `Box<[MaybeUninit]>` with the specified `len`. 
- /// - /// Note that this will correctly reconstitute any `cap` changes - /// that may have been performed. (See description of type for details.) - /// - /// # Safety - /// - /// * `len` must be greater than or equal to the most recently requested capacity, and - /// * `len` must be less than or equal to `self.capacity()`. - /// - /// Note, that the requested capacity and `self.capacity()` could differ, as - /// an allocator could overallocate and return a greater memory block than requested. - pub unsafe fn into_box(self, len: usize) -> Box<[MaybeUninit], A> { - // Sanity-check one half of the safety requirement (we cannot check the other half). - debug_assert!( - len <= self.capacity(), - "`len` must be smaller than or equal to `self.capacity()`" - ); - - let me = ManuallyDrop::new(self); - unsafe { - let slice = slice::from_raw_parts_mut(me.ptr() as *mut MaybeUninit, len); - Box::from_raw_in(slice, ptr::read(&me.alloc)) - } - } - - #[cfg(not(no_global_oom_handling))] - fn allocate_in(capacity: usize, init: AllocInit, alloc: A) -> Self { - // Don't allocate here because `Drop` will not deallocate when `capacity` is 0. - if mem::size_of::() == 0 || capacity == 0 { - Self::new_in(alloc) - } else { - // We avoid `unwrap_or_else` here because it bloats the amount of - // LLVM IR generated. - let layout = match Layout::array::(capacity) { - Ok(layout) => layout, - Err(_) => capacity_overflow(), - }; - match alloc_guard(layout.size()) { - Ok(_) => {} - Err(_) => capacity_overflow(), - } - let result = match init { - AllocInit::Uninitialized => alloc.allocate(layout), - AllocInit::Zeroed => alloc.allocate_zeroed(layout), - }; - let ptr = match result { - Ok(ptr) => ptr, - Err(_) => handle_alloc_error(layout), - }; - - // Allocators currently return a `NonNull<[u8]>` whose length - // matches the size requested. If that ever changes, the capacity - // here should change to `ptr.len() / mem::size_of::()`. - Self { - ptr: unsafe { Unique::new_unchecked(ptr.cast().as_ptr()) }, - cap: capacity, - alloc, - } - } - } - - /// Reconstitutes a `RawVec` from a pointer, capacity, and allocator. - /// - /// # Safety - /// - /// The `ptr` must be allocated (via the given allocator `alloc`), and with the given - /// `capacity`. - /// The `capacity` cannot exceed `isize::MAX` for sized types. (only a concern on 32-bit - /// systems). ZST vectors may have a capacity up to `usize::MAX`. - /// If the `ptr` and `capacity` come from a `RawVec` created via `alloc`, then this is - /// guaranteed. - #[inline] - pub unsafe fn from_raw_parts_in(ptr: *mut T, capacity: usize, alloc: A) -> Self { - Self { ptr: unsafe { Unique::new_unchecked(ptr) }, cap: capacity, alloc } - } - - /// Gets a raw pointer to the start of the allocation. Note that this is - /// `Unique::dangling()` if `capacity == 0` or `T` is zero-sized. In the former case, you must - /// be careful. - #[inline] - pub fn ptr(&self) -> *mut T { - self.ptr.as_ptr() - } - - /// Gets the capacity of the allocation. - /// - /// This will always be `usize::MAX` if `T` is zero-sized. - #[inline(always)] - pub fn capacity(&self) -> usize { - if mem::size_of::() == 0 { usize::MAX } else { self.cap } - } - - /// Returns a shared reference to the allocator backing this `RawVec`. - pub fn allocator(&self) -> &A { - &self.alloc - } - - fn current_memory(&self) -> Option<(NonNull, Layout)> { - if mem::size_of::() == 0 || self.cap == 0 { - None - } else { - // We have an allocated chunk of memory, so we can bypass runtime - // checks to get our current layout. 
- unsafe { - let layout = Layout::array::(self.cap).unwrap_unchecked(); - Some((self.ptr.cast().into(), layout)) - } - } - } - - /// Ensures that the buffer contains at least enough space to hold `len + - /// additional` elements. If it doesn't already have enough capacity, will - /// reallocate enough space plus comfortable slack space to get amortized - /// *O*(1) behavior. Will limit this behavior if it would needlessly cause - /// itself to panic. - /// - /// If `len` exceeds `self.capacity()`, this may fail to actually allocate - /// the requested space. This is not really unsafe, but the unsafe - /// code *you* write that relies on the behavior of this function may break. - /// - /// This is ideal for implementing a bulk-push operation like `extend`. - /// - /// # Panics - /// - /// Panics if the new capacity exceeds `isize::MAX` bytes. - /// - /// # Aborts - /// - /// Aborts on OOM. - #[cfg(not(no_global_oom_handling))] - #[inline] - pub fn reserve(&mut self, len: usize, additional: usize) { - // Callers expect this function to be very cheap when there is already sufficient capacity. - // Therefore, we move all the resizing and error-handling logic from grow_amortized and - // handle_reserve behind a call, while making sure that this function is likely to be - // inlined as just a comparison and a call if the comparison fails. - #[cold] - fn do_reserve_and_handle( - slf: &mut RawVec, - len: usize, - additional: usize, - ) { - handle_reserve(slf.grow_amortized(len, additional)); - } - - if self.needs_to_grow(len, additional) { - do_reserve_and_handle(self, len, additional); - } - } - - /// A specialized version of `reserve()` used only by the hot and - /// oft-instantiated `Vec::push()`, which does its own capacity check. - #[cfg(not(no_global_oom_handling))] - #[inline(never)] - pub fn reserve_for_push(&mut self, len: usize) { - handle_reserve(self.grow_amortized(len, 1)); - } - - /// The same as `reserve`, but returns on errors instead of panicking or aborting. - pub fn try_reserve(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> { - if self.needs_to_grow(len, additional) { - self.grow_amortized(len, additional) - } else { - Ok(()) - } - } - - /// Ensures that the buffer contains at least enough space to hold `len + - /// additional` elements. If it doesn't already, will reallocate the - /// minimum possible amount of memory necessary. Generally this will be - /// exactly the amount of memory necessary, but in principle the allocator - /// is free to give back more than we asked for. - /// - /// If `len` exceeds `self.capacity()`, this may fail to actually allocate - /// the requested space. This is not really unsafe, but the unsafe code - /// *you* write that relies on the behavior of this function may break. - /// - /// # Panics - /// - /// Panics if the new capacity exceeds `isize::MAX` bytes. - /// - /// # Aborts - /// - /// Aborts on OOM. - #[cfg(not(no_global_oom_handling))] - pub fn reserve_exact(&mut self, len: usize, additional: usize) { - handle_reserve(self.try_reserve_exact(len, additional)); - } - - /// The same as `reserve_exact`, but returns on errors instead of panicking or aborting. - pub fn try_reserve_exact( - &mut self, - len: usize, - additional: usize, - ) -> Result<(), TryReserveError> { - if self.needs_to_grow(len, additional) { self.grow_exact(len, additional) } else { Ok(()) } - } - - /// Shrinks the buffer down to the specified capacity. If the given amount - /// is 0, actually completely deallocates. 
- /// - /// # Panics - /// - /// Panics if the given amount is *larger* than the current capacity. - /// - /// # Aborts - /// - /// Aborts on OOM. - #[cfg(not(no_global_oom_handling))] - pub fn shrink_to_fit(&mut self, cap: usize) { - handle_reserve(self.shrink(cap)); - } -} - -impl RawVec { - /// Returns if the buffer needs to grow to fulfill the needed extra capacity. - /// Mainly used to make inlining reserve-calls possible without inlining `grow`. - fn needs_to_grow(&self, len: usize, additional: usize) -> bool { - additional > self.capacity().wrapping_sub(len) - } - - fn set_ptr_and_cap(&mut self, ptr: NonNull<[u8]>, cap: usize) { - // Allocators currently return a `NonNull<[u8]>` whose length matches - // the size requested. If that ever changes, the capacity here should - // change to `ptr.len() / mem::size_of::()`. - self.ptr = unsafe { Unique::new_unchecked(ptr.cast().as_ptr()) }; - self.cap = cap; - } - - // This method is usually instantiated many times. So we want it to be as - // small as possible, to improve compile times. But we also want as much of - // its contents to be statically computable as possible, to make the - // generated code run faster. Therefore, this method is carefully written - // so that all of the code that depends on `T` is within it, while as much - // of the code that doesn't depend on `T` as possible is in functions that - // are non-generic over `T`. - fn grow_amortized(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> { - // This is ensured by the calling contexts. - debug_assert!(additional > 0); - - if mem::size_of::() == 0 { - // Since we return a capacity of `usize::MAX` when `elem_size` is - // 0, getting to here necessarily means the `RawVec` is overfull. - return Err(CapacityOverflow.into()); - } - - // Nothing we can really do about these checks, sadly. - let required_cap = len.checked_add(additional).ok_or(CapacityOverflow)?; - - // This guarantees exponential growth. The doubling cannot overflow - // because `cap <= isize::MAX` and the type of `cap` is `usize`. - let cap = cmp::max(self.cap * 2, required_cap); - let cap = cmp::max(Self::MIN_NON_ZERO_CAP, cap); - - let new_layout = Layout::array::(cap); - - // `finish_grow` is non-generic over `T`. - let ptr = finish_grow(new_layout, self.current_memory(), &mut self.alloc)?; - self.set_ptr_and_cap(ptr, cap); - Ok(()) - } - - // The constraints on this method are much the same as those on - // `grow_amortized`, but this method is usually instantiated less often so - // it's less critical. - fn grow_exact(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> { - if mem::size_of::() == 0 { - // Since we return a capacity of `usize::MAX` when the type size is - // 0, getting to here necessarily means the `RawVec` is overfull. - return Err(CapacityOverflow.into()); - } - - let cap = len.checked_add(additional).ok_or(CapacityOverflow)?; - let new_layout = Layout::array::(cap); - - // `finish_grow` is non-generic over `T`. - let ptr = finish_grow(new_layout, self.current_memory(), &mut self.alloc)?; - self.set_ptr_and_cap(ptr, cap); - Ok(()) - } - - fn shrink(&mut self, cap: usize) -> Result<(), TryReserveError> { - assert!(cap <= self.capacity(), "Tried to shrink to a larger capacity"); - - let (ptr, layout) = if let Some(mem) = self.current_memory() { mem } else { return Ok(()) }; - - let ptr = unsafe { - // `Layout::array` cannot overflow here because it would have - // overflowed earlier when capacity was larger. 
- let new_layout = Layout::array::(cap).unwrap_unchecked(); - self.alloc - .shrink(ptr, layout, new_layout) - .map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })? - }; - self.set_ptr_and_cap(ptr, cap); - Ok(()) - } -} - -// This function is outside `RawVec` to minimize compile times. See the comment -// above `RawVec::grow_amortized` for details. (The `A` parameter isn't -// significant, because the number of different `A` types seen in practice is -// much smaller than the number of `T` types.) -#[inline(never)] -fn finish_grow( - new_layout: Result, - current_memory: Option<(NonNull, Layout)>, - alloc: &mut A, -) -> Result, TryReserveError> -where - A: Allocator, -{ - // Check for the error here to minimize the size of `RawVec::grow_*`. - let new_layout = new_layout.map_err(|_| CapacityOverflow)?; - - alloc_guard(new_layout.size())?; - - let memory = if let Some((ptr, old_layout)) = current_memory { - debug_assert_eq!(old_layout.align(), new_layout.align()); - unsafe { - // The allocator checks for alignment equality - intrinsics::assume(old_layout.align() == new_layout.align()); - alloc.grow(ptr, old_layout, new_layout) - } - } else { - alloc.allocate(new_layout) - }; - - memory.map_err(|_| AllocError { layout: new_layout, non_exhaustive: () }.into()) -} - -unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawVec { - /// Frees the memory owned by the `RawVec` *without* trying to drop its contents. - fn drop(&mut self) { - if let Some((ptr, layout)) = self.current_memory() { - unsafe { self.alloc.deallocate(ptr, layout) } - } - } -} - -// Central function for reserve error handling. -#[cfg(not(no_global_oom_handling))] -#[inline] -fn handle_reserve(result: Result<(), TryReserveError>) { - match result.map_err(|e| e.kind()) { - Err(CapacityOverflow) => capacity_overflow(), - Err(AllocError { layout, .. }) => handle_alloc_error(layout), - Ok(()) => { /* yay */ } - } -} - -// We need to guarantee the following: -// * We don't ever allocate `> isize::MAX` byte-size objects. -// * We don't overflow `usize::MAX` and actually allocate too little. -// -// On 64-bit we just need to check for overflow since trying to allocate -// `> isize::MAX` bytes will surely fail. On 32-bit and 16-bit we need to add -// an extra guard for this in case we're running on a platform which can use -// all 4GB in user-space, e.g., PAE or x32. - -#[inline] -fn alloc_guard(alloc_size: usize) -> Result<(), TryReserveError> { - if usize::BITS < 64 && alloc_size > isize::MAX as usize { - Err(CapacityOverflow.into()) - } else { - Ok(()) - } -} - -// One central function responsible for reporting capacity overflows. This'll -// ensure that the code generation related to these panics is minimal as there's -// only one location which panics rather than a bunch throughout the module. -#[cfg(not(no_global_oom_handling))] -fn capacity_overflow() -> ! 
{ - panic!("capacity overflow"); -} diff --git a/library/alloc/src/vec/in_place_collect.rs b/library/alloc/src/vec/in_place_collect.rs index 282af8cc33fdd..df49febe1e981 100644 --- a/library/alloc/src/vec/in_place_collect.rs +++ b/library/alloc/src/vec/in_place_collect.rs @@ -171,7 +171,7 @@ where inner.ptr, inner.buf.as_ptr() as *mut T, inner.end as *const T, - inner.cap, + inner.buf.len(), ) }; diff --git a/library/alloc/src/vec/into_iter.rs b/library/alloc/src/vec/into_iter.rs index 8134eea570ad7..39728d2bf4ebd 100644 --- a/library/alloc/src/vec/into_iter.rs +++ b/library/alloc/src/vec/into_iter.rs @@ -1,15 +1,14 @@ #[cfg(not(no_global_oom_handling))] use super::AsVecIntoIter; use crate::alloc::{Allocator, Global}; -use crate::raw_vec::RawVec; +use crate::boxed::Box; use core::fmt; use core::intrinsics::arith_offset; use core::iter::{ FusedIterator, InPlaceIterable, SourceIter, TrustedLen, TrustedRandomAccessNoCoerce, }; use core::marker::PhantomData; -use core::mem::{self, ManuallyDrop}; -use core::ops::Deref; +use core::mem::{self, ManuallyDrop, MaybeUninit}; use core::ptr::{self, NonNull}; use core::slice::{self}; @@ -30,12 +29,8 @@ pub struct IntoIter< T, #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global, > { - pub(super) buf: NonNull, + pub(super) buf: Box<[MaybeUninit], A>, pub(super) phantom: PhantomData, - pub(super) cap: usize, - // the drop impl reconstructs a RawVec from buf, cap and alloc - // to avoid dropping the allocator twice we need to wrap it into ManuallyDrop - pub(super) alloc: ManuallyDrop, pub(super) ptr: *const T, pub(super) end: *const T, } @@ -86,7 +81,7 @@ impl IntoIter { #[unstable(feature = "allocator_api", issue = "32838")] #[inline] pub fn allocator(&self) -> &A { - &self.alloc + Box::allocator(&self.buf) } fn as_raw_mut_slice(&mut self) -> *mut [T] { @@ -112,10 +107,18 @@ impl IntoIter { // overwrite the individual fields instead of creating a new // struct and then overwriting &mut self. // this creates less assembly - self.cap = 0; - self.buf = unsafe { NonNull::new_unchecked(RawVec::NEW.ptr()) }; - self.ptr = self.buf.as_ptr(); - self.end = self.buf.as_ptr(); + + // Safety: + // the allocator is being copied from the existing buf, but we're forgetting that allocation + // directly afterwards. 
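+        // The old buffer still owns both the allocation and the original
+        // allocator; wrapping it in `ManuallyDrop` leaks them on purpose, so
+        // the allocation stays live for the caller and the allocator (already
+        // duplicated by `ptr::read`) is not dropped twice.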
+ unsafe { + let alloc = core::ptr::read(self.allocator()); + let buf = core::mem::replace(&mut self.buf, Box::empty_in(alloc)); + let _ = ManuallyDrop::new(buf); + } + + self.ptr = NonNull::dangling().as_ptr(); + self.end = NonNull::dangling().as_ptr(); unsafe { ptr::drop_in_place(remaining); @@ -303,36 +306,21 @@ where impl Clone for IntoIter { #[cfg(not(test))] fn clone(&self) -> Self { - self.as_slice().to_vec_in(self.alloc.deref().clone()).into_iter() + self.as_slice().to_vec_in(self.allocator().clone()).into_iter() } #[cfg(test)] fn clone(&self) -> Self { - crate::slice::to_vec(self.as_slice(), self.alloc.deref().clone()).into_iter() + crate::slice::to_vec(self.as_slice(), self.allocator().clone()).into_iter() } } #[stable(feature = "rust1", since = "1.0.0")] unsafe impl<#[may_dangle] T, A: Allocator> Drop for IntoIter { fn drop(&mut self) { - struct DropGuard<'a, T, A: Allocator>(&'a mut IntoIter); - - impl Drop for DropGuard<'_, T, A> { - fn drop(&mut self) { - unsafe { - // `IntoIter::alloc` is not used anymore after this and will be dropped by RawVec - let alloc = ManuallyDrop::take(&mut self.0.alloc); - // RawVec handles deallocation - let _ = RawVec::from_raw_parts_in(self.0.buf.as_ptr(), self.0.cap, alloc); - } - } - } - - let guard = DropGuard(self); // destroy the remaining elements unsafe { - ptr::drop_in_place(guard.0.as_raw_mut_slice()); + ptr::drop_in_place(self.as_raw_mut_slice()); } - // now `guard` will be dropped and do the rest } } diff --git a/library/alloc/src/vec/mod.rs b/library/alloc/src/vec/mod.rs index cbb5b0627b77d..34cf5649086de 100644 --- a/library/alloc/src/vec/mod.rs +++ b/library/alloc/src/vec/mod.rs @@ -71,9 +71,9 @@ use core::slice::{self, SliceIndex}; use crate::alloc::{Allocator, Global}; use crate::borrow::{Cow, ToOwned}; +use crate::box_storage::{storage_from_raw_parts_in, BoxStorage}; use crate::boxed::Box; use crate::collections::TryReserveError; -use crate::raw_vec::RawVec; #[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")] pub use self::drain_filter::DrainFilter; @@ -398,7 +398,8 @@ mod spec_extend; #[cfg_attr(not(test), rustc_diagnostic_item = "Vec")] #[rustc_insignificant_dtor] pub struct Vec { - buf: RawVec, + buf: Box<[MaybeUninit], A>, + phantom: PhantomData, len: usize, } @@ -422,7 +423,7 @@ impl Vec { #[stable(feature = "rust1", since = "1.0.0")] #[must_use] pub const fn new() -> Self { - Vec { buf: RawVec::NEW, len: 0 } + Vec { buf: Box::<[MaybeUninit]>::EMPTY, phantom: PhantomData, len: 0 } } /// Constructs a new, empty `Vec` with the specified capacity. @@ -561,7 +562,7 @@ impl Vec { #[inline] #[unstable(feature = "allocator_api", issue = "32838")] pub const fn new_in(alloc: A) -> Self { - Vec { buf: RawVec::new_in(alloc), len: 0 } + Vec { buf: Box::empty_in(alloc), phantom: PhantomData, len: 0 } } /// Constructs a new, empty `Vec` with the specified capacity with the provided @@ -610,7 +611,7 @@ impl Vec { #[inline] #[unstable(feature = "allocator_api", issue = "32838")] pub fn with_capacity_in(capacity: usize, alloc: A) -> Self { - Vec { buf: RawVec::with_capacity_in(capacity, alloc), len: 0 } + Vec { buf: Box::new_uninit_slice_in(capacity, alloc), phantom: PhantomData, len: 0 } } /// Creates a `Vec` directly from the raw components of another vector. 
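// Illustrative sketch, not part of the patch: the shape `Vec<T>` now takes,
// reduced to a stand-alone toy (all names here are hypothetical). Capacity is
// simply the length of the boxed slice of `MaybeUninit<T>`; only the first
// `len` slots are initialized, and dropping the box frees memory without
// dropping any `T`, so the real `Vec` still has to drop its elements itself.
use core::mem::MaybeUninit;

struct ToyVec<T> {
    buf: Box<[MaybeUninit<T>]>,
    len: usize,
}

impl<T> ToyVec<T> {
    fn with_capacity(cap: usize) -> Self {
        // Collecting into `Box<[_]>` is a stable stand-in for `new_uninit_slice_in`.
        ToyVec { buf: (0..cap).map(|_| MaybeUninit::uninit()).collect(), len: 0 }
    }
    fn capacity(&self) -> usize {
        self.buf.len() // the real impl reports usize::MAX for zero-sized `T`
    }
    // Toy only: no growth, and pushed elements are never dropped.
    fn push(&mut self, value: T) {
        assert!(self.len < self.capacity());
        self.buf[self.len] = MaybeUninit::new(value);
        self.len += 1;
    }
}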
@@ -686,7 +687,13 @@ impl Vec { #[inline] #[unstable(feature = "allocator_api", issue = "32838")] pub unsafe fn from_raw_parts_in(ptr: *mut T, length: usize, capacity: usize, alloc: A) -> Self { - unsafe { Vec { buf: RawVec::from_raw_parts_in(ptr, capacity, alloc), len: length } } + unsafe { + Vec { + buf: storage_from_raw_parts_in(ptr.cast(), capacity, alloc), + phantom: PhantomData, + len: length, + } + } } /// Decomposes a `Vec` into its raw components. @@ -1001,8 +1008,7 @@ impl Vec { self.shrink_to_fit(); let me = ManuallyDrop::new(self); let buf = ptr::read(&me.buf); - let len = me.len(); - buf.into_box(len).assume_init() + buf.assume_init() } } @@ -1135,7 +1141,7 @@ impl Vec { pub fn as_ptr(&self) -> *const T { // We shadow the slice method of the same name to avoid going through // `deref`, which creates an intermediate reference. - let ptr = self.buf.ptr(); + let ptr = self.buf.as_ptr().cast::(); unsafe { assume(!ptr.is_null()); } @@ -1171,7 +1177,7 @@ impl Vec { pub fn as_mut_ptr(&mut self) -> *mut T { // We shadow the slice method of the same name to avoid going through // `deref_mut`, which creates an intermediate reference. - let ptr = self.buf.ptr(); + let ptr = self.buf.as_mut_ptr().cast::(); unsafe { assume(!ptr.is_null()); } @@ -1182,7 +1188,7 @@ impl Vec { #[unstable(feature = "allocator_api", issue = "32838")] #[inline] pub fn allocator(&self) -> &A { - self.buf.allocator() + Box::allocator(&self.buf) } /// Forces the length of the vector to `new_len`. @@ -1351,7 +1357,7 @@ impl Vec { } // space for the new element - if len == self.buf.capacity() { + if len == self.capacity() { self.reserve(1); } @@ -2094,7 +2100,7 @@ impl Vec { unsafe { slice::from_raw_parts_mut( self.as_mut_ptr().add(self.len) as *mut MaybeUninit, - self.buf.capacity() - self.len, + self.capacity() - self.len, ) } } @@ -2168,11 +2174,11 @@ impl Vec { let ptr = self.as_mut_ptr(); // SAFETY: // - `ptr` is guaranteed to be valid for `self.len` elements - // - but the allocation extends out to `self.buf.capacity()` elements, possibly + // - but the allocation extends out to `self.capacity()` elements, possibly // uninitialized let spare_ptr = unsafe { ptr.add(self.len) }; let spare_ptr = spare_ptr.cast::>(); - let spare_len = self.buf.capacity() - self.len; + let spare_len = self.capacity() - self.len; // SAFETY: // - `ptr` is guaranteed to be valid for `self.len` elements @@ -2633,22 +2639,14 @@ impl IntoIterator for Vec { fn into_iter(self) -> IntoIter { unsafe { let mut me = ManuallyDrop::new(self); - let alloc = ManuallyDrop::new(ptr::read(me.allocator())); + let buf = core::ptr::read(&me.buf); let begin = me.as_mut_ptr(); let end = if mem::size_of::() == 0 { arith_offset(begin as *const i8, me.len() as isize) as *const T } else { begin.add(me.len()) as *const T }; - let cap = me.buf.capacity(); - IntoIter { - buf: NonNull::new_unchecked(begin), - phantom: PhantomData, - cap, - alloc, - ptr: begin, - end, - } + IntoIter { buf, phantom: PhantomData, ptr: begin, end } } } } diff --git a/library/alloc/src/vec/spec_from_elem.rs b/library/alloc/src/vec/spec_from_elem.rs index de610174783c4..e84cd4b90be23 100644 --- a/library/alloc/src/vec/spec_from_elem.rs +++ b/library/alloc/src/vec/spec_from_elem.rs @@ -1,6 +1,9 @@ use crate::alloc::Allocator; -use crate::raw_vec::RawVec; -use core::ptr::{self}; +use crate::boxed::Box; +use core::{ + marker::PhantomData, + ptr::{self}, +}; use super::{ExtendElement, IsZero, Vec}; @@ -21,7 +24,7 @@ impl SpecFromElem for i8 { #[inline] fn from_elem(elem: i8, n: usize, 
alloc: A) -> Vec { if elem == 0 { - return Vec { buf: RawVec::with_capacity_zeroed_in(n, alloc), len: n }; + return Vec { buf: Box::new_zeroed_slice_in(n, alloc), phantom: PhantomData, len: n }; } unsafe { let mut v = Vec::with_capacity_in(n, alloc); @@ -36,7 +39,7 @@ impl SpecFromElem for u8 { #[inline] fn from_elem(elem: u8, n: usize, alloc: A) -> Vec { if elem == 0 { - return Vec { buf: RawVec::with_capacity_zeroed_in(n, alloc), len: n }; + return Vec { buf: Box::new_zeroed_slice_in(n, alloc), phantom: PhantomData, len: n }; } unsafe { let mut v = Vec::with_capacity_in(n, alloc); @@ -51,7 +54,7 @@ impl SpecFromElem for T { #[inline] fn from_elem(elem: T, n: usize, alloc: A) -> Vec { if elem.is_zero() { - return Vec { buf: RawVec::with_capacity_zeroed_in(n, alloc), len: n }; + return Vec { buf: Box::new_zeroed_slice_in(n, alloc), phantom: PhantomData, len: n }; } let mut v = Vec::with_capacity_in(n, alloc); v.extend_with(n, ExtendElement(elem)); diff --git a/library/alloc/src/vec/spec_from_iter.rs b/library/alloc/src/vec/spec_from_iter.rs index efa6868473e49..360cb7a1549b6 100644 --- a/library/alloc/src/vec/spec_from_iter.rs +++ b/library/alloc/src/vec/spec_from_iter.rs @@ -1,3 +1,4 @@ +use core::marker::PhantomData; use core::mem::ManuallyDrop; use core::ptr::{self}; @@ -45,13 +46,14 @@ impl SpecFromIter> for Vec { // is not strictly necessary as Vec's allocation behavior is intentionally unspecified. // But it is a conservative choice. let has_advanced = iterator.buf.as_ptr() as *const _ != iterator.ptr; - if !has_advanced || iterator.len() >= iterator.cap / 2 { + if !has_advanced || iterator.len() >= iterator.buf.len() / 2 { unsafe { let it = ManuallyDrop::new(iterator); + let buf = core::ptr::read(&it.buf); if has_advanced { - ptr::copy(it.ptr, it.buf.as_ptr(), it.len()); + ptr::copy(it.ptr, buf.as_ptr().cast::() as *mut T, it.len()); } - return Vec::from_raw_parts(it.buf.as_ptr(), it.len(), it.cap); + return Vec { buf, phantom: PhantomData, len: it.len() }; } } diff --git a/library/alloc/src/vec/spec_from_iter_nested.rs b/library/alloc/src/vec/spec_from_iter_nested.rs index f915ebb86e5a5..744447376256a 100644 --- a/library/alloc/src/vec/spec_from_iter_nested.rs +++ b/library/alloc/src/vec/spec_from_iter_nested.rs @@ -1,8 +1,8 @@ -use core::cmp; use core::iter::TrustedLen; use core::ptr; +use core::{cmp, mem::MaybeUninit}; -use crate::raw_vec::RawVec; +use crate::{box_storage::BoxStorage, boxed::Box}; use super::{SpecExtend, Vec}; @@ -28,7 +28,7 @@ where Some(element) => { let (lower, _) = iterator.size_hint(); let initial_capacity = - cmp::max(RawVec::::MIN_NON_ZERO_CAP, lower.saturating_add(1)); + cmp::max(Box::<[MaybeUninit]>::MIN_NON_ZERO_CAP, lower.saturating_add(1)); let mut vector = Vec::with_capacity(initial_capacity); unsafe { // SAFETY: We requested capacity at least 1 diff --git a/library/alloc/src/vec/splice.rs b/library/alloc/src/vec/splice.rs index bad765c7f51fa..c6092c865093c 100644 --- a/library/alloc/src/vec/splice.rs +++ b/library/alloc/src/vec/splice.rs @@ -1,4 +1,5 @@ use crate::alloc::{Allocator, Global}; +use crate::box_storage::BoxStorage; use core::ptr::{self}; use core::slice::{self}; diff --git a/src/etc/gdb_providers.py b/src/etc/gdb_providers.py index 0a52b8c976f6a..61f5931ac8a02 100644 --- a/src/etc/gdb_providers.py +++ b/src/etc/gdb_providers.py @@ -47,7 +47,7 @@ def __init__(self, valobj): self.valobj = valobj vec = valobj["vec"] self.length = int(vec["len"]) - self.data_ptr = unwrap_unique_or_non_null(vec["buf"]["ptr"]) + self.data_ptr = 
vec["buf"]["data_ptr"] def to_string(self): return self.data_ptr.lazy_string(encoding="utf-8", length=self.length) @@ -65,7 +65,7 @@ def __init__(self, valobj): vec = buf[ZERO_FIELD] if is_windows else buf self.length = int(vec["len"]) - self.data_ptr = unwrap_unique_or_non_null(vec["buf"]["ptr"]) + self.data_ptr = vec["buf"]["data_ptr"] def to_string(self): return self.data_ptr.lazy_string(encoding="utf-8", length=self.length) @@ -103,6 +103,20 @@ def _enumerate_array_elements(element_ptrs): yield key, element +def _enumerate_mu_array_elements(element_ptrs): + for (i, element_ptr) in enumerate(element_ptrs): + key = "[{}]".format(i) + element = element_ptr.dereference()["value"]["value"] + + try: + str(element) + except RuntimeError: + yield key, "inaccessible" + + break + + yield key, element + class StdSliceProvider: def __init__(self, valobj): self.valobj = valobj @@ -125,13 +139,13 @@ class StdVecProvider: def __init__(self, valobj): self.valobj = valobj self.length = int(valobj["len"]) - self.data_ptr = unwrap_unique_or_non_null(valobj["buf"]["ptr"]) + self.data_ptr = valobj["buf"]["data_ptr"] def to_string(self): return "Vec(size={})".format(self.length) def children(self): - return _enumerate_array_elements( + return _enumerate_mu_array_elements( self.data_ptr + index for index in xrange(self.length) ) @@ -145,8 +159,8 @@ def __init__(self, valobj): self.valobj = valobj self.head = int(valobj["head"]) self.tail = int(valobj["tail"]) - self.cap = int(valobj["buf"]["cap"]) - self.data_ptr = unwrap_unique_or_non_null(valobj["buf"]["ptr"]) + self.cap = int(valobj["buf"]["length"]) + self.data_ptr = valobj["buf"]["data_ptr"] if self.head >= self.tail: self.size = self.head - self.tail else: @@ -156,7 +170,7 @@ def to_string(self): return "VecDeque(size={})".format(self.size) def children(self): - return _enumerate_array_elements( + return _enumerate_mu_array_elements( (self.data_ptr + ((self.tail + index) % self.cap)) for index in xrange(self.size) ) diff --git a/src/test/mir-opt/inline/inline_into_box_place.main.Inline.32bit.diff b/src/test/mir-opt/inline/inline_into_box_place.main.Inline.32bit.diff index 074ad067ff899..9aeedde7ae84a 100644 --- a/src/test/mir-opt/inline/inline_into_box_place.main.Inline.32bit.diff +++ b/src/test/mir-opt/inline/inline_into_box_place.main.Inline.32bit.diff @@ -35,7 +35,7 @@ + StorageLive(_7); // scope 0 at $DIR/inline-into-box-place.rs:8:33: 8:43 + _7 = &mut (*_5); // scope 0 at $DIR/inline-into-box-place.rs:8:33: 8:43 + Deinit((*_7)); // scope 3 at $SRC_DIR/alloc/src/vec/mod.rs:LL:COL -+ ((*_7).0: alloc::raw_vec::RawVec) = const alloc::raw_vec::RawVec:: { ptr: Unique:: { pointer: NonNull:: { pointer: {0x4 as *const u32} }, _marker: PhantomData:: }, cap: 0_usize, alloc: std::alloc::Global }; // scope 3 at $SRC_DIR/alloc/src/vec/mod.rs:LL:COL ++ ((*_7).0: std::boxed::Box<[std::mem::MaybeUninit]>) = const Box::<[MaybeUninit]>(Unique::<[MaybeUninit]> { pointer: NonNull::<[MaybeUninit]> { pointer: ByRef { alloc: Allocation { bytes: [4, 0, 0, 0, 0, 0, 0, 0], relocations: Relocations(SortedMap { data: [] }), init_mask: InitMask { blocks: [255], len: Size { raw: 8 } }, align: Align { pow2: 2 }, mutability: Not, extra: () }, offset: Size { raw: 0 } }: *const [MaybeUninit::] }, _marker: PhantomData::<[MaybeUninit]> }, std::alloc::Global); // scope 3 at $SRC_DIR/alloc/src/vec/mod.rs:LL:COL // mir::Constant - // + span: $DIR/inline-into-box-place.rs:8:33: 8:41 - // + user_ty: UserType(1) @@ -45,8 +45,8 @@ - bb2: { + // + span: $SRC_DIR/alloc/src/vec/mod.rs:LL:COL + 
// + user_ty: UserType(0) -+ // + literal: Const { ty: alloc::raw_vec::RawVec, val: Value(ByRef { alloc: Allocation { bytes: [4, 0, 0, 0, 0, 0, 0, 0], relocations: Relocations(SortedMap { data: [] }), init_mask: InitMask { blocks: [255], len: Size { raw: 8 } }, align: Align { pow2: 2 }, mutability: Not, extra: () }, offset: Size { raw: 0 } }) } -+ ((*_7).1: usize) = const 0_usize; // scope 3 at $SRC_DIR/alloc/src/vec/mod.rs:LL:COL ++ // + literal: Const { ty: Box<[MaybeUninit]>, val: Value(ByRef { alloc: Allocation { bytes: [4, 0, 0, 0, 0, 0, 0, 0], relocations: Relocations(SortedMap { data: [] }), init_mask: InitMask { blocks: [255], len: Size { raw: 8 } }, align: Align { pow2: 2 }, mutability: Not, extra: () }, offset: Size { raw: 0 } }) } ++ ((*_7).2: usize) = const 0_usize; // scope 3 at $SRC_DIR/alloc/src/vec/mod.rs:LL:COL + StorageDead(_7); // scope 0 at $DIR/inline-into-box-place.rs:8:33: 8:43 _1 = move _5; // scope 0 at $DIR/inline-into-box-place.rs:8:29: 8:43 StorageDead(_5); // scope 0 at $DIR/inline-into-box-place.rs:8:42: 8:43 diff --git a/src/test/mir-opt/inline/inline_into_box_place.main.Inline.64bit.diff b/src/test/mir-opt/inline/inline_into_box_place.main.Inline.64bit.diff index a055ae9864f5f..007fe633b89e6 100644 --- a/src/test/mir-opt/inline/inline_into_box_place.main.Inline.64bit.diff +++ b/src/test/mir-opt/inline/inline_into_box_place.main.Inline.64bit.diff @@ -35,7 +35,7 @@ + StorageLive(_7); // scope 0 at $DIR/inline-into-box-place.rs:8:33: 8:43 + _7 = &mut (*_5); // scope 0 at $DIR/inline-into-box-place.rs:8:33: 8:43 + Deinit((*_7)); // scope 3 at $SRC_DIR/alloc/src/vec/mod.rs:LL:COL -+ ((*_7).0: alloc::raw_vec::RawVec) = const alloc::raw_vec::RawVec:: { ptr: Unique:: { pointer: NonNull:: { pointer: {0x4 as *const u32} }, _marker: PhantomData:: }, cap: 0_usize, alloc: std::alloc::Global }; // scope 3 at $SRC_DIR/alloc/src/vec/mod.rs:LL:COL ++ ((*_7).0: std::boxed::Box<[std::mem::MaybeUninit]>) = const Box::<[MaybeUninit]>(Unique::<[MaybeUninit]> { pointer: NonNull::<[MaybeUninit]> { pointer: ByRef { alloc: Allocation { bytes: [4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], relocations: Relocations(SortedMap { data: [] }), init_mask: InitMask { blocks: [65535], len: Size { raw: 16 } }, align: Align { pow2: 3 }, mutability: Not, extra: () }, offset: Size { raw: 0 } }: *const [MaybeUninit::] }, _marker: PhantomData::<[MaybeUninit]> }, std::alloc::Global); // scope 3 at $SRC_DIR/alloc/src/vec/mod.rs:LL:COL // mir::Constant - // + span: $DIR/inline-into-box-place.rs:8:33: 8:41 - // + user_ty: UserType(1) @@ -45,8 +45,8 @@ - bb2: { + // + span: $SRC_DIR/alloc/src/vec/mod.rs:LL:COL + // + user_ty: UserType(0) -+ // + literal: Const { ty: alloc::raw_vec::RawVec, val: Value(ByRef { alloc: Allocation { bytes: [4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], relocations: Relocations(SortedMap { data: [] }), init_mask: InitMask { blocks: [65535], len: Size { raw: 16 } }, align: Align { pow2: 3 }, mutability: Not, extra: () }, offset: Size { raw: 0 } }) } -+ ((*_7).1: usize) = const 0_usize; // scope 3 at $SRC_DIR/alloc/src/vec/mod.rs:LL:COL ++ // + literal: Const { ty: Box<[MaybeUninit]>, val: Value(ByRef { alloc: Allocation { bytes: [4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], relocations: Relocations(SortedMap { data: [] }), init_mask: InitMask { blocks: [65535], len: Size { raw: 16 } }, align: Align { pow2: 3 }, mutability: Not, extra: () }, offset: Size { raw: 0 } }) } ++ ((*_7).2: usize) = const 0_usize; // scope 3 at $SRC_DIR/alloc/src/vec/mod.rs:LL:COL + 
StorageDead(_7); // scope 0 at $DIR/inline-into-box-place.rs:8:33: 8:43 _1 = move _5; // scope 0 at $DIR/inline-into-box-place.rs:8:29: 8:43 StorageDead(_5); // scope 0 at $DIR/inline-into-box-place.rs:8:42: 8:43 diff --git a/src/test/mir-opt/unusual_item_types.core.ptr-drop_in_place.Vec_i32_.AddMovesForPackedDrops.before.32bit.mir b/src/test/mir-opt/unusual_item_types.core.ptr-drop_in_place.Vec_i32_.AddMovesForPackedDrops.before.32bit.mir index 5dc81b787a9fa..d3deecdf497c3 100644 --- a/src/test/mir-opt/unusual_item_types.core.ptr-drop_in_place.Vec_i32_.AddMovesForPackedDrops.before.32bit.mir +++ b/src/test/mir-opt/unusual_item_types.core.ptr-drop_in_place.Vec_i32_.AddMovesForPackedDrops.before.32bit.mir @@ -22,11 +22,11 @@ fn std::ptr::drop_in_place(_1: *mut Vec) -> () { } bb4 (cleanup): { - drop(((*_1).0: alloc::raw_vec::RawVec)) -> bb2; // scope 0 at $SRC_DIR/core/src/ptr/mod.rs:LL:COL + drop(((*_1).0: std::boxed::Box<[std::mem::MaybeUninit]>)) -> bb2; // scope 0 at $SRC_DIR/core/src/ptr/mod.rs:LL:COL } bb5: { - drop(((*_1).0: alloc::raw_vec::RawVec)) -> [return: bb3, unwind: bb2]; // scope 0 at $SRC_DIR/core/src/ptr/mod.rs:LL:COL + drop(((*_1).0: std::boxed::Box<[std::mem::MaybeUninit]>)) -> [return: bb3, unwind: bb2]; // scope 0 at $SRC_DIR/core/src/ptr/mod.rs:LL:COL } bb6: { diff --git a/src/test/mir-opt/unusual_item_types.core.ptr-drop_in_place.Vec_i32_.AddMovesForPackedDrops.before.64bit.mir b/src/test/mir-opt/unusual_item_types.core.ptr-drop_in_place.Vec_i32_.AddMovesForPackedDrops.before.64bit.mir index 5dc81b787a9fa..d3deecdf497c3 100644 --- a/src/test/mir-opt/unusual_item_types.core.ptr-drop_in_place.Vec_i32_.AddMovesForPackedDrops.before.64bit.mir +++ b/src/test/mir-opt/unusual_item_types.core.ptr-drop_in_place.Vec_i32_.AddMovesForPackedDrops.before.64bit.mir @@ -22,11 +22,11 @@ fn std::ptr::drop_in_place(_1: *mut Vec) -> () { } bb4 (cleanup): { - drop(((*_1).0: alloc::raw_vec::RawVec)) -> bb2; // scope 0 at $SRC_DIR/core/src/ptr/mod.rs:LL:COL + drop(((*_1).0: std::boxed::Box<[std::mem::MaybeUninit]>)) -> bb2; // scope 0 at $SRC_DIR/core/src/ptr/mod.rs:LL:COL } bb5: { - drop(((*_1).0: alloc::raw_vec::RawVec)) -> [return: bb3, unwind: bb2]; // scope 0 at $SRC_DIR/core/src/ptr/mod.rs:LL:COL + drop(((*_1).0: std::boxed::Box<[std::mem::MaybeUninit]>)) -> [return: bb3, unwind: bb2]; // scope 0 at $SRC_DIR/core/src/ptr/mod.rs:LL:COL } bb6: {