diff --git a/alloc/src/collections/vec_deque/mod.rs b/alloc/src/collections/vec_deque/mod.rs
index 4643a6bbe2ecd..61d05afccfd1a 100644
--- a/alloc/src/collections/vec_deque/mod.rs
+++ b/alloc/src/collections/vec_deque/mod.rs
@@ -982,6 +982,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
         // `head` and `len` are at most `isize::MAX` and `target_cap < self.capacity()`, so nothing can
         // overflow.
         let tail_outside = (target_cap + 1..=self.capacity()).contains(&(self.head + self.len));
+        // Used in the drop guard below.
+        let old_head = self.head;
 
         if self.len == 0 {
             self.head = 0;
@@ -1034,12 +1036,74 @@ impl<T, A: Allocator> VecDeque<T, A> {
             }
             self.head = new_head;
         }
-        self.buf.shrink_to_fit(target_cap);
+
+        struct Guard<'a, T, A: Allocator> {
+            deque: &'a mut VecDeque<T, A>,
+            old_head: usize,
+            target_cap: usize,
+        }
+
+        impl<T, A: Allocator> Drop for Guard<'_, T, A> {
+            #[cold]
+            fn drop(&mut self) {
+                unsafe {
+                    // SAFETY: This is only called if `buf.shrink_to_fit` unwinds,
+                    // which is the only time it's safe to call `abort_shrink`.
+                    self.deque.abort_shrink(self.old_head, self.target_cap)
+                }
+            }
+        }
+
+        let guard = Guard { deque: self, old_head, target_cap };
+
+        guard.deque.buf.shrink_to_fit(target_cap);
+
+        // Don't drop the guard if we didn't unwind.
+        mem::forget(guard);
 
         debug_assert!(self.head < self.capacity() || self.capacity() == 0);
         debug_assert!(self.len <= self.capacity());
     }
 
+    /// Reverts the deque back into a consistent state in case `shrink_to` failed.
+    /// This is necessary to prevent UB if the backing allocator returns an error
+    /// from `shrink` and `handle_alloc_error` subsequently unwinds (see #123369).
+    ///
+    /// `old_head` refers to the head index before `shrink_to` was called. `target_cap`
+    /// is the capacity that it was trying to shrink to.
+    unsafe fn abort_shrink(&mut self, old_head: usize, target_cap: usize) {
+        // Moral equivalent of self.head + self.len <= target_cap. Won't overflow
+        // because `self.len <= target_cap`.
+        if self.head <= target_cap - self.len {
+            // The deque's buffer is contiguous, so no need to copy anything around.
+            return;
+        }
+
+        // `shrink_to` already copied the head to fit into the new capacity, so this won't overflow.
+        let head_len = target_cap - self.head;
+        // `self.head > target_cap - self.len` => `self.len > target_cap - self.head =: head_len` so this must be positive.
+        let tail_len = self.len - head_len;
+
+        if tail_len <= cmp::min(head_len, self.capacity() - target_cap) {
+            // There's enough spare capacity to copy the tail to the back (because `tail_len <= self.capacity() - target_cap`),
+            // and copying the tail should be cheaper than copying the head (because `tail_len <= head_len`).
+
+            unsafe {
+                // The old tail and the new tail can't overlap because the head slice lies between them. The
+                // head slice ends at `target_cap`, so that's where we copy to.
+                self.copy_nonoverlapping(0, target_cap, tail_len);
+            }
+        } else {
+            // Either there's not enough spare capacity to make the deque contiguous, or the head is shorter than the tail
+            // (and therefore hopefully cheaper to copy).
+            unsafe {
+                // The old and the new head slice can overlap, so we can't use `copy_nonoverlapping` here.
+                self.copy(self.head, old_head, head_len);
+                self.head = old_head;
+            }
+        }
+    }
+
     /// Shortens the deque, keeping the first `len` elements and dropping
     /// the rest.
     ///
diff --git a/alloc/src/collections/vec_deque/tests.rs b/alloc/src/collections/vec_deque/tests.rs
index f8ce4ca97884e..5329ad1aed5c3 100644
--- a/alloc/src/collections/vec_deque/tests.rs
+++ b/alloc/src/collections/vec_deque/tests.rs
@@ -1,4 +1,11 @@
-use core::iter::TrustedLen;
+#![feature(alloc_error_hook)]
+
+use crate::alloc::{AllocError, Layout};
+use core::{iter::TrustedLen, ptr::NonNull};
+use std::{
+    alloc::{set_alloc_error_hook, take_alloc_error_hook, System},
+    panic::{catch_unwind, AssertUnwindSafe},
+};
 
 use super::*;
 
@@ -790,6 +797,52 @@ fn test_shrink_to() {
     }
 }
 
+#[test]
+fn test_shrink_to_unwind() {
+    // This tests that `shrink_to` leaves the deque in a consistent state when
+    // the call to `RawVec::shrink_to_fit` unwinds. The code is adapted from #123369
+    // but changed to hopefully not have any UB even if the test fails.
+
+    struct BadAlloc;
+
+    unsafe impl Allocator for BadAlloc {
+        fn allocate(&self, l: Layout) -> Result<NonNull<[u8]>, AllocError> {
+            // We allocate zeroed here so that the whole buffer of the deque
+            // is always initialized. That way, even if the deque is left in
+            // an inconsistent state, no uninitialized memory should be accessed.
+            System.allocate_zeroed(l)
+        }
+
+        unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
+            unsafe { System.deallocate(ptr, layout) }
+        }
+
+        unsafe fn shrink(
+            &self,
+            _ptr: NonNull<u8>,
+            _old_layout: Layout,
+            _new_layout: Layout,
+        ) -> Result<NonNull<[u8]>, AllocError> {
+            Err(AllocError)
+        }
+    }
+
+    // preserve the old error hook just in case.
+    let old_error_hook = take_alloc_error_hook();
+    set_alloc_error_hook(|_| panic!("alloc error"));
+
+    let mut v = VecDeque::with_capacity_in(15, BadAlloc);
+    v.push_back(1);
+    v.push_front(2);
+    // This should unwind because it calls `BadAlloc::shrink` and then `handle_alloc_error` which unwinds.
+    assert!(catch_unwind(AssertUnwindSafe(|| v.shrink_to_fit())).is_err());
+    // This should only pass if the deque is left in a consistent state.
+    assert_eq!(v, [2, 1]);
+
+    // restore the old error hook.
+    set_alloc_error_hook(old_error_hook);
+}
+
 #[test]
 fn test_shrink_to_fit() {
     // This test checks that every single combination of head and tail position,
diff --git a/alloc/src/lib.rs b/alloc/src/lib.rs
index 91b83cfe011f2..d607926f8d49e 100644
--- a/alloc/src/lib.rs
+++ b/alloc/src/lib.rs
@@ -92,6 +92,7 @@
 // tidy-alphabetical-start
 #![cfg_attr(not(no_global_oom_handling), feature(const_alloc_error))]
 #![cfg_attr(not(no_global_oom_handling), feature(const_btree_len))]
+#![cfg_attr(test, feature(alloc_error_hook))]
 #![cfg_attr(test, feature(is_sorted))]
 #![cfg_attr(test, feature(new_uninit))]
 #![feature(alloc_layout_extra)]
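
For readers unfamiliar with the idiom, here is a minimal, self-contained sketch of the drop-guard pattern the patch uses. All names (`Tracked`, `Guard`, `risky_step`, `shrink`) are invented for illustration and do not appear in the patch; the real code guards `VecDeque`'s `head` index across `RawVec::shrink_to_fit`.

```rust
use std::mem;
use std::panic::{catch_unwind, AssertUnwindSafe};

/// Invariant: `index` must always be in bounds for `data`.
struct Tracked {
    data: Vec<u8>,
    index: usize,
}

/// Restores `index` if dropped, i.e. if the guarded operation unwinds.
struct Guard<'a> {
    tracked: &'a mut Tracked,
    old_index: usize,
}

impl Drop for Guard<'_> {
    #[cold] // only runs on the unwind path, so keep it out of the hot path
    fn drop(&mut self) {
        self.tracked.index = self.old_index;
    }
}

/// Stand-in for `RawVec::shrink_to_fit`, which can unwind out of
/// `handle_alloc_error` when the allocator's `shrink` fails.
fn risky_step(t: &mut Tracked) {
    if t.data.len() < 4 {
        panic!("allocation failed");
    }
    t.data.truncate(4);
}

fn shrink(t: &mut Tracked) {
    let old_index = t.index;
    // Speculatively relocate `index` for the smaller buffer, the way
    // `shrink_to` moves `head` before shrinking the allocation.
    t.index = 0;

    let guard = Guard { tracked: t, old_index };
    risky_step(guard.tracked);
    // Reached only if `risky_step` didn't unwind: defuse the guard so
    // its `Drop` never undoes the successful relocation.
    mem::forget(guard);
}

fn main() {
    // Success path: the guard is forgotten and the new index sticks.
    let mut big = Tracked { data: vec![0; 8], index: 6 };
    shrink(&mut big);
    assert_eq!(big.index, 0);

    // Failure path: `risky_step` unwinds, the guard's `Drop` runs, and
    // the old index is restored before the panic escapes `shrink`.
    let mut small = Tracked { data: vec![0; 2], index: 1 };
    assert!(catch_unwind(AssertUnwindSafe(|| shrink(&mut small))).is_err());
    assert_eq!(small.index, 1);
}
```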
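Two design choices in the patch are worth calling out. In `shrink_to`, the guard's `Drop` impl is marked `#[cold]` and is defused with `mem::forget` on the success path, so the recovery logic in `abort_shrink` only ever runs when `buf.shrink_to_fit` unwinds; per its SAFETY comment, that is also the only time it is sound to call. In the test, `BadAlloc::allocate` delegates to `System.allocate_zeroed` so the deque's buffer is always fully initialized, which means a botched recovery shows up as a failed `assert_eq!` rather than UB, and the panicking alloc error hook is installed with `set_alloc_error_hook` and the previous hook restored afterwards so other tests keep the default behavior.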