diff --git a/appveyor.yml b/appveyor.yml
index dddf891314..3dc47f1c67 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -37,7 +37,7 @@ build: false
 test_script:
     - set RUSTFLAGS=-g
     - set RUST_BACKTRACE=1
-    - cargo build --release --all-targets --all-features
+    - cargo build --release --all-features --all-targets
     - cargo test --release --all-features
     - set MIRI_SYSROOT=%USERPROFILE%\.xargo\HOST
     - cargo test --release --all-features
diff --git a/rust-version b/rust-version
index 6c3858ccc9..dcd90feeda 100644
--- a/rust-version
+++ b/rust-version
@@ -1 +1 @@
-nightly-2018-11-16
+nightly-2018-11-20
diff --git a/src/fn_call.rs b/src/fn_call.rs
index 150cf7402a..509db0355e 100644
--- a/src/fn_call.rs
+++ b/src/fn_call.rs
@@ -131,10 +131,10 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
             }

             "free" => {
-                let ptr = self.read_scalar(args[0])?.not_undef()?.erase_tag(); // raw ptr operation, no tag
+                let ptr = self.read_scalar(args[0])?.not_undef()?;
                 if !ptr.is_null_ptr(self) {
                     self.memory_mut().deallocate(
-                        ptr.to_ptr()?.with_default_tag(),
+                        ptr.to_ptr()?,
                         None,
                         MiriMemoryKind::C.into(),
                     )?;
@@ -179,7 +179,7 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
                 self.write_scalar(Scalar::Ptr(ptr), dest)?;
             }
             "__rust_dealloc" => {
-                let ptr = self.read_scalar(args[0])?.to_ptr()?.erase_tag(); // raw ptr operation, no tag
+                let ptr = self.read_scalar(args[0])?.to_ptr()?;
                 let old_size = self.read_scalar(args[1])?.to_usize(self)?;
                 let align = self.read_scalar(args[2])?.to_usize(self)?;
                 if old_size == 0 {
@@ -189,13 +189,13 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
                     return err!(HeapAllocNonPowerOfTwoAlignment(align));
                 }
                 self.memory_mut().deallocate(
-                    ptr.with_default_tag(),
+                    ptr,
                     Some((Size::from_bytes(old_size), Align::from_bytes(align, align).unwrap())),
                     MiriMemoryKind::Rust.into(),
                 )?;
             }
             "__rust_realloc" => {
-                let ptr = self.read_scalar(args[0])?.to_ptr()?.erase_tag(); // raw ptr operation, no tag
+                let ptr = self.read_scalar(args[0])?.to_ptr()?;
                 let old_size = self.read_scalar(args[1])?.to_usize(self)?;
                 let align = self.read_scalar(args[2])?.to_usize(self)?;
                 let new_size = self.read_scalar(args[3])?.to_usize(self)?;
@@ -206,7 +206,7 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
                     return err!(HeapAllocNonPowerOfTwoAlignment(align));
                 }
                 let new_ptr = self.memory_mut().reallocate(
-                    ptr.with_default_tag(),
+                    ptr,
                     Size::from_bytes(old_size),
                     Align::from_bytes(align, align).unwrap(),
                     Size::from_bytes(new_size),
@@ -238,8 +238,8 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo

             "dlsym" => {
                 let _handle = self.read_scalar(args[0])?;
-                let symbol = self.read_scalar(args[1])?.to_ptr()?.erase_tag();
-                let symbol_name = self.memory().read_c_str(symbol.with_default_tag())?;
+                let symbol = self.read_scalar(args[1])?.to_ptr()?;
+                let symbol_name = self.memory().read_c_str(symbol)?;
                 let err = format!("bad c unicode symbol: {:?}", symbol_name);
                 let symbol_name = ::std::str::from_utf8(symbol_name).unwrap_or(&err);
                 return err!(Unimplemented(format!(
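A quick illustration of what these allocation shims enable: Miri dispatches on the symbol name, so plain `extern` declarations suffice. This is an illustrative sketch, not part of the patch; note how `free(NULL)` relies on the `is_null_ptr` check in the `"free"` arm above.

```rust
// Illustrative caller of the malloc/free shims above.
extern "C" {
    fn malloc(size: usize) -> *mut u8;
    fn free(ptr: *mut u8);
}

fn main() {
    unsafe {
        let p = malloc(1);
        assert!(!p.is_null());
        *p = 7; // the returned pointer now carries a regular tag
        assert_eq!(*p, 7);
        free(p);
        free(std::ptr::null_mut()); // no-op thanks to the null check in the shim
    }
}
```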
@@ -292,13 +292,13 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
                 return err!(MachineError("the evaluated program panicked".to_string())),

             "memcmp" => {
-                let left = self.read_scalar(args[0])?.not_undef()?.erase_tag(); // raw ptr operation
-                let right = self.read_scalar(args[1])?.not_undef()?.erase_tag(); // raw ptr operation
+                let left = self.read_scalar(args[0])?.not_undef()?;
+                let right = self.read_scalar(args[1])?.not_undef()?;
                 let n = Size::from_bytes(self.read_scalar(args[2])?.to_usize(self)?);

                 let result = {
-                    let left_bytes = self.memory().read_bytes(left.with_default_tag(), n)?;
-                    let right_bytes = self.memory().read_bytes(right.with_default_tag(), n)?;
+                    let left_bytes = self.memory().read_bytes(left, n)?;
+                    let right_bytes = self.memory().read_bytes(right, n)?;

                     use std::cmp::Ordering::*;
                     match left_bytes.cmp(right_bytes) {
@@ -315,8 +315,7 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
             }

             "memrchr" => {
-                let ptr = self.read_scalar(args[0])?.not_undef()?.erase_tag(); // raw ptr operation
-                let ptr = ptr.with_default_tag();
+                let ptr = self.read_scalar(args[0])?.not_undef()?;
                 let val = self.read_scalar(args[1])?.to_bytes()? as u8;
                 let num = self.read_scalar(args[2])?.to_usize(self)?;
                 if let Some(idx) = self.memory().read_bytes(ptr, Size::from_bytes(num))?
@@ -330,8 +329,7 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
             }

             "memchr" => {
-                let ptr = self.read_scalar(args[0])?.not_undef()?.erase_tag(); // raw ptr operation
-                let ptr = ptr.with_default_tag();
+                let ptr = self.read_scalar(args[0])?.not_undef()?;
                 let val = self.read_scalar(args[1])?.to_bytes()? as u8;
                 let num = self.read_scalar(args[2])?.to_usize(self)?;
                 if let Some(idx) = self.memory().read_bytes(ptr, Size::from_bytes(num))?.iter().position(
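For reference, the search semantics these two shims implement, restated in plain Rust (a sketch of the behavior, not Miri's code — the real shims return a pointer into the haystack rather than an index):

```rust
// Plain-Rust restatement of the memchr/memrchr shim semantics (sketch).
fn memchr(haystack: &[u8], needle: u8) -> Option<usize> {
    haystack.iter().position(|&b| b == needle) // first occurrence
}

fn memrchr(haystack: &[u8], needle: u8) -> Option<usize> {
    haystack.iter().rposition(|&b| b == needle) // last occurrence
}

fn main() {
    assert_eq!(memchr(b"abca", b'a'), Some(0));
    assert_eq!(memrchr(b"abca", b'a'), Some(3));
    assert_eq!(memchr(b"abc", b'z'), None);
}
```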
@@ -347,8 +345,8 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo

             "getenv" => {
                 let result = {
-                    let name_ptr = self.read_scalar(args[0])?.to_ptr()?.erase_tag(); // raw ptr operation
-                    let name = self.memory().read_c_str(name_ptr.with_default_tag())?;
+                    let name_ptr = self.read_scalar(args[0])?.to_ptr()?;
+                    let name = self.memory().read_c_str(name_ptr)?;
                     match self.machine.env_vars.get(name) {
                         Some(&var) => Scalar::Ptr(var),
                         None => Scalar::ptr_null(&*self.tcx),
@@ -360,10 +358,10 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
             "unsetenv" => {
                 let mut success = None;
                 {
-                    let name_ptr = self.read_scalar(args[0])?.not_undef()?.erase_tag(); // raw ptr operation
+                    let name_ptr = self.read_scalar(args[0])?.not_undef()?;
                     if !name_ptr.is_null_ptr(self) {
                         let name = self.memory().read_c_str(name_ptr.to_ptr()?
-                            .with_default_tag())?.to_owned();
+                            )?.to_owned();
                         if !name.is_empty() && !name.contains(&b'=') {
                             success = Some(self.machine.env_vars.remove(&name));
                         }
@@ -382,11 +380,11 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
             "setenv" => {
                 let mut new = None;
                 {
-                    let name_ptr = self.read_scalar(args[0])?.not_undef()?.erase_tag(); // raw ptr operation
-                    let value_ptr = self.read_scalar(args[1])?.to_ptr()?.erase_tag(); // raw ptr operation
-                    let value = self.memory().read_c_str(value_ptr.with_default_tag())?;
+                    let name_ptr = self.read_scalar(args[0])?.not_undef()?;
+                    let value_ptr = self.read_scalar(args[1])?.to_ptr()?;
+                    let value = self.memory().read_c_str(value_ptr)?;
                     if !name_ptr.is_null_ptr(self) {
-                        let name = self.memory().read_c_str(name_ptr.to_ptr()?.with_default_tag())?;
+                        let name = self.memory().read_c_str(name_ptr.to_ptr()?)?;
                         if !name.is_empty() && !name.contains(&b'=') {
                             new = Some((name.to_owned(), value.to_owned()));
                         }
@@ -417,14 +415,14 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo

             "write" => {
                 let fd = self.read_scalar(args[0])?.to_bytes()?;
-                let buf = self.read_scalar(args[1])?.not_undef()?.erase_tag();
+                let buf = self.read_scalar(args[1])?.not_undef()?;
                 let n = self.read_scalar(args[2])?.to_bytes()? as u64;
                 trace!("Called write({:?}, {:?}, {:?})", fd, buf, n);
                 let result = if fd == 1 || fd == 2 {
                     // stdout/stderr
                     use std::io::{self, Write};

-                    let buf_cont = self.memory().read_bytes(buf.with_default_tag(), Size::from_bytes(n))?;
+                    let buf_cont = self.memory().read_bytes(buf, Size::from_bytes(n))?;
                     let res = if fd == 1 {
                         io::stdout().write(buf_cont)
                     } else {
@@ -445,8 +443,8 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
             }

             "strlen" => {
-                let ptr = self.read_scalar(args[0])?.to_ptr()?.erase_tag();
-                let n = self.memory().read_c_str(ptr.with_default_tag())?.len();
+                let ptr = self.read_scalar(args[0])?.to_ptr()?;
+                let n = self.memory().read_c_str(ptr)?.len();
                 self.write_scalar(Scalar::from_uint(n as u64, dest.layout.size), dest)?;
             }
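These environment shims are what back `std::env` when running under Miri; a round trip that exercises all three (illustrative, with a made-up variable name):

```rust
// Exercises the setenv/getenv/unsetenv shims through the std wrappers.
fn main() {
    std::env::set_var("MIRI_DEMO_VAR", "hello");                           // setenv
    assert_eq!(std::env::var("MIRI_DEMO_VAR"), Ok("hello".to_string()));   // getenv
    std::env::remove_var("MIRI_DEMO_VAR");                                 // unsetenv
    assert!(std::env::var("MIRI_DEMO_VAR").is_err());
}
```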
@@ -492,7 +490,7 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo

             // Hook pthread calls that go to the thread-local storage memory subsystem
             "pthread_key_create" => {
-                let key_ptr = self.read_scalar(args[0])?.to_ptr()?.erase_tag(); // raw ptr operation
+                let key_ptr = self.read_scalar(args[0])?.to_ptr()?;

                 // Extract the function type out of the signature (that seems easier than constructing it ourselves...)
                 let dtor = match self.read_scalar(args[1])?.not_undef()? {
@@ -515,7 +513,7 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
                     return err!(OutOfTls);
                 }
                 self.memory_mut().write_scalar(
-                    key_ptr.with_default_tag(),
+                    key_ptr,
                     key_layout.align,
                     Scalar::from_uint(key, key_layout.size).into(),
                     key_layout.size,
diff --git a/src/helpers.rs b/src/helpers.rs
index 31e2972957..85329ddcf1 100644
--- a/src/helpers.rs
+++ b/src/helpers.rs
@@ -158,8 +158,10 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for EvalContext<'a, 'mir, 'tcx, super:
         unsafe_cell_action: F,
     }

-    impl<'ecx, 'a, 'mir, 'tcx, F> ValueVisitor<'a, 'mir, 'tcx, Evaluator<'tcx>>
-        for UnsafeCellVisitor<'ecx, 'a, 'mir, 'tcx, F>
+    impl<'ecx, 'a, 'mir, 'tcx, F>
+        ValueVisitor<'a, 'mir, 'tcx, Evaluator<'tcx>>
+    for
+        UnsafeCellVisitor<'ecx, 'a, 'mir, 'tcx, F>
     where
         F: FnMut(MPlaceTy<'tcx, Borrow>) -> EvalResult<'tcx>
     {
@@ -230,7 +232,7 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for EvalContext<'a, 'mir, 'tcx, super:
         }

         // We should never get to a primitive, but always short-circuit somewhere above
-        fn visit_primitive(&mut self, _val: ImmTy<'tcx, Borrow>) -> EvalResult<'tcx>
+        fn visit_primitive(&mut self, _v: MPlaceTy<'tcx, Borrow>) -> EvalResult<'tcx>
         {
             bug!("We should always short-circit before coming to a primitive")
         }
diff --git a/src/intrinsic.rs b/src/intrinsic.rs
index e23cadfcaf..6d5ac8d88b 100644
--- a/src/intrinsic.rs
+++ b/src/intrinsic.rs
@@ -154,12 +154,12 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
                 let count = self.read_scalar(args[2])?.to_usize(self)?;
                 let elem_align = elem_layout.align;
                 // erase tags: this is a raw ptr operation
-                let src = self.read_scalar(args[0])?.not_undef()?.erase_tag();
-                let dest = self.read_scalar(args[1])?.not_undef()?.erase_tag();
+                let src = self.read_scalar(args[0])?.not_undef()?;
+                let dest = self.read_scalar(args[1])?.not_undef()?;
                 self.memory_mut().copy(
-                    src.with_default_tag(),
+                    src,
                     elem_align,
-                    dest.with_default_tag(),
+                    dest,
                     elem_align,
                     Size::from_bytes(count * elem_size),
                     intrinsic_name.ends_with("_nonoverlapping"),
@@ -436,7 +436,7 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
                 let ty = substs.type_at(0);
                 let ty_layout = self.layout_of(ty)?;
                 let val_byte = self.read_scalar(args[1])?.to_u8()?;
-                let ptr = self.read_scalar(args[0])?.not_undef()?.erase_tag().with_default_tag();
+                let ptr = self.read_scalar(args[0])?.not_undef()?;
                 let count = self.read_scalar(args[2])?.to_usize(self)?;
                 self.memory().check_align(ptr, ty_layout.align)?;
                 self.memory_mut().write_repeat(ptr, val_byte, ty_layout.size * count)?;
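Both intrinsics are reachable from ordinary safe wrappers, so a short program exercises exactly these code paths with tagged `src`/`dest` pointers (illustrative):

```rust
// copy_nonoverlapping and write_bytes lower to the intrinsics patched above.
fn main() {
    let mut a = [1u8, 2, 3, 4];
    let p = a.as_mut_ptr();
    unsafe {
        std::ptr::copy_nonoverlapping(p, p.add(2), 2); // a = [1, 2, 1, 2]
        std::ptr::write_bytes(p, 0, 2);                // a = [0, 0, 1, 2]
    }
    assert_eq!(a, [0, 0, 1, 2]);
}
```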
diff --git a/src/lib.rs b/src/lib.rs
index a0ed7c8e4f..418f8f60ce 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -23,7 +23,7 @@
 use rustc::hir::{self, def_id::DefId};
 use rustc::mir;
 use syntax::attr;
-
+use syntax::source_map::DUMMY_SP;

 pub use rustc_mir::interpret::*;
 pub use rustc_mir::interpret::{self, AllocMap, PlaceTy}; // resolve ambiguity
@@ -113,7 +113,7 @@ pub fn create_ecx<'a, 'mir: 'a, 'tcx: 'mir>(
     // Push our stack frame
     ecx.push_stack_frame(
         start_instance,
-        start_mir.span,
+        DUMMY_SP, // there is no call site, we want no span
         start_mir,
         Some(ret_ptr.into()),
         StackPopCleanup::None { cleanup: true },
@@ -146,7 +146,7 @@ pub fn create_ecx<'a, 'mir: 'a, 'tcx: 'mir>(
     let ret_place = MPlaceTy::dangling(ecx.layout_of(tcx.mk_unit())?, &ecx).into();
     ecx.push_stack_frame(
         main_instance,
-        main_mir.span,
+        DUMMY_SP, // there is no call site, we want no span
         main_mir,
         Some(ret_place),
         StackPopCleanup::None { cleanup: true },
@@ -185,7 +185,7 @@ pub fn eval_main<'a, 'tcx: 'a>(
     match res {
         Ok(()) => {
             let leaks = ecx.memory().leak_report();
-            // Disable the leak test on some platforms where we likely do not
+            // Disable the leak test on some platforms where we do not
             // correctly implement TLS destructors.
             let target_os = ecx.tcx.tcx.sess.target.target.target_os.to_lowercase();
             let ignore_leaks = target_os == "windows" || target_os == "macos";
@@ -208,8 +208,16 @@ pub fn eval_main<'a, 'tcx: 'a>(
             let mut err = struct_error(ecx.tcx.tcx.at(span), msg.as_str());
             let frames = ecx.generate_stacktrace(None);
             err.span_label(span, e);
-            for FrameInfo { span, location, .. } in frames {
-                err.span_note(span, &format!("inside call to `{}`", location));
+            // we iterate with indices because we need to look at the next frame (the caller)
+            for idx in 0..frames.len() {
+                let frame_info = &frames[idx];
+                let call_site_is_local = frames.get(idx+1).map_or(false,
+                    |caller_info| caller_info.instance.def_id().is_local());
+                if call_site_is_local {
+                    err.span_note(frame_info.call_site, &frame_info.to_string());
+                } else {
+                    err.note(&frame_info.to_string());
+                }
             }
             err.emit();
         } else {
@@ -312,8 +320,6 @@ impl<'a, 'mir, 'tcx> Machine<'a, 'mir, 'tcx> for Evaluator<'tcx> {
             // Uses mem::uninitialized
             ("std::ptr::read", ""),
             ("std::sys::windows::mutex::Mutex::", ""),
-            // Should directly take a raw reference
-            ("<std::cell::UnsafeCell<T>>", "::get"),
         ];
         for frame in ecx.stack().iter()
             .rev().take(3)
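The pattern in the new loop — pairing each frame with the next one, which is its caller — isolated on a stand-in type (names hypothetical, for illustration only):

```rust
// Sketch of the caller-lookup pattern used above, on a toy frame type.
struct Frame { is_local: bool, desc: &'static str }

fn emit(frames: &[Frame]) {
    for idx in 0..frames.len() {
        // frames[idx + 1] is the caller, i.e. the function containing this call site
        let call_site_is_local = frames.get(idx + 1).map_or(false, |caller| caller.is_local);
        if call_site_is_local {
            println!("note (with span): {}", frames[idx].desc);
        } else {
            println!("note: {}", frames[idx].desc);
        }
    }
}

fn main() {
    emit(&[
        Frame { is_local: true,  desc: "inside call to `helper`" },
        Frame { is_local: false, desc: "inside call to `main`" },
    ]);
}
```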
diff --git a/src/operator.rs b/src/operator.rs
index e5c695009c..be05c22599 100644
--- a/src/operator.rs
+++ b/src/operator.rs
@@ -142,8 +142,9 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
                 // allocations sit right next to each other. The C/C++ standards are
                 // somewhat fuzzy about this case, so I think for now this check is
                 // "good enough".
-                self.memory().check_bounds_ptr(left, false)?;
-                self.memory().check_bounds_ptr(right, false)?;
+                // We require liveness, as dead allocations can of course overlap.
+                self.memory().check_bounds_ptr(left, InboundsCheck::Live)?;
+                self.memory().check_bounds_ptr(right, InboundsCheck::Live)?;
                 // Two live in-bounds pointers, we can compare across allocations
                 left == right
             }
@@ -153,15 +154,17 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
             (Scalar::Bits { bits, size }, Scalar::Ptr(ptr)) => {
                 assert_eq!(size as u64, self.pointer_size().bytes());
                 let bits = bits as u64;
-                let (alloc_size, alloc_align) = self.memory().get_size_and_align(ptr.alloc_id);

                 // Case I: Comparing with NULL
                 if bits == 0 {
                     // Test if the ptr is in-bounds. Then it cannot be NULL.
-                    if ptr.offset <= alloc_size {
+                    if self.memory().check_bounds_ptr(ptr, InboundsCheck::MaybeDead).is_ok() {
                         return Ok(false);
                     }
                 }
+
+                let (alloc_size, alloc_align) = self.memory().get_size_and_align(ptr.alloc_id);
+
                 // Case II: Alignment gives it away
                 if ptr.offset.bytes() % alloc_align.abi() == 0 {
                     // The offset maintains the allocation alignment, so we know `base+offset`
@@ -293,11 +296,11 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
         let offset = offset.checked_mul(pointee_size).ok_or_else(|| EvalErrorKind::Overflow(mir::BinOp::Mul))?;
         // Now let's see what kind of pointer this is
         if let Scalar::Ptr(ptr) = ptr {
-            // Both old and new pointer must be in-bounds.
+            // Both old and new pointer must be in-bounds of a *live* allocation.
             // (Of the same allocation, but that part is trivial with our representation.)
-            self.memory().check_bounds_ptr(ptr, false)?;
+            self.memory().check_bounds_ptr(ptr, InboundsCheck::Live)?;
             let ptr = ptr.signed_offset(offset, self)?;
-            self.memory().check_bounds_ptr(ptr, false)?;
+            self.memory().check_bounds_ptr(ptr, InboundsCheck::Live)?;
             Ok(Scalar::Ptr(ptr))
         } else {
             // An integer pointer. They can only be offset by 0, and we pretend there
diff --git a/src/stacked_borrows.rs b/src/stacked_borrows.rs
index f564bf9ab7..063a544baa 100644
--- a/src/stacked_borrows.rs
+++ b/src/stacked_borrows.rs
@@ -4,8 +4,8 @@ use rustc::ty::{self, layout::Size};
 use rustc::hir::{Mutability, MutMutable, MutImmutable};

 use crate::{
-    EvalResult, EvalErrorKind, MiriEvalContext, HelpersEvalContextExt,
-    MemoryKind, MiriMemoryKind, RangeMap, AllocId, Allocation, AllocationExtra,
+    EvalResult, EvalErrorKind, MiriEvalContext, HelpersEvalContextExt, Evaluator, MutValueVisitor,
+    MemoryKind, MiriMemoryKind, RangeMap, AllocId, Allocation, AllocationExtra, InboundsCheck,
     Pointer, MemPlace, Scalar, Immediate, ImmTy, PlaceTy, MPlaceTy,
 };

@@ -303,6 +303,9 @@ impl<'tcx> Stacks {
         trace!("{} access of tag {:?}: {:?}, size {}",
             if is_write { "write" } else { "read" },
             ptr.tag, ptr, size.bytes());
+        // Even reads can have a side-effect, by invalidating other references.
+        // This is fundamentally necessary since `&mut` asserts that there
+        // are no accesses through other references, not even reads.
         let mut stacks = self.stacks.borrow_mut();
         for stack in stacks.iter_mut(ptr.offset, size) {
             stack.access(ptr.tag, is_write)?;
@@ -311,6 +314,7 @@ impl<'tcx> Stacks {
     }

     /// Reborrow the given pointer to the new tag for the given kind of reference.
+    /// This works on `&self` because we might encounter references to constant memory.
     fn reborrow(
         &self,
         ptr: Pointer<Borrow>,
@@ -401,12 +405,12 @@ pub trait EvalContextExt<'tcx> {
-    fn tag_dereference(
+    fn ptr_dereference(
         &self,
         place: MPlaceTy<'tcx, Borrow>,
         size: Size,
         mutability: Option<Mutability>,
-    ) -> EvalResult<'tcx, Borrow>;
+    ) -> EvalResult<'tcx>;

     fn tag_new_allocation(
         &mut self,
         id: AllocId,
         kind: MemoryKind<MiriMemoryKind>,
     ) -> Borrow;
@@ -414,8 +418,16 @@ pub trait EvalContextExt<'tcx> {
-    /// Retag an indidual pointer, returning the retagged version.
+    /// Reborrow the given place, returning the newly tagged ptr to it.
     fn reborrow(
+        &mut self,
+        place: MPlaceTy<'tcx, Borrow>,
+        size: Size,
+        new_bor: Borrow
+    ) -> EvalResult<'tcx, Pointer<Borrow>>;
+
+    /// Retag an individual pointer, returning the retagged version.
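Two executable sanity checks for the facts the comparison and offset code above relies on (illustrative):

```rust
// Facts behind Case I and the in-bounds offset checks, as runnable assertions.
fn main() {
    let x = [0u8; 4];
    let p = x.as_ptr();
    // An in-bounds pointer to a (possibly dead) allocation can never be NULL.
    assert!(!p.is_null());
    // Offsets must stay within the allocation; one-past-the-end is still allowed.
    let _end = unsafe { p.add(4) };
}
```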
+    fn retag_reference(
         &mut self,
         ptr: ImmTy<'tcx, Borrow>,
         mutbl: Mutability,
@@ -468,13 +480,13 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for MiriEvalContext<'a, 'mir, 'tcx> {
     ///
     /// Note that this does NOT mean that all this memory will actually get accessed/referenced!
     /// We could be in the middle of `&(*var).1`.
-    fn tag_dereference(
+    fn ptr_dereference(
         &self,
         place: MPlaceTy<'tcx, Borrow>,
         size: Size,
         mutability: Option<Mutability>,
-    ) -> EvalResult<'tcx, Borrow> {
-        trace!("tag_dereference: Accessing {} reference for {:?} (pointee {})",
+    ) -> EvalResult<'tcx> {
+        trace!("ptr_dereference: Accessing {} reference for {:?} (pointee {})",
             if let Some(mutability) = mutability { format!("{:?}", mutability) } else { format!("raw") },
             place.ptr, place.layout.ty);
         let ptr = place.ptr.to_ptr()?;
@@ -485,12 +497,8 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for MiriEvalContext<'a, 'mir, 'tcx> {
         // That can transmute a raw ptr to a (shared/mut) ref, and a mut ref to a shared one.
         match (mutability, ptr.tag) {
             (None, _) => {
-                // Don't use the tag, this is a raw access! They should happen tagless.
-                // This is needed for `*mut` to make any sense: Writes *do* enforce the
-                // `Uniq` tag to be up top, but we must make sure raw writes do not do that.
-                // This does mean, however, that `&*foo` is *not* a NOP *if* `foo` is a raw ptr.
-                // Also don't do any further validation, this is raw after all.
-                return Ok(Borrow::default());
+                // No further validation on raw accesses.
+                return Ok(());
             }
             (Some(MutMutable), Borrow::Uniq(_)) |
             (Some(MutImmutable), Borrow::Shr(_)) => {
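Since the raw case now returns early, an access through a raw pointer is only checked for bounds and liveness; no tag is consulted or changed. Illustrative:

```rust
// Raw accesses take the early-return path in ptr_dereference: no retagging.
fn main() {
    let mut x = 0i32;
    let raw = &mut x as *mut i32; // the cast escapes the tag to raw
    unsafe { *raw = 1 };          // bounds/liveness checked, tag left alone
    assert_eq!(unsafe { *raw }, 1);
}
```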
@@ -515,7 +523,7 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for MiriEvalContext<'a, 'mir, 'tcx> {
         }

         // Get the allocation
-        self.memory().check_bounds(ptr, size, false)?;
+        self.memory().check_bounds(ptr, size, InboundsCheck::Live)?;
         let alloc = self.memory().get(ptr.alloc_id).expect("We checked that the ptr is fine!");
         // If we got here, we do some checking, *but* we leave the tag unchanged.
         if let Borrow::Shr(Some(_)) = ptr.tag {
@@ -531,27 +539,51 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for MiriEvalContext<'a, 'mir, 'tcx> {
             alloc.extra.deref(ptr, size, kind)?;
         }

-        // All is good, and do not change the tag
-        Ok(ptr.tag)
+        // All is good
+        Ok(())
     }

     /// The given place may henceforth be accessed through raw pointers.
+    #[inline(always)]
     fn escape_to_raw(
         &mut self,
         place: MPlaceTy<'tcx, Borrow>,
         size: Size,
     ) -> EvalResult<'tcx> {
-        trace!("escape_to_raw: {:?} is now accessible by raw pointers", *place);
-        // Get the allocation
+        self.reborrow(place, size, Borrow::default())?;
+        Ok(())
+    }
+
+    fn reborrow(
+        &mut self,
+        place: MPlaceTy<'tcx, Borrow>,
+        size: Size,
+        new_bor: Borrow
+    ) -> EvalResult<'tcx, Pointer<Borrow>> {
         let ptr = place.ptr.to_ptr()?;
-        self.memory().check_bounds(ptr, size, false)?; // `ptr_dereference` wouldn't do any checks if this is a raw ptr
+        let new_ptr = Pointer::new_with_tag(ptr.alloc_id, ptr.offset, new_bor);
+        trace!("reborrow: Creating new reference for {:?} (pointee {}): {:?}",
+            ptr, place.layout.ty, new_bor);
+
+        // Get the allocation. It might not be mutable, so we cannot use `get_mut`.
+        self.memory().check_bounds(ptr, size, InboundsCheck::Live)?;
         let alloc = self.memory().get(ptr.alloc_id).expect("We checked that the ptr is fine!");
-        // Re-borrow to raw. This is a NOP for shared borrows, but we do not know the borrow
-        // type here and that's also okay. Freezing does not matter here.
-        alloc.extra.reborrow(ptr, size, Borrow::default(), RefKind::Raw)
+        // Update the stacks.
+        if let Borrow::Shr(Some(_)) = new_bor {
+            // Reference that cares about freezing. We need a frozen-sensitive reborrow.
+            self.visit_freeze_sensitive(place, size, |cur_ptr, size, frozen| {
+                let kind = if frozen { RefKind::Frozen } else { RefKind::Raw };
+                alloc.extra.reborrow(cur_ptr, size, new_bor, kind)
+            })?;
+        } else {
+            // Just treat this as one big chunk.
+            let kind = if new_bor.is_unique() { RefKind::Unique } else { RefKind::Raw };
+            alloc.extra.reborrow(ptr, size, new_bor, kind)?;
+        }
+        Ok(new_ptr)
     }

-    fn reborrow(
+    fn retag_reference(
         &mut self,
         val: ImmTy<'tcx, Borrow>,
         mutbl: Mutability,
@@ -566,33 +598,17 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for MiriEvalContext<'a, 'mir, 'tcx> {
             return Ok(*val);
         }

-        // Prepare to re-borrow this place.
-        let ptr = place.ptr.to_ptr()?;
+        // Compute new borrow.
         let time = self.machine.stacked_borrows.increment_clock();
         let new_bor = match mutbl {
             MutMutable => Borrow::Uniq(time),
             MutImmutable => Borrow::Shr(Some(time)),
         };
-        trace!("reborrow: Creating new {:?} reference for {:?} (pointee {}): {:?}",
-            mutbl, ptr, place.layout.ty, new_bor);
-        // Get the allocation. It might not be mutable, so we cannot use `get_mut`.
-        self.memory().check_bounds(ptr, size, false)?;
-        let alloc = self.memory().get(ptr.alloc_id).expect("We checked that the ptr is fine!");
-        // Update the stacks.
-        if mutbl == MutImmutable {
-            // Shared reference. We need a frozen-sensitive reborrow.
-            self.visit_freeze_sensitive(place, size, |cur_ptr, size, frozen| {
-                let kind = if frozen { RefKind::Frozen } else { RefKind::Raw };
-                alloc.extra.reborrow(cur_ptr, size, new_bor, kind)
-            })?;
-        } else {
-            // Mutable reference. Just treat this as one big chunk.
-            alloc.extra.reborrow(ptr, size, new_bor, RefKind::Unique)?;
-        }
+        // Reborrow.
+        let new_ptr = self.reborrow(place, size, new_bor)?;

         // Return new ptr
-        let new_ptr = Pointer::new_with_tag(ptr.alloc_id, ptr.offset, new_bor);
         let new_place = MemPlace { ptr: Scalar::Ptr(new_ptr), ..*place };
         Ok(new_place.to_ref())
     }
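`retag_reference` is reached from the fast path in `retag` and, for aggregates, from the visitor in the next hunk; a run-pass-style illustration of a compound value whose components both get retagged:

```rust
// Retag recurses into aggregates: both the `&mut` and the `Box` get fresh tags.
fn main() {
    let mut x = 1;
    {
        let pair = (&mut x, Box::new(2));
        *pair.0 += *pair.1; // both components remain usable after the compound retag
    }
    assert_eq!(x, 3);
}
```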
@@ -602,17 +618,62 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for MiriEvalContext<'a, 'mir, 'tcx> {
         _fn_entry: bool,
         place: PlaceTy<'tcx, Borrow>
     ) -> EvalResult<'tcx> {
-        // For now, we only retag if the toplevel type is a reference.
-        // TODO: Recurse into structs and enums, sharing code with validation.
         // TODO: Honor `fn_entry`.
-        let mutbl = match place.layout.ty.sty {
-            ty::Ref(_, _, mutbl) => mutbl, // go ahead
-            _ => return Ok(()), // do nothing, for now
-        };
-        // Retag the pointer and write it back.
-        let val = self.read_immediate(self.place_to_op(place)?)?;
-        let val = self.reborrow(val, mutbl)?;
-        self.write_immediate(val, place)?;
+
+        // We need a visitor to visit all references. However, that requires
+        // a `MemPlace`, so we have a fast path for reference types that
+        // avoids allocating.
+        // Cannot use `builtin_deref` because that reports *immutable* for `Box`,
+        // making it useless.
+        if let Some(mutbl) = match place.layout.ty.sty {
+            ty::Ref(_, _, mutbl) => Some(mutbl),
+            ty::Adt(..) if place.layout.ty.is_box() => Some(MutMutable),
+            _ => None, // handled with the general case below
+        } {
+            // fast path
+            let val = self.read_immediate(self.place_to_op(place)?)?;
+            let val = self.retag_reference(val, mutbl)?;
+            self.write_immediate(val, place)?;
+            return Ok(());
+        }
+        let place = self.force_allocation(place)?;
+
+        let mut visitor = RetagVisitor { ecx: self };
+        visitor.visit_value(place)?;
+
+        // The actual visitor
+        struct RetagVisitor<'ecx, 'a, 'mir, 'tcx> {
+            ecx: &'ecx mut MiriEvalContext<'a, 'mir, 'tcx>,
+        }
+        impl<'ecx, 'a, 'mir, 'tcx>
+            MutValueVisitor<'a, 'mir, 'tcx, Evaluator<'tcx>>
+        for
+            RetagVisitor<'ecx, 'a, 'mir, 'tcx>
+        {
+            type V = MPlaceTy<'tcx, Borrow>;
+
+            #[inline(always)]
+            fn ecx(&mut self) -> &mut MiriEvalContext<'a, 'mir, 'tcx> {
+                &mut self.ecx
+            }
+
+            // Primitives of reference type, that is the one thing we are interested in.
+            fn visit_primitive(&mut self, place: MPlaceTy<'tcx, Borrow>) -> EvalResult<'tcx>
+            {
+                // Cannot use `builtin_deref` because that reports *immutable* for `Box`,
+                // making it useless.
+                let mutbl = match place.layout.ty.sty {
+                    ty::Ref(_, _, mutbl) => mutbl,
+                    ty::Adt(..) if place.layout.ty.is_box() => MutMutable,
+                    _ => return Ok(()), // nothing to do
+                };
+                let val = self.ecx.read_immediate(place.into())?;
+                let val = self.ecx.retag_reference(val, mutbl)?;
+                self.ecx.write_immediate(val, place.into())?;
+                Ok(())
+            }
+        }
+
+        Ok(())
     }
 }
diff --git a/tests/compile-fail-fullmir/out_of_bounds_ptr_1.rs b/tests/compile-fail-fullmir/out_of_bounds_ptr_1.rs
index 8dce7e5786..ce1c89a2a0 100644
--- a/tests/compile-fail-fullmir/out_of_bounds_ptr_1.rs
+++ b/tests/compile-fail-fullmir/out_of_bounds_ptr_1.rs
@@ -1,4 +1,4 @@
-// error-pattern: pointer computed at offset 5, outside bounds of allocation
+// error-pattern: must be in-bounds and live at offset 5, but is outside bounds of allocation
 fn main() {
     let v = [0i8; 4];
     let x = &v as *const i8;
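For contrast, the accepted counterpart of this test (a sketch): offset 4 — the one-past-the-end pointer — is still in-bounds; only offset 5 triggers the new message:

```rust
// Passing counterpart: one-past-the-end (offset 4) is allowed, offset 5 is not.
fn main() {
    let v = [0i8; 4];
    let x = &v as *const i8;
    let _x = unsafe { x.offset(4) }; // ok; offset(5) would be rejected
}
```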
diff --git a/tests/compile-fail-fullmir/stacked_borrows/box_exclusive_violation1.rs b/tests/compile-fail-fullmir/stacked_borrows/box_exclusive_violation1.rs
new file mode 100644
index 0000000000..73631173b9
--- /dev/null
+++ b/tests/compile-fail-fullmir/stacked_borrows/box_exclusive_violation1.rs
@@ -0,0 +1,29 @@
+fn demo_mut_advanced_unique(mut our: Box<i32>) -> i32 {
+    unknown_code_1(&*our);
+
+    // This "re-asserts" uniqueness of the reference: After writing, we know
+    // our tag is at the top of the stack.
+    *our = 5;
+
+    unknown_code_2();
+
+    // We know this will return 5
+    *our //~ ERROR does not exist on the stack
+}
+
+// Now comes the evil context
+use std::ptr;
+
+static mut LEAK: *mut i32 = ptr::null_mut();
+
+fn unknown_code_1(x: &i32) { unsafe {
+    LEAK = x as *const _ as *mut _;
+} }
+
+fn unknown_code_2() { unsafe {
+    *LEAK = 7;
+} }
+
+fn main() {
+    assert_eq!(demo_mut_advanced_unique(Box::new(0)), 5);
+}
diff --git a/tests/compile-fail-fullmir/stacked_borrows/buggy_as_mut_slice.rs b/tests/compile-fail-fullmir/stacked_borrows/buggy_as_mut_slice.rs
index 2f3d0793f6..e08e3bba68 100644
--- a/tests/compile-fail-fullmir/stacked_borrows/buggy_as_mut_slice.rs
+++ b/tests/compile-fail-fullmir/stacked_borrows/buggy_as_mut_slice.rs
@@ -1,5 +1,3 @@
-// error-pattern: mutable reference with frozen tag
-
 mod safe {
     use std::slice::from_raw_parts_mut;

@@ -12,10 +10,8 @@ mod safe {

 fn main() {
     let v = vec![0,1,2];
-    let _v1 = safe::as_mut_slice(&v);
-/*
-    let v2 = safe::as_mut_slice(&v);
+    let v1 = safe::as_mut_slice(&v);
+    let _v2 = safe::as_mut_slice(&v);
     v1[1] = 5;
-    v1[1] = 6;
-*/
+    //~^ ERROR does not exist on the stack
 }
diff --git a/tests/compile-fail-fullmir/stacked_borrows/buggy_split_at_mut.rs b/tests/compile-fail-fullmir/stacked_borrows/buggy_split_at_mut.rs
index 711544f801..a6daa5d93d 100644
--- a/tests/compile-fail-fullmir/stacked_borrows/buggy_split_at_mut.rs
+++ b/tests/compile-fail-fullmir/stacked_borrows/buggy_split_at_mut.rs
@@ -11,6 +11,7 @@ mod safe {
         assert!(mid <= len);

         (from_raw_parts_mut(ptr, len - mid), // BUG: should be "mid" instead of "len - mid"
+         //~^ ERROR does not exist on the stack
          from_raw_parts_mut(ptr.offset(mid as isize), len - mid))
     }
 }
@@ -19,7 +20,6 @@
 fn main() {
     let mut array = [1,2,3,4];
     let (a, b) = safe::split_at_mut(&mut array, 0);
-    //~^ ERROR does not exist on the stack
     a[1] = 5;
     b[1] = 6;
 }
diff --git a/tests/compile-fail-fullmir/stacked_borrows/return_invalid_mut_option.rs b/tests/compile-fail-fullmir/stacked_borrows/return_invalid_mut_option.rs
new file mode 100644
index 0000000000..28a1f74c6a
--- /dev/null
+++ b/tests/compile-fail-fullmir/stacked_borrows/return_invalid_mut_option.rs
@@ -0,0 +1,11 @@
+// Make sure that we cannot return a `&mut` that got already invalidated, not even in an `Option`.
+fn foo(x: &mut (i32, i32)) -> Option<&mut i32> {
+    let xraw = x as *mut (i32, i32);
+    let ret = Some(unsafe { &mut (*xraw).1 });
+    let _val = unsafe { *xraw }; // invalidate xref
+    ret //~ ERROR does not exist on the stack
+}
+
+fn main() {
+    foo(&mut (1, 2));
+}
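The well-behaved variant of the same pattern passes (a sketch): performing the raw read before creating the `&mut` leaves the returned reference on top of the stack:

```rust
// Passing counterpart: the raw read happens before the &mut is created.
fn foo(x: &mut (i32, i32)) -> Option<&mut i32> {
    let xraw = x as *mut (i32, i32);
    let _val = unsafe { *xraw };     // read first...
    Some(unsafe { &mut (*xraw).1 })  // ...then reborrow mutably
}

fn main() {
    *foo(&mut (1, 2)).unwrap() = 5;
}
```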
diff --git a/tests/compile-fail-fullmir/stacked_borrows/return_invalid_mut_tuple.rs b/tests/compile-fail-fullmir/stacked_borrows/return_invalid_mut_tuple.rs
new file mode 100644
index 0000000000..3357af68a8
--- /dev/null
+++ b/tests/compile-fail-fullmir/stacked_borrows/return_invalid_mut_tuple.rs
@@ -0,0 +1,11 @@
+// Make sure that we cannot return a `&mut` that got already invalidated, not even in a tuple.
+fn foo(x: &mut (i32, i32)) -> (&mut i32,) {
+    let xraw = x as *mut (i32, i32);
+    let ret = (unsafe { &mut (*xraw).1 },);
+    let _val = unsafe { *xraw }; // invalidate xref
+    ret //~ ERROR does not exist on the stack
+}
+
+fn main() {
+    foo(&mut (1, 2));
+}
diff --git a/tests/compile-fail-fullmir/stacked_borrows/return_invalid_shr_option.rs b/tests/compile-fail-fullmir/stacked_borrows/return_invalid_shr_option.rs
new file mode 100644
index 0000000000..9d220991c3
--- /dev/null
+++ b/tests/compile-fail-fullmir/stacked_borrows/return_invalid_shr_option.rs
@@ -0,0 +1,11 @@
+// Make sure that we cannot return a `&` that got already invalidated, not even in an `Option`.
+fn foo(x: &mut (i32, i32)) -> Option<&i32> {
+    let xraw = x as *mut (i32, i32);
+    let ret = Some(unsafe { &(*xraw).1 });
+    unsafe { *xraw = (42, 23) }; // unfreeze
+    ret //~ ERROR is not frozen
+}
+
+fn main() {
+    foo(&mut (1, 2));
+}
diff --git a/tests/compile-fail-fullmir/stacked_borrows/return_invalid_shr_tuple.rs b/tests/compile-fail-fullmir/stacked_borrows/return_invalid_shr_tuple.rs
new file mode 100644
index 0000000000..060fa25c23
--- /dev/null
+++ b/tests/compile-fail-fullmir/stacked_borrows/return_invalid_shr_tuple.rs
@@ -0,0 +1,11 @@
+// Make sure that we cannot return a `&` that got already invalidated, not even in a tuple.
+fn foo(x: &mut (i32, i32)) -> (&i32,) {
+    let xraw = x as *mut (i32, i32);
+    let ret = (unsafe { &(*xraw).1 },);
+    unsafe { *xraw = (42, 23) }; // unfreeze
+    ret //~ ERROR is not frozen
+}
+
+fn main() {
+    foo(&mut (1, 2));
+}
diff --git a/tests/compile-fail-fullmir/stacked_borrows/transmute-is-no-escape.rs b/tests/compile-fail-fullmir/stacked_borrows/transmute-is-no-escape.rs
index 1ab005e3fa..75abce3111 100644
--- a/tests/compile-fail-fullmir/stacked_borrows/transmute-is-no-escape.rs
+++ b/tests/compile-fail-fullmir/stacked_borrows/transmute-is-no-escape.rs
@@ -8,5 +8,6 @@ use std::mem;
 fn main() {
     let mut x: i32 = 42;
     let raw: *mut i32 = unsafe { mem::transmute(&mut x) };
+    let raw = raw as usize as *mut i32; // make sure we killed the tag
     unsafe { *raw = 13; } //~ ERROR does not exist on the stack
 }
diff --git a/tests/run-pass-fullmir/box-pair-to-vec.rs b/tests/run-pass-fullmir/box-pair-to-vec.rs
new file mode 100644
index 0000000000..3b829d0f12
--- /dev/null
+++ b/tests/run-pass-fullmir/box-pair-to-vec.rs
@@ -0,0 +1,30 @@
+//ignore-msvc: Stdout not implemented on Windows
+
+#[repr(C)]
+#[derive(Debug)]
+struct PairFoo {
+    fst: Foo,
+    snd: Foo,
+}
+
+#[derive(Debug)]
+struct Foo(u64);
+
+fn reinterstruct(box_pair: Box<PairFoo>) -> Vec<Foo> {
+    let ref_pair = Box::leak(box_pair) as *mut PairFoo;
+    let ptr_foo = unsafe { &mut (*ref_pair).fst as *mut Foo };
+    unsafe {
+        Vec::from_raw_parts(ptr_foo, 2, 2)
+    }
+}
+
+fn main() {
+    let pair_foo = Box::new(PairFoo {
+        fst: Foo(42),
+        snd: Foo(1337),
+    });
+    println!("pair_foo = {:?}", pair_foo);
+    for (n, foo) in reinterstruct(pair_foo).into_iter().enumerate() {
+        println!("foo #{} = {:?}", n, foo);
+    }
+}
diff --git a/tests/run-pass-fullmir/box-pair-to-vec.stdout b/tests/run-pass-fullmir/box-pair-to-vec.stdout
new file mode 100644
index 0000000000..230ef368da
--- /dev/null
+++ b/tests/run-pass-fullmir/box-pair-to-vec.stdout
@@ -0,0 +1,3 @@
+pair_foo = PairFoo { fst: Foo(42), snd: Foo(1337) }
+foo #0 = Foo(42)
+foo #1 = Foo(1337)
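Contrast with the `transmute-is-no-escape` test above: an ordinary `as` cast is an escape-to-raw, so the equivalent write through a cast-created raw pointer is accepted (illustrative):

```rust
// Accepted counterpart of transmute-is-no-escape: the cast escapes the tag to raw.
fn main() {
    let mut x: i32 = 42;
    let raw = &mut x as *mut i32; // escape_to_raw runs here
    unsafe { *raw = 13; }
    assert_eq!(x, 13);
}
```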