diff --git a/kani-compiler/src/codegen_cprover_gotoc/codegen/intrinsic.rs b/kani-compiler/src/codegen_cprover_gotoc/codegen/intrinsic.rs index c4d5396f1fe8..421d79e943c4 100644 --- a/kani-compiler/src/codegen_cprover_gotoc/codegen/intrinsic.rs +++ b/kani-compiler/src/codegen_cprover_gotoc/codegen/intrinsic.rs @@ -5,6 +5,7 @@ use super::typ; use super::{bb_label, PropertyClass}; use crate::codegen_cprover_gotoc::codegen::ty_stable::pointee_type_stable; use crate::codegen_cprover_gotoc::{utils, GotocCtx}; +use crate::intrinsics::Intrinsic; use crate::unwrap_or_return_codegen_unimplemented_stmt; use cbmc::goto_program::{ ArithmeticOverflowResult, BinaryOperator, BuiltinFn, Expr, Location, Stmt, Type, @@ -114,7 +115,7 @@ impl<'tcx> GotocCtx<'tcx> { span: Span, ) -> Stmt { let intrinsic_name = instance.intrinsic_name().unwrap(); - let intrinsic = intrinsic_name.as_str(); + let intrinsic_str = intrinsic_name.as_str(); let loc = self.codegen_span_stable(span); debug!(?instance, "codegen_intrinsic"); debug!(?fargs, "codegen_intrinsic"); @@ -163,7 +164,7 @@ impl<'tcx> GotocCtx<'tcx> { let div_overflow_check = self.codegen_assert_assume( div_does_not_overflow, PropertyClass::ArithmeticOverflow, - format!("attempt to compute {} which would overflow", intrinsic).as_str(), + format!("attempt to compute {} which would overflow", intrinsic_str).as_str(), loc, ); let res = a.$f(b); @@ -257,7 +258,7 @@ impl<'tcx> GotocCtx<'tcx> { macro_rules! codegen_atomic_binop { ($op: ident) => {{ let loc = self.codegen_span_stable(span); - self.store_concurrent_construct(intrinsic, loc); + self.store_concurrent_construct(intrinsic_str, loc); let var1_ref = fargs.remove(0); let var1 = var1_ref.dereference(); let (tmp, decl_stmt) = @@ -280,7 +281,7 @@ impl<'tcx> GotocCtx<'tcx> { macro_rules! unstable_codegen { ($($tt:tt)*) => {{ let expr = self.codegen_unimplemented_expr( - &format!("'{}' intrinsic", intrinsic), + &format!("'{}' intrinsic", intrinsic_str), cbmc_ret_ty, loc, "https://github.com/model-checking/kani/issues/new/choose", @@ -289,342 +290,273 @@ impl<'tcx> GotocCtx<'tcx> { }}; } - if let Some(stripped) = intrinsic.strip_prefix("simd_shuffle") { - assert!(fargs.len() == 3, "`simd_shuffle` had unexpected arguments {fargs:?}"); - let n: u64 = self.simd_shuffle_length(stripped, farg_types, span); - return self.codegen_intrinsic_simd_shuffle(fargs, place, farg_types, ret_ty, n, span); - } + let intrinsic = Intrinsic::from_instance(&instance); match intrinsic { - "add_with_overflow" => { + Intrinsic::AddWithOverflow => { self.codegen_op_with_overflow(BinaryOperator::OverflowResultPlus, fargs, place, loc) } - "arith_offset" => self.codegen_offset(intrinsic, instance, fargs, place, loc), - "assert_inhabited" => self.codegen_assert_intrinsic(instance, intrinsic, span), - "assert_mem_uninitialized_valid" => { - self.codegen_assert_intrinsic(instance, intrinsic, span) + Intrinsic::ArithOffset => { + self.codegen_offset(intrinsic_str, instance, fargs, place, loc) + } + Intrinsic::AssertInhabited => { + self.codegen_assert_intrinsic(instance, intrinsic_str, span) + } + Intrinsic::AssertMemUninitializedValid => { + self.codegen_assert_intrinsic(instance, intrinsic_str, span) + } + Intrinsic::AssertZeroValid => { + self.codegen_assert_intrinsic(instance, intrinsic_str, span) } - "assert_zero_valid" => self.codegen_assert_intrinsic(instance, intrinsic, span), // https://doc.rust-lang.org/core/intrinsics/fn.assume.html // Informs the optimizer that a condition is always true. 
// If the condition is false, the behavior is undefined. - "assume" => self.codegen_assert_assume( + Intrinsic::Assume => self.codegen_assert_assume( fargs.remove(0).cast_to(Type::bool()), PropertyClass::Assume, "assumption failed", loc, ), - "atomic_and_seqcst" => codegen_atomic_binop!(bitand), - "atomic_and_acquire" => codegen_atomic_binop!(bitand), - "atomic_and_acqrel" => codegen_atomic_binop!(bitand), - "atomic_and_release" => codegen_atomic_binop!(bitand), - "atomic_and_relaxed" => codegen_atomic_binop!(bitand), - name if name.starts_with("atomic_cxchg") => { - self.codegen_atomic_cxchg(intrinsic, fargs, place, loc) + Intrinsic::AtomicAnd(_) => codegen_atomic_binop!(bitand), + Intrinsic::AtomicCxchg(_) | Intrinsic::AtomicCxchgWeak(_) => { + self.codegen_atomic_cxchg(intrinsic_str, fargs, place, loc) } - "atomic_fence_seqcst" => self.codegen_atomic_noop(intrinsic, loc), - "atomic_fence_acquire" => self.codegen_atomic_noop(intrinsic, loc), - "atomic_fence_acqrel" => self.codegen_atomic_noop(intrinsic, loc), - "atomic_fence_release" => self.codegen_atomic_noop(intrinsic, loc), - "atomic_load_seqcst" => self.codegen_atomic_load(intrinsic, fargs, place, loc), - "atomic_load_acquire" => self.codegen_atomic_load(intrinsic, fargs, place, loc), - "atomic_load_relaxed" => self.codegen_atomic_load(intrinsic, fargs, place, loc), - "atomic_load_unordered" => self.codegen_atomic_load(intrinsic, fargs, place, loc), - "atomic_max_seqcst" => codegen_atomic_binop!(max), - "atomic_max_acquire" => codegen_atomic_binop!(max), - "atomic_max_acqrel" => codegen_atomic_binop!(max), - "atomic_max_release" => codegen_atomic_binop!(max), - "atomic_max_relaxed" => codegen_atomic_binop!(max), - "atomic_min_seqcst" => codegen_atomic_binop!(min), - "atomic_min_acquire" => codegen_atomic_binop!(min), - "atomic_min_acqrel" => codegen_atomic_binop!(min), - "atomic_min_release" => codegen_atomic_binop!(min), - "atomic_min_relaxed" => codegen_atomic_binop!(min), - "atomic_nand_seqcst" => codegen_atomic_binop!(bitnand), - "atomic_nand_acquire" => codegen_atomic_binop!(bitnand), - "atomic_nand_acqrel" => codegen_atomic_binop!(bitnand), - "atomic_nand_release" => codegen_atomic_binop!(bitnand), - "atomic_nand_relaxed" => codegen_atomic_binop!(bitnand), - "atomic_or_seqcst" => codegen_atomic_binop!(bitor), - "atomic_or_acquire" => codegen_atomic_binop!(bitor), - "atomic_or_acqrel" => codegen_atomic_binop!(bitor), - "atomic_or_release" => codegen_atomic_binop!(bitor), - "atomic_or_relaxed" => codegen_atomic_binop!(bitor), - "atomic_singlethreadfence_seqcst" => self.codegen_atomic_noop(intrinsic, loc), - "atomic_singlethreadfence_acquire" => self.codegen_atomic_noop(intrinsic, loc), - "atomic_singlethreadfence_acqrel" => self.codegen_atomic_noop(intrinsic, loc), - "atomic_singlethreadfence_release" => self.codegen_atomic_noop(intrinsic, loc), - "atomic_store_seqcst" => self.codegen_atomic_store(intrinsic, fargs, place, loc), - "atomic_store_release" => self.codegen_atomic_store(intrinsic, fargs, place, loc), - "atomic_store_relaxed" => self.codegen_atomic_store(intrinsic, fargs, place, loc), - "atomic_store_unordered" => self.codegen_atomic_store(intrinsic, fargs, place, loc), - "atomic_umax_seqcst" => codegen_atomic_binop!(max), - "atomic_umax_acquire" => codegen_atomic_binop!(max), - "atomic_umax_acqrel" => codegen_atomic_binop!(max), - "atomic_umax_release" => codegen_atomic_binop!(max), - "atomic_umax_relaxed" => codegen_atomic_binop!(max), - "atomic_umin_seqcst" => codegen_atomic_binop!(min), - "atomic_umin_acquire" => 
codegen_atomic_binop!(min), - "atomic_umin_acqrel" => codegen_atomic_binop!(min), - "atomic_umin_release" => codegen_atomic_binop!(min), - "atomic_umin_relaxed" => codegen_atomic_binop!(min), - "atomic_xadd_seqcst" => codegen_atomic_binop!(plus), - "atomic_xadd_acquire" => codegen_atomic_binop!(plus), - "atomic_xadd_acqrel" => codegen_atomic_binop!(plus), - "atomic_xadd_release" => codegen_atomic_binop!(plus), - "atomic_xadd_relaxed" => codegen_atomic_binop!(plus), - "atomic_xchg_seqcst" => self.codegen_atomic_store(intrinsic, fargs, place, loc), - "atomic_xchg_acquire" => self.codegen_atomic_store(intrinsic, fargs, place, loc), - "atomic_xchg_acqrel" => self.codegen_atomic_store(intrinsic, fargs, place, loc), - "atomic_xchg_release" => self.codegen_atomic_store(intrinsic, fargs, place, loc), - "atomic_xchg_relaxed" => self.codegen_atomic_store(intrinsic, fargs, place, loc), - "atomic_xor_seqcst" => codegen_atomic_binop!(bitxor), - "atomic_xor_acquire" => codegen_atomic_binop!(bitxor), - "atomic_xor_acqrel" => codegen_atomic_binop!(bitxor), - "atomic_xor_release" => codegen_atomic_binop!(bitxor), - "atomic_xor_relaxed" => codegen_atomic_binop!(bitxor), - "atomic_xsub_seqcst" => codegen_atomic_binop!(sub), - "atomic_xsub_acquire" => codegen_atomic_binop!(sub), - "atomic_xsub_acqrel" => codegen_atomic_binop!(sub), - "atomic_xsub_release" => codegen_atomic_binop!(sub), - "atomic_xsub_relaxed" => codegen_atomic_binop!(sub), - "bitreverse" => { + + Intrinsic::AtomicFence(_) => self.codegen_atomic_noop(intrinsic_str, loc), + Intrinsic::AtomicLoad(_) => self.codegen_atomic_load(intrinsic_str, fargs, place, loc), + Intrinsic::AtomicMax(_) => codegen_atomic_binop!(max), + Intrinsic::AtomicMin(_) => codegen_atomic_binop!(min), + Intrinsic::AtomicNand(_) => codegen_atomic_binop!(bitnand), + Intrinsic::AtomicOr(_) => codegen_atomic_binop!(bitor), + Intrinsic::AtomicSingleThreadFence(_) => self.codegen_atomic_noop(intrinsic_str, loc), + Intrinsic::AtomicStore(_) => { + self.codegen_atomic_store(intrinsic_str, fargs, place, loc) + } + Intrinsic::AtomicUmax(_) => codegen_atomic_binop!(max), + Intrinsic::AtomicUmin(_) => codegen_atomic_binop!(min), + Intrinsic::AtomicXadd(_) => codegen_atomic_binop!(plus), + Intrinsic::AtomicXchg(_) => self.codegen_atomic_store(intrinsic_str, fargs, place, loc), + Intrinsic::AtomicXor(_) => codegen_atomic_binop!(bitxor), + Intrinsic::AtomicXsub(_) => codegen_atomic_binop!(sub), + Intrinsic::Bitreverse => { self.codegen_expr_to_place_stable(place, fargs.remove(0).bitreverse(), loc) } // black_box is an identity function that hints to the compiler // to be maximally pessimistic to limit optimizations - "black_box" => self.codegen_expr_to_place_stable(place, fargs.remove(0), loc), - "breakpoint" => Stmt::skip(loc), - "bswap" => self.codegen_expr_to_place_stable(place, fargs.remove(0).bswap(), loc), - "caller_location" => self.codegen_unimplemented_stmt( - intrinsic, - loc, - "https://github.com/model-checking/kani/issues/374", - ), - "catch_unwind" => self.codegen_unimplemented_stmt( - intrinsic, - loc, - "https://github.com/model-checking/kani/issues/267", - ), - "ceilf32" => codegen_simple_intrinsic!(Ceilf), - "ceilf64" => codegen_simple_intrinsic!(Ceil), - "compare_bytes" => self.codegen_compare_bytes(fargs, place, loc), - "copy" => self.codegen_copy(intrinsic, false, fargs, farg_types, Some(place), loc), - "copy_nonoverlapping" => unreachable!( - "Expected `core::intrinsics::unreachable` to be handled by `StatementKind::CopyNonOverlapping`" - ), - "copysignf32" => 
codegen_simple_intrinsic!(Copysignf), - "copysignf64" => codegen_simple_intrinsic!(Copysign), - "cosf32" => codegen_simple_intrinsic!(Cosf), - "cosf64" => codegen_simple_intrinsic!(Cos), - "ctlz" => codegen_count_intrinsic!(ctlz, true), - "ctlz_nonzero" => codegen_count_intrinsic!(ctlz, false), - "ctpop" => self.codegen_ctpop(place, span, fargs.remove(0), farg_types[0]), - "cttz" => codegen_count_intrinsic!(cttz, true), - "cttz_nonzero" => codegen_count_intrinsic!(cttz, false), - "discriminant_value" => { + Intrinsic::BlackBox => self.codegen_expr_to_place_stable(place, fargs.remove(0), loc), + Intrinsic::Breakpoint => Stmt::skip(loc), + Intrinsic::Bswap => { + self.codegen_expr_to_place_stable(place, fargs.remove(0).bswap(), loc) + } + Intrinsic::CeilF32 => codegen_simple_intrinsic!(Ceilf), + Intrinsic::CeilF64 => codegen_simple_intrinsic!(Ceil), + Intrinsic::CompareBytes => self.codegen_compare_bytes(fargs, place, loc), + Intrinsic::Copy => { + self.codegen_copy(intrinsic_str, false, fargs, farg_types, Some(place), loc) + } + Intrinsic::CopySignF32 => codegen_simple_intrinsic!(Copysignf), + Intrinsic::CopySignF64 => codegen_simple_intrinsic!(Copysign), + Intrinsic::CosF32 => codegen_simple_intrinsic!(Cosf), + Intrinsic::CosF64 => codegen_simple_intrinsic!(Cos), + Intrinsic::Ctlz => codegen_count_intrinsic!(ctlz, true), + Intrinsic::CtlzNonZero => codegen_count_intrinsic!(ctlz, false), + Intrinsic::Ctpop => self.codegen_ctpop(place, span, fargs.remove(0), farg_types[0]), + Intrinsic::Cttz => codegen_count_intrinsic!(cttz, true), + Intrinsic::CttzNonZero => codegen_count_intrinsic!(cttz, false), + Intrinsic::DiscriminantValue => { let sig = instance.ty().kind().fn_sig().unwrap().skip_binder(); let ty = pointee_type_stable(sig.inputs()[0]).unwrap(); let e = self.codegen_get_discriminant(fargs.remove(0).dereference(), ty, ret_ty); self.codegen_expr_to_place_stable(place, e, loc) } - "exact_div" => self.codegen_exact_div(fargs, place, loc), - "exp2f32" => codegen_simple_intrinsic!(Exp2f), - "exp2f64" => codegen_simple_intrinsic!(Exp2), - "expf32" => codegen_simple_intrinsic!(Expf), - "expf64" => codegen_simple_intrinsic!(Exp), - "fabsf32" => codegen_simple_intrinsic!(Fabsf), - "fabsf64" => codegen_simple_intrinsic!(Fabs), - "fadd_fast" => { + Intrinsic::ExactDiv => self.codegen_exact_div(fargs, place, loc), + Intrinsic::Exp2F32 => codegen_simple_intrinsic!(Exp2f), + Intrinsic::Exp2F64 => codegen_simple_intrinsic!(Exp2), + Intrinsic::ExpF32 => codegen_simple_intrinsic!(Expf), + Intrinsic::ExpF64 => codegen_simple_intrinsic!(Exp), + Intrinsic::FabsF32 => codegen_simple_intrinsic!(Fabsf), + Intrinsic::FabsF64 => codegen_simple_intrinsic!(Fabs), + Intrinsic::FaddFast => { let fargs_clone = fargs.clone(); let binop_stmt = codegen_intrinsic_binop!(plus); - self.add_finite_args_checks(intrinsic, fargs_clone, binop_stmt, span) + self.add_finite_args_checks(intrinsic_str, fargs_clone, binop_stmt, span) } - "fdiv_fast" => { + Intrinsic::FdivFast => { let fargs_clone = fargs.clone(); let binop_stmt = codegen_intrinsic_binop!(div); - self.add_finite_args_checks(intrinsic, fargs_clone, binop_stmt, span) + self.add_finite_args_checks(intrinsic_str, fargs_clone, binop_stmt, span) } - "floorf32" => codegen_simple_intrinsic!(Floorf), - "floorf64" => codegen_simple_intrinsic!(Floor), - "fmaf32" => codegen_simple_intrinsic!(Fmaf), - "fmaf64" => codegen_simple_intrinsic!(Fma), - "fmul_fast" => { + Intrinsic::FloorF32 => codegen_simple_intrinsic!(Floorf), + Intrinsic::FloorF64 => codegen_simple_intrinsic!(Floor), 
+            Intrinsic::FmafF32 => codegen_simple_intrinsic!(Fmaf),
+            Intrinsic::FmafF64 => codegen_simple_intrinsic!(Fma),
+            Intrinsic::FmulFast => {
                 let fargs_clone = fargs.clone();
                 let binop_stmt = codegen_intrinsic_binop!(mul);
-                self.add_finite_args_checks(intrinsic, fargs_clone, binop_stmt, span)
+                self.add_finite_args_checks(intrinsic_str, fargs_clone, binop_stmt, span)
             }
-            "forget" => Stmt::skip(loc),
-            "fsub_fast" => {
+            Intrinsic::Forget => Stmt::skip(loc),
+            Intrinsic::FsubFast => {
                 let fargs_clone = fargs.clone();
                 let binop_stmt = codegen_intrinsic_binop!(sub);
-                self.add_finite_args_checks(intrinsic, fargs_clone, binop_stmt, span)
+                self.add_finite_args_checks(intrinsic_str, fargs_clone, binop_stmt, span)
             }
-            "is_val_statically_known" => {
+            Intrinsic::IsValStaticallyKnown => {
                 // Returning false is sound according to this intrinsic's documentation:
                 // https://doc.rust-lang.org/nightly/std/intrinsics/fn.is_val_statically_known.html
                 self.codegen_expr_to_place_stable(place, Expr::c_false(), loc)
             }
-            "likely" => self.codegen_expr_to_place_stable(place, fargs.remove(0), loc),
-            "log10f32" => codegen_simple_intrinsic!(Log10f),
-            "log10f64" => codegen_simple_intrinsic!(Log10),
-            "log2f32" => codegen_simple_intrinsic!(Log2f),
-            "log2f64" => codegen_simple_intrinsic!(Log2),
-            "logf32" => codegen_simple_intrinsic!(Logf),
-            "logf64" => codegen_simple_intrinsic!(Log),
-            "maxnumf32" => codegen_simple_intrinsic!(Fmaxf),
-            "maxnumf64" => codegen_simple_intrinsic!(Fmax),
-            "min_align_of" => codegen_intrinsic_const!(),
-            "min_align_of_val" => codegen_size_align!(align),
-            "minnumf32" => codegen_simple_intrinsic!(Fminf),
-            "minnumf64" => codegen_simple_intrinsic!(Fmin),
-            "mul_with_overflow" => {
+            Intrinsic::Likely => self.codegen_expr_to_place_stable(place, fargs.remove(0), loc),
+            Intrinsic::Log10F32 => codegen_simple_intrinsic!(Log10f),
+            Intrinsic::Log10F64 => codegen_simple_intrinsic!(Log10),
+            Intrinsic::Log2F32 => codegen_simple_intrinsic!(Log2f),
+            Intrinsic::Log2F64 => codegen_simple_intrinsic!(Log2),
+            Intrinsic::LogF32 => codegen_simple_intrinsic!(Logf),
+            Intrinsic::LogF64 => codegen_simple_intrinsic!(Log),
+            Intrinsic::MaxNumF32 => codegen_simple_intrinsic!(Fmaxf),
+            Intrinsic::MaxNumF64 => codegen_simple_intrinsic!(Fmax),
+            Intrinsic::MinAlignOf => codegen_intrinsic_const!(),
+            Intrinsic::MinAlignOfVal => codegen_size_align!(align),
+            Intrinsic::MinNumF32 => codegen_simple_intrinsic!(Fminf),
+            Intrinsic::MinNumF64 => codegen_simple_intrinsic!(Fmin),
+            Intrinsic::MulWithOverflow => {
                 self.codegen_op_with_overflow(BinaryOperator::OverflowResultMult, fargs, place, loc)
             }
-            "nearbyintf32" => codegen_simple_intrinsic!(Nearbyintf),
-            "nearbyintf64" => codegen_simple_intrinsic!(Nearbyint),
-            "needs_drop" => codegen_intrinsic_const!(),
-            // As of https://github.com/rust-lang/rust/pull/110822 the `offset` intrinsic is lowered to `mir::BinOp::Offset`
-            "offset" => unreachable!(
-                "Expected `core::intrinsics::unreachable` to be handled by `BinOp::OffSet`"
-            ),
-            "powf32" => codegen_simple_intrinsic!(Powf),
-            "powf64" => codegen_simple_intrinsic!(Pow),
-            "powif32" => codegen_simple_intrinsic!(Powif),
-            "powif64" => codegen_simple_intrinsic!(Powi),
-            "pref_align_of" => codegen_intrinsic_const!(),
-            "ptr_guaranteed_cmp" => self.codegen_ptr_guaranteed_cmp(fargs, place, loc),
-            "ptr_offset_from" => self.codegen_ptr_offset_from(fargs, place, loc),
-            "ptr_offset_from_unsigned" => self.codegen_ptr_offset_from_unsigned(fargs, place, loc),
-            "raw_eq" => self.codegen_intrinsic_raw_eq(instance, fargs, place,
loc), - "retag_box_to_raw" => self.codegen_retag_box_to_raw(fargs, place, loc), - "rintf32" => codegen_simple_intrinsic!(Rintf), - "rintf64" => codegen_simple_intrinsic!(Rint), - "rotate_left" => codegen_intrinsic_binop!(rol), - "rotate_right" => codegen_intrinsic_binop!(ror), - "roundf32" => codegen_simple_intrinsic!(Roundf), - "roundf64" => codegen_simple_intrinsic!(Round), - "saturating_add" => codegen_intrinsic_binop_with_mm!(saturating_add), - "saturating_sub" => codegen_intrinsic_binop_with_mm!(saturating_sub), - "sinf32" => codegen_simple_intrinsic!(Sinf), - "sinf64" => codegen_simple_intrinsic!(Sin), - "simd_add" => self.codegen_simd_op_with_overflow( + Intrinsic::NearbyIntF32 => codegen_simple_intrinsic!(Nearbyintf), + Intrinsic::NearbyIntF64 => codegen_simple_intrinsic!(Nearbyint), + Intrinsic::NeedsDrop => codegen_intrinsic_const!(), + Intrinsic::PowF32 => codegen_simple_intrinsic!(Powf), + Intrinsic::PowF64 => codegen_simple_intrinsic!(Pow), + Intrinsic::PowIF32 => codegen_simple_intrinsic!(Powif), + Intrinsic::PowIF64 => codegen_simple_intrinsic!(Powi), + Intrinsic::PrefAlignOf => codegen_intrinsic_const!(), + Intrinsic::PtrGuaranteedCmp => self.codegen_ptr_guaranteed_cmp(fargs, place, loc), + Intrinsic::PtrOffsetFrom => self.codegen_ptr_offset_from(fargs, place, loc), + Intrinsic::PtrOffsetFromUnsigned => { + self.codegen_ptr_offset_from_unsigned(fargs, place, loc) + } + Intrinsic::RawEq => self.codegen_intrinsic_raw_eq(instance, fargs, place, loc), + Intrinsic::RetagBoxToRaw => self.codegen_retag_box_to_raw(fargs, place, loc), + Intrinsic::RintF32 => codegen_simple_intrinsic!(Rintf), + Intrinsic::RintF64 => codegen_simple_intrinsic!(Rint), + Intrinsic::RotateLeft => codegen_intrinsic_binop!(rol), + Intrinsic::RotateRight => codegen_intrinsic_binop!(ror), + Intrinsic::RoundF32 => codegen_simple_intrinsic!(Roundf), + Intrinsic::RoundF64 => codegen_simple_intrinsic!(Round), + Intrinsic::SaturatingAdd => codegen_intrinsic_binop_with_mm!(saturating_add), + Intrinsic::SaturatingSub => codegen_intrinsic_binop_with_mm!(saturating_sub), + Intrinsic::SinF32 => codegen_simple_intrinsic!(Sinf), + Intrinsic::SinF64 => codegen_simple_intrinsic!(Sin), + Intrinsic::SimdAdd => self.codegen_simd_op_with_overflow( Expr::plus, Expr::add_overflow_p, fargs, - intrinsic, + intrinsic_str, place, loc, ), - "simd_and" => codegen_intrinsic_binop!(bitand), + Intrinsic::SimdAnd => codegen_intrinsic_binop!(bitand), // TODO: `simd_rem` doesn't check for overflow cases for floating point operands. 
// - "simd_div" | "simd_rem" => { - self.codegen_simd_div_with_overflow(fargs, intrinsic, place, loc) + Intrinsic::SimdDiv | Intrinsic::SimdRem => { + self.codegen_simd_div_with_overflow(fargs, intrinsic_str, place, loc) } - "simd_eq" => { + Intrinsic::SimdEq => { self.codegen_simd_cmp(Expr::vector_eq, fargs, place, span, farg_types, ret_ty) } - "simd_extract" => { + Intrinsic::SimdExtract => { self.codegen_intrinsic_simd_extract(fargs, place, farg_types, ret_ty, span) } - "simd_ge" => { + Intrinsic::SimdGe => { self.codegen_simd_cmp(Expr::vector_ge, fargs, place, span, farg_types, ret_ty) } - "simd_gt" => { + Intrinsic::SimdGt => { self.codegen_simd_cmp(Expr::vector_gt, fargs, place, span, farg_types, ret_ty) } - "simd_insert" => { + Intrinsic::SimdInsert => { self.codegen_intrinsic_simd_insert(fargs, place, cbmc_ret_ty, farg_types, span, loc) } - "simd_le" => { + Intrinsic::SimdLe => { self.codegen_simd_cmp(Expr::vector_le, fargs, place, span, farg_types, ret_ty) } - "simd_lt" => { + Intrinsic::SimdLt => { self.codegen_simd_cmp(Expr::vector_lt, fargs, place, span, farg_types, ret_ty) } - "simd_mul" => self.codegen_simd_op_with_overflow( + Intrinsic::SimdMul => self.codegen_simd_op_with_overflow( Expr::mul, Expr::mul_overflow_p, fargs, - intrinsic, + intrinsic_str, place, loc, ), - "simd_ne" => { + Intrinsic::SimdNe => { self.codegen_simd_cmp(Expr::vector_neq, fargs, place, span, farg_types, ret_ty) } - "simd_or" => codegen_intrinsic_binop!(bitor), - "simd_shl" | "simd_shr" => { - self.codegen_simd_shift_with_distance_check(fargs, intrinsic, place, loc) + Intrinsic::SimdOr => codegen_intrinsic_binop!(bitor), + Intrinsic::SimdShl | Intrinsic::SimdShr => { + self.codegen_simd_shift_with_distance_check(fargs, intrinsic_str, place, loc) + } + Intrinsic::SimdShuffle(stripped) => { + let n: u64 = self.simd_shuffle_length(stripped.as_str(), farg_types, span); + self.codegen_intrinsic_simd_shuffle(fargs, place, farg_types, ret_ty, n, span) } - // "simd_shuffle#" => handled in an `if` preceding this match - "simd_sub" => self.codegen_simd_op_with_overflow( + Intrinsic::SimdSub => self.codegen_simd_op_with_overflow( Expr::sub, Expr::sub_overflow_p, fargs, - intrinsic, + intrinsic_str, place, loc, ), - "simd_xor" => codegen_intrinsic_binop!(bitxor), - "size_of" => unreachable!(), - "size_of_val" => codegen_size_align!(size), - "sqrtf32" => codegen_simple_intrinsic!(Sqrtf), - "sqrtf64" => codegen_simple_intrinsic!(Sqrt), - "sub_with_overflow" => self.codegen_op_with_overflow( + Intrinsic::SimdXor => codegen_intrinsic_binop!(bitxor), + Intrinsic::SizeOfVal => codegen_size_align!(size), + Intrinsic::SqrtF32 => codegen_simple_intrinsic!(Sqrtf), + Intrinsic::SqrtF64 => codegen_simple_intrinsic!(Sqrt), + Intrinsic::SubWithOverflow => self.codegen_op_with_overflow( BinaryOperator::OverflowResultMinus, fargs, place, loc, ), - "transmute" => self.codegen_intrinsic_transmute(fargs, ret_ty, place, loc), - "truncf32" => codegen_simple_intrinsic!(Truncf), - "truncf64" => codegen_simple_intrinsic!(Trunc), - "type_id" => codegen_intrinsic_const!(), - "type_name" => codegen_intrinsic_const!(), - "typed_swap" => self.codegen_swap(fargs, farg_types, loc), - "unaligned_volatile_load" => { + Intrinsic::Transmute => self.codegen_intrinsic_transmute(fargs, ret_ty, place, loc), + Intrinsic::TruncF32 => codegen_simple_intrinsic!(Truncf), + Intrinsic::TruncF64 => codegen_simple_intrinsic!(Trunc), + Intrinsic::TypeId => codegen_intrinsic_const!(), + Intrinsic::TypeName => codegen_intrinsic_const!(), + Intrinsic::TypedSwap => 
self.codegen_swap(fargs, farg_types, loc), + Intrinsic::UnalignedVolatileLoad => { unstable_codegen!(self.codegen_expr_to_place_stable( place, fargs.remove(0).dereference(), loc )) } - "unchecked_add" | "unchecked_mul" | "unchecked_shl" | "unchecked_shr" - | "unchecked_sub" => { - unreachable!("Expected intrinsic `{intrinsic}` to be lowered before codegen") - } - "unchecked_div" => codegen_op_with_div_overflow_check!(div), - "unchecked_rem" => codegen_op_with_div_overflow_check!(rem), - "unlikely" => self.codegen_expr_to_place_stable(place, fargs.remove(0), loc), - "unreachable" => unreachable!( - "Expected `std::intrinsics::unreachable` to be handled by `TerminatorKind::Unreachable`" - ), - "volatile_copy_memory" => unstable_codegen!(codegen_intrinsic_copy!(Memmove)), - "volatile_copy_nonoverlapping_memory" => { + Intrinsic::UncheckedDiv => codegen_op_with_div_overflow_check!(div), + Intrinsic::UncheckedRem => codegen_op_with_div_overflow_check!(rem), + Intrinsic::Unlikely => self.codegen_expr_to_place_stable(place, fargs.remove(0), loc), + Intrinsic::VolatileCopyMemory => unstable_codegen!(codegen_intrinsic_copy!(Memmove)), + Intrinsic::VolatileCopyNonOverlappingMemory => { unstable_codegen!(codegen_intrinsic_copy!(Memcpy)) } - "volatile_load" => self.codegen_volatile_load(fargs, farg_types, place, loc), - "volatile_store" => { + Intrinsic::VolatileLoad => self.codegen_volatile_load(fargs, farg_types, place, loc), + Intrinsic::VolatileStore => { assert!(self.place_ty_stable(place).kind().is_unit()); self.codegen_volatile_store(fargs, farg_types, loc) } - "vtable_size" => self.vtable_info(VTableInfo::Size, fargs, place, loc), - "vtable_align" => self.vtable_info(VTableInfo::Align, fargs, place, loc), - "wrapping_add" => codegen_wrapping_op!(plus), - "wrapping_mul" => codegen_wrapping_op!(mul), - "wrapping_sub" => codegen_wrapping_op!(sub), - "write_bytes" => { + Intrinsic::VtableSize => self.vtable_info(VTableInfo::Size, fargs, place, loc), + Intrinsic::VtableAlign => self.vtable_info(VTableInfo::Align, fargs, place, loc), + Intrinsic::WrappingAdd => codegen_wrapping_op!(plus), + Intrinsic::WrappingMul => codegen_wrapping_op!(mul), + Intrinsic::WrappingSub => codegen_wrapping_op!(sub), + Intrinsic::WriteBytes => { assert!(self.place_ty_stable(place).kind().is_unit()); self.codegen_write_bytes(fargs, farg_types, loc) } // Unimplemented - _ => self.codegen_unimplemented_stmt( - intrinsic, - loc, - "https://github.com/model-checking/kani/issues/new/choose", - ), + Intrinsic::Unimplemented { name, issue_link } => { + self.codegen_unimplemented_stmt(&name, loc, &issue_link) + } } } diff --git a/kani-compiler/src/intrinsics.rs b/kani-compiler/src/intrinsics.rs new file mode 100644 index 000000000000..9485d8525410 --- /dev/null +++ b/kani-compiler/src/intrinsics.rs @@ -0,0 +1,798 @@ +// Copyright Kani Contributors +// SPDX-License-Identifier: Apache-2.0 OR MIT + +//! Single source of truth about which intrinsics we support. + +use stable_mir::{ + mir::{mono::Instance, Mutability}, + ty::{FloatTy, IntTy, RigidTy, TyKind, UintTy}, +}; + +// Enumeration of all intrinsics we support right now, with the last option being a catch-all. This +// way, adding an intrinsic would highlight all places where they are used. 
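+//
+// As a hypothetical illustration of the payoff: a consumer can now write a single
+// arm such as `Intrinsic::AtomicLoad(_)` instead of matching each ordering-suffixed
+// name ("atomic_load_seqcst", "atomic_load_acquire", ...) as a separate string.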
+#[allow(unused)] +#[derive(Clone, Debug)] +pub enum Intrinsic { + AddWithOverflow, + ArithOffset, + AssertInhabited, + AssertMemUninitializedValid, + AssertZeroValid, + Assume, + AtomicAnd(String), + AtomicCxchg(String), + AtomicCxchgWeak(String), + AtomicFence(String), + AtomicLoad(String), + AtomicMax(String), + AtomicMin(String), + AtomicNand(String), + AtomicOr(String), + AtomicSingleThreadFence(String), + AtomicStore(String), + AtomicUmax(String), + AtomicUmin(String), + AtomicXadd(String), + AtomicXchg(String), + AtomicXor(String), + AtomicXsub(String), + Bitreverse, + BlackBox, + Breakpoint, + Bswap, + CeilF32, + CeilF64, + CompareBytes, + Copy, + CopySignF32, + CopySignF64, + CosF32, + CosF64, + Ctlz, + CtlzNonZero, + Ctpop, + Cttz, + CttzNonZero, + DiscriminantValue, + ExactDiv, + Exp2F32, + Exp2F64, + ExpF32, + ExpF64, + FabsF32, + FabsF64, + FaddFast, + FdivFast, + FloorF32, + FloorF64, + FmafF32, + FmafF64, + FmulFast, + Forget, + FsubFast, + IsValStaticallyKnown, + Likely, + Log10F32, + Log10F64, + Log2F32, + Log2F64, + LogF32, + LogF64, + MaxNumF32, + MaxNumF64, + MinAlignOf, + MinAlignOfVal, + MinNumF32, + MinNumF64, + MulWithOverflow, + NearbyIntF32, + NearbyIntF64, + NeedsDrop, + PowF32, + PowF64, + PowIF32, + PowIF64, + PrefAlignOf, + PtrGuaranteedCmp, + PtrOffsetFrom, + PtrOffsetFromUnsigned, + RawEq, + RetagBoxToRaw, + RintF32, + RintF64, + RotateLeft, + RotateRight, + RoundF32, + RoundF64, + SaturatingAdd, + SaturatingSub, + SinF32, + SinF64, + SimdAdd, + SimdAnd, + SimdDiv, + SimdRem, + SimdEq, + SimdExtract, + SimdGe, + SimdGt, + SimdInsert, + SimdLe, + SimdLt, + SimdMul, + SimdNe, + SimdOr, + SimdShl, + SimdShr, + SimdShuffle(String), + SimdSub, + SimdXor, + SizeOfVal, + SqrtF32, + SqrtF64, + SubWithOverflow, + Transmute, + TruncF32, + TruncF64, + TypeId, + TypeName, + TypedSwap, + UnalignedVolatileLoad, + UncheckedDiv, + UncheckedRem, + Unlikely, + VolatileCopyMemory, + VolatileCopyNonOverlappingMemory, + VolatileLoad, + VolatileStore, + VtableSize, + VtableAlign, + WrappingAdd, + WrappingMul, + WrappingSub, + WriteBytes, + Unimplemented { name: String, issue_link: String }, +} + +/// Assert that top-level types of a function signature match the given patterns. +macro_rules! assert_sig_matches { + ($sig:expr, $($input_type:pat),* => $output_type:pat) => { + let inputs = $sig.inputs(); + let output = $sig.output(); + #[allow(unused_mut)] + let mut index = 0; + $( + #[allow(unused_assignments)] + { + assert!(matches!(inputs[index].kind(), TyKind::RigidTy($input_type))); + index += 1; + } + )* + assert!(inputs.len() == index); + assert!(matches!(output.kind(), TyKind::RigidTy($output_type))); + } +} + +impl Intrinsic { + /// Create an intrinsic enum from a given intrinsic instance, shallowly validating the argument types. 
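+    ///
+    /// A minimal sketch of the intended usage (hypothetical, not compiled as a
+    /// doctest; assumes `instance` resolves to the `ctpop` intrinsic):
+    /// ```ignore
+    /// let intrinsic = Intrinsic::from_instance(&instance);
+    /// assert!(matches!(intrinsic, Intrinsic::Ctpop));
+    /// ```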
+    pub fn from_instance(intrinsic_instance: &Instance) -> Self {
+        let intrinsic_str = intrinsic_instance.intrinsic_name().unwrap();
+        let sig = intrinsic_instance.ty().kind().fn_sig().unwrap().skip_binder();
+        match intrinsic_str.as_str() {
+            "add_with_overflow" => {
+                assert_sig_matches!(sig, _, _ => RigidTy::Tuple(_));
+                Self::AddWithOverflow
+            }
+            "arith_offset" => {
+                assert_sig_matches!(sig,
+                    RigidTy::RawPtr(_, Mutability::Not),
+                    RigidTy::Int(IntTy::Isize)
+                    => RigidTy::RawPtr(_, Mutability::Not));
+                Self::ArithOffset
+            }
+            "assert_inhabited" => {
+                assert_sig_matches!(sig, => RigidTy::Tuple(_));
+                Self::AssertInhabited
+            }
+            "assert_mem_uninitialized_valid" => {
+                assert_sig_matches!(sig, => RigidTy::Tuple(_));
+                Self::AssertMemUninitializedValid
+            }
+            "assert_zero_valid" => {
+                assert_sig_matches!(sig, => RigidTy::Tuple(_));
+                Self::AssertZeroValid
+            }
+            "assume" => {
+                assert_sig_matches!(sig, RigidTy::Bool => RigidTy::Tuple(_));
+                Self::Assume
+            }
+            "bitreverse" => {
+                assert_sig_matches!(sig, _ => _);
+                Self::Bitreverse
+            }
+            "black_box" => {
+                assert_sig_matches!(sig, _ => _);
+                Self::BlackBox
+            }
+            "breakpoint" => {
+                assert_sig_matches!(sig, => RigidTy::Tuple(_));
+                Self::Breakpoint
+            }
+            "bswap" => {
+                assert_sig_matches!(sig, _ => _);
+                Self::Bswap
+            }
+            "caller_location" => {
+                assert_sig_matches!(sig, => RigidTy::Ref(_, _, Mutability::Not));
+                Self::Unimplemented {
+                    name: intrinsic_str,
+                    issue_link: "https://github.com/model-checking/kani/issues/374".into(),
+                }
+            }
+            "catch_unwind" => {
+                assert_sig_matches!(sig, RigidTy::FnPtr(_), RigidTy::RawPtr(_, Mutability::Mut), RigidTy::FnPtr(_) => RigidTy::Int(IntTy::I32));
+                Self::Unimplemented {
+                    name: intrinsic_str,
+                    issue_link: "https://github.com/model-checking/kani/issues/267".into(),
+                }
+            }
+            "compare_bytes" => {
+                assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Not), RigidTy::RawPtr(_, Mutability::Not), RigidTy::Uint(UintTy::Usize) => RigidTy::Int(IntTy::I32));
+                Self::CompareBytes
+            }
+            "copy" => {
+                assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Not), RigidTy::RawPtr(_, Mutability::Mut), RigidTy::Uint(UintTy::Usize) => RigidTy::Tuple(_));
+                Self::Copy
+            }
+            "copy_nonoverlapping" => unreachable!(
+                "Expected `copy_nonoverlapping` to be handled by `StatementKind::CopyNonOverlapping`"
+            ),
+            "ctlz" => {
+                assert_sig_matches!(sig, _ => RigidTy::Uint(UintTy::U32));
+                Self::Ctlz
+            }
+            "ctlz_nonzero" => {
+                assert_sig_matches!(sig, _ => RigidTy::Uint(UintTy::U32));
+                Self::CtlzNonZero
+            }
+            "ctpop" => {
+                assert_sig_matches!(sig, _ => RigidTy::Uint(UintTy::U32));
+                Self::Ctpop
+            }
+            "cttz" => {
+                assert_sig_matches!(sig, _ => RigidTy::Uint(UintTy::U32));
+                Self::Cttz
+            }
+            "cttz_nonzero" => {
+                assert_sig_matches!(sig, _ => RigidTy::Uint(UintTy::U32));
+                Self::CttzNonZero
+            }
+            "discriminant_value" => {
+                assert_sig_matches!(sig, RigidTy::Ref(_, _, Mutability::Not) => _);
+                Self::DiscriminantValue
+            }
+            "exact_div" => {
+                assert_sig_matches!(sig, _, _ => _);
+                Self::ExactDiv
+            }
+            "fadd_fast" => {
+                assert_sig_matches!(sig, _, _ => _);
+                Self::FaddFast
+            }
+            "fdiv_fast" => {
+                assert_sig_matches!(sig, _, _ => _);
+                Self::FdivFast
+            }
+            "fmul_fast" => {
+                assert_sig_matches!(sig, _, _ => _);
+                Self::FmulFast
+            }
+            "forget" => {
+                assert_sig_matches!(sig, _ => RigidTy::Tuple(_));
+                Self::Forget
+            }
+            "fsub_fast" => {
+                assert_sig_matches!(sig, _, _ => _);
+                Self::FsubFast
+            }
+            "is_val_statically_known" => {
+                assert_sig_matches!(sig, _ => RigidTy::Bool);
+                Self::IsValStaticallyKnown
+            }
"likely" => { + assert_sig_matches!(sig, RigidTy::Bool => RigidTy::Bool); + Self::Likely + } + "min_align_of" => { + assert_sig_matches!(sig, => RigidTy::Uint(UintTy::Usize)); + Self::MinAlignOf + } + "min_align_of_val" => { + assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Not) => RigidTy::Uint(UintTy::Usize)); + Self::MinAlignOfVal + } + "mul_with_overflow" => { + assert_sig_matches!(sig, _, _ => RigidTy::Tuple(_)); + Self::MulWithOverflow + } + "needs_drop" => { + assert_sig_matches!(sig, => RigidTy::Bool); + Self::NeedsDrop + } + // As of https://github.com/rust-lang/rust/pull/110822 the `offset` intrinsic is lowered to `mir::BinOp::Offset` + "offset" => unreachable!( + "Expected `core::intrinsics::unreachable` to be handled by `BinOp::OffSet`" + ), + "pref_align_of" => { + assert_sig_matches!(sig, => RigidTy::Uint(UintTy::Usize)); + Self::PrefAlignOf + } + "ptr_guaranteed_cmp" => { + assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Not), RigidTy::RawPtr(_, Mutability::Not) => RigidTy::Uint(UintTy::U8)); + Self::PtrGuaranteedCmp + } + "ptr_offset_from" => { + assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Not), RigidTy::RawPtr(_, Mutability::Not) => RigidTy::Int(IntTy::Isize)); + Self::PtrOffsetFrom + } + "ptr_offset_from_unsigned" => { + assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Not), RigidTy::RawPtr(_, Mutability::Not) => RigidTy::Uint(UintTy::Usize)); + Self::PtrOffsetFromUnsigned + } + "raw_eq" => { + assert_sig_matches!(sig, RigidTy::Ref(_, _, Mutability::Not), RigidTy::Ref(_, _, Mutability::Not) => RigidTy::Bool); + Self::RawEq + } + "rotate_left" => { + assert_sig_matches!(sig, _, RigidTy::Uint(UintTy::U32) => _); + Self::RotateLeft + } + "rotate_right" => { + assert_sig_matches!(sig, _, RigidTy::Uint(UintTy::U32) => _); + Self::RotateRight + } + "saturating_add" => { + assert_sig_matches!(sig, _, _ => _); + Self::SaturatingAdd + } + "saturating_sub" => { + assert_sig_matches!(sig, _, _ => _); + Self::SaturatingSub + } + "size_of" => unreachable!(), + "size_of_val" => { + assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Not) => RigidTy::Uint(UintTy::Usize)); + Self::SizeOfVal + } + "sub_with_overflow" => { + assert_sig_matches!(sig, _, _ => RigidTy::Tuple(_)); + Self::SubWithOverflow + } + "transmute" => { + assert_sig_matches!(sig, _ => _); + Self::Transmute + } + "type_id" => { + assert_sig_matches!(sig, => RigidTy::Uint(UintTy::U128)); + Self::TypeId + } + "type_name" => { + assert_sig_matches!(sig, => RigidTy::Ref(_, _, Mutability::Not)); + Self::TypeName + } + "typed_swap" => { + assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Mut), RigidTy::RawPtr(_, Mutability::Mut) => RigidTy::Tuple(_)); + Self::TypedSwap + } + "unaligned_volatile_load" => { + assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Not) => _); + Self::UnalignedVolatileLoad + } + "unchecked_add" | "unchecked_mul" | "unchecked_shl" | "unchecked_shr" + | "unchecked_sub" => { + unreachable!("Expected intrinsic `{intrinsic_str}` to be lowered before codegen") + } + "unchecked_div" => { + assert_sig_matches!(sig, _, _ => _); + Self::UncheckedDiv + } + "unchecked_rem" => { + assert_sig_matches!(sig, _, _ => _); + Self::UncheckedRem + } + "unlikely" => { + assert_sig_matches!(sig, RigidTy::Bool => RigidTy::Bool); + Self::Unlikely + } + "unreachable" => unreachable!( + "Expected `std::intrinsics::unreachable` to be handled by `TerminatorKind::Unreachable`" + ), + "volatile_copy_memory" => { + assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Mut), 
RigidTy::RawPtr(_, Mutability::Not), RigidTy::Uint(UintTy::Usize) => RigidTy::Tuple(_)); + Self::VolatileCopyMemory + } + "volatile_copy_nonoverlapping_memory" => { + assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Mut), RigidTy::RawPtr(_, Mutability::Not), RigidTy::Uint(UintTy::Usize) => RigidTy::Tuple(_)); + Self::VolatileCopyNonOverlappingMemory + } + "volatile_load" => { + assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Not) => _); + Self::VolatileLoad + } + "volatile_store" => { + assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Mut), _ => RigidTy::Tuple(_)); + Self::VolatileStore + } + "vtable_size" => { + assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Not) => RigidTy::Uint(UintTy::Usize)); + Self::VtableSize + } + "vtable_align" => { + assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Not) => RigidTy::Uint(UintTy::Usize)); + Self::VtableAlign + } + "wrapping_add" => { + assert_sig_matches!(sig, _, _ => _); + Self::WrappingAdd + } + "wrapping_mul" => { + assert_sig_matches!(sig, _, _ => _); + Self::WrappingMul + } + "wrapping_sub" => { + assert_sig_matches!(sig, _, _ => _); + Self::WrappingSub + } + "write_bytes" => { + assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Mut), RigidTy::Uint(UintTy::U8), RigidTy::Uint(UintTy::Usize) => RigidTy::Tuple(_)); + Self::WriteBytes + } + _ => try_match_atomic(intrinsic_instance) + .or_else(|| try_match_simd(intrinsic_instance)) + .or_else(|| try_match_f32(intrinsic_instance)) + .or_else(|| try_match_f64(intrinsic_instance)) + .unwrap_or(Self::Unimplemented { + name: intrinsic_str, + issue_link: "https://github.com/model-checking/kani/issues/new/choose".into(), + }), + } + } +} + +/// Match atomic intrinsics by instance, returning an instance of the intrinsics enum if the match +/// is successful. 
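+/// E.g., `atomic_load_seqcst` is expected to map to
+/// `Some(Intrinsic::AtomicLoad("seqcst"))` (hypothetical sketch, not compiled
+/// as a doctest):
+/// ```ignore
+/// assert!(matches!(
+///     try_match_atomic(&instance),
+///     Some(Intrinsic::AtomicLoad(ordering)) if ordering == "seqcst"
+/// ));
+/// ```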
+fn try_match_atomic(intrinsic_instance: &Instance) -> Option<Intrinsic> {
+    let intrinsic_str = intrinsic_instance.intrinsic_name().unwrap();
+    let sig = intrinsic_instance.ty().kind().fn_sig().unwrap().skip_binder();
+    if let Some(suffix) = intrinsic_str.strip_prefix("atomic_and_") {
+        assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Mut), _ => _);
+        Some(Intrinsic::AtomicAnd(suffix.into()))
+    } else if let Some(suffix) = intrinsic_str.strip_prefix("atomic_cxchgweak_") {
+        assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Mut), _, _ => RigidTy::Tuple(_));
+        Some(Intrinsic::AtomicCxchgWeak(suffix.into()))
+    } else if let Some(suffix) = intrinsic_str.strip_prefix("atomic_cxchg_") {
+        assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Mut), _, _ => RigidTy::Tuple(_));
+        Some(Intrinsic::AtomicCxchg(suffix.into()))
+    } else if let Some(suffix) = intrinsic_str.strip_prefix("atomic_fence_") {
+        assert_sig_matches!(sig, => RigidTy::Tuple(_));
+        Some(Intrinsic::AtomicFence(suffix.into()))
+    } else if let Some(suffix) = intrinsic_str.strip_prefix("atomic_load_") {
+        assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Not) => _);
+        Some(Intrinsic::AtomicLoad(suffix.into()))
+    } else if let Some(suffix) = intrinsic_str.strip_prefix("atomic_max_") {
+        assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Mut), _ => _);
+        Some(Intrinsic::AtomicMax(suffix.into()))
+    } else if let Some(suffix) = intrinsic_str.strip_prefix("atomic_min_") {
+        assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Mut), _ => _);
+        Some(Intrinsic::AtomicMin(suffix.into()))
+    } else if let Some(suffix) = intrinsic_str.strip_prefix("atomic_nand_") {
+        assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Mut), _ => _);
+        Some(Intrinsic::AtomicNand(suffix.into()))
+    } else if let Some(suffix) = intrinsic_str.strip_prefix("atomic_or_") {
+        assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Mut), _ => _);
+        Some(Intrinsic::AtomicOr(suffix.into()))
+    } else if let Some(suffix) = intrinsic_str.strip_prefix("atomic_singlethreadfence_") {
+        assert_sig_matches!(sig, => RigidTy::Tuple(_));
+        Some(Intrinsic::AtomicSingleThreadFence(suffix.into()))
+    } else if let Some(suffix) = intrinsic_str.strip_prefix("atomic_store_") {
+        assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Mut), _ => RigidTy::Tuple(_));
+        Some(Intrinsic::AtomicStore(suffix.into()))
+    } else if let Some(suffix) = intrinsic_str.strip_prefix("atomic_umax_") {
+        assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Mut), _ => _);
+        Some(Intrinsic::AtomicUmax(suffix.into()))
+    } else if let Some(suffix) = intrinsic_str.strip_prefix("atomic_umin_") {
+        assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Mut), _ => _);
+        Some(Intrinsic::AtomicUmin(suffix.into()))
+    } else if let Some(suffix) = intrinsic_str.strip_prefix("atomic_xadd_") {
+        assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Mut), _ => _);
+        Some(Intrinsic::AtomicXadd(suffix.into()))
+    } else if let Some(suffix) = intrinsic_str.strip_prefix("atomic_xchg_") {
+        assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Mut), _ => _);
+        Some(Intrinsic::AtomicXchg(suffix.into()))
+    } else if let Some(suffix) = intrinsic_str.strip_prefix("atomic_xor_") {
+        assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Mut), _ => _);
+        Some(Intrinsic::AtomicXor(suffix.into()))
+    } else if let Some(suffix) = intrinsic_str.strip_prefix("atomic_xsub_") {
+        assert_sig_matches!(sig, RigidTy::RawPtr(_, Mutability::Mut), _ => _);
+        Some(Intrinsic::AtomicXsub(suffix.into()))
+    } else {
+        None
+    }
+}
+
+/// Match SIMD intrinsics by instance, returning an instance of the intrinsics enum if the match
+/// is successful.
+fn try_match_simd(intrinsic_instance: &Instance) -> Option<Intrinsic> {
+    let intrinsic_str = intrinsic_instance.intrinsic_name().unwrap();
+    let sig = intrinsic_instance.ty().kind().fn_sig().unwrap().skip_binder();
+    match intrinsic_str.as_str() {
+        "simd_add" => {
+            assert_sig_matches!(sig, _, _ => _);
+            Some(Intrinsic::SimdAdd)
+        }
+        "simd_and" => {
+            assert_sig_matches!(sig, _, _ => _);
+            Some(Intrinsic::SimdAnd)
+        }
+        "simd_div" => {
+            assert_sig_matches!(sig, _, _ => _);
+            Some(Intrinsic::SimdDiv)
+        }
+        "simd_rem" => {
+            assert_sig_matches!(sig, _, _ => _);
+            Some(Intrinsic::SimdRem)
+        }
+        "simd_eq" => {
+            assert_sig_matches!(sig, _, _ => _);
+            Some(Intrinsic::SimdEq)
+        }
+        "simd_extract" => {
+            assert_sig_matches!(sig, _, RigidTy::Uint(UintTy::U32) => _);
+            Some(Intrinsic::SimdExtract)
+        }
+        "simd_ge" => {
+            assert_sig_matches!(sig, _, _ => _);
+            Some(Intrinsic::SimdGe)
+        }
+        "simd_gt" => {
+            assert_sig_matches!(sig, _, _ => _);
+            Some(Intrinsic::SimdGt)
+        }
+        "simd_insert" => {
+            assert_sig_matches!(sig, _, RigidTy::Uint(UintTy::U32), _ => _);
+            Some(Intrinsic::SimdInsert)
+        }
+        "simd_le" => {
+            assert_sig_matches!(sig, _, _ => _);
+            Some(Intrinsic::SimdLe)
+        }
+        "simd_lt" => {
+            assert_sig_matches!(sig, _, _ => _);
+            Some(Intrinsic::SimdLt)
+        }
+        "simd_mul" => {
+            assert_sig_matches!(sig, _, _ => _);
+            Some(Intrinsic::SimdMul)
+        }
+        "simd_ne" => {
+            assert_sig_matches!(sig, _, _ => _);
+            Some(Intrinsic::SimdNe)
+        }
+        "simd_or" => {
+            assert_sig_matches!(sig, _, _ => _);
+            Some(Intrinsic::SimdOr)
+        }
+        "simd_shl" => {
+            assert_sig_matches!(sig, _, _ => _);
+            Some(Intrinsic::SimdShl)
+        }
+        "simd_shr" => {
+            assert_sig_matches!(sig, _, _ => _);
+            Some(Intrinsic::SimdShr)
+        }
+        "simd_sub" => {
+            assert_sig_matches!(sig, _, _ => _);
+            Some(Intrinsic::SimdSub)
+        }
+        "simd_xor" => {
+            assert_sig_matches!(sig, _, _ => _);
+            Some(Intrinsic::SimdXor)
+        }
+        name => {
+            if let Some(suffix) = name.strip_prefix("simd_shuffle") {
+                assert_sig_matches!(sig, _, _, _ => _);
+                Some(Intrinsic::SimdShuffle(suffix.into()))
+            } else {
+                None
+            }
+        }
+    }
+}
+
+/// Match f32 arithmetic intrinsics by instance, returning an instance of the intrinsics enum if the match
+/// is successful.
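+///
+/// E.g., under these assumptions, an instance of `sqrtf32` should yield
+/// `Some(Intrinsic::SqrtF32)` (hypothetical sketch, not compiled as a doctest):
+/// ```ignore
+/// assert!(matches!(try_match_f32(&instance), Some(Intrinsic::SqrtF32)));
+/// ```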
+fn try_match_f32(intrinsic_instance: &Instance) -> Option<Intrinsic> {
+    let intrinsic_str = intrinsic_instance.intrinsic_name().unwrap();
+    let sig = intrinsic_instance.ty().kind().fn_sig().unwrap().skip_binder();
+    match intrinsic_str.as_str() {
+        "ceilf32" => {
+            assert_sig_matches!(sig, RigidTy::Float(FloatTy::F32) => RigidTy::Float(FloatTy::F32));
+            Some(Intrinsic::CeilF32)
+        }
+        "copysignf32" => {
+            assert_sig_matches!(sig, RigidTy::Float(FloatTy::F32), RigidTy::Float(FloatTy::F32) => RigidTy::Float(FloatTy::F32));
+            Some(Intrinsic::CopySignF32)
+        }
+        "cosf32" => {
+            assert_sig_matches!(sig, RigidTy::Float(FloatTy::F32) => RigidTy::Float(FloatTy::F32));
+            Some(Intrinsic::CosF32)
+        }
+        "exp2f32" => {
+            assert_sig_matches!(sig, RigidTy::Float(FloatTy::F32) => RigidTy::Float(FloatTy::F32));
+            Some(Intrinsic::Exp2F32)
+        }
+        "expf32" => {
+            assert_sig_matches!(sig, RigidTy::Float(FloatTy::F32) => RigidTy::Float(FloatTy::F32));
+            Some(Intrinsic::ExpF32)
+        }
+        "fabsf32" => {
+            assert_sig_matches!(sig, RigidTy::Float(FloatTy::F32) => RigidTy::Float(FloatTy::F32));
+            Some(Intrinsic::FabsF32)
+        }
+        "floorf32" => {
+            assert_sig_matches!(sig, RigidTy::Float(FloatTy::F32) => RigidTy::Float(FloatTy::F32));
+            Some(Intrinsic::FloorF32)
+        }
+        "fmaf32" => {
+            assert_sig_matches!(sig, RigidTy::Float(FloatTy::F32), RigidTy::Float(FloatTy::F32), RigidTy::Float(FloatTy::F32) => RigidTy::Float(FloatTy::F32));
+            Some(Intrinsic::FmafF32)
+        }
+        "log10f32" => {
+            assert_sig_matches!(sig, RigidTy::Float(FloatTy::F32) => RigidTy::Float(FloatTy::F32));
+            Some(Intrinsic::Log10F32)
+        }
+        "log2f32" => {
+            assert_sig_matches!(sig, RigidTy::Float(FloatTy::F32) => RigidTy::Float(FloatTy::F32));
+            Some(Intrinsic::Log2F32)
+        }
+        "logf32" => {
+            assert_sig_matches!(sig, RigidTy::Float(FloatTy::F32) => RigidTy::Float(FloatTy::F32));
+            Some(Intrinsic::LogF32)
+        }
+        "maxnumf32" => {
+            assert_sig_matches!(sig, RigidTy::Float(FloatTy::F32), RigidTy::Float(FloatTy::F32) => RigidTy::Float(FloatTy::F32));
+            Some(Intrinsic::MaxNumF32)
+        }
+        "minnumf32" => {
+            assert_sig_matches!(sig, RigidTy::Float(FloatTy::F32), RigidTy::Float(FloatTy::F32) => RigidTy::Float(FloatTy::F32));
+            Some(Intrinsic::MinNumF32)
+        }
+        "nearbyintf32" => {
+            assert_sig_matches!(sig, RigidTy::Float(FloatTy::F32) => RigidTy::Float(FloatTy::F32));
+            Some(Intrinsic::NearbyIntF32)
+        }
+        "powf32" => {
+            assert_sig_matches!(sig, RigidTy::Float(FloatTy::F32), RigidTy::Float(FloatTy::F32) => RigidTy::Float(FloatTy::F32));
+            Some(Intrinsic::PowF32)
+        }
+        "powif32" => {
+            assert_sig_matches!(sig, RigidTy::Float(FloatTy::F32), RigidTy::Int(IntTy::I32) => RigidTy::Float(FloatTy::F32));
+            Some(Intrinsic::PowIF32)
+        }
+        "rintf32" => {
+            assert_sig_matches!(sig, RigidTy::Float(FloatTy::F32) => RigidTy::Float(FloatTy::F32));
+            Some(Intrinsic::RintF32)
+        }
+        "roundf32" => {
+            assert_sig_matches!(sig, RigidTy::Float(FloatTy::F32) => RigidTy::Float(FloatTy::F32));
+            Some(Intrinsic::RoundF32)
+        }
+        "sinf32" => {
+            assert_sig_matches!(sig, RigidTy::Float(FloatTy::F32) => RigidTy::Float(FloatTy::F32));
+            Some(Intrinsic::SinF32)
+        }
+        "sqrtf32" => {
+            assert_sig_matches!(sig, RigidTy::Float(FloatTy::F32) => RigidTy::Float(FloatTy::F32));
+            Some(Intrinsic::SqrtF32)
+        }
+        "truncf32" => {
+            assert_sig_matches!(sig, RigidTy::Float(FloatTy::F32) => RigidTy::Float(FloatTy::F32));
+            Some(Intrinsic::TruncF32)
+        }
+        _ => None,
+    }
+}
+
+/// Match f64 arithmetic intrinsics by instance, returning an instance of the intrinsics enum if the match
+/// is successful.
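+///
+/// E.g., under these assumptions, an instance of `powf64` should yield
+/// `Some(Intrinsic::PowF64)` (hypothetical sketch, not compiled as a doctest):
+/// ```ignore
+/// assert!(matches!(try_match_f64(&instance), Some(Intrinsic::PowF64)));
+/// ```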
+fn try_match_f64(intrinsic_instance: &Instance) -> Option<Intrinsic> {
+    let intrinsic_str = intrinsic_instance.intrinsic_name().unwrap();
+    let sig = intrinsic_instance.ty().kind().fn_sig().unwrap().skip_binder();
+    match intrinsic_str.as_str() {
+        "ceilf64" => {
+            assert_sig_matches!(sig, RigidTy::Float(FloatTy::F64) => RigidTy::Float(FloatTy::F64));
+            Some(Intrinsic::CeilF64)
+        }
+        "copysignf64" => {
+            assert_sig_matches!(sig, RigidTy::Float(FloatTy::F64), RigidTy::Float(FloatTy::F64) => RigidTy::Float(FloatTy::F64));
+            Some(Intrinsic::CopySignF64)
+        }
+        "cosf64" => {
+            assert_sig_matches!(sig, RigidTy::Float(FloatTy::F64) => RigidTy::Float(FloatTy::F64));
+            Some(Intrinsic::CosF64)
+        }
+        "exp2f64" => {
+            assert_sig_matches!(sig, RigidTy::Float(FloatTy::F64) => RigidTy::Float(FloatTy::F64));
+            Some(Intrinsic::Exp2F64)
+        }
+        "expf64" => {
+            assert_sig_matches!(sig, RigidTy::Float(FloatTy::F64) => RigidTy::Float(FloatTy::F64));
+            Some(Intrinsic::ExpF64)
+        }
+        "fabsf64" => {
+            assert_sig_matches!(sig, RigidTy::Float(FloatTy::F64) => RigidTy::Float(FloatTy::F64));
+            Some(Intrinsic::FabsF64)
+        }
+        "floorf64" => {
+            assert_sig_matches!(sig, RigidTy::Float(FloatTy::F64) => RigidTy::Float(FloatTy::F64));
+            Some(Intrinsic::FloorF64)
+        }
+        "fmaf64" => {
+            assert_sig_matches!(sig, RigidTy::Float(FloatTy::F64), RigidTy::Float(FloatTy::F64), RigidTy::Float(FloatTy::F64) => RigidTy::Float(FloatTy::F64));
+            Some(Intrinsic::FmafF64)
+        }
+        "log10f64" => {
+            assert_sig_matches!(sig, RigidTy::Float(FloatTy::F64) => RigidTy::Float(FloatTy::F64));
+            Some(Intrinsic::Log10F64)
+        }
+        "log2f64" => {
+            assert_sig_matches!(sig, RigidTy::Float(FloatTy::F64) => RigidTy::Float(FloatTy::F64));
+            Some(Intrinsic::Log2F64)
+        }
+        "logf64" => {
+            assert_sig_matches!(sig, RigidTy::Float(FloatTy::F64) => RigidTy::Float(FloatTy::F64));
+            Some(Intrinsic::LogF64)
+        }
+        "maxnumf64" => {
+            assert_sig_matches!(sig, RigidTy::Float(FloatTy::F64), RigidTy::Float(FloatTy::F64) => RigidTy::Float(FloatTy::F64));
+            Some(Intrinsic::MaxNumF64)
+        }
+        "minnumf64" => {
+            assert_sig_matches!(sig, RigidTy::Float(FloatTy::F64), RigidTy::Float(FloatTy::F64) => RigidTy::Float(FloatTy::F64));
+            Some(Intrinsic::MinNumF64)
+        }
+        "nearbyintf64" => {
+            assert_sig_matches!(sig, RigidTy::Float(FloatTy::F64) => RigidTy::Float(FloatTy::F64));
+            Some(Intrinsic::NearbyIntF64)
+        }
+        "powf64" => {
+            assert_sig_matches!(sig, RigidTy::Float(FloatTy::F64), RigidTy::Float(FloatTy::F64) => RigidTy::Float(FloatTy::F64));
+            Some(Intrinsic::PowF64)
+        }
+        "powif64" => {
+            assert_sig_matches!(sig, RigidTy::Float(FloatTy::F64), RigidTy::Int(IntTy::I32) => RigidTy::Float(FloatTy::F64));
+            Some(Intrinsic::PowIF64)
+        }
+        "rintf64" => {
+            assert_sig_matches!(sig, RigidTy::Float(FloatTy::F64) => RigidTy::Float(FloatTy::F64));
+            Some(Intrinsic::RintF64)
+        }
+        "roundf64" => {
+            assert_sig_matches!(sig, RigidTy::Float(FloatTy::F64) => RigidTy::Float(FloatTy::F64));
+            Some(Intrinsic::RoundF64)
+        }
+        "sinf64" => {
+            assert_sig_matches!(sig, RigidTy::Float(FloatTy::F64) => RigidTy::Float(FloatTy::F64));
+            Some(Intrinsic::SinF64)
+        }
+        "sqrtf64" => {
+            assert_sig_matches!(sig, RigidTy::Float(FloatTy::F64) => RigidTy::Float(FloatTy::F64));
+            Some(Intrinsic::SqrtF64)
+        }
+        "truncf64" => {
+            assert_sig_matches!(sig, RigidTy::Float(FloatTy::F64) => RigidTy::Float(FloatTy::F64));
+            Some(Intrinsic::TruncF64)
+        }
+        _ => None,
+    }
+}
diff --git a/kani-compiler/src/kani_middle/points_to/points_to_analysis.rs b/kani-compiler/src/kani_middle/points_to/points_to_analysis.rs
index
640318ccb584..15593549b690 100644 --- a/kani-compiler/src/kani_middle/points_to/points_to_analysis.rs +++ b/kani-compiler/src/kani_middle/points_to/points_to_analysis.rs @@ -24,12 +24,14 @@ //! Currently, the analysis is not field-sensitive: e.g., if a field of a place aliases to some //! other place, we treat it as if the place itself aliases to another place. -use crate::kani_middle::{ - points_to::{MemLoc, PointsToGraph}, - reachability::CallGraph, - transform::RustcInternalMir, +use crate::{ + intrinsics::Intrinsic, + kani_middle::{ + points_to::{MemLoc, PointsToGraph}, + reachability::CallGraph, + transform::RustcInternalMir, + }, }; -use rustc_ast::Mutability; use rustc_middle::{ mir::{ BasicBlock, BinOp, Body, CallReturnPlaces, Location, NonDivergingIntrinsic, Operand, Place, @@ -202,161 +204,117 @@ impl<'a, 'tcx> Analysis<'tcx> for PointsToAnalysis<'a, 'tcx> { match instance.def { // Intrinsics could introduce aliasing edges we care about, so need to handle them. InstanceKind::Intrinsic(def_id) => { - match self.tcx.intrinsic(def_id).unwrap().name.to_string().as_str() { - name if name.starts_with("atomic") => { - match name { - // All `atomic_cxchg` intrinsics take `dst, old, src` as arguments. - // This is equivalent to `destination = *dst; *dst = src`. - name if name.starts_with("atomic_cxchg") => { - assert_eq!( - args.len(), - 3, - "Unexpected number of arguments for `{name}`" - ); - assert!(matches!( - args[0].node.ty(self.body, self.tcx).kind(), - TyKind::RawPtr(_, Mutability::Mut) - )); - let src_set = - self.successors_for_operand(state, args[2].node.clone()); - let dst_set = - self.successors_for_deref(state, args[0].node.clone()); - let destination_set = - state.resolve_place(*destination, self.instance); - state.extend(&destination_set, &state.successors(&dst_set)); - state.extend(&dst_set, &src_set); - } - // All `atomic_load` intrinsics take `src` as an argument. - // This is equivalent to `destination = *src`. - name if name.starts_with("atomic_load") => { - assert_eq!( - args.len(), - 1, - "Unexpected number of arguments for `{name}`" - ); - assert!(matches!( - args[0].node.ty(self.body, self.tcx).kind(), - TyKind::RawPtr(_, Mutability::Not) - )); - let src_set = - self.successors_for_deref(state, args[0].node.clone()); - let destination_set = - state.resolve_place(*destination, self.instance); - state.extend(&destination_set, &state.successors(&src_set)); - } - // All `atomic_store` intrinsics take `dst, val` as arguments. - // This is equivalent to `*dst = val`. - name if name.starts_with("atomic_store") => { - assert_eq!( - args.len(), - 2, - "Unexpected number of arguments for `{name}`" - ); - assert!(matches!( - args[0].node.ty(self.body, self.tcx).kind(), - TyKind::RawPtr(_, Mutability::Mut) - )); - let dst_set = - self.successors_for_deref(state, args[0].node.clone()); - let val_set = - self.successors_for_operand(state, args[1].node.clone()); - state.extend(&dst_set, &val_set); - } - // All other `atomic` intrinsics take `dst, src` as arguments. - // This is equivalent to `destination = *dst; *dst = src`. 
- _ => { - assert_eq!( - args.len(), - 2, - "Unexpected number of arguments for `{name}`" - ); - assert!(matches!( - args[0].node.ty(self.body, self.tcx).kind(), - TyKind::RawPtr(_, Mutability::Mut) - )); - let src_set = - self.successors_for_operand(state, args[1].node.clone()); - let dst_set = - self.successors_for_deref(state, args[0].node.clone()); - let destination_set = - state.resolve_place(*destination, self.instance); - state.extend(&destination_set, &state.successors(&dst_set)); - state.extend(&dst_set, &src_set); - } - }; - } - // Similar to `copy_nonoverlapping`, argument order is `src`, `dst`, `count`. - "copy" => { - assert_eq!(args.len(), 3, "Unexpected number of arguments for `copy`"); - assert!(matches!( - args[0].node.ty(self.body, self.tcx).kind(), - TyKind::RawPtr(_, Mutability::Not) - )); - assert!(matches!( - args[1].node.ty(self.body, self.tcx).kind(), - TyKind::RawPtr(_, Mutability::Mut) - )); - self.apply_copy_effect( - state, - args[0].node.clone(), - args[1].node.clone(), - ); - } - // Similar to `copy_nonoverlapping`, argument order is `dst`, `src`, `count`. - "volatile_copy_memory" | "volatile_copy_nonoverlapping_memory" => { - assert_eq!(args.len(), 3, "Unexpected number of arguments for `copy`"); - assert!(matches!( - args[0].node.ty(self.body, self.tcx).kind(), - TyKind::RawPtr(_, Mutability::Mut) - )); - assert!(matches!( - args[1].node.ty(self.body, self.tcx).kind(), - TyKind::RawPtr(_, Mutability::Not) - )); - self.apply_copy_effect( - state, - args[1].node.clone(), - args[0].node.clone(), - ); - } - // Semantically equivalent to dest = *a - "volatile_load" | "unaligned_volatile_load" => { - assert_eq!( - args.len(), - 1, - "Unexpected number of arguments for `volatile_load`" - ); - assert!(matches!( - args[0].node.ty(self.body, self.tcx).kind(), - TyKind::RawPtr(_, Mutability::Not) - )); - // Destination of the return value. - let lvalue_set = state.resolve_place(*destination, self.instance); - let rvalue_set = self.successors_for_deref(state, args[0].node.clone()); - state.extend(&lvalue_set, &state.successors(&rvalue_set)); - } - // Semantically equivalent *a = b. - "volatile_store" | "unaligned_volatile_store" => { - assert_eq!( - args.len(), - 2, - "Unexpected number of arguments for `volatile_store`" - ); - assert!(matches!( - args[0].node.ty(self.body, self.tcx).kind(), - TyKind::RawPtr(_, Mutability::Mut) - )); - let lvalue_set = self.successors_for_deref(state, args[0].node.clone()); - let rvalue_set = - self.successors_for_operand(state, args[1].node.clone()); - state.extend(&lvalue_set, &rvalue_set); - } - _ => { - // TODO: this probably does not handle all relevant intrinsics, so more - // need to be added. For more information, see: - // https://github.com/model-checking/kani/issues/3300 - if self.tcx.is_mir_available(def_id) { - self.apply_regular_call_effect(state, instance, args, destination); + // Check if the intrinsic has a body we can analyze. + if self.tcx.is_mir_available(def_id) { + self.apply_regular_call_effect(state, instance, args, destination); + } else { + // Check all of the other intrinsics. + match Intrinsic::from_instance(&rustc_internal::stable(instance)) { + intrinsic if is_identity_aliasing_intrinsic(intrinsic.clone()) => { + // Treat the intrinsic as an aggregate, taking a union of all of the + // arguments' aliases. 
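+                            // E.g., `arith_offset(p, n)` returns a pointer that may
+                            // alias exactly what `p` aliases, so unioning the
+                            // arguments' aliases is a sound over-approximation.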
+                            let destination_set =
+                                state.resolve_place(*destination, self.instance);
+                            let operands_set = args
+                                .into_iter()
+                                .flat_map(|operand| {
+                                    self.successors_for_operand(state, operand.node.clone())
+                                })
+                                .collect();
+                            state.extend(&destination_set, &operands_set);
+                        }
+                        // All `atomic_cxchg` intrinsics take `dst, old, src` as arguments.
+                        // This is equivalent to `destination = *dst; *dst = src`.
+                        Intrinsic::AtomicCxchg(_) | Intrinsic::AtomicCxchgWeak(_) => {
+                            let src_set =
+                                self.successors_for_operand(state, args[2].node.clone());
+                            let dst_set =
+                                self.successors_for_deref(state, args[0].node.clone());
+                            let destination_set =
+                                state.resolve_place(*destination, self.instance);
+                            state.extend(&destination_set, &state.successors(&dst_set));
+                            state.extend(&dst_set, &src_set);
+                        }
+                        // All `atomic_load` intrinsics take `src` as an argument.
+                        // This is equivalent to `destination = *src`.
+                        Intrinsic::AtomicLoad(_) => {
+                            let src_set =
+                                self.successors_for_deref(state, args[0].node.clone());
+                            let destination_set =
+                                state.resolve_place(*destination, self.instance);
+                            state.extend(&destination_set, &state.successors(&src_set));
+                        }
+                        // All `atomic_store` intrinsics take `dst, val` as arguments.
+                        // This is equivalent to `*dst = val`.
+                        Intrinsic::AtomicStore(_) => {
+                            let dst_set =
+                                self.successors_for_deref(state, args[0].node.clone());
+                            let val_set =
+                                self.successors_for_operand(state, args[1].node.clone());
+                            state.extend(&dst_set, &val_set);
+                        }
+                        // All other `atomic` intrinsics take `dst, src` as arguments.
+                        // This is equivalent to `destination = *dst; *dst = src`.
+                        Intrinsic::AtomicAnd(_)
+                        | Intrinsic::AtomicMax(_)
+                        | Intrinsic::AtomicMin(_)
+                        | Intrinsic::AtomicNand(_)
+                        | Intrinsic::AtomicOr(_)
+                        | Intrinsic::AtomicUmax(_)
+                        | Intrinsic::AtomicUmin(_)
+                        | Intrinsic::AtomicXadd(_)
+                        | Intrinsic::AtomicXchg(_)
+                        | Intrinsic::AtomicXor(_)
+                        | Intrinsic::AtomicXsub(_) => {
+                            let src_set =
+                                self.successors_for_operand(state, args[1].node.clone());
+                            let dst_set =
+                                self.successors_for_deref(state, args[0].node.clone());
+                            let destination_set =
+                                state.resolve_place(*destination, self.instance);
+                            state.extend(&destination_set, &state.successors(&dst_set));
+                            state.extend(&dst_set, &src_set);
+                        }
+                        // Similar to `copy_nonoverlapping`, argument order is `src`, `dst`, `count`.
+                        Intrinsic::Copy => {
+                            self.apply_copy_effect(
+                                state,
+                                args[0].node.clone(),
+                                args[1].node.clone(),
+                            );
+                        }
+                        // Similar to `copy_nonoverlapping`, argument order is `dst`, `src`, `count`.
+                        Intrinsic::VolatileCopyMemory
+                        | Intrinsic::VolatileCopyNonOverlappingMemory => {
+                            self.apply_copy_effect(
+                                state,
+                                args[1].node.clone(),
+                                args[0].node.clone(),
+                            );
+                        }
+                        // Semantically equivalent to dest = *a
+                        Intrinsic::VolatileLoad | Intrinsic::UnalignedVolatileLoad => {
+                            // Destination of the return value.
+                            let lvalue_set = state.resolve_place(*destination, self.instance);
+                            let rvalue_set =
+                                self.successors_for_deref(state, args[0].node.clone());
+                            state.extend(&lvalue_set, &state.successors(&rvalue_set));
+                        }
+                        // Semantically equivalent to *a = b.
+                        Intrinsic::VolatileStore => {
+                            let lvalue_set =
+                                self.successors_for_deref(state, args[0].node.clone());
+                            let rvalue_set =
+                                self.successors_for_operand(state, args[1].node.clone());
+                            state.extend(&lvalue_set, &rvalue_set);
+                        }
+                        Intrinsic::Unimplemented { .. } => {
+                            // This will be taken care of at the codegen level.
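+                            // No aliasing effects need to be recorded here.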
+                        }
+                        intrinsic => {
+                            unimplemented!(
+                                "Kani does not support reasoning about aliasing in the presence of intrinsic `{intrinsic:?}`. For more information about the state of uninitialized memory checks implementation, see: https://github.com/model-checking/kani/issues/3300."
+                            );
                         }
                     }
                 }
@@ -652,3 +610,136 @@ impl<'a, 'tcx> PointsToAnalysis<'a, 'tcx> {
         }
     }
 }
+
+/// Determines if the intrinsic does not influence aliasing beyond being treated as an identity
+/// function (i.e., it propagates aliasing without changes).
+fn is_identity_aliasing_intrinsic(intrinsic: Intrinsic) -> bool {
+    match intrinsic {
+        Intrinsic::AddWithOverflow
+        | Intrinsic::ArithOffset
+        | Intrinsic::AssertInhabited
+        | Intrinsic::AssertMemUninitializedValid
+        | Intrinsic::AssertZeroValid
+        | Intrinsic::Assume
+        | Intrinsic::Bitreverse
+        | Intrinsic::BlackBox
+        | Intrinsic::Breakpoint
+        | Intrinsic::Bswap
+        | Intrinsic::CeilF32
+        | Intrinsic::CeilF64
+        | Intrinsic::CompareBytes
+        | Intrinsic::CopySignF32
+        | Intrinsic::CopySignF64
+        | Intrinsic::CosF32
+        | Intrinsic::CosF64
+        | Intrinsic::Ctlz
+        | Intrinsic::CtlzNonZero
+        | Intrinsic::Ctpop
+        | Intrinsic::Cttz
+        | Intrinsic::CttzNonZero
+        | Intrinsic::DiscriminantValue
+        | Intrinsic::ExactDiv
+        | Intrinsic::Exp2F32
+        | Intrinsic::Exp2F64
+        | Intrinsic::ExpF32
+        | Intrinsic::ExpF64
+        | Intrinsic::FabsF32
+        | Intrinsic::FabsF64
+        | Intrinsic::FaddFast
+        | Intrinsic::FdivFast
+        | Intrinsic::FloorF32
+        | Intrinsic::FloorF64
+        | Intrinsic::FmafF32
+        | Intrinsic::FmafF64
+        | Intrinsic::FmulFast
+        | Intrinsic::Forget
+        | Intrinsic::FsubFast
+        | Intrinsic::IsValStaticallyKnown
+        | Intrinsic::Likely
+        | Intrinsic::Log10F32
+        | Intrinsic::Log10F64
+        | Intrinsic::Log2F32
+        | Intrinsic::Log2F64
+        | Intrinsic::LogF32
+        | Intrinsic::LogF64
+        | Intrinsic::MaxNumF32
+        | Intrinsic::MaxNumF64
+        | Intrinsic::MinAlignOf
+        | Intrinsic::MinAlignOfVal
+        | Intrinsic::MinNumF32
+        | Intrinsic::MinNumF64
+        | Intrinsic::MulWithOverflow
+        | Intrinsic::NearbyIntF32
+        | Intrinsic::NearbyIntF64
+        | Intrinsic::NeedsDrop
+        | Intrinsic::PowF32
+        | Intrinsic::PowF64
+        | Intrinsic::PowIF32
+        | Intrinsic::PowIF64
+        | Intrinsic::PrefAlignOf
+        | Intrinsic::PtrGuaranteedCmp
+        | Intrinsic::PtrOffsetFrom
+        | Intrinsic::PtrOffsetFromUnsigned
+        | Intrinsic::RawEq
+        | Intrinsic::RintF32
+        | Intrinsic::RintF64
+        | Intrinsic::RotateLeft
+        | Intrinsic::RotateRight
+        | Intrinsic::RoundF32
+        | Intrinsic::RoundF64
+        | Intrinsic::SaturatingAdd
+        | Intrinsic::SaturatingSub
+        | Intrinsic::SinF32
+        | Intrinsic::SinF64
+        | Intrinsic::SizeOfVal
+        | Intrinsic::SqrtF32
+        | Intrinsic::SqrtF64
+        | Intrinsic::SubWithOverflow
+        | Intrinsic::TruncF32
+        | Intrinsic::TruncF64
+        | Intrinsic::TypeId
+        | Intrinsic::TypeName
+        | Intrinsic::UncheckedDiv
+        | Intrinsic::UncheckedRem
+        | Intrinsic::Unlikely
+        | Intrinsic::VtableSize
+        | Intrinsic::VtableAlign
+        | Intrinsic::WrappingAdd
+        | Intrinsic::WrappingMul
+        | Intrinsic::WrappingSub
+        | Intrinsic::WriteBytes => {
+            /* Intrinsics that do not interact with aliasing beyond propagating it. */
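+            // Call sites union the arguments' alias sets into the destination.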
+            true
+        }
+        Intrinsic::SimdAdd
+        | Intrinsic::SimdAnd
+        | Intrinsic::SimdDiv
+        | Intrinsic::SimdRem
+        | Intrinsic::SimdEq
+        | Intrinsic::SimdExtract
+        | Intrinsic::SimdGe
+        | Intrinsic::SimdGt
+        | Intrinsic::SimdInsert
+        | Intrinsic::SimdLe
+        | Intrinsic::SimdLt
+        | Intrinsic::SimdMul
+        | Intrinsic::SimdNe
+        | Intrinsic::SimdOr
+        | Intrinsic::SimdShl
+        | Intrinsic::SimdShr
+        | Intrinsic::SimdShuffle(_)
+        | Intrinsic::SimdSub
+        | Intrinsic::SimdXor => {
+            /* SIMD operations */
+            true
+        }
+        Intrinsic::AtomicFence(_) | Intrinsic::AtomicSingleThreadFence(_) => {
+            /* Atomic fences */
+            true
+        }
+        _ => {
+            /* Everything else */
+            false
+        }
+    }
+}
diff --git a/kani-compiler/src/kani_middle/transform/check_uninit/delayed_ub/initial_target_visitor.rs b/kani-compiler/src/kani_middle/transform/check_uninit/delayed_ub/initial_target_visitor.rs
index 11ac412703ae..35d0e7041704 100644
--- a/kani-compiler/src/kani_middle/transform/check_uninit/delayed_ub/initial_target_visitor.rs
+++ b/kani-compiler/src/kani_middle/transform/check_uninit/delayed_ub/initial_target_visitor.rs
@@ -4,14 +4,17 @@
 //! This module contains the visitor responsible for collecting initial analysis targets for delayed
 //! UB instrumentation.
-use crate::kani_middle::transform::check_uninit::ty_layout::tys_layout_equal_to_size;
+use crate::{
+    intrinsics::Intrinsic,
+    kani_middle::transform::check_uninit::ty_layout::tys_layout_equal_to_size,
+};
 use stable_mir::{
     mir::{
         alloc::GlobalAlloc,
         mono::{Instance, InstanceKind, StaticDef},
         visit::Location,
-        Body, CastKind, LocalDecl, MirVisitor, Mutability, NonDivergingIntrinsic, Operand, Place,
-        Rvalue, Statement, StatementKind, Terminator, TerminatorKind,
+        Body, CastKind, LocalDecl, MirVisitor, NonDivergingIntrinsic, Operand, Place, Rvalue,
+        Statement, StatementKind, Terminator, TerminatorKind,
     },
     ty::{ConstantKind, RigidTy, TyKind},
 };
@@ -101,34 +104,12 @@ impl MirVisitor for InitialTargetVisitor {
         if let TerminatorKind::Call { func, args, .. } = &term.kind {
             let instance = try_resolve_instance(self.body.locals(), func).unwrap();
             if instance.kind == InstanceKind::Intrinsic {
-                match instance.intrinsic_name().unwrap().as_str() {
-                    "copy" => {
-                        assert_eq!(args.len(), 3, "Unexpected number of arguments for `copy`");
-                        assert!(matches!(
-                            args[0].ty(self.body.locals()).unwrap().kind(),
-                            TyKind::RigidTy(RigidTy::RawPtr(_, Mutability::Not))
-                        ));
-                        assert!(matches!(
-                            args[1].ty(self.body.locals()).unwrap().kind(),
-                            TyKind::RigidTy(RigidTy::RawPtr(_, Mutability::Mut))
-                        ));
+                match Intrinsic::from_instance(&instance) {
+                    Intrinsic::Copy => {
                         // Here, `dst` is the second argument.
                         self.push_operand(&args[1]);
                     }
-                    "volatile_copy_memory" | "volatile_copy_nonoverlapping_memory" => {
-                        assert_eq!(
-                            args.len(),
-                            3,
-                            "Unexpected number of arguments for `volatile_copy`"
-                        );
-                        assert!(matches!(
-                            args[0].ty(self.body.locals()).unwrap().kind(),
-                            TyKind::RigidTy(RigidTy::RawPtr(_, Mutability::Mut))
-                        ));
-                        assert!(matches!(
-                            args[1].ty(self.body.locals()).unwrap().kind(),
-                            TyKind::RigidTy(RigidTy::RawPtr(_, Mutability::Not))
-                        ));
+                    Intrinsic::VolatileCopyMemory | Intrinsic::VolatileCopyNonOverlappingMemory => {
                         // Here, `dst` is the first argument.
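+                        // Record it as an initial analysis target for delayed UB instrumentation.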
self.push_operand(&args[0]); } diff --git a/kani-compiler/src/kani_middle/transform/check_uninit/ptr_uninit/uninit_visitor.rs b/kani-compiler/src/kani_middle/transform/check_uninit/ptr_uninit/uninit_visitor.rs index 837e14abc886..f682f93a261e 100644 --- a/kani-compiler/src/kani_middle/transform/check_uninit/ptr_uninit/uninit_visitor.rs +++ b/kani-compiler/src/kani_middle/transform/check_uninit/ptr_uninit/uninit_visitor.rs @@ -3,12 +3,15 @@ // //! Visitor that collects all instructions relevant to uninitialized memory access. -use crate::kani_middle::transform::{ - body::{InsertPosition, MutableBody, SourceInstruction}, - check_uninit::{ - relevant_instruction::{InitRelevantInstruction, MemoryInitOp}, - ty_layout::tys_layout_compatible_to_size, - TargetFinder, +use crate::{ + intrinsics::Intrinsic, + kani_middle::transform::{ + body::{InsertPosition, MutableBody, SourceInstruction}, + check_uninit::{ + relevant_instruction::{InitRelevantInstruction, MemoryInitOp}, + ty_layout::tys_layout_compatible_to_size, + TargetFinder, + }, }, }; use stable_mir::{ @@ -16,8 +19,8 @@ use stable_mir::{ alloc::GlobalAlloc, mono::{Instance, InstanceKind}, visit::{Location, PlaceContext}, - BasicBlockIdx, CastKind, LocalDecl, MirVisitor, Mutability, NonDivergingIntrinsic, Operand, - Place, PointerCoercion, ProjectionElem, Rvalue, Statement, StatementKind, Terminator, + BasicBlockIdx, CastKind, LocalDecl, MirVisitor, NonDivergingIntrinsic, Operand, Place, + PointerCoercion, ProjectionElem, Rvalue, Statement, StatementKind, Terminator, TerminatorKind, }, ty::{ConstantKind, RigidTy, TyKind}, @@ -182,46 +185,30 @@ impl MirVisitor for CheckUninitVisitor { }; match instance.kind { InstanceKind::Intrinsic => { - match instance.intrinsic_name().unwrap().as_str() { - intrinsic_name if can_skip_intrinsic(intrinsic_name) => { + match Intrinsic::from_instance(&instance) { + intrinsic_name if can_skip_intrinsic(intrinsic_name.clone()) => { /* Intrinsics that can be safely skipped */ } - name if name.starts_with("atomic") => { - let num_args = match name { - // All `atomic_cxchg` intrinsics take `dst, old, src` as arguments. - name if name.starts_with("atomic_cxchg") => 3, - // All `atomic_load` intrinsics take `src` as an argument. - name if name.starts_with("atomic_load") => 1, - // All other `atomic` intrinsics take `dst, src` as arguments. 
-                                _ => 2,
-                            };
-                            assert_eq!(
-                                args.len(),
-                                num_args,
-                                "Unexpected number of arguments for `{name}`"
-                            );
-                            assert!(matches!(
-                                args[0].ty(&self.locals).unwrap().kind(),
-                                TyKind::RigidTy(RigidTy::RawPtr(..))
-                            ));
+                        Intrinsic::AtomicAnd(_)
+                        | Intrinsic::AtomicCxchg(_)
+                        | Intrinsic::AtomicCxchgWeak(_)
+                        | Intrinsic::AtomicLoad(_)
+                        | Intrinsic::AtomicMax(_)
+                        | Intrinsic::AtomicMin(_)
+                        | Intrinsic::AtomicNand(_)
+                        | Intrinsic::AtomicOr(_)
+                        | Intrinsic::AtomicStore(_)
+                        | Intrinsic::AtomicUmax(_)
+                        | Intrinsic::AtomicUmin(_)
+                        | Intrinsic::AtomicXadd(_)
+                        | Intrinsic::AtomicXchg(_)
+                        | Intrinsic::AtomicXor(_)
+                        | Intrinsic::AtomicXsub(_) => {
                             self.push_target(MemoryInitOp::Check {
                                 operand: args[0].clone(),
                             });
                         }
-                        "compare_bytes" => {
-                            assert_eq!(
-                                args.len(),
-                                3,
-                                "Unexpected number of arguments for `compare_bytes`"
-                            );
-                            assert!(matches!(
-                                args[0].ty(&self.locals).unwrap().kind(),
-                                TyKind::RigidTy(RigidTy::RawPtr(_, Mutability::Not))
-                            ));
-                            assert!(matches!(
-                                args[1].ty(&self.locals).unwrap().kind(),
-                                TyKind::RigidTy(RigidTy::RawPtr(_, Mutability::Not))
-                            ));
+                        Intrinsic::CompareBytes => {
                             self.push_target(MemoryInitOp::CheckSliceChunk {
                                 operand: args[0].clone(),
                                 count: args[2].clone(),
@@ -231,22 +218,7 @@ impl MirVisitor for CheckUninitVisitor {
                                 count: args[2].clone(),
                             });
                         }
-                        "copy"
-                        | "volatile_copy_memory"
-                        | "volatile_copy_nonoverlapping_memory" => {
-                            assert_eq!(
-                                args.len(),
-                                3,
-                                "Unexpected number of arguments for `copy`"
-                            );
-                            assert!(matches!(
-                                args[0].ty(&self.locals).unwrap().kind(),
-                                TyKind::RigidTy(RigidTy::RawPtr(_, Mutability::Not))
-                            ));
-                            assert!(matches!(
-                                args[1].ty(&self.locals).unwrap().kind(),
-                                TyKind::RigidTy(RigidTy::RawPtr(_, Mutability::Mut))
-                            ));
+                        Intrinsic::Copy => {
                             self.push_target(MemoryInitOp::CheckSliceChunk {
                                 operand: args[0].clone(),
                                 count: args[2].clone(),
@@ -258,20 +230,20 @@ impl MirVisitor for CheckUninitVisitor {
                                 position: InsertPosition::After,
                             });
                         }
-                        "typed_swap" => {
-                            assert_eq!(
-                                args.len(),
-                                2,
-                                "Unexpected number of arguments for `typed_swap`"
-                            );
-                            assert!(matches!(
-                                args[0].ty(&self.locals).unwrap().kind(),
-                                TyKind::RigidTy(RigidTy::RawPtr(_, Mutability::Mut))
-                            ));
-                            assert!(matches!(
-                                args[1].ty(&self.locals).unwrap().kind(),
-                                TyKind::RigidTy(RigidTy::RawPtr(_, Mutability::Mut))
-                            ));
+                        Intrinsic::VolatileCopyMemory
+                        | Intrinsic::VolatileCopyNonOverlappingMemory => {
+                            self.push_target(MemoryInitOp::CheckSliceChunk {
+                                operand: args[1].clone(),
+                                count: args[2].clone(),
+                            });
+                            self.push_target(MemoryInitOp::SetSliceChunk {
+                                operand: args[0].clone(),
+                                count: args[2].clone(),
+                                value: true,
+                                position: InsertPosition::After,
+                            });
+                        }
+                        Intrinsic::TypedSwap => {
                             self.push_target(MemoryInitOp::Check {
                                 operand: args[0].clone(),
                             });
@@ -279,46 +251,19 @@ impl MirVisitor for CheckUninitVisitor {
                                 operand: args[1].clone(),
                             });
                         }
-                        "volatile_load" | "unaligned_volatile_load" => {
-                            assert_eq!(
-                                args.len(),
-                                1,
-                                "Unexpected number of arguments for `volatile_load`"
-                            );
-                            assert!(matches!(
-                                args[0].ty(&self.locals).unwrap().kind(),
-                                TyKind::RigidTy(RigidTy::RawPtr(_, Mutability::Not))
-                            ));
+                        Intrinsic::VolatileLoad | Intrinsic::UnalignedVolatileLoad => {
                            self.push_target(MemoryInitOp::Check {
                                 operand: args[0].clone(),
                             });
                         }
-                        "volatile_store" => {
-                            assert_eq!(
-                                args.len(),
-                                2,
-                                "Unexpected number of arguments for `volatile_store`"
-                            );
-                            assert!(matches!(
-                                args[0].ty(&self.locals).unwrap().kind(),
-                                TyKind::RigidTy(RigidTy::RawPtr(_, Mutability::Mut))
-                            ));
+                        Intrinsic::VolatileStore => {
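+                            // The store initializes its destination, so mark it as set afterwards.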
                             self.push_target(MemoryInitOp::Set {
                                 operand: args[0].clone(),
                                 value: true,
                                 position: InsertPosition::After,
                             });
                         }
-                        "write_bytes" => {
-                            assert_eq!(
-                                args.len(),
-                                3,
-                                "Unexpected number of arguments for `write_bytes`"
-                            );
-                            assert!(matches!(
-                                args[0].ty(&self.locals).unwrap().kind(),
-                                TyKind::RigidTy(RigidTy::RawPtr(_, Mutability::Mut))
-                            ));
+                        Intrinsic::WriteBytes => {
                             self.push_target(MemoryInitOp::SetSliceChunk {
                                 operand: args[0].clone(),
                                 count: args[2].clone(),
@@ -328,7 +273,7 @@ impl MirVisitor for CheckUninitVisitor {
                         }
                         intrinsic => {
                             self.push_target(MemoryInitOp::Unsupported {
-                                reason: format!("Kani does not support reasoning about memory initialization of intrinsic `{intrinsic}`."),
+                                reason: format!("Kani does not support reasoning about memory initialization of intrinsic `{intrinsic:?}`."),
                             });
                         }
                     }
@@ -519,132 +464,131 @@ impl MirVisitor for CheckUninitVisitor {
 
 /// Determines if the intrinsic has no memory initialization related function and hence can be
 /// safely skipped.
-fn can_skip_intrinsic(intrinsic_name: &str) -> bool {
-    match intrinsic_name {
-        "add_with_overflow"
-        | "arith_offset"
-        | "assert_inhabited"
-        | "assert_mem_uninitialized_valid"
-        | "assert_zero_valid"
-        | "assume"
-        | "bitreverse"
-        | "black_box"
-        | "breakpoint"
-        | "bswap"
-        | "caller_location"
-        | "ceilf32"
-        | "ceilf64"
-        | "copysignf32"
-        | "copysignf64"
-        | "cosf32"
-        | "cosf64"
-        | "ctlz"
-        | "ctlz_nonzero"
-        | "ctpop"
-        | "cttz"
-        | "cttz_nonzero"
-        | "discriminant_value"
-        | "exact_div"
-        | "exp2f32"
-        | "exp2f64"
-        | "expf32"
-        | "expf64"
-        | "fabsf32"
-        | "fabsf64"
-        | "fadd_fast"
-        | "fdiv_fast"
-        | "floorf32"
-        | "floorf64"
-        | "fmaf32"
-        | "fmaf64"
-        | "fmul_fast"
-        | "forget"
-        | "fsub_fast"
-        | "is_val_statically_known"
-        | "likely"
-        | "log10f32"
-        | "log10f64"
-        | "log2f32"
-        | "log2f64"
-        | "logf32"
-        | "logf64"
-        | "maxnumf32"
-        | "maxnumf64"
-        | "min_align_of"
-        | "min_align_of_val"
-        | "minnumf32"
-        | "minnumf64"
-        | "mul_with_overflow"
-        | "nearbyintf32"
-        | "nearbyintf64"
-        | "needs_drop"
-        | "powf32"
-        | "powf64"
-        | "powif32"
-        | "powif64"
-        | "pref_align_of"
-        | "raw_eq"
-        | "rintf32"
-        | "rintf64"
-        | "rotate_left"
-        | "rotate_right"
-        | "roundf32"
-        | "roundf64"
-        | "saturating_add"
-        | "saturating_sub"
-        | "sinf32"
-        | "sinf64"
-        | "sqrtf32"
-        | "sqrtf64"
-        | "sub_with_overflow"
-        | "truncf32"
-        | "truncf64"
-        | "type_id"
-        | "type_name"
-        | "unchecked_div"
-        | "unchecked_rem"
-        | "unlikely"
-        | "vtable_size"
-        | "vtable_align"
-        | "wrapping_add"
-        | "wrapping_mul"
-        | "wrapping_sub" => {
+fn can_skip_intrinsic(intrinsic: Intrinsic) -> bool {
+    match intrinsic {
+        Intrinsic::AddWithOverflow
+        | Intrinsic::ArithOffset
+        | Intrinsic::AssertInhabited
+        | Intrinsic::AssertMemUninitializedValid
+        | Intrinsic::AssertZeroValid
+        | Intrinsic::Assume
+        | Intrinsic::Bitreverse
+        | Intrinsic::BlackBox
+        | Intrinsic::Breakpoint
+        | Intrinsic::Bswap
+        | Intrinsic::CeilF32
+        | Intrinsic::CeilF64
+        | Intrinsic::CopySignF32
+        | Intrinsic::CopySignF64
+        | Intrinsic::CosF32
+        | Intrinsic::CosF64
+        | Intrinsic::Ctlz
+        | Intrinsic::CtlzNonZero
+        | Intrinsic::Ctpop
+        | Intrinsic::Cttz
+        | Intrinsic::CttzNonZero
+        | Intrinsic::DiscriminantValue
+        | Intrinsic::ExactDiv
+        | Intrinsic::Exp2F32
+        | Intrinsic::Exp2F64
+        | Intrinsic::ExpF32
+        | Intrinsic::ExpF64
+        | Intrinsic::FabsF32
+        | Intrinsic::FabsF64
+        | Intrinsic::FaddFast
+        | Intrinsic::FdivFast
+        | Intrinsic::FloorF32
+        | Intrinsic::FloorF64
+        | Intrinsic::FmafF32
+        | Intrinsic::FmafF64
+        | Intrinsic::FmulFast
+        | Intrinsic::Forget
+        | Intrinsic::FsubFast
+        | Intrinsic::IsValStaticallyKnown
+        | Intrinsic::Likely
+        | Intrinsic::Log10F32
+        | Intrinsic::Log10F64
+        | Intrinsic::Log2F32
+        | Intrinsic::Log2F64
+        | Intrinsic::LogF32
+        | Intrinsic::LogF64
+        | Intrinsic::MaxNumF32
+        | Intrinsic::MaxNumF64
+        | Intrinsic::MinAlignOf
+        | Intrinsic::MinAlignOfVal
+        | Intrinsic::MinNumF32
+        | Intrinsic::MinNumF64
+        | Intrinsic::MulWithOverflow
+        | Intrinsic::NearbyIntF32
+        | Intrinsic::NearbyIntF64
+        | Intrinsic::NeedsDrop
+        | Intrinsic::PowF32
+        | Intrinsic::PowF64
+        | Intrinsic::PowIF32
+        | Intrinsic::PowIF64
+        | Intrinsic::PrefAlignOf
+        | Intrinsic::RawEq
+        | Intrinsic::RintF32
+        | Intrinsic::RintF64
+        | Intrinsic::RotateLeft
+        | Intrinsic::RotateRight
+        | Intrinsic::RoundF32
+        | Intrinsic::RoundF64
+        | Intrinsic::SaturatingAdd
+        | Intrinsic::SaturatingSub
+        | Intrinsic::SinF32
+        | Intrinsic::SinF64
+        | Intrinsic::SqrtF32
+        | Intrinsic::SqrtF64
+        | Intrinsic::SubWithOverflow
+        | Intrinsic::TruncF32
+        | Intrinsic::TruncF64
+        | Intrinsic::TypeId
+        | Intrinsic::TypeName
+        | Intrinsic::UncheckedDiv
+        | Intrinsic::UncheckedRem
+        | Intrinsic::Unlikely
+        | Intrinsic::VtableSize
+        | Intrinsic::VtableAlign
+        | Intrinsic::WrappingAdd
+        | Intrinsic::WrappingMul
+        | Intrinsic::WrappingSub => {
             /* Intrinsics that do not interact with memory initialization. */
             true
         }
-        "ptr_guaranteed_cmp" | "ptr_offset_from" | "ptr_offset_from_unsigned" | "size_of_val" => {
+        Intrinsic::PtrGuaranteedCmp
+        | Intrinsic::PtrOffsetFrom
+        | Intrinsic::PtrOffsetFromUnsigned
+        | Intrinsic::SizeOfVal => {
             /* AFAICS from the documentation, none of those require the pointer arguments to be actually initialized. */
             true
         }
-        name if name.starts_with("simd") => {
+        Intrinsic::SimdAdd
+        | Intrinsic::SimdAnd
+        | Intrinsic::SimdDiv
+        | Intrinsic::SimdRem
+        | Intrinsic::SimdEq
+        | Intrinsic::SimdExtract
+        | Intrinsic::SimdGe
+        | Intrinsic::SimdGt
+        | Intrinsic::SimdInsert
+        | Intrinsic::SimdLe
+        | Intrinsic::SimdLt
+        | Intrinsic::SimdMul
+        | Intrinsic::SimdNe
+        | Intrinsic::SimdOr
+        | Intrinsic::SimdShl
+        | Intrinsic::SimdShr
+        | Intrinsic::SimdShuffle(_)
+        | Intrinsic::SimdSub
+        | Intrinsic::SimdXor => {
             /* SIMD operations */
             true
         }
-        name if name.starts_with("atomic_fence")
-            || name.starts_with("atomic_singlethreadfence") =>
-        {
+        Intrinsic::AtomicFence(_) | Intrinsic::AtomicSingleThreadFence(_) => {
             /* Atomic fences */
             true
         }
-        "copy_nonoverlapping" => unreachable!(
-            "Expected `core::intrinsics::unreachable` to be handled by `StatementKind::CopyNonOverlapping`"
-        ),
-        "offset" => unreachable!(
-            "Expected `core::intrinsics::unreachable` to be handled by `BinOp::OffSet`"
-        ),
-        "unreachable" => unreachable!(
-            "Expected `std::intrinsics::unreachable` to be handled by `TerminatorKind::Unreachable`"
-        ),
-        "transmute" | "transmute_copy" | "unchecked_add" | "unchecked_mul" | "unchecked_shl"
-        | "size_of" | "unchecked_shr" | "unchecked_sub" => {
-            unreachable!("Expected intrinsic to be lowered before codegen")
-        }
-        "catch_unwind" => {
-            unimplemented!("")
-        }
-        "retag_box_to_raw" => {
-            unreachable!("This was removed in the latest Rust version.")
-        }
         _ => {
             /* Everything else */
             false
diff --git a/kani-compiler/src/main.rs b/kani-compiler/src/main.rs
index 7f1fb144a09b..e47483fb4fa5 100644
--- a/kani-compiler/src/main.rs
+++ b/kani-compiler/src/main.rs
@@ -39,6 +39,7 @@ extern crate tempfile;
 mod args;
 #[cfg(feature = "cprover")]
 mod codegen_cprover_gotoc;
+mod intrinsics;
 mod kani_compiler;
 mod kani_middle;
 mod kani_queries;
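
Note for reviewers (not part of the patch): the sketch below illustrates the dispatch style this change introduces. `Intrinsic::from_instance` and the variant shapes are taken from the hunks above; the `describe` helper and its return strings are hypothetical, added only to show the pattern in isolation.

    // Illustrative sketch only -- `describe` is a hypothetical helper, not part
    // of this change. One enum arm now covers a whole family of atomic
    // intrinsics whose memory ordering used to be encoded in the name
    // (e.g. `atomic_load_seqcst`, `atomic_load_acquire`, ...).
    use stable_mir::mir::mono::{Instance, InstanceKind};

    use crate::intrinsics::Intrinsic;

    fn describe(instance: &Instance) -> &'static str {
        assert_eq!(instance.kind, InstanceKind::Intrinsic);
        match Intrinsic::from_instance(instance) {
            // The ordering travels in the variant payload instead of the name.
            Intrinsic::AtomicLoad(_) => "atomic load",
            Intrinsic::AtomicStore(_) => "atomic store",
            Intrinsic::AtomicCxchg(_) | Intrinsic::AtomicCxchgWeak(_) => "compare-exchange",
            Intrinsic::Copy => "untyped copy",
            // Catch-all variant for intrinsics Kani does not model; fields elided here.
            Intrinsic::Unimplemented { .. } => "unsupported",
            _ => "other",
        }
    }

One practical consequence, visible in every file touched above: the per-call-site `assert_eq!`/`assert!` argument checks could be deleted because argument-shape validation now happens once, when the enum is constructed, rather than at each consumer.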