diff --git a/src/kani-compiler/rustc_codegen_kani/src/codegen/intrinsic.rs b/src/kani-compiler/rustc_codegen_kani/src/codegen/intrinsic.rs
index b7eb043fd29c8..e9aa393c7867c 100644
--- a/src/kani-compiler/rustc_codegen_kani/src/codegen/intrinsic.rs
+++ b/src/kani-compiler/rustc_codegen_kani/src/codegen/intrinsic.rs
@@ -1,8 +1,6 @@
 // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 // SPDX-License-Identifier: Apache-2.0 OR MIT
 //! this module handles intrinsics
-use tracing::{debug, warn};
-
 use crate::GotocCtx;
 use cbmc::goto_program::{BuiltinFn, Expr, Location, Stmt, Type};
 use rustc_middle::mir::Place;
@@ -10,6 +8,17 @@ use rustc_middle::ty::layout::LayoutOf;
 use rustc_middle::ty::Instance;
 use rustc_middle::ty::{self, Ty, TyS};
 use rustc_span::Span;
+use tracing::{debug, warn};
+
+macro_rules! emit_concurrency_warning {
+    ($intrinsic: expr, $loc: expr) => {{
+        warn!(
+            "Kani does not support concurrency for now. `{}` in {} treated as a sequential operation.",
+            $intrinsic,
+            $loc.short_string()
+        );
+    }};
+}
 
 struct SizeAlign {
     size: Expr,
@@ -279,8 +288,8 @@ impl<'tcx> GotocCtx<'tcx> {
         // Note: Atomic arithmetic operations wrap around on overflow.
         macro_rules! codegen_atomic_binop {
             ($op: ident) => {{
-                warn!("Kani does not support concurrency for now. {} treated as a sequential operation.", intrinsic);
                 let loc = self.codegen_span_option(span);
+                emit_concurrency_warning!(intrinsic, loc);
                 let var1_ref = fargs.remove(0);
                 let var1 = var1_ref.dereference();
                 let tmp = self.gen_temp_variable(var1.typ().clone(), loc.clone()).to_expr();
@@ -519,6 +528,10 @@ impl<'tcx> GotocCtx<'tcx> {
             "volatile_copy_memory" => codegen_intrinsic_copy!(Memmove),
             "volatile_copy_nonoverlapping_memory" => codegen_intrinsic_copy!(Memcpy),
             "volatile_load" => self.codegen_expr_to_place(p, fargs.remove(0).dereference()),
+            "volatile_store" => {
+                assert!(self.place_ty(p).is_unit());
+                self.codegen_volatile_store(instance, intrinsic, fargs, loc)
+            }
             "wrapping_add" => codegen_wrapping_op!(plus),
             "wrapping_mul" => codegen_wrapping_op!(mul),
             "wrapping_sub" => codegen_wrapping_op!(sub),
@@ -657,10 +670,7 @@ impl<'tcx> GotocCtx<'tcx> {
         p: &Place<'tcx>,
         loc: Location,
     ) -> Stmt {
-        warn!(
-            "Kani does not support concurrency for now. {} treated as a sequential operation.",
-            intrinsic
-        );
+        emit_concurrency_warning!(intrinsic, loc);
         let var1_ref = fargs.remove(0);
         let var1 = var1_ref.dereference().with_location(loc.clone());
         let res_stmt = self.codegen_expr_to_place(p, var1);
@@ -688,10 +698,7 @@ impl<'tcx> GotocCtx<'tcx> {
         p: &Place<'tcx>,
         loc: Location,
     ) -> Stmt {
-        warn!(
-            "Kani does not support concurrency for now. {} treated as a sequential operation.",
-            intrinsic
-        );
+        emit_concurrency_warning!(intrinsic, loc);
         let var1_ref = fargs.remove(0);
         let var1 = var1_ref.dereference().with_location(loc.clone());
         let tmp = self.gen_temp_variable(var1.typ().clone(), loc.clone()).to_expr();
@@ -727,10 +734,7 @@ impl<'tcx> GotocCtx<'tcx> {
         p: &Place<'tcx>,
         loc: Location,
     ) -> Stmt {
-        warn!(
-            "Kani does not support concurrency for now. {} treated as a sequential operation.",
-            intrinsic
-        );
+        emit_concurrency_warning!(intrinsic, loc);
         let var1_ref = fargs.remove(0);
         let var1 = var1_ref.dereference().with_location(loc.clone());
         let tmp = self.gen_temp_variable(var1.typ().clone(), loc.clone()).to_expr();
@@ -743,10 +747,7 @@ impl<'tcx> GotocCtx<'tcx> {
 
     /// Atomic no-ops (e.g., atomic_fence) are transformed into SKIP statements
     fn codegen_atomic_noop(&mut self, intrinsic: &str, loc: Location) -> Stmt {
-        warn!(
-            "Kani does not support concurrency for now. {} treated as a sequential operation.",
-            intrinsic
-        );
+        emit_concurrency_warning!(intrinsic, loc);
         let skip_stmt = Stmt::skip(loc.clone());
         Stmt::atomic_block(vec![skip_stmt], loc)
     }
@@ -1005,4 +1006,27 @@ impl<'tcx> GotocCtx<'tcx> {
             .collect();
         self.codegen_expr_to_place(p, Expr::vector_expr(cbmc_ret_ty, elems))
     }
+
+    /// A volatile write of a memory location:
+    /// https://doc.rust-lang.org/std/ptr/fn.write_volatile.html
+    ///
+    /// Undefined behavior if any of these conditions are violated:
+    /// * `dst` must be valid for writes (done by `--pointer-check`)
+    /// * `dst` must be properly aligned (done by `align_check` below)
+    fn codegen_volatile_store(
+        &mut self,
+        instance: Instance<'tcx>,
+        intrinsic: &str,
+        mut fargs: Vec<Expr>,
+        loc: Location,
+    ) -> Stmt {
+        emit_concurrency_warning!(intrinsic, loc);
+        let dst = fargs.remove(0);
+        let src = fargs.remove(0);
+        let typ = instance.substs.type_at(0);
+        let align = self.is_aligned(typ, dst.clone());
+        let align_check = Stmt::assert(align, "`dst` is properly aligned", loc.clone());
+        let expr = dst.dereference().assign(src, loc.clone());
+        Stmt::block(vec![align_check, expr], loc)
+    }
 }
diff --git a/src/kani-compiler/rustc_codegen_kani/src/overrides/hooks.rs b/src/kani-compiler/rustc_codegen_kani/src/overrides/hooks.rs
index 390b3b493844a..171924fa9e1e9 100644
--- a/src/kani-compiler/rustc_codegen_kani/src/overrides/hooks.rs
+++ b/src/kani-compiler/rustc_codegen_kani/src/overrides/hooks.rs
@@ -461,10 +461,8 @@ impl<'tcx> GotocHook<'tcx> for PtrWrite {
         let name = with_no_trimmed_paths(|| tcx.def_path_str(instance.def_id()));
         name == "core::ptr::write"
             || name == "core::ptr::write_unaligned"
-            || name == "core::ptr::write_volatile"
             || name == "std::ptr::write"
             || name == "std::ptr::write_unaligned"
-            || name == "std::ptr::write_volatile"
     }
 
     fn handle(
diff --git a/src/kani-compiler/rustc_codegen_kani/src/utils/utils.rs b/src/kani-compiler/rustc_codegen_kani/src/utils/utils.rs
index c185c1484a304..44bba689a3296 100644
--- a/src/kani-compiler/rustc_codegen_kani/src/utils/utils.rs
+++ b/src/kani-compiler/rustc_codegen_kani/src/utils/utils.rs
@@ -4,6 +4,8 @@ use super::super::codegen::TypeExt;
 use crate::GotocCtx;
 use cbmc::btree_string_map;
 use cbmc::goto_program::{Expr, ExprValue, Location, Stmt, SymbolTable, Type};
+use rustc_middle::ty::layout::LayoutOf;
+use rustc_middle::ty::TyS;
 use tracing::debug;
 
 // Should move into rvalue
@@ -67,6 +69,16 @@ impl<'tcx> GotocCtx<'tcx> {
         Expr::statement_expression(body, t).with_location(loc)
     }
 
+    /// Generates an expression `(dst as usize) % align_of(T) == 0`
+    /// to determine if `dst` is aligned.
+    pub fn is_aligned(&mut self, typ: &'tcx TyS<'_>, dst: Expr) -> Expr {
+        let layout = self.layout_of(typ);
+        let align = Expr::int_constant(layout.align.abi.bytes(), Type::size_t());
+        let cast_dst = dst.cast_to(Type::size_t());
+        let zero = Type::size_t().zero();
+        cast_dst.rem(align).eq(zero)
+    }
+
     pub fn unsupported_msg(item: &str, url: Option<&str>) -> String {
         let mut s = format!("{} is not currently supported by Kani", item);
         if url.is_some() {
diff --git a/tests/kani/Intrinsics/Volatile/store.rs b/tests/kani/Intrinsics/Volatile/store.rs
new file mode 100644
index 0000000000000..ed7ad1ddc8a5d
--- /dev/null
+++ b/tests/kani/Intrinsics/Volatile/store.rs
@@ -0,0 +1,27 @@
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+// Check that `volatile_store` passes when it writes to an aligned value.
+// This example is similar to the one that appears in the documentation for
+// `write_unaligned`:
+// https://doc.rust-lang.org/std/ptr/fn.write_unaligned.html
+#![feature(core_intrinsics)]
+
+// In contrast to the `Packed` struct in `store_fail.rs`, this struct includes
+// padding so that each field is aligned.
+struct NonPacked {
+    _padding: u8,
+    unaligned: u32,
+}
+
+fn main() {
+    let mut packed: NonPacked = unsafe { std::mem::zeroed() };
+    // Take the address of the 32-bit integer, which in this struct is aligned.
+    // `addr_of_mut!` is used for consistency with `store_fail.rs`.
+    let unaligned = std::ptr::addr_of_mut!(packed.unaligned);
+
+    // Store the value with `volatile_store`.
+    // This includes an alignment check for `unaligned` which should pass.
+    unsafe { std::intrinsics::volatile_store(unaligned, 42) };
+    assert!(packed.unaligned == 42);
+}
diff --git a/tests/kani/Intrinsics/Volatile/store_fail.rs b/tests/kani/Intrinsics/Volatile/store_fail.rs
new file mode 100644
index 0000000000000..b369a75f72e15
--- /dev/null
+++ b/tests/kani/Intrinsics/Volatile/store_fail.rs
@@ -0,0 +1,29 @@
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+// kani-verify-fail
+
+// Check that `volatile_store` fails when it writes to an unaligned value.
+// This example is similar to the one that appears in the documentation for
+// `write_unaligned`:
+// https://doc.rust-lang.org/std/ptr/fn.write_unaligned.html
+#![feature(core_intrinsics)]
+
+// `repr(packed)` forces the struct to be stripped of any padding and to align
+// its fields only to one byte.
+#[repr(packed)]
+struct Packed {
+    _padding: u8,
+    unaligned: u32,
+}
+
+fn main() {
+    let mut packed: Packed = unsafe { std::mem::zeroed() };
+    // Take the address of a 32-bit integer which is not aligned.
+    // In contrast to `&packed.unaligned as *mut _`, this has no undefined behavior.
+    let unaligned = std::ptr::addr_of_mut!(packed.unaligned);
+
+    // Store the value with `volatile_store`.
+    // This includes an alignment check for `unaligned` which should fail.
+    unsafe { std::intrinsics::volatile_store(unaligned, 42) };
+    assert!(packed.unaligned == 42);
+}
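
Note (explanatory sketch, not part of the patch): the CBMC expression built by `is_aligned` corresponds to the plain-Rust condition `(dst as usize) % align_of::<T>() == 0`, which `codegen_volatile_store` asserts before performing the write. The helper name `is_aligned_check` below is made up for illustration only:

    use std::mem::align_of;

    /// Mirrors the condition `(dst as usize) % align_of(T) == 0`
    /// that the generated `align_check` assertion encodes.
    fn is_aligned_check<T>(dst: *const T) -> bool {
        (dst as usize) % align_of::<T>() == 0
    }

    fn main() {
        let value: u32 = 42;
        // A `u32` local is 4-byte aligned, so the check holds here.
        assert!(is_aligned_check(&value as *const u32));
    }

For a `#[repr(packed)]` struct like the one in `store_fail.rs`, the field can land at an offset that violates this condition, which is exactly what the new `align_check` assertion reports.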