From 75e040e78b3ca2f773c7dbba0496496ce3887f65 Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Sun, 27 Nov 2022 09:51:58 +0100 Subject: [PATCH 01/22] replace 'locally built rustc' instructions by 'Miri in rustc' --- CONTRIBUTING.md | 61 ++++++++++++------------------------------------- 1 file changed, 14 insertions(+), 47 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 5f46e2af0f9c3..a8a76be7dabb1 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -203,65 +203,32 @@ for more information about configuring VS Code and `rust-analyzer`. [rdg-r-a]: https://rustc-dev-guide.rust-lang.org/building/suggested.html#configuring-rust-analyzer-for-rustc -## Advanced topic: other build environments +## Advanced topic: Working on Miri in the rustc tree We described above the simplest way to get a working build environment for Miri, which is to use the version of rustc indicated by `rustc-version`. But sometimes, that is not enough. -### Building Miri with a locally built rustc +A big part of the Miri driver is shared with rustc, so working on Miri will +sometimes require also working on rustc itself. In this case, you should *not* +work in a clone of the Miri repository, but in a clone of the +[main Rust repository](https://github.com/rust-lang/rust/). There is a copy of +Miri located at `src/tools/miri` that you can work on directly. A maintainer +will eventually sync those changes back into this repository. -[building Miri with a locally built rustc]: #building-miri-with-a-locally-built-rustc +When working on Miri in the rustc tree, here's how you can run tests: -A big part of the Miri driver lives in rustc, so working on Miri will sometimes -require using a locally built rustc. The bug you want to fix may actually be on -the rustc side, or you just need to get more detailed trace of the execution -than what is possible with release builds -- in both cases, you should develop -Miri against a rustc you compiled yourself, with debug assertions (and hence -tracing) enabled. - -The setup for a local rustc works as follows: -```sh -# Clone the rust-lang/rust repo. -git clone https://github.com/rust-lang/rust rustc -cd rustc -# Create a config.toml with defaults for working on Miri. -./x.py setup compiler - # Now edit `config.toml` and under `[rust]` set `debug-assertions = true`. - -# Build a stage 2 rustc, and build the rustc libraries with that rustc. -# This step can take 30 minutes or more. -./x.py build --stage 2 compiler/rustc -# If you change something, you can get a faster rebuild by doing -./x.py build --keep-stage 0 --stage 2 compiler/rustc -# You may have to change the architecture in the next command -rustup toolchain link stage2 build/x86_64-unknown-linux-gnu/stage2 -# Now cd to your Miri directory, then configure rustup -rustup override set stage2 ``` - -Note: When you are working with a locally built rustc or any other toolchain that -is not the same as the one in `rust-version`, you should not have `.auto-everything` or -`.auto-toolchain` as that will keep resetting your toolchain. - -```sh -rm -f .auto-everything .auto-toolchain +./x.py test miri --stage 0 ``` -Important: You need to delete the Miri cache when you change the stdlib; otherwise the -old, chached version will be used. On Linux, the cache is located at `~/.cache/miri`, -and on Windows, it is located at `%LOCALAPPDATA%\rust-lang\miri\cache`; the exact -location is printed after the library build: "A libstd for Miri is now available in ...". 
- -Note: `./x.py --stage 2 compiler/rustc` currently errors with `thread 'main' -panicked at 'fs::read(stamp) failed with No such file or directory (os error 2)`, -you can simply ignore that error; Miri will build anyway. +`--bless` will work, too. -For more information about building and configuring a local compiler, -see . +You can also directly run Miri on a Rust source file: -With this, you should now have a working development setup! See -[above](#building-and-testing-miri) for how to proceed working on Miri. +``` +./x.py run miri --stage 0 --args src/tools/miri/tests/pass/hello.rs +``` ## Advanced topic: Syncing with the rustc repo From 36984a48d7d4e375c4f28904e80d59cea5281214 Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Sun, 27 Nov 2022 12:18:23 +0100 Subject: [PATCH 02/22] refactor scheduling of TLS dtors --- src/concurrency/thread.rs | 177 +++++++++++++++------------------- src/diagnostics.rs | 13 ++- src/eval.rs | 134 +++++++++++++++++--------- src/lib.rs | 7 +- src/machine.rs | 9 +- src/shims/foreign_items.rs | 2 +- src/shims/tls.rs | 187 +++++++++++++++--------------------- src/shims/unix/thread.rs | 2 +- src/shims/windows/thread.rs | 2 +- 9 files changed, 268 insertions(+), 265 deletions(-) diff --git a/src/concurrency/thread.rs b/src/concurrency/thread.rs index dacb3a9b88f8f..fde47ed9ff1ec 100644 --- a/src/concurrency/thread.rs +++ b/src/concurrency/thread.rs @@ -3,6 +3,7 @@ use std::cell::RefCell; use std::collections::hash_map::Entry; use std::num::TryFromIntError; +use std::task::Poll; use std::time::{Duration, SystemTime}; use log::trace; @@ -16,6 +17,7 @@ use rustc_target::spec::abi::Abi; use crate::concurrency::data_race; use crate::concurrency::sync::SynchronizationState; +use crate::shims::tls; use crate::*; #[derive(Clone, Copy, Debug, PartialEq, Eq)] @@ -24,10 +26,8 @@ pub enum SchedulingAction { ExecuteStep, /// Execute a timeout callback. ExecuteTimeoutCallback, - /// Execute destructors of the active thread. - ExecuteDtors, - /// Stop the program. - Stop, + /// Wait for a bit, until there is a timeout to be called. + Sleep(Duration), } /// Trait for callbacks that can be executed when some event happens, such as after a timeout. @@ -41,9 +41,6 @@ type TimeoutCallback<'mir, 'tcx> = Box + 'tcx>; #[derive(Clone, Copy, Debug, PartialOrd, Ord, PartialEq, Eq, Hash)] pub struct ThreadId(u32); -/// The main thread. When it terminates, the whole application terminates. -const MAIN_THREAD: ThreadId = ThreadId(0); - impl ThreadId { pub fn to_u32(self) -> u32 { self.0 @@ -118,6 +115,12 @@ pub struct Thread<'mir, 'tcx> { /// The virtual call stack. stack: Vec>>, + /// The function to call when the stack ran empty, to figure out what to do next. + /// Conceptually, this is the interpreter implementation of the things that happen 'after' the + /// Rust language entry point for this thread returns (usually implemented by the C or OS runtime). + /// (`None` is an error, it means the callback has not been set up yet or is actively running.) + pub(crate) on_stack_empty: Option>, + /// The index of the topmost user-relevant frame in `stack`. This field must contain /// the value produced by `get_top_user_relevant_frame`. /// The `None` state here represents @@ -137,19 +140,10 @@ pub struct Thread<'mir, 'tcx> { pub(crate) last_error: Option>, } -impl<'mir, 'tcx> Thread<'mir, 'tcx> { - /// Check if the thread is done executing (no more stack frames). If yes, - /// change the state to terminated and return `true`. 
- fn check_terminated(&mut self) -> bool { - if self.state == ThreadState::Enabled { - if self.stack.is_empty() { - self.state = ThreadState::Terminated; - return true; - } - } - false - } +pub type StackEmptyCallback<'mir, 'tcx> = + Box) -> InterpResult<'tcx, Poll<()>>>; +impl<'mir, 'tcx> Thread<'mir, 'tcx> { /// Get the name of the current thread, or `` if it was not set. fn thread_name(&self) -> &[u8] { if let Some(ref thread_name) = self.thread_name { thread_name } else { b"" } @@ -202,28 +196,21 @@ impl<'mir, 'tcx> std::fmt::Debug for Thread<'mir, 'tcx> { } } -impl<'mir, 'tcx> Default for Thread<'mir, 'tcx> { - fn default() -> Self { +impl<'mir, 'tcx> Thread<'mir, 'tcx> { + fn new(name: Option<&str>, on_stack_empty: Option>) -> Self { Self { state: ThreadState::Enabled, - thread_name: None, + thread_name: name.map(|name| Vec::from(name.as_bytes())), stack: Vec::new(), top_user_relevant_frame: None, join_status: ThreadJoinStatus::Joinable, panic_payload: None, last_error: None, + on_stack_empty, } } } -impl<'mir, 'tcx> Thread<'mir, 'tcx> { - fn new(name: &str) -> Self { - let mut thread = Thread::default(); - thread.thread_name = Some(Vec::from(name.as_bytes())); - thread - } -} - impl VisitTags for Thread<'_, '_> { fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) { let Thread { @@ -234,6 +221,7 @@ impl VisitTags for Thread<'_, '_> { state: _, thread_name: _, join_status: _, + on_stack_empty: _, // we assume the closure captures no GC-relevant state } = self; panic_payload.visit_tags(visit); @@ -327,22 +315,6 @@ pub struct ThreadManager<'mir, 'tcx> { timeout_callbacks: FxHashMap>, } -impl<'mir, 'tcx> Default for ThreadManager<'mir, 'tcx> { - fn default() -> Self { - let mut threads = IndexVec::new(); - // Create the main thread and add it to the list of threads. - threads.push(Thread::new("main")); - Self { - active_thread: ThreadId::new(0), - threads, - sync: SynchronizationState::default(), - thread_local_alloc_ids: Default::default(), - yield_active_thread: false, - timeout_callbacks: FxHashMap::default(), - } - } -} - impl VisitTags for ThreadManager<'_, '_> { fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) { let ThreadManager { @@ -367,8 +339,28 @@ impl VisitTags for ThreadManager<'_, '_> { } } +impl<'mir, 'tcx> Default for ThreadManager<'mir, 'tcx> { + fn default() -> Self { + let mut threads = IndexVec::new(); + // Create the main thread and add it to the list of threads. + threads.push(Thread::new(Some("main"), None)); + Self { + active_thread: ThreadId::new(0), + threads, + sync: SynchronizationState::default(), + thread_local_alloc_ids: Default::default(), + yield_active_thread: false, + timeout_callbacks: FxHashMap::default(), + } + } +} + impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { - pub(crate) fn init(ecx: &mut MiriInterpCx<'mir, 'tcx>) { + pub(crate) fn init( + ecx: &mut MiriInterpCx<'mir, 'tcx>, + on_main_stack_empty: StackEmptyCallback<'mir, 'tcx>, + ) { + ecx.machine.threads.threads[ThreadId::new(0)].on_stack_empty = Some(on_main_stack_empty); if ecx.tcx.sess.target.os.as_ref() != "windows" { // The main thread can *not* be joined on except on windows. ecx.machine.threads.threads[ThreadId::new(0)].join_status = ThreadJoinStatus::Detached; @@ -411,9 +403,9 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { } /// Create a new thread and returns its id. 
- fn create_thread(&mut self) -> ThreadId { + fn create_thread(&mut self, on_stack_empty: StackEmptyCallback<'mir, 'tcx>) -> ThreadId { let new_thread_id = ThreadId::new(self.threads.len()); - self.threads.push(Default::default()); + self.threads.push(Thread::new(None, Some(on_stack_empty))); new_thread_id } @@ -458,6 +450,7 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { } /// Get a mutable borrow of the currently active thread. + /// (Private for a bit of protection.) fn active_thread_mut(&mut self) -> &mut Thread<'mir, 'tcx> { &mut self.threads[self.active_thread] } @@ -669,18 +662,6 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { /// long as we can and switch only when we have to (the active thread was /// blocked, terminated, or has explicitly asked to be preempted). fn schedule(&mut self, clock: &Clock) -> InterpResult<'tcx, SchedulingAction> { - // Check whether the thread has **just** terminated (`check_terminated` - // checks whether the thread has popped all its stack and if yes, sets - // the thread state to terminated). - if self.threads[self.active_thread].check_terminated() { - return Ok(SchedulingAction::ExecuteDtors); - } - // If we get here again and the thread is *still* terminated, there are no more dtors to run. - if self.threads[MAIN_THREAD].state == ThreadState::Terminated { - // The main thread terminated; stop the program. - // We do *not* run TLS dtors of remaining threads, which seems to match rustc behavior. - return Ok(SchedulingAction::Stop); - } // This thread and the program can keep going. if self.threads[self.active_thread].state == ThreadState::Enabled && !self.yield_active_thread @@ -688,18 +669,18 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { // The currently active thread is still enabled, just continue with it. return Ok(SchedulingAction::ExecuteStep); } - // The active thread yielded. Let's see if there are any timeouts to take care of. We do - // this *before* running any other thread, to ensure that timeouts "in the past" fire before - // any other thread can take an action. This ensures that for `pthread_cond_timedwait`, "an - // error is returned if [...] the absolute time specified by abstime has already been passed - // at the time of the call". + // The active thread yielded or got terminated. Let's see if there are any timeouts to take + // care of. We do this *before* running any other thread, to ensure that timeouts "in the + // past" fire before any other thread can take an action. This ensures that for + // `pthread_cond_timedwait`, "an error is returned if [...] the absolute time specified by + // abstime has already been passed at the time of the call". // let potential_sleep_time = self.timeout_callbacks.values().map(|info| info.call_time.get_wait_time(clock)).min(); if potential_sleep_time == Some(Duration::new(0, 0)) { return Ok(SchedulingAction::ExecuteTimeoutCallback); } - // No callbacks scheduled, pick a regular thread to execute. + // No callbacks immediately scheduled, pick a regular thread to execute. // The active thread blocked or yielded. So we go search for another enabled thread. // Crucially, we start searching at the current active thread ID, rather than at 0, since we // want to avoid always scheduling threads 0 and 1 without ever making progress in thread 2. @@ -730,9 +711,7 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { // All threads are currently blocked, but we have unexecuted // timeout_callbacks, which may unblock some of the threads. Hence, // sleep until the first callback. 
- - clock.sleep(sleep_time); - Ok(SchedulingAction::ExecuteTimeoutCallback) + Ok(SchedulingAction::Sleep(sleep_time)) } else { throw_machine_stop!(TerminationInfo::Deadlock); } @@ -773,18 +752,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { } } + /// Start a regular (non-main) thread. #[inline] - fn create_thread(&mut self) -> ThreadId { - let this = self.eval_context_mut(); - let id = this.machine.threads.create_thread(); - if let Some(data_race) = &mut this.machine.data_race { - data_race.thread_created(&this.machine.threads, id); - } - id - } - - #[inline] - fn start_thread( + fn start_regular_thread( &mut self, thread: Option>, start_routine: Pointer>, @@ -795,7 +765,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { let this = self.eval_context_mut(); // Create the new thread - let new_thread_id = this.create_thread(); + let new_thread_id = this.machine.threads.create_thread({ + let mut state = tls::TlsDtorsState::default(); + Box::new(move |m| state.on_stack_empty(m)) + }); + if let Some(data_race) = &mut this.machine.data_race { + data_race.thread_created(&this.machine.threads, new_thread_id); + } // Write the current thread-id, switch to the next thread later // to treat this write operation as occuring on the current thread. @@ -888,12 +864,6 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { this.machine.threads.get_total_thread_count() } - #[inline] - fn has_terminated(&self, thread_id: ThreadId) -> bool { - let this = self.eval_context_ref(); - this.machine.threads.has_terminated(thread_id) - } - #[inline] fn have_all_terminated(&self) -> bool { let this = self.eval_context_ref(); @@ -943,26 +913,22 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { where 'mir: 'c, { - let this = self.eval_context_ref(); - this.machine.threads.get_thread_name(thread) + self.eval_context_ref().machine.threads.get_thread_name(thread) } #[inline] fn block_thread(&mut self, thread: ThreadId) { - let this = self.eval_context_mut(); - this.machine.threads.block_thread(thread); + self.eval_context_mut().machine.threads.block_thread(thread); } #[inline] fn unblock_thread(&mut self, thread: ThreadId) { - let this = self.eval_context_mut(); - this.machine.threads.unblock_thread(thread); + self.eval_context_mut().machine.threads.unblock_thread(thread); } #[inline] fn yield_active_thread(&mut self) { - let this = self.eval_context_mut(); - this.machine.threads.yield_active_thread(); + self.eval_context_mut().machine.threads.yield_active_thread(); } #[inline] @@ -1024,6 +990,19 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { Ok(()) } + #[inline] + fn run_on_stack_empty(&mut self) -> InterpResult<'tcx, Poll<()>> { + let this = self.eval_context_mut(); + let mut callback = this + .active_thread_mut() + .on_stack_empty + .take() + .expect("`on_stack_empty` not set up, or already running"); + let res = callback(this)?; + this.active_thread_mut().on_stack_empty = Some(callback); + Ok(res) + } + /// Decide which action to take next and on which thread. #[inline] fn schedule(&mut self) -> InterpResult<'tcx, SchedulingAction> { @@ -1034,10 +1013,14 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { /// Handles thread termination of the active thread: wakes up threads joining on this one, /// and deallocated thread-local statics. /// - /// This is called from `tls.rs` after handling the TLS dtors. 
+ /// This is called by the eval loop when a thread's on_stack_empty returns `Ready`. #[inline] fn thread_terminated(&mut self) -> InterpResult<'tcx> { let this = self.eval_context_mut(); + let thread = this.active_thread_mut(); + assert!(thread.stack.is_empty(), "only threads with an empty stack can be terminated"); + thread.state = ThreadState::Terminated; + for ptr in this.machine.threads.thread_terminated(this.machine.data_race.as_mut()) { this.deallocate_ptr(ptr.into(), None, MiriMemoryKind::Tls.into())?; } diff --git a/src/diagnostics.rs b/src/diagnostics.rs index 7658cea10f9f8..5cd0a0eeb58c1 100644 --- a/src/diagnostics.rs +++ b/src/diagnostics.rs @@ -11,7 +11,10 @@ use crate::*; /// Details of premature program termination. pub enum TerminationInfo { - Exit(i64), + Exit { + code: i64, + leak_check: bool, + }, Abort(String), UnsupportedInIsolation(String), StackedBorrowsUb { @@ -38,7 +41,7 @@ impl fmt::Display for TerminationInfo { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { use TerminationInfo::*; match self { - Exit(code) => write!(f, "the evaluated program completed with exit code {code}"), + Exit { code, .. } => write!(f, "the evaluated program completed with exit code {code}"), Abort(msg) => write!(f, "{msg}"), UnsupportedInIsolation(msg) => write!(f, "{msg}"), Int2PtrWithStrictProvenance => @@ -148,11 +151,11 @@ fn prune_stacktrace<'tcx>( /// Emit a custom diagnostic without going through the miri-engine machinery. /// -/// Returns `Some` if this was regular program termination with a given exit code, `None` otherwise. +/// Returns `Some` if this was regular program termination with a given exit code and a `bool` indicating whether a leak check should happen; `None` otherwise. pub fn report_error<'tcx, 'mir>( ecx: &InterpCx<'mir, 'tcx, MiriMachine<'mir, 'tcx>>, e: InterpErrorInfo<'tcx>, -) -> Option { +) -> Option<(i64, bool)> { use InterpError::*; let mut msg = vec![]; @@ -161,7 +164,7 @@ pub fn report_error<'tcx, 'mir>( let info = info.downcast_ref::().expect("invalid MachineStop payload"); use TerminationInfo::*; let title = match info { - Exit(code) => return Some(*code), + Exit { code, leak_check } => return Some((*code, *leak_check)), Abort(_) => Some("abnormal termination"), UnsupportedInIsolation(_) | Int2PtrWithStrictProvenance => Some("unsupported operation"), diff --git a/src/eval.rs b/src/eval.rs index 363b647d6c684..b5d4282094110 100644 --- a/src/eval.rs +++ b/src/eval.rs @@ -4,6 +4,7 @@ use std::ffi::{OsStr, OsString}; use std::iter; use std::panic::{self, AssertUnwindSafe}; use std::path::PathBuf; +use std::task::Poll; use std::thread; use log::info; @@ -20,6 +21,7 @@ use rustc_target::spec::abi::Abi; use rustc_session::config::EntryFnType; +use crate::shims::tls; use crate::*; #[derive(Copy, Clone, Debug, PartialEq)] @@ -172,17 +174,57 @@ impl Default for MiriConfig { } } -/// Returns a freshly created `InterpCx`, along with an `MPlaceTy` representing -/// the location where the return value of the `start` function will be -/// written to. +/// The state of the main thread. Implementation detail of `on_main_stack_empty`. +#[derive(Default, Debug)] +enum MainThreadState { + #[default] + Running, + TlsDtors(tls::TlsDtorsState), +} + +impl MainThreadState { + fn on_main_stack_empty<'tcx>( + &mut self, + this: &mut MiriInterpCx<'_, 'tcx>, + ) -> InterpResult<'tcx, Poll<()>> { + use MainThreadState::*; + match self { + Running => { + *self = TlsDtors(Default::default()); + } + TlsDtors(state) => + match state.on_stack_empty(this)? 
{ + Poll::Pending => {} // just keep going + Poll::Ready(()) => { + // Need to call `thread_terminated` ourselves since we are not going to + // return to the scheduler loop. + this.thread_terminated()?; + // Raise exception to stop program execution. + let ret_place = MPlaceTy::from_aligned_ptr( + this.machine.main_fn_ret_place.unwrap().ptr, + this.machine.layouts.isize, + ); + let exit_code = + this.read_scalar(&ret_place.into())?.to_machine_isize(this)?; + throw_machine_stop!(TerminationInfo::Exit { + code: exit_code, + leak_check: true + }); + } + }, + } + Ok(Poll::Pending) + } +} + +/// Returns a freshly created `InterpCx`. /// Public because this is also used by `priroda`. pub fn create_ecx<'mir, 'tcx: 'mir>( tcx: TyCtxt<'tcx>, entry_id: DefId, entry_type: EntryFnType, config: &MiriConfig, -) -> InterpResult<'tcx, (InterpCx<'mir, 'tcx, MiriMachine<'mir, 'tcx>>, MPlaceTy<'tcx, Provenance>)> -{ +) -> InterpResult<'tcx, InterpCx<'mir, 'tcx, MiriMachine<'mir, 'tcx>>> { let param_env = ty::ParamEnv::reveal_all(); let layout_cx = LayoutCx { tcx, param_env }; let mut ecx = InterpCx::new( @@ -193,7 +235,11 @@ pub fn create_ecx<'mir, 'tcx: 'mir>( ); // Some parts of initialization require a full `InterpCx`. - MiriMachine::late_init(&mut ecx, config)?; + MiriMachine::late_init(&mut ecx, config, { + let mut state = MainThreadState::default(); + // Cannot capture anything GC-relevant here. + Box::new(move |m| state.on_main_stack_empty(m)) + })?; // Make sure we have MIR. We check MIR for some stable monomorphic function in libcore. let sentinel = ecx.try_resolve_path(&["core", "ascii", "escape_default"], Namespace::ValueNS); @@ -274,6 +320,7 @@ pub fn create_ecx<'mir, 'tcx: 'mir>( // Return place (in static memory so that it does not count as leak). let ret_place = ecx.allocate(ecx.machine.layouts.isize, MiriMemoryKind::Machine.into())?; + ecx.machine.main_fn_ret_place = Some(*ret_place); // Call start function. match entry_type { @@ -321,7 +368,7 @@ pub fn create_ecx<'mir, 'tcx: 'mir>( } } - Ok((ecx, ret_place)) + Ok(ecx) } /// Evaluates the entry function specified by `entry_id`. @@ -337,7 +384,7 @@ pub fn eval_entry<'tcx>( // Copy setting before we move `config`. let ignore_leaks = config.ignore_leaks; - let (mut ecx, ret_place) = match create_ecx(tcx, entry_id, entry_type, &config) { + let mut ecx = match create_ecx(tcx, entry_id, entry_type, &config) { Ok(v) => v, Err(err) => { err.print_backtrace(); @@ -346,34 +393,37 @@ pub fn eval_entry<'tcx>( }; // Perform the main execution. - let res: thread::Result> = panic::catch_unwind(AssertUnwindSafe(|| { - // Main loop. + let res: thread::Result> = panic::catch_unwind(AssertUnwindSafe(|| { + // Main loop. Goes on forever until an interrupt is triggered (represented as `InterpError`). loop { match ecx.schedule()? { SchedulingAction::ExecuteStep => { - assert!(ecx.step()?, "a terminated thread was scheduled for execution"); + if !ecx.step()? { + // See if this thread can do something else. + match ecx.run_on_stack_empty()? { + Poll::Pending => {} // keep going + Poll::Ready(()) => ecx.thread_terminated()?, + } + } } SchedulingAction::ExecuteTimeoutCallback => { ecx.run_timeout_callback()?; } - SchedulingAction::ExecuteDtors => { - // This will either enable the thread again (so we go back - // to `ExecuteStep`), or determine that this thread is done - // for good. 
- ecx.schedule_next_tls_dtor_for_active_thread()?; - } - SchedulingAction::Stop => { - break; + SchedulingAction::Sleep(duration) => { + ecx.machine.clock.sleep(duration); } } } - let return_code = ecx.read_scalar(&ret_place.into())?.to_machine_isize(&ecx)?; - Ok(return_code) })); let res = res.unwrap_or_else(|panic_payload| { ecx.handle_ice(); panic::resume_unwind(panic_payload) }); + let res = match res { + Err(res) => res, + // `Ok` can never happen + Ok(never) => match never {}, + }; // Machine cleanup. Only do this if all threads have terminated; threads that are still running // might cause Stacked Borrows errors (https://github.com/rust-lang/miri/issues/2396). @@ -386,32 +436,26 @@ pub fn eval_entry<'tcx>( } // Process the result. - match res { - Ok(return_code) => { - if !ignore_leaks { - // Check for thread leaks. - if !ecx.have_all_terminated() { - tcx.sess.err( - "the main thread terminated without waiting for all remaining threads", - ); - tcx.sess.note_without_error("pass `-Zmiri-ignore-leaks` to disable this check"); - return None; - } - // Check for memory leaks. - info!("Additonal static roots: {:?}", ecx.machine.static_roots); - let leaks = ecx.leak_report(&ecx.machine.static_roots); - if leaks != 0 { - tcx.sess.err("the evaluated program leaked memory"); - tcx.sess.note_without_error("pass `-Zmiri-ignore-leaks` to disable this check"); - // Ignore the provided return code - let the reported error - // determine the return code. - return None; - } - } - Some(return_code) + let (return_code, leak_check) = report_error(&ecx, res)?; + if leak_check && !ignore_leaks { + // Check for thread leaks. + if !ecx.have_all_terminated() { + tcx.sess.err("the main thread terminated without waiting for all remaining threads"); + tcx.sess.note_without_error("pass `-Zmiri-ignore-leaks` to disable this check"); + return None; + } + // Check for memory leaks. + info!("Additonal static roots: {:?}", ecx.machine.static_roots); + let leaks = ecx.leak_report(&ecx.machine.static_roots); + if leaks != 0 { + tcx.sess.err("the evaluated program leaked memory"); + tcx.sess.note_without_error("pass `-Zmiri-ignore-leaks` to disable this check"); + // Ignore the provided return code - let the reported error + // determine the return code. + return None; } - Err(e) => report_error(&ecx, e), } + Some(return_code) } /// Turns an array of arguments into a Windows command line string. 
diff --git a/src/lib.rs b/src/lib.rs index 8913f8aa10fcd..13a8874272a0d 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -81,7 +81,7 @@ pub use crate::shims::intrinsics::EvalContextExt as _; pub use crate::shims::os_str::EvalContextExt as _; pub use crate::shims::panic::{CatchUnwindData, EvalContextExt as _}; pub use crate::shims::time::EvalContextExt as _; -pub use crate::shims::tls::{EvalContextExt as _, TlsData}; +pub use crate::shims::tls::TlsData; pub use crate::shims::EvalContextExt as _; pub use crate::clock::{Clock, Instant}; @@ -89,7 +89,10 @@ pub use crate::concurrency::{ data_race::{AtomicFenceOrd, AtomicReadOrd, AtomicRwOrd, AtomicWriteOrd, EvalContextExt as _}, init_once::{EvalContextExt as _, InitOnceId}, sync::{CondvarId, EvalContextExt as _, MutexId, RwLockId, SyncId}, - thread::{EvalContextExt as _, SchedulingAction, ThreadId, ThreadManager, ThreadState, Time}, + thread::{ + EvalContextExt as _, SchedulingAction, StackEmptyCallback, ThreadId, ThreadManager, + ThreadState, Time, + }, }; pub use crate::diagnostics::{ report_error, EvalContextExt as _, NonHaltingDiagnostic, TerminationInfo, diff --git a/src/machine.rs b/src/machine.rs index edfef211dc675..df1b5064a9cf5 100644 --- a/src/machine.rs +++ b/src/machine.rs @@ -363,6 +363,9 @@ pub struct MiriMachine<'mir, 'tcx> { /// Miri does not expose env vars from the host to the emulated program. pub(crate) env_vars: EnvVars<'tcx>, + /// Return place of the main function. + pub(crate) main_fn_ret_place: Option>, + /// Program arguments (`Option` because we can only initialize them after creating the ecx). /// These are *pointers* to argc/argv because macOS. /// We also need the full command line as one string because of Windows. @@ -492,6 +495,7 @@ impl<'mir, 'tcx> MiriMachine<'mir, 'tcx> { intptrcast: RefCell::new(intptrcast::GlobalStateInner::new(config)), // `env_vars` depends on a full interpreter so we cannot properly initialize it yet. 
env_vars: EnvVars::default(), + main_fn_ret_place: None, argc: None, argv: None, cmd_line: None, @@ -556,10 +560,11 @@ impl<'mir, 'tcx> MiriMachine<'mir, 'tcx> { pub(crate) fn late_init( this: &mut MiriInterpCx<'mir, 'tcx>, config: &MiriConfig, + on_main_stack_empty: StackEmptyCallback<'mir, 'tcx>, ) -> InterpResult<'tcx> { EnvVars::init(this, config)?; MiriMachine::init_extern_statics(this)?; - ThreadManager::init(this); + ThreadManager::init(this, on_main_stack_empty); Ok(()) } @@ -657,6 +662,7 @@ impl VisitTags for MiriMachine<'_, '_> { threads, tls, env_vars, + main_fn_ret_place, argc, argv, cmd_line, @@ -702,6 +708,7 @@ impl VisitTags for MiriMachine<'_, '_> { data_race.visit_tags(visit); stacked_borrows.visit_tags(visit); intptrcast.visit_tags(visit); + main_fn_ret_place.visit_tags(visit); argc.visit_tags(visit); argv.visit_tags(visit); cmd_line.visit_tags(visit); diff --git a/src/shims/foreign_items.rs b/src/shims/foreign_items.rs index 058f730833bb4..f72521f64adaf 100644 --- a/src/shims/foreign_items.rs +++ b/src/shims/foreign_items.rs @@ -286,7 +286,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { let [code] = this.check_shim(abi, exp_abi, link_name, args)?; // it's really u32 for ExitProcess, but we have to put it into the `Exit` variant anyway let code = this.read_scalar(code)?.to_i32()?; - throw_machine_stop!(TerminationInfo::Exit(code.into())); + throw_machine_stop!(TerminationInfo::Exit { code: code.into(), leak_check: false }); } "abort" => { let [] = this.check_shim(abi, Abi::C { unwind: false }, link_name, args)?; diff --git a/src/shims/tls.rs b/src/shims/tls.rs index 5fda8bd7b7de9..65978c9774f51 100644 --- a/src/shims/tls.rs +++ b/src/shims/tls.rs @@ -1,12 +1,11 @@ //! Implement thread-local storage. use std::collections::btree_map::Entry as BTreeEntry; -use std::collections::hash_map::Entry as HashMapEntry; use std::collections::BTreeMap; +use std::task::Poll; use log::trace; -use rustc_data_structures::fx::FxHashMap; use rustc_middle::ty; use rustc_target::abi::{HasDataLayout, Size}; use rustc_target::spec::abi::Abi; @@ -23,12 +22,12 @@ pub struct TlsEntry<'tcx> { dtor: Option>, } -#[derive(Clone, Debug)] -struct RunningDtorsState { +#[derive(Default, Debug)] +struct RunningDtorState { /// The last TlsKey used to retrieve a TLS destructor. `None` means that we /// have not tried to retrieve a TLS destructor yet or that we already tried /// all keys. - last_dtor_key: Option, + last_key: Option, } #[derive(Debug)] @@ -42,11 +41,6 @@ pub struct TlsData<'tcx> { /// A single per thread destructor of the thread local storage (that's how /// things work on macOS) with a data argument. macos_thread_dtors: BTreeMap, Scalar)>, - - /// State for currently running TLS dtors. If this map contains a key for a - /// specific thread, it means that we are in the "destruct" phase, during - /// which some operations are UB. - dtors_running: FxHashMap, } impl<'tcx> Default for TlsData<'tcx> { @@ -55,7 +49,6 @@ impl<'tcx> Default for TlsData<'tcx> { next_key: 1, // start with 1 as we must not use 0 on Windows keys: Default::default(), macos_thread_dtors: Default::default(), - dtors_running: Default::default(), } } } @@ -143,12 +136,6 @@ impl<'tcx> TlsData<'tcx> { dtor: ty::Instance<'tcx>, data: Scalar, ) -> InterpResult<'tcx> { - if self.dtors_running.contains_key(&thread) { - // UB, according to libstd docs. 
- throw_ub_format!( - "setting thread's local storage destructor while destructors are already running" - ); - } if self.macos_thread_dtors.insert(thread, (dtor, data)).is_some() { throw_unsup_format!( "setting more than one thread local storage destructor for the same thread is not supported" @@ -211,21 +198,6 @@ impl<'tcx> TlsData<'tcx> { None } - /// Set that dtors are running for `thread`. It is guaranteed not to change - /// the existing values stored in `dtors_running` for this thread. Returns - /// `true` if dtors for `thread` are already running. - fn set_dtors_running_for_thread(&mut self, thread: ThreadId) -> bool { - match self.dtors_running.entry(thread) { - HashMapEntry::Occupied(_) => true, - HashMapEntry::Vacant(entry) => { - // We cannot just do `self.dtors_running.insert` because that - // would overwrite `last_dtor_key` with `None`. - entry.insert(RunningDtorsState { last_dtor_key: None }); - false - } - } - } - /// Delete all TLS entries for the given thread. This function should be /// called after all TLS destructors have already finished. fn delete_all_thread_tls(&mut self, thread_id: ThreadId) { @@ -237,7 +209,7 @@ impl<'tcx> TlsData<'tcx> { impl VisitTags for TlsData<'_> { fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) { - let TlsData { keys, macos_thread_dtors, next_key: _, dtors_running: _ } = self; + let TlsData { keys, macos_thread_dtors, next_key: _ } = self; for scalar in keys.values().flat_map(|v| v.data.values()) { scalar.visit_tags(visit); @@ -248,13 +220,71 @@ impl VisitTags for TlsData<'_> { } } +#[derive(Debug, Default)] +pub struct TlsDtorsState(TlsDtorsStatePriv); + +#[derive(Debug, Default)] +enum TlsDtorsStatePriv { + #[default] + Init, + PthreadDtors(RunningDtorState), + Done, +} + +impl TlsDtorsState { + pub fn on_stack_empty<'tcx>( + &mut self, + this: &mut MiriInterpCx<'_, 'tcx>, + ) -> InterpResult<'tcx, Poll<()>> { + use TlsDtorsStatePriv::*; + match &mut self.0 { + Init => { + match this.tcx.sess.target.os.as_ref() { + "linux" => { + // Run the pthread dtors. + self.0 = PthreadDtors(Default::default()); + } + "macos" => { + // The macOS thread wide destructor runs "before any TLS slots get + // freed", so do that first. + this.schedule_macos_tls_dtor()?; + // When the stack is empty again, go on with the pthread dtors. + self.0 = PthreadDtors(Default::default()); + } + "windows" => { + // Run the special magic hook. + this.schedule_windows_tls_dtors()?; + // And move to the final state. + self.0 = Done; + } + _ => { + // No TLS support for this platform, directly move to final state. + self.0 = Done; + } + } + } + PthreadDtors(state) => { + match this.schedule_next_pthread_tls_dtor(state)? { + Poll::Pending => {} // just keep going + Poll::Ready(()) => self.0 = Done, + } + } + Done => { + this.machine.tls.delete_all_thread_tls(this.get_active_thread()); + return Ok(Poll::Ready(())); + } + } + + Ok(Poll::Pending) + } +} + impl<'mir, 'tcx: 'mir> EvalContextPrivExt<'mir, 'tcx> for crate::MiriInterpCx<'mir, 'tcx> {} trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { /// Schedule TLS destructors for Windows. /// On windows, TLS destructors are managed by std. fn schedule_windows_tls_dtors(&mut self) -> InterpResult<'tcx> { let this = self.eval_context_mut(); - let active_thread = this.get_active_thread(); // Windows has a special magic linker section that is run on certain events. 
// Instead of searching for that section and supporting arbitrary hooks in there @@ -284,16 +314,12 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { None, StackPopCleanup::Root { cleanup: true }, )?; - - this.enable_thread(active_thread); Ok(()) } /// Schedule the MacOS thread destructor of the thread local storage to be - /// executed. Returns `true` if scheduled. - /// - /// Note: It is safe to call this function also on other Unixes. - fn schedule_macos_tls_dtor(&mut self) -> InterpResult<'tcx, bool> { + /// executed. + fn schedule_macos_tls_dtor(&mut self) -> InterpResult<'tcx> { let this = self.eval_context_mut(); let thread_id = this.get_active_thread(); if let Some((instance, data)) = this.machine.tls.macos_thread_dtors.remove(&thread_id) { @@ -306,35 +332,27 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { None, StackPopCleanup::Root { cleanup: true }, )?; - - // Enable the thread so that it steps through the destructor which - // we just scheduled. Since we deleted the destructor, it is - // guaranteed that we will schedule it again. The `dtors_running` - // flag will prevent the code from adding the destructor again. - this.enable_thread(thread_id); - Ok(true) - } else { - Ok(false) } + Ok(()) } /// Schedule a pthread TLS destructor. Returns `true` if found /// a destructor to schedule, and `false` otherwise. - fn schedule_next_pthread_tls_dtor(&mut self) -> InterpResult<'tcx, bool> { + fn schedule_next_pthread_tls_dtor( + &mut self, + state: &mut RunningDtorState, + ) -> InterpResult<'tcx, Poll<()>> { let this = self.eval_context_mut(); let active_thread = this.get_active_thread(); - assert!(this.has_terminated(active_thread), "running TLS dtors for non-terminated thread"); // Fetch next dtor after `key`. - let last_key = this.machine.tls.dtors_running[&active_thread].last_dtor_key; - let dtor = match this.machine.tls.fetch_tls_dtor(last_key, active_thread) { + let dtor = match this.machine.tls.fetch_tls_dtor(state.last_key, active_thread) { dtor @ Some(_) => dtor, // We ran each dtor once, start over from the beginning. None => this.machine.tls.fetch_tls_dtor(None, active_thread), }; if let Some((instance, ptr, key)) = dtor { - this.machine.tls.dtors_running.get_mut(&active_thread).unwrap().last_dtor_key = - Some(key); + state.last_key = Some(key); trace!("Running TLS dtor {:?} on {:?} at {:?}", instance, ptr, active_thread); assert!( !ptr.to_machine_usize(this).unwrap() != 0, @@ -349,64 +367,9 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { StackPopCleanup::Root { cleanup: true }, )?; - this.enable_thread(active_thread); - return Ok(true); + return Ok(Poll::Pending); } - this.machine.tls.dtors_running.get_mut(&active_thread).unwrap().last_dtor_key = None; - - Ok(false) - } -} -impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriInterpCx<'mir, 'tcx> {} -pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { - /// Schedule an active thread's TLS destructor to run on the active thread. - /// Note that this function does not run the destructors itself, it just - /// schedules them one by one each time it is called and reenables the - /// thread so that it can be executed normally by the main execution loop. - /// - /// Note: we consistently run TLS destructors for all threads, including the - /// main thread. However, it is not clear that we should run the TLS - /// destructors for the main thread. See issue: - /// . 
- fn schedule_next_tls_dtor_for_active_thread(&mut self) -> InterpResult<'tcx> { - let this = self.eval_context_mut(); - let active_thread = this.get_active_thread(); - trace!("schedule_next_tls_dtor_for_active_thread on thread {:?}", active_thread); - - if !this.machine.tls.set_dtors_running_for_thread(active_thread) { - // This is the first time we got asked to schedule a destructor. The - // Windows schedule destructor function must be called exactly once, - // this is why it is in this block. - if this.tcx.sess.target.os == "windows" { - // On Windows, we signal that the thread quit by starting the - // relevant function, reenabling the thread, and going back to - // the scheduler. - this.schedule_windows_tls_dtors()?; - return Ok(()); - } - } - // The remaining dtors make some progress each time around the scheduler loop, - // until they return `false` to indicate that they are done. - - // The macOS thread wide destructor runs "before any TLS slots get - // freed", so do that first. - if this.schedule_macos_tls_dtor()? { - // We have scheduled a MacOS dtor to run on the thread. Execute it - // to completion and come back here. Scheduling a destructor - // destroys it, so we will not enter this branch again. - return Ok(()); - } - if this.schedule_next_pthread_tls_dtor()? { - // We have scheduled a pthread destructor and removed it from the - // destructors list. Run it to completion and come back here. - return Ok(()); - } - - // All dtors done! - this.machine.tls.delete_all_thread_tls(active_thread); - this.thread_terminated()?; - - Ok(()) + Ok(Poll::Ready(())) } } diff --git a/src/shims/unix/thread.rs b/src/shims/unix/thread.rs index b43682710bbe5..5b9dc90f0f006 100644 --- a/src/shims/unix/thread.rs +++ b/src/shims/unix/thread.rs @@ -19,7 +19,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { let func_arg = this.read_immediate(arg)?; - this.start_thread( + this.start_regular_thread( Some(thread_info_place), start_routine, Abi::C { unwind: false }, diff --git a/src/shims/windows/thread.rs b/src/shims/windows/thread.rs index 5ed0cb92f9e34..25a5194caa096 100644 --- a/src/shims/windows/thread.rs +++ b/src/shims/windows/thread.rs @@ -46,7 +46,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { throw_unsup_format!("non-null `lpThreadAttributes` in `CreateThread`") } - this.start_thread( + this.start_regular_thread( thread, start_routine, Abi::System { unwind: false }, From b3bcceaed95eaa6ad3e75867334cab64e73aec2b Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Sun, 27 Nov 2022 12:22:14 +0100 Subject: [PATCH 03/22] cleanup global imports a bit --- src/lib.rs | 5 ++--- src/machine.rs | 8 +++++++- src/stacked_borrows/diagnostics.rs | 4 +++- 3 files changed, 12 insertions(+), 5 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 13a8874272a0d..a759a81b77304 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -90,8 +90,7 @@ pub use crate::concurrency::{ init_once::{EvalContextExt as _, InitOnceId}, sync::{CondvarId, EvalContextExt as _, MutexId, RwLockId, SyncId}, thread::{ - EvalContextExt as _, SchedulingAction, StackEmptyCallback, ThreadId, ThreadManager, - ThreadState, Time, + EvalContextExt as _, SchedulingAction, StackEmptyCallback, ThreadId, ThreadManager, Time, }, }; pub use crate::diagnostics::{ @@ -110,7 +109,7 @@ pub use crate::mono_hash_map::MonoHashMap; pub use crate::operator::EvalContextExt as _; pub use crate::range_map::RangeMap; pub use crate::stacked_borrows::{ - CallId, EvalContextExt as _, Item, Permission, 
RetagFields, SbTag, Stack, Stacks, + CallId, EvalContextExt as _, Item, Permission, RetagFields, SbTag, }; pub use crate::tag_gc::{EvalContextExt as _, VisitTags}; diff --git a/src/machine.rs b/src/machine.rs index df1b5064a9cf5..4d3444bc39cdd 100644 --- a/src/machine.rs +++ b/src/machine.rs @@ -908,7 +908,13 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> { let alloc = alloc.into_owned(); let stacks = ecx.machine.stacked_borrows.as_ref().map(|stacked_borrows| { - Stacks::new_allocation(id, alloc.size(), stacked_borrows, kind, &ecx.machine) + stacked_borrows::Stacks::new_allocation( + id, + alloc.size(), + stacked_borrows, + kind, + &ecx.machine, + ) }); let race_alloc = ecx.machine.data_race.as_ref().map(|data_race| { data_race::AllocExtra::new_allocation( diff --git a/src/stacked_borrows/diagnostics.rs b/src/stacked_borrows/diagnostics.rs index 9970b79f8c7f1..023f6005419a6 100644 --- a/src/stacked_borrows/diagnostics.rs +++ b/src/stacked_borrows/diagnostics.rs @@ -5,7 +5,9 @@ use rustc_middle::mir::interpret::{alloc_range, AllocId, AllocRange}; use rustc_span::{Span, SpanData}; use rustc_target::abi::Size; -use crate::stacked_borrows::{err_sb_ub, AccessKind, GlobalStateInner, Permission, ProtectorKind}; +use crate::stacked_borrows::{ + err_sb_ub, AccessKind, GlobalStateInner, Permission, ProtectorKind, Stack, +}; use crate::*; use rustc_middle::mir::interpret::InterpError; From 3d963fcc83de2dbe225d1877bc457130fe50b84c Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Sun, 27 Nov 2022 12:30:50 +0100 Subject: [PATCH 04/22] yield the main thread a number of times after its TLS dtors are done --- src/eval.rs | 51 +++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 37 insertions(+), 14 deletions(-) diff --git a/src/eval.rs b/src/eval.rs index b5d4282094110..f12c3518e831d 100644 --- a/src/eval.rs +++ b/src/eval.rs @@ -24,6 +24,11 @@ use rustc_session::config::EntryFnType; use crate::shims::tls; use crate::*; +/// When the main thread would exit, we will yield to any other thread that is ready to execute. +/// But we must only do that a finite number of times, or a background thread running `loop {}` +/// will hang the program. +const MAIN_THREAD_YIELDS_AT_SHUTDOWN: u32 = 1_000; + #[derive(Copy, Clone, Debug, PartialEq)] pub enum AlignmentCheck { /// Do not check alignment. @@ -180,6 +185,10 @@ enum MainThreadState { #[default] Running, TlsDtors(tls::TlsDtorsState), + Yield { + remaining: u32, + }, + Done, } impl MainThreadState { @@ -196,22 +205,36 @@ impl MainThreadState { match state.on_stack_empty(this)? { Poll::Pending => {} // just keep going Poll::Ready(()) => { - // Need to call `thread_terminated` ourselves since we are not going to - // return to the scheduler loop. - this.thread_terminated()?; - // Raise exception to stop program execution. - let ret_place = MPlaceTy::from_aligned_ptr( - this.machine.main_fn_ret_place.unwrap().ptr, - this.machine.layouts.isize, - ); - let exit_code = - this.read_scalar(&ret_place.into())?.to_machine_isize(this)?; - throw_machine_stop!(TerminationInfo::Exit { - code: exit_code, - leak_check: true - }); + // Give background threads a chance to finish by yielding the main thread a + // couple of times -- but only if we would also preempt threads randomly. 
+ if this.machine.preemption_rate > 0.0 { + *self = Yield { remaining: MAIN_THREAD_YIELDS_AT_SHUTDOWN }; + } else { + *self = Done; + } } }, + Yield { remaining } => + match remaining.checked_sub(1) { + None => *self = Done, + Some(new_remaining) => { + *remaining = new_remaining; + this.yield_active_thread(); + } + }, + Done => { + // Figure out exit code. + let ret_place = MPlaceTy::from_aligned_ptr( + this.machine.main_fn_ret_place.unwrap().ptr, + this.machine.layouts.isize, + ); + let exit_code = this.read_scalar(&ret_place.into())?.to_machine_isize(this)?; + // Need to call `thread_terminated` ourselves since we are not going to + // return to the scheduler loop. + this.thread_terminated()?; + // Stop interpreter loop. + throw_machine_stop!(TerminationInfo::Exit { code: exit_code, leak_check: true }); + } } Ok(Poll::Pending) } From 50fc4e7a886841ee9c1bca15ee0e433955f12158 Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Sun, 27 Nov 2022 12:57:30 +0100 Subject: [PATCH 05/22] add scoped thread test --- tests/pass/concurrency/scope.rs | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) create mode 100644 tests/pass/concurrency/scope.rs diff --git a/tests/pass/concurrency/scope.rs b/tests/pass/concurrency/scope.rs new file mode 100644 index 0000000000000..ce5d17f5f2dc5 --- /dev/null +++ b/tests/pass/concurrency/scope.rs @@ -0,0 +1,24 @@ +use std::thread; + +fn main() { + let mut a = vec![1, 2, 3]; + let mut x = 0; + + thread::scope(|s| { + s.spawn(|| { + // We can borrow `a` here. + let _s = format!("hello from the first scoped thread: {a:?}"); + }); + s.spawn(|| { + let _s = format!("hello from the second scoped thread"); + // We can even mutably borrow `x` here, + // because no other threads are using it. + x += a[0] + a[2]; + }); + let _s = format!("hello from the main thread"); + }); + + // After the scope, we can modify and access our variables again: + a.push(4); + assert_eq!(x, a.len()); +} From a17287c72bfe25428e96b2a05b4d52707f10096e Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Sun, 27 Nov 2022 13:19:54 +0100 Subject: [PATCH 06/22] fix TLS on partially supported OSes --- src/shims/tls.rs | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/src/shims/tls.rs b/src/shims/tls.rs index 65978c9774f51..fe278ff717f0c 100644 --- a/src/shims/tls.rs +++ b/src/shims/tls.rs @@ -240,7 +240,7 @@ impl TlsDtorsState { match &mut self.0 { Init => { match this.tcx.sess.target.os.as_ref() { - "linux" => { + "linux" | "freebsd" | "android" => { // Run the pthread dtors. self.0 = PthreadDtors(Default::default()); } @@ -257,10 +257,16 @@ impl TlsDtorsState { // And move to the final state. self.0 = Done; } - _ => { - // No TLS support for this platform, directly move to final state. + "wasi" | "none" => { + // No OS, no TLS dtors. + // FIXME: should we do something on wasi? 
self.0 = Done; } + os => { + throw_unsup_format!( + "the TLS machinery does not know how to handle OS `{os}`" + ); + } } } PthreadDtors(state) => { From 4a97fd6c0e0cb1b286f5372ef5d043e829431185 Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Sun, 27 Nov 2022 15:19:00 +0100 Subject: [PATCH 07/22] move interpreter loop into thread manager; they are pretty tightly coupled anyway --- src/concurrency/thread.rs | 120 ++++++++++++++++++++++---------------- src/eval.rs | 30 ++-------- src/lib.rs | 4 +- 3 files changed, 76 insertions(+), 78 deletions(-) diff --git a/src/concurrency/thread.rs b/src/concurrency/thread.rs index fde47ed9ff1ec..900d24443cc96 100644 --- a/src/concurrency/thread.rs +++ b/src/concurrency/thread.rs @@ -21,7 +21,7 @@ use crate::shims::tls; use crate::*; #[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub enum SchedulingAction { +enum SchedulingAction { /// Execute step on the active thread. ExecuteStep, /// Execute a timeout callback. @@ -450,8 +450,7 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { } /// Get a mutable borrow of the currently active thread. - /// (Private for a bit of protection.) - fn active_thread_mut(&mut self) -> &mut Thread<'mir, 'tcx> { + pub fn active_thread_mut(&mut self) -> &mut Thread<'mir, 'tcx> { &mut self.threads[self.active_thread] } @@ -718,6 +717,51 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { } } +impl<'mir, 'tcx: 'mir> EvalContextPrivExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx> {} +trait EvalContextPrivExt<'mir, 'tcx: 'mir>: MiriInterpCxExt<'mir, 'tcx> { + /// Execute a timeout callback on the callback's thread. + #[inline] + fn run_timeout_callback(&mut self) -> InterpResult<'tcx> { + let this = self.eval_context_mut(); + let (thread, callback) = if let Some((thread, callback)) = + this.machine.threads.get_ready_callback(&this.machine.clock) + { + (thread, callback) + } else { + // get_ready_callback can return None if the computer's clock + // was shifted after calling the scheduler and before the call + // to get_ready_callback (see issue + // https://github.com/rust-lang/miri/issues/1763). In this case, + // just do nothing, which effectively just returns to the + // scheduler. + return Ok(()); + }; + // This back-and-forth with `set_active_thread` is here because of two + // design decisions: + // 1. Make the caller and not the callback responsible for changing + // thread. + // 2. Make the scheduler the only place that can change the active + // thread. + let old_thread = this.set_active_thread(thread); + callback.call(this)?; + this.set_active_thread(old_thread); + Ok(()) + } + + #[inline] + fn run_on_stack_empty(&mut self) -> InterpResult<'tcx, Poll<()>> { + let this = self.eval_context_mut(); + let mut callback = this + .active_thread_mut() + .on_stack_empty + .take() + .expect("`on_stack_empty` not set up, or already running"); + let res = callback(this)?; + this.active_thread_mut().on_stack_empty = Some(callback); + Ok(res) + } +} + // Public interface to thread management. impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriInterpCx<'mir, 'tcx> {} pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { @@ -961,53 +1005,29 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { this.machine.threads.unregister_timeout_callback_if_exists(thread); } - /// Execute a timeout callback on the callback's thread. - #[inline] - fn run_timeout_callback(&mut self) -> InterpResult<'tcx> { + /// Run the core interpreter loop. 
Returns only when an interrupt occurs (an error or program + /// termination). + fn run_threads(&mut self) -> InterpResult<'tcx, !> { let this = self.eval_context_mut(); - let (thread, callback) = if let Some((thread, callback)) = - this.machine.threads.get_ready_callback(&this.machine.clock) - { - (thread, callback) - } else { - // get_ready_callback can return None if the computer's clock - // was shifted after calling the scheduler and before the call - // to get_ready_callback (see issue - // https://github.com/rust-lang/miri/issues/1763). In this case, - // just do nothing, which effectively just returns to the - // scheduler. - return Ok(()); - }; - // This back-and-forth with `set_active_thread` is here because of two - // design decisions: - // 1. Make the caller and not the callback responsible for changing - // thread. - // 2. Make the scheduler the only place that can change the active - // thread. - let old_thread = this.set_active_thread(thread); - callback.call(this)?; - this.set_active_thread(old_thread); - Ok(()) - } - - #[inline] - fn run_on_stack_empty(&mut self) -> InterpResult<'tcx, Poll<()>> { - let this = self.eval_context_mut(); - let mut callback = this - .active_thread_mut() - .on_stack_empty - .take() - .expect("`on_stack_empty` not set up, or already running"); - let res = callback(this)?; - this.active_thread_mut().on_stack_empty = Some(callback); - Ok(res) - } - - /// Decide which action to take next and on which thread. - #[inline] - fn schedule(&mut self) -> InterpResult<'tcx, SchedulingAction> { - let this = self.eval_context_mut(); - this.machine.threads.schedule(&this.machine.clock) + loop { + match this.machine.threads.schedule(&this.machine.clock)? { + SchedulingAction::ExecuteStep => { + if !this.step()? { + // See if this thread can do something else. + match this.run_on_stack_empty()? { + Poll::Pending => {} // keep going + Poll::Ready(()) => this.terminate_active_thread()?, + } + } + } + SchedulingAction::ExecuteTimeoutCallback => { + this.run_timeout_callback()?; + } + SchedulingAction::Sleep(duration) => { + this.machine.clock.sleep(duration); + } + } + } } /// Handles thread termination of the active thread: wakes up threads joining on this one, @@ -1015,7 +1035,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { /// /// This is called by the eval loop when a thread's on_stack_empty returns `Ready`. #[inline] - fn thread_terminated(&mut self) -> InterpResult<'tcx> { + fn terminate_active_thread(&mut self) -> InterpResult<'tcx> { let this = self.eval_context_mut(); let thread = this.active_thread_mut(); assert!(thread.stack.is_empty(), "only threads with an empty stack can be terminated"); diff --git a/src/eval.rs b/src/eval.rs index f12c3518e831d..c7f3c9577a875 100644 --- a/src/eval.rs +++ b/src/eval.rs @@ -229,9 +229,9 @@ impl MainThreadState { this.machine.layouts.isize, ); let exit_code = this.read_scalar(&ret_place.into())?.to_machine_isize(this)?; - // Need to call `thread_terminated` ourselves since we are not going to - // return to the scheduler loop. - this.thread_terminated()?; + // Need to call this ourselves since we are not going to return to the scheduler + // loop, and we want the main thread TLS to not show up as memory leaks. + this.terminate_active_thread()?; // Stop interpreter loop. throw_machine_stop!(TerminationInfo::Exit { code: exit_code, leak_check: true }); } @@ -416,28 +416,8 @@ pub fn eval_entry<'tcx>( }; // Perform the main execution. 
- let res: thread::Result> = panic::catch_unwind(AssertUnwindSafe(|| {
- // Main loop. Goes on forever until an interrupt is triggered (represented as `InterpError`).
- loop {
- match ecx.schedule()? {
- SchedulingAction::ExecuteStep => {
- if !ecx.step()? {
- // See if this thread can do something else.
- match ecx.run_on_stack_empty()? {
- Poll::Pending => {} // keep going
- Poll::Ready(()) => ecx.thread_terminated()?,
- }
- }
- }
- SchedulingAction::ExecuteTimeoutCallback => {
- ecx.run_timeout_callback()?;
- }
- SchedulingAction::Sleep(duration) => {
- ecx.machine.clock.sleep(duration);
- }
- }
- }
- }));
+ let res: thread::Result> =
+ panic::catch_unwind(AssertUnwindSafe(|| ecx.run_threads()));
 let res = res.unwrap_or_else(|panic_payload| {
 ecx.handle_ice();
 panic::resume_unwind(panic_payload)
 });
diff --git a/src/lib.rs b/src/lib.rs
index a759a81b77304..6f483cf2cc480 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -89,9 +89,7 @@ pub use crate::concurrency::{
 data_race::{AtomicFenceOrd, AtomicReadOrd, AtomicRwOrd, AtomicWriteOrd, EvalContextExt as _},
 init_once::{EvalContextExt as _, InitOnceId},
 sync::{CondvarId, EvalContextExt as _, MutexId, RwLockId, SyncId},
- thread::{
- EvalContextExt as _, SchedulingAction, StackEmptyCallback, ThreadId, ThreadManager, Time,
- },
+ thread::{EvalContextExt as _, StackEmptyCallback, ThreadId, ThreadManager, Time},
 };
 pub use crate::diagnostics::{
 report_error, EvalContextExt as _, NonHaltingDiagnostic, TerminationInfo,

From b3bcceaed95eaa6ad3e75867334cab64e73aec2b Mon Sep 17 00:00:00 2001
From: Ralf Jung
Date: Mon, 28 Nov 2022 08:40:41 +0100
Subject: [PATCH 08/22] make ./miri run a bit more silent; add option to control seeds tested by many-seeds

---
 cargo-miri/src/setup.rs | 16 +++++++++-------
 miri | 9 +++++++--
 2 files changed, 16 insertions(+), 9 deletions(-)

diff --git a/cargo-miri/src/setup.rs b/cargo-miri/src/setup.rs
index 9c179e82ba137..3ec63ba0f104f 100644
--- a/cargo-miri/src/setup.rs
+++ b/cargo-miri/src/setup.rs
@@ -99,12 +99,10 @@ pub fn setup(subcommand: &MiriCommand, target: &str, rustc_version: &VersionMeta
 // `config.toml`.
 command.env("RUSTC_WRAPPER", "");

- if only_setup {
- if print_sysroot {
- // Be extra sure there is no noise on stdout.
- command.stdout(process::Stdio::null());
- }
+ if only_setup && !print_sysroot {
+ // Forward output.
 } else {
+ // Suppress output.
 command.stdout(process::Stdio::null());
 command.stderr(process::Stdio::null());
 }
@@ -120,7 +118,9 @@ pub fn setup(subcommand: &MiriCommand, target: &str, rustc_version: &VersionMeta
 std::env::set_var("MIRI_SYSROOT", &sysroot_dir);

 // Do the build.
- if only_setup {
+ if print_sysroot {
+ // Be silent.
+ } else if only_setup {
 // We want to be explicit.
 eprintln!("Preparing a sysroot for Miri (target: {target})...");
 } else {
@@ -143,7 +143,9 @@ pub fn setup(subcommand: &MiriCommand, target: &str, rustc_version: &VersionMeta
 )
 }
 });
- if only_setup {
+ if print_sysroot {
+ // Be silent.
+ } else if only_setup {
 eprintln!("A sysroot for Miri is now available in `{}`.", sysroot_dir.display());
 } else {
 eprintln!("done");
diff --git a/miri b/miri
index 38d36898768e1..a259576ed42a0 100755
--- a/miri
+++ b/miri
@@ -36,7 +36,8 @@ Mainly meant to be invoked by rust-analyzer.
 ./miri many-seeds :
 Runs over and over again with different seeds for Miri. The MIRIFLAGS
 variable is set to its original value appended with ` -Zmiri-seed=$SEED` for
-many different seeds.
The MIRI_SEEDS variable controls how many seeds are being
+tried; MIRI_SEED_START controls the first seed to try.
 ./miri bench : Runs the benchmarks from bench-cargo-miri in hyperfine. hyperfine needs to be installed.
@@ -174,7 +175,9 @@ rustc-push)
 fi
 ;;
 many-seeds)
- for SEED in $(seq 0 255); do
+ MIRI_SEED_START=${MIRI_SEED_START:-0} # default to 0
+ MIRI_SEEDS=${MIRI_SEEDS:-256} # default to 256
+ for SEED in $(seq $MIRI_SEED_START $(( $MIRI_SEED_START + $MIRI_SEEDS - 1 )) ); do
 echo "Trying seed: $SEED"
 MIRIFLAGS="$MIRIFLAGS -Zlayout-seed=$SEED -Zmiri-seed=$SEED" $@ || { echo "Failing seed: $SEED"; break; }
 done
@@ -249,6 +252,8 @@ export RUSTFLAGS="-C link-args=-Wl,-rpath,$LIBDIR $RUSTFLAGS"
 # Build a sysroot and set MIRI_SYSROOT to use it. Arguments are passed to `cargo miri setup`.
 build_sysroot() {
 if ! MIRI_SYSROOT="$($CARGO run $CARGO_EXTRA_FLAGS --manifest-path "$MIRIDIR"/cargo-miri/Cargo.toml -- miri setup --print-sysroot "$@")"; then
+ # Run it again so the user can see the error.
+ $CARGO run $CARGO_EXTRA_FLAGS --manifest-path "$MIRIDIR"/cargo-miri/Cargo.toml -- miri setup "$@"
 echo "'cargo miri setup' failed"
 exit 1
 fi

From b3dd24020a53df86e51898581aa61a9261048a94 Mon Sep 17 00:00:00 2001
From: Ralf Jung
Date: Mon, 28 Nov 2022 08:48:49 +0100
Subject: [PATCH 09/22] add many-seeds capabilities to CI

---
 ci.sh | 11 ++++++++---
 tests/many-seeds/scoped-thread-leak.rs | 8 ++++++++
 2 files changed, 16 insertions(+), 3 deletions(-)
 create mode 100644 tests/many-seeds/scoped-thread-leak.rs

diff --git a/ci.sh b/ci.sh
index dd2d2abe35b53..e455b482338f4 100755
--- a/ci.sh
+++ b/ci.sh
@@ -40,10 +40,15 @@ function run_tests {
 ./miri test
 if [ -z "${MIRI_TEST_TARGET+exists}" ]; then
 # Only for host architecture: tests with optimizations (`-O` is what cargo passes, but crank MIR
- # optimizations up all the way).
- # Optimizations change diagnostics (mostly backtraces), so we don't check them
 #FIXME(#2155): we want to only run the pass and panic tests here, not the fail tests.
+ # optimizations up all the way, too).
+ # Optimizations change diagnostics (mostly backtraces), so we don't check
+ # them. Also error locations change so we don't run the failing tests.
 MIRIFLAGS="${MIRIFLAGS:-} -O -Zmir-opt-level=4" MIRI_SKIP_UI_CHECKS=1 ./miri test -- tests/{pass,panic}
+
+ # Also run some many-seeds tests. 64 seeds means this takes around a minute per test.
+ for FILE in tests/many-seeds/*.rs; do
+ MIRI_SEEDS=64 CARGO_EXTRA_FLAGS="$CARGO_EXTRA_FLAGS -q" ./miri many-seeds ./miri run "$FILE"
+ done
 fi
 ## test-cargo-miri
diff --git a/tests/many-seeds/scoped-thread-leak.rs b/tests/many-seeds/scoped-thread-leak.rs
new file mode 100644
index 0000000000000..f28e43696f709
--- /dev/null
+++ b/tests/many-seeds/scoped-thread-leak.rs
@@ -0,0 +1,8 @@
+//! Regression test for https://github.com/rust-lang/miri/issues/2629
+use std::thread;
+
+fn main() {
+ thread::scope(|s| {
+ s.spawn(|| {});
+ });
+}

From 7453d7eec7955e60d6cc7b27d13b483531e44f60 Mon Sep 17 00:00:00 2001
From: Ralf Jung
Date: Mon, 28 Nov 2022 10:27:21 +0100
Subject: [PATCH 10/22] decrease yield count a bit and explain reasoning a bit more

---
 src/eval.rs | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/src/eval.rs b/src/eval.rs
index c7f3c9577a875..b8578b1277f53 100644
--- a/src/eval.rs
+++ b/src/eval.rs
@@ -27,7 +27,7 @@ use crate::*;
 /// When the main thread would exit, we will yield to any other thread that is ready to execute.
/// But we must only do that a finite number of times, or a background thread running `loop {}` /// will hang the program. -const MAIN_THREAD_YIELDS_AT_SHUTDOWN: u32 = 1_000; +const MAIN_THREAD_YIELDS_AT_SHUTDOWN: u32 = 256; #[derive(Copy, Clone, Debug, PartialEq)] pub enum AlignmentCheck { @@ -208,8 +208,12 @@ impl MainThreadState { // Give background threads a chance to finish by yielding the main thread a // couple of times -- but only if we would also preempt threads randomly. if this.machine.preemption_rate > 0.0 { + // There is a non-zero chance they will yield back to us often enough to + // make Miri terminate eventually. *self = Yield { remaining: MAIN_THREAD_YIELDS_AT_SHUTDOWN }; } else { + // The other threads did not get preempted, so no need to yield back to + // them. *self = Done; } } From 94d0fb093a962715647c7eed5ae271a7b84f7bc6 Mon Sep 17 00:00:00 2001 From: Oli Scherer Date: Thu, 1 Dec 2022 14:43:29 +0000 Subject: [PATCH 11/22] Bump ui_test crate --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- tests/fail/function_calls/exported_symbol_abi_mismatch.rs | 6 +++--- tests/fail/function_calls/exported_symbol_bad_unwind2.rs | 6 +++--- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 15fc89b86818a..876d49257caa5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -724,9 +724,9 @@ dependencies = [ [[package]] name = "ui_test" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf4559da3fe6b481f8674a29379677cb9606cd6f75fc254a2c9834c55638503d" +checksum = "54ddb6f31025943e2f9d59237f433711c461a43d9415974c3eb3a4902edc1c1f" dependencies = [ "bstr", "cargo_metadata", diff --git a/Cargo.toml b/Cargo.toml index 0f69a0baef4fb..717020f43c4f0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -39,7 +39,7 @@ libloading = "0.7" [dev-dependencies] colored = "2" -ui_test = "0.4" +ui_test = "0.5" rustc_version = "0.4" # Features chosen to match those required by env_logger, to avoid rebuilds regex = { version = "1.5.5", default-features = false, features = ["perf", "std"] } diff --git a/tests/fail/function_calls/exported_symbol_abi_mismatch.rs b/tests/fail/function_calls/exported_symbol_abi_mismatch.rs index dbf72b5b61ad9..50a0e8e6edef8 100644 --- a/tests/fail/function_calls/exported_symbol_abi_mismatch.rs +++ b/tests/fail/function_calls/exported_symbol_abi_mismatch.rs @@ -12,7 +12,7 @@ fn main() { #[cfg(fn_ptr)] unsafe { std::mem::transmute::(foo)(); - //[fn_ptr]~^ ERROR: calling a function with calling convention Rust using calling convention C + //~[fn_ptr]^ ERROR: calling a function with calling convention Rust using calling convention C } // `Instance` caching should not suppress ABI check. 
@@ -28,8 +28,8 @@ fn main() { } unsafe { foo(); - //[no_cache]~^ ERROR: calling a function with calling convention Rust using calling convention C - //[cache]~| ERROR: calling a function with calling convention Rust using calling convention C + //~[no_cache]^ ERROR: calling a function with calling convention Rust using calling convention C + //~[cache]| ERROR: calling a function with calling convention Rust using calling convention C } } } diff --git a/tests/fail/function_calls/exported_symbol_bad_unwind2.rs b/tests/fail/function_calls/exported_symbol_bad_unwind2.rs index f85ad5ae5072f..554cbe09cf03d 100644 --- a/tests/fail/function_calls/exported_symbol_bad_unwind2.rs +++ b/tests/fail/function_calls/exported_symbol_bad_unwind2.rs @@ -4,8 +4,8 @@ #[cfg_attr(any(definition, both), rustc_nounwind)] #[no_mangle] extern "C-unwind" fn nounwind() { - //[definition]~^ ERROR: abnormal termination: the program aborted execution - //[both]~^^ ERROR: abnormal termination: the program aborted execution + //~[definition]^ ERROR: abnormal termination: the program aborted execution + //~[both]^^ ERROR: abnormal termination: the program aborted execution panic!(); } @@ -15,5 +15,5 @@ fn main() { fn nounwind(); } unsafe { nounwind() } - //[extern_block]~^ ERROR: unwinding past a stack frame that does not allow unwinding + //~[extern_block]^ ERROR: unwinding past a stack frame that does not allow unwinding } From 11e8233c7a2f03847de7721e671d1e3f1feec483 Mon Sep 17 00:00:00 2001 From: Vanille-N Date: Fri, 2 Dec 2022 13:23:37 +0100 Subject: [PATCH 12/22] extract common borrow tracking logic --- src/borrow_tracker/mod.rs | 365 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 365 insertions(+) create mode 100644 src/borrow_tracker/mod.rs diff --git a/src/borrow_tracker/mod.rs b/src/borrow_tracker/mod.rs new file mode 100644 index 0000000000000..69f9bd09f26de --- /dev/null +++ b/src/borrow_tracker/mod.rs @@ -0,0 +1,365 @@ +use std::cell::RefCell; +use std::fmt; +use std::num::NonZeroU64; + +use log::trace; +use smallvec::SmallVec; + +use rustc_data_structures::fx::{FxHashMap, FxHashSet}; +use rustc_middle::mir::RetagKind; +use rustc_target::abi::Size; + +use crate::*; +pub mod stacked_borrows; +use stacked_borrows::diagnostics::RetagCause; + +pub type CallId = NonZeroU64; + +/// Tracking pointer provenance +#[derive(Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)] +pub struct BorTag(NonZeroU64); + +impl BorTag { + pub fn new(i: u64) -> Option { + NonZeroU64::new(i).map(BorTag) + } + + pub fn get(&self) -> u64 { + self.0.get() + } + + pub fn inner(&self) -> NonZeroU64 { + self.0 + } + + pub fn succ(self) -> Option { + self.0.checked_add(1).map(Self) + } + + /// The minimum representable tag + pub fn one() -> Self { + Self::new(1).unwrap() + } +} + +impl std::default::Default for BorTag { + /// The default to be used when borrow tracking is disabled + fn default() -> Self { + Self::one() + } +} + +impl fmt::Debug for BorTag { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "<{}>", self.0) + } +} + +/// Per-frame data for borrow tracking +#[derive(Debug)] +pub struct FrameExtra { + /// The ID of the call this frame corresponds to. + pub call_id: CallId, + + /// If this frame is protecting any tags, they are listed here. 
We use this list to do + /// incremental updates of the global list of protected tags stored in the + /// `stacked_borrows::GlobalState` upon function return, and if we attempt to pop a protected + /// tag, to identify which call is responsible for protecting the tag. + /// See `Stack::item_popped` for more explanation. + /// + /// This will contain one tag per reference passed to the function, so + /// a size of 2 is enough for the vast majority of functions. + pub protected_tags: SmallVec<[BorTag; 2]>, +} + +impl VisitTags for FrameExtra { + fn visit_tags(&self, _visit: &mut dyn FnMut(BorTag)) { + // `protected_tags` are fine to GC. + } +} + +/// Extra global state, available to the memory access hooks. +#[derive(Debug)] +pub struct GlobalStateInner { + /// Borrow tracker method currently in use. + pub borrow_tracker_method: BorrowTrackerMethod, + /// Next unused pointer ID (tag). + pub next_ptr_tag: BorTag, + /// Table storing the "base" tag for each allocation. + /// The base tag is the one used for the initial pointer. + /// We need this in a separate table to handle cyclic statics. + pub base_ptr_tags: FxHashMap, + /// Next unused call ID (for protectors). + pub next_call_id: CallId, + /// All currently protected tags. + /// An item is protected if its tag is in this set, *and* it has the "protected" bit set. + /// We add tags to this when they are created with a protector in `reborrow`, and + /// we remove tags from this when the call which is protecting them returns, in + /// `GlobalStateInner::end_call`. See `Stack::item_popped` for more details. + pub protected_tags: FxHashMap, + /// The pointer ids to trace + pub tracked_pointer_tags: FxHashSet, + /// The call ids to trace + pub tracked_call_ids: FxHashSet, + /// Whether to recurse into datatypes when searching for pointers to retag. + pub retag_fields: RetagFields, +} + +impl VisitTags for GlobalStateInner { + fn visit_tags(&self, _visit: &mut dyn FnMut(BorTag)) { + // The only candidate is base_ptr_tags, and that does not need visiting since we don't ever + // GC the bottommost tag. + } +} + +/// We need interior mutable access to the global state. +pub type GlobalState = RefCell; + +/// Indicates which kind of access is being performed. +#[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)] +pub enum AccessKind { + Read, + Write, +} + +impl fmt::Display for AccessKind { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + AccessKind::Read => write!(f, "read access"), + AccessKind::Write => write!(f, "write access"), + } + } +} + +/// Policy on whether to recurse into fields to retag +#[derive(Copy, Clone, Debug)] +pub enum RetagFields { + /// Don't retag any fields. + No, + /// Retag all fields. + Yes, + /// Only retag fields of types with Scalar and ScalarPair layout, + /// to match the LLVM `noalias` we generate. + OnlyScalar, +} + +/// The flavor of the protector. +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub enum ProtectorKind { + /// Protected against aliasing violations from other pointers. + /// + /// Items protected like this cause UB when they are invalidated, *but* the pointer itself may + /// still be used to issue a deallocation. + /// + /// This is required for LLVM IR pointers that are `noalias` but *not* `dereferenceable`. + WeakProtector, + + /// Protected against any kind of invalidation. + /// + /// Items protected like this cause UB when they are invalidated or the memory is deallocated. + /// This is strictly stronger protection than `WeakProtector`. 
+ /// + /// This is required for LLVM IR pointers that are `dereferenceable` (and also allows `noalias`). + StrongProtector, +} + +/// Utilities for initialization and ID generation +impl GlobalStateInner { + pub fn new( + borrow_tracker_method: BorrowTrackerMethod, + tracked_pointer_tags: FxHashSet, + tracked_call_ids: FxHashSet, + retag_fields: RetagFields, + ) -> Self { + GlobalStateInner { + borrow_tracker_method, + next_ptr_tag: BorTag::one(), + base_ptr_tags: FxHashMap::default(), + next_call_id: NonZeroU64::new(1).unwrap(), + protected_tags: FxHashMap::default(), + tracked_pointer_tags, + tracked_call_ids, + retag_fields, + } + } + + /// Generates a new pointer tag. Remember to also check track_pointer_tags and log its creation! + pub fn new_ptr(&mut self) -> BorTag { + let id = self.next_ptr_tag; + self.next_ptr_tag = id.succ().unwrap(); + id + } + + pub fn new_frame(&mut self, machine: &MiriMachine<'_, '_>) -> FrameExtra { + let call_id = self.next_call_id; + trace!("new_frame: Assigning call ID {}", call_id); + if self.tracked_call_ids.contains(&call_id) { + machine.emit_diagnostic(NonHaltingDiagnostic::CreatedCallId(call_id)); + } + self.next_call_id = NonZeroU64::new(call_id.get() + 1).unwrap(); + FrameExtra { call_id, protected_tags: SmallVec::new() } + } + + pub fn end_call(&mut self, frame: &machine::FrameData<'_>) { + for tag in &frame + .borrow_tracker + .as_ref() + .expect("we should have borrow tracking data") + .protected_tags + { + self.protected_tags.remove(tag); + } + } + + pub fn base_ptr_tag(&mut self, id: AllocId, machine: &MiriMachine<'_, '_>) -> BorTag { + self.base_ptr_tags.get(&id).copied().unwrap_or_else(|| { + let tag = self.new_ptr(); + if self.tracked_pointer_tags.contains(&tag) { + machine.emit_diagnostic(NonHaltingDiagnostic::CreatedPointerTag( + tag.inner(), + None, + None, + )); + } + trace!("New allocation {:?} has base tag {:?}", id, tag); + self.base_ptr_tags.try_insert(id, tag).unwrap(); + tag + }) + } +} + +/// Which borrow tracking method to use +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub enum BorrowTrackerMethod { + /// Stacked Borrows, as implemented in borrow_tracker/stacked + StackedBorrows, +} + +impl BorrowTrackerMethod { + pub fn instanciate_global_state(self, config: &MiriConfig) -> GlobalState { + RefCell::new(GlobalStateInner::new( + self, + config.tracked_pointer_tags.clone(), + config.tracked_call_ids.clone(), + config.retag_fields, + )) + } +} + +impl GlobalStateInner { + pub fn new_allocation( + &mut self, + id: AllocId, + alloc_size: Size, + kind: MemoryKind, + machine: &MiriMachine<'_, '_>, + ) -> AllocExtra { + match self.borrow_tracker_method { + BorrowTrackerMethod::StackedBorrows => + AllocExtra::StackedBorrows(Box::new(RefCell::new(Stacks::new_allocation( + id, alloc_size, self, kind, machine, + )))), + } + } +} + +impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriInterpCx<'mir, 'tcx> {} +pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { + fn retag(&mut self, kind: RetagKind, place: &PlaceTy<'tcx, Provenance>) -> InterpResult<'tcx> { + let this = self.eval_context_mut(); + let method = this.machine.borrow_tracker.as_ref().unwrap().borrow().borrow_tracker_method; + match method { + BorrowTrackerMethod::StackedBorrows => this.sb_retag(kind, place), + } + } + + fn retag_return_place(&mut self) -> InterpResult<'tcx> { + let this = self.eval_context_mut(); + let method = this.machine.borrow_tracker.as_ref().unwrap().borrow().borrow_tracker_method; + match method { + 
BorrowTrackerMethod::StackedBorrows => this.sb_retag_return_place(), + } + } + + fn expose_tag(&mut self, alloc_id: AllocId, tag: BorTag) -> InterpResult<'tcx> { + let this = self.eval_context_mut(); + let method = this.machine.borrow_tracker.as_ref().unwrap().borrow().borrow_tracker_method; + match method { + BorrowTrackerMethod::StackedBorrows => this.sb_expose_tag(alloc_id, tag), + } + } +} + +/// Extra per-allocation data for borrow tracking +#[derive(Debug, Clone)] +pub enum AllocExtra { + /// Data corresponding to Stacked Borrows + StackedBorrows(Box>), +} + +impl AllocExtra { + pub fn assert_sb(&self) -> &RefCell { + match self { + AllocExtra::StackedBorrows(ref sb) => sb, + } + } + + pub fn assert_sb_mut(&mut self) -> &mut RefCell { + match self { + AllocExtra::StackedBorrows(ref mut sb) => sb, + } + } + + pub fn before_memory_read<'tcx>( + &self, + alloc_id: AllocId, + prov_extra: ProvenanceExtra, + range: AllocRange, + machine: &MiriMachine<'_, 'tcx>, + ) -> InterpResult<'tcx> { + match self { + AllocExtra::StackedBorrows(sb) => + sb.borrow_mut().before_memory_read(alloc_id, prov_extra, range, machine), + } + } + + pub fn before_memory_write<'tcx>( + &mut self, + alloc_id: AllocId, + prov_extra: ProvenanceExtra, + range: AllocRange, + machine: &mut MiriMachine<'_, 'tcx>, + ) -> InterpResult<'tcx> { + match self { + AllocExtra::StackedBorrows(sb) => + sb.get_mut().before_memory_write(alloc_id, prov_extra, range, machine), + } + } + + pub fn before_memory_deallocation<'tcx>( + &mut self, + alloc_id: AllocId, + prov_extra: ProvenanceExtra, + range: AllocRange, + machine: &mut MiriMachine<'_, 'tcx>, + ) -> InterpResult<'tcx> { + match self { + AllocExtra::StackedBorrows(sb) => + sb.get_mut().before_memory_deallocation(alloc_id, prov_extra, range, machine), + } + } + + pub fn remove_unreachable_tags(&self, tags: &FxHashSet) { + match self { + AllocExtra::StackedBorrows(sb) => sb.borrow_mut().remove_unreachable_tags(tags), + } + } +} + +impl VisitTags for AllocExtra { + fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) { + match self { + AllocExtra::StackedBorrows(sb) => sb.visit_tags(visit), + } + } +} From 30cc8eeffa3fd0df8456915ee27d08a38fd4704e Mon Sep 17 00:00:00 2001 From: Vanille-N Date: Fri, 2 Dec 2022 13:23:57 +0100 Subject: [PATCH 13/22] move stacked_borrows to borrow_tracker/stacked_borrows --- .../stacked_borrows/diagnostics.rs | 25 +- .../stacked_borrows/item.rs | 16 +- .../stacked_borrows/mod.rs | 343 +++++------------- .../stacked_borrows/stack.rs | 23 +- 4 files changed, 116 insertions(+), 291 deletions(-) rename src/{ => borrow_tracker}/stacked_borrows/diagnostics.rs (97%) rename src/{ => borrow_tracker}/stacked_borrows/item.rs (89%) rename src/{ => borrow_tracker}/stacked_borrows/mod.rs (80%) rename src/{ => borrow_tracker}/stacked_borrows/stack.rs (96%) diff --git a/src/stacked_borrows/diagnostics.rs b/src/borrow_tracker/stacked_borrows/diagnostics.rs similarity index 97% rename from src/stacked_borrows/diagnostics.rs rename to src/borrow_tracker/stacked_borrows/diagnostics.rs index 023f6005419a6..c5eb2113f9f8e 100644 --- a/src/stacked_borrows/diagnostics.rs +++ b/src/borrow_tracker/stacked_borrows/diagnostics.rs @@ -1,17 +1,16 @@ use smallvec::SmallVec; use std::fmt; -use rustc_middle::mir::interpret::{alloc_range, AllocId, AllocRange}; +use rustc_middle::mir::interpret::{alloc_range, AllocId, AllocRange, InterpError}; use rustc_span::{Span, SpanData}; use rustc_target::abi::Size; -use crate::stacked_borrows::{ - err_sb_ub, AccessKind, GlobalStateInner, 
Permission, ProtectorKind, Stack, +use crate::borrow_tracker::{ + stacked_borrows::{err_sb_ub, Permission}, + AccessKind, GlobalStateInner, ProtectorKind, }; use crate::*; -use rustc_middle::mir::interpret::InterpError; - #[derive(Clone, Debug)] pub struct AllocHistory { id: AllocId, @@ -53,7 +52,7 @@ impl Creation { #[derive(Clone, Debug)] struct Invalidation { - tag: SbTag, + tag: BorTag, range: AllocRange, span: Span, cause: InvalidationCause, @@ -100,7 +99,7 @@ impl fmt::Display for InvalidationCause { #[derive(Clone, Debug)] struct Protection { - tag: SbTag, + tag: BorTag, span: Span, } @@ -135,7 +134,7 @@ impl<'ecx, 'mir, 'tcx> DiagnosticCxBuilder<'ecx, 'mir, 'tcx> { pub fn retag( machine: &'ecx MiriMachine<'mir, 'tcx>, cause: RetagCause, - new_tag: SbTag, + new_tag: BorTag, orig_tag: ProvenanceExtra, range: AllocRange, ) -> Self { @@ -185,7 +184,7 @@ enum Operation { #[derive(Debug, Clone)] struct RetagOp { cause: RetagCause, - new_tag: SbTag, + new_tag: BorTag, orig_tag: ProvenanceExtra, range: AllocRange, permission: Option, @@ -257,7 +256,7 @@ impl<'history, 'ecx, 'mir, 'tcx> DiagnosticCx<'history, 'ecx, 'mir, 'tcx> { .push(Creation { retag: op.clone(), span: self.machine.current_span() }); } - pub fn log_invalidation(&mut self, tag: SbTag) { + pub fn log_invalidation(&mut self, tag: BorTag) { let mut span = self.machine.current_span(); let (range, cause) = match &self.operation { Operation::Retag(RetagOp { cause, range, permission, .. }) => { @@ -288,8 +287,8 @@ impl<'history, 'ecx, 'mir, 'tcx> DiagnosticCx<'history, 'ecx, 'mir, 'tcx> { pub fn get_logs_relevant_to( &self, - tag: SbTag, - protector_tag: Option, + tag: BorTag, + protector_tag: Option, ) -> Option { let Some(created) = self.history .creations @@ -410,7 +409,7 @@ impl<'history, 'ecx, 'mir, 'tcx> DiagnosticCx<'history, 'ecx, 'mir, 'tcx> { .all_stacks() .flatten() .map(|frame| { - frame.extra.stacked_borrows.as_ref().expect("we should have Stacked Borrows data") + frame.extra.borrow_tracker.as_ref().expect("we should have borrow tracking data") }) .find(|frame| frame.protected_tags.contains(&item.tag())) .map(|frame| frame.call_id) diff --git a/src/stacked_borrows/item.rs b/src/borrow_tracker/stacked_borrows/item.rs similarity index 89% rename from src/stacked_borrows/item.rs rename to src/borrow_tracker/stacked_borrows/item.rs index 709b27d191b26..b9a52e4966cd7 100644 --- a/src/stacked_borrows/item.rs +++ b/src/borrow_tracker/stacked_borrows/item.rs @@ -1,13 +1,13 @@ -use crate::stacked_borrows::SbTag; use std::fmt; -use std::num::NonZeroU64; + +use crate::borrow_tracker::BorTag; /// An item in the per-location borrow stack. #[derive(Copy, Clone, Hash, PartialEq, Eq)] pub struct Item(u64); // An Item contains 3 bitfields: -// * Bits 0-61 store an SbTag +// * Bits 0-61 store a BorTag // * Bits 61-63 store a Permission // * Bit 64 stores a flag which indicates if we have a protector const TAG_MASK: u64 = u64::MAX >> 3; @@ -18,9 +18,9 @@ const PERM_SHIFT: u64 = 61; const PROTECTED_SHIFT: u64 = 63; impl Item { - pub fn new(tag: SbTag, perm: Permission, protected: bool) -> Self { - assert!(tag.0.get() <= TAG_MASK); - let packed_tag = tag.0.get(); + pub fn new(tag: BorTag, perm: Permission, protected: bool) -> Self { + assert!(tag.get() <= TAG_MASK); + let packed_tag = tag.get(); let packed_perm = perm.to_bits() << PERM_SHIFT; let packed_protected = u64::from(protected) << PROTECTED_SHIFT; @@ -34,8 +34,8 @@ impl Item { } /// The pointers the permission is granted to. 
- pub fn tag(self) -> SbTag { - SbTag(NonZeroU64::new(self.0 & TAG_MASK).unwrap()) + pub fn tag(self) -> BorTag { + BorTag::new(self.0 & TAG_MASK).unwrap() } /// The permission this item grants. diff --git a/src/stacked_borrows/mod.rs b/src/borrow_tracker/stacked_borrows/mod.rs similarity index 80% rename from src/stacked_borrows/mod.rs rename to src/borrow_tracker/stacked_borrows/mod.rs index 4e369f4291a3f..ec3be398a2c29 100644 --- a/src/stacked_borrows/mod.rs +++ b/src/borrow_tracker/stacked_borrows/mod.rs @@ -2,81 +2,30 @@ //! for further information. use log::trace; -use std::cell::RefCell; use std::cmp; -use std::fmt; -use std::fmt::Write; -use std::num::NonZeroU64; +use std::fmt::{self, Write}; -use rustc_data_structures::fx::{FxHashMap, FxHashSet}; -use rustc_hir::Mutability; -use rustc_middle::mir::RetagKind; +use rustc_data_structures::fx::FxHashSet; +use rustc_middle::mir::{Mutability, RetagKind}; use rustc_middle::ty::{ self, layout::{HasParamEnv, LayoutOf}, }; -use rustc_target::abi::Abi; -use rustc_target::abi::Size; -use smallvec::SmallVec; +use rustc_target::abi::{Abi, Size}; +use crate::borrow_tracker::{ + stacked_borrows::diagnostics::{AllocHistory, DiagnosticCx, DiagnosticCxBuilder, TagHistory}, + AccessKind, GlobalStateInner, ProtectorKind, RetagCause, RetagFields, +}; use crate::*; -pub mod diagnostics; -use diagnostics::{AllocHistory, DiagnosticCx, DiagnosticCxBuilder, RetagCause, TagHistory}; - mod item; pub use item::{Item, Permission}; mod stack; pub use stack::Stack; +pub mod diagnostics; -pub type CallId = NonZeroU64; - -// Even reading memory can have effects on the stack, so we need a `RefCell` here. -pub type AllocExtra = RefCell; - -/// Tracking pointer provenance -#[derive(Copy, Clone, Hash, PartialEq, Eq)] -pub struct SbTag(NonZeroU64); - -impl SbTag { - pub fn new(i: u64) -> Option { - NonZeroU64::new(i).map(SbTag) - } - - // The default to be used when SB is disabled - #[allow(clippy::should_implement_trait)] - pub fn default() -> Self { - Self::new(1).unwrap() - } -} - -impl fmt::Debug for SbTag { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "<{}>", self.0) - } -} - -#[derive(Debug)] -pub struct FrameExtra { - /// The ID of the call this frame corresponds to. - call_id: CallId, - - /// If this frame is protecting any tags, they are listed here. We use this list to do - /// incremental updates of the global list of protected tags stored in the - /// `stacked_borrows::GlobalState` upon function return, and if we attempt to pop a protected - /// tag, to identify which call is responsible for protecting the tag. - /// See `Stack::item_invalidated` for more explanation. - /// - /// This will contain one tag per reference passed to the function, so - /// a size of 2 is enough for the vast majority of functions. - protected_tags: SmallVec<[SbTag; 2]>, -} - -impl VisitTags for FrameExtra { - fn visit_tags(&self, _visit: &mut dyn FnMut(SbTag)) { - // `protected_tags` are fine to GC. - } -} +pub type AllocExtra = Stacks; /// Extra per-allocation state. #[derive(Clone, Debug)] @@ -86,98 +35,16 @@ pub struct Stacks { /// Stores past operations on this allocation history: AllocHistory, /// The set of tags that have been exposed inside this allocation. - exposed_tags: FxHashSet, + exposed_tags: FxHashSet, /// Whether this memory has been modified since the last time the tag GC ran modified_since_last_gc: bool, } -/// The flavor of the protector. 
-#[derive(Copy, Clone, Debug)] -enum ProtectorKind { - /// Protected against aliasing violations from other pointers. - /// - /// Items protected like this cause UB when they are invalidated, *but* the pointer itself may - /// still be used to issue a deallocation. - /// - /// This is required for LLVM IR pointers that are `noalias` but *not* `dereferenceable`. - WeakProtector, - - /// Protected against any kind of invalidation. - /// - /// Items protected like this cause UB when they are invalidated or the memory is deallocated. - /// This is strictly stronger protection than `WeakProtector`. - /// - /// This is required for LLVM IR pointers that are `dereferenceable` (and also allows `noalias`). - StrongProtector, -} - -/// Extra global state, available to the memory access hooks. -#[derive(Debug)] -pub struct GlobalStateInner { - /// Next unused pointer ID (tag). - next_ptr_tag: SbTag, - /// Table storing the "base" tag for each allocation. - /// The base tag is the one used for the initial pointer. - /// We need this in a separate table to handle cyclic statics. - base_ptr_tags: FxHashMap, - /// Next unused call ID (for protectors). - next_call_id: CallId, - /// All currently protected tags, and the status of their protection. - /// An item is protected if its tag is in this set, *and* it has the "protected" bit set. - /// We add tags to this when they are created with a protector in `reborrow`, and - /// we remove tags from this when the call which is protecting them returns, in - /// `GlobalStateInner::end_call`. See `Stack::item_invalidated` for more details. - protected_tags: FxHashMap, - /// The pointer ids to trace - tracked_pointer_tags: FxHashSet, - /// The call ids to trace - tracked_call_ids: FxHashSet, - /// Whether to recurse into datatypes when searching for pointers to retag. - retag_fields: RetagFields, -} - -#[derive(Copy, Clone, Debug)] -pub enum RetagFields { - /// Don't retag any fields. - No, - /// Retag all fields. - Yes, - /// Only retag fields of types with Scalar and ScalarPair layout, - /// to match the LLVM `noalias` we generate. - OnlyScalar, -} - -impl VisitTags for GlobalStateInner { - fn visit_tags(&self, _visit: &mut dyn FnMut(SbTag)) { - // The only candidate is base_ptr_tags, and that does not need visiting since we don't ever - // GC the bottommost tag. - } -} - -/// We need interior mutable access to the global state. -pub type GlobalState = RefCell; - -/// Indicates which kind of access is being performed. -#[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)] -pub enum AccessKind { - Read, - Write, -} - -impl fmt::Display for AccessKind { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - AccessKind::Read => write!(f, "read access"), - AccessKind::Write => write!(f, "write access"), - } - } -} - /// Indicates which kind of reference is being created. /// Used by high-level `reborrow` to compute which permissions to grant to the /// new pointer. #[derive(Copy, Clone, Hash, PartialEq, Eq)] -pub enum RefKind { +enum RefKind { /// `&mut` and `Box`. Unique { two_phase: bool }, /// `&` with or without interior mutability. 
@@ -198,65 +65,6 @@ impl fmt::Display for RefKind { } } -/// Utilities for initialization and ID generation -impl GlobalStateInner { - pub fn new( - tracked_pointer_tags: FxHashSet, - tracked_call_ids: FxHashSet, - retag_fields: RetagFields, - ) -> Self { - GlobalStateInner { - next_ptr_tag: SbTag(NonZeroU64::new(1).unwrap()), - base_ptr_tags: FxHashMap::default(), - next_call_id: NonZeroU64::new(1).unwrap(), - protected_tags: FxHashMap::default(), - tracked_pointer_tags, - tracked_call_ids, - retag_fields, - } - } - - /// Generates a new pointer tag. Remember to also check track_pointer_tags and log its creation! - fn new_ptr(&mut self) -> SbTag { - let id = self.next_ptr_tag; - self.next_ptr_tag = SbTag(NonZeroU64::new(id.0.get() + 1).unwrap()); - id - } - - pub fn new_frame(&mut self, machine: &MiriMachine<'_, '_>) -> FrameExtra { - let call_id = self.next_call_id; - trace!("new_frame: Assigning call ID {}", call_id); - if self.tracked_call_ids.contains(&call_id) { - machine.emit_diagnostic(NonHaltingDiagnostic::CreatedCallId(call_id)); - } - self.next_call_id = NonZeroU64::new(call_id.get() + 1).unwrap(); - FrameExtra { call_id, protected_tags: SmallVec::new() } - } - - pub fn end_call(&mut self, frame: &machine::FrameData<'_>) { - for tag in &frame - .stacked_borrows - .as_ref() - .expect("we should have Stacked Borrows data") - .protected_tags - { - self.protected_tags.remove(tag); - } - } - - pub fn base_ptr_tag(&mut self, id: AllocId, machine: &MiriMachine<'_, '_>) -> SbTag { - self.base_ptr_tags.get(&id).copied().unwrap_or_else(|| { - let tag = self.new_ptr(); - if self.tracked_pointer_tags.contains(&tag) { - machine.emit_diagnostic(NonHaltingDiagnostic::CreatedPointerTag(tag.0, None, None)); - } - trace!("New allocation {:?} has base tag {:?}", id, tag); - self.base_ptr_tags.try_insert(id, tag).unwrap(); - tag - }) - } -} - /// Error reporting pub fn err_sb_ub<'tcx>( msg: String, @@ -329,14 +137,7 @@ impl<'tcx> Stack { } } - /// Check if the given item is protected. - /// - /// The `provoking_access` argument is only used to produce diagnostics. - /// It is `Some` when we are granting the contained access for said tag, and it is - /// `None` during a deallocation. - /// Within `provoking_access, the `AllocRange` refers the entire operation, and - /// the `Size` refers to the specific location in the `AllocRange` that we are - /// currently checking. + /// The given item was invalidated -- check its protectors for whether that will cause UB. fn item_invalidated( item: &Item, global: &GlobalStateInner, @@ -386,7 +187,7 @@ impl<'tcx> Stack { tag: ProvenanceExtra, global: &GlobalStateInner, dcx: &mut DiagnosticCx<'_, '_, '_, 'tcx>, - exposed_tags: &FxHashSet, + exposed_tags: &FxHashSet, ) -> InterpResult<'tcx> { // Two main steps: Find granting item, remove incompatible items above. @@ -442,23 +243,24 @@ impl<'tcx> Stack { if granting_idx.is_none() || matches!(tag, ProvenanceExtra::Wildcard) { // Compute the upper bound of the items that remain. // (This is why we did all the work above: to reduce the items we have to consider here.) - let mut max = NonZeroU64::new(1).unwrap(); + let mut max = BorTag::one(); for i in 0..self.len() { let item = self.get(i).unwrap(); // Skip disabled items, they cannot be matched anyway. if !matches!(item.perm(), Permission::Disabled) { // We are looking for a strict upper bound, so add 1 to this tag. 
- max = cmp::max(item.tag().0.checked_add(1).unwrap(), max); + max = cmp::max(item.tag().succ().unwrap(), max); } } if let Some(unk) = self.unknown_bottom() { - max = cmp::max(unk.0, max); + max = cmp::max(unk, max); } // Use `max` as new strict upper bound for everything. trace!( - "access: forgetting stack to upper bound {max} due to wildcard or unknown access" + "access: forgetting stack to upper bound {max} due to wildcard or unknown access", + max = max.get(), ); - self.set_unknown_bottom(SbTag(max)); + self.set_unknown_bottom(max); } // Done. @@ -472,7 +274,7 @@ impl<'tcx> Stack { tag: ProvenanceExtra, global: &GlobalStateInner, dcx: &mut DiagnosticCx<'_, '_, '_, 'tcx>, - exposed_tags: &FxHashSet, + exposed_tags: &FxHashSet, ) -> InterpResult<'tcx> { // Step 1: Make a write access. // As part of this we do regular protector checking, i.e. even weakly protected items cause UB when popped. @@ -497,7 +299,7 @@ impl<'tcx> Stack { access: Option, global: &GlobalStateInner, dcx: &mut DiagnosticCx<'_, '_, '_, 'tcx>, - exposed_tags: &FxHashSet, + exposed_tags: &FxHashSet, ) -> InterpResult<'tcx> { dcx.start_grant(new.perm()); @@ -550,9 +352,9 @@ impl<'tcx> Stack { } // # Stacked Borrows Core End -/// Integration with the SbTag garbage collector +/// Integration with the BorTag garbage collector impl Stacks { - pub fn remove_unreachable_tags(&mut self, live_tags: &FxHashSet) { + pub fn remove_unreachable_tags(&mut self, live_tags: &FxHashSet) { if self.modified_since_last_gc { for stack in self.stacks.iter_mut_all() { if stack.len() > 64 { @@ -565,7 +367,7 @@ impl Stacks { } impl VisitTags for Stacks { - fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) { + fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) { for tag in self.exposed_tags.iter().copied() { visit(tag); } @@ -579,7 +381,7 @@ impl<'tcx> Stacks { fn new( size: Size, perm: Permission, - tag: SbTag, + tag: BorTag, id: AllocId, machine: &MiriMachine<'_, '_>, ) -> Self { @@ -602,7 +404,7 @@ impl<'tcx> Stacks { mut f: impl FnMut( &mut Stack, &mut DiagnosticCx<'_, '_, '_, 'tcx>, - &mut FxHashSet, + &mut FxHashSet, ) -> InterpResult<'tcx>, ) -> InterpResult<'tcx> { self.modified_since_last_gc = true; @@ -620,20 +422,19 @@ impl Stacks { pub fn new_allocation( id: AllocId, size: Size, - state: &GlobalState, + state: &mut GlobalStateInner, kind: MemoryKind, machine: &MiriMachine<'_, '_>, ) -> Self { - let mut extra = state.borrow_mut(); let (base_tag, perm) = match kind { // New unique borrow. This tag is not accessible by the program, // so it will only ever be used when using the local directly (i.e., // not through a pointer). That is, whenever we directly write to a local, this will pop // everything else off the stack, invalidating all previous pointers, // and in particular, *all* raw pointers. - MemoryKind::Stack => (extra.base_ptr_tag(id, machine), Permission::Unique), + MemoryKind::Stack => (state.base_ptr_tag(id, machine), Permission::Unique), // Everything else is shared by default. 
- _ => (extra.base_ptr_tag(id, machine), Permission::SharedReadWrite), + _ => (state.base_ptr_tag(id, machine), Permission::SharedReadWrite), }; Stacks::new(size, perm, base_tag, id, machine) } @@ -656,7 +457,7 @@ impl Stacks { range.size.bytes() ); let dcx = DiagnosticCxBuilder::read(machine, tag, range); - let state = machine.stacked_borrows.as_ref().unwrap().borrow(); + let state = machine.borrow_tracker.as_ref().unwrap().borrow(); self.for_each(range, dcx, |stack, dcx, exposed_tags| { stack.access(AccessKind::Read, tag, &state, dcx, exposed_tags) }) @@ -677,7 +478,7 @@ impl Stacks { range.size.bytes() ); let dcx = DiagnosticCxBuilder::write(machine, tag, range); - let state = machine.stacked_borrows.as_ref().unwrap().borrow(); + let state = machine.borrow_tracker.as_ref().unwrap().borrow(); self.for_each(range, dcx, |stack, dcx, exposed_tags| { stack.access(AccessKind::Write, tag, &state, dcx, exposed_tags) }) @@ -693,12 +494,16 @@ impl Stacks { ) -> InterpResult<'tcx> { trace!("deallocation with tag {:?}: {:?}, size {}", tag, alloc_id, range.size.bytes()); let dcx = DiagnosticCxBuilder::dealloc(machine, tag); - let state = machine.stacked_borrows.as_ref().unwrap().borrow(); + let state = machine.borrow_tracker.as_ref().unwrap().borrow(); self.for_each(range, dcx, |stack, dcx, exposed_tags| { stack.dealloc(tag, &state, dcx, exposed_tags) })?; Ok(()) } + + fn expose_tag(&mut self, tag: BorTag) { + self.exposed_tags.insert(tag); + } } /// Retagging/reborrowing. There is some policy in here, such as which permissions @@ -710,13 +515,13 @@ impl<'mir: 'ecx, 'tcx: 'mir, 'ecx> EvalContextPrivExt<'mir, 'tcx, 'ecx> trait EvalContextPrivExt<'mir: 'ecx, 'tcx: 'mir, 'ecx>: crate::MiriInterpCxExt<'mir, 'tcx> { /// Returns the `AllocId` the reborrow was done in, if some actual borrow stack manipulation /// happened. - fn reborrow( + fn sb_reborrow( &mut self, place: &MPlaceTy<'tcx, Provenance>, size: Size, kind: RefKind, retag_cause: RetagCause, // What caused this retag, for diagnostics only - new_tag: SbTag, + new_tag: BorTag, protect: Option, ) -> InterpResult<'tcx, Option> { let this = self.eval_context_mut(); @@ -725,7 +530,7 @@ trait EvalContextPrivExt<'mir: 'ecx, 'tcx: 'mir, 'ecx>: crate::MiriInterpCxExt<' let log_creation = |this: &MiriInterpCx<'mir, 'tcx>, loc: Option<(AllocId, Size, ProvenanceExtra)>| // alloc_id, base_offset, orig_tag -> InterpResult<'tcx> { - let global = this.machine.stacked_borrows.as_ref().unwrap().borrow(); + let global = this.machine.borrow_tracker.as_ref().unwrap().borrow(); let ty = place.layout.ty; if global.tracked_pointer_tags.contains(&new_tag) { let mut kind_str = format!("{kind}"); @@ -743,7 +548,7 @@ trait EvalContextPrivExt<'mir: 'ecx, 'tcx: 'mir, 'ecx>: crate::MiriInterpCxExt<' _ => write!(kind_str, " (pointee type {ty})").unwrap(), }; this.emit_diagnostic(NonHaltingDiagnostic::CreatedPointerTag( - new_tag.0, + new_tag.inner(), Some(kind_str), loc.map(|(alloc_id, base_offset, orig_tag)| (alloc_id, alloc_range(base_offset, size), orig_tag)), )); @@ -762,9 +567,10 @@ trait EvalContextPrivExt<'mir: 'ecx, 'tcx: 'mir, 'ecx>: crate::MiriInterpCxExt<' // uncovers a non-supported `extern static`. let extra = this.get_alloc_extra(alloc_id)?; let mut stacked_borrows = extra - .stacked_borrows + .borrow_tracker .as_ref() - .expect("we should have Stacked Borrows data") + .expect("We should have borrow tracking data") + .assert_sb() .borrow_mut(); // Note that we create a *second* `DiagnosticCxBuilder` below for the actual retag. // FIXME: can this be done cleaner? 
@@ -780,7 +586,7 @@ trait EvalContextPrivExt<'mir: 'ecx, 'tcx: 'mir, 'ecx>: crate::MiriInterpCxExt<' if protect.is_some() { dcx.log_protector(); } - } + }, AllocKind::Function | AllocKind::VTable | AllocKind::Dead => { // No stacked borrows on these allocations. } @@ -839,9 +645,9 @@ trait EvalContextPrivExt<'mir: 'ecx, 'tcx: 'mir, 'ecx>: crate::MiriInterpCxExt<' if let Some(protect) = protect { // See comment in `Stack::item_invalidated` for why we store the tag twice. - this.frame_mut().extra.stacked_borrows.as_mut().unwrap().protected_tags.push(new_tag); + this.frame_mut().extra.borrow_tracker.as_mut().unwrap().protected_tags.push(new_tag); this.machine - .stacked_borrows + .borrow_tracker .as_mut() .unwrap() .get_mut() @@ -876,9 +682,10 @@ trait EvalContextPrivExt<'mir: 'ecx, 'tcx: 'mir, 'ecx>: crate::MiriInterpCxExt<' // `visit_freeze_sensitive` needs to access the global state. let alloc_extra = this.get_alloc_extra(alloc_id)?; let mut stacked_borrows = alloc_extra - .stacked_borrows + .borrow_tracker .as_ref() - .expect("we should have Stacked Borrows data") + .expect("We should have borrow tracking data") + .assert_sb() .borrow_mut(); this.visit_freeze_sensitive(place, size, |mut range, frozen| { // Adjust range. @@ -900,7 +707,7 @@ trait EvalContextPrivExt<'mir: 'ecx, 'tcx: 'mir, 'ecx>: crate::MiriInterpCxExt<' false }; let item = Item::new(new_tag, perm, protected); - let global = this.machine.stacked_borrows.as_ref().unwrap().borrow(); + let global = this.machine.borrow_tracker.as_ref().unwrap().borrow(); let dcx = DiagnosticCxBuilder::retag( &this.machine, retag_cause, @@ -930,13 +737,14 @@ trait EvalContextPrivExt<'mir: 'ecx, 'tcx: 'mir, 'ecx>: crate::MiriInterpCxExt<' // mutable pointer, that seems reasonable. let (alloc_extra, machine) = this.get_alloc_extra_mut(alloc_id)?; let stacked_borrows = alloc_extra - .stacked_borrows + .borrow_tracker .as_mut() - .expect("we should have Stacked Borrows data") + .expect("We should have borrow tracking data") + .assert_sb_mut() .get_mut(); let item = Item::new(new_tag, perm, protect.is_some()); let range = alloc_range(base_offset, size); - let global = machine.stacked_borrows.as_ref().unwrap().borrow(); + let global = machine.borrow_tracker.as_ref().unwrap().borrow(); let dcx = DiagnosticCxBuilder::retag( machine, retag_cause, @@ -960,8 +768,8 @@ trait EvalContextPrivExt<'mir: 'ecx, 'tcx: 'mir, 'ecx>: crate::MiriInterpCxExt<' } /// Retags an indidual pointer, returning the retagged version. - /// `mutbl` can be `None` to make this a raw pointer. - fn retag_reference( + /// `kind` indicates what kind of reference is being created. + fn sb_retag_reference( &mut self, val: &ImmTy<'tcx, Provenance>, kind: RefKind, @@ -981,10 +789,10 @@ trait EvalContextPrivExt<'mir: 'ecx, 'tcx: 'mir, 'ecx>: crate::MiriInterpCxExt<' }; // Compute new borrow. - let new_tag = this.machine.stacked_borrows.as_mut().unwrap().get_mut().new_ptr(); + let new_tag = this.machine.borrow_tracker.as_mut().unwrap().get_mut().new_ptr(); // Reborrow. - let alloc_id = this.reborrow(&place, size, kind, retag_cause, new_tag, protect)?; + let alloc_id = this.sb_reborrow(&place, size, kind, retag_cause, new_tag, protect)?; // Adjust pointer. let new_place = place.map_provenance(|p| { @@ -993,7 +801,7 @@ trait EvalContextPrivExt<'mir: 'ecx, 'tcx: 'mir, 'ecx>: crate::MiriInterpCxExt<' Some(alloc_id) => { // If `reborrow` could figure out the AllocId of this ptr, hard-code it into the new one. 
// Even if we started out with a wildcard, this newly retagged pointer is tied to that allocation. - Provenance::Concrete { alloc_id, sb: new_tag } + Provenance::Concrete { alloc_id, tag: new_tag } } None => { // Looks like this has to stay a wildcard pointer. @@ -1011,9 +819,13 @@ trait EvalContextPrivExt<'mir: 'ecx, 'tcx: 'mir, 'ecx>: crate::MiriInterpCxExt<' impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriInterpCx<'mir, 'tcx> {} pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { - fn retag(&mut self, kind: RetagKind, place: &PlaceTy<'tcx, Provenance>) -> InterpResult<'tcx> { + fn sb_retag( + &mut self, + kind: RetagKind, + place: &PlaceTy<'tcx, Provenance>, + ) -> InterpResult<'tcx> { let this = self.eval_context_mut(); - let retag_fields = this.machine.stacked_borrows.as_mut().unwrap().get_mut().retag_fields; + let retag_fields = this.machine.borrow_tracker.as_mut().unwrap().get_mut().retag_fields; let retag_cause = match kind { RetagKind::TwoPhase { .. } => RetagCause::TwoPhase, RetagKind::FnEntry => RetagCause::FnEntry, @@ -1039,7 +851,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { protector: Option, ) -> InterpResult<'tcx> { let val = self.ecx.read_immediate(&self.ecx.place_to_op(place)?)?; - let val = self.ecx.retag_reference(&val, ref_kind, retag_cause, protector)?; + let val = self.ecx.sb_retag_reference(&val, ref_kind, retag_cause, protector)?; self.ecx.write_immediate(*val, place)?; Ok(()) } @@ -1138,7 +950,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { /// /// This is a HACK because there is nothing in MIR that would make the retag /// explicit. Also see . - fn retag_return_place(&mut self) -> InterpResult<'tcx> { + fn sb_retag_return_place(&mut self) -> InterpResult<'tcx> { let this = self.eval_context_mut(); let return_place = &this.frame().return_place; if return_place.layout.is_zst() { @@ -1153,7 +965,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { let ptr_layout = this.layout_of(this.tcx.mk_mut_ptr(return_place.layout.ty))?; let val = ImmTy::from_immediate(return_place.to_ref(this), ptr_layout); // Reborrow it. With protection! That is part of the point. - let val = this.retag_reference( + let val = this.sb_retag_reference( &val, RefKind::Unique { two_phase: false }, RetagCause::FnReturn, @@ -1167,7 +979,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { } /// Mark the given tag as exposed. It was found on a pointer with the given AllocId. - fn expose_tag(&mut self, alloc_id: AllocId, tag: SbTag) -> InterpResult<'tcx> { + fn sb_expose_tag(&mut self, alloc_id: AllocId, tag: BorTag) -> InterpResult<'tcx> { let this = self.eval_context_mut(); // Function pointers and dead objects don't have an alloc_extra so we ignore them. @@ -1181,7 +993,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { // uncovers a non-supported `extern static`. let alloc_extra = this.get_alloc_extra(alloc_id)?; trace!("Stacked Borrows tag {tag:?} exposed in {alloc_id:?}"); - alloc_extra.stacked_borrows.as_ref().unwrap().borrow_mut().exposed_tags.insert(tag); + alloc_extra + .borrow_tracker + .as_ref() + .expect("We should have borrow tracking data") + .assert_sb() + .borrow_mut() + .expose_tag(tag); } AllocKind::Function | AllocKind::VTable | AllocKind::Dead => { // No stacked borrows on these allocations. 
@@ -1193,7 +1011,12 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { fn print_stacks(&mut self, alloc_id: AllocId) -> InterpResult<'tcx> { let this = self.eval_context_mut(); let alloc_extra = this.get_alloc_extra(alloc_id)?; - let stacks = alloc_extra.stacked_borrows.as_ref().unwrap().borrow(); + let stacks = alloc_extra + .borrow_tracker + .as_ref() + .expect("We should have borrow tracking data") + .assert_sb() + .borrow(); for (range, stack) in stacks.stacks.iter_all() { print!("{range:?}: ["); if let Some(bottom) = stack.unknown_bottom() { diff --git a/src/stacked_borrows/stack.rs b/src/borrow_tracker/stacked_borrows/stack.rs similarity index 96% rename from src/stacked_borrows/stack.rs rename to src/borrow_tracker/stacked_borrows/stack.rs index 51a6fba6df011..1d5cfec3500ae 100644 --- a/src/stacked_borrows/stack.rs +++ b/src/borrow_tracker/stacked_borrows/stack.rs @@ -3,11 +3,14 @@ use std::ops::Range; use rustc_data_structures::fx::FxHashSet; -use crate::stacked_borrows::{AccessKind, Item, Permission, SbTag}; +use crate::borrow_tracker::{ + stacked_borrows::{Item, Permission}, + AccessKind, BorTag, +}; use crate::ProvenanceExtra; /// Exactly what cache size we should use is a difficult tradeoff. There will always be some -/// workload which has a `SbTag` working set which exceeds the size of the cache, and ends up +/// workload which has a `BorTag` working set which exceeds the size of the cache, and ends up /// falling back to linear searches of the borrow stack very often. /// The cost of making this value too large is that the loop in `Stack::insert` which ensures the /// entries in the cache stay correct after an insert becomes expensive. @@ -28,7 +31,7 @@ pub struct Stack { /// than `id`. /// When the bottom is unknown, `borrows` always has a `SharedReadOnly` or `Unique` at the bottom; /// we never have the unknown-to-known boundary in an SRW group. - unknown_bottom: Option, + unknown_bottom: Option, /// A small LRU cache of searches of the borrow stack. #[cfg(feature = "stack-cache")] @@ -40,7 +43,7 @@ pub struct Stack { } impl Stack { - pub fn retain(&mut self, tags: &FxHashSet) { + pub fn retain(&mut self, tags: &FxHashSet) { let mut first_removed = None; // We never consider removing the bottom-most tag. For stacks without an unknown @@ -185,7 +188,7 @@ impl<'tcx> Stack { &mut self, access: AccessKind, tag: ProvenanceExtra, - exposed_tags: &FxHashSet, + exposed_tags: &FxHashSet, ) -> Result, ()> { #[cfg(all(feature = "stack-cache", debug_assertions))] self.verify_cache_consistency(); @@ -219,12 +222,12 @@ impl<'tcx> Stack { // Couldn't find it in the stack; but if there is an unknown bottom it might be there. let found = self.unknown_bottom.is_some_and(|unknown_limit| { - tag.0 < unknown_limit.0 // unknown_limit is an upper bound for what can be in the unknown bottom. + tag < unknown_limit // unknown_limit is an upper bound for what can be in the unknown bottom. 
}); if found { Ok(None) } else { Err(()) } } - fn find_granting_tagged(&mut self, access: AccessKind, tag: SbTag) -> Option { + fn find_granting_tagged(&mut self, access: AccessKind, tag: BorTag) -> Option { #[cfg(feature = "stack-cache")] if let Some(idx) = self.find_granting_cache(access, tag) { return Some(idx); @@ -243,7 +246,7 @@ impl<'tcx> Stack { } #[cfg(feature = "stack-cache")] - fn find_granting_cache(&mut self, access: AccessKind, tag: SbTag) -> Option { + fn find_granting_cache(&mut self, access: AccessKind, tag: BorTag) -> Option { // This looks like a common-sense optimization; we're going to do a linear search of the // cache or the borrow stack to scan the shorter of the two. This optimization is miniscule // and this check actually ensures we do not access an invalid cache. @@ -349,11 +352,11 @@ impl<'tcx> Stack { self.borrows.len() } - pub fn unknown_bottom(&self) -> Option { + pub fn unknown_bottom(&self) -> Option { self.unknown_bottom } - pub fn set_unknown_bottom(&mut self, tag: SbTag) { + pub fn set_unknown_bottom(&mut self, tag: BorTag) { // We clear the borrow stack but the lookup cache doesn't support clearing per se. Instead, // there is a check explained in `find_granting_cache` which protects against accessing the // cache when it has been cleared and not yet refilled. From 17ee58752f30fc4ce9c413bcd0a105e03f371a5b Mon Sep 17 00:00:00 2001 From: Vanille-N Date: Fri, 2 Dec 2022 13:27:41 +0100 Subject: [PATCH 14/22] SbTag -> BorTag everywhere --- src/bin/miri.rs | 2 +- src/concurrency/data_race.rs | 4 ++-- src/concurrency/init_once.rs | 2 +- src/concurrency/sync.rs | 2 +- src/concurrency/thread.rs | 6 ++--- src/concurrency/weak_memory.rs | 2 +- src/eval.rs | 4 ++-- src/intptrcast.rs | 4 ++-- src/machine.rs | 36 +++++++++++++++--------------- src/shims/env.rs | 2 +- src/shims/panic.rs | 2 +- src/shims/time.rs | 2 +- src/shims/tls.rs | 2 +- src/shims/unix/fs.rs | 4 ++-- src/shims/unix/linux/sync.rs | 2 +- src/shims/unix/sync.rs | 2 +- src/shims/windows/sync.rs | 6 ++--- src/tag_gc.rs | 40 +++++++++++++++++----------------- 18 files changed, 62 insertions(+), 62 deletions(-) diff --git a/src/bin/miri.rs b/src/bin/miri.rs index ffe89921d9866..8d3c535901224 100644 --- a/src/bin/miri.rs +++ b/src/bin/miri.rs @@ -413,7 +413,7 @@ fn main() { err ), }; - for id in ids.into_iter().map(miri::SbTag::new) { + for id in ids.into_iter().map(miri::BorTag::new) { if let Some(id) = id { miri_config.tracked_pointer_tags.insert(id); } else { diff --git a/src/concurrency/data_race.rs b/src/concurrency/data_race.rs index d669cc1362a9a..99288480386e1 100644 --- a/src/concurrency/data_race.rs +++ b/src/concurrency/data_race.rs @@ -670,7 +670,7 @@ pub struct VClockAlloc { } impl VisitTags for VClockAlloc { - fn visit_tags(&self, _visit: &mut dyn FnMut(SbTag)) { + fn visit_tags(&self, _visit: &mut dyn FnMut(BorTag)) { // No tags here. } } @@ -1220,7 +1220,7 @@ pub struct GlobalState { } impl VisitTags for GlobalState { - fn visit_tags(&self, _visit: &mut dyn FnMut(SbTag)) { + fn visit_tags(&self, _visit: &mut dyn FnMut(BorTag)) { // We don't have any tags. 
} } diff --git a/src/concurrency/init_once.rs b/src/concurrency/init_once.rs index eb42cdf80abbe..9c9d505297c2d 100644 --- a/src/concurrency/init_once.rs +++ b/src/concurrency/init_once.rs @@ -45,7 +45,7 @@ pub(super) struct InitOnce<'mir, 'tcx> { } impl<'mir, 'tcx> VisitTags for InitOnce<'mir, 'tcx> { - fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) { + fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) { for waiter in self.waiters.iter() { waiter.callback.visit_tags(visit); } diff --git a/src/concurrency/sync.rs b/src/concurrency/sync.rs index dc4b435b7101c..402c9ce6fc9af 100644 --- a/src/concurrency/sync.rs +++ b/src/concurrency/sync.rs @@ -181,7 +181,7 @@ pub(crate) struct SynchronizationState<'mir, 'tcx> { } impl<'mir, 'tcx> VisitTags for SynchronizationState<'mir, 'tcx> { - fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) { + fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) { for init_once in self.init_onces.iter() { init_once.visit_tags(visit); } diff --git a/src/concurrency/thread.rs b/src/concurrency/thread.rs index 900d24443cc96..6ba93a13aaf23 100644 --- a/src/concurrency/thread.rs +++ b/src/concurrency/thread.rs @@ -212,7 +212,7 @@ impl<'mir, 'tcx> Thread<'mir, 'tcx> { } impl VisitTags for Thread<'_, '_> { - fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) { + fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) { let Thread { panic_payload, last_error, @@ -233,7 +233,7 @@ impl VisitTags for Thread<'_, '_> { } impl VisitTags for Frame<'_, '_, Provenance, FrameData<'_>> { - fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) { + fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) { let Frame { return_place, locals, @@ -316,7 +316,7 @@ pub struct ThreadManager<'mir, 'tcx> { } impl VisitTags for ThreadManager<'_, '_> { - fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) { + fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) { let ThreadManager { threads, thread_local_alloc_ids, diff --git a/src/concurrency/weak_memory.rs b/src/concurrency/weak_memory.rs index f2a3657295494..4c32efcfa3515 100644 --- a/src/concurrency/weak_memory.rs +++ b/src/concurrency/weak_memory.rs @@ -109,7 +109,7 @@ pub struct StoreBufferAlloc { } impl VisitTags for StoreBufferAlloc { - fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) { + fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) { let Self { store_buffers } = self; for val in store_buffers .borrow() diff --git a/src/eval.rs b/src/eval.rs index b8578b1277f53..91354d35b2331 100644 --- a/src/eval.rs +++ b/src/eval.rs @@ -103,7 +103,7 @@ pub struct MiriConfig { /// The seed to use when non-determinism or randomness are required (e.g. ptr-to-int cast, `getrandom()`). pub seed: Option, /// The stacked borrows pointer ids to report about - pub tracked_pointer_tags: FxHashSet, + pub tracked_pointer_tags: FxHashSet, /// The stacked borrows call IDs to report about pub tracked_call_ids: FxHashSet, /// The allocation ids to report about. @@ -138,7 +138,7 @@ pub struct MiriConfig { /// The location of a shared object file to load when calling external functions /// FIXME! consider allowing users to specify paths to multiple SO files, or to a directory pub external_so_file: Option, - /// Run a garbage collector for SbTags every N basic blocks. + /// Run a garbage collector for BorTags every N basic blocks. pub gc_interval: u32, /// The number of CPUs to be reported by miri. 
pub num_cpus: u32, diff --git a/src/intptrcast.rs b/src/intptrcast.rs index 9722b7643e426..0f4f489409a08 100644 --- a/src/intptrcast.rs +++ b/src/intptrcast.rs @@ -45,7 +45,7 @@ pub struct GlobalStateInner { } impl VisitTags for GlobalStateInner { - fn visit_tags(&self, _visit: &mut dyn FnMut(SbTag)) { + fn visit_tags(&self, _visit: &mut dyn FnMut(BorTag)) { // Nothing to visit here. } } @@ -105,7 +105,7 @@ impl<'mir, 'tcx> GlobalStateInner { pub fn expose_ptr( ecx: &mut MiriInterpCx<'mir, 'tcx>, alloc_id: AllocId, - sb: SbTag, + tag: BorTag, ) -> InterpResult<'tcx> { let global_state = ecx.machine.intptrcast.get_mut(); // In strict mode, we don't need this, so we can save some cycles by not tracking it. diff --git a/src/machine.rs b/src/machine.rs index 4d3444bc39cdd..7702843728739 100644 --- a/src/machine.rs +++ b/src/machine.rs @@ -147,7 +147,7 @@ pub enum Provenance { Concrete { alloc_id: AllocId, /// Stacked Borrows tag. - sb: SbTag, + tag: BorTag, }, Wildcard, } @@ -173,7 +173,7 @@ impl std::hash::Hash for Provenance { /// The "extra" information a pointer has over a regular AllocId. #[derive(Copy, Clone, PartialEq)] pub enum ProvenanceExtra { - Concrete(SbTag), + Concrete(BorTag), Wildcard, } @@ -188,7 +188,7 @@ static_assert_size!(Scalar, 32); impl fmt::Debug for Provenance { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - Provenance::Concrete { alloc_id, sb } => { + Provenance::Concrete { alloc_id, tag } => { // Forward `alternate` flag to `alloc_id` printing. if f.alternate() { write!(f, "[{alloc_id:#?}]")?; @@ -196,7 +196,7 @@ impl fmt::Debug for Provenance { write!(f, "[{alloc_id:?}]")?; } // Print Stacked Borrows tag. - write!(f, "{sb:?}")?; + write!(f, "{tag:?}")?; } Provenance::Wildcard => { write!(f, "[wildcard]")?; @@ -221,9 +221,9 @@ impl interpret::Provenance for Provenance { match (left, right) { // If both are the *same* concrete tag, that is the result. ( - Some(Provenance::Concrete { alloc_id: left_alloc, sb: left_sb }), - Some(Provenance::Concrete { alloc_id: right_alloc, sb: right_sb }), - ) if left_alloc == right_alloc && left_sb == right_sb => left, + Some(Provenance::Concrete { alloc_id: left_alloc, tag: left_tag }), + Some(Provenance::Concrete { alloc_id: right_alloc, tag: right_tag }), + ) if left_alloc == right_alloc && left_tag == right_tag => left, // If one side is a wildcard, the best possible outcome is that it is equal to the other // one, and we use that. (Some(Provenance::Wildcard), o) | (o, Some(Provenance::Wildcard)) => o, @@ -243,7 +243,7 @@ impl fmt::Debug for ProvenanceExtra { } impl ProvenanceExtra { - pub fn and_then(self, f: impl FnOnce(SbTag) -> Option) -> Option { + pub fn and_then(self, f: impl FnOnce(BorTag) -> Option) -> Option { match self { ProvenanceExtra::Concrete(pid) => f(pid), ProvenanceExtra::Wildcard => None, @@ -463,9 +463,9 @@ pub struct MiriMachine<'mir, 'tcx> { #[cfg(not(target_os = "linux"))] pub external_so_lib: Option, - /// Run a garbage collector for SbTags every N basic blocks. + /// Run a garbage collector for BorTags every N basic blocks. pub(crate) gc_interval: u32, - /// The number of blocks that passed since the last SbTag GC pass. + /// The number of blocks that passed since the last BorTag GC pass. pub(crate) since_gc: u32, /// The number of CPUs to be reported by miri. 
pub(crate) num_cpus: u32, @@ -656,7 +656,7 @@ impl<'mir, 'tcx> MiriMachine<'mir, 'tcx> { } impl VisitTags for MiriMachine<'_, '_> { - fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) { + fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) { #[rustfmt::skip] let MiriMachine { threads, @@ -959,10 +959,10 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> { stacked_borrows.borrow_mut().base_ptr_tag(ptr.provenance, &ecx.machine) } else { // Value does not matter, SB is disabled - SbTag::default() + BorTag::default() }; Pointer::new( - Provenance::Concrete { alloc_id: ptr.provenance, sb: sb_tag }, + Provenance::Concrete { alloc_id: ptr.provenance, tag }, Size::from_bytes(absolute_addr), ) } @@ -980,8 +980,8 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> { ptr: Pointer, ) -> InterpResult<'tcx> { match ptr.provenance { - Provenance::Concrete { alloc_id, sb } => - intptrcast::GlobalStateInner::expose_ptr(ecx, alloc_id, sb), + Provenance::Concrete { alloc_id, tag } => + intptrcast::GlobalStateInner::expose_ptr(ecx, alloc_id, tag), Provenance::Wildcard => { // No need to do anything for wildcard pointers as // their provenances have already been previously exposed. @@ -999,11 +999,11 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> { let rel = intptrcast::GlobalStateInner::abs_ptr_to_rel(ecx, ptr); rel.map(|(alloc_id, size)| { - let sb = match ptr.provenance { - Provenance::Concrete { sb, .. } => ProvenanceExtra::Concrete(sb), + let tag = match ptr.provenance { + Provenance::Concrete { tag, .. } => ProvenanceExtra::Concrete(tag), Provenance::Wildcard => ProvenanceExtra::Wildcard, }; - (alloc_id, size, sb) + (alloc_id, size, tag) }) } diff --git a/src/shims/env.rs b/src/shims/env.rs index bf6c1f8756290..80fb4ff2fe980 100644 --- a/src/shims/env.rs +++ b/src/shims/env.rs @@ -37,7 +37,7 @@ pub struct EnvVars<'tcx> { } impl VisitTags for EnvVars<'_> { - fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) { + fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) { let EnvVars { map, environ } = self; environ.visit_tags(visit); diff --git a/src/shims/panic.rs b/src/shims/panic.rs index 698e025961da8..7c30845dc6756 100644 --- a/src/shims/panic.rs +++ b/src/shims/panic.rs @@ -36,7 +36,7 @@ pub struct CatchUnwindData<'tcx> { } impl VisitTags for CatchUnwindData<'_> { - fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) { + fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) { let CatchUnwindData { catch_fn, data, dest, ret: _ } = self; catch_fn.visit_tags(visit); data.visit_tags(visit); diff --git a/src/shims/time.rs b/src/shims/time.rs index bc0b71fbc2096..d263aab351b12 100644 --- a/src/shims/time.rs +++ b/src/shims/time.rs @@ -278,7 +278,7 @@ struct UnblockCallback { } impl VisitTags for UnblockCallback { - fn visit_tags(&self, _visit: &mut dyn FnMut(SbTag)) {} + fn visit_tags(&self, _visit: &mut dyn FnMut(BorTag)) {} } impl<'mir, 'tcx: 'mir> MachineCallback<'mir, 'tcx> for UnblockCallback { diff --git a/src/shims/tls.rs b/src/shims/tls.rs index fe278ff717f0c..54fdf2872ab4d 100644 --- a/src/shims/tls.rs +++ b/src/shims/tls.rs @@ -208,7 +208,7 @@ impl<'tcx> TlsData<'tcx> { } impl VisitTags for TlsData<'_> { - fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) { + fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) { let TlsData { keys, macos_thread_dtors, next_key: _ } = self; for scalar in keys.values().flat_map(|v| v.data.values()) { diff --git a/src/shims/unix/fs.rs b/src/shims/unix/fs.rs index e048d53a17e0d..988627db5611c 100644 --- 
a/src/shims/unix/fs.rs +++ b/src/shims/unix/fs.rs @@ -278,7 +278,7 @@ pub struct FileHandler { } impl VisitTags for FileHandler { - fn visit_tags(&self, _visit: &mut dyn FnMut(SbTag)) { + fn visit_tags(&self, _visit: &mut dyn FnMut(BorTag)) { // All our FileDescriptor do not have any tags. } } @@ -490,7 +490,7 @@ impl Default for DirHandler { } impl VisitTags for DirHandler { - fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) { + fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) { let DirHandler { streams, next_id: _ } = self; for dir in streams.values() { diff --git a/src/shims/unix/linux/sync.rs b/src/shims/unix/linux/sync.rs index 292b9d2e7a176..343232c4bbb29 100644 --- a/src/shims/unix/linux/sync.rs +++ b/src/shims/unix/linux/sync.rs @@ -183,7 +183,7 @@ pub fn futex<'tcx>( } impl<'tcx> VisitTags for Callback<'tcx> { - fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) { + fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) { let Callback { thread: _, addr_usize: _, dest } = self; dest.visit_tags(visit); } diff --git a/src/shims/unix/sync.rs b/src/shims/unix/sync.rs index e0afb500cb18a..f9b5774f0090e 100644 --- a/src/shims/unix/sync.rs +++ b/src/shims/unix/sync.rs @@ -747,7 +747,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { } impl<'tcx> VisitTags for Callback<'tcx> { - fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) { + fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) { let Callback { active_thread: _, mutex_id: _, id: _, dest } = self; dest.visit_tags(visit); } diff --git a/src/shims/windows/sync.rs b/src/shims/windows/sync.rs index 8f414d98dba5f..6b043c6d2c9e1 100644 --- a/src/shims/windows/sync.rs +++ b/src/shims/windows/sync.rs @@ -182,7 +182,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { } impl<'tcx> VisitTags for Callback<'tcx> { - fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) { + fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) { let Callback { init_once_id: _, pending_place } = self; pending_place.visit_tags(visit); } @@ -315,7 +315,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { } impl<'tcx> VisitTags for Callback<'tcx> { - fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) { + fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) { let Callback { thread: _, addr: _, dest } = self; dest.visit_tags(visit); } @@ -419,7 +419,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { } impl<'tcx> VisitTags for Callback<'tcx> { - fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) { + fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) { let Callback { thread: _, condvar_id: _, lock_id: _, mode: _, dest } = self; dest.visit_tags(visit); } diff --git a/src/tag_gc.rs b/src/tag_gc.rs index 73712348f0d5f..b460122196a53 100644 --- a/src/tag_gc.rs +++ b/src/tag_gc.rs @@ -3,11 +3,11 @@ use rustc_data_structures::fx::FxHashSet; use crate::*; pub trait VisitTags { - fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)); + fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)); } impl VisitTags for Option { - fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) { + fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) { if let Some(x) = self { x.visit_tags(visit); } @@ -15,41 +15,41 @@ impl VisitTags for Option { } impl VisitTags for std::cell::RefCell { - fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) { + fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) { self.borrow().visit_tags(visit) } } -impl VisitTags for SbTag { - fn 
visit_tags(&self, visit: &mut dyn FnMut(SbTag)) { +impl VisitTags for BorTag { + fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) { visit(*self) } } impl VisitTags for Provenance { - fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) { - if let Provenance::Concrete { sb, .. } = self { - visit(*sb); + fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) { + if let Provenance::Concrete { tag, .. } = self { + visit(*tag); } } } impl VisitTags for Pointer<Provenance> { - fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) { + fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) { let (prov, _offset) = self.into_parts(); prov.visit_tags(visit); } } impl VisitTags for Pointer<Option<Provenance>> { - fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) { + fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) { let (prov, _offset) = self.into_parts(); prov.visit_tags(visit); } } impl VisitTags for Scalar<Provenance> { - fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) { + fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) { match self { Scalar::Ptr(ptr, _) => ptr.visit_tags(visit), Scalar::Int(_) => (), @@ -58,7 +58,7 @@ impl VisitTags for Scalar<Provenance> { } impl VisitTags for Immediate<Provenance> { - fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) { + fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) { match self { Immediate::Scalar(s) => { s.visit_tags(visit); @@ -73,7 +73,7 @@ impl VisitTags for Immediate<Provenance> { } impl VisitTags for MemPlaceMeta<Provenance> { - fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) { + fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) { match self { MemPlaceMeta::Meta(m) => m.visit_tags(visit), MemPlaceMeta::None => {} @@ -82,7 +82,7 @@ impl VisitTags for MemPlaceMeta<Provenance> { } impl VisitTags for MemPlace<Provenance> { - fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) { + fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) { let MemPlace { ptr, meta } = self; ptr.visit_tags(visit); meta.visit_tags(visit); @@ -90,13 +90,13 @@ impl VisitTags for MemPlace<Provenance> { } impl VisitTags for MPlaceTy<'_, Provenance> { - fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) { + fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) { (**self).visit_tags(visit) } } impl VisitTags for Place<Provenance> { - fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) { + fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) { match self { Place::Ptr(p) => p.visit_tags(visit), Place::Local { .. } => { @@ -107,13 +107,13 @@ impl VisitTags for Place<Provenance> { } } impl VisitTags for PlaceTy<'_, Provenance> { - fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) { + fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) { (**self).visit_tags(visit) } } impl VisitTags for Operand<Provenance> { - fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) { + fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) { match self { Operand::Immediate(imm) => { imm.visit_tags(visit); @@ -126,7 +126,7 @@ impl VisitTags for Operand<Provenance> { } impl VisitTags for Allocation<Provenance, AllocExtra> { - fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) { + fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) { for prov in self.provenance().provenances() { prov.visit_tags(visit); } @@ -136,7 +136,7 @@ impl VisitTags for Allocation<Provenance, AllocExtra> { } impl VisitTags for crate::MiriInterpCx<'_, '_> { - fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) { + fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) { // Memory.
self.memory.alloc_map().iter(|it| { for (_id, (_kind, alloc)) in it { From 8e7a9e8072a2019fc90a9c377b7d0f75a252b228 Mon Sep 17 00:00:00 2001 From: Vanille-N Date: Fri, 2 Dec 2022 13:29:48 +0100 Subject: [PATCH 15/22] other renames, introduction of BorrowTrackerMethod and AllocExtra --- src/bin/miri.rs | 2 +- src/eval.rs | 4 +- src/intptrcast.rs | 4 +- src/machine.rs | 108 ++++++++++++++++++++-------------------------- src/tag_gc.rs | 14 +++--- 5 files changed, 56 insertions(+), 76 deletions(-) diff --git a/src/bin/miri.rs b/src/bin/miri.rs index 8d3c535901224..9ac04c4930f26 100644 --- a/src/bin/miri.rs +++ b/src/bin/miri.rs @@ -317,7 +317,7 @@ fn main() { } else if arg == "-Zmiri-disable-validation" { miri_config.validate = false; } else if arg == "-Zmiri-disable-stacked-borrows" { - miri_config.stacked_borrows = false; + miri_config.borrow_tracker = None; } else if arg == "-Zmiri-disable-data-race-detector" { miri_config.data_race_detector = false; miri_config.weak_memory_emulation = false; diff --git a/src/eval.rs b/src/eval.rs index 91354d35b2331..6c9c96eef6238 100644 --- a/src/eval.rs +++ b/src/eval.rs @@ -87,7 +87,7 @@ pub struct MiriConfig { /// Determine if validity checking is enabled. pub validate: bool, /// Determines if Stacked Borrows is enabled. - pub stacked_borrows: bool, + pub borrow_tracker: Option, /// Controls alignment checking. pub check_alignment: AlignmentCheck, /// Controls function [ABI](Abi) checking. @@ -149,7 +149,7 @@ impl Default for MiriConfig { MiriConfig { env: vec![], validate: true, - stacked_borrows: true, + borrow_tracker: Some(BorrowTrackerMethod::StackedBorrows), check_alignment: AlignmentCheck::Int, check_abi: true, isolated_op: IsolatedOp::Reject(RejectOpWith::Abort), diff --git a/src/intptrcast.rs b/src/intptrcast.rs index 0f4f489409a08..c26828b11e0e1 100644 --- a/src/intptrcast.rs +++ b/src/intptrcast.rs @@ -112,8 +112,8 @@ impl<'mir, 'tcx> GlobalStateInner { if global_state.provenance_mode != ProvenanceMode::Strict { trace!("Exposing allocation id {alloc_id:?}"); global_state.exposed.insert(alloc_id); - if ecx.machine.stacked_borrows.is_some() { - ecx.expose_tag(alloc_id, sb)?; + if ecx.machine.borrow_tracker.is_some() { + ecx.expose_tag(alloc_id, tag)?; } } Ok(()) diff --git a/src/machine.rs b/src/machine.rs index 7702843728739..920ee3e1ef1f4 100644 --- a/src/machine.rs +++ b/src/machine.rs @@ -39,7 +39,7 @@ pub const STACK_SIZE: u64 = 16 * PAGE_SIZE; // whatever /// Extra data stored with each stack frame pub struct FrameData<'tcx> { /// Extra data for Stacked Borrows. - pub stacked_borrows: Option, + pub borrow_tracker: Option, /// If this is Some(), then this is a special "catch unwind" frame (the frame of `try_fn` /// called by `try`). When this frame is popped during unwinding a panic, @@ -61,20 +61,20 @@ pub struct FrameData<'tcx> { impl<'tcx> std::fmt::Debug for FrameData<'tcx> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { // Omitting `timing`, it does not support `Debug`. 
- let FrameData { stacked_borrows, catch_unwind, timing: _, is_user_relevant: _ } = self; + let FrameData { borrow_tracker, catch_unwind, timing: _, is_user_relevant: _ } = self; f.debug_struct("FrameData") - .field("stacked_borrows", stacked_borrows) + .field("borrow_tracker", borrow_tracker) .field("catch_unwind", catch_unwind) .finish() } } impl VisitTags for FrameData<'_> { - fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) { - let FrameData { catch_unwind, stacked_borrows, timing: _, is_user_relevant: _ } = self; + fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) { + let FrameData { catch_unwind, borrow_tracker, timing: _, is_user_relevant: _ } = self; catch_unwind.visit_tags(visit); - stacked_borrows.visit_tags(visit); + borrow_tracker.visit_tags(visit); } } @@ -254,8 +254,8 @@ impl ProvenanceExtra { /// Extra per-allocation data #[derive(Debug, Clone)] pub struct AllocExtra { - /// Stacked Borrows state is only added if it is enabled. - pub stacked_borrows: Option, + /// Global state of the borrow tracker, if enabled. + pub borrow_tracker: Option, /// Data race detection via the use of a vector-clock, /// this is only added if it is enabled. pub data_race: Option, @@ -265,10 +265,10 @@ pub struct AllocExtra { } impl VisitTags for AllocExtra { - fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) { - let AllocExtra { stacked_borrows, data_race, weak_memory } = self; + fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) { + let AllocExtra { borrow_tracker, data_race, weak_memory } = self; - stacked_borrows.visit_tags(visit); + borrow_tracker.visit_tags(visit); data_race.visit_tags(visit); weak_memory.visit_tags(visit); } @@ -350,8 +350,8 @@ pub struct MiriMachine<'mir, 'tcx> { // We carry a copy of the global `TyCtxt` for convenience, so methods taking just `&Evaluator` have `tcx` access. pub tcx: TyCtxt<'tcx>, - /// Stacked Borrows global data. - pub stacked_borrows: Option, + /// Global data for borrow tracking. + pub borrow_tracker: Option, /// Data race detector global data. pub data_race: Option, @@ -480,17 +480,11 @@ impl<'mir, 'tcx> MiriMachine<'mir, 'tcx> { measureme::Profiler::new(out).expect("Couldn't create `measureme` profiler") }); let rng = StdRng::seed_from_u64(config.seed.unwrap_or(0)); - let stacked_borrows = config.stacked_borrows.then(|| { - RefCell::new(stacked_borrows::GlobalStateInner::new( - config.tracked_pointer_tags.clone(), - config.tracked_call_ids.clone(), - config.retag_fields, - )) - }); + let borrow_tracker = config.borrow_tracker.map(|bt| bt.instanciate_global_state(config)); let data_race = config.data_race_detector.then(|| data_race::GlobalState::new(config)); MiriMachine { tcx: layout_cx.tcx, - stacked_borrows, + borrow_tracker, data_race, intptrcast: RefCell::new(intptrcast::GlobalStateInner::new(config)), // `env_vars` depends on a full interpreter so we cannot properly initialize it yet. 
@@ -668,7 +662,7 @@ impl VisitTags for MiriMachine<'_, '_> { cmd_line, extern_statics, dir_handler, - stacked_borrows, + borrow_tracker, data_race, intptrcast, file_handler, @@ -706,7 +700,7 @@ impl VisitTags for MiriMachine<'_, '_> { dir_handler.visit_tags(visit); file_handler.visit_tags(visit); data_race.visit_tags(visit); - stacked_borrows.visit_tags(visit); + borrow_tracker.visit_tags(visit); intptrcast.visit_tags(visit); main_fn_ret_place.visit_tags(visit); argc.visit_tags(visit); @@ -907,15 +901,12 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> { } let alloc = alloc.into_owned(); - let stacks = ecx.machine.stacked_borrows.as_ref().map(|stacked_borrows| { - stacked_borrows::Stacks::new_allocation( - id, - alloc.size(), - stacked_borrows, - kind, - &ecx.machine, - ) - }); + let borrow_tracker = ecx + .machine + .borrow_tracker + .as_ref() + .map(|bt| bt.borrow_mut().new_allocation(id, alloc.size(), kind, &ecx.machine)); + let race_alloc = ecx.machine.data_race.as_ref().map(|data_race| { data_race::AllocExtra::new_allocation( data_race, @@ -927,11 +918,7 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> { let buffer_alloc = ecx.machine.weak_memory.then(weak_memory::AllocExtra::new_allocation); let alloc: Allocation = alloc.adjust_from_tcx( &ecx.tcx, - AllocExtra { - stacked_borrows: stacks.map(RefCell::new), - data_race: race_alloc, - weak_memory: buffer_alloc, - }, + AllocExtra { borrow_tracker, data_race: race_alloc, weak_memory: buffer_alloc }, |ptr| ecx.global_base_pointer(ptr), )?; Ok(Cow::Owned(alloc)) @@ -955,8 +942,8 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> { } } let absolute_addr = intptrcast::GlobalStateInner::rel_ptr_to_addr(ecx, ptr); - let sb_tag = if let Some(stacked_borrows) = &ecx.machine.stacked_borrows { - stacked_borrows.borrow_mut().base_ptr_tag(ptr.provenance, &ecx.machine) + let tag = if let Some(borrow_tracker) = &ecx.machine.borrow_tracker { + borrow_tracker.borrow_mut().base_ptr_tag(ptr.provenance, &ecx.machine) } else { // Value does not matter, SB is disabled BorTag::default() @@ -1018,10 +1005,8 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> { if let Some(data_race) = &alloc_extra.data_race { data_race.read(alloc_id, range, machine)?; } - if let Some(stacked_borrows) = &alloc_extra.stacked_borrows { - stacked_borrows - .borrow_mut() - .before_memory_read(alloc_id, prov_extra, range, machine)?; + if let Some(borrow_tracker) = &alloc_extra.borrow_tracker { + borrow_tracker.before_memory_read(alloc_id, prov_extra, range, machine)?; } if let Some(weak_memory) = &alloc_extra.weak_memory { weak_memory.memory_accessed(range, machine.data_race.as_ref().unwrap()); @@ -1040,8 +1025,8 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> { if let Some(data_race) = &mut alloc_extra.data_race { data_race.write(alloc_id, range, machine)?; } - if let Some(stacked_borrows) = &mut alloc_extra.stacked_borrows { - stacked_borrows.get_mut().before_memory_write(alloc_id, prov_extra, range, machine)?; + if let Some(borrow_tracker) = &mut alloc_extra.borrow_tracker { + borrow_tracker.before_memory_write(alloc_id, prov_extra, range, machine)?; } if let Some(weak_memory) = &alloc_extra.weak_memory { weak_memory.memory_accessed(range, machine.data_race.as_ref().unwrap()); @@ -1063,16 +1048,10 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> { if let Some(data_race) = &mut alloc_extra.data_race { data_race.deallocate(alloc_id, range, machine)?; } - if let 
Some(stacked_borrows) = &mut alloc_extra.stacked_borrows { - stacked_borrows.get_mut().before_memory_deallocation( - alloc_id, - prove_extra, - range, - machine, - ) - } else { - Ok(()) + if let Some(borrow_tracker) = &mut alloc_extra.borrow_tracker { + borrow_tracker.before_memory_deallocation(alloc_id, prove_extra, range, machine)?; } + Ok(()) } #[inline(always)] @@ -1081,7 +1060,10 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> { kind: mir::RetagKind, place: &PlaceTy<'tcx, Provenance>, ) -> InterpResult<'tcx> { - if ecx.machine.stacked_borrows.is_some() { ecx.retag(kind, place) } else { Ok(()) } + if ecx.machine.borrow_tracker.is_some() { + ecx.retag(kind, place)?; + } + Ok(()) } #[inline(always)] @@ -1104,10 +1086,10 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> { None }; - let stacked_borrows = ecx.machine.stacked_borrows.as_ref(); + let borrow_tracker = ecx.machine.borrow_tracker.as_ref(); let extra = FrameData { - stacked_borrows: stacked_borrows.map(|sb| sb.borrow_mut().new_frame(&ecx.machine)), + borrow_tracker: borrow_tracker.map(|bt| bt.borrow_mut().new_frame(&ecx.machine)), catch_unwind: None, timing, is_user_relevant: ecx.machine.is_user_relevant(&frame), @@ -1140,7 +1122,7 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> { } } - // Search for SbTags to find all live pointers, then remove all other tags from borrow + // Search for BorTags to find all live pointers, then remove all other tags from borrow // stacks. // When debug assertions are enabled, run the GC as often as possible so that any cases // where it mistakenly removes an important tag become visible. @@ -1166,8 +1148,10 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> { let stack_len = ecx.active_thread_stack().len(); ecx.active_thread_mut().set_top_user_relevant_frame(stack_len - 1); } - - if ecx.machine.stacked_borrows.is_some() { ecx.retag_return_place() } else { Ok(()) } + if ecx.machine.borrow_tracker.is_some() { + ecx.retag_return_place()?; + } + Ok(()) } #[inline(always)] @@ -1184,8 +1168,8 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> { ecx.active_thread_mut().recompute_top_user_relevant_frame(); } let timing = frame.extra.timing.take(); - if let Some(stacked_borrows) = &ecx.machine.stacked_borrows { - stacked_borrows.borrow_mut().end_call(&frame.extra); + if let Some(borrow_tracker) = &ecx.machine.borrow_tracker { + borrow_tracker.borrow_mut().end_call(&frame.extra); } let res = ecx.handle_stack_pop_unwind(frame.extra, unwinding); if let Some(profiler) = ecx.machine.profiler.as_ref() { diff --git a/src/tag_gc.rs b/src/tag_gc.rs index b460122196a53..c1194fe22163a 100644 --- a/src/tag_gc.rs +++ b/src/tag_gc.rs @@ -154,7 +154,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriInterpCxExt<'mir, 'tcx> { fn garbage_collect_tags(&mut self) -> InterpResult<'tcx> { let this = self.eval_context_mut(); // No reason to do anything at all if stacked borrows is off. 
- if this.machine.stacked_borrows.is_none() { + if this.machine.borrow_tracker.is_none() { return Ok(()); } @@ -167,17 +167,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriInterpCxExt<'mir, 'tcx> { Ok(()) } - fn remove_unreachable_tags(&mut self, tags: FxHashSet) { + fn remove_unreachable_tags(&mut self, tags: FxHashSet) { let this = self.eval_context_mut(); this.memory.alloc_map().iter(|it| { for (_id, (_kind, alloc)) in it { - alloc - .extra - .stacked_borrows - .as_ref() - .unwrap() - .borrow_mut() - .remove_unreachable_tags(&tags); + if let Some(bt) = &alloc.extra.borrow_tracker { + bt.remove_unreachable_tags(&tags); + } } }); } From 7cae2e9da1d47639902a7bc1c77ae9ebe5c2f989 Mon Sep 17 00:00:00 2001 From: Vanille-N Date: Fri, 2 Dec 2022 13:30:06 +0100 Subject: [PATCH 16/22] fix imports --- src/diagnostics.rs | 2 +- src/eval.rs | 1 + src/lib.rs | 11 +++++++---- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/src/diagnostics.rs b/src/diagnostics.rs index 5cd0a0eeb58c1..bc771ca057f09 100644 --- a/src/diagnostics.rs +++ b/src/diagnostics.rs @@ -6,7 +6,7 @@ use log::trace; use rustc_span::{source_map::DUMMY_SP, SpanData, Symbol}; use rustc_target::abi::{Align, Size}; -use crate::stacked_borrows::{diagnostics::TagHistory, AccessKind}; +use crate::borrow_tracker::{stacked_borrows::diagnostics::TagHistory, AccessKind}; use crate::*; /// Details of premature program termination. diff --git a/src/eval.rs b/src/eval.rs index 6c9c96eef6238..7b4973f3b9daf 100644 --- a/src/eval.rs +++ b/src/eval.rs @@ -9,6 +9,7 @@ use std::thread; use log::info; +use crate::borrow_tracker::RetagFields; use rustc_data_structures::fx::FxHashSet; use rustc_hir::def::Namespace; use rustc_hir::def_id::DefId; diff --git a/src/lib.rs b/src/lib.rs index 6f483cf2cc480..2c427c166c16d 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -53,6 +53,7 @@ extern crate rustc_session; extern crate rustc_span; extern crate rustc_target; +mod borrow_tracker; mod clock; mod concurrency; mod diagnostics; @@ -64,7 +65,6 @@ mod mono_hash_map; mod operator; mod range_map; mod shims; -mod stacked_borrows; mod tag_gc; // Establish a "crate-wide prelude": we often import `crate::*`. 
@@ -84,6 +84,12 @@ pub use crate::shims::time::EvalContextExt as _; pub use crate::shims::tls::TlsData; pub use crate::shims::EvalContextExt as _; +pub use crate::borrow_tracker::stacked_borrows::{ + EvalContextExt as _, Item, Permission, Stack, Stacks, +}; +pub use crate::borrow_tracker::{ + BorTag, BorrowTrackerMethod, CallId, EvalContextExt as _, RetagFields, +}; pub use crate::clock::{Clock, Instant}; pub use crate::concurrency::{ data_race::{AtomicFenceOrd, AtomicReadOrd, AtomicRwOrd, AtomicWriteOrd, EvalContextExt as _}, @@ -106,9 +112,6 @@ pub use crate::machine::{ pub use crate::mono_hash_map::MonoHashMap; pub use crate::operator::EvalContextExt as _; pub use crate::range_map::RangeMap; -pub use crate::stacked_borrows::{ - CallId, EvalContextExt as _, Item, Permission, RetagFields, SbTag, -}; pub use crate::tag_gc::{EvalContextExt as _, VisitTags}; /// Insert rustc arguments at the beginning of the argument list that Miri wants to be From 3378d9dd7a9c03e729c0e5b600ef627f873a4b31 Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Fri, 2 Dec 2022 13:56:42 +0100 Subject: [PATCH 17/22] forward verbosity to cargo setup --- cargo-miri/src/phases.rs | 2 +- cargo-miri/src/setup.rs | 7 +++++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/cargo-miri/src/phases.rs b/cargo-miri/src/phases.rs index 64b3187305e1a..91945ad39f50b 100644 --- a/cargo-miri/src/phases.rs +++ b/cargo-miri/src/phases.rs @@ -94,7 +94,7 @@ pub fn phase_cargo_miri(mut args: impl Iterator) { let target = target.as_ref().unwrap_or(host); // We always setup. - setup(&subcommand, target, &rustc_version); + setup(&subcommand, target, &rustc_version, verbose); // Invoke actual cargo for the job, but with different flags. // We re-use `cargo test` and `cargo run`, which makes target and binary handling very easy but diff --git a/cargo-miri/src/setup.rs b/cargo-miri/src/setup.rs index 3ec63ba0f104f..a696546954f90 100644 --- a/cargo-miri/src/setup.rs +++ b/cargo-miri/src/setup.rs @@ -13,7 +13,7 @@ use crate::util::*; /// Performs the setup required to make `cargo miri` work: Getting a custom-built libstd. Then sets /// `MIRI_SYSROOT`. Skipped if `MIRI_SYSROOT` is already set, in which case we expect the user has /// done all this already. -pub fn setup(subcommand: &MiriCommand, target: &str, rustc_version: &VersionMeta) { +pub fn setup(subcommand: &MiriCommand, target: &str, rustc_version: &VersionMeta, verbose: usize) { let only_setup = matches!(subcommand, MiriCommand::Setup); let ask_user = !only_setup; let print_sysroot = only_setup && has_arg_flag("--print-sysroot"); // whether we just print the sysroot path @@ -100,7 +100,10 @@ pub fn setup(subcommand: &MiriCommand, target: &str, rustc_version: &VersionMeta command.env("RUSTC_WRAPPER", ""); if only_setup && !print_sysroot { - // Forward output. + // Forward output. Even make it verbose, if requested. + for _ in 0..verbose { + command.arg("-v"); + } } else { // Supress output. 
command.stdout(process::Stdio::null()); From 77b6d6de2d774e4ac877f11d248d89fb23cd8a1b Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Fri, 2 Dec 2022 14:43:05 +0100 Subject: [PATCH 18/22] slight simplifications for borrow tracking --- src/borrow_tracker/mod.rs | 50 +++++++++++++---------- src/borrow_tracker/stacked_borrows/mod.rs | 40 +++--------------- src/machine.rs | 4 +- 3 files changed, 36 insertions(+), 58 deletions(-) diff --git a/src/borrow_tracker/mod.rs b/src/borrow_tracker/mod.rs index 69f9bd09f26de..78332879e5ea1 100644 --- a/src/borrow_tracker/mod.rs +++ b/src/borrow_tracker/mod.rs @@ -55,9 +55,9 @@ impl fmt::Debug for BorTag { } } -/// Per-frame data for borrow tracking +/// Per-call-stack-frame data for borrow tracking #[derive(Debug)] -pub struct FrameExtra { +pub struct FrameState { /// The ID of the call this frame corresponds to. pub call_id: CallId, @@ -72,7 +72,7 @@ pub struct FrameExtra { pub protected_tags: SmallVec<[BorTag; 2]>, } -impl VisitTags for FrameExtra { +impl VisitTags for FrameState { fn visit_tags(&self, _visit: &mut dyn FnMut(BorTag)) { // `protected_tags` are fine to GC. } @@ -190,14 +190,14 @@ impl GlobalStateInner { id } - pub fn new_frame(&mut self, machine: &MiriMachine<'_, '_>) -> FrameExtra { + pub fn new_frame(&mut self, machine: &MiriMachine<'_, '_>) -> FrameState { let call_id = self.next_call_id; trace!("new_frame: Assigning call ID {}", call_id); if self.tracked_call_ids.contains(&call_id) { machine.emit_diagnostic(NonHaltingDiagnostic::CreatedCallId(call_id)); } self.next_call_id = NonZeroU64::new(call_id.get() + 1).unwrap(); - FrameExtra { call_id, protected_tags: SmallVec::new() } + FrameState { call_id, protected_tags: SmallVec::new() } } pub fn end_call(&mut self, frame: &machine::FrameData<'_>) { @@ -253,10 +253,10 @@ impl GlobalStateInner { alloc_size: Size, kind: MemoryKind, machine: &MiriMachine<'_, '_>, - ) -> AllocExtra { + ) -> AllocState { match self.borrow_tracker_method { BorrowTrackerMethod::StackedBorrows => - AllocExtra::StackedBorrows(Box::new(RefCell::new(Stacks::new_allocation( + AllocState::StackedBorrows(Box::new(RefCell::new(Stacks::new_allocation( id, alloc_size, self, kind, machine, )))), } @@ -292,24 +292,30 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { /// Extra per-allocation data for borrow tracking #[derive(Debug, Clone)] -pub enum AllocExtra { +pub enum AllocState { /// Data corresponding to Stacked Borrows - StackedBorrows(Box>), + StackedBorrows(Box>), } -impl AllocExtra { - pub fn assert_sb(&self) -> &RefCell { - match self { - AllocExtra::StackedBorrows(ref sb) => sb, +impl machine::AllocExtra { + #[track_caller] + pub fn borrow_tracker_sb(&self) -> &RefCell { + match self.borrow_tracker { + Some(AllocState::StackedBorrows(ref sb)) => sb, + _ => panic!("expected Stacked Borrows borrow tracking, got something else"), } } - pub fn assert_sb_mut(&mut self) -> &mut RefCell { - match self { - AllocExtra::StackedBorrows(ref mut sb) => sb, + #[track_caller] + pub fn borrow_tracker_sb_mut(&mut self) -> &mut RefCell { + match self.borrow_tracker { + Some(AllocState::StackedBorrows(ref mut sb)) => sb, + _ => panic!("expected Stacked Borrows borrow tracking, got something else"), } } +} +impl AllocState { pub fn before_memory_read<'tcx>( &self, alloc_id: AllocId, @@ -318,7 +324,7 @@ impl AllocExtra { machine: &MiriMachine<'_, 'tcx>, ) -> InterpResult<'tcx> { match self { - AllocExtra::StackedBorrows(sb) => + AllocState::StackedBorrows(sb) => 
sb.borrow_mut().before_memory_read(alloc_id, prov_extra, range, machine), } } @@ -331,7 +337,7 @@ impl AllocExtra { machine: &mut MiriMachine<'_, 'tcx>, ) -> InterpResult<'tcx> { match self { - AllocExtra::StackedBorrows(sb) => + AllocState::StackedBorrows(sb) => sb.get_mut().before_memory_write(alloc_id, prov_extra, range, machine), } } @@ -344,22 +350,22 @@ impl AllocExtra { machine: &mut MiriMachine<'_, 'tcx>, ) -> InterpResult<'tcx> { match self { - AllocExtra::StackedBorrows(sb) => + AllocState::StackedBorrows(sb) => sb.get_mut().before_memory_deallocation(alloc_id, prov_extra, range, machine), } } pub fn remove_unreachable_tags(&self, tags: &FxHashSet) { match self { - AllocExtra::StackedBorrows(sb) => sb.borrow_mut().remove_unreachable_tags(tags), + AllocState::StackedBorrows(sb) => sb.borrow_mut().remove_unreachable_tags(tags), } } } -impl VisitTags for AllocExtra { +impl VisitTags for AllocState { fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) { match self { - AllocExtra::StackedBorrows(sb) => sb.visit_tags(visit), + AllocState::StackedBorrows(sb) => sb.visit_tags(visit), } } } diff --git a/src/borrow_tracker/stacked_borrows/mod.rs b/src/borrow_tracker/stacked_borrows/mod.rs index ec3be398a2c29..50c2ad75ca71e 100644 --- a/src/borrow_tracker/stacked_borrows/mod.rs +++ b/src/borrow_tracker/stacked_borrows/mod.rs @@ -25,7 +25,7 @@ mod stack; pub use stack::Stack; pub mod diagnostics; -pub type AllocExtra = Stacks; +pub type AllocState = Stacks; /// Extra per-allocation state. #[derive(Clone, Debug)] @@ -500,10 +500,6 @@ impl Stacks { })?; Ok(()) } - - fn expose_tag(&mut self, tag: BorTag) { - self.exposed_tags.insert(tag); - } } /// Retagging/reborrowing. There is some policy in here, such as which permissions @@ -567,10 +563,7 @@ trait EvalContextPrivExt<'mir: 'ecx, 'tcx: 'mir, 'ecx>: crate::MiriInterpCxExt<' // uncovers a non-supported `extern static`. let extra = this.get_alloc_extra(alloc_id)?; let mut stacked_borrows = extra - .borrow_tracker - .as_ref() - .expect("We should have borrow tracking data") - .assert_sb() + .borrow_tracker_sb() .borrow_mut(); // Note that we create a *second* `DiagnosticCxBuilder` below for the actual retag. // FIXME: can this be done cleaner? @@ -681,12 +674,7 @@ trait EvalContextPrivExt<'mir: 'ecx, 'tcx: 'mir, 'ecx>: crate::MiriInterpCxExt<' // We have to use shared references to alloc/memory_extra here since // `visit_freeze_sensitive` needs to access the global state. let alloc_extra = this.get_alloc_extra(alloc_id)?; - let mut stacked_borrows = alloc_extra - .borrow_tracker - .as_ref() - .expect("We should have borrow tracking data") - .assert_sb() - .borrow_mut(); + let mut stacked_borrows = alloc_extra.borrow_tracker_sb().borrow_mut(); this.visit_freeze_sensitive(place, size, |mut range, frozen| { // Adjust range. range.start += base_offset; @@ -736,12 +724,7 @@ trait EvalContextPrivExt<'mir: 'ecx, 'tcx: 'mir, 'ecx>: crate::MiriInterpCxExt<' // Note that this asserts that the allocation is mutable -- but since we are creating a // mutable pointer, that seems reasonable. 
let (alloc_extra, machine) = this.get_alloc_extra_mut(alloc_id)?; - let stacked_borrows = alloc_extra - .borrow_tracker - .as_mut() - .expect("We should have borrow tracking data") - .assert_sb_mut() - .get_mut(); + let stacked_borrows = alloc_extra.borrow_tracker_sb_mut().get_mut(); let item = Item::new(new_tag, perm, protect.is_some()); let range = alloc_range(base_offset, size); let global = machine.borrow_tracker.as_ref().unwrap().borrow(); @@ -993,13 +976,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { // uncovers a non-supported `extern static`. let alloc_extra = this.get_alloc_extra(alloc_id)?; trace!("Stacked Borrows tag {tag:?} exposed in {alloc_id:?}"); - alloc_extra - .borrow_tracker - .as_ref() - .expect("We should have borrow tracking data") - .assert_sb() - .borrow_mut() - .expose_tag(tag); + alloc_extra.borrow_tracker_sb().borrow_mut().exposed_tags.insert(tag); } AllocKind::Function | AllocKind::VTable | AllocKind::Dead => { // No stacked borrows on these allocations. @@ -1011,12 +988,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { fn print_stacks(&mut self, alloc_id: AllocId) -> InterpResult<'tcx> { let this = self.eval_context_mut(); let alloc_extra = this.get_alloc_extra(alloc_id)?; - let stacks = alloc_extra - .borrow_tracker - .as_ref() - .expect("We should have borrow tracking data") - .assert_sb() - .borrow(); + let stacks = alloc_extra.borrow_tracker_sb().borrow(); for (range, stack) in stacks.stacks.iter_all() { print!("{range:?}: ["); if let Some(bottom) = stack.unknown_bottom() { diff --git a/src/machine.rs b/src/machine.rs index 920ee3e1ef1f4..79106d538b7c4 100644 --- a/src/machine.rs +++ b/src/machine.rs @@ -39,7 +39,7 @@ pub const STACK_SIZE: u64 = 16 * PAGE_SIZE; // whatever /// Extra data stored with each stack frame pub struct FrameData<'tcx> { /// Extra data for Stacked Borrows. - pub borrow_tracker: Option, + pub borrow_tracker: Option, /// If this is Some(), then this is a special "catch unwind" frame (the frame of `try_fn` /// called by `try`). When this frame is popped during unwinding a panic, @@ -255,7 +255,7 @@ impl ProvenanceExtra { #[derive(Debug, Clone)] pub struct AllocExtra { /// Global state of the borrow tracker, if enabled. - pub borrow_tracker: Option, + pub borrow_tracker: Option, /// Data race detection via the use of a vector-clock, /// this is only added if it is enabled. 
pub data_race: Option, From a86b0807544406cd15abc806849dece9c35cbea6 Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Fri, 2 Dec 2022 14:44:41 +0100 Subject: [PATCH 19/22] rename some more types for consistency --- src/borrow_tracker/mod.rs | 2 +- src/concurrency/data_race.rs | 2 +- src/concurrency/thread.rs | 14 +++++++------- src/concurrency/weak_memory.rs | 2 +- src/helpers.rs | 2 +- src/lib.rs | 2 +- src/machine.rs | 26 +++++++++++++------------- src/shims/panic.rs | 2 +- 8 files changed, 26 insertions(+), 26 deletions(-) diff --git a/src/borrow_tracker/mod.rs b/src/borrow_tracker/mod.rs index 78332879e5ea1..10e6e252e94b7 100644 --- a/src/borrow_tracker/mod.rs +++ b/src/borrow_tracker/mod.rs @@ -200,7 +200,7 @@ impl GlobalStateInner { FrameState { call_id, protected_tags: SmallVec::new() } } - pub fn end_call(&mut self, frame: &machine::FrameData<'_>) { + pub fn end_call(&mut self, frame: &machine::FrameExtra<'_>) { for tag in &frame .borrow_tracker .as_ref() diff --git a/src/concurrency/data_race.rs b/src/concurrency/data_race.rs index 99288480386e1..dd2102043d290 100644 --- a/src/concurrency/data_race.rs +++ b/src/concurrency/data_race.rs @@ -59,7 +59,7 @@ use super::{ weak_memory::EvalContextExt as _, }; -pub type AllocExtra = VClockAlloc; +pub type AllocState = VClockAlloc; /// Valid atomic read-write orderings, alias of atomic::Ordering (not non-exhaustive). #[derive(Copy, Clone, PartialEq, Eq, Debug)] diff --git a/src/concurrency/thread.rs b/src/concurrency/thread.rs index 6ba93a13aaf23..03f9ed351fb69 100644 --- a/src/concurrency/thread.rs +++ b/src/concurrency/thread.rs @@ -113,7 +113,7 @@ pub struct Thread<'mir, 'tcx> { thread_name: Option>, /// The virtual call stack. - stack: Vec>>, + stack: Vec>>, /// The function to call when the stack ran empty, to figure out what to do next. /// Conceptually, this is the interpreter implementation of the things that happen 'after' the @@ -232,7 +232,7 @@ impl VisitTags for Thread<'_, '_> { } } -impl VisitTags for Frame<'_, '_, Provenance, FrameData<'_>> { +impl VisitTags for Frame<'_, '_, Provenance, FrameExtra<'_>> { fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) { let Frame { return_place, @@ -385,20 +385,20 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { } /// Borrow the stack of the active thread. - pub fn active_thread_stack(&self) -> &[Frame<'mir, 'tcx, Provenance, FrameData<'tcx>>] { + pub fn active_thread_stack(&self) -> &[Frame<'mir, 'tcx, Provenance, FrameExtra<'tcx>>] { &self.threads[self.active_thread].stack } /// Mutably borrow the stack of the active thread. 
fn active_thread_stack_mut( &mut self, - ) -> &mut Vec>> { + ) -> &mut Vec>> { &mut self.threads[self.active_thread].stack } pub fn all_stacks( &self, - ) -> impl Iterator>]> { + ) -> impl Iterator>]> { self.threads.iter().map(|t| &t.stack[..]) } @@ -921,7 +921,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { } #[inline] - fn active_thread_stack(&self) -> &[Frame<'mir, 'tcx, Provenance, FrameData<'tcx>>] { + fn active_thread_stack(&self) -> &[Frame<'mir, 'tcx, Provenance, FrameExtra<'tcx>>] { let this = self.eval_context_ref(); this.machine.threads.active_thread_stack() } @@ -929,7 +929,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { #[inline] fn active_thread_stack_mut( &mut self, - ) -> &mut Vec>> { + ) -> &mut Vec>> { let this = self.eval_context_mut(); this.machine.threads.active_thread_stack_mut() } diff --git a/src/concurrency/weak_memory.rs b/src/concurrency/weak_memory.rs index 4c32efcfa3515..391681444d9ba 100644 --- a/src/concurrency/weak_memory.rs +++ b/src/concurrency/weak_memory.rs @@ -93,7 +93,7 @@ use super::{ vector_clock::{VClock, VTimestamp, VectorIdx}, }; -pub type AllocExtra = StoreBufferAlloc; +pub type AllocState = StoreBufferAlloc; // Each store buffer must be bounded otherwise it will grow indefinitely. // However, bounding the store buffer means restricting the amount of weak diff --git a/src/helpers.rs b/src/helpers.rs index f0d8b6768810c..8f6a5fbc1f01c 100644 --- a/src/helpers.rs +++ b/src/helpers.rs @@ -988,7 +988,7 @@ impl<'mir, 'tcx> MiriMachine<'mir, 'tcx> { self.stack()[frame_idx].current_span() } - fn stack(&self) -> &[Frame<'mir, 'tcx, Provenance, machine::FrameData<'tcx>>] { + fn stack(&self) -> &[Frame<'mir, 'tcx, Provenance, machine::FrameExtra<'tcx>>] { self.threads.active_thread_stack() } diff --git a/src/lib.rs b/src/lib.rs index 2c427c166c16d..42519797976b7 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -106,7 +106,7 @@ pub use crate::eval::{ pub use crate::helpers::EvalContextExt as _; pub use crate::intptrcast::ProvenanceMode; pub use crate::machine::{ - AllocExtra, FrameData, MiriInterpCx, MiriInterpCxExt, MiriMachine, MiriMemoryKind, + AllocExtra, FrameExtra, MiriInterpCx, MiriInterpCxExt, MiriMachine, MiriMemoryKind, PrimitiveLayouts, Provenance, ProvenanceExtra, PAGE_SIZE, STACK_ADDR, STACK_SIZE, }; pub use crate::mono_hash_map::MonoHashMap; diff --git a/src/machine.rs b/src/machine.rs index 79106d538b7c4..c110229c985db 100644 --- a/src/machine.rs +++ b/src/machine.rs @@ -37,7 +37,7 @@ pub const STACK_ADDR: u64 = 32 * PAGE_SIZE; // not really about the "stack", but pub const STACK_SIZE: u64 = 16 * PAGE_SIZE; // whatever /// Extra data stored with each stack frame -pub struct FrameData<'tcx> { +pub struct FrameExtra<'tcx> { /// Extra data for Stacked Borrows. pub borrow_tracker: Option, @@ -58,10 +58,10 @@ pub struct FrameData<'tcx> { pub is_user_relevant: bool, } -impl<'tcx> std::fmt::Debug for FrameData<'tcx> { +impl<'tcx> std::fmt::Debug for FrameExtra<'tcx> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { // Omitting `timing`, it does not support `Debug`. 
- let FrameData { borrow_tracker, catch_unwind, timing: _, is_user_relevant: _ } = self; + let FrameExtra { borrow_tracker, catch_unwind, timing: _, is_user_relevant: _ } = self; f.debug_struct("FrameData") .field("borrow_tracker", borrow_tracker) .field("catch_unwind", catch_unwind) @@ -69,9 +69,9 @@ impl<'tcx> std::fmt::Debug for FrameData<'tcx> { } } -impl VisitTags for FrameData<'_> { +impl VisitTags for FrameExtra<'_> { fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) { - let FrameData { catch_unwind, borrow_tracker, timing: _, is_user_relevant: _ } = self; + let FrameExtra { catch_unwind, borrow_tracker, timing: _, is_user_relevant: _ } = self; catch_unwind.visit_tags(visit); borrow_tracker.visit_tags(visit); @@ -258,10 +258,10 @@ pub struct AllocExtra { pub borrow_tracker: Option, /// Data race detection via the use of a vector-clock, /// this is only added if it is enabled. - pub data_race: Option, + pub data_race: Option, /// Weak memory emulation via the use of store buffers, /// this is only added if it is enabled. - pub weak_memory: Option, + pub weak_memory: Option, } impl VisitTags for AllocExtra { @@ -736,7 +736,7 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> { type MemoryKind = MiriMemoryKind; type ExtraFnVal = Dlsym; - type FrameExtra = FrameData<'tcx>; + type FrameExtra = FrameExtra<'tcx>; type AllocExtra = AllocExtra; type Provenance = Provenance; @@ -908,14 +908,14 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> { .map(|bt| bt.borrow_mut().new_allocation(id, alloc.size(), kind, &ecx.machine)); let race_alloc = ecx.machine.data_race.as_ref().map(|data_race| { - data_race::AllocExtra::new_allocation( + data_race::AllocState::new_allocation( data_race, &ecx.machine.threads, alloc.size(), kind, ) }); - let buffer_alloc = ecx.machine.weak_memory.then(weak_memory::AllocExtra::new_allocation); + let buffer_alloc = ecx.machine.weak_memory.then(weak_memory::AllocState::new_allocation); let alloc: Allocation = alloc.adjust_from_tcx( &ecx.tcx, AllocExtra { borrow_tracker, data_race: race_alloc, weak_memory: buffer_alloc }, @@ -1070,7 +1070,7 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> { fn init_frame_extra( ecx: &mut InterpCx<'mir, 'tcx, Self>, frame: Frame<'mir, 'tcx, Provenance>, - ) -> InterpResult<'tcx, Frame<'mir, 'tcx, Provenance, FrameData<'tcx>>> { + ) -> InterpResult<'tcx, Frame<'mir, 'tcx, Provenance, FrameExtra<'tcx>>> { // Start recording our event before doing anything else let timing = if let Some(profiler) = ecx.machine.profiler.as_ref() { let fn_name = frame.instance.to_string(); @@ -1088,7 +1088,7 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> { let borrow_tracker = ecx.machine.borrow_tracker.as_ref(); - let extra = FrameData { + let extra = FrameExtra { borrow_tracker: borrow_tracker.map(|bt| bt.borrow_mut().new_frame(&ecx.machine)), catch_unwind: None, timing, @@ -1157,7 +1157,7 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> { #[inline(always)] fn after_stack_pop( ecx: &mut InterpCx<'mir, 'tcx, Self>, - mut frame: Frame<'mir, 'tcx, Provenance, FrameData<'tcx>>, + mut frame: Frame<'mir, 'tcx, Provenance, FrameExtra<'tcx>>, unwinding: bool, ) -> InterpResult<'tcx, StackPopJump> { if frame.extra.is_user_relevant { diff --git a/src/shims/panic.rs b/src/shims/panic.rs index 7c30845dc6756..db3e42facadd0 100644 --- a/src/shims/panic.rs +++ b/src/shims/panic.rs @@ -125,7 +125,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { 
fn handle_stack_pop_unwind( &mut self, - mut extra: FrameData<'tcx>, + mut extra: FrameExtra<'tcx>, unwinding: bool, ) -> InterpResult<'tcx, StackPopJump> { let this = self.eval_context_mut(); From 7e27e83bd7d2ca9a8ed26b5f6d207dd2f96cada6 Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Fri, 2 Dec 2022 14:31:56 +0100 Subject: [PATCH 20/22] fix ICE in pointer tracking --- .../stacked_borrows/diagnostics.rs | 23 ++++++++----------- src/diagnostics.rs | 17 ++++---------- 2 files changed, 13 insertions(+), 27 deletions(-) diff --git a/src/borrow_tracker/stacked_borrows/diagnostics.rs b/src/borrow_tracker/stacked_borrows/diagnostics.rs index c5eb2113f9f8e..9a7b38b13a3ad 100644 --- a/src/borrow_tracker/stacked_borrows/diagnostics.rs +++ b/src/borrow_tracker/stacked_borrows/diagnostics.rs @@ -455,23 +455,18 @@ impl<'history, 'ecx, 'mir, 'tcx> DiagnosticCx<'history, 'ecx, 'mir, 'tcx> { if !global.tracked_pointer_tags.contains(&item.tag()) { return; } - let summary = match self.operation { - Operation::Dealloc(_) => None, - Operation::Access(AccessOp { kind, tag, .. }) => Some((tag, kind)), + let cause = match self.operation { + Operation::Dealloc(_) => format!(" due to deallocation"), + Operation::Access(AccessOp { kind, tag, .. }) => + format!(" due to {kind:?} access for {tag:?}"), Operation::Retag(RetagOp { orig_tag, permission, .. }) => { - let kind = match permission - .expect("start_grant should set the current permission before popping a tag") - { - Permission::SharedReadOnly => AccessKind::Read, - Permission::Unique => AccessKind::Write, - Permission::SharedReadWrite | Permission::Disabled => { - panic!("Only SharedReadOnly and Unique retags can pop tags"); - } - }; - Some((orig_tag, kind)) + let permission = permission + .expect("start_grant should set the current permission before popping a tag"); + format!(" due to {permission:?} retag from {orig_tag:?}") } }; - self.machine.emit_diagnostic(NonHaltingDiagnostic::PoppedPointerTag(*item, summary)); + + self.machine.emit_diagnostic(NonHaltingDiagnostic::PoppedPointerTag(*item, cause)); } } diff --git a/src/diagnostics.rs b/src/diagnostics.rs index bc771ca057f09..074fa032dcc42 100644 --- a/src/diagnostics.rs +++ b/src/diagnostics.rs @@ -6,7 +6,7 @@ use log::trace; use rustc_span::{source_map::DUMMY_SP, SpanData, Symbol}; use rustc_target::abi::{Align, Size}; -use crate::borrow_tracker::{stacked_borrows::diagnostics::TagHistory, AccessKind}; +use crate::borrow_tracker::stacked_borrows::diagnostics::TagHistory; use crate::*; /// Details of premature program termination. @@ -67,9 +67,8 @@ pub enum NonHaltingDiagnostic { /// /// new_kind is `None` for base tags. CreatedPointerTag(NonZeroU64, Option, Option<(AllocId, AllocRange, ProvenanceExtra)>), - /// This `Item` was popped from the borrow stack, either due to an access with the given tag or - /// a deallocation when the second argument is `None`. - PoppedPointerTag(Item, Option<(ProvenanceExtra, AccessKind)>), + /// This `Item` was popped from the borrow stack. The string explains the reason. 
+ PoppedPointerTag(Item, String), CreatedCallId(CallId), CreatedAlloc(AllocId, Size, Align, MemoryKind), FreedAlloc(AllocId), @@ -399,15 +398,7 @@ impl<'mir, 'tcx> MiriMachine<'mir, 'tcx> { format!( "created tag {tag:?} for {kind} at {alloc_id:?}{range:?} derived from {orig_tag:?}" ), - PoppedPointerTag(item, tag) => - match tag { - None => format!("popped tracked tag for item {item:?} due to deallocation",), - Some((tag, access)) => { - format!( - "popped tracked tag for item {item:?} due to {access:?} access for {tag:?}", - ) - } - }, + PoppedPointerTag(item, cause) => format!("popped tracked tag for item {item:?}{cause}"), CreatedCallId(id) => format!("function call with id {id}"), CreatedAlloc(AllocId(id), size, align, kind) => format!( From b925a0302d81a4db2b07f799bb54872d6f682f6a Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Fri, 2 Dec 2022 18:14:53 +0100 Subject: [PATCH 21/22] Preparing for merge from rustc --- rust-version | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rust-version b/rust-version index 851ef39274094..0a6b9417cc2e2 100644 --- a/rust-version +++ b/rust-version @@ -1 +1 @@ -454784afba5bf35b5ff14ada0e31265ad1d75e73 +cef44f53034eac46be3a0e3eec7b2b3d4ef5140b From 90f8a0ca1fadd61400a64fd6b300524b994877a2 Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Sat, 3 Dec 2022 13:24:04 +0100 Subject: [PATCH 22/22] clippy --- cargo-miri/src/phases.rs | 3 +-- src/bin/miri.rs | 5 +---- src/concurrency/vector_clock.rs | 36 +++++++++------------------------ src/helpers.rs | 10 +++------ src/shims/foreign_items.rs | 3 +-- 5 files changed, 15 insertions(+), 42 deletions(-) diff --git a/cargo-miri/src/phases.rs b/cargo-miri/src/phases.rs index 91945ad39f50b..2bffff4772270 100644 --- a/cargo-miri/src/phases.rs +++ b/cargo-miri/src/phases.rs @@ -486,8 +486,7 @@ pub fn phase_runner(mut binary_args: impl Iterator, phase: Runner continue; } else if verbose > 0 { eprintln!( - "[cargo-miri runner] Overwriting run-time env var {:?}={:?} with build-time value {:?}", - name, old_val, val + "[cargo-miri runner] Overwriting run-time env var {name:?}={old_val:?} with build-time value {val:?}" ); } } diff --git a/src/bin/miri.rs b/src/bin/miri.rs index 9ac04c4930f26..fce95b987f729 100644 --- a/src/bin/miri.rs +++ b/src/bin/miri.rs @@ -192,10 +192,7 @@ fn init_late_loggers(tcx: TyCtxt<'_>) { if log::Level::from_str(&var).is_ok() { env::set_var( "RUSTC_LOG", - format!( - "rustc_middle::mir::interpret={0},rustc_const_eval::interpret={0}", - var - ), + format!("rustc_middle::mir::interpret={var},rustc_const_eval::interpret={var}"), ); } else { env::set_var("RUSTC_LOG", &var); diff --git a/src/concurrency/vector_clock.rs b/src/concurrency/vector_clock.rs index e7e5b35ac2cd2..ba04991a5889d 100644 --- a/src/concurrency/vector_clock.rs +++ b/src/concurrency/vector_clock.rs @@ -404,67 +404,49 @@ mod tests { assert_eq!( alt_compare, o.map(Ordering::reverse), - "Invalid alt comparison\n l: {:?}\n r: {:?}", - l, - r + "Invalid alt comparison\n l: {l:?}\n r: {r:?}" ); //Test operators with faster implementations assert_eq!( matches!(compare, Some(Ordering::Less)), l < r, - "Invalid (<):\n l: {:?}\n r: {:?}", - l, - r + "Invalid (<):\n l: {l:?}\n r: {r:?}" ); assert_eq!( matches!(compare, Some(Ordering::Less) | Some(Ordering::Equal)), l <= r, - "Invalid (<=):\n l: {:?}\n r: {:?}", - l, - r + "Invalid (<=):\n l: {l:?}\n r: {r:?}" ); assert_eq!( matches!(compare, Some(Ordering::Greater)), l > r, - "Invalid (>):\n l: {:?}\n r: {:?}", - l, - r + "Invalid (>):\n l: {l:?}\n r: {r:?}" ); assert_eq!( 
matches!(compare, Some(Ordering::Greater) | Some(Ordering::Equal)), l >= r, - "Invalid (>=):\n l: {:?}\n r: {:?}", - l, - r + "Invalid (>=):\n l: {l:?}\n r: {r:?}" ); assert_eq!( matches!(alt_compare, Some(Ordering::Less)), r < l, - "Invalid alt (<):\n l: {:?}\n r: {:?}", - l, - r + "Invalid alt (<):\n l: {l:?}\n r: {r:?}" ); assert_eq!( matches!(alt_compare, Some(Ordering::Less) | Some(Ordering::Equal)), r <= l, - "Invalid alt (<=):\n l: {:?}\n r: {:?}", - l, - r + "Invalid alt (<=):\n l: {l:?}\n r: {r:?}" ); assert_eq!( matches!(alt_compare, Some(Ordering::Greater)), r > l, - "Invalid alt (>):\n l: {:?}\n r: {:?}", - l, - r + "Invalid alt (>):\n l: {l:?}\n r: {r:?}" ); assert_eq!( matches!(alt_compare, Some(Ordering::Greater) | Some(Ordering::Equal)), r >= l, - "Invalid alt (>=):\n l: {:?}\n r: {:?}", - l, - r + "Invalid alt (>=):\n l: {l:?}\n r: {r:?}" ); } } diff --git a/src/helpers.rs b/src/helpers.rs index 8f6a5fbc1f01c..7fb2539ca5a67 100644 --- a/src/helpers.rs +++ b/src/helpers.rs @@ -554,9 +554,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { assert_eq!( self.eval_context_ref().tcx.sess.target.os, target_os, - "`{}` is only available on the `{}` target OS", - name, - target_os, + "`{name}` is only available on the `{target_os}` target OS", ) } @@ -566,8 +564,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { fn assert_target_os_is_unix(&self, name: &str) { assert!( target_os_is_unix(self.eval_context_ref().tcx.sess.target.os.as_ref()), - "`{}` is only available for supported UNIX family targets", - name, + "`{name}` is only available for supported UNIX family targets", ); } @@ -1019,8 +1016,7 @@ where pub fn isolation_abort_error<'tcx>(name: &str) -> InterpResult<'tcx> { throw_machine_stop!(TerminationInfo::UnsupportedInIsolation(format!( - "{} not available when isolation is enabled", - name, + "{name} not available when isolation is enabled", ))) } diff --git a/src/shims/foreign_items.rs b/src/shims/foreign_items.rs index f72521f64adaf..8370e02b588af 100644 --- a/src/shims/foreign_items.rs +++ b/src/shims/foreign_items.rs @@ -299,8 +299,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { return Ok(Some(body)); } this.handle_unsupported(format!( - "can't call (diverging) foreign function: {}", - link_name + "can't call (diverging) foreign function: {link_name}" ))?; return Ok(None); }
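
A note on the `VisitTags` pattern that recurs throughout these patches: every piece of interpreter state that can hold provenance implements `VisitTags` (see `src/tag_gc.rs` above) by exhaustively destructuring itself and forwarding the callback to each field that may contain a `BorTag`; the tag GC keeps every visited tag and prunes the rest from the borrow stacks. The following is a minimal illustrative sketch of such an implementation after the `SbTag` -> `BorTag` rename, not code from the patches; `MyCallback` and its fields are hypothetical stand-ins for the real `Callback` types in the sync shims.

```rust
// Hypothetical example type; assumes the Miri crate-wide prelude (`use crate::*;`) is in scope.
struct MyCallback<'tcx> {
    thread: ThreadId,                 // plain data, carries no provenance
    dest: PlaceTy<'tcx, Provenance>,  // may hold pointers, and therefore tags
}

impl VisitTags for MyCallback<'_> {
    fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) {
        // Exhaustive destructuring: adding a field later fails to compile here,
        // forcing a decision on whether the new field can carry tags.
        let MyCallback { thread: _, dest } = self;
        dest.visit_tags(visit);
    }
}
```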