From c9430d967c34a2e4fb5444ec6a4fcd01fbff2666 Mon Sep 17 00:00:00 2001
From: Ben Kimock
Date: Thu, 3 Nov 2022 00:09:00 -0400
Subject: [PATCH] Track local frames incrementally during execution

---
 src/concurrency/thread.rs          | 61 ++++++++++++++++++++--
 src/helpers.rs                     | 63 ++++-------------------
 src/lib.rs                         |  2 +-
 src/machine.rs                     | 41 +++++++++------
 src/stacked_borrows/diagnostics.rs | 83 +++++++++++-------------------
 src/stacked_borrows/mod.rs         | 55 ++++++++------------
 6 files changed, 147 insertions(+), 158 deletions(-)

diff --git a/src/concurrency/thread.rs b/src/concurrency/thread.rs
index 8fbee9a352..ddaf04f1b5 100644
--- a/src/concurrency/thread.rs
+++ b/src/concurrency/thread.rs
@@ -1,5 +1,6 @@
 //! Implements threads.
 
+use std::cell::Cell;
 use std::cell::RefCell;
 use std::collections::hash_map::Entry;
 use std::num::TryFromIntError;
@@ -118,6 +119,17 @@ pub struct Thread<'mir, 'tcx> {
     /// The virtual call stack.
     stack: Vec<Frame<'mir, 'tcx, Provenance, FrameData<'tcx>>>,
 
+    /// The index of the topmost local frame in `stack`. If `Some`, this field must contain
+    /// the value produced by `compute_top_local_frame`.
+    /// This field is a cache to reduce how often we call that method. The cache is invalidated by
+    /// mutation of the stack, so calls to `active_thread_stack_mut` set this to `None`.
+    ///
+    /// The outer `Option` represents the validity of the cache, and the inner `Option` represents
+    /// whether or not there is a local frame. Being able to differentiate between knowing that we
+    /// have no local frame and not knowing whether there is a local frame reduces how often the
+    /// cache is invalidated.
+    top_local_frame: Cell<Option<Option<usize>>>,
+
     /// The join status.
     join_status: ThreadJoinStatus,
 
@@ -147,6 +159,35 @@ impl<'mir, 'tcx> Thread<'mir, 'tcx> {
     fn thread_name(&self) -> &[u8] {
         if let Some(ref thread_name) = self.thread_name { thread_name } else { b"<unnamed>" }
     }
+
+    fn compute_top_local_frame(&self) -> Option<usize> {
+        self.stack
+            .iter()
+            .enumerate()
+            .rev()
+            .find_map(|(idx, frame)| if frame.extra.is_local { Some(idx) } else { None })
+    }
+
+    pub fn top_local_frame(&self) -> usize {
+        let top_local = if let Some(idx) = self.top_local_frame.get() {
+            debug_assert_eq!(idx, self.compute_top_local_frame());
+            idx
+        } else {
+            let idx = self.compute_top_local_frame();
+            self.top_local_frame.set(Some(idx));
+            idx
+        };
+        top_local.unwrap_or_else(|| self.stack.len().saturating_sub(1))
+    }
+
+    pub fn set_top_local_frame(&mut self, frame_idx: Option<usize>) {
+        if let Some(idx) = frame_idx {
+            self.top_local_frame.set(Some(Some(idx)));
+            debug_assert_eq!(Some(idx), self.compute_top_local_frame());
+        } else {
+            self.top_local_frame.set(None);
+        }
+    }
 }
 
 impl<'mir, 'tcx> std::fmt::Debug for Thread<'mir, 'tcx> {
@@ -167,6 +208,7 @@ impl<'mir, 'tcx> Default for Thread<'mir, 'tcx> {
             state: ThreadState::Enabled,
             thread_name: None,
             stack: Vec::new(),
+            top_local_frame: Cell::new(None),
             join_status: ThreadJoinStatus::Joinable,
             panic_payload: None,
             last_error: None,
@@ -184,8 +226,15 @@ impl<'mir, 'tcx> Thread<'mir, 'tcx> {
 
 impl VisitTags for Thread<'_, '_> {
     fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
-        let Thread { panic_payload, last_error, stack, state: _, thread_name: _, join_status: _ } =
-            self;
+        let Thread {
+            panic_payload,
+            last_error,
+            stack,
+            top_local_frame: _,
+            state: _,
+            thread_name: _,
+            join_status: _,
+        } = self;
 
         panic_payload.visit_tags(visit);
         last_error.visit_tags(visit);
@@ -414,10 +463,16 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> {
     }
 
     /// Get a shared borrow of the currently active thread.
-    fn active_thread_ref(&self) -> &Thread<'mir, 'tcx> {
+    pub fn active_thread_ref(&self) -> &Thread<'mir, 'tcx> {
         &self.threads[self.active_thread]
     }
 
+    /// Get the index of the topmost local frame in the active thread's stack,
+    /// skipping frames that do not belong to a local crate.
+    pub fn top_local_frame(&self) -> usize {
+        self.active_thread_ref().top_local_frame()
+    }
+
     /// Mark the thread as detached, which means that no other thread will try
     /// to join it and the thread is responsible for cleaning up.
     ///
diff --git a/src/helpers.rs b/src/helpers.rs
index cd5e989b43..6ff972dc78 100644
--- a/src/helpers.rs
+++ b/src/helpers.rs
@@ -936,31 +936,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
 }
 
 impl<'mir, 'tcx> MiriMachine<'mir, 'tcx> {
-    pub fn current_span(&self) -> CurrentSpan<'_, 'mir, 'tcx> {
-        CurrentSpan { current_frame_idx: None, machine: self }
-    }
-}
-
-/// A `CurrentSpan` should be created infrequently (ideally once) per interpreter step. It does
-/// nothing on creation, but when `CurrentSpan::get` is called, searches the current stack for the
-/// topmost frame which corresponds to a local crate, and returns the current span in that frame.
-/// The result of that search is cached so that later calls are approximately free.
-#[derive(Clone)]
-pub struct CurrentSpan<'a, 'mir, 'tcx> {
-    current_frame_idx: Option<usize>,
-    machine: &'a MiriMachine<'mir, 'tcx>,
-}
-
-impl<'a, 'mir: 'a, 'tcx: 'a + 'mir> CurrentSpan<'a, 'mir, 'tcx> {
-    pub fn machine(&self) -> &'a MiriMachine<'mir, 'tcx> {
-        self.machine
-    }
-
     /// Get the current span, skipping non-local frames.
     /// This function is backed by a cache, and can be assumed to be very fast.
-    pub fn get(&mut self) -> Span {
-        let idx = self.current_frame_idx();
-        self.stack().get(idx).map(Frame::current_span).unwrap_or(rustc_span::DUMMY_SP)
+    pub fn current_span(&self) -> Span {
+        self.stack()
+            .get(self.top_local_frame())
+            .map(Frame::current_span)
+            .unwrap_or(rustc_span::DUMMY_SP)
     }
 
     /// Returns the span of the *caller* of the current operation, again
@@ -968,46 +950,21 @@
     /// current operation is not in a local crate.
     /// This is useful when we are processing something which occurs on function-entry and we want
     /// to point at the call to the function, not the function definition generally.
-    pub fn get_caller(&mut self) -> Span {
+    pub fn caller_span(&self) -> Span {
         // We need to go down at least to the caller (len - 2), or however
         // far we have to go to find a frame in a local crate.
-        let local_frame_idx = self.current_frame_idx();
+        let local_frame_idx = self.top_local_frame();
         let stack = self.stack();
         let idx = cmp::min(local_frame_idx, stack.len().saturating_sub(2));
         stack.get(idx).map(Frame::current_span).unwrap_or(rustc_span::DUMMY_SP)
     }
 
     fn stack(&self) -> &[Frame<'mir, 'tcx, Provenance, machine::FrameData<'tcx>>] {
-        self.machine.threads.active_thread_stack()
-    }
-
-    fn current_frame_idx(&mut self) -> usize {
-        *self
-            .current_frame_idx
-            .get_or_insert_with(|| Self::compute_current_frame_index(self.machine))
+        self.threads.active_thread_stack()
     }
 
-    // Find the position of the inner-most frame which is part of the crate being
-    // compiled/executed, part of the Cargo workspace, and is also not #[track_caller].
-    #[inline(never)]
-    fn compute_current_frame_index(machine: &MiriMachine<'_, '_>) -> usize {
-        machine
-            .threads
-            .active_thread_stack()
-            .iter()
-            .enumerate()
-            .rev()
-            .find_map(|(idx, frame)| {
-                let def_id = frame.instance.def_id();
-                if (def_id.is_local() || machine.local_crates.contains(&def_id.krate))
-                    && !frame.instance.def.requires_caller_location(machine.tcx)
-                {
-                    Some(idx)
-                } else {
-                    None
-                }
-            })
-            .unwrap_or(0)
+    fn top_local_frame(&self) -> usize {
+        self.threads.active_thread_ref().top_local_frame()
     }
 }
 
diff --git a/src/lib.rs b/src/lib.rs
index 66df0d737c..8913f8aa10 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -97,7 +97,7 @@ pub use crate::diagnostics::{
 pub use crate::eval::{
     create_ecx, eval_entry, AlignmentCheck, BacktraceStyle, IsolatedOp, MiriConfig, RejectOpWith,
 };
-pub use crate::helpers::{CurrentSpan, EvalContextExt as _};
+pub use crate::helpers::EvalContextExt as _;
 pub use crate::intptrcast::ProvenanceMode;
 pub use crate::machine::{
     AllocExtra, FrameData, MiriInterpCx, MiriInterpCxExt, MiriMachine, MiriMemoryKind,
diff --git a/src/machine.rs b/src/machine.rs
index 8243ccd90a..8e1ab6c301 100644
--- a/src/machine.rs
+++ b/src/machine.rs
@@ -50,12 +50,14 @@ pub struct FrameData<'tcx> {
     /// for the start of this frame. When we finish executing this frame,
     /// we use this to register a completed event with `measureme`.
     pub timing: Option<measureme::DetachedTiming>,
+
+    pub is_local: bool,
 }
 
 impl<'tcx> std::fmt::Debug for FrameData<'tcx> {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         // Omitting `timing`, it does not support `Debug`.
-        let FrameData { stacked_borrows, catch_unwind, timing: _ } = self;
+        let FrameData { stacked_borrows, catch_unwind, timing: _, is_local: _ } = self;
         f.debug_struct("FrameData")
             .field("stacked_borrows", stacked_borrows)
             .field("catch_unwind", catch_unwind)
@@ -65,7 +67,7 @@ impl<'tcx> std::fmt::Debug for FrameData<'tcx> {
 
 impl VisitTags for FrameData<'_> {
     fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
-        let FrameData { catch_unwind, stacked_borrows, timing: _ } = self;
+        let FrameData { catch_unwind, stacked_borrows, timing: _, is_local: _ } = self;
 
         catch_unwind.visit_tags(visit);
         stacked_borrows.visit_tags(visit);
@@ -895,13 +897,7 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> {
 
         let alloc = alloc.into_owned();
         let stacks = ecx.machine.stacked_borrows.as_ref().map(|stacked_borrows| {
-            Stacks::new_allocation(
-                id,
-                alloc.size(),
-                stacked_borrows,
-                kind,
-                ecx.machine.current_span(),
-            )
+            Stacks::new_allocation(id, alloc.size(), stacked_borrows, kind, &ecx.machine)
         });
         let race_alloc = ecx.machine.data_race.as_ref().map(|data_race| {
             data_race::AllocExtra::new_allocation(
@@ -1016,8 +1012,7 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> {
                 prov_extra,
                 range,
                 machine.stacked_borrows.as_ref().unwrap(),
-                machine.current_span(),
-                &machine.threads,
+                machine,
             )?;
         }
         if let Some(weak_memory) = &alloc_extra.weak_memory {
@@ -1048,8 +1043,7 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> {
                 prov_extra,
                 range,
                 machine.stacked_borrows.as_ref().unwrap(),
-                machine.current_span(),
-                &machine.threads,
+                machine,
             )?;
         }
         if let Some(weak_memory) = &alloc_extra.weak_memory {
@@ -1083,8 +1077,7 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> {
                 prove_extra,
                 range,
                 machine.stacked_borrows.as_ref().unwrap(),
-                machine.current_span(),
-                &machine.threads,
+                machine,
             )
         } else {
             Ok(())
@@ -1120,13 +1113,19 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> {
             None
         };
 
+        let def_id = frame.instance.def_id();
+        let is_local = (def_id.is_local() || ecx.machine.local_crates.contains(&def_id.krate))
+            && !frame.instance.def.requires_caller_location(ecx.machine.tcx);
+
         let stacked_borrows = ecx.machine.stacked_borrows.as_ref();
 
         let extra = FrameData {
             stacked_borrows: stacked_borrows.map(|sb| sb.borrow_mut().new_frame(&ecx.machine)),
             catch_unwind: None,
             timing,
+            is_local,
         };
+
         Ok(frame.with_extra(extra))
     }
 
@@ -1174,6 +1173,13 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> {
 
     #[inline(always)]
     fn after_stack_push(ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx> {
+        if ecx.frame().extra.is_local {
+            // We just pushed a local frame, so we know that the topmost local frame is the topmost
+            // frame. If we push a non-local frame, there's no need to do anything.
+            let stack_len = ecx.active_thread_stack().len();
+            ecx.active_thread_mut().set_top_local_frame(Some(stack_len - 1));
+        }
+
         if ecx.machine.stacked_borrows.is_some() { ecx.retag_return_place() } else { Ok(()) }
     }
 
@@ -1183,6 +1189,11 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> {
         mut frame: Frame<'mir, 'tcx, Provenance, FrameData<'tcx>>,
         unwinding: bool,
     ) -> InterpResult<'tcx, StackPopJump> {
+        if frame.extra.is_local {
+            // All that we store is whether or not the frame we just removed is local, so we no
+            // longer know which frame, if any, is the next topmost local frame.
+            ecx.active_thread_mut().set_top_local_frame(None);
+        }
         let timing = frame.extra.timing.take();
         if let Some(stacked_borrows) = &ecx.machine.stacked_borrows {
             stacked_borrows.borrow_mut().end_call(&frame.extra);
diff --git a/src/stacked_borrows/diagnostics.rs b/src/stacked_borrows/diagnostics.rs
index d3843b0303..e2ceabf7d4 100644
--- a/src/stacked_borrows/diagnostics.rs
+++ b/src/stacked_borrows/diagnostics.rs
@@ -5,7 +5,6 @@ use rustc_middle::mir::interpret::{alloc_range, AllocId, AllocRange};
 use rustc_span::{Span, SpanData};
 use rustc_target::abi::Size;
 
-use crate::helpers::CurrentSpan;
 use crate::stacked_borrows::{err_sb_ub, AccessKind, GlobalStateInner, Permission};
 use crate::*;
 
@@ -110,42 +109,29 @@ pub struct TagHistory {
     pub protected: Option<(String, SpanData)>,
 }
 
-pub struct DiagnosticCxBuilder<'span, 'ecx, 'mir, 'tcx> {
+pub struct DiagnosticCxBuilder<'ecx, 'mir, 'tcx> {
     operation: Operation,
-    // 'span cannot be merged with any other lifetime since they appear invariantly, under the
-    // mutable ref.
-    current_span: &'span mut CurrentSpan<'ecx, 'mir, 'tcx>,
-    threads: &'ecx ThreadManager<'mir, 'tcx>,
+    machine: &'ecx MiriMachine<'mir, 'tcx>,
 }
 
-pub struct DiagnosticCx<'span, 'history, 'ecx, 'mir, 'tcx> {
+pub struct DiagnosticCx<'history, 'ecx, 'mir, 'tcx> {
     operation: Operation,
-    // 'span and 'history cannot be merged, since when we call `unbuild` we need
-    // to return the exact 'span that was used when calling `build`.
-    current_span: &'span mut CurrentSpan<'ecx, 'mir, 'tcx>,
-    threads: &'ecx ThreadManager<'mir, 'tcx>,
+    machine: &'ecx MiriMachine<'mir, 'tcx>,
     history: &'history mut AllocHistory,
     offset: Size,
 }
 
-impl<'span, 'ecx, 'mir, 'tcx> DiagnosticCxBuilder<'span, 'ecx, 'mir, 'tcx> {
+impl<'ecx, 'mir, 'tcx> DiagnosticCxBuilder<'ecx, 'mir, 'tcx> {
     pub fn build<'history>(
         self,
         history: &'history mut AllocHistory,
         offset: Size,
-    ) -> DiagnosticCx<'span, 'history, 'ecx, 'mir, 'tcx> {
-        DiagnosticCx {
-            operation: self.operation,
-            current_span: self.current_span,
-            threads: self.threads,
-            history,
-            offset,
-        }
+    ) -> DiagnosticCx<'history, 'ecx, 'mir, 'tcx> {
+        DiagnosticCx { operation: self.operation, machine: self.machine, history, offset }
     }
 
     pub fn retag(
-        current_span: &'span mut CurrentSpan<'ecx, 'mir, 'tcx>,
-        threads: &'ecx ThreadManager<'mir, 'tcx>,
+        machine: &'ecx MiriMachine<'mir, 'tcx>,
         cause: RetagCause,
         new_tag: SbTag,
         orig_tag: ProvenanceExtra,
@@ -154,46 +140,36 @@ impl<'span, 'ecx, 'mir, 'tcx> DiagnosticCxBuilder<'span, 'ecx, 'mir, 'tcx> {
         let operation =
             Operation::Retag(RetagOp { cause, new_tag, orig_tag, range, permission: None });
 
-        DiagnosticCxBuilder { current_span, threads, operation }
+        DiagnosticCxBuilder { machine, operation }
     }
 
     pub fn read(
-        current_span: &'span mut CurrentSpan<'ecx, 'mir, 'tcx>,
-        threads: &'ecx ThreadManager<'mir, 'tcx>,
+        machine: &'ecx MiriMachine<'mir, 'tcx>,
         tag: ProvenanceExtra,
         range: AllocRange,
     ) -> Self {
         let operation = Operation::Access(AccessOp { kind: AccessKind::Read, tag, range });
-        DiagnosticCxBuilder { current_span, threads, operation }
+        DiagnosticCxBuilder { machine, operation }
     }
 
     pub fn write(
-        current_span: &'span mut CurrentSpan<'ecx, 'mir, 'tcx>,
-        threads: &'ecx ThreadManager<'mir, 'tcx>,
+        machine: &'ecx MiriMachine<'mir, 'tcx>,
        tag: ProvenanceExtra,
         range: AllocRange,
     ) -> Self {
         let operation = Operation::Access(AccessOp { kind: AccessKind::Write, tag, range });
-        DiagnosticCxBuilder { current_span, threads, operation }
+        DiagnosticCxBuilder { machine, operation }
     }
 
-    pub fn dealloc(
-        current_span: &'span mut CurrentSpan<'ecx, 'mir, 'tcx>,
-        threads: &'ecx ThreadManager<'mir, 'tcx>,
-        tag: ProvenanceExtra,
-    ) -> Self {
+    pub fn dealloc(machine: &'ecx MiriMachine<'mir, 'tcx>, tag: ProvenanceExtra) -> Self {
         let operation = Operation::Dealloc(DeallocOp { tag });
-        DiagnosticCxBuilder { current_span, threads, operation }
+        DiagnosticCxBuilder { machine, operation }
     }
 }
 
-impl<'span, 'history, 'ecx, 'mir, 'tcx> DiagnosticCx<'span, 'history, 'ecx, 'mir, 'tcx> {
-    pub fn unbuild(self) -> DiagnosticCxBuilder<'span, 'ecx, 'mir, 'tcx> {
-        DiagnosticCxBuilder {
-            operation: self.operation,
-            current_span: self.current_span,
-            threads: self.threads,
-        }
+impl<'history, 'ecx, 'mir, 'tcx> DiagnosticCx<'history, 'ecx, 'mir, 'tcx> {
+    pub fn unbuild(self) -> DiagnosticCxBuilder<'ecx, 'mir, 'tcx> {
+        DiagnosticCxBuilder { machine: self.machine, operation: self.operation }
     }
 }
 
@@ -234,10 +210,10 @@ struct DeallocOp {
 }
 
 impl AllocHistory {
-    pub fn new(id: AllocId, item: Item, current_span: &mut CurrentSpan<'_, '_, '_>) -> Self {
+    pub fn new(id: AllocId, item: Item, machine: &MiriMachine<'_, '_>) -> Self {
         Self {
             id,
-            base: (item, current_span.get()),
+            base: (item, machine.current_span()),
             creations: SmallVec::new(),
             invalidations: SmallVec::new(),
             protectors: SmallVec::new(),
@@ -245,7 +221,7 @@ impl AllocHistory {
     }
 }
 
-impl<'span, 'history, 'ecx, 'mir, 'tcx> DiagnosticCx<'span, 'history, 'ecx, 'mir, 'tcx> {
+impl<'history, 'ecx, 'mir, 'tcx> DiagnosticCx<'history, 'ecx, 'mir, 'tcx> {
     pub fn start_grant(&mut self, perm: Permission) {
         let Operation::Retag(op) = &mut self.operation else {
             unreachable!("start_grant must only be called during a retag, this is: {:?}", self.operation)
@@ -274,15 +250,17 @@ impl<'span, 'history, 'ecx, 'mir, 'tcx> DiagnosticCx<'span, 'history, 'ecx, 'mir
         let Operation::Retag(op) = &self.operation else {
             unreachable!("log_creation must only be called during a retag")
         };
-        self.history.creations.push(Creation { retag: op.clone(), span: self.current_span.get() });
+        self.history
+            .creations
+            .push(Creation { retag: op.clone(), span: self.machine.current_span() });
     }
 
     pub fn log_invalidation(&mut self, tag: SbTag) {
-        let mut span = self.current_span.get();
+        let mut span = self.machine.current_span();
         let (range, cause) = match &self.operation {
             Operation::Retag(RetagOp { cause, range, permission, .. }) => {
                 if *cause == RetagCause::FnEntry {
-                    span = self.current_span.get_caller();
+                    span = self.machine.caller_span();
                 }
                 (*range, InvalidationCause::Retag(permission.unwrap(), *cause))
             }
@@ -297,7 +275,9 @@ impl<'span, 'history, 'ecx, 'mir, 'tcx> DiagnosticCx<'span, 'history, 'ecx, 'mir
         let Operation::Retag(op) = &self.operation else {
             unreachable!("Protectors can only be created during a retag")
         };
-        self.history.protectors.push(Protection { tag: op.new_tag, span: self.current_span.get() });
+        self.history
+            .protectors
+            .push(Protection { tag: op.new_tag, span: self.machine.current_span() });
     }
 
     pub fn get_logs_relevant_to(
@@ -410,6 +390,7 @@ impl<'span, 'history, 'ecx, 'mir, 'tcx> DiagnosticCx<'span, 'history, 'ecx, 'mir
     #[inline(never)] // This is only called on fatal code paths
     pub fn protector_error(&self, item: &Item) -> InterpError<'tcx> {
         let call_id = self
+            .machine
             .threads
             .all_stacks()
             .flatten()
@@ -478,9 +459,7 @@ impl<'span, 'history, 'ecx, 'mir, 'tcx> DiagnosticCx<'span, 'history, 'ecx, 'mir
                 Some((orig_tag, kind))
             }
         };
-        self.current_span
-            .machine()
-            .emit_diagnostic(NonHaltingDiagnostic::PoppedPointerTag(*item, summary));
+        self.machine.emit_diagnostic(NonHaltingDiagnostic::PoppedPointerTag(*item, summary));
     }
 }
 
diff --git a/src/stacked_borrows/mod.rs b/src/stacked_borrows/mod.rs
index f2e2df5ad0..577de69696 100644
--- a/src/stacked_borrows/mod.rs
+++ b/src/stacked_borrows/mod.rs
@@ -313,7 +313,7 @@ impl<'tcx> Stack {
     fn item_popped(
         item: &Item,
         global: &GlobalStateInner,
-        dcx: &mut DiagnosticCx<'_, '_, '_, '_, 'tcx>,
+        dcx: &mut DiagnosticCx<'_, '_, '_, 'tcx>,
     ) -> InterpResult<'tcx> {
         if !global.tracked_pointer_tags.is_empty() {
             dcx.check_tracked_tag_popped(item, global);
@@ -351,7 +351,7 @@ impl<'tcx> Stack {
         access: AccessKind,
         tag: ProvenanceExtra,
         global: &mut GlobalStateInner,
-        dcx: &mut DiagnosticCx<'_, '_, '_, '_, 'tcx>,
+        dcx: &mut DiagnosticCx<'_, '_, '_, 'tcx>,
         exposed_tags: &FxHashSet<SbTag>,
     ) -> InterpResult<'tcx> {
         // Two main steps: Find granting item, remove incompatible items above.
@@ -437,7 +437,7 @@ impl<'tcx> Stack {
         &mut self,
         tag: ProvenanceExtra,
         global: &GlobalStateInner,
-        dcx: &mut DiagnosticCx<'_, '_, '_, '_, 'tcx>,
+        dcx: &mut DiagnosticCx<'_, '_, '_, 'tcx>,
         exposed_tags: &FxHashSet<SbTag>,
     ) -> InterpResult<'tcx> {
         // Step 1: Make sure there is a granting item.
@@ -464,7 +464,7 @@ impl<'tcx> Stack {
         derived_from: ProvenanceExtra,
         new: Item,
         global: &mut GlobalStateInner,
-        dcx: &mut DiagnosticCx<'_, '_, '_, '_, 'tcx>,
+        dcx: &mut DiagnosticCx<'_, '_, '_, 'tcx>,
         exposed_tags: &FxHashSet<SbTag>,
     ) -> InterpResult<'tcx> {
         dcx.start_grant(new.perm());
@@ -555,14 +555,14 @@ impl<'tcx> Stacks {
         perm: Permission,
         tag: SbTag,
         id: AllocId,
-        current_span: &mut CurrentSpan<'_, '_, '_>,
+        machine: &MiriMachine<'_, '_>,
     ) -> Self {
         let item = Item::new(tag, perm, false);
         let stack = Stack::new(item);
 
         Stacks {
             stacks: RangeMap::new(size, stack),
-            history: AllocHistory::new(id, item, current_span),
+            history: AllocHistory::new(id, item, machine),
             exposed_tags: FxHashSet::default(),
             modified_since_last_gc: false,
         }
@@ -572,10 +572,10 @@ impl<'tcx> Stacks {
     fn for_each(
         &mut self,
         range: AllocRange,
-        mut dcx_builder: DiagnosticCxBuilder<'_, '_, '_, 'tcx>,
+        mut dcx_builder: DiagnosticCxBuilder<'_, '_, 'tcx>,
         mut f: impl FnMut(
             &mut Stack,
-            &mut DiagnosticCx<'_, '_, '_, '_, 'tcx>,
+            &mut DiagnosticCx<'_, '_, '_, 'tcx>,
             &mut FxHashSet<SbTag>,
         ) -> InterpResult<'tcx>,
     ) -> InterpResult<'tcx> {
@@ -596,7 +596,7 @@ impl Stacks {
         size: Size,
         state: &GlobalState,
         kind: MemoryKind,
-        mut current_span: CurrentSpan<'_, '_, '_>,
+        machine: &MiriMachine<'_, '_>,
     ) -> Self {
         let mut extra = state.borrow_mut();
         let (base_tag, perm) = match kind {
@@ -605,12 +605,11 @@ impl Stacks {
             // not through a pointer). That is, whenever we directly write to a local, this will pop
             // everything else off the stack, invalidating all previous pointers,
             // and in particular, *all* raw pointers.
-            MemoryKind::Stack =>
-                (extra.base_ptr_tag(id, current_span.machine()), Permission::Unique),
+            MemoryKind::Stack => (extra.base_ptr_tag(id, machine), Permission::Unique),
             // Everything else is shared by default.
-            _ => (extra.base_ptr_tag(id, current_span.machine()), Permission::SharedReadWrite),
+            _ => (extra.base_ptr_tag(id, machine), Permission::SharedReadWrite),
         };
-        Stacks::new(size, perm, base_tag, id, &mut current_span)
+        Stacks::new(size, perm, base_tag, id, machine)
     }
 
     #[inline(always)]
@@ -620,8 +619,7 @@ impl Stacks {
         tag: ProvenanceExtra,
         range: AllocRange,
         state: &GlobalState,
-        mut current_span: CurrentSpan<'ecx, 'mir, 'tcx>,
-        threads: &'ecx ThreadManager<'mir, 'tcx>,
+        machine: &'ecx MiriMachine<'mir, 'tcx>,
     ) -> InterpResult<'tcx>
     where
        'tcx: 'ecx,
     {
         trace!(
             "read access with tag {:?}: {:?}, size {}",
             tag,
             Pointer::new(alloc_id, range.start),
             range.size.bytes()
         );
-        let dcx = DiagnosticCxBuilder::read(&mut current_span, threads, tag, range);
+        let dcx = DiagnosticCxBuilder::read(machine, tag, range);
         let mut state = state.borrow_mut();
         self.for_each(range, dcx, |stack, dcx, exposed_tags| {
             stack.access(AccessKind::Read, tag, &mut state, dcx, exposed_tags)
@@ -646,8 +644,7 @@ impl Stacks {
         tag: ProvenanceExtra,
         range: AllocRange,
         state: &GlobalState,
-        mut current_span: CurrentSpan<'ecx, 'mir, 'tcx>,
-        threads: &'ecx ThreadManager<'mir, 'tcx>,
+        machine: &'ecx MiriMachine<'mir, 'tcx>,
     ) -> InterpResult<'tcx> {
         trace!(
             "write access with tag {:?}: {:?}, size {}",
             tag,
             Pointer::new(alloc_id, range.start),
             range.size.bytes()
         );
-        let dcx = DiagnosticCxBuilder::write(&mut current_span, threads, tag, range);
+        let dcx = DiagnosticCxBuilder::write(machine, tag, range);
         let mut state = state.borrow_mut();
         self.for_each(range, dcx, |stack, dcx, exposed_tags| {
             stack.access(AccessKind::Write, tag, &mut state, dcx, exposed_tags)
@@ -669,11 +666,10 @@ impl Stacks {
         tag: ProvenanceExtra,
         range: AllocRange,
         state: &GlobalState,
-        mut current_span: CurrentSpan<'ecx, 'mir, 'tcx>,
-        threads: &'ecx ThreadManager<'mir, 'tcx>,
+        machine: &'ecx MiriMachine<'mir, 'tcx>,
     ) -> InterpResult<'tcx> {
         trace!("deallocation with tag {:?}: {:?}, size {}", tag, alloc_id, range.size.bytes());
-        let dcx = DiagnosticCxBuilder::dealloc(&mut current_span, threads, tag);
+        let dcx = DiagnosticCxBuilder::dealloc(machine, tag);
         let state = state.borrow();
         self.for_each(range, dcx, |stack, dcx, exposed_tags| {
             stack.dealloc(tag, &state, dcx, exposed_tags)
@@ -738,7 +734,6 @@ trait EvalContextPrivExt<'mir: 'ecx, 'tcx: 'mir, 'ecx>: crate::MiriInterpCxExt<'
         let (_size, _align, alloc_kind) = this.get_alloc_info(alloc_id);
         match alloc_kind {
             AllocKind::LiveData => {
-                let current_span = &mut this.machine.current_span();
                 // This should have alloc_extra data, but `get_alloc_extra` can still fail
                 // if converting this alloc_id from a global to a local one
                 // uncovers a non-supported `extern static`.
@@ -748,12 +743,10 @@ trait EvalContextPrivExt<'mir: 'ecx, 'tcx: 'mir, 'ecx>: crate::MiriInterpCxExt<'
                     .as_ref()
                     .expect("we should have Stacked Borrows data")
                     .borrow_mut();
-                let threads = &this.machine.threads;
                 // Note that we create a *second* `DiagnosticCxBuilder` below for the actual retag.
                 // FIXME: can this be done cleaner?
                 let dcx = DiagnosticCxBuilder::retag(
-                    current_span,
-                    threads,
+                    &this.machine,
                     retag_cause,
                     new_tag,
                     orig_tag,
@@ -854,8 +847,6 @@ trait EvalContextPrivExt<'mir: 'ecx, 'tcx: 'mir, 'ecx>: crate::MiriInterpCxExt<'
             .as_ref()
             .expect("we should have Stacked Borrows data")
             .borrow_mut();
-        // FIXME: can't share this with the current_span inside log_creation
-        let mut current_span = this.machine.current_span();
         this.visit_freeze_sensitive(place, size, |mut range, frozen| {
             // Adjust range.
            range.start += base_offset;
@@ -875,8 +866,7 @@ trait EvalContextPrivExt<'mir: 'ecx, 'tcx: 'mir, 'ecx>: crate::MiriInterpCxExt<'
                 let item = Item::new(new_tag, perm, protected);
                 let mut global = this.machine.stacked_borrows.as_ref().unwrap().borrow_mut();
                 let dcx = DiagnosticCxBuilder::retag(
-                    &mut current_span, // FIXME avoid this `clone`
-                    &this.machine.threads,
+                    &this.machine,
                     retag_cause,
                     new_tag,
                     orig_tag,
@@ -902,11 +892,8 @@ trait EvalContextPrivExt<'mir: 'ecx, 'tcx: 'mir, 'ecx>: crate::MiriInterpCxExt<'
         let item = Item::new(new_tag, perm, protect);
         let range = alloc_range(base_offset, size);
         let mut global = machine.stacked_borrows.as_ref().unwrap().borrow_mut();
-        // FIXME: can't share this with the current_span inside log_creation
-        let current_span = &mut machine.current_span();
         let dcx = DiagnosticCxBuilder::retag(
-            current_span,
-            &machine.threads,
+            machine,
             retag_cause,
             new_tag,
             orig_tag,
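
The caching scheme introduced in src/concurrency/thread.rs and maintained by the
after_stack_push/after_stack_pop hooks in src/machine.rs can be summarized outside
of Miri's types. The following is a minimal sketch of that double-Option cache and
its incremental maintenance; Frame, CallStack, and all method names here are
hypothetical simplified stand-ins, not Miri API, and the getter returns an Option
rather than falling back to the top of the stack as the real code does.

use std::cell::Cell;

// Hypothetical, heavily simplified stand-ins for Miri's frame and thread types.
struct Frame {
    is_local: bool,
}

struct CallStack {
    frames: Vec<Frame>,
    // Outer `Option`: is the cache valid at all?
    // Inner `Option`: is there any local frame on the stack?
    top_local: Cell<Option<Option<usize>>>,
}

impl CallStack {
    // Slow path: linear scan from the top of the stack. This is what the cache avoids.
    fn compute_top_local(&self) -> Option<usize> {
        self.frames.iter().rposition(|frame| frame.is_local)
    }

    // Fast path: reuse the cached answer, recomputing (and refilling the cache)
    // only when the cache was invalidated.
    fn top_local(&self) -> Option<usize> {
        if let Some(cached) = self.top_local.get() {
            cached
        } else {
            let idx = self.compute_top_local();
            self.top_local.set(Some(idx));
            idx
        }
    }

    // Pushing a local frame makes it the topmost local frame, so the cache can be
    // updated in place; pushing a non-local frame leaves the cached answer valid.
    fn push(&mut self, frame: Frame) {
        let is_local = frame.is_local;
        self.frames.push(frame);
        if is_local {
            self.top_local.set(Some(Some(self.frames.len() - 1)));
        }
    }

    // Popping a local frame invalidates the cache: without a scan we no longer know
    // which frame, if any, is the next topmost local frame.
    fn pop(&mut self) -> Option<Frame> {
        let frame = self.frames.pop();
        if frame.as_ref().map_or(false, |f| f.is_local) {
            self.top_local.set(None);
        }
        frame
    }
}

fn main() {
    // Start with a valid cache for an empty stack: "no local frame".
    let mut stack = CallStack { frames: Vec::new(), top_local: Cell::new(Some(None)) };
    stack.push(Frame { is_local: true });
    stack.push(Frame { is_local: false });
    assert_eq!(stack.top_local(), Some(0));
    stack.pop(); // non-local pop: cache stays valid
    stack.pop(); // local pop: cache invalidated, next query rescans
    assert_eq!(stack.top_local(), None);
}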
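The rewritten current_span/caller_span helpers in src/helpers.rs both start from the
topmost local frame; the caller variant additionally clamps the index to the
second-to-last frame so that an event raised on function entry points at the call
site rather than the callee. A minimal sketch of that index arithmetic, using a
hypothetical caller_index helper:

// Given the topmost local frame index and the stack depth, pick the frame whose
// span should be reported for the *caller* of the current operation.
fn caller_index(top_local: usize, stack_len: usize) -> usize {
    std::cmp::min(top_local, stack_len.saturating_sub(2))
}

fn main() {
    // Three frames, topmost local frame on top (index 2): the caller is index 1.
    assert_eq!(caller_index(2, 3), 1);
    // If the topmost local frame is already further down, it is used as-is.
    assert_eq!(caller_index(0, 3), 0);
}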