Include a Span in VClock

saethlin committed Nov 3, 2022
1 parent f0aa729 commit 83b0f42
Showing 33 changed files with 528 additions and 150 deletions.
177 changes: 112 additions & 65 deletions src/concurrency/data_race.rs

Large diffs are not rendered by default.

6 changes: 4 additions & 2 deletions src/concurrency/init_once.rs
@@ -123,6 +123,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
fn init_once_complete(&mut self, id: InitOnceId) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let current_thread = this.get_active_thread();
let current_span = this.machine.current_span().get();
let init_once = &mut this.machine.threads.sync.init_onces[id];

assert_eq!(
@@ -135,7 +136,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {

// Each complete happens-before the end of the wait
if let Some(data_race) = &this.machine.data_race {
data_race.validate_lock_release(&mut init_once.data_race, current_thread);
data_race.validate_lock_release(&mut init_once.data_race, current_thread, current_span);
}

// Wake up everyone.
@@ -164,6 +165,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
fn init_once_fail(&mut self, id: InitOnceId) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let current_thread = this.get_active_thread();
let current_span = this.machine.current_span().get();
let init_once = &mut this.machine.threads.sync.init_onces[id];
assert_eq!(
init_once.status,
@@ -175,7 +177,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
// FIXME: should this really induce synchronization? If we think of it as a lock, then yes,
// but the docs don't talk about such details.
if let Some(data_race) = &this.machine.data_race {
data_race.validate_lock_release(&mut init_once.data_race, current_thread);
data_race.validate_lock_release(&mut init_once.data_race, current_thread, current_span);
}

// Wake up one waiting thread, so they can go ahead and try to init this.
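Note on the pattern above: each call site reads `this.machine.current_span().get()` before taking the mutable borrow of the synchronization object, because both borrow `this.machine`. The following is a minimal sketch of why that ordering is required; the `Machine` and `InitOnce` types here are simplified stand-ins, not Miri's real ones.

```rust
// Simplified stand-ins for Miri's machine state, only to illustrate borrow order.
struct InitOnce {
    last_release_span: u32, // stands in for the init-once's VClock
}

struct Machine {
    span: u32,                 // stands in for the current source span
    init_onces: Vec<InitOnce>, // stands in for threads.sync.init_onces
}

impl Machine {
    fn current_span(&self) -> u32 {
        // Shared borrow of `self`; it ends when this call returns.
        self.span
    }

    fn init_once_complete(&mut self, id: usize) {
        // Capture the span first, while no other borrow of `self` is live.
        let current_span = self.current_span();
        // Now take the long-lived mutable borrow of one field.
        let init_once = &mut self.init_onces[id];
        init_once.last_release_span = current_span;
        // Calling `self.current_span()` down here instead would not compile,
        // because `init_once` still mutably borrows part of `self`.
    }
}

fn main() {
    let mut m = Machine { span: 42, init_onces: vec![InitOnce { last_release_span: 0 }] };
    m.init_once_complete(0);
    assert_eq!(m.init_onces[0].last_release_span, 42);
}
```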
33 changes: 27 additions & 6 deletions src/concurrency/sync.rs
@@ -345,6 +345,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
/// return `None`.
fn mutex_unlock(&mut self, id: MutexId, expected_owner: ThreadId) -> Option<usize> {
let this = self.eval_context_mut();
let current_span = this.machine.current_span().get();
let mutex = &mut this.machine.threads.sync.mutexes[id];
if let Some(current_owner) = mutex.owner {
// Mutex is locked.
@@ -361,7 +362,11 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
// The mutex is completely unlocked. Try transferring ownership
// to another thread.
if let Some(data_race) = &this.machine.data_race {
data_race.validate_lock_release(&mut mutex.data_race, current_owner);
data_race.validate_lock_release(
&mut mutex.data_race,
current_owner,
current_span,
);
}
this.mutex_dequeue_and_lock(id);
}
@@ -440,6 +445,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
/// Returns `true` if succeeded, `false` if this `reader` did not hold the lock.
fn rwlock_reader_unlock(&mut self, id: RwLockId, reader: ThreadId) -> bool {
let this = self.eval_context_mut();
let current_span = this.machine.current_span().get();
let rwlock = &mut this.machine.threads.sync.rwlocks[id];
match rwlock.readers.entry(reader) {
Entry::Occupied(mut entry) => {
@@ -456,7 +462,11 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
Entry::Vacant(_) => return false, // we did not even own this lock
}
if let Some(data_race) = &this.machine.data_race {
data_race.validate_lock_release_shared(&mut rwlock.data_race_reader, reader);
data_race.validate_lock_release_shared(
&mut rwlock.data_race_reader,
reader,
current_span,
);
}

// The thread was a reader. If the lock is not held any more, give it to a writer.
@@ -497,6 +507,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
#[inline]
fn rwlock_writer_unlock(&mut self, id: RwLockId, expected_writer: ThreadId) -> bool {
let this = self.eval_context_mut();
let current_span = this.machine.current_span().get();
let rwlock = &mut this.machine.threads.sync.rwlocks[id];
if let Some(current_writer) = rwlock.writer {
if current_writer != expected_writer {
@@ -509,8 +520,16 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
// since this writer happens-before both the union of readers once they are finished
// and the next writer
if let Some(data_race) = &this.machine.data_race {
data_race.validate_lock_release(&mut rwlock.data_race, current_writer);
data_race.validate_lock_release(&mut rwlock.data_race_reader, current_writer);
data_race.validate_lock_release(
&mut rwlock.data_race,
current_writer,
current_span,
);
data_race.validate_lock_release(
&mut rwlock.data_race_reader,
current_writer,
current_span,
);
}
// The thread was a writer.
//
@@ -581,12 +600,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
fn condvar_signal(&mut self, id: CondvarId) -> Option<(ThreadId, MutexId)> {
let this = self.eval_context_mut();
let current_thread = this.get_active_thread();
let current_span = this.machine.current_span().get();
let condvar = &mut this.machine.threads.sync.condvars[id];
let data_race = &this.machine.data_race;

// Each condvar signal happens-before the end of the condvar wake
if let Some(data_race) = data_race {
data_race.validate_lock_release(&mut condvar.data_race, current_thread);
data_race.validate_lock_release(&mut condvar.data_race, current_thread, current_span);
}
condvar.waiters.pop_front().map(|waiter| {
if let Some(data_race) = data_race {
@@ -614,12 +634,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
fn futex_wake(&mut self, addr: u64, bitset: u32) -> Option<ThreadId> {
let this = self.eval_context_mut();
let current_thread = this.get_active_thread();
let current_span = this.machine.current_span().get();
let futex = &mut this.machine.threads.sync.futexes.get_mut(&addr)?;
let data_race = &this.machine.data_race;

// Each futex-wake happens-before the end of the futex wait
if let Some(data_race) = data_race {
data_race.validate_lock_release(&mut futex.data_race, current_thread);
data_race.validate_lock_release(&mut futex.data_race, current_thread, current_span);
}

// Wake up the first thread in the queue that matches any of the bits in the bitset.
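The data_race.rs changes that consume the new span argument are not rendered above, so the exact body of `validate_lock_release` is not shown in this commit view. As an assumed model of what such a release does with the span: the releasing thread's clock is published to the lock, and the thread's own clock entry is bumped and tagged with the span of the releasing statement, matching the `increment_index(idx, current_span)` signature introduced in vector_clock.rs below. The standalone toy below only illustrates that idea; it is not Miri's implementation.

```rust
// Standalone toy model of a span-tagged lock release. This is NOT Miri's
// data_race.rs (whose diff is not rendered above); it only shows one plausible
// use of the span: tagging the releasing thread's clock entry so a later
// diagnostic can point at the releasing statement.
#[derive(Clone, Copy, Debug, Default, PartialEq)]
struct Stamp {
    time: u32,
    span: u32, // 0 plays the role of DUMMY_SP
}

#[derive(Clone, Debug, Default)]
struct Clock(Vec<Stamp>);

impl Clock {
    fn ensure_len(&mut self, len: usize) {
        if self.0.len() < len {
            self.0.resize(len, Stamp::default());
        }
    }

    // Mirrors the shape of VClock::increment_index(idx, current_span).
    fn increment(&mut self, idx: usize, span: u32) {
        self.ensure_len(idx + 1);
        let entry = &mut self.0[idx];
        entry.time += 1;
        if span != 0 {
            entry.span = span; // only a real span overwrites the stored one
        }
    }
}

// Assumed release semantics: publish the thread's clock to the lock, then bump
// the thread's own entry, tagged with the span of the unlocking statement.
fn release(lock_clock: &mut Clock, thread_clock: &mut Clock, thread_idx: usize, span: u32) {
    *lock_clock = thread_clock.clone();
    thread_clock.increment(thread_idx, span);
}

fn main() {
    let mut lock = Clock::default();
    let mut t0 = Clock::default();
    t0.increment(0, 10); // some earlier event on thread 0, at "span" 10
    release(&mut lock, &mut t0, 0, 77);
    assert_eq!(t0.0[0], Stamp { time: 2, span: 77 });
    assert_eq!(lock.0[0], Stamp { time: 1, span: 10 });
}
```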
12 changes: 9 additions & 3 deletions src/concurrency/thread.rs
@@ -12,6 +12,7 @@ use rustc_hir::def_id::DefId;
use rustc_index::vec::{Idx, IndexVec};
use rustc_middle::mir::Mutability;
use rustc_middle::ty::layout::TyAndLayout;
use rustc_span::Span;
use rustc_target::spec::abi::Abi;

use crate::concurrency::data_race;
@@ -599,6 +600,7 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> {
fn thread_terminated(
&mut self,
mut data_race: Option<&mut data_race::GlobalState>,
current_span: Span,
) -> Vec<Pointer<Provenance>> {
let mut free_tls_statics = Vec::new();
{
@@ -616,7 +618,7 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> {
}
// Set the thread into a terminated state in the data-race detector.
if let Some(ref mut data_race) = data_race {
data_race.thread_terminated(self);
data_race.thread_terminated(self, current_span);
}
// Check if we need to unblock any threads.
let mut joined_threads = vec![]; // store which threads joined, we'll need it
@@ -750,8 +752,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
fn create_thread(&mut self) -> ThreadId {
let this = self.eval_context_mut();
let id = this.machine.threads.create_thread();
let current_span = this.machine.current_span().get();
if let Some(data_race) = &mut this.machine.data_race {
data_race.thread_created(&this.machine.threads, id);
data_race.thread_created(&this.machine.threads, id, current_span);
}
id
}
@@ -1011,7 +1014,10 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
#[inline]
fn thread_terminated(&mut self) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
for ptr in this.machine.threads.thread_terminated(this.machine.data_race.as_mut()) {
let current_span = this.machine.current_span().get();
for ptr in
this.machine.threads.thread_terminated(this.machine.data_race.as_mut(), current_span)
{
this.deallocate_ptr(ptr.into(), None, MiriMemoryKind::Tls.into())?;
}
Ok(())
90 changes: 77 additions & 13 deletions src/concurrency/vector_clock.rs
@@ -1,6 +1,11 @@
use rustc_index::vec::Idx;
use rustc_span::{Span, SpanData, DUMMY_SP};
use smallvec::SmallVec;
use std::{cmp::Ordering, fmt::Debug, ops::Index};
use std::{
cmp::Ordering,
fmt::Debug,
ops::{Index, IndexMut},
};

/// A vector clock index, this is associated with a thread id
/// but in some cases one vector index may be shared with
@@ -42,7 +47,37 @@ const SMALL_VECTOR: usize = 4;

/// The type of the time-stamps recorded in the data-race detector
/// set to a type of unsigned integer
pub type VTimestamp = u32;
#[derive(Clone, Copy, Debug, Eq)]
pub struct VTimestamp {
time: u32,
pub span: Span,
}

impl VTimestamp {
pub const NONE: VTimestamp = VTimestamp { time: 0, span: DUMMY_SP };

pub fn span_data(&self) -> SpanData {
self.span.data()
}
}

impl PartialEq for VTimestamp {
fn eq(&self, other: &Self) -> bool {
self.time == other.time
}
}

impl PartialOrd for VTimestamp {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}

impl Ord for VTimestamp {
fn cmp(&self, other: &Self) -> Ordering {
self.time.cmp(&other.time)
}
}

/// A vector clock for detecting data-races, this is conceptually
/// a map from a vector index (and thus a thread id) to a timestamp.
Expand All @@ -62,7 +97,7 @@ impl VClock {
/// for a value at the given index
pub fn new_with_index(index: VectorIdx, timestamp: VTimestamp) -> VClock {
let len = index.index() + 1;
let mut vec = smallvec::smallvec![0; len];
let mut vec = smallvec::smallvec![VTimestamp::NONE; len];
vec[index.index()] = timestamp;
VClock(vec)
}
@@ -79,7 +114,7 @@ impl VClock {
#[inline]
fn get_mut_with_min_len(&mut self, min_len: usize) -> &mut [VTimestamp] {
if self.0.len() < min_len {
self.0.resize(min_len, 0);
self.0.resize(min_len, VTimestamp::NONE);
}
assert!(self.0.len() >= min_len);
self.0.as_mut_slice()
@@ -88,11 +123,14 @@ impl VClock {
/// Increment the vector clock at a known index
/// this will panic if the vector index overflows
#[inline]
pub fn increment_index(&mut self, idx: VectorIdx) {
pub fn increment_index(&mut self, idx: VectorIdx, current_span: Span) {
let idx = idx.index();
let mut_slice = self.get_mut_with_min_len(idx + 1);
let idx_ref = &mut mut_slice[idx];
*idx_ref = idx_ref.checked_add(1).expect("Vector clock overflow")
idx_ref.time = idx_ref.time.checked_add(1).expect("Vector clock overflow");
if current_span != DUMMY_SP {
idx_ref.span = current_span;
}
}

// Join the two vector-clocks together, this
@@ -102,14 +140,31 @@ impl VClock {
let rhs_slice = other.as_slice();
let lhs_slice = self.get_mut_with_min_len(rhs_slice.len());
for (l, &r) in lhs_slice.iter_mut().zip(rhs_slice.iter()) {
let l_span = l.span;
let r_span = r.span;
*l = r.max(*l);
if l.span == DUMMY_SP {
if r_span != DUMMY_SP {
l.span = r_span;
}
if l_span != DUMMY_SP {
l.span = l_span;
}
}
}
}

/// Set the element at the current index of the vector
pub fn set_at_index(&mut self, other: &Self, idx: VectorIdx) {
let mut_slice = self.get_mut_with_min_len(idx.index() + 1);

let prev_span = mut_slice[idx.index()].span;

mut_slice[idx.index()] = other[idx];

if other[idx].span == DUMMY_SP {
mut_slice[idx.index()].span = prev_span;
}
}

/// Set the vector to the all-zero vector
@@ -313,7 +368,14 @@ impl Index<VectorIdx> for VClock {

#[inline]
fn index(&self, index: VectorIdx) -> &VTimestamp {
self.as_slice().get(index.to_u32() as usize).unwrap_or(&0)
self.as_slice().get(index.to_u32() as usize).unwrap_or(&VTimestamp::NONE)
}
}

impl IndexMut<VectorIdx> for VClock {
#[inline]
fn index_mut(&mut self, index: VectorIdx) -> &mut VTimestamp {
self.0.as_mut_slice().get_mut(index.to_u32() as usize).unwrap()
}
}

@@ -323,24 +385,25 @@ impl Index<VectorIdx> for VClock {
#[cfg(test)]
mod tests {

use super::{VClock, VTimestamp, VectorIdx};
use std::cmp::Ordering;
use super::{VClock, VectorIdx};
use rustc_span::DUMMY_SP;

#[test]
fn test_equal() {
let mut c1 = VClock::default();
let mut c2 = VClock::default();
assert_eq!(c1, c2);
c1.increment_index(VectorIdx(5));
c1.increment_index(VectorIdx(5), DUMMY_SP);
assert_ne!(c1, c2);
c2.increment_index(VectorIdx(53));
c2.increment_index(VectorIdx(53), DUMMY_SP);
assert_ne!(c1, c2);
c1.increment_index(VectorIdx(53));
c1.increment_index(VectorIdx(53), DUMMY_SP);
assert_ne!(c1, c2);
c2.increment_index(VectorIdx(5));
c2.increment_index(VectorIdx(5), DUMMY_SP);
assert_eq!(c1, c2);
}

/*
#[test]
fn test_partial_order() {
// Small test
@@ -467,4 +530,5 @@ mod tests {
r
);
}
*/
}
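The new VTimestamp keeps the span strictly out of its comparisons: PartialEq and Ord look only at the time counter, so attaching source locations cannot change which accesses are considered racing. Below is a self-contained sketch of that pattern, with Span modeled as a plain u32 so it runs on its own.

```rust
use std::cmp::Ordering;

// Same pattern as the new VTimestamp: Eq/Ord are defined on the counter only,
// and the span is carried along purely as diagnostic payload.
#[derive(Clone, Copy, Debug, Eq)]
struct Stamp {
    time: u32,
    span: u32, // stands in for rustc_span::Span
}

impl PartialEq for Stamp {
    fn eq(&self, other: &Self) -> bool {
        self.time == other.time
    }
}

impl PartialOrd for Stamp {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl Ord for Stamp {
    fn cmp(&self, other: &Self) -> Ordering {
        self.time.cmp(&other.time)
    }
}

fn main() {
    let a = Stamp { time: 3, span: 10 };
    let b = Stamp { time: 3, span: 99 };
    assert_eq!(a, b); // spans differ, but the counts match, so they compare equal
    assert!(a < Stamp { time: 4, span: 0 });
}
```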
2 changes: 1 addition & 1 deletion src/concurrency/weak_memory.rs
@@ -258,7 +258,7 @@ impl<'mir, 'tcx: 'mir> StoreBuffer {
// The thread index and timestamp of the initialisation write
// are never meaningfully used, so it's fine to leave them as 0
store_index: VectorIdx::from(0),
timestamp: 0,
timestamp: VTimestamp::NONE,
val: init,
is_seqcst: false,
load_info: RefCell::new(LoadInfo::default()),
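The initialisation write above is stamped with VTimestamp::NONE, whose span is DUMMY_SP. In the vector_clock.rs changes, increment_index, join, and set_at_index all follow the same rule: a dummy span never overwrites a real one, so a timestamp that has picked up a genuine source location keeps it. The sketch below models that rule with Option<u32> standing in for Span (None playing the role of DUMMY_SP); it is an illustration, not Miri's code, and unlike the real VTimestamp it uses full structural equality just to keep the assertions readable.

```rust
// Toy illustration of the "dummy span never wins" rule from the diff above.
// `None` stands in for DUMMY_SP; `Some(n)` stands in for a real source span.
#[derive(Clone, Copy, Debug, PartialEq)]
struct Stamp {
    time: u32,
    span: Option<u32>,
}

impl Stamp {
    // Counterpart of VTimestamp::NONE: zero count, dummy span.
    const NONE: Stamp = Stamp { time: 0, span: None };
}

// Join a single clock entry: the larger count wins, but a real span is kept in
// preference to a dummy one (the existing entry's span takes precedence,
// matching the order of the checks in VClock::join).
fn join_entry(l: Stamp, r: Stamp) -> Stamp {
    let mut out = if r.time > l.time { r } else { l };
    if out.span.is_none() {
        out.span = l.span.or(r.span);
    }
    out
}

fn main() {
    let init = Stamp::NONE;                        // like the store-buffer init write
    let tagged = Stamp { time: 3, span: Some(7) }; // a write with a real location
    let untagged = Stamp { time: 5, span: None };  // larger count, but dummy span

    // The larger count wins, yet the real span survives the join.
    assert_eq!(join_entry(tagged, untagged), Stamp { time: 5, span: Some(7) });
    // Joining with the all-dummy init stamp changes nothing.
    assert_eq!(join_entry(tagged, init), tagged);
}
```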
