From 82f17ab91714bcc8bd2a5591e90db690d449d38c Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Mon, 16 Mar 2020 16:48:44 -0700 Subject: [PATCH 01/77] Implement basic support for concurrency (Linux only). --- src/eval.rs | 3 +- src/lib.rs | 3 + src/machine.rs | 67 ++++-- src/shims/foreign_items/posix.rs | 101 ++++++++- src/shims/foreign_items/windows.rs | 6 +- src/shims/tls.rs | 98 +++++---- src/threads.rs | 303 +++++++++++++++++++++++++++ tests/compile-fail/thread-spawn.rs | 7 - tests/run-pass/concurrency/simple.rs | 59 ++++++ 9 files changed, 571 insertions(+), 76 deletions(-) create mode 100644 src/threads.rs delete mode 100644 tests/compile-fail/thread-spawn.rs create mode 100644 tests/run-pass/concurrency/simple.rs diff --git a/src/eval.rs b/src/eval.rs index 61a5b71f0b..b0a59c64d1 100644 --- a/src/eval.rs +++ b/src/eval.rs @@ -205,7 +205,8 @@ pub fn eval_main<'tcx>(tcx: TyCtxt<'tcx>, main_id: DefId, config: MiriConfig) -> // Perform the main execution. let res: InterpResult<'_, i64> = (|| { // Main loop. - while ecx.step()? { + while ecx.schedule()? { + assert!(ecx.step()?); ecx.process_diagnostics(); } // Read the return code pointer *before* we run TLS destructors, to assert diff --git a/src/lib.rs b/src/lib.rs index 2f381b4a34..c042526be6 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -12,6 +12,7 @@ extern crate rustc_ast; #[macro_use] extern crate rustc_middle; extern crate rustc_data_structures; extern crate rustc_hir; +extern crate rustc_index; extern crate rustc_mir; extern crate rustc_span; extern crate rustc_target; @@ -26,6 +27,7 @@ mod operator; mod range_map; mod shims; mod stacked_borrows; +mod threads; // Make all those symbols available in the same place as our own. pub use rustc_mir::interpret::*; @@ -60,6 +62,7 @@ pub use crate::range_map::RangeMap; pub use crate::stacked_borrows::{ EvalContextExt as StackedBorEvalContextExt, Item, Permission, PtrId, Stack, Stacks, Tag, }; +pub use crate::threads::EvalContextExt as ThreadsEvalContextExt; /// Insert rustc arguments at the beginning of the argument list that Miri wants to be /// set per default, for maximal validation power. diff --git a/src/machine.rs b/src/machine.rs index 2ab5f10af6..9d1fa9b78c 100644 --- a/src/machine.rs +++ b/src/machine.rs @@ -26,6 +26,8 @@ use rustc_target::abi::{LayoutOf, Size}; use crate::*; +pub use crate::threads::{ThreadId, ThreadSet, ThreadLocalStorage}; + // Some global facts about the emulated machine. pub const PAGE_SIZE: u64 = 4 * 1024; // FIXME: adjust to target architecture pub const STACK_ADDR: u64 = 32 * PAGE_SIZE; // not really about the "stack", but where we start assigning integer addresses to allocations @@ -107,6 +109,7 @@ pub struct AllocExtra { pub struct MemoryExtra { pub stacked_borrows: Option, pub intptrcast: intptrcast::MemoryExtra, + pub tls: ThreadLocalStorage, /// Mapping extern static names to their canonical allocation. extern_statics: FxHashMap, @@ -143,6 +146,7 @@ impl MemoryExtra { rng: RefCell::new(rng), tracked_alloc_id, check_alignment, + tls: Default::default(), } } @@ -251,8 +255,8 @@ pub struct Evaluator<'mir, 'tcx> { /// The "time anchor" for this machine's monotone clock (for `Instant` simulation). pub(crate) time_anchor: Instant, - /// The call stack. - pub(crate) stack: Vec>>, + /// The set of threads. + pub(crate) threads: ThreadSet<'mir, 'tcx>, /// Precomputed `TyLayout`s for primitive data types that are commonly used inside Miri. 
pub(crate) layouts: PrimitiveLayouts<'tcx>, @@ -282,7 +286,7 @@ impl<'mir, 'tcx> Evaluator<'mir, 'tcx> { panic_payload: None, time_anchor: Instant::now(), layouts, - stack: Vec::default(), + threads: Default::default(), } } } @@ -326,6 +330,19 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'mir, 'tcx> { memory_extra.check_alignment } + #[inline(always)] + fn stack<'a>( + ecx: &'a InterpCx<'mir, 'tcx, Self> + ) -> &'a [Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>] { + ecx.active_thread_stack() + } + + fn stack_mut<'a>( + ecx: &'a mut InterpCx<'mir, 'tcx, Self> + ) -> &'a mut Vec> { + ecx.active_thread_stack_mut() + } + #[inline(always)] fn enforce_validity(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool { ecx.machine.validate @@ -418,29 +435,39 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'mir, 'tcx> { fn canonical_alloc_id(mem: &Memory<'mir, 'tcx, Self>, id: AllocId) -> AllocId { let tcx = mem.tcx; - // Figure out if this is an extern static, and if yes, which one. - let def_id = match tcx.alloc_map.lock().get(id) { - Some(GlobalAlloc::Static(def_id)) if tcx.is_foreign_item(def_id) => def_id, + let alloc = tcx.alloc_map.lock().get(id); + match alloc { + Some(GlobalAlloc::Static(def_id)) if tcx.is_foreign_item(def_id) => { + // Figure out if this is an extern static, and if yes, which one. + let attrs = tcx.get_attrs(def_id); + let link_name = match attr::first_attr_value_str_by_name(&attrs, sym::link_name) { + Some(name) => name, + None => tcx.item_name(def_id), + }; + // Check if we know this one. + if let Some(canonical_id) = mem.extra.extern_statics.get(&link_name) { + trace!("canonical_alloc_id: {:?} ({}) -> {:?}", id, link_name, canonical_id); + *canonical_id + } else { + // Return original id; `Memory::get_static_alloc` will throw an error. + id + } + }, + Some(GlobalAlloc::Static(def_id)) if tcx.has_attr(def_id, sym::thread_local) => { + // We have a thread local, so we need to get a unique allocation id for it. + mem.extra.tls.get_or_register_allocation(*tcx, id) + }, _ => { // No need to canonicalize anything. - return id; + id } - }; - let attrs = tcx.get_attrs(def_id); - let link_name = match attr::first_attr_value_str_by_name(&attrs, sym::link_name) { - Some(name) => name, - None => tcx.item_name(def_id), - }; - // Check if we know this one. - if let Some(canonical_id) = mem.extra.extern_statics.get(&link_name) { - trace!("canonical_alloc_id: {:?} ({}) -> {:?}", id, link_name, canonical_id); - *canonical_id - } else { - // Return original id; `Memory::get_static_alloc` will throw an error. 
- id } } + fn resolve_thread_local_allocation_id(extra: &Self::MemoryExtra, id: AllocId) -> AllocId { + extra.tls.resolve_allocation(id) + } + fn init_allocation_extra<'b>( memory_extra: &MemoryExtra, id: AllocId, diff --git a/src/shims/foreign_items/posix.rs b/src/shims/foreign_items/posix.rs index 70e16a65b5..47b661061d 100644 --- a/src/shims/foreign_items/posix.rs +++ b/src/shims/foreign_items/posix.rs @@ -6,6 +6,7 @@ use std::convert::TryFrom; use log::trace; use crate::*; +use rustc_index::vec::Idx; use rustc_middle::mir; use rustc_target::abi::{Align, LayoutOf, Size}; @@ -221,13 +222,15 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx } "pthread_getspecific" => { let key = this.force_bits(this.read_scalar(args[0])?.not_undef()?, args[0].layout.size)?; - let ptr = this.machine.tls.load_tls(key, this)?; + let active_thread = this.get_active_thread()?; + let ptr = this.machine.tls.load_tls(key, active_thread, this)?; this.write_scalar(ptr, dest)?; } "pthread_setspecific" => { let key = this.force_bits(this.read_scalar(args[0])?.not_undef()?, args[0].layout.size)?; + let active_thread = this.get_active_thread()?; let new_ptr = this.read_scalar(args[1])?.not_undef()?; - this.machine.tls.store_tls(key, this.test_null(new_ptr)?)?; + this.machine.tls.store_tls(key, active_thread, this.test_null(new_ptr)?)?; // Return success (`0`). this.write_null(dest)?; @@ -291,11 +294,6 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx this.write_scalar(Scalar::from_i32(result), dest)?; } - // Better error for attempts to create a thread - "pthread_create" => { - throw_unsup_format!("Miri does not support threading"); - } - // Miscellaneous "isatty" => { let _fd = this.read_scalar(args[0])?.to_i32()?; @@ -316,7 +314,94 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx this.write_null(dest)?; } - // Incomplete shims that we "stub out" just to get pre-main initialization code to work. + // Threading + "pthread_create" => { + assert_eq!(args.len(), 4); + let func = args[2]; + let fn_ptr = this.read_scalar(func)?.not_undef()?; + let fn_val = this.memory.get_fn(fn_ptr)?; + let instance = match fn_val { + rustc_mir::interpret::FnVal::Instance(instance) => instance, + _ => unreachable!(), + }; + let thread_info_place = this.deref_operand(args[0])?; + let thread_info_type = args[0].layout.ty + .builtin_deref(true) + .ok_or_else(|| err_ub_format!( + "wrong signature used for `pthread_create`: first argument must be a raw pointer." + ))? + .ty; + let thread_info_layout = this.layout_of(thread_info_type)?; + let func_arg = match *args[3] { + rustc_mir::interpret::Operand::Immediate(immediate) => immediate, + _ => unreachable!(), + }; + let func_args = [func_arg]; + let ret_place = + this.allocate(this.layout_of(this.tcx.types.usize)?, MiriMemoryKind::Machine.into()); + let new_thread_id = this.create_thread()?; + let old_thread_id = this.set_active_thread(new_thread_id)?; + this.call_function( + instance, + &func_args[..], + Some(ret_place.into()), + StackPopCleanup::None { cleanup: true }, + )?; + this.set_active_thread(old_thread_id)?; + this.write_scalar( + Scalar::from_uint(new_thread_id.index() as u128, thread_info_layout.size), + thread_info_place.into(), + )?; + + // Return success (`0`). 
+ this.write_null(dest)?; + } + "pthread_join" => { + assert_eq!(args.len(), 2); + assert!( + this.is_null(this.read_scalar(args[1])?.not_undef()?)?, + "Miri supports pthread_join only with retval==NULL" + ); + let thread = this.read_scalar(args[0])?.not_undef()?.to_machine_usize(this)?; + this.join_thread(thread.into())?; + + // Return success (`0`). + this.write_null(dest)?; + } + "pthread_detach" => { + let thread = this.read_scalar(args[0])?.not_undef()?.to_machine_usize(this)?; + this.detach_thread(thread.into())?; + + // Return success (`0`). + this.write_null(dest)?; + } + + "pthread_attr_getguardsize" => { + assert_eq!(args.len(), 2); + + let guard_size = this.deref_operand(args[1])?; + let guard_size_type = args[1].layout.ty + .builtin_deref(true) + .ok_or_else(|| err_ub_format!( + "wrong signature used for `pthread_attr_getguardsize`: first argument must be a raw pointer." + ))? + .ty; + let guard_size_layout = this.layout_of(guard_size_type)?; + this.write_scalar(Scalar::from_uint(crate::PAGE_SIZE, guard_size_layout.size), guard_size.into())?; + + // Return success (`0`). + this.write_null(dest)?; + } + + "prctl" => { + let option = this.read_scalar(args[0])?.not_undef()?.to_i32()?; + assert_eq!(option, 0xf, "Miri supports only PR_SET_NAME"); + + // Return success (`0`). + this.write_null(dest)?; + } + + // Incomplete shims that we "stub out" just to get pre-main initialziation code to work. // These shims are enabled only when the caller is in the standard library. | "pthread_attr_init" | "pthread_attr_destroy" diff --git a/src/shims/foreign_items/windows.rs b/src/shims/foreign_items/windows.rs index 0950a02bf9..a58444b21b 100644 --- a/src/shims/foreign_items/windows.rs +++ b/src/shims/foreign_items/windows.rs @@ -144,13 +144,15 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx } "TlsGetValue" => { let key = u128::from(this.read_scalar(args[0])?.to_u32()?); - let ptr = this.machine.tls.load_tls(key, this)?; + let active_thread = this.get_active_thread()?; + let ptr = this.machine.tls.load_tls(key, active_thread, this)?; this.write_scalar(ptr, dest)?; } "TlsSetValue" => { let key = u128::from(this.read_scalar(args[0])?.to_u32()?); + let active_thread = this.get_active_thread()?; let new_ptr = this.read_scalar(args[1])?.not_undef()?; - this.machine.tls.store_tls(key, this.test_null(new_ptr)?)?; + this.machine.tls.store_tls(key, active_thread, this.test_null(new_ptr)?)?; // Return success (`1`). this.write_scalar(Scalar::from_i32(1), dest)?; diff --git a/src/shims/tls.rs b/src/shims/tls.rs index 7b84468402..d16acb7500 100644 --- a/src/shims/tls.rs +++ b/src/shims/tls.rs @@ -1,22 +1,24 @@ //! Implement thread-local storage. use std::collections::BTreeMap; +use std::collections::btree_map::Entry; use log::trace; use rustc_middle::ty; use rustc_target::abi::{Size, HasDataLayout}; -use crate::{HelpersEvalContextExt, InterpResult, MPlaceTy, Scalar, StackPopCleanup, Tag}; +use crate::{HelpersEvalContextExt, ThreadsEvalContextExt, InterpResult, MPlaceTy, Scalar, StackPopCleanup, Tag}; +use crate::machine::ThreadId; pub type TlsKey = u128; -#[derive(Copy, Clone, Debug)] +#[derive(Clone, Debug)] pub struct TlsEntry<'tcx> { /// The data for this key. None is used to represent NULL. /// (We normalize this early to avoid having to do a NULL-ptr-test each time we access the data.) /// Will eventually become a map from thread IDs to `Scalar`s, if we ever support more than one thread. 
- data: Option>, + data: BTreeMap>, dtor: Option>, } @@ -52,7 +54,7 @@ impl<'tcx> TlsData<'tcx> { pub fn create_tls_key(&mut self, dtor: Option>, max_size: Size) -> InterpResult<'tcx, TlsKey> { let new_key = self.next_key; self.next_key += 1; - self.keys.insert(new_key, TlsEntry { data: None, dtor }).unwrap_none(); + self.keys.insert(new_key, TlsEntry { data: Default::default(), dtor }).unwrap_none(); trace!("New TLS key allocated: {} with dtor {:?}", new_key, dtor); if max_size.bits() < 128 && new_key >= (1u128 << max_size.bits() as u128) { @@ -74,22 +76,34 @@ impl<'tcx> TlsData<'tcx> { pub fn load_tls( &self, key: TlsKey, + thread_id: ThreadId, cx: &impl HasDataLayout, ) -> InterpResult<'tcx, Scalar> { match self.keys.get(&key) { - Some(&TlsEntry { data, .. }) => { - trace!("TLS key {} loaded: {:?}", key, data); - Ok(data.unwrap_or_else(|| Scalar::null_ptr(cx).into())) + Some(TlsEntry { data, .. }) => { + let value = data.get(&thread_id).cloned(); + trace!("TLS key {} for thread {:?} loaded: {:?}", key, thread_id, value); + Ok(value.unwrap_or_else(|| Scalar::null_ptr(cx).into())) } None => throw_ub_format!("loading from a non-existing TLS key: {}", key), } } - pub fn store_tls(&mut self, key: TlsKey, new_data: Option>) -> InterpResult<'tcx> { + pub fn store_tls( + &mut self, + key: TlsKey, thread_id: ThreadId, new_data: Option>) -> InterpResult<'tcx> { match self.keys.get_mut(&key) { Some(TlsEntry { data, .. }) => { - trace!("TLS key {} stored: {:?}", key, new_data); - *data = new_data; + match new_data { + Some(ptr) => { + trace!("TLS key {} for thread {:?} stored: {:?}", key, thread_id, ptr); + data.insert(thread_id, ptr); + } + None => { + trace!("TLS key {} for thread {:?} removed", key, thread_id); + data.remove(&thread_id); + } + } Ok(()) } None => throw_ub_format!("storing to a non-existing TLS key: {}", key), @@ -131,7 +145,8 @@ impl<'tcx> TlsData<'tcx> { fn fetch_tls_dtor( &mut self, key: Option, - ) -> Option<(ty::Instance<'tcx>, Scalar, TlsKey)> { + thread_id: ThreadId, + ) -> Option<(ty::Instance<'tcx>, ThreadId, Scalar, TlsKey)> { use std::collections::Bound::*; let thread_local = &mut self.keys; @@ -142,12 +157,15 @@ impl<'tcx> TlsData<'tcx> { for (&key, TlsEntry { data, dtor }) in thread_local.range_mut((start, Unbounded)) { - if let Some(data_scalar) = *data { - if let Some(dtor) = dtor { - let ret = Some((*dtor, data_scalar, key)); - *data = None; - return ret; + match data.entry(thread_id) { + Entry::Occupied(entry) => { + let (thread_id, data_scalar) = entry.remove_entry(); + if let Some(dtor) = dtor { + let ret = Some((dtor, thread_id, data_scalar, key)); + return ret; + } } + Entry::Vacant(_) => {} } } None @@ -156,6 +174,7 @@ impl<'tcx> TlsData<'tcx> { impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {} pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> { + /// Run TLS destructors for the currently active thread. fn run_tls_dtors(&mut self) -> InterpResult<'tcx> { let this = self.eval_context_mut(); assert!(!this.machine.tls.dtors_running, "running TLS dtors twice"); @@ -204,28 +223,31 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx } // Now run the "keyed" destructors. 
- let mut dtor = this.machine.tls.fetch_tls_dtor(None); - while let Some((instance, ptr, key)) = dtor { - trace!("Running TLS dtor {:?} on {:?}", instance, ptr); - assert!(!this.is_null(ptr).unwrap(), "data can't be NULL when dtor is called!"); - - let ret_place = MPlaceTy::dangling(this.machine.layouts.unit, this).into(); - this.call_function( - instance, - &[ptr.into()], - Some(ret_place), - StackPopCleanup::None { cleanup: true }, - )?; - - // step until out of stackframes - this.run()?; - - // Fetch next dtor after `key`. - dtor = match this.machine.tls.fetch_tls_dtor(Some(key)) { - dtor @ Some(_) => dtor, - // We ran each dtor once, start over from the beginning. - None => this.machine.tls.fetch_tls_dtor(None), - }; + for thread_id in this.get_all_thread_ids() { + this.set_active_thread(thread_id)?; + let mut dtor = this.machine.tls.fetch_tls_dtor(None, thread_id); + while let Some((instance, thread_id, ptr, key)) = dtor { + trace!("Running TLS dtor {:?} on {:?} at {:?}", instance, ptr, thread_id); + assert!(!this.is_null(ptr).unwrap(), "Data can't be NULL when dtor is called!"); + + let ret_place = MPlaceTy::dangling(this.layout_of(this.tcx.mk_unit())?, this).into(); + this.call_function( + instance, + &[ptr.into()], + Some(ret_place), + StackPopCleanup::None { cleanup: true }, + )?; + + // step until out of stackframes + this.run()?; + + // Fetch next dtor after `key`. + dtor = match this.machine.tls.fetch_tls_dtor(Some(key), thread_id) { + dtor @ Some(_) => dtor, + // We ran each dtor once, start over from the beginning. + None => this.machine.tls.fetch_tls_dtor(None, thread_id), + }; + } } Ok(()) } diff --git a/src/threads.rs b/src/threads.rs new file mode 100644 index 0000000000..14ee58c2ee --- /dev/null +++ b/src/threads.rs @@ -0,0 +1,303 @@ +//! Implements threads. + +use std::cell::RefCell; +use std::collections::hash_map::Entry; + +use log::trace; + +use rustc_middle::ty; +use rustc_data_structures::fx::FxHashMap; +use rustc_index::vec::{Idx, IndexVec}; + +use crate::*; + +/// A thread identifier. +#[derive(Clone, Copy, Debug, PartialOrd, Ord, PartialEq, Eq, Hash)] +pub struct ThreadId(usize); + +impl Idx for ThreadId { + fn new(idx: usize) -> Self { + ThreadId(idx) + } + fn index(self) -> usize { + self.0 + } +} + +impl From for ThreadId { + fn from(id: u64) -> Self { + Self(id as usize) + } +} + +/// The state of a thread. +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub enum ThreadState { + /// The thread is enabled and can be executed. + Enabled, + /// The thread tried to join the specified thread and is blocked until that + /// thread terminates. + Blocked(ThreadId), + /// The thread has terminated its execution (we do not delete terminated + /// threads.) + Terminated, +} + +/// A thread. +pub struct Thread<'mir, 'tcx> { + state: ThreadState, + /// The virtual call stack. + stack: Vec>>, + /// Is the thread detached? + /// + /// A thread is detached if its join handle was destroyed and no other + /// thread can join it. + detached: bool, +} + +impl<'mir, 'tcx> Thread<'mir, 'tcx> { + /// Check if the thread terminated. If yes, change the state to terminated + /// and return `true`. 
+ fn check_terminated(&mut self) -> bool { + if self.state == ThreadState::Enabled { + if self.stack.is_empty() { + self.state = ThreadState::Terminated; + return true; + } + } + false + } +} + +impl<'mir, 'tcx> std::fmt::Debug for Thread<'mir, 'tcx> { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", self.state) + } +} + +impl<'mir, 'tcx> Default for Thread<'mir, 'tcx> { + fn default() -> Self { + Self { state: ThreadState::Enabled, stack: Vec::new(), detached: false } + } +} + +/// A set of threads. +#[derive(Debug)] +pub struct ThreadSet<'mir, 'tcx> { + /// Identifier of the currently active thread. + active_thread: ThreadId, + /// Threads used in the program. + /// + /// Note that this vector also contains terminated threads. + threads: IndexVec>, + + /// List of threads that just terminated. TODO: Cleanup. + terminated_threads: Vec, +} + +impl<'mir, 'tcx> Default for ThreadSet<'mir, 'tcx> { + fn default() -> Self { + let mut threads = IndexVec::new(); + threads.push(Default::default()); + Self { + active_thread: ThreadId::new(0), + threads: threads, + terminated_threads: Default::default(), + } + } +} + +impl<'mir, 'tcx: 'mir> ThreadSet<'mir, 'tcx> { + /// Borrow the stack of the active thread. + fn active_thread_stack(&self) -> &[Frame<'mir, 'tcx, Tag, FrameData<'tcx>>] { + &self.threads[self.active_thread].stack + } + /// Mutably borrow the stack of the active thread. + fn active_thread_stack_mut(&mut self) -> &mut Vec>> { + &mut self.threads[self.active_thread].stack + } + /// Create a new thread and returns its id. + fn create_thread(&mut self) -> ThreadId { + let new_thread_id = ThreadId::new(self.threads.len()); + self.threads.push(Default::default()); + new_thread_id + } + /// Set an active thread and return the id of the thread that was active before. + fn set_active_thread(&mut self, id: ThreadId) -> ThreadId { + let active_thread_id = self.active_thread; + self.active_thread = id; + assert!(self.active_thread.index() < self.threads.len()); + active_thread_id + } + /// Get the id of the currently active thread. + fn get_active_thread(&self) -> ThreadId { + self.active_thread + } + /// Mark the thread as detached, which means that no other thread will try + /// to join it and the thread is responsible for cleaning up. + fn detach_thread(&mut self, id: ThreadId) { + self.threads[id].detached = true; + } + /// Mark that the active thread tries to join the thread with `joined_thread_id`. + fn join_thread(&mut self, joined_thread_id: ThreadId) { + assert!(!self.threads[joined_thread_id].detached, "Bug: trying to join a detached thread."); + assert_ne!(joined_thread_id, self.active_thread, "Bug: trying to join itself"); + assert!( + self.threads + .iter() + .all(|thread| thread.state != ThreadState::Blocked(joined_thread_id)), + "Bug: multiple threads try to join the same thread." + ); + if self.threads[joined_thread_id].state != ThreadState::Terminated { + // The joined thread is still running, we need to wait for it. + self.threads[self.active_thread].state = ThreadState::Blocked(joined_thread_id); + trace!( + "{:?} blocked on {:?} when trying to join", + self.active_thread, + joined_thread_id + ); + } + } + /// Get ids of all threads ever allocated. + fn get_all_thread_ids(&mut self) -> Vec { + (0..self.threads.len()).map(ThreadId::new).collect() + } + /// Decide which thread to run next. + /// + /// Returns `false` if all threads terminated. 
+ fn schedule(&mut self) -> InterpResult<'tcx, bool> { + if self.threads[self.active_thread].check_terminated() { + // Check if we need to unblock any threads. + for (i, thread) in self.threads.iter_enumerated_mut() { + if thread.state == ThreadState::Blocked(self.active_thread) { + trace!("unblocking {:?} because {:?} terminated", i, self.active_thread); + thread.state = ThreadState::Enabled; + } + } + } + if self.threads[self.active_thread].state == ThreadState::Enabled { + return Ok(true); + } + if let Some(enabled_thread) = + self.threads.iter().position(|thread| thread.state == ThreadState::Enabled) + { + self.active_thread = ThreadId::new(enabled_thread); + return Ok(true); + } + if self.threads.iter().all(|thread| thread.state == ThreadState::Terminated) { + Ok(false) + } else { + throw_machine_stop!(TerminationInfo::Abort(Some(format!("execution deadlocked")))) + } + } +} + +/// In Rust, a thread local variable is just a specially marked static. To +/// ensure a property that each memory allocation has a globally unique +/// allocation identifier, we create a fresh allocation id for each thread. This +/// data structure keeps the track of the created allocation identifiers and +/// their relation to the original static allocations. +#[derive(Clone, Debug, Default)] +pub struct ThreadLocalStorage { + /// A map from a thread local allocation identifier to the static from which + /// it was created. + thread_local_origin: RefCell>, + /// A map from a thread local static and thread id to the unique thread + /// local allocation. + thread_local_allocations: RefCell>, + /// The currently active thread. + active_thread: Option, +} + +impl ThreadLocalStorage { + /// For static allocation identifier `original_id` get a thread local + /// allocation identifier. If it is not allocated yet, allocate. + pub fn get_or_register_allocation(&self, tcx: ty::TyCtxt<'_>, original_id: AllocId) -> AllocId { + match self + .thread_local_allocations + .borrow_mut() + .entry((original_id, self.active_thread.unwrap())) + { + Entry::Occupied(entry) => *entry.get(), + Entry::Vacant(entry) => { + let fresh_id = tcx.alloc_map.lock().reserve(); + entry.insert(fresh_id); + self.thread_local_origin.borrow_mut().insert(fresh_id, original_id); + trace!( + "get_or_register_allocation(original_id={:?}) -> {:?}", + original_id, + fresh_id + ); + fresh_id + } + } + } + /// For thread local allocation identifier `alloc_id`, retrieve the original + /// static allocation identifier from which it was created. + pub fn resolve_allocation(&self, alloc_id: AllocId) -> AllocId { + trace!("resolve_allocation(alloc_id: {:?})", alloc_id); + if let Some(original_id) = self.thread_local_origin.borrow().get(&alloc_id) { + trace!("resolve_allocation(alloc_id: {:?}) -> {:?}", alloc_id, original_id); + *original_id + } else { + alloc_id + } + } + /// Set which thread is currently active. 
+ fn set_active_thread(&mut self, active_thread: ThreadId) { + self.active_thread = Some(active_thread); + } +} + +impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {} +pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> { + fn create_thread(&mut self) -> InterpResult<'tcx, ThreadId> { + let this = self.eval_context_mut(); + Ok(this.machine.threads.create_thread()) + } + fn detach_thread(&mut self, thread_id: ThreadId) -> InterpResult<'tcx> { + let this = self.eval_context_mut(); + this.machine.threads.detach_thread(thread_id); + Ok(()) + } + fn join_thread(&mut self, joined_thread_id: ThreadId) -> InterpResult<'tcx> { + let this = self.eval_context_mut(); + this.machine.threads.join_thread(joined_thread_id); + Ok(()) + } + fn set_active_thread(&mut self, thread_id: ThreadId) -> InterpResult<'tcx, ThreadId> { + let this = self.eval_context_mut(); + this.memory.extra.tls.set_active_thread(thread_id); + Ok(this.machine.threads.set_active_thread(thread_id)) + } + fn get_active_thread(&self) -> InterpResult<'tcx, ThreadId> { + let this = self.eval_context_ref(); + Ok(this.machine.threads.get_active_thread()) + } + fn active_thread_stack(&self) -> &[Frame<'mir, 'tcx, Tag, FrameData<'tcx>>] { + let this = self.eval_context_ref(); + this.machine.threads.active_thread_stack() + } + fn active_thread_stack_mut(&mut self) -> &mut Vec>> { + let this = self.eval_context_mut(); + this.machine.threads.active_thread_stack_mut() + } + fn get_all_thread_ids(&mut self) -> Vec { + let this = self.eval_context_mut(); + this.machine.threads.get_all_thread_ids() + } + /// Decide which thread to run next. + /// + /// Returns `false` if all threads terminated. + fn schedule(&mut self) -> InterpResult<'tcx, bool> { + let this = self.eval_context_mut(); + // Find the next thread to run. + if this.machine.threads.schedule()? 
{ + let active_thread = this.machine.threads.get_active_thread(); + this.memory.extra.tls.set_active_thread(active_thread); + Ok(true) + } else { + Ok(false) + } + } +} diff --git a/tests/compile-fail/thread-spawn.rs b/tests/compile-fail/thread-spawn.rs deleted file mode 100644 index 450dea99f5..0000000000 --- a/tests/compile-fail/thread-spawn.rs +++ /dev/null @@ -1,7 +0,0 @@ -use std::thread; - -// error-pattern: Miri does not support threading - -fn main() { - thread::spawn(|| {}); -} diff --git a/tests/run-pass/concurrency/simple.rs b/tests/run-pass/concurrency/simple.rs new file mode 100644 index 0000000000..5c295d1702 --- /dev/null +++ b/tests/run-pass/concurrency/simple.rs @@ -0,0 +1,59 @@ +use std::thread; + +fn create_and_detach() { + thread::spawn(|| ()); +} + +fn create_and_join() { + thread::spawn(|| ()).join().unwrap(); +} + +fn create_and_get_result() { + let nine = thread::spawn(|| 5 + 4).join().unwrap(); + assert_eq!(nine, 9); +} + +fn create_and_leak_result() { + thread::spawn(|| 7); +} + +fn create_nested_and_detach() { + thread::spawn(|| { + thread::spawn(|| ()); + }); +} + +fn create_nested_and_join() { + let handle = thread::spawn(|| thread::spawn(|| ())); + let handle_nested = handle.join().unwrap(); + handle_nested.join().unwrap(); +} + +fn create_move_in() { + let x = String::from("Hello!"); + thread::spawn(move || { + assert_eq!(x.len(), 6); + }) + .join() + .unwrap(); +} + +fn create_move_out() { + let result = thread::spawn(|| { + String::from("Hello!") + }) + .join() + .unwrap(); + assert_eq!(result.len(), 6); +} + +fn main() { + create_and_detach(); + create_and_join(); + create_and_get_result(); + create_and_leak_result(); + create_nested_and_detach(); + create_nested_and_join(); + create_move_in(); + create_move_out(); +} From 58a6a2729aa03ef8ca1c68f9c0396fafa1208f58 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Wed, 1 Apr 2020 16:26:41 -0700 Subject: [PATCH 02/77] Add a warning that Miri does not check for data-races. --- README.md | 4 +++- src/shims/foreign_items/posix.rs | 2 ++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index ecff779873..fb981a71f0 100644 --- a/README.md +++ b/README.md @@ -47,7 +47,9 @@ in your program, and cannot run all programs: * Miri runs the program as a platform-independent interpreter, so the program has no access to most platform-specific APIs or FFI. A few APIs have been implemented (such as printing to stdout) but most have not: for example, Miri - currently does not support concurrency, or SIMD, or networking. + currently does not support SIMD or networking. +* Miri currently does not check for data-races and other concurrency related + issues. [rust]: https://www.rust-lang.org/ [mir]: https://github.com/rust-lang/rfcs/blob/master/text/1211-mir.md diff --git a/src/shims/foreign_items/posix.rs b/src/shims/foreign_items/posix.rs index 47b661061d..878ab8896d 100644 --- a/src/shims/foreign_items/posix.rs +++ b/src/shims/foreign_items/posix.rs @@ -316,6 +316,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx // Threading "pthread_create" => { + println!("WARNING: The thread support is experimental. \ + For example, Miri does not detect data races yet."); assert_eq!(args.len(), 4); let func = args[2]; let fn_ptr = this.read_scalar(func)?.not_undef()?; From 8dd8f199cab87584387ff1dbd74430e908e1f1e2 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Wed, 1 Apr 2020 16:28:33 -0700 Subject: [PATCH 03/77] Update to support the updated API. 
--- src/machine.rs | 9 +++++++-- src/threads.rs | 13 +++++++++---- 2 files changed, 16 insertions(+), 6 deletions(-) diff --git a/src/machine.rs b/src/machine.rs index 9d1fa9b78c..e6fea672c6 100644 --- a/src/machine.rs +++ b/src/machine.rs @@ -464,8 +464,13 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'mir, 'tcx> { } } - fn resolve_thread_local_allocation_id(extra: &Self::MemoryExtra, id: AllocId) -> AllocId { - extra.tls.resolve_allocation(id) + #[inline(always)] + fn resolve_maybe_global_alloc( + tcx: ty::query::TyCtxtAt<'tcx>, + extra: &Self::MemoryExtra, + id: AllocId, + ) -> Option> { + extra.tls.resolve_allocation(*tcx, id) } fn init_allocation_extra<'b>( diff --git a/src/threads.rs b/src/threads.rs index 14ee58c2ee..618713e3c3 100644 --- a/src/threads.rs +++ b/src/threads.rs @@ -5,9 +5,10 @@ use std::collections::hash_map::Entry; use log::trace; -use rustc_middle::ty; use rustc_data_structures::fx::FxHashMap; use rustc_index::vec::{Idx, IndexVec}; +use rustc_middle::mir; +use rustc_middle::ty; use crate::*; @@ -234,13 +235,17 @@ impl ThreadLocalStorage { } /// For thread local allocation identifier `alloc_id`, retrieve the original /// static allocation identifier from which it was created. - pub fn resolve_allocation(&self, alloc_id: AllocId) -> AllocId { + pub fn resolve_allocation<'tcx>( + &self, + tcx: ty::TyCtxt<'tcx>, + alloc_id: AllocId, + ) -> Option> { trace!("resolve_allocation(alloc_id: {:?})", alloc_id); if let Some(original_id) = self.thread_local_origin.borrow().get(&alloc_id) { trace!("resolve_allocation(alloc_id: {:?}) -> {:?}", alloc_id, original_id); - *original_id + tcx.alloc_map.lock().get(*original_id) } else { - alloc_id + tcx.alloc_map.lock().get(alloc_id) } } /// Set which thread is currently active. From 92946b5a9cc52bfef2338b2075cec85561652449 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Wed, 1 Apr 2020 16:28:53 -0700 Subject: [PATCH 04/77] Add a test for thread locals. --- tests/run-pass/concurrency/thread_locals.rs | 48 +++++++++++++++++++++ 1 file changed, 48 insertions(+) create mode 100644 tests/run-pass/concurrency/thread_locals.rs diff --git a/tests/run-pass/concurrency/thread_locals.rs b/tests/run-pass/concurrency/thread_locals.rs new file mode 100644 index 0000000000..d0d25ba7f7 --- /dev/null +++ b/tests/run-pass/concurrency/thread_locals.rs @@ -0,0 +1,48 @@ +#![feature(thread_local)] + +use std::thread; + +#[thread_local] +static mut A: u8 = 0; +#[thread_local] +static mut B: u8 = 0; +static mut C: u8 = 0; + +unsafe fn get_a_ref() -> *mut u8 { + &mut A +} + +fn main() { + + unsafe { + let x = get_a_ref(); + *x = 5; + assert_eq!(A, 5); + B = 15; + C = 25; + } + + thread::spawn(|| { + unsafe { + assert_eq!(A, 0); + assert_eq!(B, 0); + assert_eq!(C, 25); + B = 14; + C = 24; + let y = get_a_ref(); + assert_eq!(*y, 0); + *y = 4; + assert_eq!(A, 4); + assert_eq!(*get_a_ref(), 4); + + } + }).join().unwrap(); + + unsafe { + assert_eq!(*get_a_ref(), 5); + assert_eq!(A, 5); + assert_eq!(B, 15); + assert_eq!(C, 24); + } + +} \ No newline at end of file From aef4c955995468d7efec81b951a1414bd3278a23 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Fri, 3 Apr 2020 16:09:42 -0700 Subject: [PATCH 05/77] Fix the problem of sending pointed to thread local statics. Add a regression test. 
--- src/machine.rs | 23 +++++++++++++++++++ tests/run-pass/concurrency/simple.stdout | 10 ++++++++ tests/run-pass/concurrency/thread_locals.rs | 16 +++++++++---- .../run-pass/concurrency/thread_locals.stdout | 1 + 4 files changed, 46 insertions(+), 4 deletions(-) create mode 100644 tests/run-pass/concurrency/simple.stdout create mode 100644 tests/run-pass/concurrency/thread_locals.stdout diff --git a/src/machine.rs b/src/machine.rs index e6fea672c6..7ed5f1e553 100644 --- a/src/machine.rs +++ b/src/machine.rs @@ -433,6 +433,29 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'mir, 'tcx> { Ok(()) } + fn access_local( + ecx: &InterpCx<'mir, 'tcx, Self>, + frame: &Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>, + local: mir::Local, + ) -> InterpResult<'tcx, Operand> { + match frame.body.local_decls[local].local_info { + mir::LocalInfo::StaticRef { def_id, is_thread_local: true } => { + let static_alloc_id = ecx.tcx.alloc_map.lock().create_static_alloc(def_id); + let alloc_id = ecx.memory.extra.tls.get_or_register_allocation(*ecx.memory.tcx, static_alloc_id); + let tag = Self::tag_global_base_pointer(&ecx.memory.extra, alloc_id); + let pointer: Pointer = alloc_id.into(); + let pointer = pointer.with_tag(tag); + let scalar: Scalar<_> = pointer.into(); + let scalar: ScalarMaybeUndef<_> = scalar.into(); + let immediate: Immediate<_> = scalar.into(); + Ok( + Operand::Immediate(immediate) + ) + }, + _ => frame.locals[local].access(), + } + } + fn canonical_alloc_id(mem: &Memory<'mir, 'tcx, Self>, id: AllocId) -> AllocId { let tcx = mem.tcx; let alloc = tcx.alloc_map.lock().get(id); diff --git a/tests/run-pass/concurrency/simple.stdout b/tests/run-pass/concurrency/simple.stdout new file mode 100644 index 0000000000..0506b7bdf8 --- /dev/null +++ b/tests/run-pass/concurrency/simple.stdout @@ -0,0 +1,10 @@ +WARNING: The thread support is experimental. For example, Miri does not detect data races yet. +WARNING: The thread support is experimental. For example, Miri does not detect data races yet. +WARNING: The thread support is experimental. For example, Miri does not detect data races yet. +WARNING: The thread support is experimental. For example, Miri does not detect data races yet. +WARNING: The thread support is experimental. For example, Miri does not detect data races yet. +WARNING: The thread support is experimental. For example, Miri does not detect data races yet. +WARNING: The thread support is experimental. For example, Miri does not detect data races yet. +WARNING: The thread support is experimental. For example, Miri does not detect data races yet. +WARNING: The thread support is experimental. For example, Miri does not detect data races yet. +WARNING: The thread support is experimental. For example, Miri does not detect data races yet. 
diff --git a/tests/run-pass/concurrency/thread_locals.rs b/tests/run-pass/concurrency/thread_locals.rs index d0d25ba7f7..1c268a4ff8 100644 --- a/tests/run-pass/concurrency/thread_locals.rs +++ b/tests/run-pass/concurrency/thread_locals.rs @@ -12,18 +12,24 @@ unsafe fn get_a_ref() -> *mut u8 { &mut A } +struct Sender(*mut u8); + +unsafe impl Send for Sender {} + fn main() { - unsafe { + let ptr = unsafe { let x = get_a_ref(); *x = 5; assert_eq!(A, 5); B = 15; C = 25; - } + Sender(&mut A) + }; - thread::spawn(|| { + thread::spawn(move || { unsafe { + assert_eq!(*ptr.0, 5); assert_eq!(A, 0); assert_eq!(B, 0); assert_eq!(C, 25); @@ -32,6 +38,7 @@ fn main() { let y = get_a_ref(); assert_eq!(*y, 0); *y = 4; + assert_eq!(*ptr.0, 5); assert_eq!(A, 4); assert_eq!(*get_a_ref(), 4); @@ -45,4 +52,5 @@ fn main() { assert_eq!(C, 24); } -} \ No newline at end of file +} + diff --git a/tests/run-pass/concurrency/thread_locals.stdout b/tests/run-pass/concurrency/thread_locals.stdout new file mode 100644 index 0000000000..9a53b4a5c9 --- /dev/null +++ b/tests/run-pass/concurrency/thread_locals.stdout @@ -0,0 +1 @@ +WARNING: The thread support is experimental. For example, Miri does not detect data races yet. From 1f33f04fd453a63a88cb25771466c2618b46d372 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Mon, 6 Apr 2020 13:44:47 -0700 Subject: [PATCH 06/77] Move pthread_create and related shims to a separate file. --- src/lib.rs | 1 + src/shims/foreign_items/posix.rs | 62 +++-------------------- src/shims/mod.rs | 1 + src/shims/threads.rs | 84 ++++++++++++++++++++++++++++++++ src/threads.rs | 4 -- 5 files changed, 93 insertions(+), 59 deletions(-) create mode 100644 src/shims/threads.rs diff --git a/src/lib.rs b/src/lib.rs index c042526be6..d8b3397c8e 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -42,6 +42,7 @@ pub use crate::shims::intrinsics::EvalContextExt as IntrinsicsEvalContextExt; pub use crate::shims::os_str::EvalContextExt as OsStrEvalContextExt; pub use crate::shims::panic::{CatchUnwindData, EvalContextExt as PanicEvalContextExt}; pub use crate::shims::sync::{EvalContextExt as SyncEvalContextExt}; +pub use crate::shims::threads::EvalContextExt as ThreadShimsEvalContextExt; pub use crate::shims::time::EvalContextExt as TimeEvalContextExt; pub use crate::shims::tls::{EvalContextExt as TlsEvalContextExt, TlsData}; pub use crate::shims::EvalContextExt as ShimsEvalContextExt; diff --git a/src/shims/foreign_items/posix.rs b/src/shims/foreign_items/posix.rs index 878ab8896d..7d2cb16afe 100644 --- a/src/shims/foreign_items/posix.rs +++ b/src/shims/foreign_items/posix.rs @@ -6,7 +6,6 @@ use std::convert::TryFrom; use log::trace; use crate::*; -use rustc_index::vec::Idx; use rustc_middle::mir; use rustc_target::abi::{Align, LayoutOf, Size}; @@ -316,66 +315,19 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx // Threading "pthread_create" => { - println!("WARNING: The thread support is experimental. \ - For example, Miri does not detect data races yet."); assert_eq!(args.len(), 4); - let func = args[2]; - let fn_ptr = this.read_scalar(func)?.not_undef()?; - let fn_val = this.memory.get_fn(fn_ptr)?; - let instance = match fn_val { - rustc_mir::interpret::FnVal::Instance(instance) => instance, - _ => unreachable!(), - }; - let thread_info_place = this.deref_operand(args[0])?; - let thread_info_type = args[0].layout.ty - .builtin_deref(true) - .ok_or_else(|| err_ub_format!( - "wrong signature used for `pthread_create`: first argument must be a raw pointer." - ))? 
- .ty; - let thread_info_layout = this.layout_of(thread_info_type)?; - let func_arg = match *args[3] { - rustc_mir::interpret::Operand::Immediate(immediate) => immediate, - _ => unreachable!(), - }; - let func_args = [func_arg]; - let ret_place = - this.allocate(this.layout_of(this.tcx.types.usize)?, MiriMemoryKind::Machine.into()); - let new_thread_id = this.create_thread()?; - let old_thread_id = this.set_active_thread(new_thread_id)?; - this.call_function( - instance, - &func_args[..], - Some(ret_place.into()), - StackPopCleanup::None { cleanup: true }, - )?; - this.set_active_thread(old_thread_id)?; - this.write_scalar( - Scalar::from_uint(new_thread_id.index() as u128, thread_info_layout.size), - thread_info_place.into(), - )?; - - // Return success (`0`). - this.write_null(dest)?; + let result = this.pthread_create(args[0], args[1], args[2], args[3])?; + this.write_scalar(Scalar::from_i32(result), dest)?; } "pthread_join" => { assert_eq!(args.len(), 2); - assert!( - this.is_null(this.read_scalar(args[1])?.not_undef()?)?, - "Miri supports pthread_join only with retval==NULL" - ); - let thread = this.read_scalar(args[0])?.not_undef()?.to_machine_usize(this)?; - this.join_thread(thread.into())?; - - // Return success (`0`). - this.write_null(dest)?; + let result = this.pthread_join(args[0], args[1])?; + this.write_scalar(Scalar::from_i32(result), dest)?; } "pthread_detach" => { - let thread = this.read_scalar(args[0])?.not_undef()?.to_machine_usize(this)?; - this.detach_thread(thread.into())?; - - // Return success (`0`). - this.write_null(dest)?; + assert_eq!(args.len(), 1); + let result = this.pthread_detach(args[0])?; + this.write_scalar(Scalar::from_i32(result), dest)?; } "pthread_attr_getguardsize" => { diff --git a/src/shims/mod.rs b/src/shims/mod.rs index 71ff6024ec..118058dd32 100644 --- a/src/shims/mod.rs +++ b/src/shims/mod.rs @@ -6,6 +6,7 @@ pub mod intrinsics; pub mod os_str; pub mod panic; pub mod sync; +pub mod threads; pub mod time; pub mod tls; diff --git a/src/shims/threads.rs b/src/shims/threads.rs new file mode 100644 index 0000000000..6e1087dd81 --- /dev/null +++ b/src/shims/threads.rs @@ -0,0 +1,84 @@ +use crate::*; +use rustc_index::vec::Idx; +use rustc_target::abi::LayoutOf; + +impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {} +pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> { + fn pthread_create( + &mut self, + thread: OpTy<'tcx, Tag>, + _attr: OpTy<'tcx, Tag>, + start_routine: OpTy<'tcx, Tag>, + arg: OpTy<'tcx, Tag>, + ) -> InterpResult<'tcx, i32> { + println!( + "WARNING: The thread support is experimental. \ + For example, Miri does not detect data races yet." + ); + + let this = self.eval_context_mut(); + + let new_thread_id = this.create_thread()?; + let old_thread_id = this.set_active_thread(new_thread_id)?; + + let thread_info_place = this.deref_operand(thread)?; + let thread_info_type = thread.layout.ty + .builtin_deref(true) + .ok_or_else(|| err_ub_format!( + "wrong signature used for `pthread_create`: first argument must be a raw pointer." + ))? 
+ .ty; + let thread_info_layout = this.layout_of(thread_info_type)?; + this.write_scalar( + Scalar::from_uint(new_thread_id.index() as u128, thread_info_layout.size), + thread_info_place.into(), + )?; + + let fn_ptr = this.read_scalar(start_routine)?.not_undef()?; + let instance = this.memory.get_fn(fn_ptr)?.as_instance()?; + + let func_arg = match *arg { + rustc_mir::interpret::Operand::Immediate(immediate) => immediate, + _ => unreachable!(), + }; + let func_args = [func_arg]; + + let ret_place = + this.allocate(this.layout_of(this.tcx.types.usize)?, MiriMemoryKind::Machine.into()); + + this.call_function( + instance, + &func_args[..], + Some(ret_place.into()), + StackPopCleanup::None { cleanup: true }, + )?; + + this.set_active_thread(old_thread_id)?; + + Ok(0) + } + fn pthread_join( + &mut self, + thread: OpTy<'tcx, Tag>, + retval: OpTy<'tcx, Tag>, + ) -> InterpResult<'tcx, i32> { + let this = self.eval_context_mut(); + + if !this.is_null(this.read_scalar(retval)?.not_undef()?)? { + throw_unsup_format!("Miri supports pthread_join only with retval==NULL"); + } + + let thread_id = this.read_scalar(thread)?.not_undef()?.to_machine_usize(this)?; + this.join_thread(thread_id.into())?; + + Ok(0) + } + fn pthread_detach(&mut self, thread: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { + let this = self.eval_context_mut(); + + let thread_id = this.read_scalar(thread)?.not_undef()?.to_machine_usize(this)?; + this.detach_thread(thread_id.into())?; + + Ok(0) + } +} diff --git a/src/threads.rs b/src/threads.rs index 618713e3c3..9d982359bf 100644 --- a/src/threads.rs +++ b/src/threads.rs @@ -91,9 +91,6 @@ pub struct ThreadSet<'mir, 'tcx> { /// /// Note that this vector also contains terminated threads. threads: IndexVec>, - - /// List of threads that just terminated. TODO: Cleanup. - terminated_threads: Vec, } impl<'mir, 'tcx> Default for ThreadSet<'mir, 'tcx> { @@ -103,7 +100,6 @@ impl<'mir, 'tcx> Default for ThreadSet<'mir, 'tcx> { Self { active_thread: ThreadId::new(0), threads: threads, - terminated_threads: Default::default(), } } } From ed9c7d168b0ded92e4bfb53acd2f71b61b54e306 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Mon, 6 Apr 2020 14:00:45 -0700 Subject: [PATCH 07/77] Report that we do not support foreign thread local statics. --- src/machine.rs | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/src/machine.rs b/src/machine.rs index 7ed5f1e553..2f0aa91575 100644 --- a/src/machine.rs +++ b/src/machine.rs @@ -14,6 +14,7 @@ use rand::rngs::StdRng; use rustc_ast::attr; use rustc_data_structures::fx::FxHashMap; use rustc_middle::{ + middle::codegen_fn_attrs::CodegenFnAttrFlags, mir, ty::{ self, @@ -21,7 +22,7 @@ use rustc_middle::{ TyCtxt, }, }; -use rustc_span::symbol::{sym, Symbol}; +use rustc_span::{def_id::DefId, symbol::{sym, Symbol}}; use rustc_target::abi::{LayoutOf, Size}; use crate::*; @@ -459,8 +460,14 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'mir, 'tcx> { fn canonical_alloc_id(mem: &Memory<'mir, 'tcx, Self>, id: AllocId) -> AllocId { let tcx = mem.tcx; let alloc = tcx.alloc_map.lock().get(id); + fn is_thread_local<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> bool { + tcx.codegen_fn_attrs(def_id).flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) + } match alloc { Some(GlobalAlloc::Static(def_id)) if tcx.is_foreign_item(def_id) => { + if is_thread_local(*tcx, def_id) { + unimplemented!("Foreign thread local statics are not supported yet."); + } // Figure out if this is an extern static, and if yes, which one. 
let attrs = tcx.get_attrs(def_id); let link_name = match attr::first_attr_value_str_by_name(&attrs, sym::link_name) { @@ -476,7 +483,7 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'mir, 'tcx> { id } }, - Some(GlobalAlloc::Static(def_id)) if tcx.has_attr(def_id, sym::thread_local) => { + Some(GlobalAlloc::Static(def_id)) if is_thread_local(*tcx, def_id) => { // We have a thread local, so we need to get a unique allocation id for it. mem.extra.tls.get_or_register_allocation(*tcx, id) }, From 52184193c363e030818a18a60123eed25b12c7c9 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Mon, 6 Apr 2020 14:41:05 -0700 Subject: [PATCH 08/77] Fix comments in TLS. --- src/machine.rs | 2 +- src/shims/tls.rs | 8 +++++--- src/threads.rs | 13 +++++-------- 3 files changed, 11 insertions(+), 12 deletions(-) diff --git a/src/machine.rs b/src/machine.rs index 2f0aa91575..a7d62897b8 100644 --- a/src/machine.rs +++ b/src/machine.rs @@ -27,7 +27,7 @@ use rustc_target::abi::{LayoutOf, Size}; use crate::*; -pub use crate::threads::{ThreadId, ThreadSet, ThreadLocalStorage}; +pub use crate::threads::{ThreadId, ThreadSet, ThreadState, ThreadLocalStorage}; // Some global facts about the emulated machine. pub const PAGE_SIZE: u64 = 4 * 1024; // FIXME: adjust to target architecture diff --git a/src/shims/tls.rs b/src/shims/tls.rs index d16acb7500..ec8c31fe2c 100644 --- a/src/shims/tls.rs +++ b/src/shims/tls.rs @@ -9,7 +9,7 @@ use rustc_middle::ty; use rustc_target::abi::{Size, HasDataLayout}; use crate::{HelpersEvalContextExt, ThreadsEvalContextExt, InterpResult, MPlaceTy, Scalar, StackPopCleanup, Tag}; -use crate::machine::ThreadId; +use crate::machine::{ThreadId, ThreadState}; pub type TlsKey = u128; @@ -174,7 +174,7 @@ impl<'tcx> TlsData<'tcx> { impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {} pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> { - /// Run TLS destructors for the currently active thread. + /// Run TLS destructors for all threads. fn run_tls_dtors(&mut self) -> InterpResult<'tcx> { let this = self.eval_context_mut(); assert!(!this.machine.tls.dtors_running, "running TLS dtors twice"); @@ -223,7 +223,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx } // Now run the "keyed" destructors. - for thread_id in this.get_all_thread_ids() { + for (thread_id, thread_state) in this.get_all_thread_ids_with_states() { + assert!(thread_state == ThreadState::Terminated, + "TLS destructors should be executed after all threads terminated."); this.set_active_thread(thread_id)?; let mut dtor = this.machine.tls.fetch_tls_dtor(None, thread_id); while let Some((instance, thread_id, ptr, key)) = dtor { diff --git a/src/threads.rs b/src/threads.rs index 9d982359bf..4458f4410e 100644 --- a/src/threads.rs +++ b/src/threads.rs @@ -97,10 +97,7 @@ impl<'mir, 'tcx> Default for ThreadSet<'mir, 'tcx> { fn default() -> Self { let mut threads = IndexVec::new(); threads.push(Default::default()); - Self { - active_thread: ThreadId::new(0), - threads: threads, - } + Self { active_thread: ThreadId::new(0), threads: threads } } } @@ -156,8 +153,8 @@ impl<'mir, 'tcx: 'mir> ThreadSet<'mir, 'tcx> { } } /// Get ids of all threads ever allocated. 
- fn get_all_thread_ids(&mut self) -> Vec { - (0..self.threads.len()).map(ThreadId::new).collect() + fn get_all_thread_ids_with_states(&self) -> Vec<(ThreadId, ThreadState)> { + self.threads.iter_enumerated().map(|(id, thread)| (id, thread.state)).collect() } /// Decide which thread to run next. /// @@ -283,9 +280,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let this = self.eval_context_mut(); this.machine.threads.active_thread_stack_mut() } - fn get_all_thread_ids(&mut self) -> Vec { + fn get_all_thread_ids_with_states(&mut self) -> Vec<(ThreadId, ThreadState)> { let this = self.eval_context_mut(); - this.machine.threads.get_all_thread_ids() + this.machine.threads.get_all_thread_ids_with_states() } /// Decide which thread to run next. /// From f21197f081048e383ff10427216db7867b746832 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Mon, 6 Apr 2020 16:12:13 -0700 Subject: [PATCH 09/77] Store the thread name. --- src/shims/foreign_items/posix.rs | 13 +++++-------- src/shims/threads.rs | 20 ++++++++++++++++++++ src/threads.rs | 26 ++++++++++++++++++++------ 3 files changed, 45 insertions(+), 14 deletions(-) diff --git a/src/shims/foreign_items/posix.rs b/src/shims/foreign_items/posix.rs index 7d2cb16afe..4e08593d61 100644 --- a/src/shims/foreign_items/posix.rs +++ b/src/shims/foreign_items/posix.rs @@ -329,6 +329,11 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let result = this.pthread_detach(args[0])?; this.write_scalar(Scalar::from_i32(result), dest)?; } + "prctl" => { + assert_eq!(args.len(), 5); + let result = this.prctl(args[0], args[1], args[2], args[3], args[4])?; + this.write_scalar(Scalar::from_i32(result), dest)?; + } "pthread_attr_getguardsize" => { assert_eq!(args.len(), 2); @@ -347,14 +352,6 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx this.write_null(dest)?; } - "prctl" => { - let option = this.read_scalar(args[0])?.not_undef()?.to_i32()?; - assert_eq!(option, 0xf, "Miri supports only PR_SET_NAME"); - - // Return success (`0`). - this.write_null(dest)?; - } - // Incomplete shims that we "stub out" just to get pre-main initialziation code to work. // These shims are enabled only when the caller is in the standard library. | "pthread_attr_init" diff --git a/src/shims/threads.rs b/src/shims/threads.rs index 6e1087dd81..3a55fb3c70 100644 --- a/src/shims/threads.rs +++ b/src/shims/threads.rs @@ -79,6 +79,26 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let thread_id = this.read_scalar(thread)?.not_undef()?.to_machine_usize(this)?; this.detach_thread(thread_id.into())?; + Ok(0) + } + fn prctl( + &mut self, + option: OpTy<'tcx, Tag>, + arg2: OpTy<'tcx, Tag>, + _arg3: OpTy<'tcx, Tag>, + _arg4: OpTy<'tcx, Tag>, + _arg5: OpTy<'tcx, Tag>, + ) -> InterpResult<'tcx, i32> { + let this = self.eval_context_mut(); + + let option = this.read_scalar(option)?.not_undef()?.to_i32()?; + if option != this.eval_libc_i32("PR_SET_NAME")? { + throw_unsup_format!("Miri supports only PR_SET_NAME"); + } + let address = this.read_scalar(arg2)?.not_undef()?; + let name = this.memory.read_c_str(address)?.to_owned(); + this.set_active_thread_name(name)?; + Ok(0) } } diff --git a/src/threads.rs b/src/threads.rs index 4458f4410e..4ce35d50ab 100644 --- a/src/threads.rs +++ b/src/threads.rs @@ -47,6 +47,8 @@ pub enum ThreadState { /// A thread. pub struct Thread<'mir, 'tcx> { state: ThreadState, + /// Name of the thread. 
+ thread_name: Option>, /// The virtual call stack. stack: Vec>>, /// Is the thread detached? @@ -78,7 +80,7 @@ impl<'mir, 'tcx> std::fmt::Debug for Thread<'mir, 'tcx> { impl<'mir, 'tcx> Default for Thread<'mir, 'tcx> { fn default() -> Self { - Self { state: ThreadState::Enabled, stack: Vec::new(), detached: false } + Self { state: ThreadState::Enabled, thread_name: None, stack: Vec::new(), detached: false } } } @@ -117,16 +119,20 @@ impl<'mir, 'tcx: 'mir> ThreadSet<'mir, 'tcx> { new_thread_id } /// Set an active thread and return the id of the thread that was active before. - fn set_active_thread(&mut self, id: ThreadId) -> ThreadId { + fn set_active_thread_id(&mut self, id: ThreadId) -> ThreadId { let active_thread_id = self.active_thread; self.active_thread = id; assert!(self.active_thread.index() < self.threads.len()); active_thread_id } /// Get the id of the currently active thread. - fn get_active_thread(&self) -> ThreadId { + fn get_active_thread_id(&self) -> ThreadId { self.active_thread } + /// Get the borrow of the currently active thread. + fn active_thread_mut(&mut self) -> &mut Thread<'mir, 'tcx> { + &mut self.threads[self.active_thread] + } /// Mark the thread as detached, which means that no other thread will try /// to join it and the thread is responsible for cleaning up. fn detach_thread(&mut self, id: ThreadId) { @@ -152,6 +158,10 @@ impl<'mir, 'tcx: 'mir> ThreadSet<'mir, 'tcx> { ); } } + /// Set the name of the active thread. + fn set_thread_name(&mut self, new_thread_name: Vec) { + self.active_thread_mut().thread_name = Some(new_thread_name); + } /// Get ids of all threads ever allocated. fn get_all_thread_ids_with_states(&self) -> Vec<(ThreadId, ThreadState)> { self.threads.iter_enumerated().map(|(id, thread)| (id, thread.state)).collect() @@ -266,11 +276,11 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn set_active_thread(&mut self, thread_id: ThreadId) -> InterpResult<'tcx, ThreadId> { let this = self.eval_context_mut(); this.memory.extra.tls.set_active_thread(thread_id); - Ok(this.machine.threads.set_active_thread(thread_id)) + Ok(this.machine.threads.set_active_thread_id(thread_id)) } fn get_active_thread(&self) -> InterpResult<'tcx, ThreadId> { let this = self.eval_context_ref(); - Ok(this.machine.threads.get_active_thread()) + Ok(this.machine.threads.get_active_thread_id()) } fn active_thread_stack(&self) -> &[Frame<'mir, 'tcx, Tag, FrameData<'tcx>>] { let this = self.eval_context_ref(); @@ -280,6 +290,10 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let this = self.eval_context_mut(); this.machine.threads.active_thread_stack_mut() } + fn set_active_thread_name(&mut self, new_thread_name: Vec) -> InterpResult<'tcx, ()> { + let this = self.eval_context_mut(); + Ok(this.machine.threads.set_thread_name(new_thread_name)) + } fn get_all_thread_ids_with_states(&mut self) -> Vec<(ThreadId, ThreadState)> { let this = self.eval_context_mut(); this.machine.threads.get_all_thread_ids_with_states() @@ -291,7 +305,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let this = self.eval_context_mut(); // Find the next thread to run. if this.machine.threads.schedule()? 
{ - let active_thread = this.machine.threads.get_active_thread(); + let active_thread = this.machine.threads.get_active_thread_id(); this.memory.extra.tls.set_active_thread(active_thread); Ok(true) } else { From b04bf8a51480c05fc9984476a78f07b927f2672f Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Mon, 6 Apr 2020 16:12:25 -0700 Subject: [PATCH 10/77] Rustfmt the test. --- tests/run-pass/concurrency/thread_locals.rs | 38 +++++++++------------ 1 file changed, 17 insertions(+), 21 deletions(-) diff --git a/tests/run-pass/concurrency/thread_locals.rs b/tests/run-pass/concurrency/thread_locals.rs index 1c268a4ff8..50aa6fee2f 100644 --- a/tests/run-pass/concurrency/thread_locals.rs +++ b/tests/run-pass/concurrency/thread_locals.rs @@ -17,7 +17,6 @@ struct Sender(*mut u8); unsafe impl Send for Sender {} fn main() { - let ptr = unsafe { let x = get_a_ref(); *x = 5; @@ -26,24 +25,23 @@ fn main() { C = 25; Sender(&mut A) }; - - thread::spawn(move || { - unsafe { - assert_eq!(*ptr.0, 5); - assert_eq!(A, 0); - assert_eq!(B, 0); - assert_eq!(C, 25); - B = 14; - C = 24; - let y = get_a_ref(); - assert_eq!(*y, 0); - *y = 4; - assert_eq!(*ptr.0, 5); - assert_eq!(A, 4); - assert_eq!(*get_a_ref(), 4); - - } - }).join().unwrap(); + + thread::spawn(move || unsafe { + assert_eq!(*ptr.0, 5); + assert_eq!(A, 0); + assert_eq!(B, 0); + assert_eq!(C, 25); + B = 14; + C = 24; + let y = get_a_ref(); + assert_eq!(*y, 0); + *y = 4; + assert_eq!(*ptr.0, 5); + assert_eq!(A, 4); + assert_eq!(*get_a_ref(), 4); + }) + .join() + .unwrap(); unsafe { assert_eq!(*get_a_ref(), 5); @@ -51,6 +49,4 @@ fn main() { assert_eq!(B, 15); assert_eq!(C, 24); } - } - From 2202278f6af676266034e756bd3848efe4e10ab8 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Mon, 6 Apr 2020 16:30:30 -0700 Subject: [PATCH 11/77] Fix pthread_self. --- src/shims/foreign_items/posix.rs | 5 ++++- src/shims/threads.rs | 6 ++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/src/shims/foreign_items/posix.rs b/src/shims/foreign_items/posix.rs index 4e08593d61..4cd3b84991 100644 --- a/src/shims/foreign_items/posix.rs +++ b/src/shims/foreign_items/posix.rs @@ -329,6 +329,10 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let result = this.pthread_detach(args[0])?; this.write_scalar(Scalar::from_i32(result), dest)?; } + "pthread_self" => { + assert_eq!(args.len(), 0); + this.pthread_self(dest)?; + } "prctl" => { assert_eq!(args.len(), 5); let result = this.prctl(args[0], args[1], args[2], args[3], args[4])?; @@ -356,7 +360,6 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx // These shims are enabled only when the caller is in the standard library. 
| "pthread_attr_init" | "pthread_attr_destroy" - | "pthread_self" | "pthread_attr_setstacksize" | "pthread_condattr_init" | "pthread_condattr_setclock" diff --git a/src/shims/threads.rs b/src/shims/threads.rs index 3a55fb3c70..fc733d7f5c 100644 --- a/src/shims/threads.rs +++ b/src/shims/threads.rs @@ -81,6 +81,12 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx Ok(0) } + fn pthread_self(&mut self, dest: PlaceTy<'tcx, Tag>) -> InterpResult<'tcx> { + let this = self.eval_context_mut(); + + let thread_id = this.get_active_thread()?; + this.write_scalar(Scalar::from_uint(thread_id.index() as u128, dest.layout.size), dest) + } fn prctl( &mut self, option: OpTy<'tcx, Tag>, From 1c8a59c69189b42b97db49292d0ca198a7d5977a Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Tue, 7 Apr 2020 20:20:41 -0700 Subject: [PATCH 12/77] Rebase on PR 1157. --- src/shims/sync.rs | 382 +++++++++++++++++++----- src/threads.rs | 81 ++++- tests/run-pass/concurrency/locks.rs | 29 ++ tests/run-pass/concurrency/locks.stdout | 3 + 4 files changed, 415 insertions(+), 80 deletions(-) create mode 100644 tests/run-pass/concurrency/locks.rs create mode 100644 tests/run-pass/concurrency/locks.stdout diff --git a/src/shims/sync.rs b/src/shims/sync.rs index b03dcbfd89..eb54358114 100644 --- a/src/shims/sync.rs +++ b/src/shims/sync.rs @@ -2,6 +2,7 @@ use rustc_middle::ty::{TyKind, TypeAndMut}; use rustc_target::abi::{LayoutOf, Size}; use crate::stacked_borrows::Tag; +use crate::threads::{BlockSetId, ThreadId}; use crate::*; fn assert_ptr_target_min_size<'mir, 'tcx: 'mir>( @@ -55,15 +56,17 @@ fn mutexattr_set_kind<'mir, 'tcx: 'mir>( // bytes 0-3: reserved for signature on macOS // (need to avoid this because it is set by static initializer macros) // bytes 4-7: count of how many times this mutex has been locked, as a u32 +// bytes 8-11: when count > 0, id of the owner thread as a u32 // bytes 12-15 or 16-19 (depending on platform): mutex kind, as an i32 // (the kind has to be at its offset for compatibility with static initializer macros) +// bytes 20-23: when count > 0, id of the blockset in which the blocked threads are waiting. 
fn mutex_get_locked_count<'mir, 'tcx: 'mir>( ecx: &MiriEvalContext<'mir, 'tcx>, mutex_op: OpTy<'tcx, Tag>, ) -> InterpResult<'tcx, ScalarMaybeUndef> { // Ensure that the following read at an offset to the mutex pointer is within bounds - assert_ptr_target_min_size(ecx, mutex_op, 20)?; + assert_ptr_target_min_size(ecx, mutex_op, 24)?; let mutex_place = ecx.deref_operand(mutex_op)?; let locked_count_place = mutex_place.offset( Size::from_bytes(4), @@ -80,7 +83,7 @@ fn mutex_set_locked_count<'mir, 'tcx: 'mir>( locked_count: impl Into>, ) -> InterpResult<'tcx, ()> { // Ensure that the following write at an offset to the mutex pointer is within bounds - assert_ptr_target_min_size(ecx, mutex_op, 20)?; + assert_ptr_target_min_size(ecx, mutex_op, 24)?; let mutex_place = ecx.deref_operand(mutex_op)?; let locked_count_place = mutex_place.offset( Size::from_bytes(4), @@ -91,12 +94,45 @@ fn mutex_set_locked_count<'mir, 'tcx: 'mir>( ecx.write_scalar(locked_count.into(), locked_count_place.into()) } +fn mutex_get_owner<'mir, 'tcx: 'mir>( + ecx: &MiriEvalContext<'mir, 'tcx>, + mutex_op: OpTy<'tcx, Tag>, +) -> InterpResult<'tcx, ScalarMaybeUndef> { + // Ensure that the following read at an offset to the mutex pointer is within bounds + assert_ptr_target_min_size(ecx, mutex_op, 24)?; + let mutex_place = ecx.deref_operand(mutex_op)?; + let mutex_id_place = mutex_place.offset( + Size::from_bytes(8), + MemPlaceMeta::None, + ecx.machine.layouts.u32, + ecx, + )?; + ecx.read_scalar(mutex_id_place.into()) +} + +fn mutex_set_owner<'mir, 'tcx: 'mir>( + ecx: &mut MiriEvalContext<'mir, 'tcx>, + mutex_op: OpTy<'tcx, Tag>, + mutex_id: impl Into>, +) -> InterpResult<'tcx, ()> { + // Ensure that the following write at an offset to the mutex pointer is within bounds + assert_ptr_target_min_size(ecx, mutex_op, 24)?; + let mutex_place = ecx.deref_operand(mutex_op)?; + let mutex_id_place = mutex_place.offset( + Size::from_bytes(8), + MemPlaceMeta::None, + ecx.machine.layouts.u32, + ecx, + )?; + ecx.write_scalar(mutex_id.into(), mutex_id_place.into()) +} + fn mutex_get_kind<'mir, 'tcx: 'mir>( ecx: &mut MiriEvalContext<'mir, 'tcx>, mutex_op: OpTy<'tcx, Tag>, ) -> InterpResult<'tcx, ScalarMaybeUndef> { // Ensure that the following read at an offset to the mutex pointer is within bounds - assert_ptr_target_min_size(ecx, mutex_op, 20)?; + assert_ptr_target_min_size(ecx, mutex_op, 24)?; let mutex_place = ecx.deref_operand(mutex_op)?; let kind_offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 }; let kind_place = mutex_place.offset( @@ -114,7 +150,7 @@ fn mutex_set_kind<'mir, 'tcx: 'mir>( kind: impl Into>, ) -> InterpResult<'tcx, ()> { // Ensure that the following write at an offset to the mutex pointer is within bounds - assert_ptr_target_min_size(ecx, mutex_op, 20)?; + assert_ptr_target_min_size(ecx, mutex_op, 24)?; let mutex_place = ecx.deref_operand(mutex_op)?; let kind_offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 }; let kind_place = mutex_place.offset( @@ -126,6 +162,55 @@ fn mutex_set_kind<'mir, 'tcx: 'mir>( ecx.write_scalar(kind.into(), kind_place.into()) } +fn mutex_get_blockset<'mir, 'tcx: 'mir>( + ecx: &MiriEvalContext<'mir, 'tcx>, + mutex_op: OpTy<'tcx, Tag>, +) -> InterpResult<'tcx, ScalarMaybeUndef> { + // Ensure that the following read at an offset to the mutex pointer is within bounds + assert_ptr_target_min_size(ecx, mutex_op, 24)?; + let mutex_place = ecx.deref_operand(mutex_op)?; + let mutex_id_place = mutex_place.offset( + Size::from_bytes(20), + MemPlaceMeta::None, + 
ecx.machine.layouts.u32, + ecx, + )?; + ecx.read_scalar(mutex_id_place.into()) +} + +fn mutex_set_blockset<'mir, 'tcx: 'mir>( + ecx: &mut MiriEvalContext<'mir, 'tcx>, + mutex_op: OpTy<'tcx, Tag>, + mutex_id: impl Into>, +) -> InterpResult<'tcx, ()> { + // Ensure that the following write at an offset to the mutex pointer is within bounds + assert_ptr_target_min_size(ecx, mutex_op, 24)?; + let mutex_place = ecx.deref_operand(mutex_op)?; + let mutex_id_place = mutex_place.offset( + Size::from_bytes(20), + MemPlaceMeta::None, + ecx.machine.layouts.u32, + ecx, + )?; + ecx.write_scalar(mutex_id.into(), mutex_id_place.into()) +} + +fn mutex_get_or_create_blockset<'mir, 'tcx: 'mir>( + ecx: &mut MiriEvalContext<'mir, 'tcx>, + mutex_op: OpTy<'tcx, Tag>, +) -> InterpResult<'tcx, BlockSetId> { + let blockset = mutex_get_blockset(ecx, mutex_op)?.to_u32()?; + if blockset == 0 { + // 0 is a default value and also not a valid blockset id. Need to + // allocate a new blockset. + let blockset = ecx.create_blockset()?; + mutex_set_blockset(ecx, mutex_op, blockset.to_u32_scalar())?; + Ok(blockset) + } else { + Ok(blockset.into()) + } +} + // pthread_rwlock_t is between 32 and 56 bytes, depending on the platform. // Our chosen memory layout for the emulated rwlock (does not have to match the platform layout!): @@ -133,13 +218,17 @@ fn mutex_set_kind<'mir, 'tcx: 'mir>( // (need to avoid this because it is set by static initializer macros) // bytes 4-7: reader count, as a u32 // bytes 8-11: writer count, as a u32 +// bytes 12-15: when writer or reader count > 0, id of the blockset in which the +// blocked writers are waiting. +// bytes 16-20: when writer count > 0, id of the blockset in which the blocked +// readers are waiting. fn rwlock_get_readers<'mir, 'tcx: 'mir>( ecx: &MiriEvalContext<'mir, 'tcx>, rwlock_op: OpTy<'tcx, Tag>, ) -> InterpResult<'tcx, ScalarMaybeUndef> { // Ensure that the following read at an offset to the rwlock pointer is within bounds - assert_ptr_target_min_size(ecx, rwlock_op, 12)?; + assert_ptr_target_min_size(ecx, rwlock_op, 20)?; let rwlock_place = ecx.deref_operand(rwlock_op)?; let readers_place = rwlock_place.offset( Size::from_bytes(4), @@ -156,7 +245,7 @@ fn rwlock_set_readers<'mir, 'tcx: 'mir>( readers: impl Into>, ) -> InterpResult<'tcx, ()> { // Ensure that the following write at an offset to the rwlock pointer is within bounds - assert_ptr_target_min_size(ecx, rwlock_op, 12)?; + assert_ptr_target_min_size(ecx, rwlock_op, 20)?; let rwlock_place = ecx.deref_operand(rwlock_op)?; let readers_place = rwlock_place.offset( Size::from_bytes(4), @@ -172,7 +261,7 @@ fn rwlock_get_writers<'mir, 'tcx: 'mir>( rwlock_op: OpTy<'tcx, Tag>, ) -> InterpResult<'tcx, ScalarMaybeUndef> { // Ensure that the following read at an offset to the rwlock pointer is within bounds - assert_ptr_target_min_size(ecx, rwlock_op, 12)?; + assert_ptr_target_min_size(ecx, rwlock_op, 20)?; let rwlock_place = ecx.deref_operand(rwlock_op)?; let writers_place = rwlock_place.offset( Size::from_bytes(8), @@ -189,7 +278,7 @@ fn rwlock_set_writers<'mir, 'tcx: 'mir>( writers: impl Into>, ) -> InterpResult<'tcx, ()> { // Ensure that the following write at an offset to the rwlock pointer is within bounds - assert_ptr_target_min_size(ecx, rwlock_op, 12)?; + assert_ptr_target_min_size(ecx, rwlock_op, 20)?; let rwlock_place = ecx.deref_operand(rwlock_op)?; let writers_place = rwlock_place.offset( Size::from_bytes(8), @@ -200,6 +289,104 @@ fn rwlock_set_writers<'mir, 'tcx: 'mir>( ecx.write_scalar(writers.into(), 
writers_place.into()) } +fn rwlock_get_writer_blockset<'mir, 'tcx: 'mir>( + ecx: &MiriEvalContext<'mir, 'tcx>, + rwlock_op: OpTy<'tcx, Tag>, +) -> InterpResult<'tcx, ScalarMaybeUndef> { + // Ensure that the following read at an offset to the rwlock pointer is within bounds + assert_ptr_target_min_size(ecx, rwlock_op, 20)?; + let rwlock_place = ecx.deref_operand(rwlock_op)?; + let blockset_place = rwlock_place.offset( + Size::from_bytes(12), + MemPlaceMeta::None, + ecx.machine.layouts.u32, + ecx, + )?; + ecx.read_scalar(blockset_place.into()) +} + +fn rwlock_set_writer_blockset<'mir, 'tcx: 'mir>( + ecx: &mut MiriEvalContext<'mir, 'tcx>, + rwlock_op: OpTy<'tcx, Tag>, + blockset: impl Into>, +) -> InterpResult<'tcx, ()> { + // Ensure that the following write at an offset to the rwlock pointer is within bounds + assert_ptr_target_min_size(ecx, rwlock_op, 20)?; + let rwlock_place = ecx.deref_operand(rwlock_op)?; + let blockset_place = rwlock_place.offset( + Size::from_bytes(12), + MemPlaceMeta::None, + ecx.machine.layouts.u32, + ecx, + )?; + ecx.write_scalar(blockset.into(), blockset_place.into()) +} + +fn rwlock_get_or_create_writer_blockset<'mir, 'tcx: 'mir>( + ecx: &mut MiriEvalContext<'mir, 'tcx>, + rwlock_op: OpTy<'tcx, Tag>, +) -> InterpResult<'tcx, BlockSetId> { + let blockset = rwlock_get_writer_blockset(ecx, rwlock_op)?.to_u32()?; + if blockset == 0 { + // 0 is a default value and also not a valid blockset id. Need to + // allocate a new blockset. + let blockset = ecx.create_blockset()?; + rwlock_set_writer_blockset(ecx, rwlock_op, blockset.to_u32_scalar())?; + Ok(blockset) + } else { + Ok(blockset.into()) + } +} + +fn rwlock_get_reader_blockset<'mir, 'tcx: 'mir>( + ecx: &MiriEvalContext<'mir, 'tcx>, + rwlock_op: OpTy<'tcx, Tag>, +) -> InterpResult<'tcx, ScalarMaybeUndef> { + // Ensure that the following read at an offset to the rwlock pointer is within bounds + assert_ptr_target_min_size(ecx, rwlock_op, 20)?; + let rwlock_place = ecx.deref_operand(rwlock_op)?; + let blockset_place = rwlock_place.offset( + Size::from_bytes(16), + MemPlaceMeta::None, + ecx.machine.layouts.u32, + ecx, + )?; + ecx.read_scalar(blockset_place.into()) +} + +fn rwlock_set_reader_blockset<'mir, 'tcx: 'mir>( + ecx: &mut MiriEvalContext<'mir, 'tcx>, + rwlock_op: OpTy<'tcx, Tag>, + blockset: impl Into>, +) -> InterpResult<'tcx, ()> { + // Ensure that the following write at an offset to the rwlock pointer is within bounds + assert_ptr_target_min_size(ecx, rwlock_op, 20)?; + let rwlock_place = ecx.deref_operand(rwlock_op)?; + let blockset_place = rwlock_place.offset( + Size::from_bytes(16), + MemPlaceMeta::None, + ecx.machine.layouts.u32, + ecx, + )?; + ecx.write_scalar(blockset.into(), blockset_place.into()) +} + +fn rwlock_get_or_create_reader_blockset<'mir, 'tcx: 'mir>( + ecx: &mut MiriEvalContext<'mir, 'tcx>, + rwlock_op: OpTy<'tcx, Tag>, +) -> InterpResult<'tcx, BlockSetId> { + let blockset = rwlock_get_reader_blockset(ecx, rwlock_op)?.to_u32()?; + if blockset == 0 { + // 0 is a default value and also not a valid blockset id. Need to + // allocate a new blockset. 
+ let blockset = ecx.create_blockset()?; + rwlock_set_reader_blockset(ecx, rwlock_op, blockset.to_u32_scalar())?; + Ok(blockset) + } else { + Ok(blockset.into()) + } +} + impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {} pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> { fn pthread_mutexattr_init(&mut self, attr_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { @@ -265,31 +452,40 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let kind = mutex_get_kind(this, mutex_op)?.not_undef()?; let locked_count = mutex_get_locked_count(this, mutex_op)?.to_u32()?; + let active_thread = this.get_active_thread()?; - if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? { - if locked_count == 0 { - mutex_set_locked_count(this, mutex_op, Scalar::from_u32(1))?; - Ok(0) - } else { - throw_machine_stop!(TerminationInfo::Deadlock); - } - } else if kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? { - if locked_count == 0 { - mutex_set_locked_count(this, mutex_op, Scalar::from_u32(1))?; + if locked_count == 0 { + // The mutex is unlocked. Let's lock it. + mutex_set_locked_count(this, mutex_op, Scalar::from_u32(1))?; + mutex_set_owner(this, mutex_op, active_thread.to_u32_scalar())?; + Ok(0) + } else { + // The mutex is locked. Let's check by whom. + let owner_thread: ThreadId = + mutex_get_owner(this, mutex_op)?.not_undef()?.to_u32()?.into(); + if owner_thread != active_thread { + // Block the active thread. + let blockset = mutex_get_or_create_blockset(this, mutex_op)?; + this.block_active_thread(blockset)?; Ok(0) } else { - this.eval_libc_i32("EDEADLK") - } - } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? { - match locked_count.checked_add(1) { - Some(new_count) => { - mutex_set_locked_count(this, mutex_op, Scalar::from_u32(new_count))?; - Ok(0) + // Trying to acquire the same mutex again. + if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? { + throw_machine_stop!(TerminationInfo::Deadlock); + } else if kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? { + this.eval_libc_i32("EDEADLK") + } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? { + match locked_count.checked_add(1) { + Some(new_count) => { + mutex_set_locked_count(this, mutex_op, Scalar::from_u32(new_count))?; + Ok(0) + } + None => this.eval_libc_i32("EAGAIN"), + } + } else { + throw_ub_format!("called pthread_mutex_lock on an unsupported type of mutex"); } - None => this.eval_libc_i32("EAGAIN"), } - } else { - throw_ub_format!("called pthread_mutex_lock on an unsupported type of mutex"); } } @@ -298,26 +494,36 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let kind = mutex_get_kind(this, mutex_op)?.not_undef()?; let locked_count = mutex_get_locked_count(this, mutex_op)?.to_u32()?; + let active_thread = this.get_active_thread()?; - if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? - || kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? - { - if locked_count == 0 { - mutex_set_locked_count(this, mutex_op, Scalar::from_u32(1))?; - Ok(0) - } else { + if locked_count == 0 { + // The mutex is unlocked. Let's lock it. + mutex_set_locked_count(this, mutex_op, Scalar::from_u32(1))?; + mutex_set_owner(this, mutex_op, active_thread.to_u32_scalar())?; + Ok(0) + } else { + let owner_thread: ThreadId = mutex_get_owner(this, mutex_op)?.to_u32()?.into(); + if owner_thread != active_thread { this.eval_libc_i32("EBUSY") - } - } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? 
{ - match locked_count.checked_add(1) { - Some(new_count) => { - mutex_set_locked_count(this, mutex_op, Scalar::from_u32(new_count))?; - Ok(0) + } else { + if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? + || kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? + { + this.eval_libc_i32("EBUSY") + } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? { + match locked_count.checked_add(1) { + Some(new_count) => { + mutex_set_locked_count(this, mutex_op, Scalar::from_u32(new_count))?; + Ok(0) + } + None => this.eval_libc_i32("EAGAIN"), + } + } else { + throw_ub_format!( + "called pthread_mutex_trylock on an unsupported type of mutex" + ); } - None => this.eval_libc_i32("EAGAIN"), } - } else { - throw_ub_format!("called pthread_mutex_trylock on an unsupported type of mutex"); } } @@ -326,34 +532,40 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let kind = mutex_get_kind(this, mutex_op)?.not_undef()?; let locked_count = mutex_get_locked_count(this, mutex_op)?.to_u32()?; - - if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? { - if locked_count != 0 { - mutex_set_locked_count(this, mutex_op, Scalar::from_u32(0))?; - Ok(0) + let owner_thread: ThreadId = mutex_get_owner(this, mutex_op)?.to_u32()?.into(); + + if owner_thread != this.get_active_thread()? { + throw_ub_format!("called pthread_mutex_unlock on a mutex owned by another thread"); + } else if locked_count == 1 { + let blockset = mutex_get_or_create_blockset(this, mutex_op)?; + if let Some(new_owner) = this.unblock_random_thread(blockset)? { + // We have at least one thread waiting on this mutex. Transfer + // ownership to it. + mutex_set_owner(this, mutex_op, new_owner.to_u32_scalar())?; } else { - throw_ub_format!("unlocked a PTHREAD_MUTEX_NORMAL mutex that was not locked"); - } - } else if kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? { - if locked_count != 0 { + // No thread is waiting on this mutex. mutex_set_locked_count(this, mutex_op, Scalar::from_u32(0))?; - Ok(0) - } else { - this.eval_libc_i32("EPERM") } - } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? { - match locked_count.checked_sub(1) { - Some(new_count) => { - mutex_set_locked_count(this, mutex_op, Scalar::from_u32(new_count))?; - Ok(0) - } - None => { - // locked_count was already zero - this.eval_libc_i32("EPERM") + Ok(0) + } else { + if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? { + throw_ub_format!("unlocked a PTHREAD_MUTEX_NORMAL mutex that was not locked"); + } else if kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? { + this.eval_libc_i32("EPERM") + } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? 
{ + match locked_count.checked_sub(1) { + Some(new_count) => { + mutex_set_locked_count(this, mutex_op, Scalar::from_u32(new_count))?; + Ok(0) + } + None => { + // locked_count was already zero + this.eval_libc_i32("EPERM") + } } + } else { + throw_ub_format!("called pthread_mutex_unlock on an unsupported type of mutex"); } - } else { - throw_ub_format!("called pthread_mutex_unlock on an unsupported type of mutex"); } } @@ -366,6 +578,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx mutex_set_kind(this, mutex_op, ScalarMaybeUndef::Undef)?; mutex_set_locked_count(this, mutex_op, ScalarMaybeUndef::Undef)?; + mutex_set_blockset(this, mutex_op, ScalarMaybeUndef::Undef)?; Ok(0) } @@ -375,8 +588,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?; let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?; + if writers != 0 { - throw_machine_stop!(TerminationInfo::Deadlock); + // The lock is locked by a writer. + assert_eq!(writers, 1); + let reader_blockset = rwlock_get_or_create_reader_blockset(this, rwlock_op)?; + this.block_active_thread(reader_blockset)?; + Ok(0) } else { match readers.checked_add(1) { Some(new_readers) => { @@ -411,14 +629,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?; let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?; - if readers != 0 { - throw_machine_stop!(TerminationInfo::Deadlock); - } else if writers != 0 { - throw_machine_stop!(TerminationInfo::Deadlock); + let writer_blockset = rwlock_get_or_create_writer_blockset(this, rwlock_op)?; + if readers != 0 || writers != 0 { + this.block_active_thread(writer_blockset)?; } else { rwlock_set_writers(this, rwlock_op, Scalar::from_u32(1))?; - Ok(0) } + Ok(0) } fn pthread_rwlock_trywrlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { @@ -439,11 +656,28 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?; let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?; + let writer_blockset = rwlock_get_or_create_writer_blockset(this, rwlock_op)?; if let Some(new_readers) = readers.checked_sub(1) { + assert_eq!(writers, 0); rwlock_set_readers(this, rwlock_op, Scalar::from_u32(new_readers))?; + if new_readers == 0 { + if let Some(_writer) = this.unblock_random_thread(writer_blockset)? { + rwlock_set_writers(this, rwlock_op, Scalar::from_u32(1))?; + } + } Ok(0) } else if writers != 0 { + let reader_blockset = rwlock_get_or_create_reader_blockset(this, rwlock_op)?; rwlock_set_writers(this, rwlock_op, Scalar::from_u32(0))?; + if let Some(_writer) = this.unblock_random_thread(writer_blockset)? { + rwlock_set_writers(this, rwlock_op, Scalar::from_u32(1))?; + } else { + let mut readers = 0; + while let Some(_reader) = this.unblock_random_thread(reader_blockset)? { + readers += 1; + } + rwlock_set_readers(this, rwlock_op, Scalar::from_u32(readers))? 
+ } Ok(0) } else { throw_ub_format!("unlocked an rwlock that was not locked"); @@ -461,6 +695,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx rwlock_set_readers(this, rwlock_op, ScalarMaybeUndef::Undef)?; rwlock_set_writers(this, rwlock_op, ScalarMaybeUndef::Undef)?; + rwlock_set_reader_blockset(this, rwlock_op, ScalarMaybeUndef::Undef)?; + rwlock_set_writer_blockset(this, rwlock_op, ScalarMaybeUndef::Undef)?; Ok(0) } diff --git a/src/threads.rs b/src/threads.rs index 4ce35d50ab..5991ba4ed1 100644 --- a/src/threads.rs +++ b/src/threads.rs @@ -2,6 +2,7 @@ use std::cell::RefCell; use std::collections::hash_map::Entry; +use std::convert::TryFrom; use log::trace; @@ -31,6 +32,37 @@ impl From for ThreadId { } } +impl From for ThreadId { + fn from(id: u32) -> Self { + Self(id as usize) + } +} + +impl ThreadId { + pub fn to_u32_scalar<'tcx>(&self) -> Scalar { + Scalar::from_u32(u32::try_from(self.0).unwrap()) + } +} + +/// An identifier of a set of blocked threads. +/// +/// Note: 0 is not a valid identifier. +#[derive(Clone, Copy, Debug, PartialOrd, Ord, PartialEq, Eq, Hash)] +pub struct BlockSetId(u32); + +impl From for BlockSetId { + fn from(id: u32) -> Self { + assert_ne!(id, 0, "0 is not a valid blockset id"); + Self(id) + } +} + +impl BlockSetId { + pub fn to_u32_scalar<'tcx>(&self) -> Scalar { + Scalar::from_u32(self.0) + } +} + /// The state of a thread. #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum ThreadState { @@ -38,7 +70,9 @@ pub enum ThreadState { Enabled, /// The thread tried to join the specified thread and is blocked until that /// thread terminates. - Blocked(ThreadId), + BlockedOnJoin(ThreadId), + /// The thread is blocked and belongs to the given blockset.. + Blocked(BlockSetId), /// The thread has terminated its execution (we do not delete terminated /// threads.) Terminated, @@ -93,13 +127,15 @@ pub struct ThreadSet<'mir, 'tcx> { /// /// Note that this vector also contains terminated threads. threads: IndexVec>, + /// A counter used to generate unique identifiers for blocksets. + blockset_counter: u32, } impl<'mir, 'tcx> Default for ThreadSet<'mir, 'tcx> { fn default() -> Self { let mut threads = IndexVec::new(); threads.push(Default::default()); - Self { active_thread: ThreadId::new(0), threads: threads } + Self { active_thread: ThreadId::new(0), threads: threads, blockset_counter: 0 } } } @@ -145,12 +181,12 @@ impl<'mir, 'tcx: 'mir> ThreadSet<'mir, 'tcx> { assert!( self.threads .iter() - .all(|thread| thread.state != ThreadState::Blocked(joined_thread_id)), + .all(|thread| thread.state != ThreadState::BlockedOnJoin(joined_thread_id)), "Bug: multiple threads try to join the same thread." ); if self.threads[joined_thread_id].state != ThreadState::Terminated { // The joined thread is still running, we need to wait for it. - self.threads[self.active_thread].state = ThreadState::Blocked(joined_thread_id); + self.active_thread_mut().state = ThreadState::BlockedOnJoin(joined_thread_id); trace!( "{:?} blocked on {:?} when trying to join", self.active_thread, @@ -162,10 +198,29 @@ impl<'mir, 'tcx: 'mir> ThreadSet<'mir, 'tcx> { fn set_thread_name(&mut self, new_thread_name: Vec) { self.active_thread_mut().thread_name = Some(new_thread_name); } - /// Get ids of all threads ever allocated. + /// Get ids and states of all threads ever allocated. 
fn get_all_thread_ids_with_states(&self) -> Vec<(ThreadId, ThreadState)> { self.threads.iter_enumerated().map(|(id, thread)| (id, thread.state)).collect() } + fn create_blockset(&mut self) -> BlockSetId { + self.blockset_counter = self.blockset_counter.checked_add(1).unwrap(); + self.blockset_counter.into() + } + fn block_active_thread(&mut self, set: BlockSetId) { + let state = &mut self.active_thread_mut().state; + assert_eq!(*state, ThreadState::Enabled); + *state = ThreadState::Blocked(set); + } + fn unblock_random_thread(&mut self, set: BlockSetId) -> Option { + for (id, thread) in self.threads.iter_enumerated_mut() { + if thread.state == ThreadState::Blocked(set) { + trace!("unblocking {:?} in blockset {:?}", id, set); + thread.state = ThreadState::Enabled; + return Some(id); + } + } + None + } /// Decide which thread to run next. /// /// Returns `false` if all threads terminated. @@ -173,7 +228,7 @@ impl<'mir, 'tcx: 'mir> ThreadSet<'mir, 'tcx> { if self.threads[self.active_thread].check_terminated() { // Check if we need to unblock any threads. for (i, thread) in self.threads.iter_enumerated_mut() { - if thread.state == ThreadState::Blocked(self.active_thread) { + if thread.state == ThreadState::BlockedOnJoin(self.active_thread) { trace!("unblocking {:?} because {:?} terminated", i, self.active_thread); thread.state = ThreadState::Enabled; } @@ -191,7 +246,7 @@ impl<'mir, 'tcx: 'mir> ThreadSet<'mir, 'tcx> { if self.threads.iter().all(|thread| thread.state == ThreadState::Terminated) { Ok(false) } else { - throw_machine_stop!(TerminationInfo::Abort(Some(format!("execution deadlocked")))) + throw_machine_stop!(TerminationInfo::Deadlock); } } } @@ -298,6 +353,18 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let this = self.eval_context_mut(); this.machine.threads.get_all_thread_ids_with_states() } + fn create_blockset(&mut self) -> InterpResult<'tcx, BlockSetId> { + let this = self.eval_context_mut(); + Ok(this.machine.threads.create_blockset()) + } + fn block_active_thread(&mut self, set: BlockSetId) -> InterpResult<'tcx> { + let this = self.eval_context_mut(); + Ok(this.machine.threads.block_active_thread(set)) + } + fn unblock_random_thread(&mut self, set: BlockSetId) -> InterpResult<'tcx, Option> { + let this = self.eval_context_mut(); + Ok(this.machine.threads.unblock_random_thread(set)) + } /// Decide which thread to run next. /// /// Returns `false` if all threads terminated. diff --git a/tests/run-pass/concurrency/locks.rs b/tests/run-pass/concurrency/locks.rs new file mode 100644 index 0000000000..575aeadd7f --- /dev/null +++ b/tests/run-pass/concurrency/locks.rs @@ -0,0 +1,29 @@ +//! This test just calls the relevant APIs to check if Miri crashes. + +use std::sync::{Arc, Mutex}; +use std::thread; + +fn main() { + + let data = Arc::new(Mutex::new(0)); + let mut threads = Vec::new(); + + for _ in 0..3 { + let data = Arc::clone(&data); + let thread = thread::spawn(move || { + let mut data = data.lock().unwrap(); + *data += 1; + }); + threads.push(thread); + } + + for thread in threads { + thread.join().unwrap(); + } + + assert!(data.try_lock().is_ok()); + + let data = Arc::try_unwrap(data).unwrap().into_inner().unwrap(); + assert_eq!(data, 3); + +} \ No newline at end of file diff --git a/tests/run-pass/concurrency/locks.stdout b/tests/run-pass/concurrency/locks.stdout new file mode 100644 index 0000000000..2486b320db --- /dev/null +++ b/tests/run-pass/concurrency/locks.stdout @@ -0,0 +1,3 @@ +WARNING: The thread support is experimental. 
For example, Miri does not detect data races yet. +WARNING: The thread support is experimental. For example, Miri does not detect data races yet. +WARNING: The thread support is experimental. For example, Miri does not detect data races yet. From d907fb50215c2f79e4fd312447a67439620bb2ab Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Thu, 9 Apr 2020 12:06:33 -0700 Subject: [PATCH 13/77] Rename ThreadSet to ThreadManager. --- src/machine.rs | 4 ++-- src/threads.rs | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/machine.rs b/src/machine.rs index a7d62897b8..b56755083f 100644 --- a/src/machine.rs +++ b/src/machine.rs @@ -27,7 +27,7 @@ use rustc_target::abi::{LayoutOf, Size}; use crate::*; -pub use crate::threads::{ThreadId, ThreadSet, ThreadState, ThreadLocalStorage}; +pub use crate::threads::{ThreadId, ThreadManager, ThreadState, ThreadLocalStorage}; // Some global facts about the emulated machine. pub const PAGE_SIZE: u64 = 4 * 1024; // FIXME: adjust to target architecture @@ -257,7 +257,7 @@ pub struct Evaluator<'mir, 'tcx> { pub(crate) time_anchor: Instant, /// The set of threads. - pub(crate) threads: ThreadSet<'mir, 'tcx>, + pub(crate) threads: ThreadManager<'mir, 'tcx>, /// Precomputed `TyLayout`s for primitive data types that are commonly used inside Miri. pub(crate) layouts: PrimitiveLayouts<'tcx>, diff --git a/src/threads.rs b/src/threads.rs index 5991ba4ed1..2352f26ebe 100644 --- a/src/threads.rs +++ b/src/threads.rs @@ -120,7 +120,7 @@ impl<'mir, 'tcx> Default for Thread<'mir, 'tcx> { /// A set of threads. #[derive(Debug)] -pub struct ThreadSet<'mir, 'tcx> { +pub struct ThreadManager<'mir, 'tcx> { /// Identifier of the currently active thread. active_thread: ThreadId, /// Threads used in the program. @@ -131,7 +131,7 @@ pub struct ThreadSet<'mir, 'tcx> { blockset_counter: u32, } -impl<'mir, 'tcx> Default for ThreadSet<'mir, 'tcx> { +impl<'mir, 'tcx> Default for ThreadManager<'mir, 'tcx> { fn default() -> Self { let mut threads = IndexVec::new(); threads.push(Default::default()); @@ -139,7 +139,7 @@ impl<'mir, 'tcx> Default for ThreadSet<'mir, 'tcx> { } } -impl<'mir, 'tcx: 'mir> ThreadSet<'mir, 'tcx> { +impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { /// Borrow the stack of the active thread. fn active_thread_stack(&self) -> &[Frame<'mir, 'tcx, Tag, FrameData<'tcx>>] { &self.threads[self.active_thread].stack From 0c4303cd7f903d2c05e70c05800dddefd7ccb7c6 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Thu, 9 Apr 2020 13:37:38 -0700 Subject: [PATCH 14/77] Small refactoring in pthread sync: extract common functionallity to separate functions. 
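
The helpers introduced below, get_at_offset and set_at_offset, replace each
hand-rolled deref-operand/offset/read (or write) sequence with a single call
parameterized by the field offset, the field layout, and the minimum size of
the emulated object. As a rough standalone sketch of that accessor pattern
(a plain byte buffer stands in for Miri's MemPlace machinery; the names are
illustrative, and the offsets are the ones documented for the emulated
pthread_mutex_t, locked count at byte 4 and owner at byte 8):

    use std::convert::TryInto;

    // The emulated pthread_mutex_t is assumed to span at least 24 bytes,
    // matching the minimum size checked by the real helpers.
    const MUTEX_MIN_SIZE: usize = 24;

    // Analogue of `get_at_offset`: check the object is large enough,
    // then read a u32 field at the given byte offset.
    fn get_u32_at(buf: &[u8], offset: usize) -> u32 {
        assert!(buf.len() >= MUTEX_MIN_SIZE);
        u32::from_ne_bytes(buf[offset..offset + 4].try_into().unwrap())
    }

    // Analogue of `set_at_offset`: check the size, then write the field in place.
    fn set_u32_at(buf: &mut [u8], offset: usize, value: u32) {
        assert!(buf.len() >= MUTEX_MIN_SIZE);
        buf[offset..offset + 4].copy_from_slice(&value.to_ne_bytes());
    }

    fn main() {
        let mut mutex = [0u8; MUTEX_MIN_SIZE];
        set_u32_at(&mut mutex, 4, 1); // locked count
        set_u32_at(&mut mutex, 8, 7); // owner thread id
        assert_eq!(get_u32_at(&mutex, 4), 1);
        assert_eq!(get_u32_at(&mutex, 8), 7);
    }

The real helpers additionally go through deref_operand and ScalarMaybeUndef,
but the shape is the same: every field accessor in sync.rs becomes a one-line
wrapper around these two functions, as the diff shows.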
--- src/shims/sync.rs | 235 ++++++++++++---------------------------------- 1 file changed, 58 insertions(+), 177 deletions(-) diff --git a/src/shims/sync.rs b/src/shims/sync.rs index eb54358114..76f97aab23 100644 --- a/src/shims/sync.rs +++ b/src/shims/sync.rs @@ -1,4 +1,4 @@ -use rustc_middle::ty::{TyKind, TypeAndMut}; +use rustc_middle::ty::{layout::TyAndLayout, TyKind, TypeAndMut}; use rustc_target::abi::{LayoutOf, Size}; use crate::stacked_borrows::Tag; @@ -19,22 +19,48 @@ fn assert_ptr_target_min_size<'mir, 'tcx: 'mir>( Ok(()) } +fn get_at_offset<'mir, 'tcx: 'mir>( + ecx: &MiriEvalContext<'mir, 'tcx>, + op: OpTy<'tcx, Tag>, + offset: u64, + layout: TyAndLayout<'tcx>, + min_size: u64, +) -> InterpResult<'tcx, ScalarMaybeUndef> { + // Ensure that the following read at an offset to the attr pointer is within bounds + assert_ptr_target_min_size(ecx, op, min_size)?; + let op_place = ecx.deref_operand(op)?; + let value_place = op_place.offset(Size::from_bytes(offset), MemPlaceMeta::None, layout, ecx)?; + ecx.read_scalar(value_place.into()) +} + +fn set_at_offset<'mir, 'tcx: 'mir>( + ecx: &mut MiriEvalContext<'mir, 'tcx>, + op: OpTy<'tcx, Tag>, + offset: u64, + value: impl Into>, + layout: TyAndLayout<'tcx>, + min_size: u64, +) -> InterpResult<'tcx, ()> { + // Ensure that the following write at an offset to the attr pointer is within bounds + assert_ptr_target_min_size(ecx, op, min_size)?; + let op_place = ecx.deref_operand(op)?; + let value_place = op_place.offset(Size::from_bytes(offset), MemPlaceMeta::None, layout, ecx)?; + ecx.write_scalar(value.into(), value_place.into()) +} + // pthread_mutexattr_t is either 4 or 8 bytes, depending on the platform. // Our chosen memory layout for emulation (does not have to match the platform layout!): // store an i32 in the first four bytes equal to the corresponding libc mutex kind constant // (e.g. PTHREAD_MUTEX_NORMAL). +const PTHREAD_MUTEXATTR_T_MIN_SIZE: u64 = 4; + fn mutexattr_get_kind<'mir, 'tcx: 'mir>( ecx: &MiriEvalContext<'mir, 'tcx>, attr_op: OpTy<'tcx, Tag>, ) -> InterpResult<'tcx, ScalarMaybeUndef> { - // Ensure that the following read at an offset to the attr pointer is within bounds - assert_ptr_target_min_size(ecx, attr_op, 4)?; - let attr_place = ecx.deref_operand(attr_op)?; - let kind_place = - attr_place.offset(Size::ZERO, MemPlaceMeta::None, ecx.machine.layouts.i32, ecx)?; - ecx.read_scalar(kind_place.into()) + get_at_offset(ecx, attr_op, 0, ecx.machine.layouts.i32, PTHREAD_MUTEXATTR_T_MIN_SIZE) } fn mutexattr_set_kind<'mir, 'tcx: 'mir>( @@ -42,12 +68,7 @@ fn mutexattr_set_kind<'mir, 'tcx: 'mir>( attr_op: OpTy<'tcx, Tag>, kind: impl Into>, ) -> InterpResult<'tcx, ()> { - // Ensure that the following write at an offset to the attr pointer is within bounds - assert_ptr_target_min_size(ecx, attr_op, 4)?; - let attr_place = ecx.deref_operand(attr_op)?; - let kind_place = - attr_place.offset(Size::ZERO, MemPlaceMeta::None, ecx.machine.layouts.i32, ecx)?; - ecx.write_scalar(kind.into(), kind_place.into()) + set_at_offset(ecx, attr_op, 0, kind, ecx.machine.layouts.i32, PTHREAD_MUTEXATTR_T_MIN_SIZE) } // pthread_mutex_t is between 24 and 48 bytes, depending on the platform. @@ -61,20 +82,13 @@ fn mutexattr_set_kind<'mir, 'tcx: 'mir>( // (the kind has to be at its offset for compatibility with static initializer macros) // bytes 20-23: when count > 0, id of the blockset in which the blocked threads are waiting. 
+const PTHREAD_MUTEX_T_MIN_SIZE: u64 = 24; + fn mutex_get_locked_count<'mir, 'tcx: 'mir>( ecx: &MiriEvalContext<'mir, 'tcx>, mutex_op: OpTy<'tcx, Tag>, ) -> InterpResult<'tcx, ScalarMaybeUndef> { - // Ensure that the following read at an offset to the mutex pointer is within bounds - assert_ptr_target_min_size(ecx, mutex_op, 24)?; - let mutex_place = ecx.deref_operand(mutex_op)?; - let locked_count_place = mutex_place.offset( - Size::from_bytes(4), - MemPlaceMeta::None, - ecx.machine.layouts.u32, - ecx, - )?; - ecx.read_scalar(locked_count_place.into()) + get_at_offset(ecx, mutex_op, 4, ecx.machine.layouts.u32, PTHREAD_MUTEX_T_MIN_SIZE) } fn mutex_set_locked_count<'mir, 'tcx: 'mir>( @@ -82,66 +96,30 @@ fn mutex_set_locked_count<'mir, 'tcx: 'mir>( mutex_op: OpTy<'tcx, Tag>, locked_count: impl Into>, ) -> InterpResult<'tcx, ()> { - // Ensure that the following write at an offset to the mutex pointer is within bounds - assert_ptr_target_min_size(ecx, mutex_op, 24)?; - let mutex_place = ecx.deref_operand(mutex_op)?; - let locked_count_place = mutex_place.offset( - Size::from_bytes(4), - MemPlaceMeta::None, - ecx.machine.layouts.u32, - ecx, - )?; - ecx.write_scalar(locked_count.into(), locked_count_place.into()) + set_at_offset(ecx, mutex_op, 4, locked_count, ecx.machine.layouts.u32, PTHREAD_MUTEX_T_MIN_SIZE) } fn mutex_get_owner<'mir, 'tcx: 'mir>( ecx: &MiriEvalContext<'mir, 'tcx>, mutex_op: OpTy<'tcx, Tag>, ) -> InterpResult<'tcx, ScalarMaybeUndef> { - // Ensure that the following read at an offset to the mutex pointer is within bounds - assert_ptr_target_min_size(ecx, mutex_op, 24)?; - let mutex_place = ecx.deref_operand(mutex_op)?; - let mutex_id_place = mutex_place.offset( - Size::from_bytes(8), - MemPlaceMeta::None, - ecx.machine.layouts.u32, - ecx, - )?; - ecx.read_scalar(mutex_id_place.into()) + get_at_offset(ecx, mutex_op, 8, ecx.machine.layouts.u32, PTHREAD_MUTEX_T_MIN_SIZE) } fn mutex_set_owner<'mir, 'tcx: 'mir>( ecx: &mut MiriEvalContext<'mir, 'tcx>, mutex_op: OpTy<'tcx, Tag>, - mutex_id: impl Into>, + owner: impl Into>, ) -> InterpResult<'tcx, ()> { - // Ensure that the following write at an offset to the mutex pointer is within bounds - assert_ptr_target_min_size(ecx, mutex_op, 24)?; - let mutex_place = ecx.deref_operand(mutex_op)?; - let mutex_id_place = mutex_place.offset( - Size::from_bytes(8), - MemPlaceMeta::None, - ecx.machine.layouts.u32, - ecx, - )?; - ecx.write_scalar(mutex_id.into(), mutex_id_place.into()) + set_at_offset(ecx, mutex_op, 8, owner, ecx.machine.layouts.u32, PTHREAD_MUTEX_T_MIN_SIZE) } fn mutex_get_kind<'mir, 'tcx: 'mir>( ecx: &mut MiriEvalContext<'mir, 'tcx>, mutex_op: OpTy<'tcx, Tag>, ) -> InterpResult<'tcx, ScalarMaybeUndef> { - // Ensure that the following read at an offset to the mutex pointer is within bounds - assert_ptr_target_min_size(ecx, mutex_op, 24)?; - let mutex_place = ecx.deref_operand(mutex_op)?; - let kind_offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 }; - let kind_place = mutex_place.offset( - Size::from_bytes(kind_offset), - MemPlaceMeta::None, - ecx.machine.layouts.i32, - ecx, - )?; - ecx.read_scalar(kind_place.into()) + let offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 }; + get_at_offset(ecx, mutex_op, offset, ecx.machine.layouts.i32, PTHREAD_MUTEX_T_MIN_SIZE) } fn mutex_set_kind<'mir, 'tcx: 'mir>( @@ -149,50 +127,23 @@ fn mutex_set_kind<'mir, 'tcx: 'mir>( mutex_op: OpTy<'tcx, Tag>, kind: impl Into>, ) -> InterpResult<'tcx, ()> { - // Ensure that the following write at an offset to the mutex pointer is 
within bounds - assert_ptr_target_min_size(ecx, mutex_op, 24)?; - let mutex_place = ecx.deref_operand(mutex_op)?; - let kind_offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 }; - let kind_place = mutex_place.offset( - Size::from_bytes(kind_offset), - MemPlaceMeta::None, - ecx.machine.layouts.i32, - ecx, - )?; - ecx.write_scalar(kind.into(), kind_place.into()) + let offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 }; + set_at_offset(ecx, mutex_op, offset, kind, ecx.machine.layouts.i32, PTHREAD_MUTEX_T_MIN_SIZE) } fn mutex_get_blockset<'mir, 'tcx: 'mir>( ecx: &MiriEvalContext<'mir, 'tcx>, mutex_op: OpTy<'tcx, Tag>, ) -> InterpResult<'tcx, ScalarMaybeUndef> { - // Ensure that the following read at an offset to the mutex pointer is within bounds - assert_ptr_target_min_size(ecx, mutex_op, 24)?; - let mutex_place = ecx.deref_operand(mutex_op)?; - let mutex_id_place = mutex_place.offset( - Size::from_bytes(20), - MemPlaceMeta::None, - ecx.machine.layouts.u32, - ecx, - )?; - ecx.read_scalar(mutex_id_place.into()) + get_at_offset(ecx, mutex_op, 20, ecx.machine.layouts.u32, PTHREAD_MUTEX_T_MIN_SIZE) } fn mutex_set_blockset<'mir, 'tcx: 'mir>( ecx: &mut MiriEvalContext<'mir, 'tcx>, mutex_op: OpTy<'tcx, Tag>, - mutex_id: impl Into>, + blockset: impl Into>, ) -> InterpResult<'tcx, ()> { - // Ensure that the following write at an offset to the mutex pointer is within bounds - assert_ptr_target_min_size(ecx, mutex_op, 24)?; - let mutex_place = ecx.deref_operand(mutex_op)?; - let mutex_id_place = mutex_place.offset( - Size::from_bytes(20), - MemPlaceMeta::None, - ecx.machine.layouts.u32, - ecx, - )?; - ecx.write_scalar(mutex_id.into(), mutex_id_place.into()) + set_at_offset(ecx, mutex_op, 20, blockset, ecx.machine.layouts.u32, PTHREAD_MUTEX_T_MIN_SIZE) } fn mutex_get_or_create_blockset<'mir, 'tcx: 'mir>( @@ -223,20 +174,13 @@ fn mutex_get_or_create_blockset<'mir, 'tcx: 'mir>( // bytes 16-20: when writer count > 0, id of the blockset in which the blocked // readers are waiting. 
+const PTHREAD_RWLOCK_T_MIN_SIZE: u64 = 20; + fn rwlock_get_readers<'mir, 'tcx: 'mir>( ecx: &MiriEvalContext<'mir, 'tcx>, rwlock_op: OpTy<'tcx, Tag>, ) -> InterpResult<'tcx, ScalarMaybeUndef> { - // Ensure that the following read at an offset to the rwlock pointer is within bounds - assert_ptr_target_min_size(ecx, rwlock_op, 20)?; - let rwlock_place = ecx.deref_operand(rwlock_op)?; - let readers_place = rwlock_place.offset( - Size::from_bytes(4), - MemPlaceMeta::None, - ecx.machine.layouts.u32, - ecx, - )?; - ecx.read_scalar(readers_place.into()) + get_at_offset(ecx, rwlock_op, 4, ecx.machine.layouts.u32, PTHREAD_RWLOCK_T_MIN_SIZE) } fn rwlock_set_readers<'mir, 'tcx: 'mir>( @@ -244,32 +188,14 @@ fn rwlock_set_readers<'mir, 'tcx: 'mir>( rwlock_op: OpTy<'tcx, Tag>, readers: impl Into>, ) -> InterpResult<'tcx, ()> { - // Ensure that the following write at an offset to the rwlock pointer is within bounds - assert_ptr_target_min_size(ecx, rwlock_op, 20)?; - let rwlock_place = ecx.deref_operand(rwlock_op)?; - let readers_place = rwlock_place.offset( - Size::from_bytes(4), - MemPlaceMeta::None, - ecx.machine.layouts.u32, - ecx, - )?; - ecx.write_scalar(readers.into(), readers_place.into()) + set_at_offset(ecx, rwlock_op, 4, readers, ecx.machine.layouts.u32, PTHREAD_RWLOCK_T_MIN_SIZE) } fn rwlock_get_writers<'mir, 'tcx: 'mir>( ecx: &MiriEvalContext<'mir, 'tcx>, rwlock_op: OpTy<'tcx, Tag>, ) -> InterpResult<'tcx, ScalarMaybeUndef> { - // Ensure that the following read at an offset to the rwlock pointer is within bounds - assert_ptr_target_min_size(ecx, rwlock_op, 20)?; - let rwlock_place = ecx.deref_operand(rwlock_op)?; - let writers_place = rwlock_place.offset( - Size::from_bytes(8), - MemPlaceMeta::None, - ecx.machine.layouts.u32, - ecx, - )?; - ecx.read_scalar(writers_place.into()) + get_at_offset(ecx, rwlock_op, 8, ecx.machine.layouts.u32, PTHREAD_RWLOCK_T_MIN_SIZE) } fn rwlock_set_writers<'mir, 'tcx: 'mir>( @@ -277,32 +203,14 @@ fn rwlock_set_writers<'mir, 'tcx: 'mir>( rwlock_op: OpTy<'tcx, Tag>, writers: impl Into>, ) -> InterpResult<'tcx, ()> { - // Ensure that the following write at an offset to the rwlock pointer is within bounds - assert_ptr_target_min_size(ecx, rwlock_op, 20)?; - let rwlock_place = ecx.deref_operand(rwlock_op)?; - let writers_place = rwlock_place.offset( - Size::from_bytes(8), - MemPlaceMeta::None, - ecx.machine.layouts.u32, - ecx, - )?; - ecx.write_scalar(writers.into(), writers_place.into()) + set_at_offset(ecx, rwlock_op, 8, writers, ecx.machine.layouts.u32, PTHREAD_RWLOCK_T_MIN_SIZE) } fn rwlock_get_writer_blockset<'mir, 'tcx: 'mir>( ecx: &MiriEvalContext<'mir, 'tcx>, rwlock_op: OpTy<'tcx, Tag>, ) -> InterpResult<'tcx, ScalarMaybeUndef> { - // Ensure that the following read at an offset to the rwlock pointer is within bounds - assert_ptr_target_min_size(ecx, rwlock_op, 20)?; - let rwlock_place = ecx.deref_operand(rwlock_op)?; - let blockset_place = rwlock_place.offset( - Size::from_bytes(12), - MemPlaceMeta::None, - ecx.machine.layouts.u32, - ecx, - )?; - ecx.read_scalar(blockset_place.into()) + get_at_offset(ecx, rwlock_op, 12, ecx.machine.layouts.u32, PTHREAD_RWLOCK_T_MIN_SIZE) } fn rwlock_set_writer_blockset<'mir, 'tcx: 'mir>( @@ -310,16 +218,7 @@ fn rwlock_set_writer_blockset<'mir, 'tcx: 'mir>( rwlock_op: OpTy<'tcx, Tag>, blockset: impl Into>, ) -> InterpResult<'tcx, ()> { - // Ensure that the following write at an offset to the rwlock pointer is within bounds - assert_ptr_target_min_size(ecx, rwlock_op, 20)?; - let rwlock_place = ecx.deref_operand(rwlock_op)?; - 
let blockset_place = rwlock_place.offset( - Size::from_bytes(12), - MemPlaceMeta::None, - ecx.machine.layouts.u32, - ecx, - )?; - ecx.write_scalar(blockset.into(), blockset_place.into()) + set_at_offset(ecx, rwlock_op, 12, blockset, ecx.machine.layouts.u32, PTHREAD_RWLOCK_T_MIN_SIZE) } fn rwlock_get_or_create_writer_blockset<'mir, 'tcx: 'mir>( @@ -342,16 +241,7 @@ fn rwlock_get_reader_blockset<'mir, 'tcx: 'mir>( ecx: &MiriEvalContext<'mir, 'tcx>, rwlock_op: OpTy<'tcx, Tag>, ) -> InterpResult<'tcx, ScalarMaybeUndef> { - // Ensure that the following read at an offset to the rwlock pointer is within bounds - assert_ptr_target_min_size(ecx, rwlock_op, 20)?; - let rwlock_place = ecx.deref_operand(rwlock_op)?; - let blockset_place = rwlock_place.offset( - Size::from_bytes(16), - MemPlaceMeta::None, - ecx.machine.layouts.u32, - ecx, - )?; - ecx.read_scalar(blockset_place.into()) + get_at_offset(ecx, rwlock_op, 16, ecx.machine.layouts.u32, PTHREAD_RWLOCK_T_MIN_SIZE) } fn rwlock_set_reader_blockset<'mir, 'tcx: 'mir>( @@ -359,16 +249,7 @@ fn rwlock_set_reader_blockset<'mir, 'tcx: 'mir>( rwlock_op: OpTy<'tcx, Tag>, blockset: impl Into>, ) -> InterpResult<'tcx, ()> { - // Ensure that the following write at an offset to the rwlock pointer is within bounds - assert_ptr_target_min_size(ecx, rwlock_op, 20)?; - let rwlock_place = ecx.deref_operand(rwlock_op)?; - let blockset_place = rwlock_place.offset( - Size::from_bytes(16), - MemPlaceMeta::None, - ecx.machine.layouts.u32, - ecx, - )?; - ecx.write_scalar(blockset.into(), blockset_place.into()) + set_at_offset(ecx, rwlock_op, 16, blockset, ecx.machine.layouts.u32, PTHREAD_RWLOCK_T_MIN_SIZE) } fn rwlock_get_or_create_reader_blockset<'mir, 'tcx: 'mir>( From 963e9698f9ab959de06f42047ef1979bde0aac84 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Tue, 14 Apr 2020 17:21:52 -0700 Subject: [PATCH 15/77] Generate fresh allocation ids for thread locals in eval_maybe_thread_local_static_const. 
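
The core bookkeeping is a map from (thread-local static, thread) pairs to
per-thread allocation ids, consulted whenever a constant evaluates to a
pointer into such a static. A minimal standalone sketch of that idea, with
made-up integer id types standing in for Miri's AllocId and ThreadId:

    use std::collections::HashMap;

    type StaticId = u64;
    type ThreadId = u32;
    type AllocId = u64;

    #[derive(Default)]
    struct ThreadLocalAllocs {
        next_id: AllocId,
        per_thread: HashMap<(StaticId, ThreadId), AllocId>,
    }

    impl ThreadLocalAllocs {
        // Return the allocation backing `static_id` as seen by `thread`,
        // handing out a fresh id on first access.
        fn get_or_create(&mut self, static_id: StaticId, thread: ThreadId) -> AllocId {
            if let Some(&id) = self.per_thread.get(&(static_id, thread)) {
                return id;
            }
            self.next_id += 1;
            self.per_thread.insert((static_id, thread), self.next_id);
            self.next_id
        }
    }

    fn main() {
        let mut allocs = ThreadLocalAllocs::default();
        let a = allocs.get_or_create(1, 0); // static 1 as seen by thread 0
        let b = allocs.get_or_create(1, 1); // same static, seen by thread 1
        assert_ne!(a, b);
        assert_eq!(a, allocs.get_or_create(1, 0)); // stable on repeated access
    }

In this patch the map still lives in the ThreadLocalStorage attached to the
memory extra; the next patch moves it into the thread manager and fills the
fresh allocation by evaluating the static's initializer.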
--- src/diagnostics.rs | 4 +- src/machine.rs | 112 ++++++++++++++++++++------------------------- src/shims/tls.rs | 2 +- 3 files changed, 53 insertions(+), 65 deletions(-) diff --git a/src/diagnostics.rs b/src/diagnostics.rs index 114f1d9be3..b7c96dd7e9 100644 --- a/src/diagnostics.rs +++ b/src/diagnostics.rs @@ -139,7 +139,7 @@ fn report_msg<'tcx, 'mir>( mut helps: Vec, error: bool, ) { - let span = if let Some(frame) = ecx.machine.stack.last() { + let span = if let Some(frame) = ecx.active_thread_stack().last() { frame.current_source_info().unwrap().span } else { DUMMY_SP @@ -171,7 +171,7 @@ fn report_msg<'tcx, 'mir>( err.emit(); - for (i, frame) in ecx.machine.stack.iter().enumerate() { + for (i, frame) in ecx.active_thread_stack().iter().enumerate() { trace!("-------------------"); trace!("Frame {}", i); trace!(" return: {:?}", frame.return_place.map(|p| *p)); diff --git a/src/machine.rs b/src/machine.rs index b56755083f..a9582f595f 100644 --- a/src/machine.rs +++ b/src/machine.rs @@ -22,7 +22,7 @@ use rustc_middle::{ TyCtxt, }, }; -use rustc_span::{def_id::DefId, symbol::{sym, Symbol}}; +use rustc_span::symbol::{sym, Symbol}; use rustc_target::abi::{LayoutOf, Size}; use crate::*; @@ -331,19 +331,6 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'mir, 'tcx> { memory_extra.check_alignment } - #[inline(always)] - fn stack<'a>( - ecx: &'a InterpCx<'mir, 'tcx, Self> - ) -> &'a [Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>] { - ecx.active_thread_stack() - } - - fn stack_mut<'a>( - ecx: &'a mut InterpCx<'mir, 'tcx, Self> - ) -> &'a mut Vec> { - ecx.active_thread_stack_mut() - } - #[inline(always)] fn enforce_validity(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool { ecx.machine.validate @@ -434,63 +421,52 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'mir, 'tcx> { Ok(()) } - fn access_local( + fn eval_maybe_thread_local_static_const( ecx: &InterpCx<'mir, 'tcx, Self>, - frame: &Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>, - local: mir::Local, - ) -> InterpResult<'tcx, Operand> { - match frame.body.local_decls[local].local_info { - mir::LocalInfo::StaticRef { def_id, is_thread_local: true } => { - let static_alloc_id = ecx.tcx.alloc_map.lock().create_static_alloc(def_id); - let alloc_id = ecx.memory.extra.tls.get_or_register_allocation(*ecx.memory.tcx, static_alloc_id); - let tag = Self::tag_global_base_pointer(&ecx.memory.extra, alloc_id); - let pointer: Pointer = alloc_id.into(); - let pointer = pointer.with_tag(tag); - let scalar: Scalar<_> = pointer.into(); - let scalar: ScalarMaybeUndef<_> = scalar.into(); - let immediate: Immediate<_> = scalar.into(); - Ok( - Operand::Immediate(immediate) - ) - }, - _ => frame.locals[local].access(), + mut val: mir::interpret::ConstValue<'tcx> + )-> InterpResult<'tcx, mir::interpret::ConstValue<'tcx>> { + match &mut val { + mir::interpret::ConstValue::Scalar(Scalar::Ptr(ptr)) => { + let alloc_id = ptr.alloc_id; + let alloc = ecx.tcx.alloc_map.lock().get(alloc_id); + match alloc { + Some(GlobalAlloc::Static(def_id)) + if ecx.tcx.codegen_fn_attrs(def_id).flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) => { + // We have a thread-local static. 
+ let new_alloc_id = ecx.memory.extra.tls.get_or_register_allocation( + *ecx.memory.tcx, alloc_id); + ptr.alloc_id = new_alloc_id; + }, + _ => {}, + } + } + _ => {}, } + Ok(val) } fn canonical_alloc_id(mem: &Memory<'mir, 'tcx, Self>, id: AllocId) -> AllocId { let tcx = mem.tcx; - let alloc = tcx.alloc_map.lock().get(id); - fn is_thread_local<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> bool { - tcx.codegen_fn_attrs(def_id).flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) - } - match alloc { - Some(GlobalAlloc::Static(def_id)) if tcx.is_foreign_item(def_id) => { - if is_thread_local(*tcx, def_id) { - unimplemented!("Foreign thread local statics are not supported yet."); - } - // Figure out if this is an extern static, and if yes, which one. - let attrs = tcx.get_attrs(def_id); - let link_name = match attr::first_attr_value_str_by_name(&attrs, sym::link_name) { - Some(name) => name, - None => tcx.item_name(def_id), - }; - // Check if we know this one. - if let Some(canonical_id) = mem.extra.extern_statics.get(&link_name) { - trace!("canonical_alloc_id: {:?} ({}) -> {:?}", id, link_name, canonical_id); - *canonical_id - } else { - // Return original id; `Memory::get_static_alloc` will throw an error. - id - } - }, - Some(GlobalAlloc::Static(def_id)) if is_thread_local(*tcx, def_id) => { - // We have a thread local, so we need to get a unique allocation id for it. - mem.extra.tls.get_or_register_allocation(*tcx, id) - }, + // Figure out if this is an extern static, and if yes, which one. + let def_id = match tcx.alloc_map.lock().get(id) { + Some(GlobalAlloc::Static(def_id)) if tcx.is_foreign_item(def_id) => def_id, _ => { // No need to canonicalize anything. - id + return id; } + }; + let attrs = tcx.get_attrs(def_id); + let link_name = match attr::first_attr_value_str_by_name(&attrs, sym::link_name) { + Some(name) => name, + None => tcx.item_name(def_id), + }; + // Check if we know this one. + if let Some(canonical_id) = mem.extra.extern_statics.get(&link_name) { + trace!("canonical_alloc_id: {:?} ({}) -> {:?}", id, link_name, canonical_id); + *canonical_id + } else { + // Return original id; `Memory::get_static_alloc` will throw an error. + id } } @@ -587,6 +563,18 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'mir, 'tcx> { Ok(frame.with_extra(extra)) } + fn stack<'a>( + ecx: &'a InterpCx<'mir, 'tcx, Self> + ) -> &'a [Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>] { + ecx.active_thread_stack() + } + + fn stack_mut<'a>( + ecx: &'a mut InterpCx<'mir, 'tcx, Self> + ) -> &'a mut Vec> { + ecx.active_thread_stack_mut() + } + #[inline(always)] fn stack<'a>( ecx: &'a InterpCx<'mir, 'tcx, Self>, diff --git a/src/shims/tls.rs b/src/shims/tls.rs index ec8c31fe2c..5cef3871c0 100644 --- a/src/shims/tls.rs +++ b/src/shims/tls.rs @@ -161,7 +161,7 @@ impl<'tcx> TlsData<'tcx> { Entry::Occupied(entry) => { let (thread_id, data_scalar) = entry.remove_entry(); if let Some(dtor) = dtor { - let ret = Some((dtor, thread_id, data_scalar, key)); + let ret = Some((*dtor, thread_id, data_scalar, key)); return ret; } } From 51b16e56cd297afd308aea5b258a677901c7b45e Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Wed, 15 Apr 2020 14:34:34 -0700 Subject: [PATCH 16/77] Generate thread local allocations in eval_maybe_thread_local_static_const. 
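
With this change the per-thread copy is materialized the first time a thread
touches the static: the initializer is evaluated via const_eval_raw, the
resulting allocation gets a fresh id, and the (static, thread) to allocation
mapping moves from the memory extra into the thread manager. For reference,
this is the user-visible behavior those per-thread allocations provide; the
snippet is ordinary Rust, not part of the patch:

    use std::cell::Cell;
    use std::thread;

    thread_local! {
        static COUNTER: Cell<u32> = Cell::new(0);
    }

    fn main() {
        // The main thread writes to its own copy.
        COUNTER.with(|c| c.set(5));

        thread::spawn(|| {
            // A spawned thread starts from a freshly initialized copy.
            COUNTER.with(|c| assert_eq!(c.get(), 0));
            COUNTER.with(|c| c.set(7));
        })
        .join()
        .unwrap();

        // The spawned thread's writes do not leak into the main thread's copy.
        COUNTER.with(|c| assert_eq!(c.get(), 5));
    }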
--- src/machine.rs | 64 ++++++++++++++++++++--------- src/threads.rs | 108 ++++++++++++++++--------------------------------- 2 files changed, 78 insertions(+), 94 deletions(-) diff --git a/src/machine.rs b/src/machine.rs index a9582f595f..a5183d3e81 100644 --- a/src/machine.rs +++ b/src/machine.rs @@ -18,6 +18,7 @@ use rustc_middle::{ mir, ty::{ self, + Instance, layout::{LayoutCx, LayoutError, TyAndLayout}, TyCtxt, }, @@ -27,7 +28,7 @@ use rustc_target::abi::{LayoutOf, Size}; use crate::*; -pub use crate::threads::{ThreadId, ThreadManager, ThreadState, ThreadLocalStorage}; +pub use crate::threads::{ThreadId, ThreadManager, ThreadState}; // Some global facts about the emulated machine. pub const PAGE_SIZE: u64 = 4 * 1024; // FIXME: adjust to target architecture @@ -110,7 +111,6 @@ pub struct AllocExtra { pub struct MemoryExtra { pub stacked_borrows: Option, pub intptrcast: intptrcast::MemoryExtra, - pub tls: ThreadLocalStorage, /// Mapping extern static names to their canonical allocation. extern_statics: FxHashMap, @@ -147,7 +147,6 @@ impl MemoryExtra { rng: RefCell::new(rng), tracked_alloc_id, check_alignment, - tls: Default::default(), } } @@ -423,24 +422,58 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'mir, 'tcx> { fn eval_maybe_thread_local_static_const( ecx: &InterpCx<'mir, 'tcx, Self>, - mut val: mir::interpret::ConstValue<'tcx> - )-> InterpResult<'tcx, mir::interpret::ConstValue<'tcx>> { + mut val: mir::interpret::ConstValue<'tcx>, + ) -> InterpResult<'tcx, mir::interpret::ConstValue<'tcx>> { match &mut val { mir::interpret::ConstValue::Scalar(Scalar::Ptr(ptr)) => { let alloc_id = ptr.alloc_id; let alloc = ecx.tcx.alloc_map.lock().get(alloc_id); match alloc { Some(GlobalAlloc::Static(def_id)) - if ecx.tcx.codegen_fn_attrs(def_id).flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) => { + if ecx + .tcx + .codegen_fn_attrs(def_id) + .flags + .contains(CodegenFnAttrFlags::THREAD_LOCAL) => + { // We have a thread-local static. - let new_alloc_id = ecx.memory.extra.tls.get_or_register_allocation( - *ecx.memory.tcx, alloc_id); + let new_alloc_id = if let Some(new_alloc_id) = + ecx.get_thread_local_alloc_id(alloc_id) + { + new_alloc_id + } else { + if ecx.tcx.is_foreign_item(def_id) { + throw_unsup_format!( + "Foreign thread-local statics are not supported." 
+ ) + } + let instance = Instance::mono(ecx.tcx.tcx, def_id); + let gid = GlobalId { instance, promoted: None }; + let raw_const = ecx + .tcx + .const_eval_raw(ty::ParamEnv::reveal_all().and(gid)) + .map_err(|err| { + // no need to report anything, the const_eval call takes care of that + // for statics + assert!(ecx.tcx.is_static(def_id)); + match err { + ErrorHandled::Reported => err_inval!(ReferencedConstant), + ErrorHandled::TooGeneric => err_inval!(TooGeneric), + } + })?; + let id = raw_const.alloc_id; + let mut alloc_map = ecx.tcx.alloc_map.lock(); + let allocation = alloc_map.unwrap_memory(id); + let new_alloc_id = alloc_map.create_memory_alloc(allocation); + ecx.set_thread_local_alloc_id(alloc_id, new_alloc_id); + new_alloc_id + }; ptr.alloc_id = new_alloc_id; - }, - _ => {}, + } + _ => {} } } - _ => {}, + _ => {} } Ok(val) } @@ -470,15 +503,6 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'mir, 'tcx> { } } - #[inline(always)] - fn resolve_maybe_global_alloc( - tcx: ty::query::TyCtxtAt<'tcx>, - extra: &Self::MemoryExtra, - id: AllocId, - ) -> Option> { - extra.tls.resolve_allocation(*tcx, id) - } - fn init_allocation_extra<'b>( memory_extra: &MemoryExtra, id: AllocId, diff --git a/src/threads.rs b/src/threads.rs index 2352f26ebe..170fb0c476 100644 --- a/src/threads.rs +++ b/src/threads.rs @@ -1,15 +1,12 @@ //! Implements threads. use std::cell::RefCell; -use std::collections::hash_map::Entry; use std::convert::TryFrom; use log::trace; use rustc_data_structures::fx::FxHashMap; use rustc_index::vec::{Idx, IndexVec}; -use rustc_middle::mir; -use rustc_middle::ty; use crate::*; @@ -129,17 +126,41 @@ pub struct ThreadManager<'mir, 'tcx> { threads: IndexVec>, /// A counter used to generate unique identifiers for blocksets. blockset_counter: u32, + /// A mapping from an allocation id of a thread-local static to an + /// allocation id of a thread specific allocation. + thread_local_alloc_ids: RefCell>, } impl<'mir, 'tcx> Default for ThreadManager<'mir, 'tcx> { fn default() -> Self { let mut threads = IndexVec::new(); threads.push(Default::default()); - Self { active_thread: ThreadId::new(0), threads: threads, blockset_counter: 0 } + Self { + active_thread: ThreadId::new(0), + threads: threads, + blockset_counter: 0, + thread_local_alloc_ids: Default::default(), + } } } impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { + /// Check if we have an allocation for the given thread local static for the + /// active thread. + pub fn get_thread_local_alloc_id(&self, static_alloc_id: AllocId) -> Option { + self.thread_local_alloc_ids.borrow().get(&(static_alloc_id, self.active_thread)).cloned() + } + /// Set the allocation id as the allocation id of the given thread local + /// static for the active thread. + pub fn set_thread_local_alloc_id(&self, static_alloc_id: AllocId, new_alloc_id: AllocId) { + assert!( + self.thread_local_alloc_ids + .borrow_mut() + .insert((static_alloc_id, self.active_thread), new_alloc_id) + .is_none(), + "Bug: a thread local initialized twice for the same thread." + ); + } /// Borrow the stack of the active thread. fn active_thread_stack(&self) -> &[Frame<'mir, 'tcx, Tag, FrameData<'tcx>>] { &self.threads[self.active_thread].stack @@ -251,69 +272,16 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { } } -/// In Rust, a thread local variable is just a specially marked static. To -/// ensure a property that each memory allocation has a globally unique -/// allocation identifier, we create a fresh allocation id for each thread. 
This -/// data structure keeps the track of the created allocation identifiers and -/// their relation to the original static allocations. -#[derive(Clone, Debug, Default)] -pub struct ThreadLocalStorage { - /// A map from a thread local allocation identifier to the static from which - /// it was created. - thread_local_origin: RefCell>, - /// A map from a thread local static and thread id to the unique thread - /// local allocation. - thread_local_allocations: RefCell>, - /// The currently active thread. - active_thread: Option, -} - -impl ThreadLocalStorage { - /// For static allocation identifier `original_id` get a thread local - /// allocation identifier. If it is not allocated yet, allocate. - pub fn get_or_register_allocation(&self, tcx: ty::TyCtxt<'_>, original_id: AllocId) -> AllocId { - match self - .thread_local_allocations - .borrow_mut() - .entry((original_id, self.active_thread.unwrap())) - { - Entry::Occupied(entry) => *entry.get(), - Entry::Vacant(entry) => { - let fresh_id = tcx.alloc_map.lock().reserve(); - entry.insert(fresh_id); - self.thread_local_origin.borrow_mut().insert(fresh_id, original_id); - trace!( - "get_or_register_allocation(original_id={:?}) -> {:?}", - original_id, - fresh_id - ); - fresh_id - } - } - } - /// For thread local allocation identifier `alloc_id`, retrieve the original - /// static allocation identifier from which it was created. - pub fn resolve_allocation<'tcx>( - &self, - tcx: ty::TyCtxt<'tcx>, - alloc_id: AllocId, - ) -> Option> { - trace!("resolve_allocation(alloc_id: {:?})", alloc_id); - if let Some(original_id) = self.thread_local_origin.borrow().get(&alloc_id) { - trace!("resolve_allocation(alloc_id: {:?}) -> {:?}", alloc_id, original_id); - tcx.alloc_map.lock().get(*original_id) - } else { - tcx.alloc_map.lock().get(alloc_id) - } - } - /// Set which thread is currently active. - fn set_active_thread(&mut self, active_thread: ThreadId) { - self.active_thread = Some(active_thread); - } -} - impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {} pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> { + fn get_thread_local_alloc_id(&self, static_alloc_id: AllocId) -> Option { + let this = self.eval_context_ref(); + this.machine.threads.get_thread_local_alloc_id(static_alloc_id) + } + fn set_thread_local_alloc_id(&self, static_alloc_id: AllocId, thread_local_alloc_id: AllocId) { + let this = self.eval_context_ref(); + this.machine.threads.set_thread_local_alloc_id(static_alloc_id, thread_local_alloc_id) + } fn create_thread(&mut self) -> InterpResult<'tcx, ThreadId> { let this = self.eval_context_mut(); Ok(this.machine.threads.create_thread()) @@ -330,7 +298,6 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx } fn set_active_thread(&mut self, thread_id: ThreadId) -> InterpResult<'tcx, ThreadId> { let this = self.eval_context_mut(); - this.memory.extra.tls.set_active_thread(thread_id); Ok(this.machine.threads.set_active_thread_id(thread_id)) } fn get_active_thread(&self) -> InterpResult<'tcx, ThreadId> { @@ -370,13 +337,6 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx /// Returns `false` if all threads terminated. fn schedule(&mut self) -> InterpResult<'tcx, bool> { let this = self.eval_context_mut(); - // Find the next thread to run. - if this.machine.threads.schedule()? 
{ - let active_thread = this.machine.threads.get_active_thread_id(); - this.memory.extra.tls.set_active_thread(active_thread); - Ok(true) - } else { - Ok(false) - } + this.machine.threads.schedule() } } From 325c31e578210d0c72d8d5f1612074ddcbe514bb Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Wed, 15 Apr 2020 21:25:12 -0700 Subject: [PATCH 17/77] Address some of the reviewers comments. --- src/eval.rs | 2 +- src/machine.rs | 24 ++++---- src/shims/foreign_items/posix.rs | 57 ++++++++----------- src/shims/threads.rs | 14 +++-- src/threads.rs | 43 ++++++++++++-- tests/compile-fail/thread-spawn.rs | 9 +++ tests/run-pass/concurrency/locks.rs | 2 + tests/run-pass/concurrency/locks.stderr | 2 + tests/run-pass/concurrency/locks.stdout | 3 - tests/run-pass/concurrency/simple.rs | 2 + tests/run-pass/concurrency/simple.stderr | 2 + tests/run-pass/concurrency/simple.stdout | 10 ---- tests/run-pass/concurrency/thread_locals.rs | 2 + .../run-pass/concurrency/thread_locals.stderr | 2 + .../run-pass/concurrency/thread_locals.stdout | 1 - 15 files changed, 101 insertions(+), 74 deletions(-) create mode 100644 tests/compile-fail/thread-spawn.rs create mode 100644 tests/run-pass/concurrency/locks.stderr delete mode 100644 tests/run-pass/concurrency/locks.stdout create mode 100644 tests/run-pass/concurrency/simple.stderr delete mode 100644 tests/run-pass/concurrency/simple.stdout create mode 100644 tests/run-pass/concurrency/thread_locals.stderr delete mode 100644 tests/run-pass/concurrency/thread_locals.stdout diff --git a/src/eval.rs b/src/eval.rs index b0a59c64d1..d83039e475 100644 --- a/src/eval.rs +++ b/src/eval.rs @@ -206,7 +206,7 @@ pub fn eval_main<'tcx>(tcx: TyCtxt<'tcx>, main_id: DefId, config: MiriConfig) -> let res: InterpResult<'_, i64> = (|| { // Main loop. while ecx.schedule()? { - assert!(ecx.step()?); + assert!(ecx.step()?, "Bug: a terminated thread was scheduled for execution."); ecx.process_diagnostics(); } // Read the return code pointer *before* we run TLS destructors, to assert diff --git a/src/machine.rs b/src/machine.rs index a5183d3e81..0920364a44 100644 --- a/src/machine.rs +++ b/src/machine.rs @@ -428,41 +428,37 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'mir, 'tcx> { mir::interpret::ConstValue::Scalar(Scalar::Ptr(ptr)) => { let alloc_id = ptr.alloc_id; let alloc = ecx.tcx.alloc_map.lock().get(alloc_id); + let tcx = ecx.tcx; + let is_thread_local = |def_id| { + tcx.codegen_fn_attrs(def_id).flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) + }; match alloc { - Some(GlobalAlloc::Static(def_id)) - if ecx - .tcx - .codegen_fn_attrs(def_id) - .flags - .contains(CodegenFnAttrFlags::THREAD_LOCAL) => - { - // We have a thread-local static. + Some(GlobalAlloc::Static(def_id)) if is_thread_local(def_id) => { let new_alloc_id = if let Some(new_alloc_id) = ecx.get_thread_local_alloc_id(alloc_id) { new_alloc_id } else { - if ecx.tcx.is_foreign_item(def_id) { + if tcx.is_foreign_item(def_id) { throw_unsup_format!( "Foreign thread-local statics are not supported." 
) } - let instance = Instance::mono(ecx.tcx.tcx, def_id); + let instance = Instance::mono(tcx.tcx, def_id); let gid = GlobalId { instance, promoted: None }; - let raw_const = ecx - .tcx + let raw_const = tcx .const_eval_raw(ty::ParamEnv::reveal_all().and(gid)) .map_err(|err| { // no need to report anything, the const_eval call takes care of that // for statics - assert!(ecx.tcx.is_static(def_id)); + assert!(tcx.is_static(def_id)); match err { ErrorHandled::Reported => err_inval!(ReferencedConstant), ErrorHandled::TooGeneric => err_inval!(TooGeneric), } })?; let id = raw_const.alloc_id; - let mut alloc_map = ecx.tcx.alloc_map.lock(); + let mut alloc_map = tcx.alloc_map.lock(); let allocation = alloc_map.unwrap_memory(id); let new_alloc_id = alloc_map.create_memory_alloc(allocation); ecx.set_thread_local_alloc_id(alloc_id, new_alloc_id); diff --git a/src/shims/foreign_items/posix.rs b/src/shims/foreign_items/posix.rs index 4cd3b84991..5bb556aaa5 100644 --- a/src/shims/foreign_items/posix.rs +++ b/src/shims/foreign_items/posix.rs @@ -293,26 +293,6 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx this.write_scalar(Scalar::from_i32(result), dest)?; } - // Miscellaneous - "isatty" => { - let _fd = this.read_scalar(args[0])?.to_i32()?; - // "returns 1 if fd is an open file descriptor referring to a terminal; otherwise 0 is returned, and errno is set to indicate the error" - // FIXME: we just say nothing is a terminal. - let enotty = this.eval_libc("ENOTTY")?; - this.set_last_error(enotty)?; - this.write_null(dest)?; - } - "pthread_atfork" => { - let _prepare = this.read_scalar(args[0])?.not_undef()?; - let _parent = this.read_scalar(args[1])?.not_undef()?; - let _child = this.read_scalar(args[1])?.not_undef()?; - // We do not support forking, so there is nothing to do here. - this.write_null(dest)?; - } - "sched_yield" => { - this.write_null(dest)?; - } - // Threading "pthread_create" => { assert_eq!(args.len(), 4); @@ -339,20 +319,20 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx this.write_scalar(Scalar::from_i32(result), dest)?; } - "pthread_attr_getguardsize" => { - assert_eq!(args.len(), 2); - - let guard_size = this.deref_operand(args[1])?; - let guard_size_type = args[1].layout.ty - .builtin_deref(true) - .ok_or_else(|| err_ub_format!( - "wrong signature used for `pthread_attr_getguardsize`: first argument must be a raw pointer." - ))? - .ty; - let guard_size_layout = this.layout_of(guard_size_type)?; - this.write_scalar(Scalar::from_uint(crate::PAGE_SIZE, guard_size_layout.size), guard_size.into())?; - - // Return success (`0`). + // Miscellaneous + "isatty" => { + let _fd = this.read_scalar(args[0])?.to_i32()?; + // "returns 1 if fd is an open file descriptor referring to a terminal; otherwise 0 is returned, and errno is set to indicate the error" + // FIXME: we just say nothing is a terminal. + let enotty = this.eval_libc("ENOTTY")?; + this.set_last_error(enotty)?; + this.write_null(dest)?; + } + "pthread_atfork" => { + let _prepare = this.read_scalar(args[0])?.not_undef()?; + let _parent = this.read_scalar(args[1])?.not_undef()?; + let _child = this.read_scalar(args[1])?.not_undef()?; + // We do not support forking, so there is nothing to do here. 
this.write_null(dest)?; } @@ -369,6 +349,15 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx => { this.write_null(dest)?; } + "pthread_attr_getguardsize" if this.frame().instance.to_string().starts_with("std::sys::unix::") + => { + let guard_size = this.deref_operand(args[1])?; + let guard_size_layout = this.libc_ty_layout("size_t")?; + this.write_scalar(Scalar::from_uint(crate::PAGE_SIZE, guard_size_layout.size), guard_size.into())?; + + // Return success (`0`). + this.write_null(dest)?; + } | "signal" | "sigaction" diff --git a/src/shims/threads.rs b/src/shims/threads.rs index fc733d7f5c..d8ba11d267 100644 --- a/src/shims/threads.rs +++ b/src/shims/threads.rs @@ -11,13 +11,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx start_routine: OpTy<'tcx, Tag>, arg: OpTy<'tcx, Tag>, ) -> InterpResult<'tcx, i32> { - println!( - "WARNING: The thread support is experimental. \ - For example, Miri does not detect data races yet." - ); - let this = self.eval_context_mut(); + this.tcx.sess.warn( + "The thread support is experimental. \ + For example, Miri does not detect data races yet.", + ); + let new_thread_id = this.create_thread()?; let old_thread_id = this.set_active_thread(new_thread_id)?; @@ -57,6 +57,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx Ok(0) } + fn pthread_join( &mut self, thread: OpTy<'tcx, Tag>, @@ -73,6 +74,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx Ok(0) } + fn pthread_detach(&mut self, thread: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); @@ -81,12 +83,14 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx Ok(0) } + fn pthread_self(&mut self, dest: PlaceTy<'tcx, Tag>) -> InterpResult<'tcx> { let this = self.eval_context_mut(); let thread_id = this.get_active_thread()?; this.write_scalar(Scalar::from_uint(thread_id.index() as u128, dest.layout.size), dest) } + fn prctl( &mut self, option: OpTy<'tcx, Tag>, diff --git a/src/threads.rs b/src/threads.rs index 170fb0c476..c623fcae81 100644 --- a/src/threads.rs +++ b/src/threads.rs @@ -2,6 +2,7 @@ use std::cell::RefCell; use std::convert::TryFrom; +use std::num::NonZeroU32; use log::trace; @@ -42,21 +43,18 @@ impl ThreadId { } /// An identifier of a set of blocked threads. -/// -/// Note: 0 is not a valid identifier. #[derive(Clone, Copy, Debug, PartialOrd, Ord, PartialEq, Eq, Hash)] -pub struct BlockSetId(u32); +pub struct BlockSetId(NonZeroU32); impl From for BlockSetId { fn from(id: u32) -> Self { - assert_ne!(id, 0, "0 is not a valid blockset id"); - Self(id) + Self(NonZeroU32::new(id).expect("0 is not a valid blockset id")) } } impl BlockSetId { pub fn to_u32_scalar<'tcx>(&self) -> Scalar { - Scalar::from_u32(self.0) + Scalar::from_u32(self.0.get()) } } @@ -150,6 +148,7 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { pub fn get_thread_local_alloc_id(&self, static_alloc_id: AllocId) -> Option { self.thread_local_alloc_ids.borrow().get(&(static_alloc_id, self.active_thread)).cloned() } + /// Set the allocation id as the allocation id of the given thread local /// static for the active thread. pub fn set_thread_local_alloc_id(&self, static_alloc_id: AllocId, new_alloc_id: AllocId) { @@ -161,20 +160,24 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { "Bug: a thread local initialized twice for the same thread." ); } + /// Borrow the stack of the active thread. 
fn active_thread_stack(&self) -> &[Frame<'mir, 'tcx, Tag, FrameData<'tcx>>] { &self.threads[self.active_thread].stack } + /// Mutably borrow the stack of the active thread. fn active_thread_stack_mut(&mut self) -> &mut Vec>> { &mut self.threads[self.active_thread].stack } + /// Create a new thread and returns its id. fn create_thread(&mut self) -> ThreadId { let new_thread_id = ThreadId::new(self.threads.len()); self.threads.push(Default::default()); new_thread_id } + /// Set an active thread and return the id of the thread that was active before. fn set_active_thread_id(&mut self, id: ThreadId) -> ThreadId { let active_thread_id = self.active_thread; @@ -182,19 +185,23 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { assert!(self.active_thread.index() < self.threads.len()); active_thread_id } + /// Get the id of the currently active thread. fn get_active_thread_id(&self) -> ThreadId { self.active_thread } + /// Get the borrow of the currently active thread. fn active_thread_mut(&mut self) -> &mut Thread<'mir, 'tcx> { &mut self.threads[self.active_thread] } + /// Mark the thread as detached, which means that no other thread will try /// to join it and the thread is responsible for cleaning up. fn detach_thread(&mut self, id: ThreadId) { self.threads[id].detached = true; } + /// Mark that the active thread tries to join the thread with `joined_thread_id`. fn join_thread(&mut self, joined_thread_id: ThreadId) { assert!(!self.threads[joined_thread_id].detached, "Bug: trying to join a detached thread."); @@ -215,23 +222,32 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { ); } } + /// Set the name of the active thread. fn set_thread_name(&mut self, new_thread_name: Vec) { self.active_thread_mut().thread_name = Some(new_thread_name); } + /// Get ids and states of all threads ever allocated. fn get_all_thread_ids_with_states(&self) -> Vec<(ThreadId, ThreadState)> { self.threads.iter_enumerated().map(|(id, thread)| (id, thread.state)).collect() } + + /// Allocate a new blockset id. fn create_blockset(&mut self) -> BlockSetId { self.blockset_counter = self.blockset_counter.checked_add(1).unwrap(); self.blockset_counter.into() } + + /// Block the currently active thread and put it into the given blockset. fn block_active_thread(&mut self, set: BlockSetId) { let state = &mut self.active_thread_mut().state; assert_eq!(*state, ThreadState::Enabled); *state = ThreadState::Blocked(set); } + + /// Unblock any one thread from the given blockset if it contains at least + /// one. Return the id of the unblocked thread. fn unblock_random_thread(&mut self, set: BlockSetId) -> Option { for (id, thread) in self.threads.iter_enumerated_mut() { if thread.state == ThreadState::Blocked(set) { @@ -242,6 +258,7 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { } None } + /// Decide which thread to run next. /// /// Returns `false` if all threads terminated. 
@@ -278,60 +295,74 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let this = self.eval_context_ref(); this.machine.threads.get_thread_local_alloc_id(static_alloc_id) } + fn set_thread_local_alloc_id(&self, static_alloc_id: AllocId, thread_local_alloc_id: AllocId) { let this = self.eval_context_ref(); this.machine.threads.set_thread_local_alloc_id(static_alloc_id, thread_local_alloc_id) } + fn create_thread(&mut self) -> InterpResult<'tcx, ThreadId> { let this = self.eval_context_mut(); Ok(this.machine.threads.create_thread()) } + fn detach_thread(&mut self, thread_id: ThreadId) -> InterpResult<'tcx> { let this = self.eval_context_mut(); this.machine.threads.detach_thread(thread_id); Ok(()) } + fn join_thread(&mut self, joined_thread_id: ThreadId) -> InterpResult<'tcx> { let this = self.eval_context_mut(); this.machine.threads.join_thread(joined_thread_id); Ok(()) } + fn set_active_thread(&mut self, thread_id: ThreadId) -> InterpResult<'tcx, ThreadId> { let this = self.eval_context_mut(); Ok(this.machine.threads.set_active_thread_id(thread_id)) } + fn get_active_thread(&self) -> InterpResult<'tcx, ThreadId> { let this = self.eval_context_ref(); Ok(this.machine.threads.get_active_thread_id()) } + fn active_thread_stack(&self) -> &[Frame<'mir, 'tcx, Tag, FrameData<'tcx>>] { let this = self.eval_context_ref(); this.machine.threads.active_thread_stack() } + fn active_thread_stack_mut(&mut self) -> &mut Vec>> { let this = self.eval_context_mut(); this.machine.threads.active_thread_stack_mut() } + fn set_active_thread_name(&mut self, new_thread_name: Vec) -> InterpResult<'tcx, ()> { let this = self.eval_context_mut(); Ok(this.machine.threads.set_thread_name(new_thread_name)) } + fn get_all_thread_ids_with_states(&mut self) -> Vec<(ThreadId, ThreadState)> { let this = self.eval_context_mut(); this.machine.threads.get_all_thread_ids_with_states() } + fn create_blockset(&mut self) -> InterpResult<'tcx, BlockSetId> { let this = self.eval_context_mut(); Ok(this.machine.threads.create_blockset()) } + fn block_active_thread(&mut self, set: BlockSetId) -> InterpResult<'tcx> { let this = self.eval_context_mut(); Ok(this.machine.threads.block_active_thread(set)) } + fn unblock_random_thread(&mut self, set: BlockSetId) -> InterpResult<'tcx, Option> { let this = self.eval_context_mut(); Ok(this.machine.threads.unblock_random_thread(set)) } + /// Decide which thread to run next. /// /// Returns `false` if all threads terminated. diff --git a/tests/compile-fail/thread-spawn.rs b/tests/compile-fail/thread-spawn.rs new file mode 100644 index 0000000000..4b9073f3a7 --- /dev/null +++ b/tests/compile-fail/thread-spawn.rs @@ -0,0 +1,9 @@ +// ignore-linux +// ignore-macos +use std::thread; + +// error-pattern: Miri does not support threading + +fn main() { + thread::spawn(|| {}); +} diff --git a/tests/run-pass/concurrency/locks.rs b/tests/run-pass/concurrency/locks.rs index 575aeadd7f..49935db91b 100644 --- a/tests/run-pass/concurrency/locks.rs +++ b/tests/run-pass/concurrency/locks.rs @@ -1,3 +1,5 @@ +// ignore-windows + //! This test just calls the relevant APIs to check if Miri crashes. use std::sync::{Arc, Mutex}; diff --git a/tests/run-pass/concurrency/locks.stderr b/tests/run-pass/concurrency/locks.stderr new file mode 100644 index 0000000000..20a2bf3eeb --- /dev/null +++ b/tests/run-pass/concurrency/locks.stderr @@ -0,0 +1,2 @@ +warning: The thread support is experimental. For example, Miri does not detect data races yet. 
+ diff --git a/tests/run-pass/concurrency/locks.stdout b/tests/run-pass/concurrency/locks.stdout deleted file mode 100644 index 2486b320db..0000000000 --- a/tests/run-pass/concurrency/locks.stdout +++ /dev/null @@ -1,3 +0,0 @@ -WARNING: The thread support is experimental. For example, Miri does not detect data races yet. -WARNING: The thread support is experimental. For example, Miri does not detect data races yet. -WARNING: The thread support is experimental. For example, Miri does not detect data races yet. diff --git a/tests/run-pass/concurrency/simple.rs b/tests/run-pass/concurrency/simple.rs index 5c295d1702..5adc521f59 100644 --- a/tests/run-pass/concurrency/simple.rs +++ b/tests/run-pass/concurrency/simple.rs @@ -1,3 +1,5 @@ +// ignore-windows + use std::thread; fn create_and_detach() { diff --git a/tests/run-pass/concurrency/simple.stderr b/tests/run-pass/concurrency/simple.stderr new file mode 100644 index 0000000000..20a2bf3eeb --- /dev/null +++ b/tests/run-pass/concurrency/simple.stderr @@ -0,0 +1,2 @@ +warning: The thread support is experimental. For example, Miri does not detect data races yet. + diff --git a/tests/run-pass/concurrency/simple.stdout b/tests/run-pass/concurrency/simple.stdout deleted file mode 100644 index 0506b7bdf8..0000000000 --- a/tests/run-pass/concurrency/simple.stdout +++ /dev/null @@ -1,10 +0,0 @@ -WARNING: The thread support is experimental. For example, Miri does not detect data races yet. -WARNING: The thread support is experimental. For example, Miri does not detect data races yet. -WARNING: The thread support is experimental. For example, Miri does not detect data races yet. -WARNING: The thread support is experimental. For example, Miri does not detect data races yet. -WARNING: The thread support is experimental. For example, Miri does not detect data races yet. -WARNING: The thread support is experimental. For example, Miri does not detect data races yet. -WARNING: The thread support is experimental. For example, Miri does not detect data races yet. -WARNING: The thread support is experimental. For example, Miri does not detect data races yet. -WARNING: The thread support is experimental. For example, Miri does not detect data races yet. -WARNING: The thread support is experimental. For example, Miri does not detect data races yet. diff --git a/tests/run-pass/concurrency/thread_locals.rs b/tests/run-pass/concurrency/thread_locals.rs index 50aa6fee2f..1805a1da3d 100644 --- a/tests/run-pass/concurrency/thread_locals.rs +++ b/tests/run-pass/concurrency/thread_locals.rs @@ -1,3 +1,5 @@ +// ignore-windows + #![feature(thread_local)] use std::thread; diff --git a/tests/run-pass/concurrency/thread_locals.stderr b/tests/run-pass/concurrency/thread_locals.stderr new file mode 100644 index 0000000000..20a2bf3eeb --- /dev/null +++ b/tests/run-pass/concurrency/thread_locals.stderr @@ -0,0 +1,2 @@ +warning: The thread support is experimental. For example, Miri does not detect data races yet. + diff --git a/tests/run-pass/concurrency/thread_locals.stdout b/tests/run-pass/concurrency/thread_locals.stdout deleted file mode 100644 index 9a53b4a5c9..0000000000 --- a/tests/run-pass/concurrency/thread_locals.stdout +++ /dev/null @@ -1 +0,0 @@ -WARNING: The thread support is experimental. For example, Miri does not detect data races yet. From 4609c3c520f8b9b4d014f4a0a8ee12528fba6211 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Thu, 16 Apr 2020 08:34:30 -0700 Subject: [PATCH 18/77] Rename eval_maybe_thread_local_static_const to adjust_global_const. 
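
The old name tied the hook to a single use case; the new name reflects that this is
a general "adjust a global constant before the interpreter uses it" hook, and
remapping thread-local statics happens to be what Miri currently uses it for. A toy
model of the shape of such a hook (the real hook is a method on rustc_mir's Machine
trait and its exact signature differs; the names here are illustrative):

    // Sketch only, not the real rustc_mir::interpret::Machine trait.
    trait Machine {
        type ConstValue;

        // Called on every global constant before the interpreter uses it.
        // In this toy model the default is the identity; Miri's machine
        // implements the hook so that pointers to thread-local statics get
        // swapped for per-thread allocations.
        fn adjust_global_const(val: Self::ConstValue) -> Result<Self::ConstValue, String> {
            Ok(val)
        }
    }

    struct DummyMachine;

    impl Machine for DummyMachine {
        type ConstValue = u64; // stand-in for a real interpreter constant
    }

    fn main() {
        // The default just passes the constant through unchanged.
        assert_eq!(<DummyMachine as Machine>::adjust_global_const(7), Ok(7));
    }
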
--- src/machine.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/machine.rs b/src/machine.rs index 0920364a44..d79e0255f0 100644 --- a/src/machine.rs +++ b/src/machine.rs @@ -420,7 +420,7 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'mir, 'tcx> { Ok(()) } - fn eval_maybe_thread_local_static_const( + fn adjust_global_const( ecx: &InterpCx<'mir, 'tcx, Self>, mut val: mir::interpret::ConstValue<'tcx>, ) -> InterpResult<'tcx, mir::interpret::ConstValue<'tcx>> { From d9ec0f2b36cc53e770193fa5a409f51cc3a7cc5a Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Thu, 16 Apr 2020 12:21:01 -0700 Subject: [PATCH 19/77] Add a missing newline in the test. --- tests/run-pass/concurrency/locks.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tests/run-pass/concurrency/locks.rs b/tests/run-pass/concurrency/locks.rs index 49935db91b..3c8373691b 100644 --- a/tests/run-pass/concurrency/locks.rs +++ b/tests/run-pass/concurrency/locks.rs @@ -6,7 +6,6 @@ use std::sync::{Arc, Mutex}; use std::thread; fn main() { - let data = Arc::new(Mutex::new(0)); let mut threads = Vec::new(); @@ -27,5 +26,4 @@ fn main() { let data = Arc::try_unwrap(data).unwrap().into_inner().unwrap(); assert_eq!(data, 3); - -} \ No newline at end of file +} From 552080a5b7c4ec01d3ed74b411bfefaaebf70feb Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Thu, 16 Apr 2020 12:22:30 -0700 Subject: [PATCH 20/77] Fix imports. --- src/lib.rs | 4 +++- src/machine.rs | 2 -- src/shims/tls.rs | 6 ++++-- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index d8b3397c8e..bedacf705a 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -63,7 +63,9 @@ pub use crate::range_map::RangeMap; pub use crate::stacked_borrows::{ EvalContextExt as StackedBorEvalContextExt, Item, Permission, PtrId, Stack, Stacks, Tag, }; -pub use crate::threads::EvalContextExt as ThreadsEvalContextExt; +pub use crate::threads::{ + EvalContextExt as ThreadsEvalContextExt, ThreadId, ThreadManager, ThreadState, +}; /// Insert rustc arguments at the beginning of the argument list that Miri wants to be /// set per default, for maximal validation power. diff --git a/src/machine.rs b/src/machine.rs index d79e0255f0..23d4e37c66 100644 --- a/src/machine.rs +++ b/src/machine.rs @@ -28,8 +28,6 @@ use rustc_target::abi::{LayoutOf, Size}; use crate::*; -pub use crate::threads::{ThreadId, ThreadManager, ThreadState}; - // Some global facts about the emulated machine. pub const PAGE_SIZE: u64 = 4 * 1024; // FIXME: adjust to target architecture pub const STACK_ADDR: u64 = 32 * PAGE_SIZE; // not really about the "stack", but where we start assigning integer addresses to allocations diff --git a/src/shims/tls.rs b/src/shims/tls.rs index 5cef3871c0..da0c585958 100644 --- a/src/shims/tls.rs +++ b/src/shims/tls.rs @@ -8,8 +8,10 @@ use log::trace; use rustc_middle::ty; use rustc_target::abi::{Size, HasDataLayout}; -use crate::{HelpersEvalContextExt, ThreadsEvalContextExt, InterpResult, MPlaceTy, Scalar, StackPopCleanup, Tag}; -use crate::machine::{ThreadId, ThreadState}; +use crate::{ + HelpersEvalContextExt, InterpResult, MPlaceTy, Scalar, StackPopCleanup, Tag, ThreadId, + ThreadState, ThreadsEvalContextExt, +}; pub type TlsKey = u128; From 94118d4d9ad4bbce3533638ca5fc540275d14a69 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Thu, 16 Apr 2020 12:22:58 -0700 Subject: [PATCH 21/77] Make an assert message consistent with other asserts. 
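
For context, the message describes the invariant the main loop relies on: the
scheduler only ever hands back a thread that can make progress, so stepping the
scheduled thread must succeed. A toy model of that contract (illustrative names,
not Miri's types; Miri's real scheduler also handles blocked threads, which this
sketch leaves out):

    // Sketch only: the invariant the assert encodes, in miniature.
    #[derive(Clone, Copy, PartialEq, Debug)]
    enum State { Enabled, Terminated }

    struct Scheduler {
        states: Vec<State>,
        active: usize,
    }

    impl Scheduler {
        // Pick the next enabled thread (round robin), or None when all terminated.
        fn schedule(&mut self) -> Option<usize> {
            let n = self.states.len();
            for off in 0..n {
                let id = (self.active + off) % n;
                if self.states[id] == State::Enabled {
                    self.active = id;
                    return Some(id);
                }
            }
            None
        }
    }

    fn main() {
        let mut sched = Scheduler { states: vec![State::Terminated, State::Enabled], active: 0 };
        while let Some(id) = sched.schedule() {
            // The contract: a scheduled thread is never terminated, so it can step.
            assert_ne!(sched.states[id], State::Terminated,
                       "a terminated thread was scheduled for execution");
            // "Run" one step; in this toy model a single step terminates the thread.
            sched.states[id] = State::Terminated;
        }
    }
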
--- src/eval.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/eval.rs b/src/eval.rs index d83039e475..085a53862f 100644 --- a/src/eval.rs +++ b/src/eval.rs @@ -206,7 +206,7 @@ pub fn eval_main<'tcx>(tcx: TyCtxt<'tcx>, main_id: DefId, config: MiriConfig) -> let res: InterpResult<'_, i64> = (|| { // Main loop. while ecx.schedule()? { - assert!(ecx.step()?, "Bug: a terminated thread was scheduled for execution."); + assert!(ecx.step()?, "a terminated thread was scheduled for execution"); ecx.process_diagnostics(); } // Read the return code pointer *before* we run TLS destructors, to assert From 1d0eb93ebddd35126cc22f938ff9247ff0e27b0b Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Thu, 16 Apr 2020 12:26:32 -0700 Subject: [PATCH 22/77] Fix typo in a comment. --- src/shims/foreign_items/posix.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/shims/foreign_items/posix.rs b/src/shims/foreign_items/posix.rs index 5bb556aaa5..9e85bcc66b 100644 --- a/src/shims/foreign_items/posix.rs +++ b/src/shims/foreign_items/posix.rs @@ -336,7 +336,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx this.write_null(dest)?; } - // Incomplete shims that we "stub out" just to get pre-main initialziation code to work. + // Incomplete shims that we "stub out" just to get pre-main initialization code to work. // These shims are enabled only when the caller is in the standard library. | "pthread_attr_init" | "pthread_attr_destroy" From 688cacbdd73427ec5bd86300cc75a07fa89ec310 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Thu, 16 Apr 2020 13:32:40 -0700 Subject: [PATCH 23/77] Cleanup the implementation of adjust_global_const. --- src/machine.rs | 50 +---------------------- src/threads.rs | 106 +++++++++++++++++++++++++++++++++++++++++++------ 2 files changed, 94 insertions(+), 62 deletions(-) diff --git a/src/machine.rs b/src/machine.rs index 23d4e37c66..a81273960d 100644 --- a/src/machine.rs +++ b/src/machine.rs @@ -14,11 +14,9 @@ use rand::rngs::StdRng; use rustc_ast::attr; use rustc_data_structures::fx::FxHashMap; use rustc_middle::{ - middle::codegen_fn_attrs::CodegenFnAttrFlags, mir, ty::{ self, - Instance, layout::{LayoutCx, LayoutError, TyAndLayout}, TyCtxt, }, @@ -422,53 +420,7 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'mir, 'tcx> { ecx: &InterpCx<'mir, 'tcx, Self>, mut val: mir::interpret::ConstValue<'tcx>, ) -> InterpResult<'tcx, mir::interpret::ConstValue<'tcx>> { - match &mut val { - mir::interpret::ConstValue::Scalar(Scalar::Ptr(ptr)) => { - let alloc_id = ptr.alloc_id; - let alloc = ecx.tcx.alloc_map.lock().get(alloc_id); - let tcx = ecx.tcx; - let is_thread_local = |def_id| { - tcx.codegen_fn_attrs(def_id).flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) - }; - match alloc { - Some(GlobalAlloc::Static(def_id)) if is_thread_local(def_id) => { - let new_alloc_id = if let Some(new_alloc_id) = - ecx.get_thread_local_alloc_id(alloc_id) - { - new_alloc_id - } else { - if tcx.is_foreign_item(def_id) { - throw_unsup_format!( - "Foreign thread-local statics are not supported." 
- ) - } - let instance = Instance::mono(tcx.tcx, def_id); - let gid = GlobalId { instance, promoted: None }; - let raw_const = tcx - .const_eval_raw(ty::ParamEnv::reveal_all().and(gid)) - .map_err(|err| { - // no need to report anything, the const_eval call takes care of that - // for statics - assert!(tcx.is_static(def_id)); - match err { - ErrorHandled::Reported => err_inval!(ReferencedConstant), - ErrorHandled::TooGeneric => err_inval!(TooGeneric), - } - })?; - let id = raw_const.alloc_id; - let mut alloc_map = tcx.alloc_map.lock(); - let allocation = alloc_map.unwrap_memory(id); - let new_alloc_id = alloc_map.create_memory_alloc(allocation); - ecx.set_thread_local_alloc_id(alloc_id, new_alloc_id); - new_alloc_id - }; - ptr.alloc_id = new_alloc_id; - } - _ => {} - } - } - _ => {} - } + ecx.remap_thread_local_alloc_ids(&mut val)?; Ok(val) } diff --git a/src/threads.rs b/src/threads.rs index c623fcae81..c8348e2626 100644 --- a/src/threads.rs +++ b/src/threads.rs @@ -7,7 +7,13 @@ use std::num::NonZeroU32; use log::trace; use rustc_data_structures::fx::FxHashMap; +use rustc_hir::def_id::DefId; use rustc_index::vec::{Idx, IndexVec}; +use rustc_middle::{ + middle::codegen_fn_attrs::CodegenFnAttrFlags, + mir, + ty::{self, Instance}, +}; use crate::*; @@ -124,9 +130,9 @@ pub struct ThreadManager<'mir, 'tcx> { threads: IndexVec>, /// A counter used to generate unique identifiers for blocksets. blockset_counter: u32, - /// A mapping from an allocation id of a thread-local static to an - /// allocation id of a thread specific allocation. - thread_local_alloc_ids: RefCell>, + /// A mapping from a thread-local static to an allocation id of a thread + /// specific allocation. + thread_local_alloc_ids: RefCell>, } impl<'mir, 'tcx> Default for ThreadManager<'mir, 'tcx> { @@ -145,19 +151,19 @@ impl<'mir, 'tcx> Default for ThreadManager<'mir, 'tcx> { impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { /// Check if we have an allocation for the given thread local static for the /// active thread. - pub fn get_thread_local_alloc_id(&self, static_alloc_id: AllocId) -> Option { - self.thread_local_alloc_ids.borrow().get(&(static_alloc_id, self.active_thread)).cloned() + pub fn get_thread_local_alloc_id(&self, def_id: DefId) -> Option { + self.thread_local_alloc_ids.borrow().get(&(def_id, self.active_thread)).cloned() } /// Set the allocation id as the allocation id of the given thread local /// static for the active thread. - pub fn set_thread_local_alloc_id(&self, static_alloc_id: AllocId, new_alloc_id: AllocId) { + pub fn set_thread_local_alloc_id(&self, def_id: DefId, new_alloc_id: AllocId) { assert!( self.thread_local_alloc_ids .borrow_mut() - .insert((static_alloc_id, self.active_thread), new_alloc_id) + .insert((def_id, self.active_thread), new_alloc_id) .is_none(), - "Bug: a thread local initialized twice for the same thread." + "a thread local initialized twice for the same thread" ); } @@ -291,14 +297,88 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {} pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> { - fn get_thread_local_alloc_id(&self, static_alloc_id: AllocId) -> Option { + /// A workaround for thread-local statics until + /// https://github.com/rust-lang/rust/issues/70685 is fixed: change the + /// thread-local allocation id with a freshly generated allocation id for + /// the currently active thread. 
+ fn remap_thread_local_alloc_ids( + &self, + val: &mut mir::interpret::ConstValue<'tcx>, + ) -> InterpResult<'tcx> { let this = self.eval_context_ref(); - this.machine.threads.get_thread_local_alloc_id(static_alloc_id) + match val { + mir::interpret::ConstValue::Scalar(Scalar::Ptr(ptr)) => { + let alloc_id = ptr.alloc_id; + let alloc = this.tcx.alloc_map.lock().get(alloc_id); + let tcx = this.tcx; + let is_thread_local = |def_id| { + tcx.codegen_fn_attrs(def_id).flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) + }; + match alloc { + Some(GlobalAlloc::Static(def_id)) if is_thread_local(def_id) => { + ptr.alloc_id = this.get_or_create_thread_local_alloc_id(def_id)?; + } + _ => {} + } + } + _ => { + // FIXME: Handling only `Scalar` seems to work for now, but at + // least in principle thread-locals could be in any constant, so + // we should also consider other cases. However, once + // https://github.com/rust-lang/rust/issues/70685 gets fixed, + // this code will have to be rewritten anyway. + } + } + Ok(()) } - - fn set_thread_local_alloc_id(&self, static_alloc_id: AllocId, thread_local_alloc_id: AllocId) { + /// Get a thread-specific allocation id for the given thread-local static. + /// If needed, allocate a new one. + /// + /// FIXME: This method should be replaced as soon as + /// https://github.com/rust-lang/rust/issues/70685 gets fixed. + fn get_or_create_thread_local_alloc_id(&self, def_id: DefId) -> InterpResult<'tcx, AllocId> { let this = self.eval_context_ref(); - this.machine.threads.set_thread_local_alloc_id(static_alloc_id, thread_local_alloc_id) + let tcx = this.tcx; + if let Some(new_alloc_id) = this.machine.threads.get_thread_local_alloc_id(def_id) { + // We already have a thread-specific allocation id for this + // thread-local static. + Ok(new_alloc_id) + } else { + // We need to allocate a thread-specific allocation id for this + // thread-local static. + // + // At first, we invoke the `const_eval_raw` query and extract the + // allocation from it. Unfortunately, we have to duplicate the code + // from `Memory::get_global_alloc` that does this. + // + // Then we store the retrieved allocation back into the `alloc_map` + // to get a fresh allocation id, which we can use as a + // thread-specific allocation id for the thread-local static. + if tcx.is_foreign_item(def_id) { + throw_unsup_format!("foreign thread-local statics are not supported"); + } + // Invoke the `const_eval_raw` query. + let instance = Instance::mono(tcx.tcx, def_id); + let gid = GlobalId { instance, promoted: None }; + let raw_const = + tcx.const_eval_raw(ty::ParamEnv::reveal_all().and(gid)).map_err(|err| { + // no need to report anything, the const_eval call takes care of that + // for statics + assert!(tcx.is_static(def_id)); + err + })?; + let id = raw_const.alloc_id; + // Extract the allocation from the query result. + let mut alloc_map = tcx.alloc_map.lock(); + let allocation = alloc_map.unwrap_memory(id); + // Create a new allocation id for the same allocation in this hacky + // way. Internally, `alloc_map` deduplicates allocations, but this + // is fine because Miri will make a copy before a first mutable + // access. 
+ let new_alloc_id = alloc_map.create_memory_alloc(allocation); + this.machine.threads.set_thread_local_alloc_id(def_id, new_alloc_id); + Ok(new_alloc_id) + } } fn create_thread(&mut self) -> InterpResult<'tcx, ThreadId> { From a585dc8289120aa4ee232c1f8317a21eb5ae2c1a Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Thu, 16 Apr 2020 13:39:21 -0700 Subject: [PATCH 24/77] Add a missing newline. --- src/threads.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/threads.rs b/src/threads.rs index c8348e2626..72584b7265 100644 --- a/src/threads.rs +++ b/src/threads.rs @@ -331,6 +331,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx } Ok(()) } + /// Get a thread-specific allocation id for the given thread-local static. /// If needed, allocate a new one. /// From 44e930559917968d4513e5915f5957f2fe1f3e11 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Thu, 16 Apr 2020 13:52:14 -0700 Subject: [PATCH 25/77] Rename threads to thread to match the Rust standard library. --- src/lib.rs | 6 +++--- src/shims/mod.rs | 2 +- src/shims/sync.rs | 2 +- src/shims/{threads.rs => thread.rs} | 0 src/{threads.rs => thread.rs} | 0 5 files changed, 5 insertions(+), 5 deletions(-) rename src/shims/{threads.rs => thread.rs} (100%) rename src/{threads.rs => thread.rs} (100%) diff --git a/src/lib.rs b/src/lib.rs index bedacf705a..96e6f7d63e 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -27,7 +27,7 @@ mod operator; mod range_map; mod shims; mod stacked_borrows; -mod threads; +mod thread; // Make all those symbols available in the same place as our own. pub use rustc_mir::interpret::*; @@ -42,7 +42,7 @@ pub use crate::shims::intrinsics::EvalContextExt as IntrinsicsEvalContextExt; pub use crate::shims::os_str::EvalContextExt as OsStrEvalContextExt; pub use crate::shims::panic::{CatchUnwindData, EvalContextExt as PanicEvalContextExt}; pub use crate::shims::sync::{EvalContextExt as SyncEvalContextExt}; -pub use crate::shims::threads::EvalContextExt as ThreadShimsEvalContextExt; +pub use crate::shims::thread::EvalContextExt as ThreadShimsEvalContextExt; pub use crate::shims::time::EvalContextExt as TimeEvalContextExt; pub use crate::shims::tls::{EvalContextExt as TlsEvalContextExt, TlsData}; pub use crate::shims::EvalContextExt as ShimsEvalContextExt; @@ -63,7 +63,7 @@ pub use crate::range_map::RangeMap; pub use crate::stacked_borrows::{ EvalContextExt as StackedBorEvalContextExt, Item, Permission, PtrId, Stack, Stacks, Tag, }; -pub use crate::threads::{ +pub use crate::thread::{ EvalContextExt as ThreadsEvalContextExt, ThreadId, ThreadManager, ThreadState, }; diff --git a/src/shims/mod.rs b/src/shims/mod.rs index 118058dd32..166d1a5456 100644 --- a/src/shims/mod.rs +++ b/src/shims/mod.rs @@ -6,7 +6,7 @@ pub mod intrinsics; pub mod os_str; pub mod panic; pub mod sync; -pub mod threads; +pub mod thread; pub mod time; pub mod tls; diff --git a/src/shims/sync.rs b/src/shims/sync.rs index 76f97aab23..d8a0015638 100644 --- a/src/shims/sync.rs +++ b/src/shims/sync.rs @@ -2,7 +2,7 @@ use rustc_middle::ty::{layout::TyAndLayout, TyKind, TypeAndMut}; use rustc_target::abi::{LayoutOf, Size}; use crate::stacked_borrows::Tag; -use crate::threads::{BlockSetId, ThreadId}; +use crate::thread::BlockSetId; use crate::*; fn assert_ptr_target_min_size<'mir, 'tcx: 'mir>( diff --git a/src/shims/threads.rs b/src/shims/thread.rs similarity index 100% rename from src/shims/threads.rs rename to src/shims/thread.rs diff --git a/src/threads.rs b/src/thread.rs similarity index 100% rename from 
src/threads.rs rename to src/thread.rs From d062f63519bfe7e366f0cadfdb15073434558351 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Thu, 16 Apr 2020 19:40:02 -0700 Subject: [PATCH 26/77] Fix support for MacOS. --- src/eval.rs | 16 ++- src/lib.rs | 2 +- src/shims/foreign_items/posix/macos.rs | 3 +- src/shims/tls.rs | 151 ++++++++++++++----------- src/thread.rs | 49 ++++---- 5 files changed, 128 insertions(+), 93 deletions(-) diff --git a/src/eval.rs b/src/eval.rs index 085a53862f..ab82c39836 100644 --- a/src/eval.rs +++ b/src/eval.rs @@ -205,15 +205,25 @@ pub fn eval_main<'tcx>(tcx: TyCtxt<'tcx>, main_id: DefId, config: MiriConfig) -> // Perform the main execution. let res: InterpResult<'_, i64> = (|| { // Main loop. - while ecx.schedule()? { - assert!(ecx.step()?, "a terminated thread was scheduled for execution"); + loop { + match ecx.schedule()? { + SchedulingAction::ExecuteStep => { + assert!(ecx.step()?, "a terminated thread was scheduled for execution"); + } + SchedulingAction::ExecuteDtors => { + ecx.run_tls_dtors_for_active_thread()?; + } + SchedulingAction::Stop => { + break; + } + } ecx.process_diagnostics(); } // Read the return code pointer *before* we run TLS destructors, to assert // that it was written to by the time that `start` lang item returned. let return_code = ecx.read_scalar(ret_place.into())?.not_undef()?.to_machine_isize(&ecx)?; // Global destructors. - ecx.run_tls_dtors()?; + ecx.run_windows_tls_dtors()?; Ok(return_code) })(); diff --git a/src/lib.rs b/src/lib.rs index 96e6f7d63e..beee94b918 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -64,7 +64,7 @@ pub use crate::stacked_borrows::{ EvalContextExt as StackedBorEvalContextExt, Item, Permission, PtrId, Stack, Stacks, Tag, }; pub use crate::thread::{ - EvalContextExt as ThreadsEvalContextExt, ThreadId, ThreadManager, ThreadState, + EvalContextExt as ThreadsEvalContextExt, SchedulingAction, ThreadId, ThreadManager, ThreadState, }; /// Insert rustc arguments at the beginning of the argument list that Miri wants to be diff --git a/src/shims/foreign_items/posix/macos.rs b/src/shims/foreign_items/posix/macos.rs index dd3dba6ec0..9f65d0f9c4 100644 --- a/src/shims/foreign_items/posix/macos.rs +++ b/src/shims/foreign_items/posix/macos.rs @@ -82,7 +82,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let dtor = this.read_scalar(args[0])?.not_undef()?; let dtor = this.memory.get_fn(dtor)?.as_instance()?; let data = this.read_scalar(args[1])?.not_undef()?; - this.machine.tls.set_global_dtor(dtor, data)?; + let active_thread = this.get_active_thread()?; + this.machine.tls.set_global_dtor(active_thread, dtor, data)?; } // Querying system information diff --git a/src/shims/tls.rs b/src/shims/tls.rs index da0c585958..6dc3025acd 100644 --- a/src/shims/tls.rs +++ b/src/shims/tls.rs @@ -2,15 +2,17 @@ use std::collections::BTreeMap; use std::collections::btree_map::Entry; +use std::collections::HashSet; use log::trace; +use rustc_index::vec::Idx; use rustc_middle::ty; use rustc_target::abi::{Size, HasDataLayout}; use crate::{ HelpersEvalContextExt, InterpResult, MPlaceTy, Scalar, StackPopCleanup, Tag, ThreadId, - ThreadState, ThreadsEvalContextExt, + ThreadsEvalContextExt, }; pub type TlsKey = u128; @@ -32,11 +34,11 @@ pub struct TlsData<'tcx> { /// pthreads-style thread-local storage. keys: BTreeMap>, - /// A single global dtor (that's how things work on macOS) with a data argument. 
- global_dtor: Option<(ty::Instance<'tcx>, Scalar)>, + /// A single global per thread dtor (that's how things work on macOS) with a data argument. + global_dtors: BTreeMap, Scalar)>, /// Whether we are in the "destruct" phase, during which some operations are UB. - dtors_running: bool, + dtors_running: HashSet, } impl<'tcx> Default for TlsData<'tcx> { @@ -44,8 +46,8 @@ impl<'tcx> Default for TlsData<'tcx> { TlsData { next_key: 1, // start with 1 as we must not use 0 on Windows keys: Default::default(), - global_dtor: None, - dtors_running: false, + global_dtors: Default::default(), + dtors_running: Default::default(), } } } @@ -112,16 +114,15 @@ impl<'tcx> TlsData<'tcx> { } } - pub fn set_global_dtor(&mut self, dtor: ty::Instance<'tcx>, data: Scalar) -> InterpResult<'tcx> { - if self.dtors_running { + /// Set global dtor for the given thread. + pub fn set_global_dtor(&mut self, thread: ThreadId, dtor: ty::Instance<'tcx>, data: Scalar) -> InterpResult<'tcx> { + if self.dtors_running.contains(&thread) { // UB, according to libstd docs. throw_ub_format!("setting global destructor while destructors are already running"); } - if self.global_dtor.is_some() { - throw_unsup_format!("setting more than one global destructor is not supported"); + if self.global_dtors.insert(thread, (dtor, data)).is_some() { + throw_unsup_format!("setting more than one global destructor for the same thread is not supported"); } - - self.global_dtor = Some((dtor, data)); Ok(()) } @@ -148,7 +149,7 @@ impl<'tcx> TlsData<'tcx> { &mut self, key: Option, thread_id: ThreadId, - ) -> Option<(ty::Instance<'tcx>, ThreadId, Scalar, TlsKey)> { + ) -> Option<(ty::Instance<'tcx>, Scalar, TlsKey)> { use std::collections::Bound::*; let thread_local = &mut self.keys; @@ -161,9 +162,9 @@ impl<'tcx> TlsData<'tcx> { { match data.entry(thread_id) { Entry::Occupied(entry) => { - let (thread_id, data_scalar) = entry.remove_entry(); + let data_scalar = entry.remove(); if let Some(dtor) = dtor { - let ret = Some((*dtor, thread_id, data_scalar, key)); + let ret = Some((*dtor, data_scalar, key)); return ret; } } @@ -176,83 +177,99 @@ impl<'tcx> TlsData<'tcx> { impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {} pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> { - /// Run TLS destructors for all threads. - fn run_tls_dtors(&mut self) -> InterpResult<'tcx> { + + /// Run TLS destructors for the main thread on Windows. The implementation + /// assumes that we do not support concurrency on Windows yet. + /// + /// Note: on non-Windows OS this function is a no-op. + fn run_windows_tls_dtors(&mut self) -> InterpResult<'tcx> { let this = self.eval_context_mut(); - assert!(!this.machine.tls.dtors_running, "running TLS dtors twice"); - this.machine.tls.dtors_running = true; + if this.tcx.sess.target.target.target_os != "windows" { + return Ok(()); + } + let active_thread = this.get_active_thread()?; + assert_eq!(active_thread.index(), 0, "concurrency on Windows not supported"); + assert!(!this.machine.tls.dtors_running.contains(&active_thread), "running TLS dtors twice"); + this.machine.tls.dtors_running.insert(active_thread); + // Windows has a special magic linker section that is run on certain events. + // Instead of searching for that section and supporting arbitrary hooks in there + // (that would be basically https://github.com/rust-lang/miri/issues/450), + // we specifically look up the static in libstd that we know is placed + // in that section. 
+ let thread_callback = this.eval_path_scalar(&["std", "sys", "windows", "thread_local", "p_thread_callback"])?; + let thread_callback = this.memory.get_fn(thread_callback.not_undef()?)?.as_instance()?; + + // The signature of this function is `unsafe extern "system" fn(h: c::LPVOID, dwReason: c::DWORD, pv: c::LPVOID)`. + let reason = this.eval_path_scalar(&["std", "sys", "windows", "c", "DLL_PROCESS_DETACH"])?; + let ret_place = MPlaceTy::dangling(this.machine.layouts.unit, this).into(); + this.call_function( + thread_callback, + &[Scalar::null_ptr(this).into(), reason.into(), Scalar::null_ptr(this).into()], + Some(ret_place), + StackPopCleanup::None { cleanup: true }, + )?; + + // step until out of stackframes + this.run()?; + + // Windows doesn't have other destructors. + Ok(()) + } + /// Run TLS destructors for the active thread. + /// + /// Note: on Windows OS this function is a no-op because we do not support + /// concurrency on Windows yet. + fn run_tls_dtors_for_active_thread(&mut self) -> InterpResult<'tcx> { + let this = self.eval_context_mut(); if this.tcx.sess.target.target.target_os == "windows" { - // Windows has a special magic linker section that is run on certain events. - // Instead of searching for that section and supporting arbitrary hooks in there - // (that would be basically https://github.com/rust-lang/miri/issues/450), - // we specifically look up the static in libstd that we know is placed - // in that section. - let thread_callback = this.eval_path_scalar(&["std", "sys", "windows", "thread_local", "p_thread_callback"])?; - let thread_callback = this.memory.get_fn(thread_callback.not_undef()?)?.as_instance()?; - - // The signature of this function is `unsafe extern "system" fn(h: c::LPVOID, dwReason: c::DWORD, pv: c::LPVOID)`. - let reason = this.eval_path_scalar(&["std", "sys", "windows", "c", "DLL_PROCESS_DETACH"])?; + return Ok(()); + } + let thread_id = this.get_active_thread()?; + assert!(!this.machine.tls.dtors_running.contains(&thread_id), "running TLS dtors twice"); + this.machine.tls.dtors_running.insert(thread_id); + + // The macOS global dtor runs "before any TLS slots get freed", so do that first. + if let Some(&(instance, data)) = this.machine.tls.global_dtors.get(&thread_id) { + trace!("Running global dtor {:?} on {:?} at {:?}", instance, data, thread_id); + let ret_place = MPlaceTy::dangling(this.machine.layouts.unit, this).into(); this.call_function( - thread_callback, - &[Scalar::null_ptr(this).into(), reason.into(), Scalar::null_ptr(this).into()], + instance, + &[data.into()], Some(ret_place), StackPopCleanup::None { cleanup: true }, )?; // step until out of stackframes this.run()?; - - // Windows doesn't have other destructors. - return Ok(()); } - // The macOS global dtor runs "before any TLS slots get freed", so do that first. 
- if let Some((instance, data)) = this.machine.tls.global_dtor { - trace!("Running global dtor {:?} on {:?}", instance, data); + assert!(this.has_terminated(thread_id)?, "running TLS dtors for non-terminated thread"); + let mut dtor = this.machine.tls.fetch_tls_dtor(None, thread_id); + while let Some((instance, ptr, key)) = dtor { + trace!("Running TLS dtor {:?} on {:?} at {:?}", instance, ptr, thread_id); + assert!(!this.is_null(ptr).unwrap(), "Data can't be NULL when dtor is called!"); let ret_place = MPlaceTy::dangling(this.machine.layouts.unit, this).into(); this.call_function( instance, - &[data.into()], + &[ptr.into()], Some(ret_place), StackPopCleanup::None { cleanup: true }, )?; // step until out of stackframes this.run()?; - } - // Now run the "keyed" destructors. - for (thread_id, thread_state) in this.get_all_thread_ids_with_states() { - assert!(thread_state == ThreadState::Terminated, - "TLS destructors should be executed after all threads terminated."); - this.set_active_thread(thread_id)?; - let mut dtor = this.machine.tls.fetch_tls_dtor(None, thread_id); - while let Some((instance, thread_id, ptr, key)) = dtor { - trace!("Running TLS dtor {:?} on {:?} at {:?}", instance, ptr, thread_id); - assert!(!this.is_null(ptr).unwrap(), "Data can't be NULL when dtor is called!"); - - let ret_place = MPlaceTy::dangling(this.layout_of(this.tcx.mk_unit())?, this).into(); - this.call_function( - instance, - &[ptr.into()], - Some(ret_place), - StackPopCleanup::None { cleanup: true }, - )?; - - // step until out of stackframes - this.run()?; - - // Fetch next dtor after `key`. - dtor = match this.machine.tls.fetch_tls_dtor(Some(key), thread_id) { - dtor @ Some(_) => dtor, - // We ran each dtor once, start over from the beginning. - None => this.machine.tls.fetch_tls_dtor(None, thread_id), - }; - } + // Fetch next dtor after `key`. + dtor = match this.machine.tls.fetch_tls_dtor(Some(key), thread_id) { + dtor @ Some(_) => dtor, + // We ran each dtor once, start over from the beginning. + None => this.machine.tls.fetch_tls_dtor(None, thread_id), + }; } + Ok(()) } } diff --git a/src/thread.rs b/src/thread.rs index 72584b7265..d40b2a176e 100644 --- a/src/thread.rs +++ b/src/thread.rs @@ -17,6 +17,16 @@ use rustc_middle::{ use crate::*; +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum SchedulingAction { + /// Execute step on the active thread. + ExecuteStep, + /// Execute destructors of the active thread. + ExecuteDtors, + /// Stop the program. + Stop, +} + /// A thread identifier. #[derive(Clone, Copy, Debug, PartialOrd, Ord, PartialEq, Eq, Hash)] pub struct ThreadId(usize); @@ -197,6 +207,11 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { self.active_thread } + /// Has the given thread terminated? + fn has_terminated(&self, thread_id: ThreadId) -> bool { + self.threads[thread_id].state == ThreadState::Terminated + } + /// Get the borrow of the currently active thread. fn active_thread_mut(&mut self) -> &mut Thread<'mir, 'tcx> { &mut self.threads[self.active_thread] @@ -234,11 +249,6 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { self.active_thread_mut().thread_name = Some(new_thread_name); } - /// Get ids and states of all threads ever allocated. - fn get_all_thread_ids_with_states(&self) -> Vec<(ThreadId, ThreadState)> { - self.threads.iter_enumerated().map(|(id, thread)| (id, thread.state)).collect() - } - /// Allocate a new blockset id. 
fn create_blockset(&mut self) -> BlockSetId { self.blockset_counter = self.blockset_counter.checked_add(1).unwrap(); @@ -265,10 +275,8 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { None } - /// Decide which thread to run next. - /// - /// Returns `false` if all threads terminated. - fn schedule(&mut self) -> InterpResult<'tcx, bool> { + /// Decide which action to take next and on which thread. + fn schedule(&mut self) -> InterpResult<'tcx, SchedulingAction> { if self.threads[self.active_thread].check_terminated() { // Check if we need to unblock any threads. for (i, thread) in self.threads.iter_enumerated_mut() { @@ -277,18 +285,19 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { thread.state = ThreadState::Enabled; } } + return Ok(SchedulingAction::ExecuteDtors); } if self.threads[self.active_thread].state == ThreadState::Enabled { - return Ok(true); + return Ok(SchedulingAction::ExecuteStep); } if let Some(enabled_thread) = self.threads.iter().position(|thread| thread.state == ThreadState::Enabled) { self.active_thread = ThreadId::new(enabled_thread); - return Ok(true); + return Ok(SchedulingAction::ExecuteStep); } if self.threads.iter().all(|thread| thread.state == ThreadState::Terminated) { - Ok(false) + Ok(SchedulingAction::Stop) } else { throw_machine_stop!(TerminationInfo::Deadlock); } @@ -409,6 +418,11 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx Ok(this.machine.threads.get_active_thread_id()) } + fn has_terminated(&self, thread_id: ThreadId) -> InterpResult<'tcx, bool> { + let this = self.eval_context_ref(); + Ok(this.machine.threads.has_terminated(thread_id)) + } + fn active_thread_stack(&self) -> &[Frame<'mir, 'tcx, Tag, FrameData<'tcx>>] { let this = self.eval_context_ref(); this.machine.threads.active_thread_stack() @@ -424,11 +438,6 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx Ok(this.machine.threads.set_thread_name(new_thread_name)) } - fn get_all_thread_ids_with_states(&mut self) -> Vec<(ThreadId, ThreadState)> { - let this = self.eval_context_mut(); - this.machine.threads.get_all_thread_ids_with_states() - } - fn create_blockset(&mut self) -> InterpResult<'tcx, BlockSetId> { let this = self.eval_context_mut(); Ok(this.machine.threads.create_blockset()) @@ -444,10 +453,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx Ok(this.machine.threads.unblock_random_thread(set)) } - /// Decide which thread to run next. - /// - /// Returns `false` if all threads terminated. - fn schedule(&mut self) -> InterpResult<'tcx, bool> { + /// Decide which action to take next and on which thread. + fn schedule(&mut self) -> InterpResult<'tcx, SchedulingAction> { let this = self.eval_context_mut(); this.machine.threads.schedule() } From 134533d066a4ab57d1a3e7ed9590052db313b5e6 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Fri, 17 Apr 2020 15:38:23 -0700 Subject: [PATCH 27/77] Add a comment explaining global destructors on MacOS. --- src/shims/tls.rs | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/src/shims/tls.rs b/src/shims/tls.rs index 6dc3025acd..722b24d747 100644 --- a/src/shims/tls.rs +++ b/src/shims/tls.rs @@ -114,8 +114,21 @@ impl<'tcx> TlsData<'tcx> { } } - /// Set global dtor for the given thread. - pub fn set_global_dtor(&mut self, thread: ThreadId, dtor: ty::Instance<'tcx>, data: Scalar) -> InterpResult<'tcx> { + /// Set global dtor for the given thread. 
This function is used to implement + /// `_tlv_atexit` shim on MacOS. + /// + /// Global destructors are available only on MacOS and (potentially + /// confusingly) they seem to be still per thread as can be guessed from the + /// following comment in the [`_tlv_atexit` + /// implementation](https://github.com/opensource-apple/dyld/blob/195030646877261f0c8c7ad8b001f52d6a26f514/src/threadLocalVariables.c#L389): + /// + /// // NOTE: this does not need locks because it only operates on current thread data + pub fn set_global_dtor( + &mut self, + thread: ThreadId, + dtor: ty::Instance<'tcx>, + data: Scalar + ) -> InterpResult<'tcx> { if self.dtors_running.contains(&thread) { // UB, according to libstd docs. throw_ub_format!("setting global destructor while destructors are already running"); From 46fd333daa8dc71f1c61aa87ecbf9881fae920c6 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Sat, 18 Apr 2020 15:39:53 -0700 Subject: [PATCH 28/77] Implement thread::yield_now. --- src/shims/foreign_items/posix.rs | 5 +++++ src/shims/thread.rs | 8 ++++++++ src/thread.rs | 32 +++++++++++++++++++++++++++----- 3 files changed, 40 insertions(+), 5 deletions(-) diff --git a/src/shims/foreign_items/posix.rs b/src/shims/foreign_items/posix.rs index 9e85bcc66b..4574d203ef 100644 --- a/src/shims/foreign_items/posix.rs +++ b/src/shims/foreign_items/posix.rs @@ -318,6 +318,11 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let result = this.prctl(args[0], args[1], args[2], args[3], args[4])?; this.write_scalar(Scalar::from_i32(result), dest)?; } + "sched_yield" => { + assert_eq!(args.len(), 0); + let result = this.sched_yield()?; + this.write_scalar(Scalar::from_i32(result), dest)?; + } // Miscellaneous "isatty" => { diff --git a/src/shims/thread.rs b/src/shims/thread.rs index d8ba11d267..ccdf6df3f9 100644 --- a/src/shims/thread.rs +++ b/src/shims/thread.rs @@ -111,4 +111,12 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx Ok(0) } + + fn sched_yield(&mut self) -> InterpResult<'tcx, i32> { + let this = self.eval_context_mut(); + + this.yield_active_thread()?; + + Ok(0) + } } diff --git a/src/thread.rs b/src/thread.rs index d40b2a176e..31296ad96f 100644 --- a/src/thread.rs +++ b/src/thread.rs @@ -143,6 +143,8 @@ pub struct ThreadManager<'mir, 'tcx> { /// A mapping from a thread-local static to an allocation id of a thread /// specific allocation. thread_local_alloc_ids: RefCell>, + /// A flag that indicates that we should change the active thread. + yield_active_thread: bool, } impl<'mir, 'tcx> Default for ThreadManager<'mir, 'tcx> { @@ -154,6 +156,7 @@ impl<'mir, 'tcx> Default for ThreadManager<'mir, 'tcx> { threads: threads, blockset_counter: 0, thread_local_alloc_ids: Default::default(), + yield_active_thread: false, } } } @@ -275,6 +278,11 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { None } + /// Change the active thread to some enabled thread. + fn yield_active_thread(&mut self) { + self.yield_active_thread = true; + } + /// Decide which action to take next and on which thread. 
fn schedule(&mut self) -> InterpResult<'tcx, SchedulingAction> { if self.threads[self.active_thread].check_terminated() { @@ -287,13 +295,21 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { } return Ok(SchedulingAction::ExecuteDtors); } - if self.threads[self.active_thread].state == ThreadState::Enabled { + if self.threads[self.active_thread].state == ThreadState::Enabled + && !self.yield_active_thread + { return Ok(SchedulingAction::ExecuteStep); } - if let Some(enabled_thread) = - self.threads.iter().position(|thread| thread.state == ThreadState::Enabled) - { - self.active_thread = ThreadId::new(enabled_thread); + for (id, thread) in self.threads.iter_enumerated() { + if thread.state == ThreadState::Enabled { + if !(self.yield_active_thread && id == self.active_thread) { + self.active_thread = id; + break; + } + } + } + self.yield_active_thread = false; + if self.threads[self.active_thread].state == ThreadState::Enabled { return Ok(SchedulingAction::ExecuteStep); } if self.threads.iter().all(|thread| thread.state == ThreadState::Terminated) { @@ -453,6 +469,12 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx Ok(this.machine.threads.unblock_random_thread(set)) } + fn yield_active_thread(&mut self) -> InterpResult<'tcx> { + let this = self.eval_context_mut(); + this.machine.threads.yield_active_thread(); + Ok(()) + } + /// Decide which action to take next and on which thread. fn schedule(&mut self) -> InterpResult<'tcx, SchedulingAction> { let this = self.eval_context_mut(); From 421be273cc389a5d426063f71cba82bf1c364f00 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Sat, 18 Apr 2020 12:25:11 -0700 Subject: [PATCH 29/77] Add concurrency tests. --- src/shims/sync.rs | 1 + src/shims/tls.rs | 2 + src/thread.rs | 4 +- .../concurrency/dangling_tls_lib.rs | 46 ++++++++++++++++ .../libc_pthread_mutex_deadlock.rs | 32 ++++++++++++ .../libc_pthread_mutex_wrong_owner.rs | 32 ++++++++++++ ...libc_pthread_rwlock_write_read_deadlock.rs | 32 ++++++++++++ ...ibc_pthread_rwlock_write_write_deadlock.rs | 32 ++++++++++++ tests/run-pass/concurrency/locks.rs | 52 +++++++++++++++++-- 9 files changed, 227 insertions(+), 6 deletions(-) create mode 100644 tests/compile-fail/concurrency/dangling_tls_lib.rs create mode 100644 tests/compile-fail/concurrency/libc_pthread_mutex_deadlock.rs create mode 100644 tests/compile-fail/concurrency/libc_pthread_mutex_wrong_owner.rs create mode 100644 tests/compile-fail/concurrency/libc_pthread_rwlock_write_read_deadlock.rs create mode 100644 tests/compile-fail/concurrency/libc_pthread_rwlock_write_write_deadlock.rs diff --git a/src/shims/sync.rs b/src/shims/sync.rs index d8a0015638..6a1ea108db 100644 --- a/src/shims/sync.rs +++ b/src/shims/sync.rs @@ -532,6 +532,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx } } + // FIXME: We should check that this lock was locked by the active thread. fn pthread_rwlock_unlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); diff --git a/src/shims/tls.rs b/src/shims/tls.rs index 722b24d747..89ec165965 100644 --- a/src/shims/tls.rs +++ b/src/shims/tls.rs @@ -233,6 +233,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx /// /// Note: on Windows OS this function is a no-op because we do not support /// concurrency on Windows yet. + /// + /// FIXME: we do not support yet deallocation of thread local statics. 
fn run_tls_dtors_for_active_thread(&mut self) -> InterpResult<'tcx> { let this = self.eval_context_mut(); if this.tcx.sess.target.target.target_os == "windows" { diff --git a/src/thread.rs b/src/thread.rs index 31296ad96f..ab6a4c94db 100644 --- a/src/thread.rs +++ b/src/thread.rs @@ -164,13 +164,13 @@ impl<'mir, 'tcx> Default for ThreadManager<'mir, 'tcx> { impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { /// Check if we have an allocation for the given thread local static for the /// active thread. - pub fn get_thread_local_alloc_id(&self, def_id: DefId) -> Option { + fn get_thread_local_alloc_id(&self, def_id: DefId) -> Option { self.thread_local_alloc_ids.borrow().get(&(def_id, self.active_thread)).cloned() } /// Set the allocation id as the allocation id of the given thread local /// static for the active thread. - pub fn set_thread_local_alloc_id(&self, def_id: DefId, new_alloc_id: AllocId) { + fn set_thread_local_alloc_id(&self, def_id: DefId, new_alloc_id: AllocId) { assert!( self.thread_local_alloc_ids .borrow_mut() diff --git a/tests/compile-fail/concurrency/dangling_tls_lib.rs b/tests/compile-fail/concurrency/dangling_tls_lib.rs new file mode 100644 index 0000000000..ad12c107bf --- /dev/null +++ b/tests/compile-fail/concurrency/dangling_tls_lib.rs @@ -0,0 +1,46 @@ +// ignore-windows + +#![feature(thread_local_internals)] + +use std::cell::RefCell; +use std::thread; + +static A: std::thread::LocalKey> = { + #[inline] + fn __init() -> RefCell { + RefCell::new(0) + } + + unsafe fn __getit() -> Option<&'static RefCell> { + static __KEY: std::thread::__OsLocalKeyInner> = + std::thread::__OsLocalKeyInner::new(); + __KEY.get(__init) + } + + unsafe { std::thread::LocalKey::new(__getit) } +}; + +struct Sender(*mut u8); + +unsafe impl Send for Sender {} + +fn main() { + A.with(|f| { + assert_eq!(*f.borrow(), 0); + *f.borrow_mut() = 4; + }); + + let handle = thread::spawn(|| { + let ptr = A.with(|f| { + assert_eq!(*f.borrow(), 0); + *f.borrow_mut() = 5; + &mut *f.borrow_mut() as *mut u8 + }); + Sender(ptr) + }); + let ptr = handle.join().unwrap().0; + A.with(|f| { + assert_eq!(*f.borrow(), 4); + }); + let _x = unsafe { *ptr }; //~ ERROR Undefined Behavior +} diff --git a/tests/compile-fail/concurrency/libc_pthread_mutex_deadlock.rs b/tests/compile-fail/concurrency/libc_pthread_mutex_deadlock.rs new file mode 100644 index 0000000000..5d04635a36 --- /dev/null +++ b/tests/compile-fail/concurrency/libc_pthread_mutex_deadlock.rs @@ -0,0 +1,32 @@ +// ignore-windows: No libc on Windows + +#![feature(rustc_private)] + +extern crate libc; + +use std::cell::UnsafeCell; +use std::sync::Arc; +use std::thread; + +struct Mutex(UnsafeCell); + +unsafe impl Send for Mutex {} +unsafe impl Sync for Mutex {} + +fn new_lock() -> Arc { + Arc::new(Mutex(UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER))) +} + +fn main() { + unsafe { + let lock = new_lock(); + assert_eq!(libc::pthread_mutex_lock(lock.0.get() as *mut _), 0); + + let lock_copy = lock.clone(); + thread::spawn(move || { + assert_eq!(libc::pthread_mutex_lock(lock_copy.0.get() as *mut _), 0); //~ ERROR: deadlock + }) + .join() + .unwrap(); + } +} diff --git a/tests/compile-fail/concurrency/libc_pthread_mutex_wrong_owner.rs b/tests/compile-fail/concurrency/libc_pthread_mutex_wrong_owner.rs new file mode 100644 index 0000000000..3009721abe --- /dev/null +++ b/tests/compile-fail/concurrency/libc_pthread_mutex_wrong_owner.rs @@ -0,0 +1,32 @@ +// ignore-windows: No libc on Windows + +#![feature(rustc_private)] + +extern crate libc; + +use 
std::cell::UnsafeCell; +use std::sync::Arc; +use std::thread; + +struct Mutex(UnsafeCell); + +unsafe impl Send for Mutex {} +unsafe impl Sync for Mutex {} + +fn new_lock() -> Arc { + Arc::new(Mutex(UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER))) +} + +fn main() { + unsafe { + let lock = new_lock(); + assert_eq!(libc::pthread_mutex_lock(lock.0.get() as *mut _), 0); + + let lock_copy = lock.clone(); + thread::spawn(move || { + assert_eq!(libc::pthread_mutex_unlock(lock_copy.0.get() as *mut _), 0); //~ ERROR: Undefined Behavior: called pthread_mutex_unlock on a mutex owned by another thread + }) + .join() + .unwrap(); + } +} diff --git a/tests/compile-fail/concurrency/libc_pthread_rwlock_write_read_deadlock.rs b/tests/compile-fail/concurrency/libc_pthread_rwlock_write_read_deadlock.rs new file mode 100644 index 0000000000..19dce431c8 --- /dev/null +++ b/tests/compile-fail/concurrency/libc_pthread_rwlock_write_read_deadlock.rs @@ -0,0 +1,32 @@ +// ignore-windows: No libc on Windows + +#![feature(rustc_private)] + +extern crate libc; + +use std::cell::UnsafeCell; +use std::sync::Arc; +use std::thread; + +struct RwLock(UnsafeCell); + +unsafe impl Send for RwLock {} +unsafe impl Sync for RwLock {} + +fn new_lock() -> Arc { + Arc::new(RwLock(UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER))) +} + +fn main() { + unsafe { + let lock = new_lock(); + assert_eq!(libc::pthread_rwlock_rdlock(lock.0.get() as *mut _), 0); + + let lock_copy = lock.clone(); + thread::spawn(move || { + assert_eq!(libc::pthread_rwlock_wrlock(lock_copy.0.get() as *mut _), 0); //~ ERROR: deadlock + }) + .join() + .unwrap(); + } +} diff --git a/tests/compile-fail/concurrency/libc_pthread_rwlock_write_write_deadlock.rs b/tests/compile-fail/concurrency/libc_pthread_rwlock_write_write_deadlock.rs new file mode 100644 index 0000000000..098c1c2fe2 --- /dev/null +++ b/tests/compile-fail/concurrency/libc_pthread_rwlock_write_write_deadlock.rs @@ -0,0 +1,32 @@ +// ignore-windows: No libc on Windows + +#![feature(rustc_private)] + +extern crate libc; + +use std::cell::UnsafeCell; +use std::sync::Arc; +use std::thread; + +struct RwLock(UnsafeCell); + +unsafe impl Send for RwLock {} +unsafe impl Sync for RwLock {} + +fn new_lock() -> Arc { + Arc::new(RwLock(UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER))) +} + +fn main() { + unsafe { + let lock = new_lock(); + assert_eq!(libc::pthread_rwlock_wrlock(lock.0.get() as *mut _), 0); + + let lock_copy = lock.clone(); + thread::spawn(move || { + assert_eq!(libc::pthread_rwlock_wrlock(lock_copy.0.get() as *mut _), 0); //~ ERROR: deadlock + }) + .join() + .unwrap(); + } +} diff --git a/tests/run-pass/concurrency/locks.rs b/tests/run-pass/concurrency/locks.rs index 3c8373691b..90c10b8ffe 100644 --- a/tests/run-pass/concurrency/locks.rs +++ b/tests/run-pass/concurrency/locks.rs @@ -1,11 +1,9 @@ // ignore-windows -//! This test just calls the relevant APIs to check if Miri crashes. 
- -use std::sync::{Arc, Mutex}; +use std::sync::{Arc, Mutex, RwLock}; use std::thread; -fn main() { +fn check_mutex() { let data = Arc::new(Mutex::new(0)); let mut threads = Vec::new(); @@ -27,3 +25,49 @@ fn main() { let data = Arc::try_unwrap(data).unwrap().into_inner().unwrap(); assert_eq!(data, 3); } + +fn check_rwlock_write() { + let data = Arc::new(RwLock::new(0)); + let mut threads = Vec::new(); + + for _ in 0..3 { + let data = Arc::clone(&data); + let thread = thread::spawn(move || { + let mut data = data.write().unwrap(); + *data += 1; + }); + threads.push(thread); + } + + for thread in threads { + thread.join().unwrap(); + } + + assert!(data.try_write().is_ok()); + + let data = Arc::try_unwrap(data).unwrap().into_inner().unwrap(); + assert_eq!(data, 3); +} + +fn check_rwlock_read_no_deadlock() { + let l1 = Arc::new(RwLock::new(0)); + let l2 = Arc::new(RwLock::new(0)); + + let l1_copy = Arc::clone(&l1); + let l2_copy = Arc::clone(&l2); + let _guard1 = l1.read().unwrap(); + let handle = thread::spawn(move || { + let _guard2 = l2_copy.read().unwrap(); + thread::yield_now(); + let _guard1 = l1_copy.read().unwrap(); + }); + thread::yield_now(); + let _guard2 = l2.read().unwrap(); + handle.join().unwrap(); +} + +fn main() { + check_mutex(); + check_rwlock_write(); + check_rwlock_read_no_deadlock(); +} From c84b2890adb6b67712c6cff565eed2521dc9ba65 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Sun, 19 Apr 2020 12:22:33 -0700 Subject: [PATCH 30/77] Update a comment in README about what concurrency checks we support. --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index fb981a71f0..cf50049dae 100644 --- a/README.md +++ b/README.md @@ -48,8 +48,8 @@ in your program, and cannot run all programs: has no access to most platform-specific APIs or FFI. A few APIs have been implemented (such as printing to stdout) but most have not: for example, Miri currently does not support SIMD or networking. -* Miri currently does not check for data-races and other concurrency related - issues. +* Miri currently does not check for data-races and most other concurrency + related issues. [rust]: https://www.rust-lang.org/ [mir]: https://github.com/rust-lang/rfcs/blob/master/text/1211-mir.md From d6c03926ab8aff457a3e9b607bdcca654bfe17fe Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Sun, 19 Apr 2020 12:24:46 -0700 Subject: [PATCH 31/77] Rename MacOS set global dtor function. 
--- src/shims/foreign_items/posix/macos.rs | 2 +- src/shims/tls.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/shims/foreign_items/posix/macos.rs b/src/shims/foreign_items/posix/macos.rs index 9f65d0f9c4..9f6ea00b03 100644 --- a/src/shims/foreign_items/posix/macos.rs +++ b/src/shims/foreign_items/posix/macos.rs @@ -83,7 +83,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let dtor = this.memory.get_fn(dtor)?.as_instance()?; let data = this.read_scalar(args[1])?.not_undef()?; let active_thread = this.get_active_thread()?; - this.machine.tls.set_global_dtor(active_thread, dtor, data)?; + this.machine.tls.set_thread_global_dtor(active_thread, dtor, data)?; } // Querying system information diff --git a/src/shims/tls.rs b/src/shims/tls.rs index 89ec165965..c08ec78c13 100644 --- a/src/shims/tls.rs +++ b/src/shims/tls.rs @@ -123,7 +123,7 @@ impl<'tcx> TlsData<'tcx> { /// implementation](https://github.com/opensource-apple/dyld/blob/195030646877261f0c8c7ad8b001f52d6a26f514/src/threadLocalVariables.c#L389): /// /// // NOTE: this does not need locks because it only operates on current thread data - pub fn set_global_dtor( + pub fn set_thread_global_dtor( &mut self, thread: ThreadId, dtor: ty::Instance<'tcx>, From 69df2e19de4bf49df6e250cd367c553737dd6d0c Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Sun, 19 Apr 2020 14:01:12 -0700 Subject: [PATCH 32/77] Move prctl to Linux specific shims. --- src/shims/foreign_items/posix.rs | 5 ----- src/shims/foreign_items/posix/linux.rs | 7 +++++++ 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/src/shims/foreign_items/posix.rs b/src/shims/foreign_items/posix.rs index 4574d203ef..6e2a7a9fcb 100644 --- a/src/shims/foreign_items/posix.rs +++ b/src/shims/foreign_items/posix.rs @@ -313,11 +313,6 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx assert_eq!(args.len(), 0); this.pthread_self(dest)?; } - "prctl" => { - assert_eq!(args.len(), 5); - let result = this.prctl(args[0], args[1], args[2], args[3], args[4])?; - this.write_scalar(Scalar::from_i32(result), dest)?; - } "sched_yield" => { assert_eq!(args.len(), 0); let result = this.sched_yield()?; diff --git a/src/shims/foreign_items/posix/linux.rs b/src/shims/foreign_items/posix/linux.rs index a32f0fa606..eb58f74660 100644 --- a/src/shims/foreign_items/posix/linux.rs +++ b/src/shims/foreign_items/posix/linux.rs @@ -75,6 +75,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx this.write_null(dest)?; } + // Threading + "prctl" => { + assert_eq!(args.len(), 5); + let result = this.prctl(args[0], args[1], args[2], args[3], args[4])?; + this.write_scalar(Scalar::from_i32(result), dest)?; + } + // Dynamically invoked syscalls "syscall" => { let sys_getrandom = this From eab38dfe00d99bca183b7744823f8614d04e5304 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Sun, 19 Apr 2020 14:01:56 -0700 Subject: [PATCH 33/77] Change the warning message. --- src/shims/thread.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/shims/thread.rs b/src/shims/thread.rs index ccdf6df3f9..077da0b1a1 100644 --- a/src/shims/thread.rs +++ b/src/shims/thread.rs @@ -14,7 +14,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let this = self.eval_context_mut(); this.tcx.sess.warn( - "The thread support is experimental. \ + "thread support is experimental. 
\ For example, Miri does not detect data races yet.", ); From 75e6549c119cd0d30f764c3d8c2a742c63bc495f Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Sun, 19 Apr 2020 14:18:30 -0700 Subject: [PATCH 34/77] Improve prctl, add a test. --- src/shims/thread.rs | 15 ++++++++++----- .../concurrency/libc_prctl_thread_name.rs | 17 +++++++++++++++++ 2 files changed, 27 insertions(+), 5 deletions(-) create mode 100644 tests/run-pass/concurrency/libc_prctl_thread_name.rs diff --git a/src/shims/thread.rs b/src/shims/thread.rs index 077da0b1a1..ab3b436b86 100644 --- a/src/shims/thread.rs +++ b/src/shims/thread.rs @@ -102,12 +102,17 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let this = self.eval_context_mut(); let option = this.read_scalar(option)?.not_undef()?.to_i32()?; - if option != this.eval_libc_i32("PR_SET_NAME")? { - throw_unsup_format!("Miri supports only PR_SET_NAME"); + if option == this.eval_libc_i32("PR_SET_NAME")? { + let address = this.read_scalar(arg2)?.not_undef()?; + let name = this.memory.read_c_str(address)?.to_owned(); + this.set_active_thread_name(name)?; + } else if option == this.eval_libc_i32("PR_GET_NAME")? { + let address = this.read_scalar(arg2)?.not_undef()?; + let name = this.get_active_thread_name()?; + this.memory.write_bytes(address, name)?; + } else { + throw_unsup_format!("Unsupported prctl option."); } - let address = this.read_scalar(arg2)?.not_undef()?; - let name = this.memory.read_c_str(address)?.to_owned(); - this.set_active_thread_name(name)?; Ok(0) } diff --git a/tests/run-pass/concurrency/libc_prctl_thread_name.rs b/tests/run-pass/concurrency/libc_prctl_thread_name.rs new file mode 100644 index 0000000000..235ac27e0a --- /dev/null +++ b/tests/run-pass/concurrency/libc_prctl_thread_name.rs @@ -0,0 +1,17 @@ +// ignore-windows: No libc on Windows + +#![feature(rustc_private)] + +extern crate libc; + +use std::ffi::CString; + +fn main() { + unsafe { + let thread_name = CString::new("hello").expect("CString::new failed"); + assert_eq!(libc::prctl(libc::PR_SET_NAME, thread_name.as_ptr() as u64, 0, 0, 0), 0); + let mut buf = [0; 6]; + assert_eq!(libc::prctl(libc::PR_GET_NAME, buf.as_mut_ptr() as u64, 0, 0, 0), 0); + assert_eq!(thread_name.as_bytes_with_nul(), buf); + } +} From 94cbe88e8073381dbf7aeed2f0cf720b08f05785 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Sun, 19 Apr 2020 14:21:18 -0700 Subject: [PATCH 35/77] Many small changes to thread management. --- src/shims/sync.rs | 8 ++-- src/thread.rs | 112 ++++++++++++++++++++++++++++++++++------------ 2 files changed, 87 insertions(+), 33 deletions(-) diff --git a/src/shims/sync.rs b/src/shims/sync.rs index 6a1ea108db..97afbbe98f 100644 --- a/src/shims/sync.rs +++ b/src/shims/sync.rs @@ -419,7 +419,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx throw_ub_format!("called pthread_mutex_unlock on a mutex owned by another thread"); } else if locked_count == 1 { let blockset = mutex_get_or_create_blockset(this, mutex_op)?; - if let Some(new_owner) = this.unblock_random_thread(blockset)? { + if let Some(new_owner) = this.unblock_some_thread(blockset)? { // We have at least one thread waiting on this mutex. Transfer // ownership to it. 
mutex_set_owner(this, mutex_op, new_owner.to_u32_scalar())?; @@ -543,7 +543,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx assert_eq!(writers, 0); rwlock_set_readers(this, rwlock_op, Scalar::from_u32(new_readers))?; if new_readers == 0 { - if let Some(_writer) = this.unblock_random_thread(writer_blockset)? { + if let Some(_writer) = this.unblock_some_thread(writer_blockset)? { rwlock_set_writers(this, rwlock_op, Scalar::from_u32(1))?; } } @@ -551,11 +551,11 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx } else if writers != 0 { let reader_blockset = rwlock_get_or_create_reader_blockset(this, rwlock_op)?; rwlock_set_writers(this, rwlock_op, Scalar::from_u32(0))?; - if let Some(_writer) = this.unblock_random_thread(writer_blockset)? { + if let Some(_writer) = this.unblock_some_thread(writer_blockset)? { rwlock_set_writers(this, rwlock_op, Scalar::from_u32(1))?; } else { let mut readers = 0; - while let Some(_reader) = this.unblock_random_thread(reader_blockset)? { + while let Some(_reader) = this.unblock_some_thread(reader_blockset)? { readers += 1; } rwlock_set_readers(this, rwlock_op, Scalar::from_u32(readers))? diff --git a/src/thread.rs b/src/thread.rs index ab6a4c94db..5eb6560a09 100644 --- a/src/thread.rs +++ b/src/thread.rs @@ -31,6 +31,9 @@ pub enum SchedulingAction { #[derive(Clone, Copy, Debug, PartialOrd, Ord, PartialEq, Eq, Hash)] pub struct ThreadId(usize); +/// The main thread. When it terminates, the whole application terminates. +const MAIN_THREAD: ThreadId = ThreadId(0); + impl Idx for ThreadId { fn new(idx: usize) -> Self { ThreadId(idx) @@ -42,13 +45,13 @@ impl Idx for ThreadId { impl From for ThreadId { fn from(id: u64) -> Self { - Self(id as usize) + Self(usize::try_from(id).unwrap()) } } impl From for ThreadId { fn from(id: u32) -> Self { - Self(id as usize) + Self(usize::try_from(id).unwrap()) } } @@ -82,10 +85,10 @@ pub enum ThreadState { /// The thread tried to join the specified thread and is blocked until that /// thread terminates. BlockedOnJoin(ThreadId), - /// The thread is blocked and belongs to the given blockset.. + /// The thread is blocked and belongs to the given blockset. Blocked(BlockSetId), /// The thread has terminated its execution (we do not delete terminated - /// threads.) + /// threads). Terminated, } @@ -150,6 +153,7 @@ pub struct ThreadManager<'mir, 'tcx> { impl<'mir, 'tcx> Default for ThreadManager<'mir, 'tcx> { fn default() -> Self { let mut threads = IndexVec::new(); + // Create the main thread and add it to the list of threads. threads.push(Default::default()); Self { active_thread: ThreadId::new(0), @@ -170,14 +174,13 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { /// Set the allocation id as the allocation id of the given thread local /// static for the active thread. + /// + /// Panics if a thread local is initialized twice for the same thread. fn set_thread_local_alloc_id(&self, def_id: DefId, new_alloc_id: AllocId) { - assert!( - self.thread_local_alloc_ids - .borrow_mut() - .insert((def_id, self.active_thread), new_alloc_id) - .is_none(), - "a thread local initialized twice for the same thread" - ); + self.thread_local_alloc_ids + .borrow_mut() + .insert((def_id, self.active_thread), new_alloc_id) + .unwrap_none(); } /// Borrow the stack of the active thread. @@ -227,15 +230,20 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { } /// Mark that the active thread tries to join the thread with `joined_thread_id`. 
- fn join_thread(&mut self, joined_thread_id: ThreadId) { - assert!(!self.threads[joined_thread_id].detached, "Bug: trying to join a detached thread."); - assert_ne!(joined_thread_id, self.active_thread, "Bug: trying to join itself"); - assert!( - self.threads - .iter() - .all(|thread| thread.state != ThreadState::BlockedOnJoin(joined_thread_id)), - "Bug: multiple threads try to join the same thread." - ); + fn join_thread(&mut self, joined_thread_id: ThreadId) -> InterpResult<'tcx> { + if self.threads[joined_thread_id].detached { + throw_ub_format!("trying to join a detached thread"); + } + if joined_thread_id == self.active_thread { + throw_ub_format!("trying to join itself"); + } + if self + .threads + .iter() + .any(|thread| thread.state == ThreadState::BlockedOnJoin(joined_thread_id)) + { + throw_ub_format!("multiple threads try to join the same thread"); + } if self.threads[joined_thread_id].state != ThreadState::Terminated { // The joined thread is still running, we need to wait for it. self.active_thread_mut().state = ThreadState::BlockedOnJoin(joined_thread_id); @@ -245,6 +253,7 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { joined_thread_id ); } + Ok(()) } /// Set the name of the active thread. @@ -252,6 +261,15 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { self.active_thread_mut().thread_name = Some(new_thread_name); } + /// Get the name of the active thread. + fn get_thread_name(&mut self) -> InterpResult<'tcx, Vec> { + if let Some(ref thread_name) = self.active_thread_mut().thread_name { + Ok(thread_name.clone()) + } else { + throw_ub_format!("thread {:?} has no name set", self.active_thread) + } + } + /// Allocate a new blockset id. fn create_blockset(&mut self) -> BlockSetId { self.blockset_counter = self.blockset_counter.checked_add(1).unwrap(); @@ -267,7 +285,7 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { /// Unblock any one thread from the given blockset if it contains at least /// one. Return the id of the unblocked thread. - fn unblock_random_thread(&mut self, set: BlockSetId) -> Option { + fn unblock_some_thread(&mut self, set: BlockSetId) -> Option { for (id, thread) in self.threads.iter_enumerated_mut() { if thread.state == ThreadState::Blocked(set) { trace!("unblocking {:?} in blockset {:?}", id, set); @@ -284,6 +302,11 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { } /// Decide which action to take next and on which thread. + /// + /// The currently implemented scheduling policy is the one that is commonly + /// used in stateless model checkers such as Loom: run the active thread as + /// long as we can and switch only when we have to (the active thread was + /// blocked, terminated, or was explicitly asked to be preempted). fn schedule(&mut self) -> InterpResult<'tcx, SchedulingAction> { if self.threads[self.active_thread].check_terminated() { // Check if we need to unblock any threads. @@ -295,14 +318,24 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { } return Ok(SchedulingAction::ExecuteDtors); } + if self.threads[MAIN_THREAD].state == ThreadState::Terminated { + // The main thread terminated; stop the program. + if self.threads.iter().any(|thread| thread.state != ThreadState::Terminated) { + // FIXME: This check should be either configurable or just emit a warning. 
+ throw_unsup_format!("the main thread terminated without waiting for other threads"); + } + return Ok(SchedulingAction::Stop); + } if self.threads[self.active_thread].state == ThreadState::Enabled && !self.yield_active_thread { + // The currently active thread is still enabled, just continue with it. return Ok(SchedulingAction::ExecuteStep); } + // We need to pick a new thread for execution. for (id, thread) in self.threads.iter_enumerated() { if thread.state == ThreadState::Enabled { - if !(self.yield_active_thread && id == self.active_thread) { + if !self.yield_active_thread || id != self.active_thread { self.active_thread = id; break; } @@ -312,14 +345,16 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { if self.threads[self.active_thread].state == ThreadState::Enabled { return Ok(SchedulingAction::ExecuteStep); } + // We have not found a thread to execute. if self.threads.iter().all(|thread| thread.state == ThreadState::Terminated) { - Ok(SchedulingAction::Stop) + unreachable!(); } else { throw_machine_stop!(TerminationInfo::Deadlock); } } } +// Public interface to thread management. impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {} pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> { /// A workaround for thread-local statics until @@ -331,8 +366,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx val: &mut mir::interpret::ConstValue<'tcx>, ) -> InterpResult<'tcx> { let this = self.eval_context_ref(); - match val { - mir::interpret::ConstValue::Scalar(Scalar::Ptr(ptr)) => { + match *val { + mir::interpret::ConstValue::Scalar(Scalar::Ptr(ref mut ptr)) => { let alloc_id = ptr.alloc_id; let alloc = this.tcx.alloc_map.lock().get(alloc_id); let tcx = this.tcx; @@ -407,68 +442,86 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx } } + #[inline] fn create_thread(&mut self) -> InterpResult<'tcx, ThreadId> { let this = self.eval_context_mut(); Ok(this.machine.threads.create_thread()) } + #[inline] fn detach_thread(&mut self, thread_id: ThreadId) -> InterpResult<'tcx> { let this = self.eval_context_mut(); this.machine.threads.detach_thread(thread_id); Ok(()) } + #[inline] fn join_thread(&mut self, joined_thread_id: ThreadId) -> InterpResult<'tcx> { let this = self.eval_context_mut(); - this.machine.threads.join_thread(joined_thread_id); - Ok(()) + this.machine.threads.join_thread(joined_thread_id) } + #[inline] fn set_active_thread(&mut self, thread_id: ThreadId) -> InterpResult<'tcx, ThreadId> { let this = self.eval_context_mut(); Ok(this.machine.threads.set_active_thread_id(thread_id)) } + #[inline] fn get_active_thread(&self) -> InterpResult<'tcx, ThreadId> { let this = self.eval_context_ref(); Ok(this.machine.threads.get_active_thread_id()) } + #[inline] fn has_terminated(&self, thread_id: ThreadId) -> InterpResult<'tcx, bool> { let this = self.eval_context_ref(); Ok(this.machine.threads.has_terminated(thread_id)) } + #[inline] fn active_thread_stack(&self) -> &[Frame<'mir, 'tcx, Tag, FrameData<'tcx>>] { let this = self.eval_context_ref(); this.machine.threads.active_thread_stack() } + #[inline] fn active_thread_stack_mut(&mut self) -> &mut Vec>> { let this = self.eval_context_mut(); this.machine.threads.active_thread_stack_mut() } + #[inline] fn set_active_thread_name(&mut self, new_thread_name: Vec) -> InterpResult<'tcx, ()> { let this = self.eval_context_mut(); Ok(this.machine.threads.set_thread_name(new_thread_name)) } + #[inline] + fn 
get_active_thread_name(&mut self) -> InterpResult<'tcx, Vec> { + let this = self.eval_context_mut(); + this.machine.threads.get_thread_name() + } + + #[inline] fn create_blockset(&mut self) -> InterpResult<'tcx, BlockSetId> { let this = self.eval_context_mut(); Ok(this.machine.threads.create_blockset()) } + #[inline] fn block_active_thread(&mut self, set: BlockSetId) -> InterpResult<'tcx> { let this = self.eval_context_mut(); Ok(this.machine.threads.block_active_thread(set)) } - fn unblock_random_thread(&mut self, set: BlockSetId) -> InterpResult<'tcx, Option> { + #[inline] + fn unblock_some_thread(&mut self, set: BlockSetId) -> InterpResult<'tcx, Option> { let this = self.eval_context_mut(); - Ok(this.machine.threads.unblock_random_thread(set)) + Ok(this.machine.threads.unblock_some_thread(set)) } + #[inline] fn yield_active_thread(&mut self) -> InterpResult<'tcx> { let this = self.eval_context_mut(); this.machine.threads.yield_active_thread(); @@ -476,6 +529,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx } /// Decide which action to take next and on which thread. + #[inline] fn schedule(&mut self) -> InterpResult<'tcx, SchedulingAction> { let this = self.eval_context_mut(); this.machine.threads.schedule() From 80459bbf774f6238936837ace61fa6c1c95051ec Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Sun, 19 Apr 2020 14:22:55 -0700 Subject: [PATCH 36/77] Improve concurrency tests. --- .../concurrency/dangling_tls_lib.rs | 2 +- ...libc_pthread_rwlock_write_read_deadlock.rs | 32 ------------------- ...ibc_pthread_rwlock_write_write_deadlock.rs | 32 ------------------- .../{ => concurrency}/thread-spawn.rs | 5 +-- .../libc_pthread_mutex_deadlock.rs | 0 .../libc_pthread_mutex_wrong_owner.rs | 0 ...libc_pthread_rwlock_write_read_deadlock.rs | 25 +++++++++++++-- ...wlock_write_read_deadlock_single_thread.rs | 13 ++++++++ ...ibc_pthread_rwlock_write_write_deadlock.rs | 25 +++++++++++++-- ...lock_write_write_deadlock_single_thread.rs | 13 ++++++++ tests/run-pass/concurrency/locks.rs | 4 ++- tests/run-pass/concurrency/locks.stderr | 2 +- tests/run-pass/concurrency/simple.rs | 2 +- tests/run-pass/concurrency/simple.stderr | 2 +- tests/run-pass/concurrency/thread_locals.rs | 7 +++- .../run-pass/concurrency/thread_locals.stderr | 2 +- 16 files changed, 87 insertions(+), 79 deletions(-) delete mode 100644 tests/compile-fail/concurrency/libc_pthread_rwlock_write_read_deadlock.rs delete mode 100644 tests/compile-fail/concurrency/libc_pthread_rwlock_write_write_deadlock.rs rename tests/compile-fail/{ => concurrency}/thread-spawn.rs (52%) rename tests/compile-fail/{concurrency => sync}/libc_pthread_mutex_deadlock.rs (100%) rename tests/compile-fail/{concurrency => sync}/libc_pthread_mutex_wrong_owner.rs (100%) create mode 100644 tests/compile-fail/sync/libc_pthread_rwlock_write_read_deadlock_single_thread.rs create mode 100644 tests/compile-fail/sync/libc_pthread_rwlock_write_write_deadlock_single_thread.rs diff --git a/tests/compile-fail/concurrency/dangling_tls_lib.rs b/tests/compile-fail/concurrency/dangling_tls_lib.rs index ad12c107bf..684dd0e86f 100644 --- a/tests/compile-fail/concurrency/dangling_tls_lib.rs +++ b/tests/compile-fail/concurrency/dangling_tls_lib.rs @@ -1,4 +1,4 @@ -// ignore-windows +// ignore-windows: Concurrency on Windows is not supported yet. 
#![feature(thread_local_internals)] diff --git a/tests/compile-fail/concurrency/libc_pthread_rwlock_write_read_deadlock.rs b/tests/compile-fail/concurrency/libc_pthread_rwlock_write_read_deadlock.rs deleted file mode 100644 index 19dce431c8..0000000000 --- a/tests/compile-fail/concurrency/libc_pthread_rwlock_write_read_deadlock.rs +++ /dev/null @@ -1,32 +0,0 @@ -// ignore-windows: No libc on Windows - -#![feature(rustc_private)] - -extern crate libc; - -use std::cell::UnsafeCell; -use std::sync::Arc; -use std::thread; - -struct RwLock(UnsafeCell); - -unsafe impl Send for RwLock {} -unsafe impl Sync for RwLock {} - -fn new_lock() -> Arc { - Arc::new(RwLock(UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER))) -} - -fn main() { - unsafe { - let lock = new_lock(); - assert_eq!(libc::pthread_rwlock_rdlock(lock.0.get() as *mut _), 0); - - let lock_copy = lock.clone(); - thread::spawn(move || { - assert_eq!(libc::pthread_rwlock_wrlock(lock_copy.0.get() as *mut _), 0); //~ ERROR: deadlock - }) - .join() - .unwrap(); - } -} diff --git a/tests/compile-fail/concurrency/libc_pthread_rwlock_write_write_deadlock.rs b/tests/compile-fail/concurrency/libc_pthread_rwlock_write_write_deadlock.rs deleted file mode 100644 index 098c1c2fe2..0000000000 --- a/tests/compile-fail/concurrency/libc_pthread_rwlock_write_write_deadlock.rs +++ /dev/null @@ -1,32 +0,0 @@ -// ignore-windows: No libc on Windows - -#![feature(rustc_private)] - -extern crate libc; - -use std::cell::UnsafeCell; -use std::sync::Arc; -use std::thread; - -struct RwLock(UnsafeCell); - -unsafe impl Send for RwLock {} -unsafe impl Sync for RwLock {} - -fn new_lock() -> Arc { - Arc::new(RwLock(UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER))) -} - -fn main() { - unsafe { - let lock = new_lock(); - assert_eq!(libc::pthread_rwlock_wrlock(lock.0.get() as *mut _), 0); - - let lock_copy = lock.clone(); - thread::spawn(move || { - assert_eq!(libc::pthread_rwlock_wrlock(lock_copy.0.get() as *mut _), 0); //~ ERROR: deadlock - }) - .join() - .unwrap(); - } -} diff --git a/tests/compile-fail/thread-spawn.rs b/tests/compile-fail/concurrency/thread-spawn.rs similarity index 52% rename from tests/compile-fail/thread-spawn.rs rename to tests/compile-fail/concurrency/thread-spawn.rs index 4b9073f3a7..f0e4ab3817 100644 --- a/tests/compile-fail/thread-spawn.rs +++ b/tests/compile-fail/concurrency/thread-spawn.rs @@ -1,5 +1,6 @@ -// ignore-linux -// ignore-macos +// ignore-linux: Only Windows is not supported. +// ignore-macos: Only Windows is not supported. 
+ use std::thread; // error-pattern: Miri does not support threading diff --git a/tests/compile-fail/concurrency/libc_pthread_mutex_deadlock.rs b/tests/compile-fail/sync/libc_pthread_mutex_deadlock.rs similarity index 100% rename from tests/compile-fail/concurrency/libc_pthread_mutex_deadlock.rs rename to tests/compile-fail/sync/libc_pthread_mutex_deadlock.rs diff --git a/tests/compile-fail/concurrency/libc_pthread_mutex_wrong_owner.rs b/tests/compile-fail/sync/libc_pthread_mutex_wrong_owner.rs similarity index 100% rename from tests/compile-fail/concurrency/libc_pthread_mutex_wrong_owner.rs rename to tests/compile-fail/sync/libc_pthread_mutex_wrong_owner.rs diff --git a/tests/compile-fail/sync/libc_pthread_rwlock_write_read_deadlock.rs b/tests/compile-fail/sync/libc_pthread_rwlock_write_read_deadlock.rs index 1b460e7174..19dce431c8 100644 --- a/tests/compile-fail/sync/libc_pthread_rwlock_write_read_deadlock.rs +++ b/tests/compile-fail/sync/libc_pthread_rwlock_write_read_deadlock.rs @@ -4,10 +4,29 @@ extern crate libc; +use std::cell::UnsafeCell; +use std::sync::Arc; +use std::thread; + +struct RwLock(UnsafeCell); + +unsafe impl Send for RwLock {} +unsafe impl Sync for RwLock {} + +fn new_lock() -> Arc { + Arc::new(RwLock(UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER))) +} + fn main() { - let rw = std::cell::UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER); unsafe { - assert_eq!(libc::pthread_rwlock_wrlock(rw.get()), 0); - libc::pthread_rwlock_rdlock(rw.get()); //~ ERROR: deadlock + let lock = new_lock(); + assert_eq!(libc::pthread_rwlock_rdlock(lock.0.get() as *mut _), 0); + + let lock_copy = lock.clone(); + thread::spawn(move || { + assert_eq!(libc::pthread_rwlock_wrlock(lock_copy.0.get() as *mut _), 0); //~ ERROR: deadlock + }) + .join() + .unwrap(); } } diff --git a/tests/compile-fail/sync/libc_pthread_rwlock_write_read_deadlock_single_thread.rs b/tests/compile-fail/sync/libc_pthread_rwlock_write_read_deadlock_single_thread.rs new file mode 100644 index 0000000000..1b460e7174 --- /dev/null +++ b/tests/compile-fail/sync/libc_pthread_rwlock_write_read_deadlock_single_thread.rs @@ -0,0 +1,13 @@ +// ignore-windows: No libc on Windows + +#![feature(rustc_private)] + +extern crate libc; + +fn main() { + let rw = std::cell::UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER); + unsafe { + assert_eq!(libc::pthread_rwlock_wrlock(rw.get()), 0); + libc::pthread_rwlock_rdlock(rw.get()); //~ ERROR: deadlock + } +} diff --git a/tests/compile-fail/sync/libc_pthread_rwlock_write_write_deadlock.rs b/tests/compile-fail/sync/libc_pthread_rwlock_write_write_deadlock.rs index cc327ec46b..098c1c2fe2 100644 --- a/tests/compile-fail/sync/libc_pthread_rwlock_write_write_deadlock.rs +++ b/tests/compile-fail/sync/libc_pthread_rwlock_write_write_deadlock.rs @@ -4,10 +4,29 @@ extern crate libc; +use std::cell::UnsafeCell; +use std::sync::Arc; +use std::thread; + +struct RwLock(UnsafeCell); + +unsafe impl Send for RwLock {} +unsafe impl Sync for RwLock {} + +fn new_lock() -> Arc { + Arc::new(RwLock(UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER))) +} + fn main() { - let rw = std::cell::UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER); unsafe { - assert_eq!(libc::pthread_rwlock_wrlock(rw.get()), 0); - libc::pthread_rwlock_wrlock(rw.get()); //~ ERROR: deadlock + let lock = new_lock(); + assert_eq!(libc::pthread_rwlock_wrlock(lock.0.get() as *mut _), 0); + + let lock_copy = lock.clone(); + thread::spawn(move || { + assert_eq!(libc::pthread_rwlock_wrlock(lock_copy.0.get() as *mut _), 0); //~ ERROR: deadlock + }) + 
.join() + .unwrap(); } } diff --git a/tests/compile-fail/sync/libc_pthread_rwlock_write_write_deadlock_single_thread.rs b/tests/compile-fail/sync/libc_pthread_rwlock_write_write_deadlock_single_thread.rs new file mode 100644 index 0000000000..cc327ec46b --- /dev/null +++ b/tests/compile-fail/sync/libc_pthread_rwlock_write_write_deadlock_single_thread.rs @@ -0,0 +1,13 @@ +// ignore-windows: No libc on Windows + +#![feature(rustc_private)] + +extern crate libc; + +fn main() { + let rw = std::cell::UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER); + unsafe { + assert_eq!(libc::pthread_rwlock_wrlock(rw.get()), 0); + libc::pthread_rwlock_wrlock(rw.get()); //~ ERROR: deadlock + } +} diff --git a/tests/run-pass/concurrency/locks.rs b/tests/run-pass/concurrency/locks.rs index 90c10b8ffe..f5469712c5 100644 --- a/tests/run-pass/concurrency/locks.rs +++ b/tests/run-pass/concurrency/locks.rs @@ -1,4 +1,4 @@ -// ignore-windows +// ignore-windows: Concurrency on Windows is not supported yet. use std::sync::{Arc, Mutex, RwLock}; use std::thread; @@ -11,6 +11,7 @@ fn check_mutex() { let data = Arc::clone(&data); let thread = thread::spawn(move || { let mut data = data.lock().unwrap(); + thread::yield_now(); *data += 1; }); threads.push(thread); @@ -34,6 +35,7 @@ fn check_rwlock_write() { let data = Arc::clone(&data); let thread = thread::spawn(move || { let mut data = data.write().unwrap(); + thread::yield_now(); *data += 1; }); threads.push(thread); diff --git a/tests/run-pass/concurrency/locks.stderr b/tests/run-pass/concurrency/locks.stderr index 20a2bf3eeb..2dbfb7721d 100644 --- a/tests/run-pass/concurrency/locks.stderr +++ b/tests/run-pass/concurrency/locks.stderr @@ -1,2 +1,2 @@ -warning: The thread support is experimental. For example, Miri does not detect data races yet. +warning: thread support is experimental. For example, Miri does not detect data races yet. diff --git a/tests/run-pass/concurrency/simple.rs b/tests/run-pass/concurrency/simple.rs index 5adc521f59..ad47bb144b 100644 --- a/tests/run-pass/concurrency/simple.rs +++ b/tests/run-pass/concurrency/simple.rs @@ -1,4 +1,4 @@ -// ignore-windows +// ignore-windows: Concurrency on Windows is not supported yet. use std::thread; diff --git a/tests/run-pass/concurrency/simple.stderr b/tests/run-pass/concurrency/simple.stderr index 20a2bf3eeb..2dbfb7721d 100644 --- a/tests/run-pass/concurrency/simple.stderr +++ b/tests/run-pass/concurrency/simple.stderr @@ -1,2 +1,2 @@ -warning: The thread support is experimental. For example, Miri does not detect data races yet. +warning: thread support is experimental. For example, Miri does not detect data races yet. diff --git a/tests/run-pass/concurrency/thread_locals.rs b/tests/run-pass/concurrency/thread_locals.rs index 1805a1da3d..384c2ac915 100644 --- a/tests/run-pass/concurrency/thread_locals.rs +++ b/tests/run-pass/concurrency/thread_locals.rs @@ -1,4 +1,9 @@ -// ignore-windows +// ignore-windows: Concurrency on Windows is not supported yet. + +//! The main purpose of this test is to check that if we take a pointer to +//! thread's `t1` thread-local `A` and send it to another thread `t2`, +//! dereferencing the pointer on `t2` resolves to `t1`'s thread-local. In this +//! test, we also check that thread-locals act as per-thread statics. 
#![feature(thread_local)] diff --git a/tests/run-pass/concurrency/thread_locals.stderr b/tests/run-pass/concurrency/thread_locals.stderr index 20a2bf3eeb..2dbfb7721d 100644 --- a/tests/run-pass/concurrency/thread_locals.stderr +++ b/tests/run-pass/concurrency/thread_locals.stderr @@ -1,2 +1,2 @@ -warning: The thread support is experimental. For example, Miri does not detect data races yet. +warning: thread support is experimental. For example, Miri does not detect data races yet. From 17f7bc86ae4bf5d160ae13552387afb922f75cdc Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Sun, 19 Apr 2020 15:23:30 -0700 Subject: [PATCH 37/77] Fix how a pthread_create function argument is constructed. --- src/shims/thread.rs | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/src/shims/thread.rs b/src/shims/thread.rs index ab3b436b86..ee4369cb41 100644 --- a/src/shims/thread.rs +++ b/src/shims/thread.rs @@ -37,11 +37,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let fn_ptr = this.read_scalar(start_routine)?.not_undef()?; let instance = this.memory.get_fn(fn_ptr)?.as_instance()?; - let func_arg = match *arg { - rustc_mir::interpret::Operand::Immediate(immediate) => immediate, - _ => unreachable!(), - }; - let func_args = [func_arg]; + let func_arg = this.read_immediate(arg)?; + let func_args = [*func_arg]; let ret_place = this.allocate(this.layout_of(this.tcx.types.usize)?, MiriMemoryKind::Machine.into()); From 5b55e0706c3fff3fd015e5396422a2a1eda19779 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Sun, 19 Apr 2020 16:42:58 -0700 Subject: [PATCH 38/77] Add more concurrency tests. --- .../libc_pthread_create_main_terminate.rs | 24 +++++++++++++++ .../concurrency/libc_pthread_join_detached.rs | 24 +++++++++++++++ .../concurrency/libc_pthread_join_joined.rs | 24 +++++++++++++++ .../concurrency/libc_pthread_join_multiple.rs | 30 +++++++++++++++++++ .../concurrency/libc_pthread_join_self.rs | 16 ++++++++++ 5 files changed, 118 insertions(+) create mode 100644 tests/compile-fail/concurrency/libc_pthread_create_main_terminate.rs create mode 100644 tests/compile-fail/concurrency/libc_pthread_join_detached.rs create mode 100644 tests/compile-fail/concurrency/libc_pthread_join_joined.rs create mode 100644 tests/compile-fail/concurrency/libc_pthread_join_multiple.rs create mode 100644 tests/compile-fail/concurrency/libc_pthread_join_self.rs diff --git a/tests/compile-fail/concurrency/libc_pthread_create_main_terminate.rs b/tests/compile-fail/concurrency/libc_pthread_create_main_terminate.rs new file mode 100644 index 0000000000..e34d3f5c93 --- /dev/null +++ b/tests/compile-fail/concurrency/libc_pthread_create_main_terminate.rs @@ -0,0 +1,24 @@ +// ignore-windows: Concurrency on Windows is not supported yet. + +// Check that we terminate the program when the main thread terminates. + +//~^^^^ ERROR: unsupported operation: the main thread terminated without waiting for other threads + +#![feature(rustc_private)] + +extern crate libc; + +use std::{mem, ptr}; + +extern "C" fn thread_start(_null: *mut libc::c_void) -> *mut libc::c_void { + ptr::null_mut() +} + +fn main() { + unsafe { + let mut native: libc::pthread_t = mem::zeroed(); + let attr: libc::pthread_attr_t = mem::zeroed(); + // assert_eq!(libc::pthread_attr_init(&mut attr), 0); FIXME: this function is not yet implemented. 
+ assert_eq!(libc::pthread_create(&mut native, &attr, thread_start, ptr::null_mut()), 0); + } +} diff --git a/tests/compile-fail/concurrency/libc_pthread_join_detached.rs b/tests/compile-fail/concurrency/libc_pthread_join_detached.rs new file mode 100644 index 0000000000..ad83fb2efe --- /dev/null +++ b/tests/compile-fail/concurrency/libc_pthread_join_detached.rs @@ -0,0 +1,24 @@ +// ignore-windows: Concurrency on Windows is not supported yet. + +// Joining a detached thread is undefined behavior. + +#![feature(rustc_private)] + +extern crate libc; + +use std::{mem, ptr}; + +extern "C" fn thread_start(_null: *mut libc::c_void) -> *mut libc::c_void { + ptr::null_mut() +} + +fn main() { + unsafe { + let mut native: libc::pthread_t = mem::zeroed(); + let attr: libc::pthread_attr_t = mem::zeroed(); + // assert_eq!(libc::pthread_attr_init(&mut attr), 0); FIXME: this function is not yet implemented. + assert_eq!(libc::pthread_create(&mut native, &attr, thread_start, ptr::null_mut()), 0); + assert_eq!(libc::pthread_detach(native), 0); + assert_eq!(libc::pthread_join(native, ptr::null_mut()), 0); //~ ERROR: Undefined Behavior: trying to join a detached or already joined thread + } +} diff --git a/tests/compile-fail/concurrency/libc_pthread_join_joined.rs b/tests/compile-fail/concurrency/libc_pthread_join_joined.rs new file mode 100644 index 0000000000..3ca0424496 --- /dev/null +++ b/tests/compile-fail/concurrency/libc_pthread_join_joined.rs @@ -0,0 +1,24 @@ +// ignore-windows: Concurrency on Windows is not supported yet. + +// Joining an already joined thread is undefined behavior. + +#![feature(rustc_private)] + +extern crate libc; + +use std::{mem, ptr}; + +extern "C" fn thread_start(_null: *mut libc::c_void) -> *mut libc::c_void { + ptr::null_mut() +} + +fn main() { + unsafe { + let mut native: libc::pthread_t = mem::zeroed(); + let attr: libc::pthread_attr_t = mem::zeroed(); + // assert_eq!(libc::pthread_attr_init(&mut attr), 0); FIXME: this function is not yet implemented. + assert_eq!(libc::pthread_create(&mut native, &attr, thread_start, ptr::null_mut()), 0); + assert_eq!(libc::pthread_join(native, ptr::null_mut()), 0); + assert_eq!(libc::pthread_join(native, ptr::null_mut()), 0); //~ ERROR: Undefined Behavior: trying to join a detached or already joined thread + } +} diff --git a/tests/compile-fail/concurrency/libc_pthread_join_multiple.rs b/tests/compile-fail/concurrency/libc_pthread_join_multiple.rs new file mode 100644 index 0000000000..08ce94022c --- /dev/null +++ b/tests/compile-fail/concurrency/libc_pthread_join_multiple.rs @@ -0,0 +1,30 @@ +// ignore-windows: Concurrency on Windows is not supported yet. + +// Joining the same thread multiple times is undefined behavior. + +#![feature(rustc_private)] + +extern crate libc; + +use std::thread; +use std::{mem, ptr}; + +extern "C" fn thread_start(_null: *mut libc::c_void) -> *mut libc::c_void { + ptr::null_mut() +} + +fn main() { + unsafe { + let mut native: libc::pthread_t = mem::zeroed(); + let attr: libc::pthread_attr_t = mem::zeroed(); + // assert_eq!(libc::pthread_attr_init(&mut attr), 0); FIXME: this function is not yet implemented. 
+ assert_eq!(libc::pthread_create(&mut native, &attr, thread_start, ptr::null_mut()), 0); + let mut native_copy: libc::pthread_t = mem::zeroed(); + ptr::copy_nonoverlapping(&native, &mut native_copy, 1); + let handle = thread::spawn(move || { + assert_eq!(libc::pthread_join(native_copy, ptr::null_mut()), 0); //~ ERROR: Undefined Behavior: trying to join a detached or already joined thread + }); + assert_eq!(libc::pthread_join(native, ptr::null_mut()), 0); + handle.join().unwrap(); + } +} diff --git a/tests/compile-fail/concurrency/libc_pthread_join_self.rs b/tests/compile-fail/concurrency/libc_pthread_join_self.rs new file mode 100644 index 0000000000..1aeb274dcd --- /dev/null +++ b/tests/compile-fail/concurrency/libc_pthread_join_self.rs @@ -0,0 +1,16 @@ +// ignore-windows: Concurrency on Windows is not supported yet. + +// Joining itself is undefined behavior. + +#![feature(rustc_private)] + +extern crate libc; + +use std::ptr; + +fn main() { + unsafe { + let native: libc::pthread_t = libc::pthread_self(); + assert_eq!(libc::pthread_join(native, ptr::null_mut()), 0); //~ ERROR: Undefined Behavior: trying to join itself + } +} From e4dc3567f8bb2b5b50230aa31d4ad57b631ac8db Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Sun, 19 Apr 2020 16:43:40 -0700 Subject: [PATCH 39/77] Track if a thread was already joined. --- src/thread.rs | 57 ++++++++++++++++++++++++++++++++++----------------- 1 file changed, 38 insertions(+), 19 deletions(-) diff --git a/src/thread.rs b/src/thread.rs index 5eb6560a09..657792bd2c 100644 --- a/src/thread.rs +++ b/src/thread.rs @@ -92,6 +92,18 @@ pub enum ThreadState { Terminated, } +/// The join status of a thread. +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +enum ThreadJoinStatus { + /// The thread can be joined. + Joinable, + /// A thread is detached if its join handle was destroyed and no other + /// thread can join it. + Detached, + /// The thread was already joined by some thread and cannot be joined again. + Joined, +} + /// A thread. pub struct Thread<'mir, 'tcx> { state: ThreadState, @@ -99,11 +111,8 @@ pub struct Thread<'mir, 'tcx> { thread_name: Option>, /// The virtual call stack. stack: Vec>>, - /// Is the thread detached? - /// - /// A thread is detached if its join handle was destroyed and no other - /// thread can join it. - detached: bool, + /// The join status. + join_status: ThreadJoinStatus, } impl<'mir, 'tcx> Thread<'mir, 'tcx> { @@ -128,7 +137,12 @@ impl<'mir, 'tcx> std::fmt::Debug for Thread<'mir, 'tcx> { impl<'mir, 'tcx> Default for Thread<'mir, 'tcx> { fn default() -> Self { - Self { state: ThreadState::Enabled, thread_name: None, stack: Vec::new(), detached: false } + Self { + state: ThreadState::Enabled, + thread_name: None, + stack: Vec::new(), + join_status: ThreadJoinStatus::Joinable, + } } } @@ -225,25 +239,31 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { /// Mark the thread as detached, which means that no other thread will try /// to join it and the thread is responsible for cleaning up. - fn detach_thread(&mut self, id: ThreadId) { - self.threads[id].detached = true; + fn detach_thread(&mut self, id: ThreadId) -> InterpResult<'tcx> { + if self.threads[id].join_status != ThreadJoinStatus::Joinable { + throw_ub_format!("trying to detach thread that was already detached or joined"); + } + self.threads[id].join_status = ThreadJoinStatus::Detached; + Ok(()) } /// Mark that the active thread tries to join the thread with `joined_thread_id`. 
fn join_thread(&mut self, joined_thread_id: ThreadId) -> InterpResult<'tcx> { - if self.threads[joined_thread_id].detached { - throw_ub_format!("trying to join a detached thread"); + if self.threads[joined_thread_id].join_status != ThreadJoinStatus::Joinable { + throw_ub_format!("trying to join a detached or already joined thread"); } if joined_thread_id == self.active_thread { throw_ub_format!("trying to join itself"); } - if self - .threads - .iter() - .any(|thread| thread.state == ThreadState::BlockedOnJoin(joined_thread_id)) - { - throw_ub_format!("multiple threads try to join the same thread"); - } + assert!( + self.threads + .iter() + .all(|thread| thread.state != ThreadState::BlockedOnJoin(joined_thread_id)), + "a joinable thread has threads waiting for its termination" + ); + // Mark the joined thread as being joined so that we detect if other + // threads try to join it. + self.threads[joined_thread_id].join_status = ThreadJoinStatus::Joined; if self.threads[joined_thread_id].state != ThreadState::Terminated { // The joined thread is still running, we need to wait for it. self.active_thread_mut().state = ThreadState::BlockedOnJoin(joined_thread_id); @@ -451,8 +471,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx #[inline] fn detach_thread(&mut self, thread_id: ThreadId) -> InterpResult<'tcx> { let this = self.eval_context_mut(); - this.machine.threads.detach_thread(thread_id); - Ok(()) + this.machine.threads.detach_thread(thread_id) } #[inline] From 9a01c3fa3e294cfb22fb259da05e54f7ec2a6320 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Sun, 19 Apr 2020 20:52:53 -0700 Subject: [PATCH 40/77] Clarify comments about TLS destructor handling in Windows, add a test for TLS destructors. --- src/eval.rs | 3 +- src/shims/tls.rs | 3 +- src/thread.rs | 11 +++++ .../concurrency/dangling_tls_lib.rs | 3 ++ tests/run-pass/concurrency/tls_lib_drop.rs | 46 +++++++++++++++++++ .../run-pass/concurrency/tls_lib_drop.stderr | 2 + .../run-pass/concurrency/tls_lib_drop.stdout | 2 + 7 files changed, 67 insertions(+), 3 deletions(-) create mode 100644 tests/run-pass/concurrency/tls_lib_drop.rs create mode 100644 tests/run-pass/concurrency/tls_lib_drop.stderr create mode 100644 tests/run-pass/concurrency/tls_lib_drop.stdout diff --git a/src/eval.rs b/src/eval.rs index ab82c39836..c5a04d7585 100644 --- a/src/eval.rs +++ b/src/eval.rs @@ -222,7 +222,8 @@ pub fn eval_main<'tcx>(tcx: TyCtxt<'tcx>, main_id: DefId, config: MiriConfig) -> // Read the return code pointer *before* we run TLS destructors, to assert // that it was written to by the time that `start` lang item returned. let return_code = ecx.read_scalar(ret_place.into())?.not_undef()?.to_machine_isize(&ecx)?; - // Global destructors. + // Run Windows destructors. (We do not support concurrency on Windows + // yet, so we run the destructor of the main thread separately.) 
ecx.run_windows_tls_dtors()?; Ok(return_code) })(); diff --git a/src/shims/tls.rs b/src/shims/tls.rs index c08ec78c13..31a9ee3c94 100644 --- a/src/shims/tls.rs +++ b/src/shims/tls.rs @@ -6,7 +6,6 @@ use std::collections::HashSet; use log::trace; -use rustc_index::vec::Idx; use rustc_middle::ty; use rustc_target::abi::{Size, HasDataLayout}; @@ -201,7 +200,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx return Ok(()); } let active_thread = this.get_active_thread()?; - assert_eq!(active_thread.index(), 0, "concurrency on Windows not supported"); + assert_eq!(this.get_total_thread_count()?, 1, "concurrency on Windows not supported"); assert!(!this.machine.tls.dtors_running.contains(&active_thread), "running TLS dtors twice"); this.machine.tls.dtors_running.insert(active_thread); // Windows has a special magic linker section that is run on certain events. diff --git a/src/thread.rs b/src/thread.rs index 657792bd2c..8c353d6a88 100644 --- a/src/thread.rs +++ b/src/thread.rs @@ -227,6 +227,11 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { self.active_thread } + /// Get the total number of threads that were ever spawn by this program. + fn get_total_thread_count(&self) -> usize { + self.threads.len() + } + /// Has the given thread terminated? fn has_terminated(&self, thread_id: ThreadId) -> bool { self.threads[thread_id].state == ThreadState::Terminated @@ -492,6 +497,12 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx Ok(this.machine.threads.get_active_thread_id()) } + #[inline] + fn get_total_thread_count(&self) -> InterpResult<'tcx, usize> { + let this = self.eval_context_ref(); + Ok(this.machine.threads.get_total_thread_count()) + } + #[inline] fn has_terminated(&self, thread_id: ThreadId) -> InterpResult<'tcx, bool> { let this = self.eval_context_ref(); diff --git a/tests/compile-fail/concurrency/dangling_tls_lib.rs b/tests/compile-fail/concurrency/dangling_tls_lib.rs index 684dd0e86f..6be5538bb4 100644 --- a/tests/compile-fail/concurrency/dangling_tls_lib.rs +++ b/tests/compile-fail/concurrency/dangling_tls_lib.rs @@ -1,5 +1,8 @@ // ignore-windows: Concurrency on Windows is not supported yet. +//! Check that we catch if a thread local is accessed after the thread has +//! terminated. + #![feature(thread_local_internals)] use std::cell::RefCell; diff --git a/tests/run-pass/concurrency/tls_lib_drop.rs b/tests/run-pass/concurrency/tls_lib_drop.rs new file mode 100644 index 0000000000..c9b04a7282 --- /dev/null +++ b/tests/run-pass/concurrency/tls_lib_drop.rs @@ -0,0 +1,46 @@ +// ignore-windows: Concurrency on Windows is not supported yet. + +//! Check that destructors of the library thread locals are executed immediately +//! after a thread terminates. 
+ +#![feature(thread_local_internals)] + +use std::cell::RefCell; +use std::thread; + +struct TestCell { + value: RefCell, +} + +impl Drop for TestCell { + fn drop(&mut self) { + println!("Dropping: {}", self.value.borrow()) + } +} + +static A: std::thread::LocalKey = { + #[inline] + fn __init() -> TestCell { + TestCell { value: RefCell::new(0) } + } + + unsafe fn __getit() -> Option<&'static TestCell> { + static __KEY: std::thread::__OsLocalKeyInner = + std::thread::__OsLocalKeyInner::new(); + __KEY.get(__init) + } + + unsafe { std::thread::LocalKey::new(__getit) } +}; + +fn main() { + thread::spawn(|| { + A.with(|f| { + assert_eq!(*f.value.borrow(), 0); + *f.value.borrow_mut() = 5; + }); + }) + .join() + .unwrap(); + println!("Continue main.") +} diff --git a/tests/run-pass/concurrency/tls_lib_drop.stderr b/tests/run-pass/concurrency/tls_lib_drop.stderr new file mode 100644 index 0000000000..2dbfb7721d --- /dev/null +++ b/tests/run-pass/concurrency/tls_lib_drop.stderr @@ -0,0 +1,2 @@ +warning: thread support is experimental. For example, Miri does not detect data races yet. + diff --git a/tests/run-pass/concurrency/tls_lib_drop.stdout b/tests/run-pass/concurrency/tls_lib_drop.stdout new file mode 100644 index 0000000000..d2bbb866b7 --- /dev/null +++ b/tests/run-pass/concurrency/tls_lib_drop.stdout @@ -0,0 +1,2 @@ +Dropping: 5 +Continue main. From 3bb16574486ecabd702587c6811ae5154cb3b12c Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Sun, 19 Apr 2020 21:03:23 -0700 Subject: [PATCH 41/77] Small style fix. --- src/machine.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/machine.rs b/src/machine.rs index a81273960d..4032a399e3 100644 --- a/src/machine.rs +++ b/src/machine.rs @@ -282,7 +282,7 @@ impl<'mir, 'tcx> Evaluator<'mir, 'tcx> { panic_payload: None, time_anchor: Instant::now(), layouts, - threads: Default::default(), + threads: ThreadManager::default(), } } } From 452e36efb3840f4b44c70c3939e3a88f27e47710 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Sun, 19 Apr 2020 21:21:22 -0700 Subject: [PATCH 42/77] Print the thread name in Debug. --- src/thread.rs | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/src/thread.rs b/src/thread.rs index 8c353d6a88..76f1e20cb1 100644 --- a/src/thread.rs +++ b/src/thread.rs @@ -131,7 +131,16 @@ impl<'mir, 'tcx> Thread<'mir, 'tcx> { impl<'mir, 'tcx> std::fmt::Debug for Thread<'mir, 'tcx> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{:?}", self.state) + if let Some(ref name) = self.thread_name { + if let Ok(name_str) = std::str::from_utf8(name) { + write!(f, "{}", name_str)?; + } else { + write!(f, "")?; + } + } else { + write!(f, "unnamed")?; + } + write!(f, "({:?}, {:?})", self.state, self.join_status) } } From 69eaaadc28e027fd749c2f6e500daa48f8c2aba3 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Mon, 20 Apr 2020 11:53:19 -0700 Subject: [PATCH 43/77] Fix merge error. 
--- src/machine.rs | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/src/machine.rs b/src/machine.rs index 4032a399e3..3853f65599 100644 --- a/src/machine.rs +++ b/src/machine.rs @@ -545,20 +545,6 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'mir, 'tcx> { ecx.active_thread_stack_mut() } - #[inline(always)] - fn stack<'a>( - ecx: &'a InterpCx<'mir, 'tcx, Self>, - ) -> &'a [Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>] { - &ecx.machine.stack - } - - #[inline(always)] - fn stack_mut<'a>( - ecx: &'a mut InterpCx<'mir, 'tcx, Self>, - ) -> &'a mut Vec> { - &mut ecx.machine.stack - } - #[inline(always)] fn after_stack_push(ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx> { if ecx.memory.extra.stacked_borrows.is_some() { From e7c2694b8b93ad44256d7ce3d179f172a4c6a9b0 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Mon, 20 Apr 2020 13:22:28 -0700 Subject: [PATCH 44/77] Make the main thread detached. --- src/thread.rs | 4 +++- .../concurrency/libc_pthread_join_self.rs | 14 +++++++++----- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/src/thread.rs b/src/thread.rs index 76f1e20cb1..80c9965aa1 100644 --- a/src/thread.rs +++ b/src/thread.rs @@ -177,7 +177,9 @@ impl<'mir, 'tcx> Default for ThreadManager<'mir, 'tcx> { fn default() -> Self { let mut threads = IndexVec::new(); // Create the main thread and add it to the list of threads. - threads.push(Default::default()); + let mut main_thread = Thread::default(); + main_thread.join_status = ThreadJoinStatus::Detached; + threads.push(main_thread); Self { active_thread: ThreadId::new(0), threads: threads, diff --git a/tests/compile-fail/concurrency/libc_pthread_join_self.rs b/tests/compile-fail/concurrency/libc_pthread_join_self.rs index 1aeb274dcd..d765a95d8b 100644 --- a/tests/compile-fail/concurrency/libc_pthread_join_self.rs +++ b/tests/compile-fail/concurrency/libc_pthread_join_self.rs @@ -6,11 +6,15 @@ extern crate libc; -use std::ptr; +use std::{ptr, thread}; fn main() { - unsafe { - let native: libc::pthread_t = libc::pthread_self(); - assert_eq!(libc::pthread_join(native, ptr::null_mut()), 0); //~ ERROR: Undefined Behavior: trying to join itself - } + let handle = thread::spawn(|| { + unsafe { + let native: libc::pthread_t = libc::pthread_self(); + assert_eq!(libc::pthread_join(native, ptr::null_mut()), 0); //~ ERROR: Undefined Behavior: trying to join itself + } + }); + thread::yield_now(); + handle.join().unwrap(); } From e7b82fde4a06ec7a75511a900379017d37d991fd Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Mon, 20 Apr 2020 13:22:51 -0700 Subject: [PATCH 45/77] Fix the test annotation. --- .../concurrency/libc_pthread_create_main_terminate.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/compile-fail/concurrency/libc_pthread_create_main_terminate.rs b/tests/compile-fail/concurrency/libc_pthread_create_main_terminate.rs index e34d3f5c93..ea11691955 100644 --- a/tests/compile-fail/concurrency/libc_pthread_create_main_terminate.rs +++ b/tests/compile-fail/concurrency/libc_pthread_create_main_terminate.rs @@ -1,9 +1,8 @@ // ignore-windows: Concurrency on Windows is not supported yet. +// error-pattern: unsupported operation: the main thread terminated without waiting for other threads // Check that we terminate the program when the main thread terminates. 
-//~^^^^ ERROR: unsupported operation: the main thread terminated without waiting for other threads - #![feature(rustc_private)] extern crate libc; From 40e50bf58bd82482026bb1e1f0766bdf909fe9cb Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Mon, 20 Apr 2020 13:23:11 -0700 Subject: [PATCH 46/77] Clarify test comments. --- tests/compile-fail/concurrency/libc_pthread_join_multiple.rs | 2 +- tests/run-pass/concurrency/tls_lib_drop.rs | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/compile-fail/concurrency/libc_pthread_join_multiple.rs b/tests/compile-fail/concurrency/libc_pthread_join_multiple.rs index 08ce94022c..949fcc282f 100644 --- a/tests/compile-fail/concurrency/libc_pthread_join_multiple.rs +++ b/tests/compile-fail/concurrency/libc_pthread_join_multiple.rs @@ -1,6 +1,6 @@ // ignore-windows: Concurrency on Windows is not supported yet. -// Joining the same thread multiple times is undefined behavior. +// Joining the same thread from multiple threads is undefined behavior. #![feature(rustc_private)] diff --git a/tests/run-pass/concurrency/tls_lib_drop.rs b/tests/run-pass/concurrency/tls_lib_drop.rs index c9b04a7282..d39528cfef 100644 --- a/tests/run-pass/concurrency/tls_lib_drop.rs +++ b/tests/run-pass/concurrency/tls_lib_drop.rs @@ -2,6 +2,9 @@ //! Check that destructors of the library thread locals are executed immediately //! after a thread terminates. +//! +//! FIXME: We should have a similar test for thread-local statics (statics +//! annotated with `#[thread_local]`) once they support destructors. #![feature(thread_local_internals)] From 8a7dbde372388c0f4125f3aad0f697f8af138026 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Mon, 20 Apr 2020 16:49:27 -0700 Subject: [PATCH 47/77] Check prctl argument types and fix the test. --- src/shims/thread.rs | 20 ++++++++++++++++--- .../concurrency/libc_prctl_thread_name.rs | 4 ++-- 2 files changed, 19 insertions(+), 5 deletions(-) diff --git a/src/shims/thread.rs b/src/shims/thread.rs index ee4369cb41..6518732622 100644 --- a/src/shims/thread.rs +++ b/src/shims/thread.rs @@ -92,12 +92,26 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx &mut self, option: OpTy<'tcx, Tag>, arg2: OpTy<'tcx, Tag>, - _arg3: OpTy<'tcx, Tag>, - _arg4: OpTy<'tcx, Tag>, - _arg5: OpTy<'tcx, Tag>, + arg3: OpTy<'tcx, Tag>, + arg4: OpTy<'tcx, Tag>, + arg5: OpTy<'tcx, Tag>, ) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); + // prctl last 5 arguments are declared as variadic. Therefore, we need + // to check their types manually. + let c_long_size = this.libc_ty_layout("c_long")?.size.bytes(); + let check_arg = |arg: OpTy<'tcx, Tag>| -> InterpResult<'tcx> { + match this.read_scalar(arg)?.not_undef()? { + Scalar::Raw { size, .. } if u64::from(size) == c_long_size => Ok(()), + _ => throw_ub_format!("an argument of unsupported type was passed to prctl"), + } + }; + check_arg(arg2)?; + check_arg(arg3)?; + check_arg(arg4)?; + check_arg(arg5)?; + let option = this.read_scalar(option)?.not_undef()?.to_i32()?; if option == this.eval_libc_i32("PR_SET_NAME")? 
{ let address = this.read_scalar(arg2)?.not_undef()?; diff --git a/tests/run-pass/concurrency/libc_prctl_thread_name.rs b/tests/run-pass/concurrency/libc_prctl_thread_name.rs index 235ac27e0a..aa3f62f03d 100644 --- a/tests/run-pass/concurrency/libc_prctl_thread_name.rs +++ b/tests/run-pass/concurrency/libc_prctl_thread_name.rs @@ -9,9 +9,9 @@ use std::ffi::CString; fn main() { unsafe { let thread_name = CString::new("hello").expect("CString::new failed"); - assert_eq!(libc::prctl(libc::PR_SET_NAME, thread_name.as_ptr() as u64, 0, 0, 0), 0); + assert_eq!(libc::prctl(libc::PR_SET_NAME, thread_name.as_ptr() as libc::c_long, 0 as libc::c_long, 0 as libc::c_long, 0 as libc::c_long), 0); let mut buf = [0; 6]; - assert_eq!(libc::prctl(libc::PR_GET_NAME, buf.as_mut_ptr() as u64, 0, 0, 0), 0); + assert_eq!(libc::prctl(libc::PR_GET_NAME, buf.as_mut_ptr() as libc::c_long, 0 as libc::c_long, 0 as libc::c_long, 0 as libc::c_long), 0); assert_eq!(thread_name.as_bytes_with_nul(), buf); } } From d45e985669d1bc532862ed3a50dce9cfdf08d7ff Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Mon, 20 Apr 2020 16:57:30 -0700 Subject: [PATCH 48/77] Clarify FIXME. --- src/thread.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/thread.rs b/src/thread.rs index 80c9965aa1..aee9b8a6f5 100644 --- a/src/thread.rs +++ b/src/thread.rs @@ -357,7 +357,12 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { if self.threads[MAIN_THREAD].state == ThreadState::Terminated { // The main thread terminated; stop the program. if self.threads.iter().any(|thread| thread.state != ThreadState::Terminated) { - // FIXME: This check should be either configurable or just emit a warning. + // FIXME: This check should be either configurable or just emit + // a warning. For example, it seems normal for a program to + // terminate without waiting for its detached threads to + // terminate. However, this case is not trivial to support + // because we also probably do not want to consider the memory + // owned by these threads as leaked. throw_unsup_format!("the main thread terminated without waiting for other threads"); } return Ok(SchedulingAction::Stop); From eaa63266d8456ac8c3d1b82f4e1078fcd271e95c Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Mon, 20 Apr 2020 17:02:43 -0700 Subject: [PATCH 49/77] Make multiple threads to try to join a thread while it is still running. --- tests/compile-fail/concurrency/libc_pthread_join_multiple.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/compile-fail/concurrency/libc_pthread_join_multiple.rs b/tests/compile-fail/concurrency/libc_pthread_join_multiple.rs index 949fcc282f..0d99b69ed9 100644 --- a/tests/compile-fail/concurrency/libc_pthread_join_multiple.rs +++ b/tests/compile-fail/concurrency/libc_pthread_join_multiple.rs @@ -10,6 +10,8 @@ use std::thread; use std::{mem, ptr}; extern "C" fn thread_start(_null: *mut libc::c_void) -> *mut libc::c_void { + thread::yield_now(); + thread::yield_now(); ptr::null_mut() } From cc9248a7c891614cf79e7ec708de2ff99d4eb06c Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Mon, 20 Apr 2020 17:10:25 -0700 Subject: [PATCH 50/77] Ignore prctl test on MacOS because it does not support it. 
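
prctl(2) is a Linux-only interface, so the thread-name test now carries an
`ignore-macos` header next to the existing `ignore-windows` one. For
comparison, the compile-time way to restrict such a check is a `cfg` gate on
the call site (a sketch; `test_prctl_thread_name` stands for the prctl-based
check used in this series):

    fn main() {
        // PR_SET_NAME / PR_GET_NAME via prctl only exist on Linux targets.
        #[cfg(target_os = "linux")]
        test_prctl_thread_name();
    }
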
--- tests/run-pass/concurrency/libc_prctl_thread_name.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/run-pass/concurrency/libc_prctl_thread_name.rs b/tests/run-pass/concurrency/libc_prctl_thread_name.rs index aa3f62f03d..b8ba27b3a8 100644 --- a/tests/run-pass/concurrency/libc_prctl_thread_name.rs +++ b/tests/run-pass/concurrency/libc_prctl_thread_name.rs @@ -1,4 +1,5 @@ // ignore-windows: No libc on Windows +// ignore-macos: No prctl on MacOS #![feature(rustc_private)] From 90e9a87fa79f541efecadde6daa53299b9350e07 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Mon, 20 Apr 2020 17:13:22 -0700 Subject: [PATCH 51/77] Add an explanatory comment to the test. --- tests/compile-fail/concurrency/libc_pthread_join_multiple.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/compile-fail/concurrency/libc_pthread_join_multiple.rs b/tests/compile-fail/concurrency/libc_pthread_join_multiple.rs index 0d99b69ed9..f8a43cfcde 100644 --- a/tests/compile-fail/concurrency/libc_pthread_join_multiple.rs +++ b/tests/compile-fail/concurrency/libc_pthread_join_multiple.rs @@ -10,6 +10,7 @@ use std::thread; use std::{mem, ptr}; extern "C" fn thread_start(_null: *mut libc::c_void) -> *mut libc::c_void { + // Yield the thread several times so that other threads can join it. thread::yield_now(); thread::yield_now(); ptr::null_mut() From 8240ed26a97a6d1642546e56144870305ff4676c Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Mon, 20 Apr 2020 17:23:51 -0700 Subject: [PATCH 52/77] Change the test not to rely on internals. --- tests/run-pass/concurrency/tls_lib_drop.rs | 22 +++------------------- 1 file changed, 3 insertions(+), 19 deletions(-) diff --git a/tests/run-pass/concurrency/tls_lib_drop.rs b/tests/run-pass/concurrency/tls_lib_drop.rs index d39528cfef..0d1808cbe0 100644 --- a/tests/run-pass/concurrency/tls_lib_drop.rs +++ b/tests/run-pass/concurrency/tls_lib_drop.rs @@ -2,11 +2,6 @@ //! Check that destructors of the library thread locals are executed immediately //! after a thread terminates. -//! -//! FIXME: We should have a similar test for thread-local statics (statics -//! annotated with `#[thread_local]`) once they support destructors. - -#![feature(thread_local_internals)] use std::cell::RefCell; use std::thread; @@ -21,20 +16,9 @@ impl Drop for TestCell { } } -static A: std::thread::LocalKey = { - #[inline] - fn __init() -> TestCell { - TestCell { value: RefCell::new(0) } - } - - unsafe fn __getit() -> Option<&'static TestCell> { - static __KEY: std::thread::__OsLocalKeyInner = - std::thread::__OsLocalKeyInner::new(); - __KEY.get(__init) - } - - unsafe { std::thread::LocalKey::new(__getit) } -}; +thread_local! { + static A: TestCell = TestCell { value: RefCell::new(0) }; +} fn main() { thread::spawn(|| { From feb188360ee5ff6ae4cdc8e6a20ec29f9cd385ba Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Fri, 24 Apr 2020 15:16:24 -0700 Subject: [PATCH 53/77] Unify TLS dtors; move stepping outside. 
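
Until now the TLS shims called `this.run()` themselves, stepping the
interpreter to completion from inside the shim. After this patch they only
schedule work: each call pushes the next destructor frame(s), re-enables the
already-terminated thread, and returns, while all stepping is done by the
scheduler-driven main loop in `src/eval.rs`. Roughly (a sketch of the loop
shape; error plumbing elided):

    loop {
        match ecx.schedule()? {
            SchedulingAction::ExecuteStep => {
                // One step on whichever thread the scheduler picked.
                assert!(ecx.step()?, "a terminated thread was scheduled for execution");
            }
            SchedulingAction::ExecuteDtors => {
                // Pushes the next TLS destructor frame(s) and re-enables the
                // thread; called again on later iterations until none remain.
                ecx.schedule_tls_dtors_for_active_thread()?;
            }
            SchedulingAction::Stop => break,
        }
        ecx.process_diagnostics();
    }

Because only the next pthread destructor is scheduled per call, `TlsData` now
remembers the key of the last destructor it handed out for each thread
(`last_dtor_key`) so that the following call can continue from there.
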
--- src/eval.rs | 7 +- src/shims/tls.rs | 112 +++++++++++------- src/thread.rs | 13 ++ .../concurrency/tls_lib_drop_single_thread.rs | 25 ++++ .../tls_lib_drop_single_thread.stderr | 2 + 5 files changed, 110 insertions(+), 49 deletions(-) create mode 100644 tests/run-pass/concurrency/tls_lib_drop_single_thread.rs create mode 100644 tests/run-pass/concurrency/tls_lib_drop_single_thread.stderr diff --git a/src/eval.rs b/src/eval.rs index c5a04d7585..9131946f8d 100644 --- a/src/eval.rs +++ b/src/eval.rs @@ -211,7 +211,7 @@ pub fn eval_main<'tcx>(tcx: TyCtxt<'tcx>, main_id: DefId, config: MiriConfig) -> assert!(ecx.step()?, "a terminated thread was scheduled for execution"); } SchedulingAction::ExecuteDtors => { - ecx.run_tls_dtors_for_active_thread()?; + ecx.schedule_tls_dtors_for_active_thread()?; } SchedulingAction::Stop => { break; @@ -219,12 +219,7 @@ pub fn eval_main<'tcx>(tcx: TyCtxt<'tcx>, main_id: DefId, config: MiriConfig) -> } ecx.process_diagnostics(); } - // Read the return code pointer *before* we run TLS destructors, to assert - // that it was written to by the time that `start` lang item returned. let return_code = ecx.read_scalar(ret_place.into())?.not_undef()?.to_machine_isize(&ecx)?; - // Run Windows destructors. (We do not support concurrency on Windows - // yet, so we run the destructor of the main thread separately.) - ecx.run_windows_tls_dtors()?; Ok(return_code) })(); diff --git a/src/shims/tls.rs b/src/shims/tls.rs index 31a9ee3c94..615950621a 100644 --- a/src/shims/tls.rs +++ b/src/shims/tls.rs @@ -38,6 +38,9 @@ pub struct TlsData<'tcx> { /// Whether we are in the "destruct" phase, during which some operations are UB. dtors_running: HashSet, + + /// The last TlsKey used to retrieve a TLS destructor. + last_dtor_key: BTreeMap, } impl<'tcx> Default for TlsData<'tcx> { @@ -47,6 +50,7 @@ impl<'tcx> Default for TlsData<'tcx> { keys: Default::default(), global_dtors: Default::default(), dtors_running: Default::default(), + last_dtor_key: Default::default(), } } } @@ -187,21 +191,15 @@ impl<'tcx> TlsData<'tcx> { } } -impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {} -pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> { - - /// Run TLS destructors for the main thread on Windows. The implementation - /// assumes that we do not support concurrency on Windows yet. - /// - /// Note: on non-Windows OS this function is a no-op. - fn run_windows_tls_dtors(&mut self) -> InterpResult<'tcx> { +impl<'mir, 'tcx: 'mir> EvalContextPrivExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {} +trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> { + /// Schedule TLS destructors for the main thread on Windows. The + /// implementation assumes that we do not support concurrency on Windows + /// yet. + fn schedule_windows_tls_dtors(&mut self) -> InterpResult<'tcx> { let this = self.eval_context_mut(); - if this.tcx.sess.target.target.target_os != "windows" { - return Ok(()); - } let active_thread = this.get_active_thread()?; assert_eq!(this.get_total_thread_count()?, 1, "concurrency on Windows not supported"); - assert!(!this.machine.tls.dtors_running.contains(&active_thread), "running TLS dtors twice"); this.machine.tls.dtors_running.insert(active_thread); // Windows has a special magic linker section that is run on certain events. 
// Instead of searching for that section and supporting arbitrary hooks in there @@ -221,30 +219,18 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx StackPopCleanup::None { cleanup: true }, )?; - // step until out of stackframes - this.run()?; - - // Windows doesn't have other destructors. + this.enable_thread(active_thread)?; Ok(()) } - /// Run TLS destructors for the active thread. + /// Schedule the MacOS global dtor to be executed. /// - /// Note: on Windows OS this function is a no-op because we do not support - /// concurrency on Windows yet. - /// - /// FIXME: we do not support yet deallocation of thread local statics. - fn run_tls_dtors_for_active_thread(&mut self) -> InterpResult<'tcx> { + /// Note: It is safe to call this function also on other Unixes. + fn schedule_macos_global_tls_dtors(&mut self) -> InterpResult<'tcx> { let this = self.eval_context_mut(); - if this.tcx.sess.target.target.target_os == "windows" { - return Ok(()); - } let thread_id = this.get_active_thread()?; - assert!(!this.machine.tls.dtors_running.contains(&thread_id), "running TLS dtors twice"); - this.machine.tls.dtors_running.insert(thread_id); - // The macOS global dtor runs "before any TLS slots get freed", so do that first. - if let Some(&(instance, data)) = this.machine.tls.global_dtors.get(&thread_id) { + if let Some((instance, data)) = this.machine.tls.global_dtors.remove(&thread_id) { trace!("Running global dtor {:?} on {:?} at {:?}", instance, data, thread_id); let ret_place = MPlaceTy::dangling(this.machine.layouts.unit, this).into(); @@ -255,14 +241,33 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx StackPopCleanup::None { cleanup: true }, )?; - // step until out of stackframes - this.run()?; + // Enable the thread so that it steps through the destructor which + // we just scheduled. Since we deleted the destructor, it is + // guaranteed that we will schedule it again. The `dtors_running` + // flag will prevent the code from adding the destructor again. + this.enable_thread(thread_id)?; } + Ok(()) + } + + /// Schedule a pthread TLS destructor. + fn schedule_pthread_tls_dtors(&mut self) -> InterpResult<'tcx> { + let this = self.eval_context_mut(); + let active_thread = this.get_active_thread()?; - assert!(this.has_terminated(thread_id)?, "running TLS dtors for non-terminated thread"); - let mut dtor = this.machine.tls.fetch_tls_dtor(None, thread_id); - while let Some((instance, ptr, key)) = dtor { - trace!("Running TLS dtor {:?} on {:?} at {:?}", instance, ptr, thread_id); + assert!(this.has_terminated(active_thread)?, "running TLS dtors for non-terminated thread"); + // Fetch next dtor after `key`. + let last_key = this.machine.tls.last_dtor_key.get(&active_thread).cloned(); + let dtor = match this.machine.tls.fetch_tls_dtor(last_key, active_thread) { + dtor @ Some(_) => dtor, + // We ran each dtor once, start over from the beginning. 
+ None => { + this.machine.tls.fetch_tls_dtor(None, active_thread) + } + }; + if let Some((instance, ptr, key)) = dtor { + this.machine.tls.last_dtor_key.insert(active_thread, key); + trace!("Running TLS dtor {:?} on {:?} at {:?}", instance, ptr, active_thread); assert!(!this.is_null(ptr).unwrap(), "Data can't be NULL when dtor is called!"); let ret_place = MPlaceTy::dangling(this.machine.layouts.unit, this).into(); @@ -273,15 +278,36 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx StackPopCleanup::None { cleanup: true }, )?; - // step until out of stackframes - this.run()?; + this.enable_thread(active_thread)?; + return Ok(()); + } + this.machine.tls.last_dtor_key.remove(&active_thread); + + Ok(()) + } +} + +impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {} +pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> { - // Fetch next dtor after `key`. - dtor = match this.machine.tls.fetch_tls_dtor(Some(key), thread_id) { - dtor @ Some(_) => dtor, - // We ran each dtor once, start over from the beginning. - None => this.machine.tls.fetch_tls_dtor(None, thread_id), - }; + /// Schedule an active thread's TLS destructor to run on the active thread. + /// Note that this function does not run the destructors itself, it just + /// schedules them one by one each time it is called. + /// + /// FIXME: we do not support yet deallocation of thread local statics. + fn schedule_tls_dtors_for_active_thread(&mut self) -> InterpResult<'tcx> { + let this = self.eval_context_mut(); + let active_thread = this.get_active_thread()?; + + if this.tcx.sess.target.target.target_os == "windows" { + if !this.machine.tls.dtors_running.contains(&active_thread) { + this.machine.tls.dtors_running.insert(active_thread); + this.schedule_windows_tls_dtors()?; + } + } else { + this.machine.tls.dtors_running.insert(active_thread); + this.schedule_macos_global_tls_dtors()?; + this.schedule_pthread_tls_dtors()?; } Ok(()) diff --git a/src/thread.rs b/src/thread.rs index aee9b8a6f5..c4e0f9be18 100644 --- a/src/thread.rs +++ b/src/thread.rs @@ -248,6 +248,12 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { self.threads[thread_id].state == ThreadState::Terminated } + /// Enable the thread for execution. The thread must be terminated. + fn enable_thread(&mut self, thread_id: ThreadId) { + assert!(self.has_terminated(thread_id)); + self.threads[thread_id].state = ThreadState::Enabled; + } + /// Get the borrow of the currently active thread. fn active_thread_mut(&mut self) -> &mut Thread<'mir, 'tcx> { &mut self.threads[self.active_thread] @@ -525,6 +531,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx Ok(this.machine.threads.has_terminated(thread_id)) } + #[inline] + fn enable_thread(&mut self, thread_id: ThreadId) -> InterpResult<'tcx> { + let this = self.eval_context_mut(); + this.machine.threads.enable_thread(thread_id); + Ok(()) + } + #[inline] fn active_thread_stack(&self) -> &[Frame<'mir, 'tcx, Tag, FrameData<'tcx>>] { let this = self.eval_context_ref(); diff --git a/tests/run-pass/concurrency/tls_lib_drop_single_thread.rs b/tests/run-pass/concurrency/tls_lib_drop_single_thread.rs new file mode 100644 index 0000000000..f232cee5bd --- /dev/null +++ b/tests/run-pass/concurrency/tls_lib_drop_single_thread.rs @@ -0,0 +1,25 @@ +//! Check that destructors of the thread locals are executed on all OSes. 
+ +use std::cell::RefCell; + +struct TestCell { + value: RefCell, +} + +impl Drop for TestCell { + fn drop(&mut self) { + eprintln!("Dropping: {}", self.value.borrow()) + } +} + +thread_local! { + static A: TestCell = TestCell { value: RefCell::new(0) }; +} + +fn main() { + A.with(|f| { + assert_eq!(*f.value.borrow(), 0); + *f.value.borrow_mut() = 5; + }); + eprintln!("Continue main.") +} diff --git a/tests/run-pass/concurrency/tls_lib_drop_single_thread.stderr b/tests/run-pass/concurrency/tls_lib_drop_single_thread.stderr new file mode 100644 index 0000000000..a9d705e5b9 --- /dev/null +++ b/tests/run-pass/concurrency/tls_lib_drop_single_thread.stderr @@ -0,0 +1,2 @@ +Continue main. +Dropping: 5 From 04abf066f15c2ce2d1a788a1021bb14dcb9ac045 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Fri, 24 Apr 2020 16:46:51 -0700 Subject: [PATCH 54/77] Move copying of the thread name to the client side. --- src/shims/thread.rs | 2 +- src/thread.rs | 20 ++++++++++++++------ 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/src/shims/thread.rs b/src/shims/thread.rs index 6518732622..67e833f222 100644 --- a/src/shims/thread.rs +++ b/src/shims/thread.rs @@ -119,7 +119,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx this.set_active_thread_name(name)?; } else if option == this.eval_libc_i32("PR_GET_NAME")? { let address = this.read_scalar(arg2)?.not_undef()?; - let name = this.get_active_thread_name()?; + let name = this.get_active_thread_name()?.to_vec(); this.memory.write_bytes(address, name)?; } else { throw_unsup_format!("Unsupported prctl option."); diff --git a/src/thread.rs b/src/thread.rs index c4e0f9be18..eb7af536cf 100644 --- a/src/thread.rs +++ b/src/thread.rs @@ -254,11 +254,16 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { self.threads[thread_id].state = ThreadState::Enabled; } - /// Get the borrow of the currently active thread. + /// Get a mutable borrow of the currently active thread. fn active_thread_mut(&mut self) -> &mut Thread<'mir, 'tcx> { &mut self.threads[self.active_thread] } + /// Get a shared borrow of the currently active thread. + fn active_thread_ref(&self) -> &Thread<'mir, 'tcx> { + &self.threads[self.active_thread] + } + /// Mark the thread as detached, which means that no other thread will try /// to join it and the thread is responsible for cleaning up. fn detach_thread(&mut self, id: ThreadId) -> InterpResult<'tcx> { @@ -304,9 +309,9 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { } /// Get the name of the active thread. - fn get_thread_name(&mut self) -> InterpResult<'tcx, Vec> { - if let Some(ref thread_name) = self.active_thread_mut().thread_name { - Ok(thread_name.clone()) + fn get_thread_name(&self) -> InterpResult<'tcx, &[u8]> { + if let Some(ref thread_name) = self.active_thread_ref().thread_name { + Ok(thread_name) } else { throw_ub_format!("thread {:?} has no name set", self.active_thread) } @@ -557,8 +562,11 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx } #[inline] - fn get_active_thread_name(&mut self) -> InterpResult<'tcx, Vec> { - let this = self.eval_context_mut(); + fn get_active_thread_name<'c>(&'c self) -> InterpResult<'tcx, &'c [u8]> + where + 'mir: 'c, + { + let this = self.eval_context_ref(); this.machine.threads.get_thread_name() } From bc9d007e3eccae1bbb7b90bbfd2c2d583e44166f Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Fri, 24 Apr 2020 16:47:18 -0700 Subject: [PATCH 55/77] Improve Debug formatting of the thread name. 
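
`String::from_utf8_lossy` already handles thread names that are not valid
UTF-8, so the manual `from_utf8` branch can be dropped. For reference, this is
plain `std` behaviour rather than anything Miri-specific:

    // Invalid bytes are rendered as U+FFFD instead of causing an error:
    let raw_name = b"worker-\xff-1";
    assert_eq!(String::from_utf8_lossy(raw_name), "worker-\u{fffd}-1");
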
--- src/thread.rs | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/src/thread.rs b/src/thread.rs index eb7af536cf..ecdaced3f8 100644 --- a/src/thread.rs +++ b/src/thread.rs @@ -132,13 +132,9 @@ impl<'mir, 'tcx> Thread<'mir, 'tcx> { impl<'mir, 'tcx> std::fmt::Debug for Thread<'mir, 'tcx> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { if let Some(ref name) = self.thread_name { - if let Ok(name_str) = std::str::from_utf8(name) { - write!(f, "{}", name_str)?; - } else { - write!(f, "")?; - } + write!(f, "{}", String::from_utf8_lossy(name))?; } else { - write!(f, "unnamed")?; + write!(f, "")?; } write!(f, "({:?}, {:?})", self.state, self.join_status) } From ff5e35b90a7717bffb4bf2f1ae898e2c73920281 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Sun, 26 Apr 2020 14:24:48 -0700 Subject: [PATCH 56/77] Added a test that joining main is UB. --- .../concurrency/libc_pthread_join_main.rs | 20 +++++++++++++++++++ 1 file changed, 20 insertions(+) create mode 100644 tests/compile-fail/concurrency/libc_pthread_join_main.rs diff --git a/tests/compile-fail/concurrency/libc_pthread_join_main.rs b/tests/compile-fail/concurrency/libc_pthread_join_main.rs new file mode 100644 index 0000000000..69e1a68ef9 --- /dev/null +++ b/tests/compile-fail/concurrency/libc_pthread_join_main.rs @@ -0,0 +1,20 @@ +// ignore-windows: Concurrency on Windows is not supported yet. + +// Joining the main thread is undefined behavior. + +#![feature(rustc_private)] + +extern crate libc; + +use std::{ptr, thread}; + +fn main() { + let thread_id: libc::pthread_t = unsafe { libc::pthread_self() }; + let handle = thread::spawn(move || { + unsafe { + assert_eq!(libc::pthread_join(thread_id, ptr::null_mut()), 0); //~ ERROR: Undefined Behavior: trying to join a detached or already joined thread + } + }); + thread::yield_now(); + handle.join().unwrap(); +} From 64164b10e8b321745284bf7da7656e464b4ec9f4 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Sun, 26 Apr 2020 14:42:07 -0700 Subject: [PATCH 57/77] Improve comments. --- src/thread.rs | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/src/thread.rs b/src/thread.rs index ecdaced3f8..9a332a0dcf 100644 --- a/src/thread.rs +++ b/src/thread.rs @@ -61,7 +61,8 @@ impl ThreadId { } } -/// An identifier of a set of blocked threads. +/// An identifier of a set of blocked threads. 0 is used to indicate the absence +/// of a blockset identifier and, therefore, is not a valid identifier. #[derive(Clone, Copy, Debug, PartialOrd, Ord, PartialEq, Eq, Hash)] pub struct BlockSetId(NonZeroU32); @@ -116,8 +117,8 @@ pub struct Thread<'mir, 'tcx> { } impl<'mir, 'tcx> Thread<'mir, 'tcx> { - /// Check if the thread terminated. If yes, change the state to terminated - /// and return `true`. + /// Check if the thread is done executing (no more stack frames). If yes, + /// change the state to terminated and return `true`. fn check_terminated(&mut self) -> bool { if self.state == ThreadState::Enabled { if self.stack.is_empty() { @@ -174,6 +175,7 @@ impl<'mir, 'tcx> Default for ThreadManager<'mir, 'tcx> { let mut threads = IndexVec::new(); // Create the main thread and add it to the list of threads. let mut main_thread = Thread::default(); + // The main thread can *not* be joined on. 
main_thread.join_status = ThreadJoinStatus::Detached; threads.push(main_thread); Self { @@ -282,7 +284,7 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { self.threads .iter() .all(|thread| thread.state != ThreadState::BlockedOnJoin(joined_thread_id)), - "a joinable thread has threads waiting for its termination" + "a joinable thread already has threads waiting for its termination" ); // Mark the joined thread as being joined so that we detect if other // threads try to join it. @@ -349,7 +351,7 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { /// The currently implemented scheduling policy is the one that is commonly /// used in stateless model checkers such as Loom: run the active thread as /// long as we can and switch only when we have to (the active thread was - /// blocked, terminated, or was explicitly asked to be preempted). + /// blocked, terminated, or has explicitly asked to be preempted). fn schedule(&mut self) -> InterpResult<'tcx, SchedulingAction> { if self.threads[self.active_thread].check_terminated() { // Check if we need to unblock any threads. From 60cd8aa4b0dadaf5e32bcf86ee6cbddb93c69c01 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Sun, 26 Apr 2020 14:44:59 -0700 Subject: [PATCH 58/77] Delete a duplicate test. --- .../concurrency/dangling_tls_lib.rs | 49 ------------------- 1 file changed, 49 deletions(-) delete mode 100644 tests/compile-fail/concurrency/dangling_tls_lib.rs diff --git a/tests/compile-fail/concurrency/dangling_tls_lib.rs b/tests/compile-fail/concurrency/dangling_tls_lib.rs deleted file mode 100644 index 6be5538bb4..0000000000 --- a/tests/compile-fail/concurrency/dangling_tls_lib.rs +++ /dev/null @@ -1,49 +0,0 @@ -// ignore-windows: Concurrency on Windows is not supported yet. - -//! Check that we catch if a thread local is accessed after the thread has -//! terminated. - -#![feature(thread_local_internals)] - -use std::cell::RefCell; -use std::thread; - -static A: std::thread::LocalKey> = { - #[inline] - fn __init() -> RefCell { - RefCell::new(0) - } - - unsafe fn __getit() -> Option<&'static RefCell> { - static __KEY: std::thread::__OsLocalKeyInner> = - std::thread::__OsLocalKeyInner::new(); - __KEY.get(__init) - } - - unsafe { std::thread::LocalKey::new(__getit) } -}; - -struct Sender(*mut u8); - -unsafe impl Send for Sender {} - -fn main() { - A.with(|f| { - assert_eq!(*f.borrow(), 0); - *f.borrow_mut() = 4; - }); - - let handle = thread::spawn(|| { - let ptr = A.with(|f| { - assert_eq!(*f.borrow(), 0); - *f.borrow_mut() = 5; - &mut *f.borrow_mut() as *mut u8 - }); - Sender(ptr) - }); - let ptr = handle.join().unwrap().0; - A.with(|f| { - assert_eq!(*f.borrow(), 4); - }); - let _x = unsafe { *ptr }; //~ ERROR Undefined Behavior -} From 39efdf31cf4f69ac0e33f79efe83243c6cdb4d35 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Sun, 26 Apr 2020 14:56:31 -0700 Subject: [PATCH 59/77] Move prctl test to the same file as other libc tests. 
--- .../concurrency/libc_prctl_thread_name.rs | 18 ------------------ tests/run-pass/libc.rs | 17 +++++++++++++++++ 2 files changed, 17 insertions(+), 18 deletions(-) delete mode 100644 tests/run-pass/concurrency/libc_prctl_thread_name.rs diff --git a/tests/run-pass/concurrency/libc_prctl_thread_name.rs b/tests/run-pass/concurrency/libc_prctl_thread_name.rs deleted file mode 100644 index b8ba27b3a8..0000000000 --- a/tests/run-pass/concurrency/libc_prctl_thread_name.rs +++ /dev/null @@ -1,18 +0,0 @@ -// ignore-windows: No libc on Windows -// ignore-macos: No prctl on MacOS - -#![feature(rustc_private)] - -extern crate libc; - -use std::ffi::CString; - -fn main() { - unsafe { - let thread_name = CString::new("hello").expect("CString::new failed"); - assert_eq!(libc::prctl(libc::PR_SET_NAME, thread_name.as_ptr() as libc::c_long, 0 as libc::c_long, 0 as libc::c_long, 0 as libc::c_long), 0); - let mut buf = [0; 6]; - assert_eq!(libc::prctl(libc::PR_GET_NAME, buf.as_mut_ptr() as libc::c_long, 0 as libc::c_long, 0 as libc::c_long, 0 as libc::c_long), 0); - assert_eq!(thread_name.as_bytes_with_nul(), buf); - } -} diff --git a/tests/run-pass/libc.rs b/tests/run-pass/libc.rs index 14d12de0d1..5873d42969 100644 --- a/tests/run-pass/libc.rs +++ b/tests/run-pass/libc.rs @@ -141,6 +141,20 @@ fn test_rwlock_libc_static_initializer() { } } +/// Test whether the `prctl` shim correctly sets the thread name. +/// +/// Note: `prctl` exists only on Linux. +fn test_prctl_thread_name() { + use std::ffi::CString; + unsafe { + let thread_name = CString::new("hello").expect("CString::new failed"); + assert_eq!(libc::prctl(libc::PR_SET_NAME, thread_name.as_ptr() as libc::c_long, 0 as libc::c_long, 0 as libc::c_long, 0 as libc::c_long), 0); + let mut buf = [0; 6]; + assert_eq!(libc::prctl(libc::PR_GET_NAME, buf.as_mut_ptr() as libc::c_long, 0 as libc::c_long, 0 as libc::c_long, 0 as libc::c_long), 0); + assert_eq!(thread_name.as_bytes_with_nul(), buf); + } +} + fn main() { #[cfg(target_os = "linux")] test_posix_fadvise(); @@ -152,4 +166,7 @@ fn main() { #[cfg(target_os = "linux")] test_mutex_libc_static_initializer_recursive(); + + #[cfg(target_os = "linux")] + test_prctl_thread_name(); } From 6842eb2b84337ff01158ca7c0eee669b0d1e061f Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Sun, 26 Apr 2020 15:52:01 -0700 Subject: [PATCH 60/77] Rename global tls dtor to thread dtor. --- src/shims/foreign_items/posix/macos.rs | 2 +- src/shims/tls.rs | 39 ++++++++++++++------------ 2 files changed, 22 insertions(+), 19 deletions(-) diff --git a/src/shims/foreign_items/posix/macos.rs b/src/shims/foreign_items/posix/macos.rs index 9f6ea00b03..200b88f29c 100644 --- a/src/shims/foreign_items/posix/macos.rs +++ b/src/shims/foreign_items/posix/macos.rs @@ -83,7 +83,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let dtor = this.memory.get_fn(dtor)?.as_instance()?; let data = this.read_scalar(args[1])?.not_undef()?; let active_thread = this.get_active_thread()?; - this.machine.tls.set_thread_global_dtor(active_thread, dtor, data)?; + this.machine.tls.set_thread_dtor(active_thread, dtor, data)?; } // Querying system information diff --git a/src/shims/tls.rs b/src/shims/tls.rs index 615950621a..d3d5097732 100644 --- a/src/shims/tls.rs +++ b/src/shims/tls.rs @@ -33,8 +33,9 @@ pub struct TlsData<'tcx> { /// pthreads-style thread-local storage. keys: BTreeMap>, - /// A single global per thread dtor (that's how things work on macOS) with a data argument. 
- global_dtors: BTreeMap, Scalar)>, + /// A single per thread destructor of the thread local storage (that's how + /// things work on macOS) with a data argument. + thread_dtors: BTreeMap, Scalar)>, /// Whether we are in the "destruct" phase, during which some operations are UB. dtors_running: HashSet, @@ -48,7 +49,7 @@ impl<'tcx> Default for TlsData<'tcx> { TlsData { next_key: 1, // start with 1 as we must not use 0 on Windows keys: Default::default(), - global_dtors: Default::default(), + thread_dtors: Default::default(), dtors_running: Default::default(), last_dtor_key: Default::default(), } @@ -117,16 +118,16 @@ impl<'tcx> TlsData<'tcx> { } } - /// Set global dtor for the given thread. This function is used to implement - /// `_tlv_atexit` shim on MacOS. + /// Set the thread wide destructor of the thread local storage for the given + /// thread. This function is used to implement `_tlv_atexit` shim on MacOS. /// - /// Global destructors are available only on MacOS and (potentially - /// confusingly) they seem to be still per thread as can be guessed from the - /// following comment in the [`_tlv_atexit` + /// Thread wide dtors are available only on MacOS. There is one destructor + /// per thread as can be guessed from the following comment in the + /// [`_tlv_atexit` /// implementation](https://github.com/opensource-apple/dyld/blob/195030646877261f0c8c7ad8b001f52d6a26f514/src/threadLocalVariables.c#L389): /// /// // NOTE: this does not need locks because it only operates on current thread data - pub fn set_thread_global_dtor( + pub fn set_thread_dtor( &mut self, thread: ThreadId, dtor: ty::Instance<'tcx>, @@ -134,10 +135,10 @@ impl<'tcx> TlsData<'tcx> { ) -> InterpResult<'tcx> { if self.dtors_running.contains(&thread) { // UB, according to libstd docs. - throw_ub_format!("setting global destructor while destructors are already running"); + throw_ub_format!("setting thread's local storage destructor while destructors are already running"); } - if self.global_dtors.insert(thread, (dtor, data)).is_some() { - throw_unsup_format!("setting more than one global destructor for the same thread is not supported"); + if self.thread_dtors.insert(thread, (dtor, data)).is_some() { + throw_unsup_format!("setting more than one thread local storage destructor for the same thread is not supported"); } Ok(()) } @@ -223,15 +224,15 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx Ok(()) } - /// Schedule the MacOS global dtor to be executed. + /// Schedule the MacOS thread destructor of the thread local storage to be + /// executed. /// /// Note: It is safe to call this function also on other Unixes. - fn schedule_macos_global_tls_dtors(&mut self) -> InterpResult<'tcx> { + fn schedule_macos_tls_dtor(&mut self) -> InterpResult<'tcx> { let this = self.eval_context_mut(); let thread_id = this.get_active_thread()?; - // The macOS global dtor runs "before any TLS slots get freed", so do that first. 
- if let Some((instance, data)) = this.machine.tls.global_dtors.remove(&thread_id) { - trace!("Running global dtor {:?} on {:?} at {:?}", instance, data, thread_id); + if let Some((instance, data)) = this.machine.tls.thread_dtors.remove(&thread_id) { + trace!("Running macos dtor {:?} on {:?} at {:?}", instance, data, thread_id); let ret_place = MPlaceTy::dangling(this.machine.layouts.unit, this).into(); this.call_function( @@ -306,7 +307,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx } } else { this.machine.tls.dtors_running.insert(active_thread); - this.schedule_macos_global_tls_dtors()?; + // The macOS thread wide destructor runs "before any TLS slots get + // freed", so do that first. + this.schedule_macos_tls_dtor()?; this.schedule_pthread_tls_dtors()?; } From c4574dde8dbd3f996418927a3edc8a83e9709f9c Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Sun, 26 Apr 2020 15:52:45 -0700 Subject: [PATCH 61/77] Many small changes to clean up code. --- src/shims/thread.rs | 44 +++++++++++--------------------------------- src/shims/tls.rs | 8 +++++--- src/thread.rs | 7 +++++++ 3 files changed, 23 insertions(+), 36 deletions(-) diff --git a/src/shims/thread.rs b/src/shims/thread.rs index 67e833f222..c2ef272237 100644 --- a/src/shims/thread.rs +++ b/src/shims/thread.rs @@ -1,5 +1,4 @@ use crate::*; -use rustc_index::vec::Idx; use rustc_target::abi::LayoutOf; impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {} @@ -19,18 +18,12 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx ); let new_thread_id = this.create_thread()?; + // Also switch to new thread so that we can push the first stackframe. let old_thread_id = this.set_active_thread(new_thread_id)?; let thread_info_place = this.deref_operand(thread)?; - let thread_info_type = thread.layout.ty - .builtin_deref(true) - .ok_or_else(|| err_ub_format!( - "wrong signature used for `pthread_create`: first argument must be a raw pointer." - ))? 
- .ty; - let thread_info_layout = this.layout_of(thread_info_type)?; this.write_scalar( - Scalar::from_uint(new_thread_id.index() as u128, thread_info_layout.size), + Scalar::from_uint(new_thread_id.to_u128(), thread_info_place.layout.size), thread_info_place.into(), )?; @@ -38,14 +31,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let instance = this.memory.get_fn(fn_ptr)?.as_instance()?; let func_arg = this.read_immediate(arg)?; - let func_args = [*func_arg]; let ret_place = this.allocate(this.layout_of(this.tcx.types.usize)?, MiriMemoryKind::Machine.into()); this.call_function( instance, - &func_args[..], + &[*func_arg], Some(ret_place.into()), StackPopCleanup::None { cleanup: true }, )?; @@ -66,7 +58,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx throw_unsup_format!("Miri supports pthread_join only with retval==NULL"); } - let thread_id = this.read_scalar(thread)?.not_undef()?.to_machine_usize(this)?; + let thread_id = this.read_scalar(thread)?.to_machine_usize(this)?; this.join_thread(thread_id.into())?; Ok(0) @@ -75,7 +67,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn pthread_detach(&mut self, thread: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - let thread_id = this.read_scalar(thread)?.not_undef()?.to_machine_usize(this)?; + let thread_id = this.read_scalar(thread)?.to_machine_usize(this)?; this.detach_thread(thread_id.into())?; Ok(0) @@ -85,34 +77,20 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let this = self.eval_context_mut(); let thread_id = this.get_active_thread()?; - this.write_scalar(Scalar::from_uint(thread_id.index() as u128, dest.layout.size), dest) + this.write_scalar(Scalar::from_uint(thread_id.to_u128(), dest.layout.size), dest) } fn prctl( &mut self, option: OpTy<'tcx, Tag>, arg2: OpTy<'tcx, Tag>, - arg3: OpTy<'tcx, Tag>, - arg4: OpTy<'tcx, Tag>, - arg5: OpTy<'tcx, Tag>, + _arg3: OpTy<'tcx, Tag>, + _arg4: OpTy<'tcx, Tag>, + _arg5: OpTy<'tcx, Tag>, ) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - // prctl last 5 arguments are declared as variadic. Therefore, we need - // to check their types manually. - let c_long_size = this.libc_ty_layout("c_long")?.size.bytes(); - let check_arg = |arg: OpTy<'tcx, Tag>| -> InterpResult<'tcx> { - match this.read_scalar(arg)?.not_undef()? { - Scalar::Raw { size, .. } if u64::from(size) == c_long_size => Ok(()), - _ => throw_ub_format!("an argument of unsupported type was passed to prctl"), - } - }; - check_arg(arg2)?; - check_arg(arg3)?; - check_arg(arg4)?; - check_arg(arg5)?; - - let option = this.read_scalar(option)?.not_undef()?.to_i32()?; + let option = this.read_scalar(option)?.to_i32()?; if option == this.eval_libc_i32("PR_SET_NAME")? { let address = this.read_scalar(arg2)?.not_undef()?; let name = this.memory.read_c_str(address)?.to_owned(); @@ -122,7 +100,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let name = this.get_active_thread_name()?.to_vec(); this.memory.write_bytes(address, name)?; } else { - throw_unsup_format!("Unsupported prctl option."); + throw_unsup_format!("unsupported prctl option {}", option); } Ok(0) diff --git a/src/shims/tls.rs b/src/shims/tls.rs index d3d5097732..087b44af2f 100644 --- a/src/shims/tls.rs +++ b/src/shims/tls.rs @@ -20,7 +20,6 @@ pub type TlsKey = u128; pub struct TlsEntry<'tcx> { /// The data for this key. 
None is used to represent NULL. /// (We normalize this early to avoid having to do a NULL-ptr-test each time we access the data.) - /// Will eventually become a map from thread IDs to `Scalar`s, if we ever support more than one thread. data: BTreeMap>, dtor: Option>, } @@ -89,7 +88,7 @@ impl<'tcx> TlsData<'tcx> { ) -> InterpResult<'tcx, Scalar> { match self.keys.get(&key) { Some(TlsEntry { data, .. }) => { - let value = data.get(&thread_id).cloned(); + let value = data.get(&thread_id).copied(); trace!("TLS key {} for thread {:?} loaded: {:?}", key, thread_id, value); Ok(value.unwrap_or_else(|| Scalar::null_ptr(cx).into())) } @@ -99,7 +98,10 @@ impl<'tcx> TlsData<'tcx> { pub fn store_tls( &mut self, - key: TlsKey, thread_id: ThreadId, new_data: Option>) -> InterpResult<'tcx> { + key: TlsKey, + thread_id: ThreadId, + new_data: Option> + ) -> InterpResult<'tcx> { match self.keys.get_mut(&key) { Some(TlsEntry { data, .. }) => { match new_data { diff --git a/src/thread.rs b/src/thread.rs index 9a332a0dcf..f9094d771e 100644 --- a/src/thread.rs +++ b/src/thread.rs @@ -1,5 +1,6 @@ //! Implements threads. +use std::convert::TryInto; use std::cell::RefCell; use std::convert::TryFrom; use std::num::NonZeroU32; @@ -34,6 +35,12 @@ pub struct ThreadId(usize); /// The main thread. When it terminates, the whole application terminates. const MAIN_THREAD: ThreadId = ThreadId(0); +impl ThreadId { + pub fn to_u128(self) -> u128 { + self.0.try_into().unwrap() + } +} + impl Idx for ThreadId { fn new(idx: usize) -> Self { ThreadId(idx) From 911ff7eade22e7bd15e79167a22845005f29b3fb Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Sun, 26 Apr 2020 20:49:53 -0700 Subject: [PATCH 62/77] Improve style and comments. --- src/eval.rs | 2 +- src/shims/tls.rs | 14 ++++++++------ 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/src/eval.rs b/src/eval.rs index 9131946f8d..89d61d141a 100644 --- a/src/eval.rs +++ b/src/eval.rs @@ -211,7 +211,7 @@ pub fn eval_main<'tcx>(tcx: TyCtxt<'tcx>, main_id: DefId, config: MiriConfig) -> assert!(ecx.step()?, "a terminated thread was scheduled for execution"); } SchedulingAction::ExecuteDtors => { - ecx.schedule_tls_dtors_for_active_thread()?; + ecx.schedule_next_tls_dtor_for_active_thread()?; } SchedulingAction::Stop => { break; diff --git a/src/shims/tls.rs b/src/shims/tls.rs index 087b44af2f..54850de82c 100644 --- a/src/shims/tls.rs +++ b/src/shims/tls.rs @@ -105,9 +105,9 @@ impl<'tcx> TlsData<'tcx> { match self.keys.get_mut(&key) { Some(TlsEntry { data, .. 
}) => { match new_data { - Some(ptr) => { - trace!("TLS key {} for thread {:?} stored: {:?}", key, thread_id, ptr); - data.insert(thread_id, ptr); + Some(scalar) => { + trace!("TLS key {} for thread {:?} stored: {:?}", key, thread_id, scalar); + data.insert(thread_id, scalar); } None => { trace!("TLS key {} for thread {:?} removed", key, thread_id); @@ -271,7 +271,7 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx if let Some((instance, ptr, key)) = dtor { this.machine.tls.last_dtor_key.insert(active_thread, key); trace!("Running TLS dtor {:?} on {:?} at {:?}", instance, ptr, active_thread); - assert!(!this.is_null(ptr).unwrap(), "Data can't be NULL when dtor is called!"); + assert!(!this.is_null(ptr).unwrap(), "data can't be NULL when dtor is called!"); let ret_place = MPlaceTy::dangling(this.machine.layouts.unit, this).into(); this.call_function( @@ -295,10 +295,12 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx /// Schedule an active thread's TLS destructor to run on the active thread. /// Note that this function does not run the destructors itself, it just - /// schedules them one by one each time it is called. + /// schedules them one by one each time it is called and reenables the + /// thread so that it can be executed normally by the main execution loop. /// /// FIXME: we do not support yet deallocation of thread local statics. - fn schedule_tls_dtors_for_active_thread(&mut self) -> InterpResult<'tcx> { + /// Issue: https://github.com/rust-lang/miri/issues/1369 + fn schedule_next_tls_dtor_for_active_thread(&mut self) -> InterpResult<'tcx> { let this = self.eval_context_mut(); let active_thread = this.get_active_thread()?; From d9e18ada39b52518c70df7801be564e58f4e8a66 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Sun, 26 Apr 2020 20:50:58 -0700 Subject: [PATCH 63/77] Make sure to remove thread local data only if we have destructor. --- src/shims/tls.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/shims/tls.rs b/src/shims/tls.rs index 54850de82c..7d4aae3670 100644 --- a/src/shims/tls.rs +++ b/src/shims/tls.rs @@ -181,8 +181,8 @@ impl<'tcx> TlsData<'tcx> { { match data.entry(thread_id) { Entry::Occupied(entry) => { - let data_scalar = entry.remove(); if let Some(dtor) = dtor { + let data_scalar = entry.remove(); let ret = Some((*dtor, data_scalar, key)); return ret; } From 174adad2b34ddacc129232c6127a260270d1f52a Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Sun, 26 Apr 2020 20:51:21 -0700 Subject: [PATCH 64/77] Use DLL_THREAD_DETACH when calling windows TLS destructor. --- src/shims/tls.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/shims/tls.rs b/src/shims/tls.rs index 7d4aae3670..eb8c99b72f 100644 --- a/src/shims/tls.rs +++ b/src/shims/tls.rs @@ -213,7 +213,7 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let thread_callback = this.memory.get_fn(thread_callback.not_undef()?)?.as_instance()?; // The signature of this function is `unsafe extern "system" fn(h: c::LPVOID, dwReason: c::DWORD, pv: c::LPVOID)`. 
- let reason = this.eval_path_scalar(&["std", "sys", "windows", "c", "DLL_PROCESS_DETACH"])?; + let reason = this.eval_path_scalar(&["std", "sys", "windows", "c", "DLL_THREAD_DETACH"])?; let ret_place = MPlaceTy::dangling(this.machine.layouts.unit, this).into(); this.call_function( thread_callback, From 9ba3ef2a44118fb2692a65a04500cdef4f6036d5 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Sun, 26 Apr 2020 21:01:03 -0700 Subject: [PATCH 65/77] Change representation and conversion of ThreadId and BlockSetId. --- src/shims/sync.rs | 6 +++--- src/thread.rs | 20 +++++++++----------- 2 files changed, 12 insertions(+), 14 deletions(-) diff --git a/src/shims/sync.rs b/src/shims/sync.rs index 97afbbe98f..b0605b4e81 100644 --- a/src/shims/sync.rs +++ b/src/shims/sync.rs @@ -158,7 +158,7 @@ fn mutex_get_or_create_blockset<'mir, 'tcx: 'mir>( mutex_set_blockset(ecx, mutex_op, blockset.to_u32_scalar())?; Ok(blockset) } else { - Ok(blockset.into()) + Ok(BlockSetId::new(blockset)) } } @@ -233,7 +233,7 @@ fn rwlock_get_or_create_writer_blockset<'mir, 'tcx: 'mir>( rwlock_set_writer_blockset(ecx, rwlock_op, blockset.to_u32_scalar())?; Ok(blockset) } else { - Ok(blockset.into()) + Ok(BlockSetId::new(blockset)) } } @@ -264,7 +264,7 @@ fn rwlock_get_or_create_reader_blockset<'mir, 'tcx: 'mir>( rwlock_set_reader_blockset(ecx, rwlock_op, blockset.to_u32_scalar())?; Ok(blockset) } else { - Ok(blockset.into()) + Ok(BlockSetId::new(blockset)) } } diff --git a/src/thread.rs b/src/thread.rs index f9094d771e..749d6bf955 100644 --- a/src/thread.rs +++ b/src/thread.rs @@ -30,7 +30,7 @@ pub enum SchedulingAction { /// A thread identifier. #[derive(Clone, Copy, Debug, PartialOrd, Ord, PartialEq, Eq, Hash)] -pub struct ThreadId(usize); +pub struct ThreadId(u32); /// The main thread. When it terminates, the whole application terminates. const MAIN_THREAD: ThreadId = ThreadId(0); @@ -43,22 +43,22 @@ impl ThreadId { impl Idx for ThreadId { fn new(idx: usize) -> Self { - ThreadId(idx) + ThreadId(u32::try_from(idx).unwrap()) } fn index(self) -> usize { - self.0 + usize::try_from(self.0).unwrap() } } impl From for ThreadId { fn from(id: u64) -> Self { - Self(usize::try_from(id).unwrap()) + Self(u32::try_from(id).unwrap()) } } impl From for ThreadId { fn from(id: u32) -> Self { - Self(usize::try_from(id).unwrap()) + Self(u32::try_from(id).unwrap()) } } @@ -73,13 +73,11 @@ impl ThreadId { #[derive(Clone, Copy, Debug, PartialOrd, Ord, PartialEq, Eq, Hash)] pub struct BlockSetId(NonZeroU32); -impl From for BlockSetId { - fn from(id: u32) -> Self { +impl BlockSetId { + /// Panics if `id` is 0. + pub fn new(id: u32) -> Self { Self(NonZeroU32::new(id).expect("0 is not a valid blockset id")) } -} - -impl BlockSetId { pub fn to_u32_scalar<'tcx>(&self) -> Scalar { Scalar::from_u32(self.0.get()) } @@ -325,7 +323,7 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { /// Allocate a new blockset id. fn create_blockset(&mut self) -> BlockSetId { self.blockset_counter = self.blockset_counter.checked_add(1).unwrap(); - self.blockset_counter.into() + BlockSetId::new(self.blockset_counter) } /// Block the currently active thread and put it into the given blockset. From 207c6e7fa74758a64104c2d77218e263d92cf1c6 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Sun, 26 Apr 2020 21:13:33 -0700 Subject: [PATCH 66/77] Improve comments and code clarity. 
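
Mostly clarifications in the sync shims: the `pthread_mutex_t` and
`pthread_rwlock_t` layout comments now state that a stored blockset id of 0
means "no blockset assigned yet", the mutex owner word is cleared again when
the lock is released with nobody waiting, and the rwlock unlock path documents
that it wakes blocked writers before blocked readers, so starvation is
possible in both directions. A user-level sketch of that policy (illustrative
only, not a test from this series):

    unsafe {
        let mut lock: libc::pthread_rwlock_t = libc::PTHREAD_RWLOCK_INITIALIZER;
        assert_eq!(libc::pthread_rwlock_wrlock(&mut lock), 0);
        // ...other threads block in pthread_rwlock_wrlock / pthread_rwlock_rdlock...
        assert_eq!(libc::pthread_rwlock_unlock(&mut lock), 0);
        // Under the policy documented here, a blocked writer (if any) is
        // woken before any blocked readers.
    }
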
--- src/shims/sync.rs | 16 ++++++++++------ src/thread.rs | 2 +- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/src/shims/sync.rs b/src/shims/sync.rs index b0605b4e81..9dad302706 100644 --- a/src/shims/sync.rs +++ b/src/shims/sync.rs @@ -80,7 +80,8 @@ fn mutexattr_set_kind<'mir, 'tcx: 'mir>( // bytes 8-11: when count > 0, id of the owner thread as a u32 // bytes 12-15 or 16-19 (depending on platform): mutex kind, as an i32 // (the kind has to be at its offset for compatibility with static initializer macros) -// bytes 20-23: when count > 0, id of the blockset in which the blocked threads are waiting. +// bytes 20-23: when count > 0, id of the blockset in which the blocked threads +// are waiting or 0 if blockset is not yet assigned. const PTHREAD_MUTEX_T_MIN_SIZE: u64 = 24; @@ -170,9 +171,9 @@ fn mutex_get_or_create_blockset<'mir, 'tcx: 'mir>( // bytes 4-7: reader count, as a u32 // bytes 8-11: writer count, as a u32 // bytes 12-15: when writer or reader count > 0, id of the blockset in which the -// blocked writers are waiting. +// blocked writers are waiting or 0 if blockset is not yet assigned. // bytes 16-20: when writer count > 0, id of the blockset in which the blocked -// readers are waiting. +// readers are waiting or 0 if blockset is not yet assigned. const PTHREAD_RWLOCK_T_MIN_SIZE: u64 = 20; @@ -342,8 +343,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx Ok(0) } else { // The mutex is locked. Let's check by whom. - let owner_thread: ThreadId = - mutex_get_owner(this, mutex_op)?.not_undef()?.to_u32()?.into(); + let owner_thread: ThreadId = mutex_get_owner(this, mutex_op)?.to_u32()?.into(); if owner_thread != active_thread { // Block the active thread. let blockset = mutex_get_or_create_blockset(this, mutex_op)?; @@ -425,6 +425,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx mutex_set_owner(this, mutex_op, new_owner.to_u32_scalar())?; } else { // No thread is waiting on this mutex. + mutex_set_owner(this, mutex_op, Scalar::from_u32(0))?; mutex_set_locked_count(this, mutex_op, Scalar::from_u32(0))?; } Ok(0) @@ -550,10 +551,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx Ok(0) } else if writers != 0 { let reader_blockset = rwlock_get_or_create_reader_blockset(this, rwlock_op)?; - rwlock_set_writers(this, rwlock_op, Scalar::from_u32(0))?; + // We are prioritizing writers here against the readers. As a + // result, not only readers can starve writers, but also writers can + // starve readers. if let Some(_writer) = this.unblock_some_thread(writer_blockset)? { rwlock_set_writers(this, rwlock_op, Scalar::from_u32(1))?; } else { + rwlock_set_writers(this, rwlock_op, Scalar::from_u32(0))?; let mut readers = 0; while let Some(_reader) = this.unblock_some_thread(reader_blockset)? { readers += 1; diff --git a/src/thread.rs b/src/thread.rs index 749d6bf955..9408dbe56c 100644 --- a/src/thread.rs +++ b/src/thread.rs @@ -1,8 +1,8 @@ //! Implements threads. -use std::convert::TryInto; use std::cell::RefCell; use std::convert::TryFrom; +use std::convert::TryInto; use std::num::NonZeroU32; use log::trace; From 356aecce7f3c438db6804a72a5022a2537d35104 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Sun, 26 Apr 2020 21:25:46 -0700 Subject: [PATCH 67/77] Add a FIXME. 
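The restriction being documented here is that the pthread_join shim accepts
only a NULL retval pointer; reading back the thread function's return value
is left for later. A minimal usage sketch of what this means for interpreted
programs (illustrative only, assumes the libc crate as a dependency; not
code from this patch):

    use std::ptr;

    extern "C" fn child(_arg: *mut libc::c_void) -> *mut libc::c_void {
        ptr::null_mut()
    }

    fn main() {
        unsafe {
            let mut handle: libc::pthread_t = std::mem::zeroed();
            libc::pthread_create(&mut handle, ptr::null(), child, ptr::null_mut());
            // Supported by the shim: discard the return value.
            libc::pthread_join(handle, ptr::null_mut());
            // Not yet supported under Miri: a non-NULL retval pointer.
            // let mut ret: *mut libc::c_void = ptr::null_mut();
            // libc::pthread_join(handle, &mut ret);
        }
    }
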
--- src/shims/thread.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/shims/thread.rs b/src/shims/thread.rs index c2ef272237..27e9980852 100644 --- a/src/shims/thread.rs +++ b/src/shims/thread.rs @@ -55,6 +55,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let this = self.eval_context_mut(); if !this.is_null(this.read_scalar(retval)?.not_undef()?)? { + // FIXME: implement reading the thread function's return place. throw_unsup_format!("Miri supports pthread_join only with retval==NULL"); } From f204b67b0f5ae6f498d29938790cd989e58f5bec Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Sun, 26 Apr 2020 21:49:36 -0700 Subject: [PATCH 68/77] Merge dtors_running and last_dtor_key fields. --- src/shims/tls.rs | 47 ++++++++++++++++++++++++++++++++--------------- 1 file changed, 32 insertions(+), 15 deletions(-) diff --git a/src/shims/tls.rs b/src/shims/tls.rs index eb8c99b72f..a98a802564 100644 --- a/src/shims/tls.rs +++ b/src/shims/tls.rs @@ -2,10 +2,10 @@ use std::collections::BTreeMap; use std::collections::btree_map::Entry; -use std::collections::HashSet; use log::trace; +use rustc_data_structures::fx::FxHashMap; use rustc_middle::ty; use rustc_target::abi::{Size, HasDataLayout}; @@ -24,6 +24,12 @@ pub struct TlsEntry<'tcx> { dtor: Option>, } +#[derive(Clone, Debug)] +struct RunningDtorsState { + /// The last TlsKey used to retrieve a TLS destructor. + last_dtor_key: Option, +} + #[derive(Debug)] pub struct TlsData<'tcx> { /// The Key to use for the next thread-local allocation. @@ -36,11 +42,10 @@ pub struct TlsData<'tcx> { /// things work on macOS) with a data argument. thread_dtors: BTreeMap, Scalar)>, - /// Whether we are in the "destruct" phase, during which some operations are UB. - dtors_running: HashSet, - - /// The last TlsKey used to retrieve a TLS destructor. - last_dtor_key: BTreeMap, + /// State for currently running TLS dtors. If this map contains a key for a + /// specific thread, it means that we are in the "destruct" phase, during + /// which some operations are UB. + dtors_running: FxHashMap, } impl<'tcx> Default for TlsData<'tcx> { @@ -50,7 +55,6 @@ impl<'tcx> Default for TlsData<'tcx> { keys: Default::default(), thread_dtors: Default::default(), dtors_running: Default::default(), - last_dtor_key: Default::default(), } } } @@ -135,7 +139,7 @@ impl<'tcx> TlsData<'tcx> { dtor: ty::Instance<'tcx>, data: Scalar ) -> InterpResult<'tcx> { - if self.dtors_running.contains(&thread) { + if self.dtors_running.contains_key(&thread) { // UB, according to libstd docs. throw_ub_format!("setting thread's local storage destructor while destructors are already running"); } @@ -192,6 +196,21 @@ impl<'tcx> TlsData<'tcx> { } None } + + /// Set that dtors are running for `thread`. It is guaranteed not to change + /// the existing values stored in `dtors_running` for this thread. Returns + /// `true` if dtors for `thread` are already running. 
+ fn set_dtors_running_for_thread(&mut self, thread: ThreadId) -> bool { + if self.dtors_running.contains_key(&thread) { + true + } else { + self.dtors_running.insert( + thread, + RunningDtorsState { last_dtor_key: None } + ); + false + } + } } impl<'mir, 'tcx: 'mir> EvalContextPrivExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {} @@ -203,7 +222,6 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let this = self.eval_context_mut(); let active_thread = this.get_active_thread()?; assert_eq!(this.get_total_thread_count()?, 1, "concurrency on Windows not supported"); - this.machine.tls.dtors_running.insert(active_thread); // Windows has a special magic linker section that is run on certain events. // Instead of searching for that section and supporting arbitrary hooks in there // (that would be basically https://github.com/rust-lang/miri/issues/450), @@ -260,7 +278,7 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx assert!(this.has_terminated(active_thread)?, "running TLS dtors for non-terminated thread"); // Fetch next dtor after `key`. - let last_key = this.machine.tls.last_dtor_key.get(&active_thread).cloned(); + let last_key = this.machine.tls.dtors_running[&active_thread].last_dtor_key.clone(); let dtor = match this.machine.tls.fetch_tls_dtor(last_key, active_thread) { dtor @ Some(_) => dtor, // We ran each dtor once, start over from the beginning. @@ -269,7 +287,7 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx } }; if let Some((instance, ptr, key)) = dtor { - this.machine.tls.last_dtor_key.insert(active_thread, key); + this.machine.tls.dtors_running.get_mut(&active_thread).unwrap().last_dtor_key = Some(key); trace!("Running TLS dtor {:?} on {:?} at {:?}", instance, ptr, active_thread); assert!(!this.is_null(ptr).unwrap(), "data can't be NULL when dtor is called!"); @@ -284,7 +302,7 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx this.enable_thread(active_thread)?; return Ok(()); } - this.machine.tls.last_dtor_key.remove(&active_thread); + this.machine.tls.dtors_running.get_mut(&active_thread).unwrap().last_dtor_key = None; Ok(()) } @@ -305,12 +323,11 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let active_thread = this.get_active_thread()?; if this.tcx.sess.target.target.target_os == "windows" { - if !this.machine.tls.dtors_running.contains(&active_thread) { - this.machine.tls.dtors_running.insert(active_thread); + if !this.machine.tls.set_dtors_running_for_thread(active_thread) { this.schedule_windows_tls_dtors()?; } } else { - this.machine.tls.dtors_running.insert(active_thread); + this.machine.tls.set_dtors_running_for_thread(active_thread); // The macOS thread wide destructor runs "before any TLS slots get // freed", so do that first. this.schedule_macos_tls_dtor()?; From 331dbd1469abb9ee7959684305732b0613f0bf15 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Sun, 26 Apr 2020 22:05:09 -0700 Subject: [PATCH 69/77] Add a test for joining in a destructor. 
--- tests/run-pass/concurrency/tls_lib_drop.rs | 45 +++++++++++++++++-- .../run-pass/concurrency/tls_lib_drop.stdout | 2 + 2 files changed, 43 insertions(+), 4 deletions(-) diff --git a/tests/run-pass/concurrency/tls_lib_drop.rs b/tests/run-pass/concurrency/tls_lib_drop.rs index 0d1808cbe0..de2566de85 100644 --- a/tests/run-pass/concurrency/tls_lib_drop.rs +++ b/tests/run-pass/concurrency/tls_lib_drop.rs @@ -1,8 +1,5 @@ // ignore-windows: Concurrency on Windows is not supported yet. -//! Check that destructors of the library thread locals are executed immediately -//! after a thread terminates. - use std::cell::RefCell; use std::thread; @@ -20,7 +17,9 @@ thread_local! { static A: TestCell = TestCell { value: RefCell::new(0) }; } -fn main() { +/// Check that destructors of the library thread locals are executed immediately +/// after a thread terminates. +fn check_destructors() { thread::spawn(|| { A.with(|f| { assert_eq!(*f.value.borrow(), 0); @@ -31,3 +30,41 @@ fn main() { .unwrap(); println!("Continue main.") } + +struct JoinCell { + value: RefCell>>, +} + +impl Drop for JoinCell { + fn drop(&mut self) { + let join_handle = self.value.borrow_mut().take().unwrap(); + println!("Joining: {}", join_handle.join().unwrap()); + } +} + +thread_local! { + static B: JoinCell = JoinCell { value: RefCell::new(None) }; +} + +/// Check that the destructor can be blocked joining another thread. +fn check_blocking() { + thread::spawn(|| { + B.with(|f| { + assert!(f.value.borrow().is_none()); + let handle = thread::spawn(|| 7); + *f.value.borrow_mut() = Some(handle); + }); + }) + .join() + .unwrap(); + println!("Continue main 2."); + // Preempt the main thread so that the destructor gets executed and can join + // the thread. + thread::yield_now(); + thread::yield_now(); +} + +fn main() { + check_destructors(); + check_blocking(); +} diff --git a/tests/run-pass/concurrency/tls_lib_drop.stdout b/tests/run-pass/concurrency/tls_lib_drop.stdout index d2bbb866b7..d622c0ccce 100644 --- a/tests/run-pass/concurrency/tls_lib_drop.stdout +++ b/tests/run-pass/concurrency/tls_lib_drop.stdout @@ -1,2 +1,4 @@ Dropping: 5 Continue main. +Continue main 2. +Joining: 7 From c56ef31780c63e03ada0a8282e5d95ba1f082d92 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Mon, 27 Apr 2020 11:01:35 -0700 Subject: [PATCH 70/77] Improve comments. --- src/shims/thread.rs | 3 +++ src/thread.rs | 3 +++ 2 files changed, 6 insertions(+) diff --git a/src/shims/thread.rs b/src/shims/thread.rs index 27e9980852..d11853d534 100644 --- a/src/shims/thread.rs +++ b/src/shims/thread.rs @@ -32,6 +32,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let func_arg = this.read_immediate(arg)?; + // Note: the returned value is currently ignored (see the FIXME in + // pthread_join below) because the Rust standard library does not use + // it. let ret_place = this.allocate(this.layout_of(this.tcx.types.usize)?, MiriMemoryKind::Machine.into()); diff --git a/src/thread.rs b/src/thread.rs index 9408dbe56c..715107530c 100644 --- a/src/thread.rs +++ b/src/thread.rs @@ -358,6 +358,9 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { /// long as we can and switch only when we have to (the active thread was /// blocked, terminated, or has explicitly asked to be preempted). fn schedule(&mut self) -> InterpResult<'tcx, SchedulingAction> { + // Check whether the thread has **just** terminated (`check_terminated` + // checks whether the thread has popped all its stack and if yes, sets + // the thread state to terminated.) 
if self.threads[self.active_thread].check_terminated() { // Check if we need to unblock any threads. for (i, thread) in self.threads.iter_enumerated_mut() { From df2ca53b6978c7ac2ec6e271a5ee73fba70877c5 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Mon, 27 Apr 2020 12:32:57 -0700 Subject: [PATCH 71/77] Make From implementations non-failing. --- src/shims/thread.rs | 6 ++++-- src/thread.rs | 12 +++++++----- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/src/shims/thread.rs b/src/shims/thread.rs index d11853d534..29a4ed3676 100644 --- a/src/shims/thread.rs +++ b/src/shims/thread.rs @@ -1,3 +1,5 @@ +use std::convert::TryInto; + use crate::*; use rustc_target::abi::LayoutOf; @@ -63,7 +65,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx } let thread_id = this.read_scalar(thread)?.to_machine_usize(this)?; - this.join_thread(thread_id.into())?; + this.join_thread(thread_id.try_into().expect("thread ID should fit in u32"))?; Ok(0) } @@ -72,7 +74,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let this = self.eval_context_mut(); let thread_id = this.read_scalar(thread)?.to_machine_usize(this)?; - this.detach_thread(thread_id.into())?; + this.detach_thread(thread_id.try_into().expect("thread ID should fit in u32"))?; Ok(0) } diff --git a/src/thread.rs b/src/thread.rs index 715107530c..69e7bcdb29 100644 --- a/src/thread.rs +++ b/src/thread.rs @@ -3,7 +3,7 @@ use std::cell::RefCell; use std::convert::TryFrom; use std::convert::TryInto; -use std::num::NonZeroU32; +use std::num::{NonZeroU32, TryFromIntError}; use log::trace; @@ -45,20 +45,22 @@ impl Idx for ThreadId { fn new(idx: usize) -> Self { ThreadId(u32::try_from(idx).unwrap()) } + fn index(self) -> usize { usize::try_from(self.0).unwrap() } } -impl From for ThreadId { - fn from(id: u64) -> Self { - Self(u32::try_from(id).unwrap()) +impl TryFrom for ThreadId { + type Error = TryFromIntError; + fn try_from(id: u64) -> Result { + u32::try_from(id).map(|id_u32| Self(id_u32)) } } impl From for ThreadId { fn from(id: u32) -> Self { - Self(u32::try_from(id).unwrap()) + Self(id) } } From 1355574bebb5f7cb572bb7399964f91101e8852e Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Mon, 27 Apr 2020 14:00:39 -0700 Subject: [PATCH 72/77] Delete remaining tls entries after all destructors completed. --- src/shims/tls.rs | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/src/shims/tls.rs b/src/shims/tls.rs index a98a802564..d5ea430dd2 100644 --- a/src/shims/tls.rs +++ b/src/shims/tls.rs @@ -211,6 +211,14 @@ impl<'tcx> TlsData<'tcx> { false } } + + /// Delete all TLS entries for the given thread. This function should be + /// called after all TLS destructors have already finished. + fn delete_all_thread_tls(&mut self, thread_id: ThreadId) { + for TlsEntry { data, .. } in self.keys.values_mut() { + data.remove(&thread_id); + } + } } impl<'mir, 'tcx: 'mir> EvalContextPrivExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {} @@ -271,8 +279,9 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx Ok(()) } - /// Schedule a pthread TLS destructor. - fn schedule_pthread_tls_dtors(&mut self) -> InterpResult<'tcx> { + /// Schedule a pthread TLS destructor. Returns `true` if found + /// a destructor to schedule, and `false` otherwise. 
+ fn schedule_pthread_tls_dtors(&mut self) -> InterpResult<'tcx, bool> { let this = self.eval_context_mut(); let active_thread = this.get_active_thread()?; @@ -300,11 +309,11 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx )?; this.enable_thread(active_thread)?; - return Ok(()); + return Ok(true); } this.machine.tls.dtors_running.get_mut(&active_thread).unwrap().last_dtor_key = None; - Ok(()) + Ok(false) } } @@ -322,16 +331,21 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let this = self.eval_context_mut(); let active_thread = this.get_active_thread()?; - if this.tcx.sess.target.target.target_os == "windows" { + let finished = if this.tcx.sess.target.target.target_os == "windows" { if !this.machine.tls.set_dtors_running_for_thread(active_thread) { this.schedule_windows_tls_dtors()?; } + true } else { this.machine.tls.set_dtors_running_for_thread(active_thread); // The macOS thread wide destructor runs "before any TLS slots get // freed", so do that first. this.schedule_macos_tls_dtor()?; - this.schedule_pthread_tls_dtors()?; + this.schedule_pthread_tls_dtors()? + }; + + if finished { + this.machine.tls.delete_all_thread_tls(active_thread); } Ok(()) From 3b5854191c35107a50ff83dd1e8b46f58d964224 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Mon, 27 Apr 2020 15:21:01 -0700 Subject: [PATCH 73/77] Fix MacOS and Windows builds. --- src/shims/tls.rs | 25 +++++++++++++++++-------- tests/run-pass/libc.rs | 1 + 2 files changed, 18 insertions(+), 8 deletions(-) diff --git a/src/shims/tls.rs b/src/shims/tls.rs index d5ea430dd2..f13d9e6dfe 100644 --- a/src/shims/tls.rs +++ b/src/shims/tls.rs @@ -253,10 +253,10 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx } /// Schedule the MacOS thread destructor of the thread local storage to be - /// executed. + /// executed. Returns `true` if scheduled. /// /// Note: It is safe to call this function also on other Unixes. - fn schedule_macos_tls_dtor(&mut self) -> InterpResult<'tcx> { + fn schedule_macos_tls_dtor(&mut self) -> InterpResult<'tcx, bool> { let this = self.eval_context_mut(); let thread_id = this.get_active_thread()?; if let Some((instance, data)) = this.machine.tls.thread_dtors.remove(&thread_id) { @@ -275,8 +275,10 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx // guaranteed that we will schedule it again. The `dtors_running` // flag will prevent the code from adding the destructor again. this.enable_thread(thread_id)?; + Ok(true) + } else { + Ok(false) } - Ok(()) } /// Schedule a pthread TLS destructor. Returns `true` if found @@ -331,20 +333,27 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let this = self.eval_context_mut(); let active_thread = this.get_active_thread()?; - let finished = if this.tcx.sess.target.target.target_os == "windows" { + let scheduled_next = if this.tcx.sess.target.target.target_os == "windows" { if !this.machine.tls.set_dtors_running_for_thread(active_thread) { this.schedule_windows_tls_dtors()?; + true + } else { + false } - true } else { this.machine.tls.set_dtors_running_for_thread(active_thread); // The macOS thread wide destructor runs "before any TLS slots get // freed", so do that first. - this.schedule_macos_tls_dtor()?; - this.schedule_pthread_tls_dtors()? + if this.schedule_macos_tls_dtor()? { + true + } else { + this.schedule_pthread_tls_dtors()? 
+ } }; - if finished { + if !scheduled_next { + // No dtors scheduled means that we are finished. Delete the + // remaining TLS entries. this.machine.tls.delete_all_thread_tls(active_thread); } diff --git a/tests/run-pass/libc.rs b/tests/run-pass/libc.rs index 5873d42969..36805fc83e 100644 --- a/tests/run-pass/libc.rs +++ b/tests/run-pass/libc.rs @@ -144,6 +144,7 @@ fn test_rwlock_libc_static_initializer() { /// Test whether the `prctl` shim correctly sets the thread name. /// /// Note: `prctl` exists only on Linux. +#[cfg(target_os = "linux")] fn test_prctl_thread_name() { use std::ffi::CString; unsafe { From 46b03174d02c06fe062747e732733a39a1971817 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Wed, 29 Apr 2020 13:16:22 -0700 Subject: [PATCH 74/77] Improve code readability and comments. --- src/eval.rs | 3 ++ src/shims/sync.rs | 2 +- src/shims/thread.rs | 4 +-- src/shims/tls.rs | 67 +++++++++++++++++++++++++++------------------ src/thread.rs | 7 ++--- 5 files changed, 50 insertions(+), 33 deletions(-) diff --git a/src/eval.rs b/src/eval.rs index 89d61d141a..6352d06268 100644 --- a/src/eval.rs +++ b/src/eval.rs @@ -211,6 +211,9 @@ pub fn eval_main<'tcx>(tcx: TyCtxt<'tcx>, main_id: DefId, config: MiriConfig) -> assert!(ecx.step()?, "a terminated thread was scheduled for execution"); } SchedulingAction::ExecuteDtors => { + // This will either enable the thread again (so we go back + // to `ExecuteStep`), or determine that this thread is done + // for good. ecx.schedule_next_tls_dtor_for_active_thread()?; } SchedulingAction::Stop => { diff --git a/src/shims/sync.rs b/src/shims/sync.rs index 9dad302706..bc64b1e97a 100644 --- a/src/shims/sync.rs +++ b/src/shims/sync.rs @@ -555,7 +555,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx // result, not only readers can starve writers, but also writers can // starve readers. if let Some(_writer) = this.unblock_some_thread(writer_blockset)? { - rwlock_set_writers(this, rwlock_op, Scalar::from_u32(1))?; + assert_eq!(writers, 1); } else { rwlock_set_writers(this, rwlock_op, Scalar::from_u32(0))?; let mut readers = 0; diff --git a/src/shims/thread.rs b/src/shims/thread.rs index 29a4ed3676..3aca9520f6 100644 --- a/src/shims/thread.rs +++ b/src/shims/thread.rs @@ -25,7 +25,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let thread_info_place = this.deref_operand(thread)?; this.write_scalar( - Scalar::from_uint(new_thread_id.to_u128(), thread_info_place.layout.size), + Scalar::from_uint(new_thread_id.to_u32(), thread_info_place.layout.size), thread_info_place.into(), )?; @@ -83,7 +83,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let this = self.eval_context_mut(); let thread_id = this.get_active_thread()?; - this.write_scalar(Scalar::from_uint(thread_id.to_u128(), dest.layout.size), dest) + this.write_scalar(Scalar::from_uint(thread_id.to_u32(), dest.layout.size), dest) } fn prctl( diff --git a/src/shims/tls.rs b/src/shims/tls.rs index f13d9e6dfe..8a5bb7b42c 100644 --- a/src/shims/tls.rs +++ b/src/shims/tls.rs @@ -26,7 +26,9 @@ pub struct TlsEntry<'tcx> { #[derive(Clone, Debug)] struct RunningDtorsState { - /// The last TlsKey used to retrieve a TLS destructor. + /// The last TlsKey used to retrieve a TLS destructor. `None` means that we + /// have not tried to retrieve a TLS destructor yet or that we already tried + /// all keys. 
last_dtor_key: Option, } @@ -40,7 +42,7 @@ pub struct TlsData<'tcx> { /// A single per thread destructor of the thread local storage (that's how /// things work on macOS) with a data argument. - thread_dtors: BTreeMap, Scalar)>, + macos_thread_dtors: BTreeMap, Scalar)>, /// State for currently running TLS dtors. If this map contains a key for a /// specific thread, it means that we are in the "destruct" phase, during @@ -53,7 +55,7 @@ impl<'tcx> Default for TlsData<'tcx> { TlsData { next_key: 1, // start with 1 as we must not use 0 on Windows keys: Default::default(), - thread_dtors: Default::default(), + macos_thread_dtors: Default::default(), dtors_running: Default::default(), } } @@ -143,7 +145,7 @@ impl<'tcx> TlsData<'tcx> { // UB, according to libstd docs. throw_ub_format!("setting thread's local storage destructor while destructors are already running"); } - if self.thread_dtors.insert(thread, (dtor, data)).is_some() { + if self.macos_thread_dtors.insert(thread, (dtor, data)).is_some() { throw_unsup_format!("setting more than one thread local storage destructor for the same thread is not supported"); } Ok(()) @@ -186,6 +188,7 @@ impl<'tcx> TlsData<'tcx> { match data.entry(thread_id) { Entry::Occupied(entry) => { if let Some(dtor) = dtor { + // Set TLS data to NULL, and call dtor with old value. let data_scalar = entry.remove(); let ret = Some((*dtor, data_scalar, key)); return ret; @@ -204,6 +207,8 @@ impl<'tcx> TlsData<'tcx> { if self.dtors_running.contains_key(&thread) { true } else { + // We need to guard this `insert` with a check because otherwise we + // would risk to overwrite `last_dtor_key` with `None`. self.dtors_running.insert( thread, RunningDtorsState { last_dtor_key: None } @@ -259,7 +264,7 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn schedule_macos_tls_dtor(&mut self) -> InterpResult<'tcx, bool> { let this = self.eval_context_mut(); let thread_id = this.get_active_thread()?; - if let Some((instance, data)) = this.machine.tls.thread_dtors.remove(&thread_id) { + if let Some((instance, data)) = this.machine.tls.macos_thread_dtors.remove(&thread_id) { trace!("Running macos dtor {:?} on {:?} at {:?}", instance, data, thread_id); let ret_place = MPlaceTy::dangling(this.machine.layouts.unit, this).into(); @@ -283,7 +288,7 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx /// Schedule a pthread TLS destructor. Returns `true` if found /// a destructor to schedule, and `false` otherwise. - fn schedule_pthread_tls_dtors(&mut self) -> InterpResult<'tcx, bool> { + fn schedule_next_pthread_tls_dtor(&mut self) -> InterpResult<'tcx, bool> { let this = self.eval_context_mut(); let active_thread = this.get_active_thread()?; @@ -329,33 +334,43 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx /// /// FIXME: we do not support yet deallocation of thread local statics. /// Issue: https://github.com/rust-lang/miri/issues/1369 + /// + /// Note: we consistently run TLS destructors for all threads, including the + /// main thread. However, it is not clear that we should run the TLS + /// destructors for the main thread. See issue: + /// https://github.com/rust-lang/rust/issues/28129. 
fn schedule_next_tls_dtor_for_active_thread(&mut self) -> InterpResult<'tcx> { let this = self.eval_context_mut(); let active_thread = this.get_active_thread()?; - let scheduled_next = if this.tcx.sess.target.target.target_os == "windows" { - if !this.machine.tls.set_dtors_running_for_thread(active_thread) { + if this.machine.tls.set_dtors_running_for_thread(active_thread) { + // This is the first time we got asked to schedule a destructor. The + // Windows schedule destructor function must be called exactly once, + // this is why it is in this block. + if this.tcx.sess.target.target.target_os == "windows" { + // On Windows, we signal that the thread quit by starting the + // relevant function, reenabling the thread, and going back to + // the scheduler. this.schedule_windows_tls_dtors()?; - true - } else { - false - } - } else { - this.machine.tls.set_dtors_running_for_thread(active_thread); - // The macOS thread wide destructor runs "before any TLS slots get - // freed", so do that first. - if this.schedule_macos_tls_dtor()? { - true - } else { - this.schedule_pthread_tls_dtors()? + return Ok(()) } - }; - - if !scheduled_next { - // No dtors scheduled means that we are finished. Delete the - // remaining TLS entries. - this.machine.tls.delete_all_thread_tls(active_thread); } + // The macOS thread wide destructor runs "before any TLS slots get + // freed", so do that first. + if this.schedule_macos_tls_dtor()? { + // We have scheduled a MacOS dtor to run on the thread. Execute it + // to completion and come back here. Scheduling a destructor + // destroys it, so we will not enter this branch again. + return Ok(()) + } + if this.schedule_next_pthread_tls_dtor()? { + // We have scheduled a pthread destructor and removed it from the + // destructors list. Run it to completion and come back here. + return Ok(()) + } + + // All dtors done! + this.machine.tls.delete_all_thread_tls(active_thread); Ok(()) } diff --git a/src/thread.rs b/src/thread.rs index 69e7bcdb29..7d394c9002 100644 --- a/src/thread.rs +++ b/src/thread.rs @@ -2,7 +2,6 @@ use std::cell::RefCell; use std::convert::TryFrom; -use std::convert::TryInto; use std::num::{NonZeroU32, TryFromIntError}; use log::trace; @@ -36,8 +35,8 @@ pub struct ThreadId(u32); const MAIN_THREAD: ThreadId = ThreadId(0); impl ThreadId { - pub fn to_u128(self) -> u128 { - self.0.try_into().unwrap() + pub fn to_u32(self) -> u32 { + self.0 } } @@ -362,7 +361,7 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { fn schedule(&mut self) -> InterpResult<'tcx, SchedulingAction> { // Check whether the thread has **just** terminated (`check_terminated` // checks whether the thread has popped all its stack and if yes, sets - // the thread state to terminated.) + // the thread state to terminated). if self.threads[self.active_thread].check_terminated() { // Check if we need to unblock any threads. for (i, thread) in self.threads.iter_enumerated_mut() { From 0e052ab8970777e8f418c4ccf495845804aeae90 Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Wed, 29 Apr 2020 15:12:09 -0700 Subject: [PATCH 75/77] Use Entry API in set_dtors_running. --- src/shims/tls.rs | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/src/shims/tls.rs b/src/shims/tls.rs index 8a5bb7b42c..57b041e685 100644 --- a/src/shims/tls.rs +++ b/src/shims/tls.rs @@ -1,7 +1,8 @@ //! Implement thread-local storage. 
use std::collections::BTreeMap; -use std::collections::btree_map::Entry; +use std::collections::btree_map::Entry as BTreeEntry; +use std::collections::hash_map::Entry as HashMapEntry; use log::trace; @@ -186,7 +187,7 @@ impl<'tcx> TlsData<'tcx> { thread_local.range_mut((start, Unbounded)) { match data.entry(thread_id) { - Entry::Occupied(entry) => { + BTreeEntry::Occupied(entry) => { if let Some(dtor) = dtor { // Set TLS data to NULL, and call dtor with old value. let data_scalar = entry.remove(); @@ -194,7 +195,7 @@ impl<'tcx> TlsData<'tcx> { return ret; } } - Entry::Vacant(_) => {} + BTreeEntry::Vacant(_) => {} } } None @@ -204,16 +205,14 @@ impl<'tcx> TlsData<'tcx> { /// the existing values stored in `dtors_running` for this thread. Returns /// `true` if dtors for `thread` are already running. fn set_dtors_running_for_thread(&mut self, thread: ThreadId) -> bool { - if self.dtors_running.contains_key(&thread) { - true - } else { - // We need to guard this `insert` with a check because otherwise we - // would risk to overwrite `last_dtor_key` with `None`. - self.dtors_running.insert( - thread, - RunningDtorsState { last_dtor_key: None } - ); - false + match self.dtors_running.entry(thread) { + HashMapEntry::Occupied(_) => true, + HashMapEntry::Vacant(entry) => { + // We cannot just do `self.dtors_running.insert` because that + // would overwrite `last_dtor_key` with `None`. + entry.insert(RunningDtorsState { last_dtor_key: None }); + false + } } } From 603ec0b3d848f4f0f63f4842231ac13e0fa0ce8c Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Wed, 29 Apr 2020 15:20:26 -0700 Subject: [PATCH 76/77] Fix a regression in Windows dtors. --- src/shims/tls.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/shims/tls.rs b/src/shims/tls.rs index 57b041e685..f78b46ec3e 100644 --- a/src/shims/tls.rs +++ b/src/shims/tls.rs @@ -342,7 +342,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let this = self.eval_context_mut(); let active_thread = this.get_active_thread()?; - if this.machine.tls.set_dtors_running_for_thread(active_thread) { + if !this.machine.tls.set_dtors_running_for_thread(active_thread) { // This is the first time we got asked to schedule a destructor. The // Windows schedule destructor function must be called exactly once, // this is why it is in this block. From 48da0cf489c1cbbb309692db4049632d83740a8e Mon Sep 17 00:00:00 2001 From: Vytautas Astrauskas Date: Thu, 30 Apr 2020 08:35:59 -0700 Subject: [PATCH 77/77] Fix prctl SET_NAME and GET_NAME behaviour. --- src/shims/thread.rs | 10 ++++++++-- src/thread.rs | 8 ++++---- tests/run-pass/libc.rs | 17 +++++++++++++---- 3 files changed, 25 insertions(+), 10 deletions(-) diff --git a/src/shims/thread.rs b/src/shims/thread.rs index 3aca9520f6..2f553c1c72 100644 --- a/src/shims/thread.rs +++ b/src/shims/thread.rs @@ -99,11 +99,17 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let option = this.read_scalar(option)?.to_i32()?; if option == this.eval_libc_i32("PR_SET_NAME")? { let address = this.read_scalar(arg2)?.not_undef()?; - let name = this.memory.read_c_str(address)?.to_owned(); + let mut name = this.memory.read_c_str(address)?.to_owned(); + // The name should be no more than 16 bytes, including the null + // byte. Since `read_c_str` returns the string without the null + // byte, we need to truncate to 15. + name.truncate(15); this.set_active_thread_name(name)?; } else if option == this.eval_libc_i32("PR_GET_NAME")? 
{ let address = this.read_scalar(arg2)?.not_undef()?; - let name = this.get_active_thread_name()?.to_vec(); + let mut name = this.get_active_thread_name()?.to_vec(); + name.push(0u8); + assert!(name.len() <= 16); this.memory.write_bytes(address, name)?; } else { throw_unsup_format!("unsupported prctl option {}", option); diff --git a/src/thread.rs b/src/thread.rs index 7d394c9002..376920e225 100644 --- a/src/thread.rs +++ b/src/thread.rs @@ -313,11 +313,11 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { } /// Get the name of the active thread. - fn get_thread_name(&self) -> InterpResult<'tcx, &[u8]> { + fn get_thread_name(&self) -> &[u8] { if let Some(ref thread_name) = self.active_thread_ref().thread_name { - Ok(thread_name) + thread_name } else { - throw_ub_format!("thread {:?} has no name set", self.active_thread) + b"" } } @@ -574,7 +574,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx 'mir: 'c, { let this = self.eval_context_ref(); - this.machine.threads.get_thread_name() + Ok(this.machine.threads.get_thread_name()) } #[inline] diff --git a/tests/run-pass/libc.rs b/tests/run-pass/libc.rs index 36805fc83e..04ca5c0b3b 100644 --- a/tests/run-pass/libc.rs +++ b/tests/run-pass/libc.rs @@ -147,12 +147,21 @@ fn test_rwlock_libc_static_initializer() { #[cfg(target_os = "linux")] fn test_prctl_thread_name() { use std::ffi::CString; + use libc::c_long; unsafe { + let mut buf = [255; 10]; + assert_eq!(libc::prctl(libc::PR_GET_NAME, buf.as_mut_ptr() as c_long, 0 as c_long, 0 as c_long, 0 as c_long), 0); + assert_eq!(b"\0", &buf); let thread_name = CString::new("hello").expect("CString::new failed"); - assert_eq!(libc::prctl(libc::PR_SET_NAME, thread_name.as_ptr() as libc::c_long, 0 as libc::c_long, 0 as libc::c_long, 0 as libc::c_long), 0); - let mut buf = [0; 6]; - assert_eq!(libc::prctl(libc::PR_GET_NAME, buf.as_mut_ptr() as libc::c_long, 0 as libc::c_long, 0 as libc::c_long, 0 as libc::c_long), 0); - assert_eq!(thread_name.as_bytes_with_nul(), buf); + assert_eq!(libc::prctl(libc::PR_SET_NAME, thread_name.as_ptr() as c_long, 0 as c_long, 0 as c_long, 0 as c_long), 0); + let mut buf = [255; 6]; + assert_eq!(libc::prctl(libc::PR_GET_NAME, buf.as_mut_ptr() as c_long, 0 as c_long, 0 as c_long, 0 as c_long), 0); + assert_eq!(b"hello\0", &buf); + let long_thread_name = CString::new("01234567890123456789").expect("CString::new failed"); + assert_eq!(libc::prctl(libc::PR_SET_NAME, long_thread_name.as_ptr() as c_long, 0 as c_long, 0 as c_long, 0 as c_long), 0); + let mut buf = [255; 16]; + assert_eq!(libc::prctl(libc::PR_GET_NAME, buf.as_mut_ptr() as c_long, 0 as c_long, 0 as c_long, 0 as c_long), 0); + assert_eq!(b"012345678901234\0", &buf); } }
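
For context on the final patch: Linux limits a thread name to 16 bytes
including the terminating NUL (TASK_COMM_LEN), which is why the shim
truncates the incoming name to 15 bytes on PR_SET_NAME and appends a NUL on
PR_GET_NAME. A minimal sketch of that rule (illustrative only;
`truncate_thread_name` is a hypothetical helper, not part of the patch):

    /// Linux's limit on a thread name, including the NUL terminator.
    const TASK_COMM_LEN: usize = 16;

    /// Keep at most 15 bytes so the NUL terminator still fits.
    fn truncate_thread_name(mut name: Vec<u8>) -> Vec<u8> {
        name.truncate(TASK_COMM_LEN - 1);
        name
    }

    fn main() {
        let long = b"01234567890123456789".to_vec();
        let stored = truncate_thread_name(long);
        assert_eq!(stored, b"012345678901234".to_vec());
        // PR_GET_NAME writes the name back with a NUL appended, so the
        // buffer the caller provides must hold up to 16 bytes.
        let mut returned = stored.clone();
        returned.push(0);
        assert_eq!(returned.len(), TASK_COMM_LEN);
    }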