Rollup merge of rust-lang#77657 - fusion-engineering-forks:cleanup-cloudabi-sync, r=dtolnay

Cleanup cloudabi mutexes and condvars

This gets rid of lots of unnecessary unsafety.

All the `AtomicU32`s were wrapped in `UnsafeCell` or `UnsafeCell<MaybeUninit>`, and raw pointers were used to get to the `AtomicU32` inside. This change cleans that up by using `AtomicU32` directly.
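A minimal before/after sketch of that pattern, with the kernel calls stripped out and `0` standing in for `abi::CONDVAR_HAS_NO_WAITERS.0` (`OldCondvar`, `NewCondvar`, and `has_waiters` are illustrative names, not identifiers from the patch):

```rust
use std::cell::UnsafeCell;
use std::sync::atomic::{AtomicU32, Ordering};

// Old pattern: the atomic is hidden behind UnsafeCell, so every
// access has to go through a raw pointer inside an unsafe block.
struct OldCondvar {
    condvar: UnsafeCell<AtomicU32>,
}

impl OldCondvar {
    const fn new() -> Self {
        OldCondvar { condvar: UnsafeCell::new(AtomicU32::new(0)) }
    }

    fn has_waiters(&self) -> bool {
        let condvar = self.condvar.get(); // *mut AtomicU32
        // Dereferencing the raw pointer is what forces `unsafe`.
        unsafe { (*condvar).load(Ordering::Relaxed) != 0 }
    }
}

// New pattern: AtomicU32 already provides interior mutability and
// is Sync, so it can be stored and loaded directly, no unsafe needed.
struct NewCondvar {
    condvar: AtomicU32,
}

impl NewCondvar {
    const fn new() -> Self {
        NewCondvar { condvar: AtomicU32::new(0) }
    }

    fn has_waiters(&self) -> bool {
        self.condvar.load(Ordering::Relaxed) != 0
    }
}

fn main() {
    assert!(!OldCondvar::new().has_waiters());
    assert!(!NewCondvar::new().has_waiters());
}
```

Since `AtomicU32` already supplies the interior mutability, the `UnsafeCell` wrapper bought nothing except the obligation to go through raw pointers.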

Also replaces an `UnsafeCell<u32>` with a safer `Cell<u32>`.
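The recursion counter is only touched by the thread that currently holds the lock, so it needs interior mutability but not atomicity; `Cell<u32>` provides exactly that with no raw pointers involved. A small sketch of the idea (`Recursion`, `enter`, and `exit` are illustrative, not part of the patch):

```rust
use std::cell::Cell;

// Stand-in for the recursion counter in ReentrantMutex. Cell<u32>
// allows mutation through &self without unsafe code; in the mutex
// this is sound because only the lock-holding thread touches it.
struct Recursion {
    count: Cell<u32>,
}

impl Recursion {
    const fn new() -> Self {
        Recursion { count: Cell::new(0) }
    }

    fn enter(&self) {
        self.count.set(self.count.get() + 1);
    }

    fn exit(&self) {
        self.count.set(self.count.get() - 1);
    }
}

fn main() {
    let r = Recursion::new();
    r.enter();
    r.enter();
    r.exit();
    assert_eq!(r.count.get(), 1);
}
```

One consequence visible in the diff below: `Cell<u32>` is not `Sync`, so `ReentrantMutex` gains explicit `unsafe impl Send`/`unsafe impl Sync`, concentrating the cross-thread soundness argument in one place.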

@rustbot modify labels: +C-cleanup
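Even after the cleanup, the CloudABI kernel interface still wants raw pointers, so the new code casts at the call site (for example `&self.condvar as *const AtomicU32 as *mut abi::condvar`) instead of storing a pointer-friendly wrapper. A sketch of that cast pattern, where `kernel_call` is a hypothetical stand-in for an ABI function like `abi::condvar_signal`:

```rust
use std::sync::atomic::AtomicU32;

// Hypothetical stand-in for a CloudABI kernel call; it receives
// only the address of the atomic, as the real ABI functions do.
unsafe fn kernel_call(ptr: *mut u32) -> u32 {
    // A real kernel call would operate on *ptr; reading through the
    // pointer here just shows the cast produces a usable pointer.
    *ptr
}

fn main() {
    let condvar = AtomicU32::new(42);
    // A shared reference to the atomic is cast to a raw pointer at
    // the FFI boundary; no UnsafeCell wrapper is needed because the
    // atomic itself supplies the interior mutability.
    let ptr = &condvar as *const AtomicU32 as *mut u32;
    let value = unsafe { kernel_call(ptr) };
    assert_eq!(value, 42);
}
```

This relies on `AtomicU32` being documented to have the same in-memory representation as `u32`.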
JohnTitor authored Oct 15, 2020
2 parents 5e8f5cb + b3be11e commit 72dbc60
Showing 3 changed files with 65 additions and 77 deletions.
41 changes: 18 additions & 23 deletions library/std/src/sys/cloudabi/condvar.rs
@@ -1,4 +1,3 @@
- use crate::cell::UnsafeCell;
use crate::mem;
use crate::sync::atomic::{AtomicU32, Ordering};
use crate::sys::cloudabi::abi;
@@ -12,37 +11,36 @@ extern "C" {
}

pub struct Condvar {
- condvar: UnsafeCell<AtomicU32>,
+ condvar: AtomicU32,
}

pub type MovableCondvar = Condvar;

unsafe impl Send for Condvar {}
unsafe impl Sync for Condvar {}

- const NEW: Condvar =
- Condvar { condvar: UnsafeCell::new(AtomicU32::new(abi::CONDVAR_HAS_NO_WAITERS.0)) };

impl Condvar {
pub const fn new() -> Condvar {
- NEW
+ Condvar { condvar: AtomicU32::new(abi::CONDVAR_HAS_NO_WAITERS.0) }
}

pub unsafe fn init(&mut self) {}

pub unsafe fn notify_one(&self) {
- let condvar = self.condvar.get();
- if (*condvar).load(Ordering::Relaxed) != abi::CONDVAR_HAS_NO_WAITERS.0 {
- let ret = abi::condvar_signal(condvar as *mut abi::condvar, abi::scope::PRIVATE, 1);
+ if self.condvar.load(Ordering::Relaxed) != abi::CONDVAR_HAS_NO_WAITERS.0 {
+ let ret = abi::condvar_signal(
+ &self.condvar as *const AtomicU32 as *mut abi::condvar,
+ abi::scope::PRIVATE,
+ 1,
+ );
assert_eq!(ret, abi::errno::SUCCESS, "Failed to signal on condition variable");
}
}

pub unsafe fn notify_all(&self) {
- let condvar = self.condvar.get();
- if (*condvar).load(Ordering::Relaxed) != abi::CONDVAR_HAS_NO_WAITERS.0 {
+ if self.condvar.load(Ordering::Relaxed) != abi::CONDVAR_HAS_NO_WAITERS.0 {
let ret = abi::condvar_signal(
- condvar as *mut abi::condvar,
+ &self.condvar as *const AtomicU32 as *mut abi::condvar,
abi::scope::PRIVATE,
abi::nthreads::MAX,
);
@@ -53,20 +51,19 @@ impl Condvar {
pub unsafe fn wait(&self, mutex: &Mutex) {
let mutex = mutex::raw(mutex);
assert_eq!(
- (*mutex).load(Ordering::Relaxed) & !abi::LOCK_KERNEL_MANAGED.0,
+ mutex.load(Ordering::Relaxed) & !abi::LOCK_KERNEL_MANAGED.0,
__pthread_thread_id.0 | abi::LOCK_WRLOCKED.0,
"This lock is not write-locked by this thread"
);

// Call into the kernel to wait on the condition variable.
- let condvar = self.condvar.get();
let subscription = abi::subscription {
type_: abi::eventtype::CONDVAR,
union: abi::subscription_union {
condvar: abi::subscription_condvar {
- condvar: condvar as *mut abi::condvar,
+ condvar: &self.condvar as *const AtomicU32 as *mut abi::condvar,
condvar_scope: abi::scope::PRIVATE,
- lock: mutex as *mut abi::lock,
+ lock: mutex as *const AtomicU32 as *mut abi::lock,
lock_scope: abi::scope::PRIVATE,
},
},
@@ -86,23 +83,22 @@ impl Condvar {
pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
let mutex = mutex::raw(mutex);
assert_eq!(
- (*mutex).load(Ordering::Relaxed) & !abi::LOCK_KERNEL_MANAGED.0,
+ mutex.load(Ordering::Relaxed) & !abi::LOCK_KERNEL_MANAGED.0,
__pthread_thread_id.0 | abi::LOCK_WRLOCKED.0,
"This lock is not write-locked by this thread"
);

// Call into the kernel to wait on the condition variable.
- let condvar = self.condvar.get();
let timeout =
checked_dur2intervals(&dur).expect("overflow converting duration to nanoseconds");
let subscriptions = [
abi::subscription {
type_: abi::eventtype::CONDVAR,
union: abi::subscription_union {
condvar: abi::subscription_condvar {
- condvar: condvar as *mut abi::condvar,
+ condvar: &self.condvar as *const AtomicU32 as *mut abi::condvar,
condvar_scope: abi::scope::PRIVATE,
- lock: mutex as *mut abi::lock,
+ lock: mutex as *const AtomicU32 as *mut abi::lock,
lock_scope: abi::scope::PRIVATE,
},
},
@@ -124,7 +120,7 @@ impl Condvar {
let mut nevents: mem::MaybeUninit<usize> = mem::MaybeUninit::uninit();
let ret = abi::poll(
subscriptions.as_ptr(),
- mem::MaybeUninit::first_ptr_mut(&mut events),
+ mem::MaybeUninit::slice_as_mut_ptr(&mut events),
2,
nevents.as_mut_ptr(),
);
@@ -144,9 +140,8 @@ impl Condvar {
}

pub unsafe fn destroy(&self) {
- let condvar = self.condvar.get();
assert_eq!(
- (*condvar).load(Ordering::Relaxed),
+ self.condvar.load(Ordering::Relaxed),
abi::CONDVAR_HAS_NO_WAITERS.0,
"Attempted to destroy a condition variable with blocked threads"
);
54 changes: 25 additions & 29 deletions library/std/src/sys/cloudabi/mutex.rs
@@ -1,4 +1,4 @@
- use crate::cell::UnsafeCell;
+ use crate::cell::Cell;
use crate::mem;
use crate::mem::MaybeUninit;
use crate::sync::atomic::{AtomicU32, Ordering};
@@ -17,7 +17,7 @@ pub struct Mutex(RWLock);

pub type MovableMutex = Mutex;

- pub unsafe fn raw(m: &Mutex) -> *mut AtomicU32 {
+ pub unsafe fn raw(m: &Mutex) -> &AtomicU32 {
rwlock::raw(&m.0)
}

@@ -50,28 +50,23 @@ impl Mutex {
}

pub struct ReentrantMutex {
- lock: UnsafeCell<MaybeUninit<AtomicU32>>,
- recursion: UnsafeCell<MaybeUninit<u32>>,
+ lock: AtomicU32,
+ recursion: Cell<u32>,
}

+ unsafe impl Send for ReentrantMutex {}
+ unsafe impl Sync for ReentrantMutex {}

impl ReentrantMutex {
pub const unsafe fn uninitialized() -> ReentrantMutex {
- ReentrantMutex {
- lock: UnsafeCell::new(MaybeUninit::uninit()),
- recursion: UnsafeCell::new(MaybeUninit::uninit()),
- }
+ ReentrantMutex { lock: AtomicU32::new(abi::LOCK_UNLOCKED.0), recursion: Cell::new(0) }
}

- pub unsafe fn init(&self) {
- *self.lock.get() = MaybeUninit::new(AtomicU32::new(abi::LOCK_UNLOCKED.0));
- *self.recursion.get() = MaybeUninit::new(0);
- }
+ pub unsafe fn init(&self) {}

pub unsafe fn try_lock(&self) -> bool {
// Attempt to acquire the lock.
- let lock = (*self.lock.get()).as_mut_ptr();
- let recursion = (*self.recursion.get()).as_mut_ptr();
- if let Err(old) = (*lock).compare_exchange(
+ if let Err(old) = self.lock.compare_exchange(
abi::LOCK_UNLOCKED.0,
__pthread_thread_id.0 | abi::LOCK_WRLOCKED.0,
Ordering::Acquire,
@@ -80,22 +75,22 @@ impl ReentrantMutex {
// If we fail to acquire the lock, it may be the case
// that we've already acquired it and may need to recurse.
if old & !abi::LOCK_KERNEL_MANAGED.0 == __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0 {
- *recursion += 1;
+ self.recursion.set(self.recursion.get() + 1);
true
} else {
false
}
} else {
// Success.
- assert_eq!(*recursion, 0, "Mutex has invalid recursion count");
+ assert_eq!(self.recursion.get(), 0, "Mutex has invalid recursion count");
true
}
}

pub unsafe fn lock(&self) {
if !self.try_lock() {
// Call into the kernel to acquire a write lock.
- let lock = self.lock.get();
+ let lock = &self.lock as *const AtomicU32;
let subscription = abi::subscription {
type_: abi::eventtype::LOCK_WRLOCK,
union: abi::subscription_union {
@@ -116,17 +111,17 @@ impl ReentrantMutex {
}

pub unsafe fn unlock(&self) {
- let lock = (*self.lock.get()).as_mut_ptr();
- let recursion = (*self.recursion.get()).as_mut_ptr();
assert_eq!(
- (*lock).load(Ordering::Relaxed) & !abi::LOCK_KERNEL_MANAGED.0,
+ self.lock.load(Ordering::Relaxed) & !abi::LOCK_KERNEL_MANAGED.0,
__pthread_thread_id.0 | abi::LOCK_WRLOCKED.0,
"This mutex is locked by a different thread"
);

- if *recursion > 0 {
- *recursion -= 1;
- } else if !(*lock)
+ let r = self.recursion.get();
+ if r > 0 {
+ self.recursion.set(r - 1);
+ } else if !self
+ .lock
.compare_exchange(
__pthread_thread_id.0 | abi::LOCK_WRLOCKED.0,
abi::LOCK_UNLOCKED.0,
@@ -137,19 +132,20 @@ impl ReentrantMutex {
{
// Lock is managed by kernelspace. Call into the kernel
// to unblock waiting threads.
- let ret = abi::lock_unlock(lock as *mut abi::lock, abi::scope::PRIVATE);
+ let ret = abi::lock_unlock(
+ &self.lock as *const AtomicU32 as *mut abi::lock,
+ abi::scope::PRIVATE,
+ );
assert_eq!(ret, abi::errno::SUCCESS, "Failed to unlock a mutex");
}
}

pub unsafe fn destroy(&self) {
- let lock = (*self.lock.get()).as_mut_ptr();
- let recursion = (*self.recursion.get()).as_mut_ptr();
assert_eq!(
- (*lock).load(Ordering::Relaxed),
+ self.lock.load(Ordering::Relaxed),
abi::LOCK_UNLOCKED.0,
"Attempted to destroy locked mutex"
);
- assert_eq!(*recursion, 0, "Recursion counter invalid");
+ assert_eq!(self.recursion.get(), 0, "Recursion counter invalid");
}
}