Expand lockorder testing to look at mutexes, not specific instances
Our existing lockorder inversion checks look at specific instances
of mutexes rather than the general mutex itself. This changes that
behavior to look at the instruction pointer at which a mutex was
created and treat all mutexes which were created at the same
location as equivalent.

This allows us to detect lockorder inversions which occur across
tests, though it does substantially reduce parallelism during test
runs.
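
As a rough, self-contained sketch of the idea (not the code in this commit): lock metadata is keyed by the site at which the mutex was constructed, so every mutex created at the same location shares one metadata entry and therefore one lockorder history. Here `site_key` stands in for the instruction pointer the real code pulls from a backtrace, and a `OnceLock` stands in for the `Once`-guarded `static mut` registry used in the diff below.

use std::collections::HashMap;
use std::sync::{Arc, Mutex, OnceLock};

// Per-construction-site metadata; the real type also tracks the set of locks
// taken before this one and the construction backtrace.
#[derive(Debug)]
struct LockMetadata {
    site_key: u64,
}

// Global registry mapping construction site -> shared metadata.
static LOCKS: OnceLock<Mutex<HashMap<u64, Arc<LockMetadata>>>> = OnceLock::new();

// Return the metadata for a construction site, creating it on first use.
fn metadata_for_site(site_key: u64) -> Arc<LockMetadata> {
    let locks = LOCKS.get_or_init(|| Mutex::new(HashMap::new()));
    let mut map = locks.lock().unwrap();
    Arc::clone(map.entry(site_key).or_insert_with(|| Arc::new(LockMetadata { site_key })))
}

fn main() {
    // Two mutexes "constructed" at the same site share one metadata entry, so a
    // lock order learned through one instance constrains the other...
    let a = metadata_for_site(0x1234);
    let b = metadata_for_site(0x1234);
    assert!(Arc::ptr_eq(&a, &b));

    // ...while a different construction site gets independent metadata.
    let c = metadata_for_site(0x5678);
    assert!(!Arc::ptr_eq(&a, &c));
    println!("shared: {:?}, distinct: {:?}", a, c);
}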
TheBlueMatt committed Jul 13, 2022
1 parent 0627c0c commit 52aa726
Showing 1 changed file with 166 additions and 96 deletions.
262 changes: 166 additions & 96 deletions lightning/src/debug_sync.rs
@@ -5,6 +5,7 @@ use core::time::Duration;
use std::collections::HashSet;
use std::cell::RefCell;

#[cfg(not(feature = "backtrace"))]
use std::sync::atomic::{AtomicUsize, Ordering};

use std::sync::Mutex as StdMutex;
@@ -15,7 +16,12 @@ use std::sync::RwLockWriteGuard as StdRwLockWriteGuard;
use std::sync::Condvar as StdCondvar;

#[cfg(feature = "backtrace")]
use backtrace::Backtrace;
use {prelude::HashMap, backtrace::Backtrace, std::sync::Once};

#[cfg(not(feature = "backtrace"))]
struct Backtrace{}
#[cfg(not(feature = "backtrace"))]
impl Backtrace { fn new() -> Backtrace { Backtrace {} } }

pub type LockResult<Guard> = Result<Guard, ()>;

@@ -46,14 +52,19 @@ thread_local! {
/// We track the set of locks currently held by a reference to their `LockMetadata`
static LOCKS_HELD: RefCell<HashSet<Arc<LockMetadata>>> = RefCell::new(HashSet::new());
}
#[cfg(not(feature = "backtrace"))]
static LOCK_IDX: AtomicUsize = AtomicUsize::new(0);

#[cfg(feature = "backtrace")]
static mut LOCKS: Option<StdMutex<HashMap<u64, Arc<LockMetadata>>>> = None;
#[cfg(feature = "backtrace")]
static LOCKS_INIT: Once = Once::new();

/// Metadata about a single lock, by id, the set of things locked-before it, and the backtrace of
/// when the Mutex itself was constructed.
struct LockMetadata {
lock_idx: u64,
locked_before: StdMutex<HashSet<Arc<LockMetadata>>>,
#[cfg(feature = "backtrace")]
locked_before: StdMutex<HashSet<LockDep>>,
lock_construction_bt: Backtrace,
}
impl PartialEq for LockMetadata {
@@ -64,14 +75,57 @@ impl std::hash::Hash for LockMetadata {
fn hash<H: std::hash::Hasher>(&self, hasher: &mut H) { hasher.write_u64(self.lock_idx); }
}

struct LockDep {
lock: Arc<LockMetadata>,
lockdep_trace: Option<Backtrace>,
}
impl LockDep {
/// Note that `Backtrace::new()` is rather expensive so we rely on the caller to fill in the
/// `lockdep_trace` field after ensuring we need it.
fn new_without_bt(lock: &Arc<LockMetadata>) -> Self {
Self { lock: Arc::clone(lock), lockdep_trace: None }
}
}
impl PartialEq for LockDep {
fn eq(&self, o: &LockDep) -> bool { self.lock.lock_idx == o.lock.lock_idx }
}
impl Eq for LockDep {}
impl std::hash::Hash for LockDep {
fn hash<H: std::hash::Hasher>(&self, hasher: &mut H) { hasher.write_u64(self.lock.lock_idx); }
}

impl LockMetadata {
fn new() -> LockMetadata {
LockMetadata {
locked_before: StdMutex::new(HashSet::new()),
lock_idx: LOCK_IDX.fetch_add(1, Ordering::Relaxed) as u64,
#[cfg(feature = "backtrace")]
lock_construction_bt: Backtrace::new(),
fn new() -> Arc<LockMetadata> {
let lock_idx;
let backtrace = Backtrace::new();

#[cfg(not(feature = "backtrace"))]
{ lock_idx = LOCK_IDX.fetch_add(1, Ordering::Relaxed) as u64; }

#[cfg(feature = "backtrace")]
{
let mut idx = None;
// Find the first frame which was *not* in debug_sync (or which is in our tests) and
// use that as the mutex construction site.
for frame in backtrace.frames() {
let symbol_name = frame.symbols().last().unwrap().name().unwrap().as_str().unwrap();
if !symbol_name.contains("lightning::debug_sync::") || symbol_name.contains("lightning::debug_sync::tests") {
idx = Some(frame.ip() as usize as u64);
break;
}
}
lock_idx = idx.unwrap();
LOCKS_INIT.call_once(|| { unsafe { LOCKS = Some(StdMutex::new(HashMap::new())); } });
if let Some(metadata) = unsafe { LOCKS.as_ref() }.unwrap().lock().unwrap().get(&lock_idx) {
return Arc::clone(&metadata);
}
}

Arc::new(LockMetadata {
locked_before: StdMutex::new(HashSet::new()),
lock_idx,
lock_construction_bt: backtrace,
})
}

// Returns whether we were a recursive lock (only relevant for read)
@@ -89,18 +143,25 @@ impl LockMetadata {
}
for locked in held.borrow().iter() {
if !read && *locked == *this {
panic!("Tried to lock a lock while it was held!");
// With `feature = "backtrace"` set, we may be looking at different instances
// of the same lock.
debug_assert!(cfg!(feature = "backtrace"), "Tried to lock a lock while it was held!");
}
for locked_dep in locked.locked_before.lock().unwrap().iter() {
if *locked_dep == *this {
if locked_dep.lock == *this && locked_dep.lock != *locked {
#[cfg(feature = "backtrace")]
panic!("Tried to violate existing lockorder.\nMutex that should be locked after the current lock was created at the following backtrace.\nNote that to get a backtrace for the lockorder violation, you should set RUST_BACKTRACE=1\n{:?}", locked.lock_construction_bt);
panic!("Tried to violate existing lockorder.\nMutex that should be locked after the current lock was created at the following backtrace.\nNote that to get a backtrace for the lockorder violation, you should set RUST_BACKTRACE=1\nLock constructed at:\n{:?}\n\nLock dep created at:\n{:?}\n\n", locked.lock_construction_bt, locked_dep.lockdep_trace);
#[cfg(not(feature = "backtrace"))]
panic!("Tried to violate existing lockorder. Build with the backtrace feature for more info.");
}
}
// Insert any already-held locks in our locked-before set.
this.locked_before.lock().unwrap().insert(Arc::clone(locked));
let mut locked_before = this.locked_before.lock().unwrap();
let mut lockdep = LockDep::new_without_bt(locked);
if !locked_before.contains(&lockdep) {
lockdep.lockdep_trace = Some(Backtrace::new());
locked_before.insert(lockdep);
}
}
held.borrow_mut().insert(Arc::clone(this));
inserted = true;
@@ -116,10 +177,15 @@ impl LockMetadata {
// Since a try-lock will simply fail if the lock is held already, we do not
// consider try-locks to ever generate lockorder inversions. However, if a try-lock
// succeeds, we do consider it to have created lockorder dependencies.
held.borrow_mut().insert(Arc::clone(this));
let mut locked_before = this.locked_before.lock().unwrap();
for locked in held.borrow().iter() {
this.locked_before.lock().unwrap().insert(Arc::clone(locked));
let mut lockdep = LockDep::new_without_bt(locked);
if !locked_before.contains(&lockdep) {
lockdep.lockdep_trace = Some(Backtrace::new());
locked_before.insert(lockdep);
}
}
held.borrow_mut().insert(Arc::clone(this));
});
}
}
@@ -170,7 +236,7 @@ impl<T: Sized> DerefMut for MutexGuard<'_, T> {

impl<T> Mutex<T> {
pub fn new(inner: T) -> Mutex<T> {
Mutex { inner: StdMutex::new(inner), deps: Arc::new(LockMetadata::new()) }
Mutex { inner: StdMutex::new(inner), deps: LockMetadata::new() }
}

pub fn lock<'a>(&'a self) -> LockResult<MutexGuard<'a, T>> {
@@ -249,7 +315,7 @@ impl<T: Sized> DerefMut for RwLockWriteGuard<'_, T> {

impl<T> RwLock<T> {
pub fn new(inner: T) -> RwLock<T> {
RwLock { inner: StdRwLock::new(inner), deps: Arc::new(LockMetadata::new()) }
RwLock { inner: StdRwLock::new(inner), deps: LockMetadata::new() }
}

pub fn read<'a>(&'a self) -> LockResult<RwLockReadGuard<'a, T>> {
@@ -271,96 +337,100 @@ impl<T> RwLock<T> {
}
}

#[test]
#[should_panic]
fn recursive_lock_fail() {
let mutex = Mutex::new(());
let _a = mutex.lock().unwrap();
let _b = mutex.lock().unwrap();
}
pub type FairRwLock<T> = RwLock<T>;

#[test]
fn recursive_read() {
let lock = RwLock::new(());
let _a = lock.read().unwrap();
let _b = lock.read().unwrap();
}
mod tests {
use super::{RwLock, Mutex};

#[test]
#[should_panic]
fn lockorder_fail() {
let a = Mutex::new(());
let b = Mutex::new(());
{
let _a = a.lock().unwrap();
let _b = b.lock().unwrap();
}
{
let _b = b.lock().unwrap();
let _a = a.lock().unwrap();
#[test]
#[should_panic]
fn recursive_lock_fail() {
let mutex = Mutex::new(());
let _a = mutex.lock().unwrap();
let _b = mutex.lock().unwrap();
}
}

#[test]
#[should_panic]
fn write_lockorder_fail() {
let a = RwLock::new(());
let b = RwLock::new(());
{
let _a = a.write().unwrap();
let _b = b.write().unwrap();
#[test]
fn recursive_read() {
let lock = RwLock::new(());
let _a = lock.read().unwrap();
let _b = lock.read().unwrap();
}
{
let _b = b.write().unwrap();
let _a = a.write().unwrap();
}
}

#[test]
#[should_panic]
fn read_lockorder_fail() {
let a = RwLock::new(());
let b = RwLock::new(());
{
let _a = a.read().unwrap();
let _b = b.read().unwrap();
}
{
let _b = b.read().unwrap();
let _a = a.read().unwrap();
#[test]
#[should_panic]
fn lockorder_fail() {
let a = Mutex::new(());
let b = Mutex::new(());
{
let _a = a.lock().unwrap();
let _b = b.lock().unwrap();
}
{
let _b = b.lock().unwrap();
let _a = a.lock().unwrap();
}
}
}

#[test]
fn read_recurisve_no_lockorder() {
// Like the above, but note that no lockorder is implied when we recursively read-lock a
// RwLock, causing this to pass just fine.
let a = RwLock::new(());
let b = RwLock::new(());
let _outer = a.read().unwrap();
{
let _a = a.read().unwrap();
let _b = b.read().unwrap();
#[test]
#[should_panic]
fn write_lockorder_fail() {
let a = RwLock::new(());
let b = RwLock::new(());
{
let _a = a.write().unwrap();
let _b = b.write().unwrap();
}
{
let _b = b.write().unwrap();
let _a = a.write().unwrap();
}
}
{
let _b = b.read().unwrap();
let _a = a.read().unwrap();

#[test]
#[should_panic]
fn read_lockorder_fail() {
let a = RwLock::new(());
let b = RwLock::new(());
{
let _a = a.read().unwrap();
let _b = b.read().unwrap();
}
{
let _b = b.read().unwrap();
let _a = a.read().unwrap();
}
}
}

#[test]
#[should_panic]
fn read_write_lockorder_fail() {
let a = RwLock::new(());
let b = RwLock::new(());
{
let _a = a.write().unwrap();
let _b = b.read().unwrap();
#[test]
fn read_recurisve_no_lockorder() {
// Like the above, but note that no lockorder is implied when we recursively read-lock a
// RwLock, causing this to pass just fine.
let a = RwLock::new(());
let b = RwLock::new(());
let _outer = a.read().unwrap();
{
let _a = a.read().unwrap();
let _b = b.read().unwrap();
}
{
let _b = b.read().unwrap();
let _a = a.read().unwrap();
}
}
{
let _b = b.read().unwrap();
let _a = a.write().unwrap();

#[test]
#[should_panic]
fn read_write_lockorder_fail() {
let a = RwLock::new(());
let b = RwLock::new(());
{
let _a = a.write().unwrap();
let _b = b.read().unwrap();
}
{
let _b = b.read().unwrap();
let _a = a.write().unwrap();
}
}
}

pub type FairRwLock<T> = RwLock<T>;
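
To make the cross-test detection concrete, consider a hypothetical pair of tests like the following (not part of this commit), assuming the `Mutex` in scope is the debug_sync wrapper defined above (e.g. via `use super::Mutex` from a test module) and the `backtrace` feature is enabled:

struct Node {
    peers: Mutex<Vec<u8>>,
    channels: Mutex<Vec<u8>>,
}

fn new_node() -> Node {
    // Every Node's mutexes are constructed here, so all Nodes share the same
    // two construction sites and hence, with `backtrace`, the same metadata.
    Node { peers: Mutex::new(Vec::new()), channels: Mutex::new(Vec::new()) }
}

#[test]
fn locks_peers_then_channels() {
    let node = new_node();
    let _p = node.peers.lock().unwrap();
    let _c = node.channels.lock().unwrap(); // records peers -> channels
}

#[test]
fn locks_channels_then_peers() {
    let node = new_node();
    let _c = node.channels.lock().unwrap();
    let _p = node.peers.lock().unwrap(); // inverts the order recorded above
}

With per-instance tracking neither test alone reveals a problem, since each test only ever locks its own mutexes; with construction-site tracking, whichever test runs second in the same process violates the order recorded by the first and panics with the construction and lock-dependency backtraces.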
