Merge pull request #1964 from TheBlueMatt/2023-01-no-debug-panics
Use test/_test_utils to enable single-threaded debug assertions
arik-so authored Jan 19, 2023
2 parents 50d1260 + 7a9bea1 commit d66c70e
Showing 5 changed files with 17 additions and 15 deletions.
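The change shared across all five files: internal sanity checks that were previously compiled in under `#[cfg(debug_assertions)]` and enforced with `assert!` are now gated on `#[cfg(test)]` or the crate's `_test_utils` feature and downgraded to `debug_assert!`, so they still run in LDK's own tests and fuzzers but can no longer panic a downstream user's debug build. A minimal sketch of the resulting shape (the `Manager` type and its `pending_events` field are simplified stand-ins, not LDK's real definitions):

```rust
use std::sync::Mutex;

// Hypothetical stand-in for a ChannelManager-like struct.
struct Manager {
    pending_events: Mutex<Vec<String>>,
}

impl Manager {
    fn handle_error(&self) {
        // Compiled only into LDK's own tests, or into builds that enable the
        // `_test_utils` feature (as the fuzz targets now do); never into a
        // downstream user's debug build.
        #[cfg(any(feature = "_test_utils", test))]
        {
            // If try_lock() fails, the caller already holds the lock and
            // re-locking below would deadlock. debug_assert! only fires when
            // debug assertions are enabled.
            debug_assert!(self.pending_events.try_lock().is_ok());
        }

        // ... real error handling would continue here ...
    }
}
```

The `Cargo.toml` changes below simply enable `_test_utils` for the fuzz targets and for the dev-dependency builds so these gated checks stay active there.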
2 changes: 1 addition & 1 deletion fuzz/Cargo.toml
@@ -18,7 +18,7 @@ libfuzzer_fuzz = ["libfuzzer-sys"]
 stdin_fuzz = []
 
 [dependencies]
-lightning = { path = "../lightning", features = ["regex", "hashbrown"] }
+lightning = { path = "../lightning", features = ["regex", "hashbrown", "_test_utils"] }
 lightning-rapid-gossip-sync = { path = "../lightning-rapid-gossip-sync" }
 bitcoin = { version = "0.29.0", features = ["secp-lowmemory"] }
 hex = "0.3"
1 change: 1 addition & 0 deletions lightning-block-sync/Cargo.toml
@@ -27,4 +27,5 @@ serde_json = { version = "1.0", optional = true }
 chunked_transfer = { version = "1.4", optional = true }
 
 [dev-dependencies]
+lightning = { version = "0.0.113", path = "../lightning", features = ["_test_utils"] }
 tokio = { version = "~1.14", features = [ "macros", "rt" ] }
1 change: 1 addition & 0 deletions lightning-net-tokio/Cargo.toml
@@ -21,3 +21,4 @@ tokio = { version = "1.0", features = [ "io-util", "macros", "rt", "sync", "net"
 
 [dev-dependencies]
 tokio = { version = "~1.14", features = [ "io-util", "macros", "rt", "rt-multi-thread", "sync", "net", "time" ] }
+lightning = { version = "0.0.113", path = "../lightning", features = ["_test_utils"] }
12 changes: 6 additions & 6 deletions lightning-net-tokio/src/lib.rs
@@ -301,7 +301,7 @@ pub fn setup_inbound<PM, CMH, RMH, OMH, L, UMH>(
 {
 let remote_addr = get_addr_from_stream(&stream);
 let (reader, write_receiver, read_receiver, us) = Connection::new(stream);
-#[cfg(debug_assertions)]
+#[cfg(test)]
 let last_us = Arc::clone(&us);
 
 let handle_opt = if let Ok(_) = peer_manager.new_inbound_connection(SocketDescriptor::new(us.clone()), remote_addr) {
@@ -322,8 +322,8 @@ pub fn setup_inbound<PM, CMH, RMH, OMH, L, UMH>(
 // socket shutdown(). Still, as a check during testing, to make sure tokio doesn't
 // keep too many wakers around, this makes sense. The race should be rare (we do
 // some work after shutdown()) and an error would be a major memory leak.
-#[cfg(debug_assertions)]
-assert!(Arc::try_unwrap(last_us).is_ok());
+#[cfg(test)]
+debug_assert!(Arc::try_unwrap(last_us).is_ok());
 }
 }
 }
@@ -355,7 +355,7 @@ pub fn setup_outbound<PM, CMH, RMH, OMH, L, UMH>(
 {
 let remote_addr = get_addr_from_stream(&stream);
 let (reader, mut write_receiver, read_receiver, us) = Connection::new(stream);
-#[cfg(debug_assertions)]
+#[cfg(test)]
 let last_us = Arc::clone(&us);
 let handle_opt = if let Ok(initial_send) = peer_manager.new_outbound_connection(their_node_id, SocketDescriptor::new(us.clone()), remote_addr) {
 Some(tokio::spawn(async move {
@@ -401,8 +401,8 @@ pub fn setup_outbound<PM, CMH, RMH, OMH, L, UMH>(
 // socket shutdown(). Still, as a check during testing, to make sure tokio doesn't
 // keep too many wakers around, this makes sense. The race should be rare (we do
 // some work after shutdown()) and an error would be a major memory leak.
-#[cfg(debug_assertions)]
-assert!(Arc::try_unwrap(last_us).is_ok());
+#[cfg(test)]
+debug_assert!(Arc::try_unwrap(last_us).is_ok());
 }
 }
 }
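The comment retained in these hunks explains the intent of the `Arc::try_unwrap` check: once a connection is torn down, no tokio waker should still hold a clone of the connection state, or the connection would effectively be leaked. A rough sketch of that idea, with a hypothetical `ConnState` standing in for the crate's internal `Connection`:

```rust
use std::sync::Arc;

// Hypothetical connection state; the real type lives inside lightning-net-tokio.
struct ConnState {
    _id: u64,
}

fn run_connection(state: Arc<ConnState>) {
    // Keep one extra clone around, but only when compiled for tests.
    #[cfg(test)]
    let last_us = Arc::clone(&state);

    // ... drive the connection; by the time teardown finishes, every other
    // clone of `state` (tasks, wakers) should have been dropped ...
    drop(state);

    // Test-only: if this really was the last reference, try_unwrap succeeds.
    // A failure would mean stray wakers are keeping the state alive, i.e. a leak.
    #[cfg(test)]
    debug_assert!(Arc::try_unwrap(last_us).is_ok());
}
```

Outside of `cargo test`, both the extra clone and the assertion compile away, so the check costs nothing in release or downstream debug builds.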
16 changes: 8 additions & 8 deletions lightning/src/ln/channelmanager.rs
@@ -1153,12 +1153,12 @@ macro_rules! handle_error {
 match $internal {
 Ok(msg) => Ok(msg),
 Err(MsgHandleErrInternal { err, chan_id, shutdown_finish }) => {
-#[cfg(debug_assertions)]
+#[cfg(any(feature = "_test_utils", test))]
 {
 // In testing, ensure there are no deadlocks where the lock is already held upon
 // entering the macro.
-assert!($self.pending_events.try_lock().is_ok());
-assert!($self.per_peer_state.try_write().is_ok());
+debug_assert!($self.pending_events.try_lock().is_ok());
+debug_assert!($self.per_peer_state.try_write().is_ok());
 }
 
 let mut msg_events = Vec::with_capacity(2);
@@ -1193,7 +1193,7 @@ macro_rules! handle_error {
 let mut peer_state = peer_state_mutex.lock().unwrap();
 peer_state.pending_msg_events.append(&mut msg_events);
 }
-#[cfg(debug_assertions)]
+#[cfg(any(feature = "_test_utils", test))]
 {
 if let None = per_peer_state.get(&$counterparty_node_id) {
 // This shouldn't occour in tests unless an unkown counterparty_node_id
@@ -1206,10 +1206,10 @@
 => {
 assert_eq!(*data, expected_error_str);
 if let Some((err_channel_id, _user_channel_id)) = chan_id {
-assert_eq!(*channel_id, err_channel_id);
+debug_assert_eq!(*channel_id, err_channel_id);
 }
 }
-_ => panic!("Unexpected event"),
+_ => debug_assert!(false, "Unexpected event"),
 }
 }
 }
@@ -3565,7 +3565,7 @@ where
 /// Fails an HTLC backwards to the sender of it to us.
 /// Note that we do not assume that channels corresponding to failed HTLCs are still available.
 fn fail_htlc_backwards_internal(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) {
-#[cfg(debug_assertions)]
+#[cfg(any(feature = "_test_utils", test))]
 {
 // Ensure that no peer state channel storage lock is not held when calling this
 // function.
@@ -3574,7 +3574,7 @@ where
 // this function with any `per_peer_state` peer lock aquired would.
 let per_peer_state = self.per_peer_state.read().unwrap();
 for (_, peer) in per_peer_state.iter() {
-assert!(peer.try_lock().is_ok());
+debug_assert!(peer.try_lock().is_ok());
 }
 }
 
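The same gating covers the lock-order check in `fail_htlc_backwards_internal`: it walks every per-peer mutex and confirms none is already held, because the real handling may need to take one and would otherwise deadlock. A simplified sketch with a made-up `per_peer_state` layout (the key and value types here are placeholders, not LDK's actual definitions):

```rust
use std::collections::HashMap;
use std::sync::{Mutex, RwLock};

struct PeerState {
    _channels: Vec<u64>,
}

// Hypothetical, heavily simplified per-peer state map.
struct Manager {
    per_peer_state: RwLock<HashMap<u64, Mutex<PeerState>>>,
}

impl Manager {
    fn fail_htlc_backwards_internal(&self) {
        // Test/_test_utils only: prove the caller holds no per-peer lock
        // before the real handling below takes one.
        #[cfg(any(feature = "_test_utils", test))]
        {
            let per_peer_state = self.per_peer_state.read().unwrap();
            for (_, peer) in per_peer_state.iter() {
                debug_assert!(peer.try_lock().is_ok());
            }
        }

        // ... actual HTLC failure handling ...
    }
}
```

In the single-threaded setup the commit title refers to, a failed `try_lock` can only mean the current call stack already holds that peer's lock, which is exactly the re-entrancy these assertions are meant to catch.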