Struct-ify reconnect_nodes test util args #2459

Merged

Replaces the nine positional arguments of the `reconnect_nodes` test utility with a `ReconnectArgs` struct: call sites build a default argument set via `ReconnectArgs::new(&node_a, &node_b)` and override only the fields a given test needs.
18 changes: 13 additions & 5 deletions lightning/src/ln/chanmon_update_fail_tests.rs
@@ -178,7 +178,9 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
if disconnect {
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
-reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+reconnect_args.send_channel_ready = (true, true);
+reconnect_nodes(reconnect_args);
}

chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
@@ -233,7 +235,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
if disconnect {
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
-reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
}

// ...and make sure we can force-close a frozen channel
@@ -1925,7 +1927,9 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf:
// Make sure nodes[1] isn't stupid enough to re-send the ChannelReady on reconnect
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
-reconnect_nodes(&nodes[0], &nodes[1], (false, confirm_a_first), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+reconnect_args.send_channel_ready.1 = confirm_a_first;
+reconnect_nodes(reconnect_args);

// But we want to re-emit ChannelPending
expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
@@ -2575,10 +2579,14 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f
nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());

if second_fails {
-reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0), (false, false));
+let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]);
+reconnect_args.pending_htlc_fails.0 = 1;
+reconnect_nodes(reconnect_args);
expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);
} else {
-reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false));
+let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]);
+reconnect_args.pending_htlc_claims.0 = 1;
+reconnect_nodes(reconnect_args);
}

if htlc_status == HTLCStatusAtDupClaim::HoldingCell {
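
Every call site in this PR follows the same migration pattern: `ReconnectArgs::new` produces a fully defaulted argument set (all counts zero, all flags false), and the test then overrides only the fields it cares about. A minimal before/after sketch of the shape (test-harness setup elided; `nodes` as in the surrounding tests):

	// Before: nine positional arguments, most of them zeroed placeholders.
	reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));

	// After: the one meaningful override is named explicitly.
	let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
	reconnect_args.send_channel_ready = (true, true);
	reconnect_nodes(reconnect_args);
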
34 changes: 33 additions & 1 deletion lightning/src/ln/functional_test_utils.rs
@@ -2945,9 +2945,41 @@ macro_rules! handle_chan_reestablish_msgs {
}
}

+pub struct ReconnectArgs<'a, 'b, 'c, 'd> {
+	pub node_a: &'a Node<'b, 'c, 'd>,
+	pub node_b: &'a Node<'b, 'c, 'd>,
+	pub send_channel_ready: (bool, bool),
+	pub pending_htlc_adds: (i64, i64),
+	pub pending_htlc_claims: (usize, usize),
+	pub pending_htlc_fails: (usize, usize),
+	pub pending_cell_htlc_claims: (usize, usize),
+	pub pending_cell_htlc_fails: (usize, usize),
+	pub pending_raa: (bool, bool),
+}
+
+impl<'a, 'b, 'c, 'd> ReconnectArgs<'a, 'b, 'c, 'd> {
+	pub fn new(node_a: &'a Node<'b, 'c, 'd>, node_b: &'a Node<'b, 'c, 'd>) -> Self {
+		Self {
+			node_a,
+			node_b,
+			send_channel_ready: (false, false),
+			pending_htlc_adds: (0, 0),
+			pending_htlc_claims: (0, 0),
+			pending_htlc_fails: (0, 0),
+			pending_cell_htlc_claims: (0, 0),
+			pending_cell_htlc_fails: (0, 0),
+			pending_raa: (false, false),
+		}
+	}
+}
+
/// pending_htlc_adds includes both the holding cell and in-flight update_add_htlcs, whereas
/// for claims/fails they are separated out.
-pub fn reconnect_nodes<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a, 'b, 'c>, send_channel_ready: (bool, bool), pending_htlc_adds: (i64, i64), pending_htlc_claims: (usize, usize), pending_htlc_fails: (usize, usize), pending_cell_htlc_claims: (usize, usize), pending_cell_htlc_fails: (usize, usize), pending_raa: (bool, bool)) {
+pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) {
+	let ReconnectArgs {
+		node_a, node_b, send_channel_ready, pending_htlc_adds, pending_htlc_claims, pending_htlc_fails,
+		pending_cell_htlc_claims, pending_cell_htlc_fails, pending_raa
+	} = args;
node_a.node.peer_connected(&node_b.node.get_our_node_id(), &msgs::Init {
features: node_b.node.init_features(), networks: None, remote_network_address: None
}, true).unwrap();
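
Two conventions in `ReconnectArgs` are worth noting. Each tuple's `.0` applies to the `node_a` side and `.1` to the `node_b` side, mirroring the positional tuples of the old signature, and `pending_htlc_adds` stays signed (`i64`) so a test can pass `-1` for a peer that still awaits a `commitment_signed` carrying no `update_add_htlcs`. A minimal sketch combining the two, matching the `messages_delivered == 3` branch of `do_test_drop_messages_peer_disconnect` below:

	// nodes[0] (node_a) still wants its RAA + commitment_signed; the -1 marks
	// an outstanding commitment update with no update_add_htlcs attached.
	let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
	reconnect_args.pending_htlc_adds.0 = -1;
	reconnect_args.pending_raa.0 = true;
	reconnect_nodes(reconnect_args);
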
58 changes: 42 additions & 16 deletions lightning/src/ln/functional_tests.rs
@@ -3585,7 +3585,9 @@ fn test_dup_events_on_peer_disconnect() {
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());

-reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false));
+let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+reconnect_args.pending_htlc_claims.0 = 1;
+reconnect_nodes(reconnect_args);
expect_payment_path_successful!(nodes[0]);
}

@@ -3642,7 +3644,9 @@ fn test_simple_peer_disconnect() {

nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
-reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+reconnect_args.send_channel_ready = (true, true);
+reconnect_nodes(reconnect_args);

let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
let payment_hash_2 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
@@ -3651,7 +3655,7 @@

nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
-reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));

let (payment_preimage_3, payment_hash_3, _) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000);
let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
@@ -3664,7 +3668,10 @@
claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], true, payment_preimage_3);
fail_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], true, payment_hash_5);

-reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (1, 0), (1, 0), (false, false));
+let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+reconnect_args.pending_cell_htlc_fails.0 = 1;
+reconnect_args.pending_cell_htlc_claims.0 = 1;
+reconnect_nodes(reconnect_args);
{
let events = nodes[0].node.get_and_clear_pending_events();
assert_eq!(events.len(), 4);
@@ -3776,19 +3783,29 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken
}
// Even if the channel_ready messages get exchanged, as long as nothing further was
// received on either side, both sides will need to resend them.
-reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 1), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+reconnect_args.send_channel_ready = (true, true);
+reconnect_args.pending_htlc_adds.1 = 1;
+reconnect_nodes(reconnect_args);
} else if messages_delivered == 3 {
// nodes[0] still wants its RAA + commitment_signed
-reconnect_nodes(&nodes[0], &nodes[1], (false, false), (-1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (true, false));
+let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+reconnect_args.pending_htlc_adds.0 = -1;
+reconnect_args.pending_raa.0 = true;
+reconnect_nodes(reconnect_args);
} else if messages_delivered == 4 {
// nodes[0] still wants its commitment_signed
-reconnect_nodes(&nodes[0], &nodes[1], (false, false), (-1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+reconnect_args.pending_htlc_adds.0 = -1;
+reconnect_nodes(reconnect_args);
} else if messages_delivered == 5 {
// nodes[1] still wants its final RAA
-reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, true));
+let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+reconnect_args.pending_raa.1 = true;
+reconnect_nodes(reconnect_args);
} else if messages_delivered == 6 {
// Everything was delivered...
-reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
}

let events_1 = nodes[1].node.get_and_clear_pending_events();
@@ -3812,7 +3829,7 @@

nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
-reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));

nodes[1].node.process_pending_htlc_forwards();

@@ -3896,24 +3913,33 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
if messages_delivered < 2 {
-reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false));
+let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+reconnect_args.pending_htlc_claims.0 = 1;
+reconnect_nodes(reconnect_args);
if messages_delivered < 1 {
expect_payment_sent!(nodes[0], payment_preimage_1);
} else {
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
}
} else if messages_delivered == 2 {
// nodes[0] still wants its RAA + commitment_signed
-reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, -1), (0, 0), (0, 0), (0, 0), (0, 0), (false, true));
+let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+reconnect_args.pending_htlc_adds.1 = -1;
+reconnect_args.pending_raa.1 = true;
+reconnect_nodes(reconnect_args);
} else if messages_delivered == 3 {
// nodes[0] still wants its commitment_signed
-reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, -1), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+reconnect_args.pending_htlc_adds.1 = -1;
+reconnect_nodes(reconnect_args);
} else if messages_delivered == 4 {
// nodes[1] still wants its final RAA
-reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (true, false));
+let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+reconnect_args.pending_raa.0 = true;
+reconnect_nodes(reconnect_args);
} else if messages_delivered == 5 {
// Everything was delivered...
-reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
}

if messages_delivered == 1 || messages_delivered == 2 {
Expand All @@ -3923,7 +3949,7 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
}
-reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));

if messages_delivered > 2 {
expect_payment_path_successful!(nodes[0]);
2 changes: 1 addition & 1 deletion lightning/src/ln/onion_route_tests.rs
@@ -597,7 +597,7 @@ fn test_onion_failure() {
nodes[1].node.get_and_clear_pending_msg_events();
nodes[2].node.get_and_clear_pending_msg_events();
}, true, Some(UPDATE|20), Some(NetworkUpdate::ChannelUpdateMessage{msg: ChannelUpdate::dummy(short_channel_id)}), Some(short_channel_id));
-reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[2]));

run_onion_failure_test("expiry_too_far", 0, &nodes, &route, &payment_hash, &payment_secret, |msg| {
let session_priv = SecretKey::from_slice(&[3; 32]).unwrap();
16 changes: 10 additions & 6 deletions lightning/src/ln/payment_tests.rs
@@ -490,7 +490,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
// nodes[1] now immediately fails the HTLC as the next-hop channel is disconnected
let _ = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());

-reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[2]));

let as_commitment_tx = get_local_commitment_txn!(nodes[0], chan_id)[0].clone();
if confirm_before_reload {
@@ -789,7 +789,9 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
nodes[0].node.test_process_background_events();
check_added_monitors(&nodes[0], 1);

-reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+reconnect_args.send_channel_ready = (true, true);
+reconnect_nodes(reconnect_args);

// Now resend the payment, delivering the HTLC and actually claiming it this time. This ensures
// the payment is not (spuriously) listed as still pending.
@@ -817,7 +819,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
nodes[0].node.test_process_background_events();
check_added_monitors(&nodes[0], 1);

-reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));

match nodes[0].node.send_payment_with_route(&new_route, payment_hash, RecipientOnionFields::secret_only(payment_secret), payment_id) {
Err(PaymentSendFailure::DuplicatePayment) => {},
@@ -1011,7 +1013,7 @@ fn test_fulfill_restart_failure() {
reload_node!(nodes[1], &chan_manager_serialized, &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_1_deserialized);

nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
-reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));

nodes[1].node.fail_htlc_backwards(&payment_hash);
expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
@@ -3422,9 +3424,11 @@ fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) {
reload_node!(nodes[3], config, &nodes[3].node.encode(), &[&mon_bd, &mon_cd],
persister, new_chain_monitor, nodes_0_deserialized);
nodes[1].node.peer_disconnected(&nodes[3].node.get_our_node_id());
-reconnect_nodes(&nodes[1], &nodes[3], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[3]));
}
-reconnect_nodes(&nodes[2], &nodes[3], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+let mut reconnect_args = ReconnectArgs::new(&nodes[2], &nodes[3]);
+reconnect_args.send_channel_ready = (true, true);
+reconnect_nodes(reconnect_args);

// Create a new channel between C and D as A will refuse to retry on the existing one because
// it just failed.
22 changes: 13 additions & 9 deletions lightning/src/ln/reload_tests.rs
@@ -52,7 +52,9 @@ fn test_funding_peer_disconnect() {
let events_1 = nodes[0].node.get_and_clear_pending_msg_events();
assert!(events_1.is_empty());

-reconnect_nodes(&nodes[0], &nodes[1], (false, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+reconnect_args.send_channel_ready.1 = true;
+reconnect_nodes(reconnect_args);

nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
@@ -180,7 +182,7 @@ fn test_funding_peer_disconnect() {

reload_node!(nodes[0], &nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);

-reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
}

#[test]
@@ -334,7 +336,7 @@ fn test_simple_manager_serialize_deserialize() {
let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
reload_node!(nodes[0], nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);

-reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));

fail_payment(&nodes[0], &[&nodes[1]], our_payment_hash);
claim_payment(&nodes[0], &[&nodes[1]], our_payment_preimage);
@@ -456,8 +458,8 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
check_added_monitors!(nodes[0], 1);

// nodes[1] and nodes[2] have no lost state with nodes[0]...
-reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
-reconnect_nodes(&nodes[0], &nodes[2], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
+reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[2]));
//... and we can even still claim the payment!
claim_payment(&nodes[2], &[&nodes[0], &nodes[1]], our_payment_preimage);

@@ -666,10 +668,12 @@ fn test_forwardable_regen() {
let chan_1_monitor_serialized = get_monitor!(nodes[1], chan_id_2).encode();
reload_node!(nodes[1], nodes[1].node.encode(), &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], persister, new_chain_monitor, nodes_1_deserialized);

-reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
// Note that nodes[1] and nodes[2] resend their channel_ready here since they haven't updated
// the commitment state.
-reconnect_nodes(&nodes[1], &nodes[2], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]);
+reconnect_args.send_channel_ready = (true, true);
+reconnect_nodes(reconnect_args);

assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

@@ -967,7 +971,7 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht
check_added_monitors!(nodes[1], 1);

nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
-reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));

if use_cs_commitment {
// If we confirm a commitment transaction that has the HTLC on-chain, nodes[1] should wait
@@ -1085,7 +1089,7 @@ fn removed_payment_no_manager_persistence() {
// now forgotten everywhere. The ChannelManager should have, as a side-effect of reload,
// learned that the HTLC is gone from the ChannelMonitor and added it to the to-fail-back set.
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
-reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));

expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);
check_added_monitors!(nodes[1], 1);