From 9864a267578d9b053faecdb8e0b97b2dc95d8969 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Tigerstr=C3=B6m?= <11711198+ViktorTigerstrom@users.noreply.github.com> Date: Tue, 17 May 2022 18:07:41 +0200 Subject: [PATCH 01/21] Add `counterparty_node_id` to `short_to_id` map --- lightning/src/ln/channelmanager.rs | 38 ++++++++++++++++-------------- 1 file changed, 20 insertions(+), 18 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index cf593068947..fd40a7e07ad 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -406,10 +406,12 @@ pub(super) enum RAACommitmentOrder { // Note this is only exposed in cfg(test): pub(super) struct ChannelHolder { pub(super) by_id: HashMap<[u8; 32], Channel>, - /// SCIDs (and outbound SCID aliases) to the real channel id. Outbound SCID aliases are added - /// here once the channel is available for normal use, with SCIDs being added once the funding - /// transaction is confirmed at the channel's required confirmation depth. - pub(super) short_to_id: HashMap, + /// SCIDs (and outbound SCID aliases) -> `counterparty_node_id`s and `channel_id`s. + /// + /// Outbound SCID aliases are added here once the channel is available for normal use, with + /// SCIDs being added once the funding transaction is confirmed at the channel's required + /// confirmation depth. + pub(super) short_to_id: HashMap, /// SCID/SCID Alias -> forward infos. Key of 0 means payments received. /// /// Note that because we may have an SCID Alias as the key we can have two entries per channel, @@ -1412,12 +1414,12 @@ macro_rules! send_channel_ready { }); // Note that we may send a `channel_ready` multiple times for a channel if we reconnect, so // we allow collisions, but we shouldn't ever be updating the channel ID pointed to. 
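The generic parameters of the maps above are not visible in this rendering. As a rough sketch of the value-type change this commit makes (the type aliases and helper below are illustrative, assumed from the surrounding context rather than taken verbatim from the patch):

    use std::collections::HashMap;
    use bitcoin::secp256k1::PublicKey;

    // Before this patch: SCID (or outbound SCID alias) -> channel_id.
    type ShortToId = HashMap<u64, [u8; 32]>;

    // After this patch: SCID (or outbound SCID alias) -> (counterparty_node_id, channel_id).
    type ShortToChanInfo = HashMap<u64, (PublicKey, [u8; 32])>;

    // Call sites that only need the channel id destructure the tuple, which is why the
    // hunks below switch from `Some(id)` to `Some((_cp_id, id))` patterns.
    fn channel_id_for_scid(map: &ShortToChanInfo, scid: u64) -> Option<[u8; 32]> {
        map.get(&scid).map(|(_cp_id, chan_id)| *chan_id)
    }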
- let outbound_alias_insert = $short_to_id.insert($channel.outbound_scid_alias(), $channel.channel_id()); - assert!(outbound_alias_insert.is_none() || outbound_alias_insert.unwrap() == $channel.channel_id(), + let outbound_alias_insert = $short_to_id.insert($channel.outbound_scid_alias(), ($channel.get_counterparty_node_id(), $channel.channel_id())); + assert!(outbound_alias_insert.is_none() || outbound_alias_insert.unwrap() == ($channel.get_counterparty_node_id(), $channel.channel_id()), "SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels"); if let Some(real_scid) = $channel.get_short_channel_id() { - let scid_insert = $short_to_id.insert(real_scid, $channel.channel_id()); - assert!(scid_insert.is_none() || scid_insert.unwrap() == $channel.channel_id(), + let scid_insert = $short_to_id.insert(real_scid, ($channel.get_counterparty_node_id(), $channel.channel_id())); + assert!(scid_insert.is_none() || scid_insert.unwrap() == ($channel.get_counterparty_node_id(), $channel.channel_id()), "SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels"); } } @@ -2223,7 +2225,7 @@ impl ChannelMana break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None)); } }, - Some(id) => Some(id.clone()), + Some((_cp_id, id)) => Some(id.clone()), }; let (chan_update_opt, forwardee_cltv_expiry_delta) = if let Some(forwarding_id) = forwarding_id_opt { let chan = channel_state.as_mut().unwrap().by_id.get_mut(&forwarding_id).unwrap(); @@ -2404,7 +2406,7 @@ impl ChannelMana let id = match channel_lock.short_to_id.get(&path.first().unwrap().short_channel_id) { None => return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()}), - Some(id) => id.clone(), + Some((_cp_id, id)) => id.clone(), }; macro_rules! insert_outbound_payment { @@ -2932,7 +2934,7 @@ impl ChannelMana for (short_chan_id, mut pending_forwards) in channel_state.forward_htlcs.drain() { if short_chan_id != 0 { let forward_chan_id = match channel_state.short_to_id.get(&short_chan_id) { - Some(chan_id) => chan_id.clone(), + Some((_cp_id, chan_id)) => chan_id.clone(), None => { for forward_info in pending_forwards.drain(..) { match forward_info { @@ -3349,7 +3351,7 @@ impl ChannelMana self.process_background_events(); } - fn update_channel_fee(&self, short_to_id: &mut HashMap, pending_msg_events: &mut Vec, chan_id: &[u8; 32], chan: &mut Channel, new_feerate: u32) -> (bool, NotifyOption, Result<(), MsgHandleErrInternal>) { + fn update_channel_fee(&self, short_to_id: &mut HashMap, pending_msg_events: &mut Vec, chan_id: &[u8; 32], chan: &mut Channel, new_feerate: u32) -> (bool, NotifyOption, Result<(), MsgHandleErrInternal>) { if !chan.is_outbound() { return (true, NotifyOption::SkipPersist, Ok(())); } // If the feerate has decreased by less than half, don't bother if new_feerate <= chan.get_feerate() && new_feerate * 2 > chan.get_feerate() { @@ -3965,7 +3967,7 @@ impl ChannelMana //TODO: Delay the claimed_funds relaying just like we do outbound relay! 
let channel_state = &mut **channel_state_lock; let chan_id = match channel_state.short_to_id.get(&prev_hop.short_channel_id) { - Some(chan_id) => chan_id.clone(), + Some((_cp_id, chan_id)) => chan_id.clone(), None => { return ClaimFundsFromHop::PrevHopForceClosed } @@ -4887,7 +4889,7 @@ impl ChannelMana let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; let chan_id = match channel_state.short_to_id.get(&msg.contents.short_channel_id) { - Some(chan_id) => chan_id.clone(), + Some((_cp_id, chan_id)) => chan_id.clone(), None => { // It's not a local channel return Ok(NotifyOption::SkipPersist) @@ -5669,8 +5671,8 @@ where // using the real SCID at relay-time (i.e. enforce option_scid_alias // then), and if the funding tx is ever un-confirmed we force-close the // channel, ensuring short_to_id is always consistent. - let scid_insert = short_to_id.insert(real_scid, channel.channel_id()); - assert!(scid_insert.is_none() || scid_insert.unwrap() == channel.channel_id(), + let scid_insert = short_to_id.insert(real_scid, (channel.get_counterparty_node_id(), channel.channel_id())); + assert!(scid_insert.is_none() || scid_insert.unwrap() == (channel.get_counterparty_node_id(), channel.channel_id()), "SCIDs should never collide - ensure you weren't behind by a full {} blocks when creating channels", fake_scid::MAX_SCID_BLOCKS_FROM_NOW); } @@ -6713,7 +6715,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> } else { log_info!(args.logger, "Successfully loaded channel {}", log_bytes!(channel.channel_id())); if let Some(short_channel_id) = channel.get_short_channel_id() { - short_to_id.insert(short_channel_id, channel.channel_id()); + short_to_id.insert(short_channel_id, (channel.get_counterparty_node_id(), channel.channel_id())); } by_id.insert(channel.channel_id(), channel); } @@ -6972,7 +6974,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> return Err(DecodeError::InvalidValue); } if chan.is_usable() { - if short_to_id.insert(chan.outbound_scid_alias(), *chan_id).is_some() { + if short_to_id.insert(chan.outbound_scid_alias(), (chan.get_counterparty_node_id(), *chan_id)).is_some() { // Note that in rare cases its possible to hit this while reading an older // channel if we just happened to pick a colliding outbound alias above. log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.outbound_scid_alias()); From 0780fb97e8459b1ce3374fdaef791d57153a3036 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Tigerstr=C3=B6m?= <11711198+ViktorTigerstrom@users.noreply.github.com> Date: Fri, 10 Jun 2022 01:06:42 +0200 Subject: [PATCH 02/21] Rename `short_to_id` map to `short_to_chan_info` As the map values are no longer only `channel_id`s, but also `counterparty_node_id`s, the map is renamed to better correspond to what's actually stored in the map. --- lightning/src/ln/channel.rs | 5 +- lightning/src/ln/channelmanager.rs | 119 +++++++++++++++-------------- lightning/src/ln/reorg_tests.rs | 6 +- 3 files changed, 66 insertions(+), 64 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 9b47770f120..0ba3c9af70e 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -4828,8 +4828,9 @@ impl Channel { // the funding transaction is at least still in the mempool of most nodes).
// // Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or - // 0-conf channel, but not doing so may lead to the `ChannelManager::short_to_id` map - // being inconsistent, so we currently have to. + // 0-conf channel, but not doing so may lead to the + // `ChannelManager::short_to_chan_info` map being inconsistent, so we currently have + // to. if funding_tx_confirmations == 0 && self.funding_tx_confirmed_in.is_some() { let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.", self.minimum_depth.unwrap(), funding_tx_confirmations); diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index fd40a7e07ad..86568b2c75c 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -411,7 +411,7 @@ pub(super) struct ChannelHolder { /// Outbound SCID aliases are added here once the channel is available for normal use, with /// SCIDs being added once the funding transaction is confirmed at the channel's required /// confirmation depth. - pub(super) short_to_id: HashMap, + pub(super) short_to_chan_info: HashMap, /// SCID/SCID Alias -> forward infos. Key of 0 means payments received. /// /// Note that because we may have an SCID Alias as the key we can have two entries per channel, @@ -1231,9 +1231,9 @@ macro_rules! handle_error { } macro_rules! update_maps_on_chan_removal { - ($self: expr, $short_to_id: expr, $channel: expr) => { + ($self: expr, $short_to_chan_info: expr, $channel: expr) => { if let Some(short_id) = $channel.get_short_channel_id() { - $short_to_id.remove(&short_id); + $short_to_chan_info.remove(&short_id); } else { // If the channel was never confirmed on-chain prior to its closure, remove the // outbound SCID alias we used for it from the collision-prevention set. While we @@ -1244,13 +1244,13 @@ macro_rules! update_maps_on_chan_removal { let alias_removed = $self.outbound_scid_aliases.lock().unwrap().remove(&$channel.outbound_scid_alias()); debug_assert!(alias_removed); } - $short_to_id.remove(&$channel.outbound_scid_alias()); + $short_to_chan_info.remove(&$channel.outbound_scid_alias()); } } /// Returns (boolean indicating if we should remove the Channel object from memory, a mapped error) macro_rules! convert_chan_err { - ($self: ident, $err: expr, $short_to_id: expr, $channel: expr, $channel_id: expr) => { + ($self: ident, $err: expr, $short_to_chan_info: expr, $channel: expr, $channel_id: expr) => { match $err { ChannelError::Warn(msg) => { (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(msg), $channel_id.clone())) @@ -1260,14 +1260,14 @@ macro_rules! 
convert_chan_err { }, ChannelError::Close(msg) => { log_error!($self.logger, "Closing channel {} due to close-required error: {}", log_bytes!($channel_id[..]), msg); - update_maps_on_chan_removal!($self, $short_to_id, $channel); + update_maps_on_chan_removal!($self, $short_to_chan_info, $channel); let shutdown_res = $channel.force_shutdown(true); (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.get_user_id(), shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok())) }, ChannelError::CloseDelayBroadcast(msg) => { log_error!($self.logger, "Channel {} need to be shutdown but closing transactions not broadcast due to {}", log_bytes!($channel_id[..]), msg); - update_maps_on_chan_removal!($self, $short_to_id, $channel); + update_maps_on_chan_removal!($self, $short_to_chan_info, $channel); let shutdown_res = $channel.force_shutdown(false); (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.get_user_id(), shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok())) @@ -1281,7 +1281,7 @@ macro_rules! break_chan_entry { match $res { Ok(res) => res, Err(e) => { - let (drop, res) = convert_chan_err!($self, e, $channel_state.short_to_id, $entry.get_mut(), $entry.key()); + let (drop, res) = convert_chan_err!($self, e, $channel_state.short_to_chan_info, $entry.get_mut(), $entry.key()); if drop { $entry.remove_entry(); } @@ -1296,7 +1296,7 @@ macro_rules! try_chan_entry { match $res { Ok(res) => res, Err(e) => { - let (drop, res) = convert_chan_err!($self, e, $channel_state.short_to_id, $entry.get_mut(), $entry.key()); + let (drop, res) = convert_chan_err!($self, e, $channel_state.short_to_chan_info, $entry.get_mut(), $entry.key()); if drop { $entry.remove_entry(); } @@ -1310,18 +1310,18 @@ macro_rules! remove_channel { ($self: expr, $channel_state: expr, $entry: expr) => { { let channel = $entry.remove_entry().1; - update_maps_on_chan_removal!($self, $channel_state.short_to_id, channel); + update_maps_on_chan_removal!($self, $channel_state.short_to_chan_info, channel); channel } } } macro_rules! handle_monitor_err { - ($self: ident, $err: expr, $short_to_id: expr, $chan: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr, $chan_id: expr) => { + ($self: ident, $err: expr, $short_to_chan_info: expr, $chan: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr, $chan_id: expr) => { match $err { ChannelMonitorUpdateErr::PermanentFailure => { log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateErr::PermanentFailure", log_bytes!($chan_id[..])); - update_maps_on_chan_removal!($self, $short_to_id, $chan); + update_maps_on_chan_removal!($self, $short_to_chan_info, $chan); // TODO: $failed_fails is dropped here, which will cause other channels to hit the // chain in a confused state! We need to move them into the ChannelMonitor which // will be responsible for failing backwards once things confirm on-chain. @@ -1361,7 +1361,7 @@ macro_rules! 
handle_monitor_err { } }; ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr) => { { - let (res, drop) = handle_monitor_err!($self, $err, $channel_state.short_to_id, $entry.get_mut(), $action_type, $resend_raa, $resend_commitment, $resend_channel_ready, $failed_forwards, $failed_fails, $failed_finalized_fulfills, $entry.key()); + let (res, drop) = handle_monitor_err!($self, $err, $channel_state.short_to_chan_info, $entry.get_mut(), $action_type, $resend_raa, $resend_commitment, $resend_channel_ready, $failed_forwards, $failed_fails, $failed_finalized_fulfills, $entry.key()); if drop { $entry.remove_entry(); } @@ -1407,18 +1407,18 @@ macro_rules! maybe_break_monitor_err { } macro_rules! send_channel_ready { - ($short_to_id: expr, $pending_msg_events: expr, $channel: expr, $channel_ready_msg: expr) => { + ($short_to_chan_info: expr, $pending_msg_events: expr, $channel: expr, $channel_ready_msg: expr) => { $pending_msg_events.push(events::MessageSendEvent::SendChannelReady { node_id: $channel.get_counterparty_node_id(), msg: $channel_ready_msg, }); // Note that we may send a `channel_ready` multiple times for a channel if we reconnect, so // we allow collisions, but we shouldn't ever be updating the channel ID pointed to. - let outbound_alias_insert = $short_to_id.insert($channel.outbound_scid_alias(), ($channel.get_counterparty_node_id(), $channel.channel_id())); + let outbound_alias_insert = $short_to_chan_info.insert($channel.outbound_scid_alias(), ($channel.get_counterparty_node_id(), $channel.channel_id())); assert!(outbound_alias_insert.is_none() || outbound_alias_insert.unwrap() == ($channel.get_counterparty_node_id(), $channel.channel_id()), "SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels"); if let Some(real_scid) = $channel.get_short_channel_id() { - let scid_insert = $short_to_id.insert(real_scid, ($channel.get_counterparty_node_id(), $channel.channel_id())); + let scid_insert = $short_to_chan_info.insert(real_scid, ($channel.get_counterparty_node_id(), $channel.channel_id())); assert!(scid_insert.is_none() || scid_insert.unwrap() == ($channel.get_counterparty_node_id(), $channel.channel_id()), "SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels"); } @@ -1459,7 +1459,7 @@ macro_rules! handle_chan_restoration_locked { // Similar to the above, this implies that we're letting the channel_ready fly // before it should be allowed to. 
assert!(chanmon_update.is_none()); - send_channel_ready!($channel_state.short_to_id, $channel_state.pending_msg_events, $channel_entry.get(), msg); + send_channel_ready!($channel_state.short_to_chan_info, $channel_state.pending_msg_events, $channel_entry.get(), msg); } if let Some(msg) = $announcement_sigs { $channel_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures { @@ -1581,7 +1581,7 @@ impl ChannelMana channel_state: Mutex::new(ChannelHolder{ by_id: HashMap::new(), - short_to_id: HashMap::new(), + short_to_chan_info: HashMap::new(), forward_htlcs: HashMap::new(), claimable_htlcs: HashMap::new(), pending_msg_events: Vec::new(), @@ -1838,7 +1838,7 @@ impl ChannelMana if let Some(monitor_update) = monitor_update { if let Err(e) = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update) { let (result, is_permanent) = - handle_monitor_err!(self, e, channel_state.short_to_id, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE); + handle_monitor_err!(self, e, channel_state.short_to_chan_info, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE); if is_permanent { remove_channel!(self, channel_state, chan_entry); break result; @@ -2213,7 +2213,7 @@ impl ChannelMana // with a short_channel_id of 0. This is important as various things later assume // short_channel_id is non-0 in any ::Forward. if let &PendingHTLCRouting::Forward { ref short_channel_id, .. } = routing { - let id_option = channel_state.as_ref().unwrap().short_to_id.get(&short_channel_id).cloned(); + let id_option = channel_state.as_ref().unwrap().short_to_chan_info.get(&short_channel_id).cloned(); if let Some((err, code, chan_update)) = loop { let forwarding_id_opt = match id_option { None => { // unknown_next_peer @@ -2404,7 +2404,7 @@ impl ChannelMana } } - let id = match channel_lock.short_to_id.get(&path.first().unwrap().short_channel_id) { + let id = match channel_lock.short_to_chan_info.get(&path.first().unwrap().short_channel_id) { None => return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()}), Some((_cp_id, id)) => id.clone(), }; @@ -2933,7 +2933,7 @@ impl ChannelMana for (short_chan_id, mut pending_forwards) in channel_state.forward_htlcs.drain() { if short_chan_id != 0 { - let forward_chan_id = match channel_state.short_to_id.get(&short_chan_id) { + let forward_chan_id = match channel_state.short_to_chan_info.get(&short_chan_id) { Some((_cp_id, chan_id)) => chan_id.clone(), None => { for forward_info in pending_forwards.drain(..) 
{ @@ -3351,7 +3351,7 @@ impl ChannelMana self.process_background_events(); } - fn update_channel_fee(&self, short_to_id: &mut HashMap, pending_msg_events: &mut Vec, chan_id: &[u8; 32], chan: &mut Channel, new_feerate: u32) -> (bool, NotifyOption, Result<(), MsgHandleErrInternal>) { + fn update_channel_fee(&self, short_to_chan_info: &mut HashMap, pending_msg_events: &mut Vec, chan_id: &[u8; 32], chan: &mut Channel, new_feerate: u32) -> (bool, NotifyOption, Result<(), MsgHandleErrInternal>) { if !chan.is_outbound() { return (true, NotifyOption::SkipPersist, Ok(())); } // If the feerate has decreased by less than half, don't bother if new_feerate <= chan.get_feerate() && new_feerate * 2 > chan.get_feerate() { @@ -3371,7 +3371,7 @@ impl ChannelMana let res = match chan.send_update_fee_and_commit(new_feerate, &self.logger) { Ok(res) => Ok(res), Err(e) => { - let (drop, res) = convert_chan_err!(self, e, short_to_id, chan, chan_id); + let (drop, res) = convert_chan_err!(self, e, short_to_chan_info, chan, chan_id); if drop { retain_channel = false; } Err(res) } @@ -3379,7 +3379,7 @@ impl ChannelMana let ret_err = match res { Ok(Some((update_fee, commitment_signed, monitor_update))) => { if let Err(e) = self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) { - let (res, drop) = handle_monitor_err!(self, e, short_to_id, chan, RAACommitmentOrder::CommitmentFirst, chan_id, COMMITMENT_UPDATE_ONLY); + let (res, drop) = handle_monitor_err!(self, e, short_to_chan_info, chan, RAACommitmentOrder::CommitmentFirst, chan_id, COMMITMENT_UPDATE_ONLY); if drop { retain_channel = false; } res } else { @@ -3419,9 +3419,9 @@ impl ChannelMana let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; let pending_msg_events = &mut channel_state.pending_msg_events; - let short_to_id = &mut channel_state.short_to_id; + let short_to_chan_info = &mut channel_state.short_to_chan_info; channel_state.by_id.retain(|chan_id, chan| { - let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_id, pending_msg_events, chan_id, chan, new_feerate); + let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_chan_info, pending_msg_events, chan_id, chan, new_feerate); if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; } if err.is_err() { handle_errors.push(err); @@ -3457,10 +3457,10 @@ impl ChannelMana let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; let pending_msg_events = &mut channel_state.pending_msg_events; - let short_to_id = &mut channel_state.short_to_id; + let short_to_chan_info = &mut channel_state.short_to_chan_info; channel_state.by_id.retain(|chan_id, chan| { let counterparty_node_id = chan.get_counterparty_node_id(); - let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_id, pending_msg_events, chan_id, chan, new_feerate); + let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_chan_info, pending_msg_events, chan_id, chan, new_feerate); if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; } if err.is_err() { handle_errors.push((err, counterparty_node_id)); @@ -3468,7 +3468,7 @@ impl ChannelMana if !retain_channel { return false; } if let Err(e) = chan.timer_check_closing_negotiation_progress() { - let (needs_close, err) = convert_chan_err!(self, e, short_to_id, chan, chan_id); + let (needs_close, 
err) = convert_chan_err!(self, e, short_to_chan_info, chan, chan_id); handle_errors.push((Err(err), chan.get_counterparty_node_id())); if needs_close { return false; } } @@ -3877,7 +3877,7 @@ impl ChannelMana let mut expected_amt_msat = None; let mut valid_mpp = true; for htlc in sources.iter() { - if let None = channel_state.as_ref().unwrap().short_to_id.get(&htlc.prev_hop.short_channel_id) { + if let None = channel_state.as_ref().unwrap().short_to_chan_info.get(&htlc.prev_hop.short_channel_id) { valid_mpp = false; break; } @@ -3966,7 +3966,7 @@ impl ChannelMana fn claim_funds_from_hop(&self, channel_state_lock: &mut MutexGuard>, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage) -> ClaimFundsFromHop { //TODO: Delay the claimed_funds relaying just like we do outbound relay! let channel_state = &mut **channel_state_lock; - let chan_id = match channel_state.short_to_id.get(&prev_hop.short_channel_id) { + let chan_id = match channel_state.short_to_chan_info.get(&prev_hop.short_channel_id) { Some((_cp_id, chan_id)) => chan_id.clone(), None => { return ClaimFundsFromHop::PrevHopForceClosed @@ -4014,7 +4014,7 @@ impl ChannelMana payment_preimage, e); } let counterparty_node_id = chan.get().get_counterparty_node_id(); - let (drop, res) = convert_chan_err!(self, e, channel_state.short_to_id, chan.get_mut(), &chan_id); + let (drop, res) = convert_chan_err!(self, e, channel_state.short_to_chan_info, chan.get_mut(), &chan_id); if drop { chan.remove_entry(); } @@ -4418,7 +4418,7 @@ impl ChannelMana msg: funding_msg, }); if let Some(msg) = channel_ready { - send_channel_ready!(channel_state.short_to_id, channel_state.pending_msg_events, chan, msg); + send_channel_ready!(channel_state.short_to_chan_info, channel_state.pending_msg_events, chan, msg); } e.insert(chan); } @@ -4453,7 +4453,7 @@ impl ChannelMana return res } if let Some(msg) = channel_ready { - send_channel_ready!(channel_state.short_to_id, channel_state.pending_msg_events, chan.get(), msg); + send_channel_ready!(channel_state.short_to_chan_info, channel_state.pending_msg_events, chan.get(), msg); } funding_tx }, @@ -4526,7 +4526,7 @@ impl ChannelMana if let Some(monitor_update) = monitor_update { if let Err(e) = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update) { let (result, is_permanent) = - handle_monitor_err!(self, e, channel_state.short_to_id, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE); + handle_monitor_err!(self, e, channel_state.short_to_chan_info, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE); if is_permanent { remove_channel!(self, channel_state, chan_entry); break result; @@ -4888,7 +4888,7 @@ impl ChannelMana fn internal_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) -> Result { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; - let chan_id = match channel_state.short_to_id.get(&msg.contents.short_channel_id) { + let chan_id = match channel_state.short_to_chan_info.get(&msg.contents.short_channel_id) { Some((_cp_id, chan_id)) => chan_id.clone(), None => { // It's not a local channel @@ -5058,7 +5058,7 @@ impl ChannelMana let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; let by_id = &mut channel_state.by_id; - let short_to_id = &mut channel_state.short_to_id; + let short_to_chan_info = &mut channel_state.short_to_chan_info; let 
pending_msg_events = &mut channel_state.pending_msg_events; by_id.retain(|channel_id, chan| { @@ -5074,7 +5074,7 @@ impl ChannelMana if let Some((commitment_update, monitor_update)) = commitment_opt { if let Err(e) = self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) { has_monitor_update = true; - let (res, close_channel) = handle_monitor_err!(self, e, short_to_id, chan, RAACommitmentOrder::CommitmentFirst, channel_id, COMMITMENT_UPDATE_ONLY); + let (res, close_channel) = handle_monitor_err!(self, e, short_to_chan_info, chan, RAACommitmentOrder::CommitmentFirst, channel_id, COMMITMENT_UPDATE_ONLY); handle_errors.push((chan.get_counterparty_node_id(), res)); if close_channel { return false; } } else { @@ -5087,7 +5087,7 @@ impl ChannelMana true }, Err(e) => { - let (close_channel, res) = convert_chan_err!(self, e, short_to_id, chan, channel_id); + let (close_channel, res) = convert_chan_err!(self, e, short_to_chan_info, chan, channel_id); handle_errors.push((chan.get_counterparty_node_id(), Err(res))); // ChannelClosed event is generated by handle_error for us !close_channel @@ -5118,7 +5118,7 @@ impl ChannelMana let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; let by_id = &mut channel_state.by_id; - let short_to_id = &mut channel_state.short_to_id; + let short_to_chan_info = &mut channel_state.short_to_chan_info; let pending_msg_events = &mut channel_state.pending_msg_events; by_id.retain(|channel_id, chan| { @@ -5143,13 +5143,13 @@ impl ChannelMana log_info!(self.logger, "Broadcasting {}", log_tx!(tx)); self.tx_broadcaster.broadcast_transaction(&tx); - update_maps_on_chan_removal!(self, short_to_id, chan); + update_maps_on_chan_removal!(self, short_to_chan_info, chan); false } else { true } }, Err(e) => { has_update = true; - let (close_channel, res) = convert_chan_err!(self, e, short_to_id, chan, channel_id); + let (close_channel, res) = convert_chan_err!(self, e, short_to_chan_info, chan, channel_id); handle_errors.push((chan.get_counterparty_node_id(), Err(res))); !close_channel } @@ -5344,7 +5344,7 @@ impl ChannelMana loop { let scid_candidate = fake_scid::Namespace::Phantom.get_fake_scid(best_block.height(), &self.genesis_hash, &self.fake_scid_rand_bytes, &self.keys_manager); // Ensure the generated scid doesn't conflict with a real channel. 
- match channel_state.short_to_id.entry(scid_candidate) { + match channel_state.short_to_chan_info.entry(scid_candidate) { hash_map::Entry::Occupied(_) => continue, hash_map::Entry::Vacant(_) => return scid_candidate } @@ -5579,7 +5579,7 @@ where fn get_relevant_txids(&self) -> Vec { let channel_state = self.channel_state.lock().unwrap(); - let mut res = Vec::with_capacity(channel_state.short_to_id.len()); + let mut res = Vec::with_capacity(channel_state.short_to_chan_info.len()); for chan in channel_state.by_id.values() { if let Some(funding_txo) = chan.get_funding_txo() { res.push(funding_txo.txid); @@ -5622,7 +5622,7 @@ where { let mut channel_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_lock; - let short_to_id = &mut channel_state.short_to_id; + let short_to_chan_info = &mut channel_state.short_to_chan_info; let pending_msg_events = &mut channel_state.pending_msg_events; channel_state.by_id.retain(|_, channel| { let res = f(channel); @@ -5634,7 +5634,7 @@ where })); } if let Some(channel_ready) = channel_ready_opt { - send_channel_ready!(short_to_id, pending_msg_events, channel, channel_ready); + send_channel_ready!(short_to_chan_info, pending_msg_events, channel, channel_ready); if channel.is_usable() { log_trace!(self.logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", log_bytes!(channel.channel_id())); if let Ok(msg) = self.get_channel_update_for_unicast(channel) { @@ -5667,18 +5667,19 @@ where if channel.is_our_channel_ready() { if let Some(real_scid) = channel.get_short_channel_id() { // If we sent a 0conf channel_ready, and now have an SCID, we add it - // to the short_to_id map here. Note that we check whether we can relay - // using the real SCID at relay-time (i.e. enforce option_scid_alias - // then), and if the funding tx is ever un-confirmed we force-close the - // channel, ensuring short_to_id is always consistent. - let scid_insert = short_to_id.insert(real_scid, (channel.get_counterparty_node_id(), channel.channel_id())); + // to the short_to_chan_info map here. Note that we check whether we + // can relay using the real SCID at relay-time (i.e. + // enforce option_scid_alias then), and if the funding tx is ever + // un-confirmed we force-close the channel, ensuring short_to_chan_info + // is always consistent. + let scid_insert = short_to_chan_info.insert(real_scid, (channel.get_counterparty_node_id(), channel.channel_id())); assert!(scid_insert.is_none() || scid_insert.unwrap() == (channel.get_counterparty_node_id(), channel.channel_id()), "SCIDs should never collide - ensure you weren't behind by a full {} blocks when creating channels", fake_scid::MAX_SCID_BLOCKS_FROM_NOW); } } } else if let Err(reason) = res { - update_maps_on_chan_removal!(self, short_to_id, channel); + update_maps_on_chan_removal!(self, short_to_chan_info, channel); // It looks like our counterparty went on-chain or funding transaction was // reorged out of the main chain. Close the channel. failed_channels.push(channel.force_shutdown(true)); @@ -5869,14 +5870,14 @@ impl let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; let pending_msg_events = &mut channel_state.pending_msg_events; - let short_to_id = &mut channel_state.short_to_id; + let short_to_chan_info = &mut channel_state.short_to_chan_info; log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates. 
We believe we {} make future connections to this peer.", log_pubkey!(counterparty_node_id), if no_connection_possible { "cannot" } else { "can" }); channel_state.by_id.retain(|_, chan| { if chan.get_counterparty_node_id() == *counterparty_node_id { chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger); if chan.is_shutdown() { - update_maps_on_chan_removal!(self, short_to_id, chan); + update_maps_on_chan_removal!(self, short_to_chan_info, chan); self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer); return false; } else { @@ -6675,7 +6676,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> let channel_count: u64 = Readable::read(reader)?; let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128)); let mut by_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128)); - let mut short_to_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128)); + let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128)); let mut channel_closures = Vec::new(); for _ in 0..channel_count { let mut channel: Channel = Channel::read(reader, (&args.keys_manager, best_block_height))?; @@ -6715,7 +6716,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> } else { log_info!(args.logger, "Successfully loaded channel {}", log_bytes!(channel.channel_id())); if let Some(short_channel_id) = channel.get_short_channel_id() { - short_to_id.insert(short_channel_id, (channel.get_counterparty_node_id(), channel.channel_id())); + short_to_chan_info.insert(short_channel_id, (channel.get_counterparty_node_id(), channel.channel_id())); } by_id.insert(channel.channel_id(), channel); } @@ -6974,7 +6975,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> return Err(DecodeError::InvalidValue); } if chan.is_usable() { - if short_to_id.insert(chan.outbound_scid_alias(), (chan.get_counterparty_node_id(), *chan_id)).is_some() { + if short_to_chan_info.insert(chan.outbound_scid_alias(), (chan.get_counterparty_node_id(), *chan_id)).is_some() { // Note that in rare cases its possible to hit this while reading an older // channel if we just happened to pick a colliding outbound alias above. 
log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.outbound_scid_alias()); @@ -7033,7 +7034,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> channel_state: Mutex::new(ChannelHolder { by_id, - short_to_id, + short_to_chan_info, forward_htlcs, claimable_htlcs, pending_msg_events: Vec::new(), diff --git a/lightning/src/ln/reorg_tests.rs b/lightning/src/ln/reorg_tests.rs index 0025598e4c2..07b059d3981 100644 --- a/lightning/src/ln/reorg_tests.rs +++ b/lightning/src/ln/reorg_tests.rs @@ -202,7 +202,7 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_ let channel_state = nodes[0].node.channel_state.lock().unwrap(); assert_eq!(channel_state.by_id.len(), 1); - assert_eq!(channel_state.short_to_id.len(), 2); + assert_eq!(channel_state.short_to_chan_info.len(), 2); mem::drop(channel_state); if !reorg_after_reload { @@ -222,7 +222,7 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_ { let channel_state = nodes[0].node.channel_state.lock().unwrap(); assert_eq!(channel_state.by_id.len(), 0); - assert_eq!(channel_state.short_to_id.len(), 0); + assert_eq!(channel_state.short_to_chan_info.len(), 0); } } @@ -290,7 +290,7 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_ { let channel_state = nodes[0].node.channel_state.lock().unwrap(); assert_eq!(channel_state.by_id.len(), 0); - assert_eq!(channel_state.short_to_id.len(), 0); + assert_eq!(channel_state.short_to_chan_info.len(), 0); } } // With expect_channel_force_closed set the TestChainMonitor will enforce that the next update From 7833589cc25b5ea2222ed9e6501ad16b6c37c251 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Tigerstr=C3=B6m?= <11711198+ViktorTigerstrom@users.noreply.github.com> Date: Fri, 3 Jun 2022 17:17:39 +0200 Subject: [PATCH 03/21] Add id_to_peer map --- lightning/src/ln/channelmanager.rs | 43 +++++++++++++++++++++++++++--- 1 file changed, 40 insertions(+), 3 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 86568b2c75c..f592e158a25 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -739,6 +739,25 @@ pub struct ChannelManager>, + /// `channel_id` -> `counterparty_node_id`. + /// + /// Only `channel_id`s are allowed as keys in this map, and not `temporary_channel_id`s. As + /// multiple channels with the same `temporary_channel_id` to different peers can exist, + /// allowing `temporary_channel_id`s in this map would cause collisions for such channels. + /// + /// Note that this map should only be used for `MonitorEvent` handling, to be able to access + /// the corresponding channel for the event, as we only have access to the `channel_id` during + /// the handling of the events. + /// + /// TODO: + /// The `counterparty_node_id` isn't passed with `MonitorEvent`s currently. To pass it, we need + /// to add the `counterparty_node_id` to `ChannelMonitor`s. However, adding it as a required + /// field in `ChannelMonitor`s would break backwards compatability. + /// We should add `counterparty_node_id`s to `MonitorEvent`s, and eventually rely on it in the + /// future. That would make this map redundant, as only the `ChannelManager::per_peer_state` is + /// required to access the channel with the `counterparty_node_id`. + id_to_peer: Mutex>, + our_network_key: SecretKey, our_network_pubkey: PublicKey, @@ -1244,6 +1263,7 @@ macro_rules! 
update_maps_on_chan_removal { let alias_removed = $self.outbound_scid_aliases.lock().unwrap().remove(&$channel.outbound_scid_alias()); debug_assert!(alias_removed); } + $self.id_to_peer.lock().unwrap().remove(&$channel.channel_id()); $short_to_chan_info.remove(&$channel.outbound_scid_alias()); } } @@ -1589,6 +1609,7 @@ impl ChannelMana outbound_scid_aliases: Mutex::new(HashSet::new()), pending_inbound_payments: Mutex::new(HashMap::new()), pending_outbound_payments: Mutex::new(HashMap::new()), + id_to_peer: Mutex::new(HashMap::new()), our_network_key: keys_manager.get_node_secret(Recipient::Node).unwrap(), our_network_pubkey: PublicKey::from_secret_key(&secp_ctx, &keys_manager.get_node_secret(Recipient::Node).unwrap()), @@ -2756,12 +2777,18 @@ impl ChannelMana node_id: chan.get_counterparty_node_id(), msg, }); - match channel_state.by_id.entry(chan.channel_id()) { + let chan_id = chan.channel_id(); + match channel_state.by_id.entry(chan_id) { hash_map::Entry::Occupied(_) => { panic!("Generated duplicate funding txid?"); }, hash_map::Entry::Vacant(e) => { + let counterparty_node_id = chan.get_counterparty_node_id(); e.insert(chan); + let mut id_to_peer = self.id_to_peer.lock().unwrap(); + if id_to_peer.insert(chan_id, counterparty_node_id).is_some() { + panic!("id_to_peer map already contained funding txid, which shouldn't be possible"); + } } } Ok(()) @@ -4408,9 +4435,10 @@ impl ChannelMana } let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; - match channel_state.by_id.entry(funding_msg.channel_id) { + let channel_id = funding_msg.channel_id; + match channel_state.by_id.entry(channel_id) { hash_map::Entry::Occupied(_) => { - return Err(MsgHandleErrInternal::send_err_msg_no_close("Already had channel with the new channel_id".to_owned(), funding_msg.channel_id)) + return Err(MsgHandleErrInternal::send_err_msg_no_close("Already had channel with the new channel_id".to_owned(), channel_id)) }, hash_map::Entry::Vacant(e) => { channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingSigned { @@ -4421,6 +4449,10 @@ impl ChannelMana send_channel_ready!(channel_state.short_to_chan_info, channel_state.pending_msg_events, chan, msg); } e.insert(chan); + let mut id_to_peer = self.id_to_peer.lock().unwrap(); + if id_to_peer.insert(channel_id, *counterparty_node_id).is_some() { + panic!("id_to_peer map already contained funding txid, which shouldn't be possible"); + } } } Ok(()) @@ -6676,6 +6708,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> let channel_count: u64 = Readable::read(reader)?; let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128)); let mut by_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128)); + let mut id_to_peer = HashMap::with_capacity(cmp::min(channel_count as usize, 128)); let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128)); let mut channel_closures = Vec::new(); for _ in 0..channel_count { @@ -6718,6 +6751,9 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> if let Some(short_channel_id) = channel.get_short_channel_id() { short_to_chan_info.insert(short_channel_id, (channel.get_counterparty_node_id(), channel.channel_id())); } + if channel.is_funding_initiated() { + id_to_peer.insert(channel.channel_id(), channel.get_counterparty_node_id()); + } by_id.insert(channel.channel_id(), channel); } } else { @@ -7044,6 +7080,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: 
Deref, F: Deref, L: Deref> pending_outbound_payments: Mutex::new(pending_outbound_payments.unwrap()), outbound_scid_aliases: Mutex::new(outbound_scid_aliases), + id_to_peer: Mutex::new(id_to_peer), fake_scid_rand_bytes: fake_scid_rand_bytes.unwrap(), our_network_key, From b0975ebd7f4b3f97c1d3060d79137a04ac50173d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Tigerstr=C3=B6m?= <11711198+ViktorTigerstrom@users.noreply.github.com> Date: Mon, 6 Jun 2022 20:20:48 +0200 Subject: [PATCH 04/21] Add `ChannelManager::id_to_peer` map coverage test --- lightning/src/ln/channelmanager.rs | 115 +++++++++++++++++++++++++++++ 1 file changed, 115 insertions(+) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index f592e158a25..12a40b23e5f 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -7619,6 +7619,121 @@ mod tests { // Check that using the original payment hash succeeds. assert!(inbound_payment::verify(payment_hash, &payment_data, nodes[0].node.highest_seen_timestamp.load(Ordering::Acquire) as u64, &nodes[0].node.inbound_payment_key, &nodes[0].logger).is_ok()); } + + #[test] + fn test_id_to_peer_coverage() { + // Test that the `ChannelManager::id_to_peer` map contains channels which have been assigned + // a `channel_id` (i.e. have had the funding tx created), and that they are removed once + // the channel is successfully closed. + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None).unwrap(); + let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_channel); + let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_channel); + + let (temporary_channel_id, tx, _funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42); + let channel_id = &tx.txid().into_inner(); + { + // Ensure that the `id_to_peer` map is empty until either party has received the + // funding transaction, and has the real `channel_id`. + assert_eq!(nodes[0].node.id_to_peer.lock().unwrap().len(), 0); + assert_eq!(nodes[1].node.id_to_peer.lock().unwrap().len(), 0); + } + + nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap(); + { + // Assert that `nodes[0]`'s `id_to_peer` map is populated with the channel as soon as + // it has the funding transaction.
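Patch 03's doc comment confines `id_to_peer` to `MonitorEvent` handling, where only a `channel_id` is available. A minimal sketch of that lookup, using plain std types (the struct and method names here are illustrative stand-ins, not the actual `ChannelManager` internals):

    use std::collections::HashMap;
    use std::sync::Mutex;
    use bitcoin::secp256k1::PublicKey;

    /// Stand-in for the new field: real `channel_id` -> counterparty node id.
    struct IdToPeer {
        map: Mutex<HashMap<[u8; 32], PublicKey>>,
    }

    impl IdToPeer {
        /// Given only the `channel_id` carried by a monitor event, recover which peer the
        /// channel belongs to so the corresponding channel state can be looked up.
        fn counterparty_for(&self, channel_id: &[u8; 32]) -> Option<PublicKey> {
            self.map.lock().unwrap().get(channel_id).copied()
        }
    }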
+ let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap(); + assert_eq!(nodes_0_lock.len(), 1); + assert!(nodes_0_lock.contains_key(channel_id)); + + assert_eq!(nodes[1].node.id_to_peer.lock().unwrap().len(), 0); + } + + let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()); + + nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg); + { + let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap(); + assert_eq!(nodes_0_lock.len(), 1); + assert!(nodes_0_lock.contains_key(channel_id)); + + // Assert that `nodes[1]`'s `id_to_peer` map is populated with the channel as soon as + // it has the funding transaction. + let nodes_1_lock = nodes[1].node.id_to_peer.lock().unwrap(); + assert_eq!(nodes_1_lock.len(), 1); + assert!(nodes_1_lock.contains_key(channel_id)); + } + check_added_monitors!(nodes[1], 1); + let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed); + check_added_monitors!(nodes[0], 1); + let (channel_ready, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx); + let (announcement, nodes_0_update, nodes_1_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready); + update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &nodes_0_update, &nodes_1_update); + + nodes[0].node.close_channel(channel_id, &nodes[1].node.get_our_node_id()).unwrap(); + nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id())); + let nodes_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &nodes_1_shutdown); + + let closing_signed_node_0 = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &closing_signed_node_0); + { + // Assert that the channel is kept in the `id_to_peer` map for both nodes until the + // channel can be fully closed by both parties (i.e. no outstanding HTLCs exist, the + // fee for the closing transaction has been negotiated, and each party has the other + // party's signature for the fee-negotiated closing transaction). + let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap(); + assert_eq!(nodes_0_lock.len(), 1); + assert!(nodes_0_lock.contains_key(channel_id)); + + // At this stage, `nodes[1]` has proposed a fee for the closing transaction in the + // `handle_closing_signed` call above. As `nodes[1]` has not yet received the signature + // from `nodes[0]` for the closing transaction with the proposed fee, the channel is + // kept in `nodes[1]`'s `id_to_peer` map. + let nodes_1_lock = nodes[1].node.id_to_peer.lock().unwrap(); + assert_eq!(nodes_1_lock.len(), 1); + assert!(nodes_1_lock.contains_key(channel_id)); + } + + nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id())); + { + // `nodes[0]` accepts `nodes[1]`'s proposed fee for the closing transaction, and + // therefore has all it needs to fully close the channel (both signatures for the + // closing transaction).
+ // Assert that the channel is removed from `nodes[0]`'s `id_to_peer` map as it can be + // fully closed by `nodes[0]`. + assert_eq!(nodes[0].node.id_to_peer.lock().unwrap().len(), 0); + + // Assert that the channel is still in `nodes[1]`'s `id_to_peer` map, as `nodes[1]` + // doesn't have `nodes[0]`'s signature for the closing transaction yet. + let nodes_1_lock = nodes[1].node.id_to_peer.lock().unwrap(); + assert_eq!(nodes_1_lock.len(), 1); + assert!(nodes_1_lock.contains_key(channel_id)); + } + + let (_nodes_0_update, closing_signed_node_0) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); + + nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &closing_signed_node_0.unwrap()); + { + // Assert that the channel is removed from both parties' `id_to_peer` maps once they both + // have everything required to fully close the channel. + assert_eq!(nodes[0].node.id_to_peer.lock().unwrap().len(), 0); + assert_eq!(nodes[1].node.id_to_peer.lock().unwrap().len(), 0); + } + let (_nodes_1_update, _none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); + + // Clear the `ChannelClosed` events for the nodes. + nodes[0].node.get_and_clear_pending_events(); + nodes[1].node.get_and_clear_pending_events(); + } } #[cfg(all(any(test, feature = "_test_utils"), feature = "_bench_unstable"))] From d3667d9206d2ff4c86b3b21d257321385f4d4f28 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Tigerstr=C3=B6m?= <11711198+ViktorTigerstrom@users.noreply.github.com> Date: Fri, 27 May 2022 20:14:19 +0200 Subject: [PATCH 05/21] Add `counterparty_node` to test macros --- lightning/src/ln/functional_test_utils.rs | 6 +- lightning/src/ln/functional_tests.rs | 84 +++++++++++------------ lightning/src/ln/monitor_tests.rs | 12 ++-- 3 files changed, 51 insertions(+), 51 deletions(-) diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 38c05998e6a..5eda6166f96 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -520,7 +520,7 @@ macro_rules! get_channel_ref { #[cfg(test)] macro_rules! get_feerate { - ($node: expr, $channel_id: expr) => { + ($node: expr, $counterparty_node: expr, $channel_id: expr) => { { let mut lock; let chan = get_channel_ref!($node, lock, $channel_id); @@ -531,7 +531,7 @@ macro_rules!
get_opt_anchors { - ($node: expr, $channel_id: expr) => { + ($node: expr, $counterparty_node: expr, $channel_id: expr) => { { let mut lock; let chan = get_channel_ref!($node, lock, $channel_id); @@ -2181,7 +2181,7 @@ pub fn get_announce_close_broadcast_events<'a, 'b, 'c>(nodes: &Vec {{ + ($node: expr, $counterparty_node: expr, $channel_id: expr) => {{ let chan_lock = $node.node.channel_state.lock().unwrap(); let chan = chan_lock.by_id.get(&$channel_id).unwrap(); chan.get_value_stat() diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 76487ddcfd1..034b5fd9c87 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -946,8 +946,8 @@ fn test_update_fee() { check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - assert_eq!(get_feerate!(nodes[0], channel_id), feerate + 30); - assert_eq!(get_feerate!(nodes[1], channel_id), feerate + 30); + assert_eq!(get_feerate!(nodes[0], nodes[1], channel_id), feerate + 30); + assert_eq!(get_feerate!(nodes[1], nodes[0], channel_id), feerate + 30); close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true); check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure); check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure); @@ -1351,11 +1351,11 @@ fn test_basic_channel_reserve() { let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known()); - let chan_stat = get_channel_value_stat!(nodes[0], chan.2); + let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); let channel_reserve = chan_stat.channel_reserve_msat; // The 2* and +1 are for the fee spike reserve. - let commit_tx_fee = 2 * commit_tx_fee_msat(get_feerate!(nodes[0], chan.2), 1 + 1, get_opt_anchors!(nodes[0], chan.2)); + let commit_tx_fee = 2 * commit_tx_fee_msat(get_feerate!(nodes[0], nodes[1], chan.2), 1 + 1, get_opt_anchors!(nodes[0], nodes[1], chan.2)); let max_can_send = 5000000 - channel_reserve - commit_tx_fee; let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send + 1); let err = nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).err().unwrap(); @@ -1408,7 +1408,7 @@ fn test_fee_spike_violation_fails_htlc() { // nodes[0] just sent. In the code for construction of this message, "local" refers // to the sender of the message, and "remote" refers to the receiver. - let feerate_per_kw = get_feerate!(nodes[0], chan.2); + let feerate_per_kw = get_feerate!(nodes[0], nodes[1], chan.2); const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1; @@ -1706,9 +1706,9 @@ fn test_chan_reserve_violation_inbound_htlc_inbound_chan() { let feemsat = 239; let total_routing_fee_msat = (nodes.len() - 2) as u64 * feemsat; - let chan_stat = get_channel_value_stat!(nodes[0], chan.2); - let feerate = get_feerate!(nodes[0], chan.2); - let opt_anchors = get_opt_anchors!(nodes[0], chan.2); + let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); + let feerate = get_feerate!(nodes[0], nodes[1], chan.2); + let opt_anchors = get_opt_anchors!(nodes[0], nodes[1], chan.2); // Add a 2* and +1 for the fee spike reserve. 
let commit_tx_fee_2_htlc = 2*commit_tx_fee_msat(feerate, 2 + 1, opt_anchors); @@ -1796,11 +1796,11 @@ fn test_channel_reserve_holding_cell_htlcs() { let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 190000, 1001, InitFeatures::known(), InitFeatures::known()); let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 190000, 1001, InitFeatures::known(), InitFeatures::known()); - let mut stat01 = get_channel_value_stat!(nodes[0], chan_1.2); - let mut stat11 = get_channel_value_stat!(nodes[1], chan_1.2); + let mut stat01 = get_channel_value_stat!(nodes[0], nodes[1], chan_1.2); + let mut stat11 = get_channel_value_stat!(nodes[1], nodes[0], chan_1.2); - let mut stat12 = get_channel_value_stat!(nodes[1], chan_2.2); - let mut stat22 = get_channel_value_stat!(nodes[2], chan_2.2); + let mut stat12 = get_channel_value_stat!(nodes[1], nodes[2], chan_2.2); + let mut stat22 = get_channel_value_stat!(nodes[2], nodes[1], chan_2.2); macro_rules! expect_forward { ($node: expr) => {{ @@ -1814,8 +1814,8 @@ fn test_channel_reserve_holding_cell_htlcs() { let feemsat = 239; // set above let total_fee_msat = (nodes.len() - 2) as u64 * feemsat; - let feerate = get_feerate!(nodes[0], chan_1.2); - let opt_anchors = get_opt_anchors!(nodes[0], chan_1.2); + let feerate = get_feerate!(nodes[0], nodes[1], chan_1.2); + let opt_anchors = get_opt_anchors!(nodes[0], nodes[1], chan_1.2); let recv_value_0 = stat01.counterparty_max_htlc_value_in_flight_msat - total_fee_msat; @@ -1845,10 +1845,10 @@ fn test_channel_reserve_holding_cell_htlcs() { send_payment(&nodes[0], &vec![&nodes[1], &nodes[2]][..], recv_value_0); let (stat01_, stat11_, stat12_, stat22_) = ( - get_channel_value_stat!(nodes[0], chan_1.2), - get_channel_value_stat!(nodes[1], chan_1.2), - get_channel_value_stat!(nodes[1], chan_2.2), - get_channel_value_stat!(nodes[2], chan_2.2), + get_channel_value_stat!(nodes[0], nodes[1], chan_1.2), + get_channel_value_stat!(nodes[1], nodes[0], chan_1.2), + get_channel_value_stat!(nodes[1], nodes[2], chan_2.2), + get_channel_value_stat!(nodes[2], nodes[1], chan_2.2), ); assert_eq!(stat01_.value_to_self_msat, stat01.value_to_self_msat - amt_msat); @@ -1899,7 +1899,7 @@ fn test_channel_reserve_holding_cell_htlcs() { let recv_value_21 = recv_value_2/2 - additional_htlc_cost_msat/2; let recv_value_22 = recv_value_2 - recv_value_21 - total_fee_msat - additional_htlc_cost_msat; { - let stat = get_channel_value_stat!(nodes[0], chan_1.2); + let stat = get_channel_value_stat!(nodes[0], nodes[1], chan_1.2); assert_eq!(stat.value_to_self_msat - (stat.pending_outbound_htlcs_amount_msat + recv_value_21 + recv_value_22 + total_fee_msat + total_fee_msat + commit_tx_fee_3_htlcs), stat.channel_reserve_msat); } @@ -2011,11 +2011,11 @@ fn test_channel_reserve_holding_cell_htlcs() { let commit_tx_fee_1_htlc = 2*commit_tx_fee_msat(feerate, 1 + 1, opt_anchors); let expected_value_to_self = stat01.value_to_self_msat - (recv_value_1 + total_fee_msat) - (recv_value_21 + total_fee_msat) - (recv_value_22 + total_fee_msat) - (recv_value_3 + total_fee_msat); - let stat0 = get_channel_value_stat!(nodes[0], chan_1.2); + let stat0 = get_channel_value_stat!(nodes[0], nodes[1], chan_1.2); assert_eq!(stat0.value_to_self_msat, expected_value_to_self); assert_eq!(stat0.value_to_self_msat, stat0.channel_reserve_msat + commit_tx_fee_1_htlc); - let stat2 = get_channel_value_stat!(nodes[2], chan_2.2); + let stat2 = get_channel_value_stat!(nodes[2], nodes[1], chan_2.2); assert_eq!(stat2.value_to_self_msat, 
stat22.value_to_self_msat + recv_value_1 + recv_value_21 + recv_value_22 + recv_value_3); } @@ -2049,7 +2049,7 @@ fn channel_reserve_in_flight_removes() { let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - let b_chan_values = get_channel_value_stat!(nodes[1], chan_1.2); + let b_chan_values = get_channel_value_stat!(nodes[1], nodes[0], chan_1.2); // Route the first two HTLCs. let payment_value_1 = b_chan_values.channel_reserve_msat - b_chan_values.value_to_self_msat - 10000; let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], payment_value_1); @@ -6187,10 +6187,10 @@ fn test_fail_holding_cell_htlc_upon_free() { nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()); - let mut chan_stat = get_channel_value_stat!(nodes[0], chan.2); + let mut chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); let channel_reserve = chan_stat.channel_reserve_msat; - let feerate = get_feerate!(nodes[0], chan.2); - let opt_anchors = get_opt_anchors!(nodes[0], chan.2); + let feerate = get_feerate!(nodes[0], nodes[1], chan.2); + let opt_anchors = get_opt_anchors!(nodes[0], nodes[1], chan.2); // 2* and +1 HTLCs on the commit tx fee calculation for the fee spike reserve. let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1, opt_anchors); @@ -6198,7 +6198,7 @@ fn test_fail_holding_cell_htlc_upon_free() { // Send a payment which passes reserve checks but gets stuck in the holding cell. let our_payment_id = nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap(); - chan_stat = get_channel_value_stat!(nodes[0], chan.2); + chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); assert_eq!(chan_stat.holding_cell_outbound_amount_msat, max_can_send); // Flush the pending fee update. @@ -6211,7 +6211,7 @@ fn test_fail_holding_cell_htlc_upon_free() { // Upon receipt of the RAA, there will be an attempt to resend the holding cell // HTLC, but now that the fee has been raised the payment will now fail, causing // us to surface its failure to the user. - chan_stat = get_channel_value_stat!(nodes[0], chan.2); + chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0); nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Freeing holding cell with 1 HTLC updates in channel {}", hex::encode(chan.2)), 1); let failure_log = format!("Failed to send HTLC with payment_hash {} due to Cannot send value that would put our balance under counterparty-announced channel reserve value ({}) in channel {}", @@ -6267,10 +6267,10 @@ fn test_free_and_fail_holding_cell_htlcs() { nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()); - let mut chan_stat = get_channel_value_stat!(nodes[0], chan.2); + let mut chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); let channel_reserve = chan_stat.channel_reserve_msat; - let feerate = get_feerate!(nodes[0], chan.2); - let opt_anchors = get_opt_anchors!(nodes[0], chan.2); + let feerate = get_feerate!(nodes[0], nodes[1], chan.2); + let opt_anchors = get_opt_anchors!(nodes[0], nodes[1], chan.2); // 2* and +1 HTLCs on the commit tx fee calculation for the fee spike reserve. 
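// The test helpers used above now take the counterparty node as an extra argument, e.g.
// get_feerate!(nodes[0], nodes[1], chan.2). At this point in the series the argument is only
// threaded through (the macro bodies still resolve the channel via channel_state.by_id, as in
// the functional_test_utils.rs hunk at the top of this commit); presumably it becomes
// load-bearing once channels move into the per-peer map in the later "Store channels
// per-peer" commit. Condensed shape of one of the updated helpers:
macro_rules! get_channel_value_stat {
	($node: expr, $counterparty_node: expr, $channel_id: expr) => {{
		let chan_lock = $node.node.channel_state.lock().unwrap();
		let chan = chan_lock.by_id.get(&$channel_id).unwrap();
		chan.get_value_stat()
	}}
}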
let amt_1 = 20000; @@ -6280,10 +6280,10 @@ fn test_free_and_fail_holding_cell_htlcs() { // Send 2 payments which pass reserve checks but get stuck in the holding cell. nodes[0].node.send_payment(&route_1, payment_hash_1, &Some(payment_secret_1)).unwrap(); - chan_stat = get_channel_value_stat!(nodes[0], chan.2); + chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); assert_eq!(chan_stat.holding_cell_outbound_amount_msat, amt_1); let payment_id_2 = nodes[0].node.send_payment(&route_2, payment_hash_2, &Some(payment_secret_2)).unwrap(); - chan_stat = get_channel_value_stat!(nodes[0], chan.2); + chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); assert_eq!(chan_stat.holding_cell_outbound_amount_msat, amt_1 + amt_2); // Flush the pending fee update. @@ -6297,7 +6297,7 @@ fn test_free_and_fail_holding_cell_htlcs() { // Upon receipt of the RAA, there will be an attempt to resend the holding cell HTLCs, // but now that the fee has been raised the second payment will now fail, causing us // to surface its failure to the user. The first payment should succeed. - chan_stat = get_channel_value_stat!(nodes[0], chan.2); + chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0); nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Freeing holding cell with 2 HTLC updates in channel {}", hex::encode(chan.2)), 1); let failure_log = format!("Failed to send HTLC with payment_hash {} due to Cannot send value that would put our balance under counterparty-announced channel reserve value ({}) in channel {}", @@ -6395,10 +6395,10 @@ fn test_fail_holding_cell_htlc_upon_free_multihop() { nodes[2].node.handle_update_fee(&nodes[1].node.get_our_node_id(), update_msg.unwrap()); - let mut chan_stat = get_channel_value_stat!(nodes[0], chan_0_1.2); + let mut chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan_0_1.2); let channel_reserve = chan_stat.channel_reserve_msat; - let feerate = get_feerate!(nodes[0], chan_0_1.2); - let opt_anchors = get_opt_anchors!(nodes[0], chan_0_1.2); + let feerate = get_feerate!(nodes[0], nodes[1], chan_0_1.2); + let opt_anchors = get_opt_anchors!(nodes[0], nodes[1], chan_0_1.2); // Send a payment which passes reserve checks but gets stuck in the holding cell. let feemsat = 239; @@ -6419,7 +6419,7 @@ fn test_fail_holding_cell_htlc_upon_free_multihop() { commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); - chan_stat = get_channel_value_stat!(nodes[1], chan_1_2.2); + chan_stat = get_channel_value_stat!(nodes[1], nodes[2], chan_1_2.2); assert_eq!(chan_stat.holding_cell_outbound_amount_msat, max_can_send); // Flush the pending fee update. 
@@ -6623,7 +6623,7 @@ fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_value_in_flight() { let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let channel_value = 100000; let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, 0, InitFeatures::known(), InitFeatures::known()); - let max_in_flight = get_channel_value_stat!(nodes[0], chan.2).counterparty_max_htlc_value_in_flight_msat; + let max_in_flight = get_channel_value_stat!(nodes[0], nodes[1], chan.2).counterparty_max_htlc_value_in_flight_msat; send_payment(&nodes[0], &vec!(&nodes[1])[..], max_in_flight); @@ -6678,10 +6678,10 @@ fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() { let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known()); - let chan_stat = get_channel_value_stat!(nodes[0], chan.2); + let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); let channel_reserve = chan_stat.channel_reserve_msat; - let feerate = get_feerate!(nodes[0], chan.2); - let opt_anchors = get_opt_anchors!(nodes[0], chan.2); + let feerate = get_feerate!(nodes[0], nodes[1], chan.2); + let opt_anchors = get_opt_anchors!(nodes[0], nodes[1], chan.2); // The 2* and +1 are for the fee spike reserve. let commit_tx_fee_outbound = 2 * commit_tx_fee_msat(feerate, 1 + 1, opt_anchors); @@ -6757,7 +6757,7 @@ fn test_update_add_htlc_bolt2_receiver_check_max_in_flight_msat() { nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap(); check_added_monitors!(nodes[0], 1); let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); - updates.update_add_htlcs[0].amount_msat = get_channel_value_stat!(nodes[1], chan.2).counterparty_max_htlc_value_in_flight_msat + 1; + updates.update_add_htlcs[0].amount_msat = get_channel_value_stat!(nodes[1], nodes[0], chan.2).counterparty_max_htlc_value_in_flight_msat + 1; nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); assert!(nodes[1].node.list_channels().is_empty()); diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index 492a6553770..b01549200d4 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -110,8 +110,8 @@ fn chanmon_claim_value_coop_close() { let funding_outpoint = OutPoint { txid: funding_tx.txid(), index: 0 }; assert_eq!(funding_outpoint.to_channel_id(), chan_id); - let chan_feerate = get_feerate!(nodes[0], chan_id) as u64; - let opt_anchors = get_opt_anchors!(nodes[0], chan_id); + let chan_feerate = get_feerate!(nodes[0], nodes[1], chan_id) as u64; + let opt_anchors = get_opt_anchors!(nodes[0], nodes[1], chan_id); assert_eq!(vec![Balance::ClaimableOnChannelClose { claimable_amount_satoshis: 1_000_000 - 1_000 - chan_feerate * channel::commitment_tx_base_weight(opt_anchors) / 1000 @@ -210,8 +210,8 @@ fn do_test_claim_value_force_close(prev_commitment_tx: bool) { let htlc_cltv_timeout = nodes[0].best_block_info().1 + TEST_FINAL_CLTV + 1; // Note ChannelManager adds one to CLTV timeouts for safety - let chan_feerate = get_feerate!(nodes[0], chan_id) as u64; - let opt_anchors = get_opt_anchors!(nodes[0], chan_id); + let chan_feerate = get_feerate!(nodes[0], nodes[1], chan_id) as u64; + let opt_anchors = get_opt_anchors!(nodes[0], nodes[1], chan_id); let remote_txn = get_local_commitment_txn!(nodes[1], chan_id); // Before B receives the 
payment preimage, it only suggests the push_msat value of 1_000 sats @@ -557,8 +557,8 @@ fn test_balances_on_local_commitment_htlcs() { check_added_monitors!(nodes[1], 1); expect_payment_claimed!(nodes[1], payment_hash_2, 20_000_000); - let chan_feerate = get_feerate!(nodes[0], chan_id) as u64; - let opt_anchors = get_opt_anchors!(nodes[0], chan_id); + let chan_feerate = get_feerate!(nodes[0], nodes[1], chan_id) as u64; + let opt_anchors = get_opt_anchors!(nodes[0], nodes[1], chan_id); // Get nodes[0]'s commitment transaction and HTLC-Timeout transactions let as_txn = get_local_commitment_txn!(nodes[0], chan_id); From 6c62533340b195bd734917691ab7f9aeee09a280 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Tigerstr=C3=B6m?= <11711198+ViktorTigerstrom@users.noreply.github.com> Date: Thu, 26 May 2022 01:46:22 +0200 Subject: [PATCH 06/21] Store channels per-peer --- lightning/src/ln/chanmon_update_fail_tests.rs | 13 +- lightning/src/ln/channelmanager.rs | 2487 +++++++++-------- lightning/src/ln/functional_test_utils.rs | 24 +- lightning/src/ln/functional_tests.rs | 89 +- lightning/src/ln/onion_route_tests.rs | 4 +- lightning/src/ln/payment_tests.rs | 3 +- lightning/src/ln/reorg_tests.rs | 20 +- lightning/src/ln/shutdown_tests.rs | 5 +- 8 files changed, 1496 insertions(+), 1149 deletions(-) diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index 91575195a88..0424624b467 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -130,7 +130,9 @@ fn test_monitor_and_persister_update_fail() { let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert_eq!(updates.update_fulfill_htlcs.len(), 1); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); - if let Some(ref mut channel) = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan.2) { + if let Some(ref mut channel) = nodes[0].node.per_peer_state.write().unwrap().get_mut(&nodes[1].node.get_our_node_id()) + .unwrap().lock().unwrap().channel_by_id.get_mut(&chan.2) + { if let Ok((_, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) { // Check that even though the persister is returning a TemporaryFailure, // because the update is bogus, ultimately the error that's returned @@ -1411,9 +1413,12 @@ fn monitor_failed_no_reestablish_response() { let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2; { - let mut lock; - get_channel_ref!(nodes[0], lock, channel_id).announcement_sigs_state = AnnouncementSigsState::PeerReceived; - get_channel_ref!(nodes[1], lock, channel_id).announcement_sigs_state = AnnouncementSigsState::PeerReceived; + let node_0_per_peer_lock = nodes[0].node.per_peer_state.write().unwrap(); + let mut node_0_peer_state_lock = node_0_per_peer_lock.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); + let node_1_per_peer_lock = nodes[1].node.per_peer_state.write().unwrap(); + let mut node_1_peer_state_lock = node_1_per_peer_lock.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap(); + get_channel_ref!(nodes[0], node_0_peer_state_lock, channel_id).announcement_sigs_state = AnnouncementSigsState::PeerReceived; + get_channel_ref!(nodes[1], node_1_peer_state_lock, channel_id).announcement_sigs_state = AnnouncementSigsState::PeerReceived; } // Route the 
payment and deliver the initial commitment_signed (with a monitor update failure diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 12a40b23e5f..a886cf6d2f4 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -404,8 +404,7 @@ pub(super) enum RAACommitmentOrder { } // Note this is only exposed in cfg(test): -pub(super) struct ChannelHolder { - pub(super) by_id: HashMap<[u8; 32], Channel>, +pub(super) struct ChannelHolder { /// SCIDs (and outbound SCID aliases) -> `counterparty_node_id`s and `channel_id`s. /// /// Outbound SCID aliases are added here once the channel is available for normal use, with @@ -443,9 +442,15 @@ enum BackgroundEvent { ClosingMonitorUpdate((OutPoint, ChannelMonitorUpdate)), } -/// State we hold per-peer. In the future we should put channels in here, but for now we only hold -/// the latest Init features we heard from the peer. -struct PeerState { +/// State we hold per-peer. +pub(super) struct PeerState { + /// `temporary_channel_id` or `channel_id` -> `channel`. + /// + /// Holds all channels where the peer is the counterparty. Once a channel has been assigned a + /// `channel_id`, the `temporary_channel_id` key in the map is updated and is replaced by the + /// `channel_id`. + pub(super) channel_by_id: HashMap<[u8; 32], Channel>, + /// The latest `InitFeatures` we heard from the peer. latest_features: InitFeatures, } @@ -708,9 +713,9 @@ pub struct ChannelManager, #[cfg(any(test, feature = "_test_utils"))] - pub(super) channel_state: Mutex>, + pub(super) channel_state: Mutex, #[cfg(not(any(test, feature = "_test_utils")))] - channel_state: Mutex>, + channel_state: Mutex, /// Storage for PaymentSecrets and any requirements on future inbound payments before we will /// expose them to users via a PaymentReceived event. 
HTLCs which do not meet the requirements @@ -779,15 +784,20 @@ pub struct ChannelManager>>, + #[cfg(not(any(test, feature = "_test_utils")))] + per_peer_state: RwLock>>>, + #[cfg(any(test, feature = "_test_utils"))] + pub(super) per_peer_state: RwLock>>>, pending_events: Mutex>, pending_background_events: Mutex>, @@ -1600,7 +1610,6 @@ impl ChannelMana best_block: RwLock::new(params.best_block), channel_state: Mutex::new(ChannelHolder{ - by_id: HashMap::new(), short_to_chan_info: HashMap::new(), forward_htlcs: HashMap::new(), claimable_htlcs: HashMap::new(), @@ -1719,16 +1728,21 @@ impl ChannelMana let temporary_channel_id = channel.channel_id(); let mut channel_state = self.channel_state.lock().unwrap(); - match channel_state.by_id.entry(temporary_channel_id) { - hash_map::Entry::Occupied(_) => { - if cfg!(fuzzing) { - return Err(APIError::APIMisuseError { err: "Fuzzy bad RNG".to_owned() }); - } else { - panic!("RNG is bad???"); - } - }, - hash_map::Entry::Vacant(entry) => { entry.insert(channel); } - } + let per_peer_state = self.per_peer_state.write().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(&their_network_key){ + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + match peer_state.channel_by_id.entry(temporary_channel_id) { + hash_map::Entry::Occupied(_) => { + if cfg!(fuzzing) { + return Err(APIError::APIMisuseError { err: "Fuzzy bad RNG".to_owned() }); + } else { + panic!("RNG is bad???"); + } + }, + hash_map::Entry::Vacant(entry) => { entry.insert(channel); } + } + } else { unreachable!() } channel_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel { node_id: their_network_key, msg: res, @@ -1736,54 +1750,66 @@ impl ChannelMana Ok(temporary_channel_id) } - fn list_channels_with_filter)) -> bool>(&self, f: Fn) -> Vec { + fn list_channels_with_filter)) -> bool + Copy>(&self, f: Fn) -> Vec { let mut res = Vec::new(); { let channel_state = self.channel_state.lock().unwrap(); - res.reserve(channel_state.by_id.len()); - for (channel_id, channel) in channel_state.by_id.iter().filter(f) { - let balance = channel.get_available_balances(); - let (to_remote_reserve_satoshis, to_self_reserve_satoshis) = - channel.get_holder_counterparty_selected_channel_reserve_satoshis(); - res.push(ChannelDetails { - channel_id: (*channel_id).clone(), - counterparty: ChannelCounterparty { - node_id: channel.get_counterparty_node_id(), - features: InitFeatures::empty(), - unspendable_punishment_reserve: to_remote_reserve_satoshis, - forwarding_info: channel.counterparty_forwarding_info(), - // Ensures that we have actually received the `htlc_minimum_msat` value - // from the counterparty through the `OpenChannel` or `AcceptChannel` - // message (as they are always the first message from the counterparty). - // Else `Channel::get_counterparty_htlc_minimum_msat` could return the - // default `0` value set by `Channel::new_outbound`. - outbound_htlc_minimum_msat: if channel.have_received_message() { - Some(channel.get_counterparty_htlc_minimum_msat()) } else { None }, - outbound_htlc_maximum_msat: channel.get_counterparty_htlc_maximum_msat(), - }, - funding_txo: channel.get_funding_txo(), - // Note that accept_channel (or open_channel) is always the first message, so - // `have_received_message` indicates that type negotiation has completed. 
- channel_type: if channel.have_received_message() { Some(channel.get_channel_type().clone()) } else { None }, - short_channel_id: channel.get_short_channel_id(), - outbound_scid_alias: if channel.is_usable() { Some(channel.outbound_scid_alias()) } else { None }, - inbound_scid_alias: channel.latest_inbound_scid_alias(), - channel_value_satoshis: channel.get_value_satoshis(), - unspendable_punishment_reserve: to_self_reserve_satoshis, - balance_msat: balance.balance_msat, - inbound_capacity_msat: balance.inbound_capacity_msat, - outbound_capacity_msat: balance.outbound_capacity_msat, - next_outbound_htlc_limit_msat: balance.next_outbound_htlc_limit_msat, - user_channel_id: channel.get_user_id(), - confirmations_required: channel.minimum_depth(), - force_close_spend_delay: channel.get_counterparty_selected_contest_delay(), - is_outbound: channel.is_outbound(), - is_channel_ready: channel.is_usable(), - is_usable: channel.is_live(), - is_public: channel.should_announce(), - inbound_htlc_minimum_msat: Some(channel.get_holder_htlc_minimum_msat()), - inbound_htlc_maximum_msat: channel.get_holder_htlc_maximum_msat() - }); + // Allocate our best estimate of the number of channels we have in the `res` + // Vec. Sadly the `channel_state.short_to_chan_info` map doesn't cover channels without + // a scid or a scid alias, and the `channel_state.id_to_peer` shouldn't be used outside + // of the ChannelMonitor handling. Therefore reallocations may still occur, but is + // unlikely as the `channel_state.short_to_chan_info` map often contains 2 entries for + // the same channel. + res.reserve(channel_state.short_to_chan_info.len()); + + let per_peer_state = self.per_peer_state.read().unwrap(); + for (_cp_id, peer_state_mutex) in per_peer_state.iter() { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + for (channel_id, channel) in peer_state.channel_by_id.iter().filter(f) { + let balance = channel.get_available_balances(); + let (to_remote_reserve_satoshis, to_self_reserve_satoshis) = + channel.get_holder_counterparty_selected_channel_reserve_satoshis(); + res.push(ChannelDetails { + channel_id: (*channel_id).clone(), + counterparty: ChannelCounterparty { + node_id: channel.get_counterparty_node_id(), + features: InitFeatures::empty(), + unspendable_punishment_reserve: to_remote_reserve_satoshis, + forwarding_info: channel.counterparty_forwarding_info(), + // Ensures that we have actually received the `htlc_minimum_msat` value + // from the counterparty through the `OpenChannel` or `AcceptChannel` + // message (as they are always the first message from the counterparty). + // Else `Channel::get_counterparty_htlc_minimum_msat` could return the + // default `0` value set by `Channel::new_outbound`. + outbound_htlc_minimum_msat: if channel.have_received_message() { + Some(channel.get_counterparty_htlc_minimum_msat()) } else { None }, + outbound_htlc_maximum_msat: channel.get_counterparty_htlc_maximum_msat(), + }, + funding_txo: channel.get_funding_txo(), + // Note that accept_channel (or open_channel) is always the first message, so + // `have_received_message` indicates that type negotiation has completed. 
+ channel_type: if channel.have_received_message() { Some(channel.get_channel_type().clone()) } else { None }, + short_channel_id: channel.get_short_channel_id(), + outbound_scid_alias: if channel.is_usable() { Some(channel.outbound_scid_alias()) } else { None }, + inbound_scid_alias: channel.latest_inbound_scid_alias(), + channel_value_satoshis: channel.get_value_satoshis(), + unspendable_punishment_reserve: to_self_reserve_satoshis, + balance_msat: balance.balance_msat, + inbound_capacity_msat: balance.inbound_capacity_msat, + outbound_capacity_msat: balance.outbound_capacity_msat, + next_outbound_htlc_limit_msat: balance.next_outbound_htlc_limit_msat, + user_channel_id: channel.get_user_id(), + confirmations_required: channel.minimum_depth(), + force_close_spend_delay: channel.get_counterparty_selected_contest_delay(), + is_outbound: channel.is_outbound(), + is_channel_ready: channel.is_usable(), + is_usable: channel.is_live(), + is_public: channel.should_announce(), + inbound_htlc_minimum_msat: Some(channel.get_holder_htlc_minimum_msat()), + inbound_htlc_maximum_msat: channel.get_holder_htlc_maximum_msat() + }); + } } } let per_peer_state = self.per_peer_state.read().unwrap(); @@ -1839,51 +1865,50 @@ impl ChannelMana let result: Result<(), _> = loop { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; - match channel_state.by_id.entry(channel_id.clone()) { - hash_map::Entry::Occupied(mut chan_entry) => { - if *counterparty_node_id != chan_entry.get().get_counterparty_node_id(){ - return Err(APIError::APIMisuseError { err: "The passed counterparty_node_id doesn't match the channel's counterparty node_id".to_owned() }); - } - let per_peer_state = self.per_peer_state.read().unwrap(); - let (shutdown_msg, monitor_update, htlcs) = match per_peer_state.get(&counterparty_node_id) { - Some(peer_state) => { - let peer_state = peer_state.lock().unwrap(); - let their_features = &peer_state.latest_features; - chan_entry.get_mut().get_shutdown(&self.keys_manager, their_features, target_feerate_sats_per_1000_weight)? - }, - None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", counterparty_node_id) }), - }; - failed_htlcs = htlcs; + let per_peer_state = self.per_peer_state.write().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + match peer_state.channel_by_id.entry(channel_id.clone()) { + hash_map::Entry::Occupied(mut chan_entry) => { + if *counterparty_node_id != chan_entry.get().get_counterparty_node_id(){ + return Err(APIError::APIMisuseError { err: "The passed counterparty_node_id doesn't match the channel's counterparty node_id".to_owned() }); + } + let (shutdown_msg, monitor_update, htlcs) = chan_entry.get_mut().get_shutdown(&self.keys_manager, &peer_state.latest_features, target_feerate_sats_per_1000_weight)?; + failed_htlcs = htlcs; - // Update the monitor with the shutdown script if necessary. - if let Some(monitor_update) = monitor_update { - if let Err(e) = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update) { - let (result, is_permanent) = + // Update the monitor with the shutdown script if necessary. 
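// A minimal, self-contained model (not the actual ChannelManager code) of the locking shape
// this commit introduces: every channel access first takes the per_peer_state map lock, then
// the counterparty's Mutex-guarded PeerState, and only then indexes channel_by_id. The types
// here are stand-ins so the sketch compiles on its own.
use std::collections::HashMap;
use std::sync::{Mutex, RwLock};

type NodeId = [u8; 33];   // stand-in for the counterparty PublicKey
type ChannelId = [u8; 32];

struct PeerStateModel<C> {
	// temporary_channel_id or channel_id -> channel, mirroring PeerState::channel_by_id
	channel_by_id: HashMap<ChannelId, C>,
}

fn with_channel<C, R>(
	per_peer_state: &RwLock<HashMap<NodeId, Mutex<PeerStateModel<C>>>>,
	counterparty: &NodeId, channel_id: &ChannelId, f: impl FnOnce(&mut C) -> R,
) -> Option<R> {
	let per_peer = per_peer_state.read().unwrap();   // outer lock: the map of peers
	let peer_mutex = per_peer.get(counterparty)?;    // unknown peer; the real code surfaces an APIError or unreachable!()
	let mut peer_state = peer_mutex.lock().unwrap(); // inner lock: this peer's channels
	peer_state.channel_by_id.get_mut(channel_id).map(f)
}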
+ if let Some(monitor_update) = monitor_update { + if let Err(e) = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update) { + let (result, is_permanent) = handle_monitor_err!(self, e, channel_state.short_to_chan_info, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE); - if is_permanent { - remove_channel!(self, channel_state, chan_entry); - break result; + if is_permanent { + remove_channel!(self, channel_state, chan_entry); + break result; + } } } - } - channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { - node_id: *counterparty_node_id, - msg: shutdown_msg - }); + channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { + node_id: *counterparty_node_id, + msg: shutdown_msg + }); - if chan_entry.get().is_shutdown() { - let channel = remove_channel!(self, channel_state, chan_entry); - if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) { - channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { - msg: channel_update - }); + if chan_entry.get().is_shutdown() { + let channel = remove_channel!(self, channel_state, chan_entry); + if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) { + channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + msg: channel_update + }); + } + self.issue_channel_close_events(&channel, ClosureReason::HolderForceClosed); } - self.issue_channel_close_events(&channel, ClosureReason::HolderForceClosed); - } - break Ok(()); - }, - hash_map::Entry::Vacant(_) => return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()}) + break Ok(()); + }, + hash_map::Entry::Vacant(_) => return Err(APIError::ChannelUnavailable { err: "No such channel".to_owned() }) + } + } else { + return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", counterparty_node_id) }); } }; @@ -1961,18 +1986,25 @@ impl ChannelMana let mut chan = { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; - if let hash_map::Entry::Occupied(chan) = channel_state.by_id.entry(channel_id.clone()) { - if chan.get().get_counterparty_node_id() != *peer_node_id { - return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()}); - } - if let Some(peer_msg) = peer_msg { - self.issue_channel_close_events(chan.get(),ClosureReason::CounterpartyForceClosed { peer_msg: peer_msg.to_string() }); + let per_peer_state = self.per_peer_state.write().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(peer_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + if let hash_map::Entry::Occupied(chan) = peer_state.channel_by_id.entry(channel_id.clone()) { + if chan.get().get_counterparty_node_id() != *peer_node_id { + return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()}); + } + if let Some(peer_msg) = peer_msg { + self.issue_channel_close_events(chan.get(),ClosureReason::CounterpartyForceClosed { peer_msg: peer_msg.to_string() }); + } else { + self.issue_channel_close_events(chan.get(),ClosureReason::HolderForceClosed); + } + remove_channel!(self, channel_state, chan) } else { - self.issue_channel_close_events(chan.get(),ClosureReason::HolderForceClosed); + return Err(APIError::ChannelUnavailable{ err: "No such channel".to_owned() }); } - remove_channel!(self, channel_state, chan) } else { - return 
Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()}); + return Err(APIError::APIMisuseError{ err: format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", peer_node_id) }); } }; log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..])); @@ -2114,7 +2146,7 @@ impl ChannelMana }) } - fn decode_update_add_htlc_onion(&self, msg: &msgs::UpdateAddHTLC) -> (PendingHTLCStatus, MutexGuard>) { + fn decode_update_add_htlc_onion(&self, msg: &msgs::UpdateAddHTLC) -> (PendingHTLCStatus, MutexGuard) { macro_rules! return_malformed_err { ($msg: expr, $err_code: expr) => { { @@ -2236,7 +2268,7 @@ impl ChannelMana if let &PendingHTLCRouting::Forward { ref short_channel_id, .. } = routing { let id_option = channel_state.as_ref().unwrap().short_to_chan_info.get(&short_channel_id).cloned(); if let Some((err, code, chan_update)) = loop { - let forwarding_id_opt = match id_option { + let forwarding_chan_info_opt = match id_option { None => { // unknown_next_peer // Note that this is likely a timing oracle for detecting whether an scid is a // phantom. @@ -2246,10 +2278,14 @@ impl ChannelMana break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None)); } }, - Some((_cp_id, id)) => Some(id.clone()), + Some((cp_id, id)) => Some((cp_id.clone(), id.clone())), }; - let (chan_update_opt, forwardee_cltv_expiry_delta) = if let Some(forwarding_id) = forwarding_id_opt { - let chan = channel_state.as_mut().unwrap().by_id.get_mut(&forwarding_id).unwrap(); + let (chan_update_opt, forwardee_cltv_expiry_delta) = if let Some((counterparty_node_id, forwarding_id)) = forwarding_chan_info_opt { + let per_peer_state = self.per_peer_state.write().unwrap(); + let peer_state_mutex = per_peer_state.get(&counterparty_node_id).unwrap(); + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + let chan = peer_state.channel_by_id.get_mut(&forwarding_id).unwrap(); if !chan.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels { // Note that the behavior here should be identical to the above block - we // should NOT reveal the existence or non-existence of a private channel if @@ -2425,9 +2461,9 @@ impl ChannelMana } } - let id = match channel_lock.short_to_chan_info.get(&path.first().unwrap().short_channel_id) { + let (counterparty_node_id, id) = match channel_lock.short_to_chan_info.get(&path.first().unwrap().short_channel_id) { None => return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()}), - Some((_cp_id, id)) => id.clone(), + Some((cp_id, id)) => (cp_id.clone(), id.clone()), }; macro_rules! 
insert_outbound_payment { @@ -2446,53 +2482,58 @@ impl ChannelMana } let channel_state = &mut *channel_lock; - if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(id) { - match { - if chan.get().get_counterparty_node_id() != path.first().unwrap().pubkey { - return Err(APIError::RouteError{err: "Node ID mismatch on first hop!"}); - } - if !chan.get().is_live() { - return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected/pending monitor update!".to_owned()}); - } - break_chan_entry!(self, chan.get_mut().send_htlc_and_commit( - htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute { - path: path.clone(), - session_priv: session_priv.clone(), - first_hop_htlc_msat: htlc_msat, - payment_id, - payment_secret: payment_secret.clone(), - payment_params: payment_params.clone(), - }, onion_packet, &self.logger), - channel_state, chan) - } { - Some((update_add, commitment_signed, monitor_update)) => { - if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { - maybe_break_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true); - // Note that MonitorUpdateFailed here indicates (per function docs) - // that we will resend the commitment update once monitor updating - // is restored. Therefore, we must return an error indicating that - // it is unsafe to retry the payment wholesale, which we do in the - // send_payment check for MonitorUpdateFailed, below. - insert_outbound_payment!(); // Only do this after possibly break'ing on Perm failure above. - return Err(APIError::MonitorUpdateFailed); + let per_peer_state = self.per_peer_state.write().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(id) { + match { + if chan.get().get_counterparty_node_id() != path.first().unwrap().pubkey { + return Err(APIError::RouteError{err: "Node ID mismatch on first hop!"}); } - insert_outbound_payment!(); + if !chan.get().is_live() { + return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected/pending monitor update!".to_owned()}); + } + break_chan_entry!(self, chan.get_mut().send_htlc_and_commit( + htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute { + path: path.clone(), + session_priv: session_priv.clone(), + first_hop_htlc_msat: htlc_msat, + payment_id, + payment_secret: payment_secret.clone(), + payment_params: payment_params.clone(), + }, onion_packet, &self.logger), + channel_state, chan) + } { + Some((update_add, commitment_signed, monitor_update)) => { + if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { + maybe_break_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true); + // Note that MonitorUpdateFailed here indicates (per function docs) + // that we will resend the commitment update once monitor updating + // is restored. Therefore, we must return an error indicating that + // it is unsafe to retry the payment wholesale, which we do in the + // send_payment check for MonitorUpdateFailed, below. + insert_outbound_payment!(); // Only do this after possibly break'ing on Perm failure above. 
+ return Err(APIError::MonitorUpdateFailed); + } + insert_outbound_payment!(); - log_debug!(self.logger, "Sending payment along path resulted in a commitment_signed for channel {}", log_bytes!(chan.get().channel_id())); - channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { - node_id: path.first().unwrap().pubkey, - updates: msgs::CommitmentUpdate { - update_add_htlcs: vec![update_add], - update_fulfill_htlcs: Vec::new(), - update_fail_htlcs: Vec::new(), - update_fail_malformed_htlcs: Vec::new(), - update_fee: None, - commitment_signed, - }, - }); - }, - None => { insert_outbound_payment!(); }, - } + log_debug!(self.logger, "Sending payment along path resulted in a commitment_signed for channel {}", log_bytes!(chan.get().channel_id())); + channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { + node_id: path.first().unwrap().pubkey, + updates: msgs::CommitmentUpdate { + update_add_htlcs: vec![update_add], + update_fulfill_htlcs: Vec::new(), + update_fail_htlcs: Vec::new(), + update_fail_malformed_htlcs: Vec::new(), + update_fee: None, + commitment_signed, + }, + }); + }, + None => { insert_outbound_payment!(); }, + } + } else { unreachable!(); } } else { unreachable!(); } return Ok(()); }; @@ -2747,20 +2788,29 @@ impl ChannelMana /// Handles the generation of a funding transaction, optionally (for tests) with a function /// which checks the correctness of the funding transaction given the associated channel. fn funding_transaction_generated_intern, &Transaction) -> Result>( - &self, temporary_channel_id: &[u8; 32], _counterparty_node_id: &PublicKey, funding_transaction: Transaction, find_funding_output: FundingOutput + &self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, funding_transaction: Transaction, find_funding_output: FundingOutput ) -> Result<(), APIError> { let (chan, msg) = { - let (res, chan) = match self.channel_state.lock().unwrap().by_id.remove(temporary_channel_id) { - Some(mut chan) => { - let funding_txo = find_funding_output(&chan, &funding_transaction)?; - - (chan.get_outbound_funding_created(funding_transaction, funding_txo, &self.logger) - .map_err(|e| if let ChannelError::Close(msg) = e { - MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.get_user_id(), chan.force_shutdown(true), None) - } else { unreachable!(); }) - , chan) - }, - None => { return Err(APIError::ChannelUnavailable { err: "No such channel".to_owned() }) }, + let (res, chan) = { + let per_peer_state = self.per_peer_state.write().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + match peer_state.channel_by_id.remove(temporary_channel_id) { + Some(mut chan) => { + let funding_txo = find_funding_output(&chan, &funding_transaction)?; + + (chan.get_outbound_funding_created(funding_transaction, funding_txo, &self.logger) + .map_err(|e| if let ChannelError::Close(msg) = e { + MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.get_user_id(), chan.force_shutdown(true), None) + } else { unreachable!(); }) + , chan) + }, + None => { return Err(APIError::ChannelUnavailable { err: "No such channel".to_owned() }) }, + } + } else { + return Err(APIError::APIMisuseError { err: format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id) }) + } }; match handle_error!(self, res, chan.get_counterparty_node_id()) { 
Ok(funding_msg) => { @@ -2772,25 +2822,30 @@ impl ChannelMana } }; - let mut channel_state = self.channel_state.lock().unwrap(); + let mut channel_state_lock = self.channel_state.lock().unwrap(); + let channel_state = &mut *channel_state_lock; channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingCreated { node_id: chan.get_counterparty_node_id(), msg, }); + let per_peer_state = self.per_peer_state.write().unwrap(); let chan_id = chan.channel_id(); - match channel_state.by_id.entry(chan_id) { - hash_map::Entry::Occupied(_) => { - panic!("Generated duplicate funding txid?"); - }, - hash_map::Entry::Vacant(e) => { - let counterparty_node_id = chan.get_counterparty_node_id(); - e.insert(chan); - let mut id_to_peer = self.id_to_peer.lock().unwrap(); - if id_to_peer.insert(chan_id, counterparty_node_id).is_some() { - panic!("id_to_peer map already contained funding txid, which shouldn't be possible"); + if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + match peer_state.channel_by_id.entry(chan_id) { + hash_map::Entry::Occupied(_) => { + panic!("Generated duplicate funding txid?"); + }, + hash_map::Entry::Vacant(e) => { + e.insert(chan); + let mut id_to_peer = self.id_to_peer.lock().unwrap(); + if id_to_peer.insert(chan_id, *counterparty_node_id).is_some() { + panic!("id_to_peer map already contained funding txid, which shouldn't be possible"); + } } } - } + } else { unreachable!(); } Ok(()) } @@ -2914,22 +2969,27 @@ impl ChannelMana let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; + let per_peer_state = self.per_peer_state.read().unwrap(); let mut announced_chans = false; - for (_, chan) in channel_state.by_id.iter() { - if let Some(msg) = chan.get_signed_channel_announcement(self.get_our_node_id(), self.genesis_hash.clone(), self.best_block.read().unwrap().height()) { - channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement { - msg, - update_msg: match self.get_channel_update_for_broadcast(chan) { - Ok(msg) => msg, - Err(_) => continue, - }, - }); - announced_chans = true; - } else { - // If the channel is not public or has not yet reached channel_ready, check the - // next channel. If we don't yet have any public channels, we'll skip the broadcast - // below as peers may not accept it without channels on chain first. + for (_cp_id, peer_state_mutex) in per_peer_state.iter() { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + for (_, chan) in peer_state.channel_by_id.iter() { + if let Some(msg) = chan.get_signed_channel_announcement(self.get_our_node_id(), self.genesis_hash.clone(), self.best_block.read().unwrap().height()) { + channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement { + msg, + update_msg: match self.get_channel_update_for_broadcast(chan) { + Ok(msg) => msg, + Err(_) => continue, + }, + }); + announced_chans = true; + } else { + // If the channel is not public or has not yet reached channel_ready, check the + // next channel. If we don't yet have any public channels, we'll skip the broadcast + // below as peers may not accept it without channels on chain first. 
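// Taken together, the hunks in this commit leave three lookup tables (generics and lock
// wrappers elided here; treat the exact types as approximate):
//
//	short_to_chan_info : scid / scid alias    -> (counterparty_node_id, channel_id)
//	id_to_peer         : channel_id           -> counterparty_node_id   (ChannelMonitor handling only)
//	per_peer_state     : counterparty_node_id -> Mutex<PeerState>, whose channel_by_id map owns the Channel
//
// ChannelHolder::by_id is removed; code that used to iterate it now walks per_peer_state and
// each peer's channel_by_id in turn.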
+ } } } @@ -2957,11 +3017,12 @@ impl ChannelMana { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; + let per_peer_state = self.per_peer_state.write().unwrap(); for (short_chan_id, mut pending_forwards) in channel_state.forward_htlcs.drain() { if short_chan_id != 0 { - let forward_chan_id = match channel_state.short_to_chan_info.get(&short_chan_id) { - Some((_cp_id, chan_id)) => chan_id.clone(), + let (counterparty_node_id, forward_chan_id) = match channel_state.short_to_chan_info.get(&short_chan_id) { + Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()), None => { for forward_info in pending_forwards.drain(..) { match forward_info { @@ -3031,129 +3092,136 @@ impl ChannelMana continue; } }; - if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(forward_chan_id) { - let mut add_htlc_msgs = Vec::new(); - let mut fail_htlc_msgs = Vec::new(); - for forward_info in pending_forwards.drain(..) { - match forward_info { - HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info: PendingHTLCInfo { - routing: PendingHTLCRouting::Forward { - onion_packet, .. - }, incoming_shared_secret, payment_hash, amt_to_forward, outgoing_cltv_value }, - prev_funding_outpoint } => { - log_trace!(self.logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, log_bytes!(payment_hash.0), short_chan_id); - let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData { - short_channel_id: prev_short_channel_id, - outpoint: prev_funding_outpoint, - htlc_id: prev_htlc_id, - incoming_packet_shared_secret: incoming_shared_secret, - // Phantom payments are only PendingHTLCRouting::Receive. - phantom_shared_secret: None, - }); - match chan.get_mut().send_htlc(amt_to_forward, payment_hash, outgoing_cltv_value, htlc_source.clone(), onion_packet, &self.logger) { - Err(e) => { - if let ChannelError::Ignore(msg) = e { - log_trace!(self.logger, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(payment_hash.0), msg); - } else { - panic!("Stated return value requirements in send_htlc() were not met"); - } - let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, chan.get()); - failed_forwards.push((htlc_source, payment_hash, - HTLCFailReason::Reason { failure_code, data } - )); - continue; - }, - Ok(update_add) => { - match update_add { - Some(msg) => { add_htlc_msgs.push(msg); }, - None => { - // Nothing to do here...we're waiting on a remote - // revoke_and_ack before we can add anymore HTLCs. The Channel - // will automatically handle building the update_add_htlc and - // commitment_signed messages when we can. - // TODO: Do some kind of timer to set the channel as !is_live() - // as we don't really want others relying on us relaying through - // this channel currently :/. + if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(forward_chan_id) { + let mut add_htlc_msgs = Vec::new(); + let mut fail_htlc_msgs = Vec::new(); + for forward_info in pending_forwards.drain(..) { + match forward_info { + HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info: PendingHTLCInfo { + routing: PendingHTLCRouting::Forward { + onion_packet, .. 
+ }, incoming_shared_secret, payment_hash, amt_to_forward, outgoing_cltv_value }, + prev_funding_outpoint } => { + log_trace!(self.logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, log_bytes!(payment_hash.0), short_chan_id); + let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData { + short_channel_id: prev_short_channel_id, + outpoint: prev_funding_outpoint, + htlc_id: prev_htlc_id, + incoming_packet_shared_secret: incoming_shared_secret, + // Phantom payments are only PendingHTLCRouting::Receive. + phantom_shared_secret: None, + }); + match chan.get_mut().send_htlc(amt_to_forward, payment_hash, outgoing_cltv_value, htlc_source.clone(), onion_packet, &self.logger) { + Err(e) => { + if let ChannelError::Ignore(msg) = e { + log_trace!(self.logger, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(payment_hash.0), msg); + } else { + panic!("Stated return value requirements in send_htlc() were not met"); + } + let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, chan.get()); + failed_forwards.push((htlc_source, payment_hash, + HTLCFailReason::Reason { failure_code, data } + )); + continue; + }, + Ok(update_add) => { + match update_add { + Some(msg) => { add_htlc_msgs.push(msg); }, + None => { + // Nothing to do here...we're waiting on a remote + // revoke_and_ack before we can add anymore HTLCs. The Channel + // will automatically handle building the update_add_htlc and + // commitment_signed messages when we can. + // TODO: Do some kind of timer to set the channel as !is_live() + // as we don't really want others relying on us relaying through + // this channel currently :/. + } } } } - } - }, - HTLCForwardInfo::AddHTLC { .. } => { - panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward"); - }, - HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => { - log_trace!(self.logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id); - match chan.get_mut().get_update_fail_htlc(htlc_id, err_packet, &self.logger) { - Err(e) => { - if let ChannelError::Ignore(msg) = e { - log_trace!(self.logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg); - } else { - panic!("Stated return value requirements in get_update_fail_htlc() were not met"); + }, + HTLCForwardInfo::AddHTLC { .. } => { + panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward"); + }, + HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => { + log_trace!(self.logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id); + match chan.get_mut().get_update_fail_htlc(htlc_id, err_packet, &self.logger) { + Err(e) => { + if let ChannelError::Ignore(msg) = e { + log_trace!(self.logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg); + } else { + panic!("Stated return value requirements in get_update_fail_htlc() were not met"); + } + // fail-backs are best-effort, we probably already have one + // pending, and if not that's OK, if not, the channel is on + // the chain and sending the HTLC-Timeout is their problem. + continue; + }, + Ok(Some(msg)) => { fail_htlc_msgs.push(msg); }, + Ok(None) => { + // Nothing to do here...we're waiting on a remote + // revoke_and_ack before we can update the commitment + // transaction. 
The Channel will automatically handle + // building the update_fail_htlc and commitment_signed + // messages when we can. + // We don't need any kind of timer here as they should fail + // the channel onto the chain if they can't get our + // update_fail_htlc in time, it's not our problem. } - // fail-backs are best-effort, we probably already have one - // pending, and if not that's OK, if not, the channel is on - // the chain and sending the HTLC-Timeout is their problem. - continue; - }, - Ok(Some(msg)) => { fail_htlc_msgs.push(msg); }, - Ok(None) => { - // Nothing to do here...we're waiting on a remote - // revoke_and_ack before we can update the commitment - // transaction. The Channel will automatically handle - // building the update_fail_htlc and commitment_signed - // messages when we can. - // We don't need any kind of timer here as they should fail - // the channel onto the chain if they can't get our - // update_fail_htlc in time, it's not our problem. } - } - }, + }, + } } - } - if !add_htlc_msgs.is_empty() || !fail_htlc_msgs.is_empty() { - let (commitment_msg, monitor_update) = match chan.get_mut().send_commitment(&self.logger) { - Ok(res) => res, - Err(e) => { - // We surely failed send_commitment due to bad keys, in that case - // close channel and then send error message to peer. - let counterparty_node_id = chan.get().get_counterparty_node_id(); - let err: Result<(), _> = match e { - ChannelError::Ignore(_) | ChannelError::Warn(_) => { - panic!("Stated return value requirements in send_commitment() were not met"); - } - ChannelError::Close(msg) => { - log_trace!(self.logger, "Closing channel {} due to Close-required error: {}", log_bytes!(chan.key()[..]), msg); - let mut channel = remove_channel!(self, channel_state, chan); - // ChannelClosed event is generated by handle_error for us. - Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel.channel_id(), channel.get_user_id(), channel.force_shutdown(true), self.get_channel_update_for_broadcast(&channel).ok())) - }, - ChannelError::CloseDelayBroadcast(_) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); } - }; - handle_errors.push((counterparty_node_id, err)); + if !add_htlc_msgs.is_empty() || !fail_htlc_msgs.is_empty() { + let (commitment_msg, monitor_update) = match chan.get_mut().send_commitment(&self.logger) { + Ok(res) => res, + Err(e) => { + // We surely failed send_commitment due to bad keys, in that case + // close channel and then send error message to peer. + let counterparty_node_id = chan.get().get_counterparty_node_id(); + let err: Result<(), _> = match e { + ChannelError::Ignore(_) | ChannelError::Warn(_) => { + panic!("Stated return value requirements in send_commitment() were not met"); + } + ChannelError::Close(msg) => { + log_trace!(self.logger, "Closing channel {} due to Close-required error: {}", log_bytes!(chan.key()[..]), msg); + let mut channel = remove_channel!(self, channel_state, chan); + // ChannelClosed event is generated by handle_error for us. 
+ Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel.channel_id(), channel.get_user_id(), channel.force_shutdown(true), self.get_channel_update_for_broadcast(&channel).ok())) + }, + ChannelError::CloseDelayBroadcast(_) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); } + }; + handle_errors.push((counterparty_node_id, err)); + continue; + } + }; + if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { + handle_errors.push((chan.get().get_counterparty_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true))); continue; } - }; - if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { - handle_errors.push((chan.get().get_counterparty_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true))); - continue; + log_debug!(self.logger, "Forwarding HTLCs resulted in a commitment update with {} HTLCs added and {} HTLCs failed for channel {}", + add_htlc_msgs.len(), fail_htlc_msgs.len(), log_bytes!(chan.get().channel_id())); + channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { + node_id: chan.get().get_counterparty_node_id(), + updates: msgs::CommitmentUpdate { + update_add_htlcs: add_htlc_msgs, + update_fulfill_htlcs: Vec::new(), + update_fail_htlcs: fail_htlc_msgs, + update_fail_malformed_htlcs: Vec::new(), + update_fee: None, + commitment_signed: commitment_msg, + }, + }); } - log_debug!(self.logger, "Forwarding HTLCs resulted in a commitment update with {} HTLCs added and {} HTLCs failed for channel {}", - add_htlc_msgs.len(), fail_htlc_msgs.len(), log_bytes!(chan.get().channel_id())); - channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { - node_id: chan.get().get_counterparty_node_id(), - updates: msgs::CommitmentUpdate { - update_add_htlcs: add_htlc_msgs, - update_fulfill_htlcs: Vec::new(), - update_fail_htlcs: fail_htlc_msgs, - update_fail_malformed_htlcs: Vec::new(), - update_fee: None, - commitment_signed: commitment_msg, - }, - }); + } else { + unreachable!(); } - } else { + } + else { unreachable!(); } } else { @@ -3447,14 +3515,19 @@ impl ChannelMana let channel_state = &mut *channel_state_lock; let pending_msg_events = &mut channel_state.pending_msg_events; let short_to_chan_info = &mut channel_state.short_to_chan_info; - channel_state.by_id.retain(|chan_id, chan| { - let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_chan_info, pending_msg_events, chan_id, chan, new_feerate); - if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; } - if err.is_err() { - handle_errors.push(err); - } - retain_channel - }); + let per_peer_state = self.per_peer_state.read().unwrap(); + for (_cp_id, peer_state_mutex) in per_peer_state.iter() { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + peer_state.channel_by_id.retain(|chan_id, chan| { + let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_chan_info, pending_msg_events, chan_id, chan, new_feerate); + if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; } + if err.is_err() { + handle_errors.push(err); + } + retain_channel + }); + } } should_persist @@ -3485,49 +3558,53 @@ impl ChannelMana let 
channel_state = &mut *channel_state_lock; let pending_msg_events = &mut channel_state.pending_msg_events; let short_to_chan_info = &mut channel_state.short_to_chan_info; - channel_state.by_id.retain(|chan_id, chan| { - let counterparty_node_id = chan.get_counterparty_node_id(); - let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_chan_info, pending_msg_events, chan_id, chan, new_feerate); - if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; } - if err.is_err() { - handle_errors.push((err, counterparty_node_id)); - } - if !retain_channel { return false; } + let per_peer_state = self.per_peer_state.read().unwrap(); + for (counterparty_node_id, peer_state_mutex) in per_peer_state.iter() { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + peer_state.channel_by_id.retain(|chan_id, chan| { + let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_chan_info, pending_msg_events, chan_id, chan, new_feerate); + if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; } + if err.is_err() { + handle_errors.push((err, *counterparty_node_id)); + } + if !retain_channel { return false; } - if let Err(e) = chan.timer_check_closing_negotiation_progress() { - let (needs_close, err) = convert_chan_err!(self, e, short_to_chan_info, chan, chan_id); - handle_errors.push((Err(err), chan.get_counterparty_node_id())); - if needs_close { return false; } - } + if let Err(e) = chan.timer_check_closing_negotiation_progress() { + let (needs_close, err) = convert_chan_err!(self, e, short_to_chan_info, chan, chan_id); + handle_errors.push((Err(err), chan.get_counterparty_node_id())); + if needs_close { return false; } + } - match chan.channel_update_status() { - ChannelUpdateStatus::Enabled if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged), - ChannelUpdateStatus::Disabled if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged), - ChannelUpdateStatus::DisabledStaged if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Enabled), - ChannelUpdateStatus::EnabledStaged if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Disabled), - ChannelUpdateStatus::DisabledStaged if !chan.is_live() => { - if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { - pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { - msg: update - }); - } - should_persist = NotifyOption::DoPersist; - chan.set_channel_update_status(ChannelUpdateStatus::Disabled); - }, - ChannelUpdateStatus::EnabledStaged if chan.is_live() => { - if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { - pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { - msg: update - }); - } - should_persist = NotifyOption::DoPersist; - chan.set_channel_update_status(ChannelUpdateStatus::Enabled); - }, - _ => {}, - } + match chan.channel_update_status() { + ChannelUpdateStatus::Enabled if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged), + ChannelUpdateStatus::Disabled if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged), + ChannelUpdateStatus::DisabledStaged if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Enabled), + ChannelUpdateStatus::EnabledStaged if !chan.is_live() => 
chan.set_channel_update_status(ChannelUpdateStatus::Disabled), + ChannelUpdateStatus::DisabledStaged if !chan.is_live() => { + if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { + pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + msg: update + }); + } + should_persist = NotifyOption::DoPersist; + chan.set_channel_update_status(ChannelUpdateStatus::Disabled); + }, + ChannelUpdateStatus::EnabledStaged if chan.is_live() => { + if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { + pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + msg: update + }); + } + should_persist = NotifyOption::DoPersist; + chan.set_channel_update_status(ChannelUpdateStatus::Enabled); + }, + _ => {}, + } - true - }); + true + }); + } channel_state.claimable_htlcs.retain(|payment_hash, (_, htlcs)| { if htlcs.is_empty() { @@ -3645,19 +3722,27 @@ impl ChannelMana // be surfaced to the user. fn fail_holding_cell_htlcs( &self, mut htlcs_to_fail: Vec<(HTLCSource, PaymentHash)>, channel_id: [u8; 32], - _counterparty_node_id: &PublicKey + counterparty_node_id: &PublicKey ) { for (htlc_src, payment_hash) in htlcs_to_fail.drain(..) { match htlc_src { HTLCSource::PreviousHopData(HTLCPreviousHopData { .. }) => { - let (failure_code, onion_failure_data) = - match self.channel_state.lock().unwrap().by_id.entry(channel_id) { - hash_map::Entry::Occupied(chan_entry) => { - self.get_htlc_inbound_temp_fail_err_and_data(0x1000|7, &chan_entry.get()) - }, - hash_map::Entry::Vacant(_) => (0x4000|10, Vec::new()) - }; let channel_state = self.channel_state.lock().unwrap(); + let (failure_code, onion_failure_data) = { + let per_peer_state = self.per_peer_state.write().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + match peer_state.channel_by_id.entry(channel_id) { + hash_map::Entry::Occupied(chan_entry) => { + self.get_htlc_inbound_temp_fail_err_and_data(0x1000|7, &chan_entry.get()) + }, + hash_map::Entry::Vacant(_) => (0x4000|10, Vec::new()) + } + } else { + unreachable!(); + } + }; self.fail_htlc_backwards_internal(channel_state, htlc_src, &payment_hash, HTLCFailReason::Reason { failure_code, data: onion_failure_data}); }, @@ -3712,7 +3797,7 @@ impl ChannelMana /// to fail and take the channel_state lock for each iteration (as we take ownership and may /// drop it). In other words, no assumptions are made that entries in claimable_htlcs point to /// still-available channels. - fn fail_htlc_backwards_internal(&self, mut channel_state_lock: MutexGuard>, source: HTLCSource, payment_hash: &PaymentHash, onion_error: HTLCFailReason) { + fn fail_htlc_backwards_internal(&self, mut channel_state_lock: MutexGuard, source: HTLCSource, payment_hash: &PaymentHash, onion_error: HTLCFailReason) { //TODO: There is a timing attack here where if a node fails an HTLC back to us they can //identify whether we sent it or not based on the (I presume) very different runtime //between the branches here. 
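[Editor's note] `fail_holding_cell_htlcs` now receives the `counterparty_node_id` and uses it to reach the channel when deriving the failure code. A minimal sketch of that derivation with stand-in types; the real code attaches channel_update data for the 0x1000|7 case and treats a missing peer entry as a bug (`unreachable!()`), whereas this sketch simply falls through:

use std::collections::HashMap;
use std::sync::Mutex;

type NodeId = [u8; 33];
type ChannelId = [u8; 32];

struct Channel;
struct PeerState { channel_by_id: HashMap<ChannelId, Channel> }

// Returns (failure_code, failure_data) for an HTLC that sat in a holding cell.
fn holding_cell_fail_code(
    per_peer_state: &HashMap<NodeId, Mutex<PeerState>>,
    counterparty_node_id: &NodeId,
    channel_id: &ChannelId,
) -> (u16, Vec<u8>) {
    if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
        let peer_state = peer_state_mutex.lock().unwrap();
        if peer_state.channel_by_id.contains_key(channel_id) {
            return (0x1000 | 7, Vec::new()); // temporary_channel_failure
        }
    }
    (0x4000 | 10, Vec::new()) // unknown_next_peer: channel no longer tracked
}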
We should make this async and move it into the forward HTLCs @@ -3990,64 +4075,69 @@ impl ChannelMana } } - fn claim_funds_from_hop(&self, channel_state_lock: &mut MutexGuard>, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage) -> ClaimFundsFromHop { + fn claim_funds_from_hop(&self, channel_state_lock: &mut MutexGuard, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage) -> ClaimFundsFromHop { //TODO: Delay the claimed_funds relaying just like we do outbound relay! let channel_state = &mut **channel_state_lock; - let chan_id = match channel_state.short_to_chan_info.get(&prev_hop.short_channel_id) { - Some((_cp_id, chan_id)) => chan_id.clone(), + let (counterparty_node_id, chan_id) = match channel_state.short_to_chan_info.get(&prev_hop.short_channel_id) { + Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()), None => { return ClaimFundsFromHop::PrevHopForceClosed } }; - if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(chan_id) { - match chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger) { - Ok(msgs_monitor_option) => { - if let UpdateFulfillCommitFetch::NewClaim { msgs, htlc_value_msat, monitor_update } = msgs_monitor_option { + let per_peer_state = self.per_peer_state.write().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(chan_id) { + match chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger) { + Ok(msgs_monitor_option) => { + if let UpdateFulfillCommitFetch::NewClaim { msgs, htlc_value_msat, monitor_update } = msgs_monitor_option { + if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { + log_given_level!(self.logger, if e == ChannelMonitorUpdateErr::PermanentFailure { Level::Error } else { Level::Debug }, + "Failed to update channel monitor with preimage {:?}: {:?}", + payment_preimage, e); + return ClaimFundsFromHop::MonitorUpdateFail( + chan.get().get_counterparty_node_id(), + handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err(), + Some(htlc_value_msat) + ); + } + if let Some((msg, commitment_signed)) = msgs { + log_debug!(self.logger, "Claiming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}", + log_bytes!(payment_preimage.0), log_bytes!(chan.get().channel_id())); + channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { + node_id: chan.get().get_counterparty_node_id(), + updates: msgs::CommitmentUpdate { + update_add_htlcs: Vec::new(), + update_fulfill_htlcs: vec![msg], + update_fail_htlcs: Vec::new(), + update_fail_malformed_htlcs: Vec::new(), + update_fee: None, + commitment_signed, + } + }); + } + return ClaimFundsFromHop::Success(htlc_value_msat); + } else { + return ClaimFundsFromHop::DuplicateClaim; + } + }, + Err((e, monitor_update)) => { if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { - log_given_level!(self.logger, if e == ChannelMonitorUpdateErr::PermanentFailure { Level::Error } else { Level::Debug }, - "Failed to update channel monitor with preimage {:?}: {:?}", + log_given_level!(self.logger, if e == ChannelMonitorUpdateErr::PermanentFailure { Level::Error } else { 
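[Editor's note] The core lookup pattern introduced by this series: `short_to_chan_info` now maps an SCID (or outbound alias) to a `(counterparty_node_id, channel_id)` pair, so resolving a previous hop is a two-step lookup, first into that map and then into the owning peer's channel map. A sketch under the same simplified stand-in types as above:

use std::collections::HashMap;
use std::sync::Mutex;

type NodeId = [u8; 33];
type ChannelId = [u8; 32];

struct Channel;
struct PeerState { channel_by_id: HashMap<ChannelId, Channel> }

fn resolve_scid(
    short_to_chan_info: &HashMap<u64, (NodeId, ChannelId)>,
    per_peer_state: &HashMap<NodeId, Mutex<PeerState>>,
    scid: u64,
) -> Option<(NodeId, ChannelId)> {
    // Step 1: SCID (or outbound SCID alias) -> (peer, channel id).
    let (cp_id, chan_id) = short_to_chan_info.get(&scid).cloned()?;
    // Step 2: confirm the channel still lives in that peer's map.
    let peer_state = per_peer_state.get(&cp_id)?.lock().unwrap();
    if peer_state.channel_by_id.contains_key(&chan_id) {
        Some((cp_id, chan_id))
    } else {
        None
    }
}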
Level::Info }, + "Failed to update channel monitor with preimage {:?} immediately prior to force-close: {:?}", payment_preimage, e); - return ClaimFundsFromHop::MonitorUpdateFail( - chan.get().get_counterparty_node_id(), - handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err(), - Some(htlc_value_msat) - ); } - if let Some((msg, commitment_signed)) = msgs { - log_debug!(self.logger, "Claiming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}", - log_bytes!(payment_preimage.0), log_bytes!(chan.get().channel_id())); - channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { - node_id: chan.get().get_counterparty_node_id(), - updates: msgs::CommitmentUpdate { - update_add_htlcs: Vec::new(), - update_fulfill_htlcs: vec![msg], - update_fail_htlcs: Vec::new(), - update_fail_malformed_htlcs: Vec::new(), - update_fee: None, - commitment_signed, - } - }); + let counterparty_node_id = chan.get().get_counterparty_node_id(); + let (drop, res) = convert_chan_err!(self, e, channel_state.short_to_chan_info, chan.get_mut(), &chan_id); + if drop { + chan.remove_entry(); } - return ClaimFundsFromHop::Success(htlc_value_msat); - } else { - return ClaimFundsFromHop::DuplicateClaim; - } - }, - Err((e, monitor_update)) => { - if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { - log_given_level!(self.logger, if e == ChannelMonitorUpdateErr::PermanentFailure { Level::Error } else { Level::Info }, - "Failed to update channel monitor with preimage {:?} immediately prior to force-close: {:?}", - payment_preimage, e); - } - let counterparty_node_id = chan.get().get_counterparty_node_id(); - let (drop, res) = convert_chan_err!(self, e, channel_state.short_to_chan_info, chan.get_mut(), &chan_id); - if drop { - chan.remove_entry(); - } - return ClaimFundsFromHop::MonitorUpdateFail(counterparty_node_id, res, None); - }, - } + return ClaimFundsFromHop::MonitorUpdateFail(counterparty_node_id, res, None); + }, + } + } else { unreachable!(); } } else { unreachable!(); } } @@ -4077,7 +4167,7 @@ impl ChannelMana } } - fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard>, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option, from_onchain: bool, next_channel_id: [u8; 32]) { + fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option, from_onchain: bool, next_channel_id: [u8; 32]) { match source { HTLCSource::OutboundRoute { session_priv, payment_id, path, .. 
} => { mem::drop(channel_state_lock); @@ -4195,9 +4285,24 @@ impl ChannelMana let (mut pending_failures, finalized_claims) = { let mut channel_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_lock; - let mut channel = match channel_state.by_id.entry(funding_txo.to_channel_id()) { - hash_map::Entry::Occupied(chan) => chan, - hash_map::Entry::Vacant(_) => return, + let counterparty_node_id = { + let id_to_peer = self.id_to_peer.lock().unwrap(); + id_to_peer.get(&funding_txo.to_channel_id()).cloned() + }; + if counterparty_node_id.is_none() { + return + } + let per_peer_state = self.per_peer_state.write().unwrap(); + let mut peer_state_lock; + let mut channel = { + if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id.unwrap()) { + peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + match peer_state.channel_by_id.entry(funding_txo.to_channel_id()){ + hash_map::Entry::Occupied(chan) => chan, + hash_map::Entry::Vacant(_) => return, + } + } else { return } }; if !channel.get().is_awaiting_monitor_update() || channel.get().get_latest_monitor_update_id() != highest_applied_update_id { return; @@ -4277,36 +4382,43 @@ impl ChannelMana let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; - match channel_state.by_id.entry(temporary_channel_id.clone()) { - hash_map::Entry::Occupied(mut channel) => { - if !channel.get().inbound_is_awaiting_accept() { - return Err(APIError::APIMisuseError { err: "The channel isn't currently awaiting to be accepted.".to_owned() }); - } - if *counterparty_node_id != channel.get().get_counterparty_node_id() { - return Err(APIError::APIMisuseError { err: "The passed counterparty_node_id doesn't match the channel's counterparty node_id".to_owned() }); - } - if accept_0conf { - channel.get_mut().set_0conf(); - } else if channel.get().get_channel_type().requires_zero_conf() { - let send_msg_err_event = events::MessageSendEvent::HandleError { + let per_peer_state = self.per_peer_state.write().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + match peer_state.channel_by_id.entry(temporary_channel_id.clone()) { + hash_map::Entry::Occupied(mut channel) => { + if !channel.get().inbound_is_awaiting_accept() { + return Err(APIError::APIMisuseError { err: "The channel isn't currently awaiting to be accepted.".to_owned() }); + } + if *counterparty_node_id != channel.get().get_counterparty_node_id() { + return Err(APIError::APIMisuseError { err: "The passed counterparty_node_id doesn't match the channel's counterparty node_id".to_owned() }); + } + if accept_0conf { + channel.get_mut().set_0conf(); + } else if channel.get().get_channel_type().requires_zero_conf() { + let send_msg_err_event = events::MessageSendEvent::HandleError { + node_id: channel.get().get_counterparty_node_id(), + action: msgs::ErrorAction::SendErrorMessage{ + msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "No zero confirmation channels accepted".to_owned(), } + } + }; + channel_state.pending_msg_events.push(send_msg_err_event); + let _ = remove_channel!(self, channel_state, channel); + return Err(APIError::APIMisuseError { err: "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned() }); + } + + 
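[Editor's note] When a monitor update completes, only the funding outpoint is known, so the hunk above first consults the `id_to_peer` map (channel_id -> counterparty node id) before taking the per-peer lock. A sketch of that two-hop lookup with hypothetical helper `peer_for_channel` and the same stand-in types:

use std::collections::HashMap;
use std::sync::{Mutex, RwLock};

type NodeId = [u8; 33];
type ChannelId = [u8; 32];

struct Channel;
struct PeerState { channel_by_id: HashMap<ChannelId, Channel> }

// channel_id (derived from the funding txo) -> counterparty via `id_to_peer`,
// then confirm the channel in that peer's map.
fn peer_for_channel(
    id_to_peer: &Mutex<HashMap<ChannelId, NodeId>>,
    per_peer_state: &RwLock<HashMap<NodeId, Mutex<PeerState>>>,
    channel_id: ChannelId,
) -> Option<NodeId> {
    let counterparty = id_to_peer.lock().unwrap().get(&channel_id).cloned()?;
    let per_peer = per_peer_state.read().unwrap();
    let peer_state = per_peer.get(&counterparty)?.lock().unwrap();
    if peer_state.channel_by_id.contains_key(&channel_id) { Some(counterparty) } else { None }
}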
channel_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel { node_id: channel.get().get_counterparty_node_id(), - action: msgs::ErrorAction::SendErrorMessage{ - msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "No zero confirmation channels accepted".to_owned(), } - } - }; - channel_state.pending_msg_events.push(send_msg_err_event); - let _ = remove_channel!(self, channel_state, channel); - return Err(APIError::APIMisuseError { err: "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned() }); + msg: channel.get_mut().accept_inbound_channel(user_channel_id), + }); + } + hash_map::Entry::Vacant(_) => { + return Err(APIError::ChannelUnavailable { err: "Can't accept a channel that doesn't exist".to_owned() }); } - - channel_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel { - node_id: channel.get().get_counterparty_node_id(), - msg: channel.get_mut().accept_inbound_channel(user_channel_id), - }); - } - hash_map::Entry::Vacant(_) => { - return Err(APIError::ChannelUnavailable { err: "Can't accept a channel that doesn't exist".to_owned() }); } + } else { + return Err(APIError::APIMisuseError { err: format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id) }); } Ok(()) } @@ -4333,35 +4445,42 @@ impl ChannelMana }; let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; - match channel_state.by_id.entry(channel.channel_id()) { - hash_map::Entry::Occupied(_) => { - self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias); - return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision!".to_owned(), msg.temporary_channel_id.clone())) - }, - hash_map::Entry::Vacant(entry) => { - if !self.default_configuration.manually_accept_inbound_channels { - if channel.get_channel_type().requires_zero_conf() { - return Err(MsgHandleErrInternal::send_err_msg_no_close("No zero confirmation channels accepted".to_owned(), msg.temporary_channel_id.clone())); - } - channel_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel { - node_id: counterparty_node_id.clone(), - msg: channel.accept_inbound_channel(0), - }); - } else { - let mut pending_events = self.pending_events.lock().unwrap(); - pending_events.push( - events::Event::OpenChannelRequest { - temporary_channel_id: msg.temporary_channel_id.clone(), - counterparty_node_id: counterparty_node_id.clone(), - funding_satoshis: msg.funding_satoshis, - push_msat: msg.push_msat, - channel_type: channel.get_channel_type().clone(), + let per_peer_state = self.per_peer_state.write().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + match peer_state.channel_by_id.entry(channel.channel_id()) { + hash_map::Entry::Occupied(_) => { + self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias); + return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision for the same peer!".to_owned(), msg.temporary_channel_id.clone())) + }, + hash_map::Entry::Vacant(entry) => { + if !self.default_configuration.manually_accept_inbound_channels { + if channel.get_channel_type().requires_zero_conf() { + return Err(MsgHandleErrInternal::send_err_msg_no_close("No zero confirmation channels accepted".to_owned(), 
msg.temporary_channel_id.clone())); } - ); - } - - entry.insert(channel); - } + channel_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel { + node_id: counterparty_node_id.clone(), + msg: channel.accept_inbound_channel(0), + }); + } else { + let mut pending_events = self.pending_events.lock().unwrap(); + pending_events.push( + events::Event::OpenChannelRequest { + temporary_channel_id: msg.temporary_channel_id.clone(), + counterparty_node_id: counterparty_node_id.clone(), + funding_satoshis: msg.funding_satoshis, + push_msat: msg.push_msat, + channel_type: channel.get_channel_type().clone(), + } + ); + } + + entry.insert(channel); + } + } + } else { + return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id.clone())) } Ok(()) } @@ -4370,15 +4489,22 @@ impl ChannelMana let (value, output_script, user_id) = { let mut channel_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_lock; - match channel_state.by_id.entry(msg.temporary_channel_id) { - hash_map::Entry::Occupied(mut chan) => { - if chan.get().get_counterparty_node_id() != *counterparty_node_id { - return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.temporary_channel_id)); - } - try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration.channel_handshake_limits, &their_features), channel_state, chan); - (chan.get().get_value_satoshis(), chan.get().get_funding_redeemscript().to_v0_p2wsh(), chan.get().get_user_id()) - }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.temporary_channel_id)) + let per_peer_state = self.per_peer_state.write().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + match peer_state.channel_by_id.entry(msg.temporary_channel_id) { + hash_map::Entry::Occupied(mut chan) => { + if chan.get().get_counterparty_node_id() != *counterparty_node_id { + return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.temporary_channel_id)); + } + try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration.channel_handshake_limits, &their_features), channel_state, chan); + (chan.get().get_value_satoshis(), chan.get().get_funding_redeemscript().to_v0_p2wsh(), chan.get().get_user_id()) + }, + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.temporary_channel_id)) + } + } else { + return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id)) } }; let mut pending_events = self.pending_events.lock().unwrap(); @@ -4397,18 +4523,25 @@ impl ChannelMana let best_block = *self.best_block.read().unwrap(); let mut channel_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_lock; - match channel_state.by_id.entry(msg.temporary_channel_id.clone()) { - hash_map::Entry::Occupied(mut chan) => { - if chan.get().get_counterparty_node_id() != *counterparty_node_id { - return 
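[Editor's note] The hunks above settle on a convention for an unknown `counterparty_node_id`: peer-message handlers reply with `send_err_msg_no_close` carrying a "Can't find a peer..." message, user-facing calls surface an `APIError`, and internal paths that should always have a peer entry hit `unreachable!()`. A compact sketch of that triage, with a local `HandlerError` enum standing in for the real error types:

enum HandlerError {
    PeerMessage(String), // like MsgHandleErrInternal::send_err_msg_no_close
    Api(String),         // like APIError::APIMisuseError / ChannelUnavailable
}

fn unknown_peer_error(counterparty_node_id: &str, user_initiated: bool) -> HandlerError {
    let msg = format!(
        "Can't find a peer with a node_id matching the passed counterparty_node_id {}",
        counterparty_node_id
    );
    if user_initiated { HandlerError::Api(msg) } else { HandlerError::PeerMessage(msg) }
}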
Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.temporary_channel_id)); - } - (try_chan_entry!(self, chan.get_mut().funding_created(msg, best_block, &self.logger), channel_state, chan), chan.remove()) - }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.temporary_channel_id)) + let per_peer_state = self.per_peer_state.write().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + match peer_state.channel_by_id.entry(msg.temporary_channel_id.clone()) { + hash_map::Entry::Occupied(mut chan) => { + if chan.get().get_counterparty_node_id() != *counterparty_node_id { + return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.temporary_channel_id)); + } + (try_chan_entry!(self, chan.get_mut().funding_created(msg, best_block, &self.logger), channel_state, chan), chan.remove()) + }, + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.temporary_channel_id)) + } + } else { + return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id)) } }; // Because we have exclusive ownership of the channel here we can release the channel_state - // lock before watch_channel + // and peer_state lock before watch_channel if let Err(e) = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor) { match e { ChannelMonitorUpdateErr::PermanentFailure => { @@ -4435,26 +4568,31 @@ impl ChannelMana } let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; + let peer_state_lock = self.per_peer_state.write().unwrap(); let channel_id = funding_msg.channel_id; - match channel_state.by_id.entry(channel_id) { - hash_map::Entry::Occupied(_) => { - return Err(MsgHandleErrInternal::send_err_msg_no_close("Already had channel with the new channel_id".to_owned(), channel_id)) - }, - hash_map::Entry::Vacant(e) => { - channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingSigned { - node_id: counterparty_node_id.clone(), - msg: funding_msg, - }); - if let Some(msg) = channel_ready { - send_channel_ready!(channel_state.short_to_chan_info, channel_state.pending_msg_events, chan, msg); - } - e.insert(chan); - let mut id_to_peer = self.id_to_peer.lock().unwrap(); - if id_to_peer.insert(channel_id, *counterparty_node_id).is_some() { - panic!("id_to_peer map already contained funding txid, which shouldn't be possible"); + if let Some(peer_state_mutex) = peer_state_lock.get(counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + match peer_state.channel_by_id.entry(channel_id) { + hash_map::Entry::Occupied(_) => { + return Err(MsgHandleErrInternal::send_err_msg_no_close("Already had channel with the new channel_id".to_owned(), channel_id)) + }, + hash_map::Entry::Vacant(e) => { + channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingSigned { + node_id: counterparty_node_id.clone(), + msg: funding_msg, + }); + if let Some(msg) = channel_ready { + 
send_channel_ready!(channel_state.short_to_chan_info, channel_state.pending_msg_events, chan, msg); + } + e.insert(chan); + let mut id_to_peer = self.id_to_peer.lock().unwrap(); + if id_to_peer.insert(channel_id, *counterparty_node_id).is_some() { + panic!("id_to_peer map already contained funding txid, which shouldn't be possible"); + } } } - } + } else { unreachable!() } Ok(()) } @@ -4463,33 +4601,40 @@ impl ChannelMana let best_block = *self.best_block.read().unwrap(); let mut channel_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_lock; - match channel_state.by_id.entry(msg.channel_id) { - hash_map::Entry::Occupied(mut chan) => { - if chan.get().get_counterparty_node_id() != *counterparty_node_id { - return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); - } - let (monitor, funding_tx, channel_ready) = match chan.get_mut().funding_signed(&msg, best_block, &self.logger) { - Ok(update) => update, - Err(e) => try_chan_entry!(self, Err(e), channel_state, chan), - }; - if let Err(e) = self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor) { - let mut res = handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, channel_ready.is_some(), OPTIONALLY_RESEND_FUNDING_LOCKED); - if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res { - // We weren't able to watch the channel to begin with, so no updates should be made on - // it. Previously, full_stack_target found an (unreachable) panic when the - // monitor update contained within `shutdown_finish` was applied. - if let Some((ref mut shutdown_finish, _)) = shutdown_finish { - shutdown_finish.0.take(); + let per_peer_state = self.per_peer_state.write().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + match peer_state.channel_by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { + if chan.get().get_counterparty_node_id() != *counterparty_node_id { + return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); + } + let (monitor, funding_tx, channel_ready) = match chan.get_mut().funding_signed(&msg, best_block, &self.logger) { + Ok(update) => update, + Err(e) => try_chan_entry!(self, Err(e), channel_state, chan), + }; + if let Err(e) = self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor) { + let mut res = handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, channel_ready.is_some(), OPTIONALLY_RESEND_FUNDING_LOCKED); + if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res { + // We weren't able to watch the channel to begin with, so no updates should be made on + // it. Previously, full_stack_target found an (unreachable) panic when the + // monitor update contained within `shutdown_finish` was applied. 
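[Editor's note] As the updated comment in the funding path notes, removing the channel from its temporary-id slot gives the handler exclusive ownership, so both the channel_state and peer_state guards can be released before calling `watch_channel`. A sketch of that lock scoping with stand-in types and a hypothetical `take_and_watch` helper:

use std::collections::HashMap;
use std::sync::{Mutex, RwLock};

type NodeId = [u8; 33];
type ChannelId = [u8; 32];

struct Channel;
struct PeerState { channel_by_id: HashMap<ChannelId, Channel> }

struct ChainMonitor;
impl ChainMonitor {
    fn watch_channel(&self, _funding_txid: [u8; 32], _chan: &Channel) -> Result<(), ()> { Ok(()) }
}

fn take_and_watch(
    per_peer_state: &RwLock<HashMap<NodeId, Mutex<PeerState>>>,
    monitor: &ChainMonitor,
    counterparty: &NodeId,
    temporary_channel_id: &ChannelId,
    funding_txid: [u8; 32],
) -> Result<Channel, ()> {
    let chan = {
        let per_peer = per_peer_state.read().unwrap();
        let mut peer_state = per_peer.get(counterparty).ok_or(())?.lock().unwrap();
        // Removing the entry transfers ownership of the channel to this call...
        peer_state.channel_by_id.remove(temporary_channel_id).ok_or(())?
    }; // ...and both guards drop here, before the monitor call.
    monitor.watch_channel(funding_txid, &chan)?;
    Ok(chan)
}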
+ if let Some((ref mut shutdown_finish, _)) = shutdown_finish { + shutdown_finish.0.take(); + } } + return res } - return res - } - if let Some(msg) = channel_ready { - send_channel_ready!(channel_state.short_to_chan_info, channel_state.pending_msg_events, chan.get(), msg); - } - funding_tx - }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) + if let Some(msg) = channel_ready { + send_channel_ready!(channel_state.short_to_chan_info, channel_state.pending_msg_events, chan.get(), msg); + } + funding_tx + }, + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) + } + } else { + return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) } }; log_info!(self.logger, "Broadcasting funding transaction with txid {}", funding_tx.txid()); @@ -4500,36 +4645,43 @@ impl ChannelMana fn internal_channel_ready(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReady) -> Result<(), MsgHandleErrInternal> { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; - match channel_state.by_id.entry(msg.channel_id) { - hash_map::Entry::Occupied(mut chan) => { - if chan.get().get_counterparty_node_id() != *counterparty_node_id { - return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); - } - let announcement_sigs_opt = try_chan_entry!(self, chan.get_mut().channel_ready(&msg, self.get_our_node_id(), - self.genesis_hash.clone(), &self.best_block.read().unwrap(), &self.logger), channel_state, chan); - if let Some(announcement_sigs) = announcement_sigs_opt { - log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(chan.get().channel_id())); - channel_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures { - node_id: counterparty_node_id.clone(), - msg: announcement_sigs, - }); - } else if chan.get().is_usable() { - // If we're sending an announcement_signatures, we'll send the (public) - // channel_update after sending a channel_announcement when we receive our - // counterparty's announcement_signatures. Thus, we only bother to send a - // channel_update here if the channel is not public, i.e. we're not sending an - // announcement_signatures. 
- log_trace!(self.logger, "Sending private initial channel_update for our counterparty on channel {}", log_bytes!(chan.get().channel_id())); - if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) { - channel_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate { + let per_peer_state = self.per_peer_state.write().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + match peer_state.channel_by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { + if chan.get().get_counterparty_node_id() != *counterparty_node_id { + return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); + } + let announcement_sigs_opt = try_chan_entry!(self, chan.get_mut().channel_ready(&msg, self.get_our_node_id(), + self.genesis_hash.clone(), &self.best_block.read().unwrap(), &self.logger), channel_state, chan); + if let Some(announcement_sigs) = announcement_sigs_opt { + log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(chan.get().channel_id())); + channel_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures { node_id: counterparty_node_id.clone(), - msg, + msg: announcement_sigs, }); + } else if chan.get().is_usable() { + // If we're sending an announcement_signatures, we'll send the (public) + // channel_update after sending a channel_announcement when we receive our + // counterparty's announcement_signatures. Thus, we only bother to send a + // channel_update here if the channel is not public, i.e. we're not sending an + // announcement_signatures. 
+ log_trace!(self.logger, "Sending private initial channel_update for our counterparty on channel {}", log_bytes!(chan.get().channel_id())); + if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) { + channel_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate { + node_id: counterparty_node_id.clone(), + msg, + }); + } } - } - Ok(()) - }, - hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) + Ok(()) + }, + hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) + } + } else { + Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) } } @@ -4539,43 +4691,50 @@ impl ChannelMana let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; - match channel_state.by_id.entry(msg.channel_id.clone()) { - hash_map::Entry::Occupied(mut chan_entry) => { - if chan_entry.get().get_counterparty_node_id() != *counterparty_node_id { - return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); - } - - if !chan_entry.get().received_shutdown() { - log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.", - log_bytes!(msg.channel_id), - if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" }); - } + let per_peer_state = self.per_peer_state.write().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + match peer_state.channel_by_id.entry(msg.channel_id.clone()) { + hash_map::Entry::Occupied(mut chan_entry) => { + if chan_entry.get().get_counterparty_node_id() != *counterparty_node_id { + return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); + } - let (shutdown, monitor_update, htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.keys_manager, &their_features, &msg), channel_state, chan_entry); - dropped_htlcs = htlcs; + if !chan_entry.get().received_shutdown() { + log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.", + log_bytes!(msg.channel_id), + if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" }); + } - // Update the monitor with the shutdown script if necessary. - if let Some(monitor_update) = monitor_update { - if let Err(e) = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update) { - let (result, is_permanent) = - handle_monitor_err!(self, e, channel_state.short_to_chan_info, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE); - if is_permanent { - remove_channel!(self, channel_state, chan_entry); - break result; + let (shutdown, monitor_update, htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.keys_manager, &their_features, &msg), channel_state, chan_entry); + dropped_htlcs = htlcs; + + // Update the monitor with the shutdown script if necessary. 
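[Editor's note] The channel_ready handler above either queues announcement_signatures (public channel, with the channel_update following the announcement later) or, for a usable private channel, sends an initial unicast channel_update. A tiny decision sketch; the boolean inputs are simplifications of whether `channel_ready()` produced announcement_signatures and whether the channel is usable:

enum ReadyAction {
    AnnouncementSignatures, // public channel: channel_update follows the announcement
    PrivateChannelUpdate,   // private but usable: unicast an initial channel_update
    Nothing,
}

fn on_channel_ready(announcement_sigs_ready: bool, is_usable: bool) -> ReadyAction {
    if announcement_sigs_ready {
        ReadyAction::AnnouncementSignatures
    } else if is_usable {
        ReadyAction::PrivateChannelUpdate
    } else {
        ReadyAction::Nothing
    }
}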
+ if let Some(monitor_update) = monitor_update { + if let Err(e) = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update) { + let (result, is_permanent) = + handle_monitor_err!(self, e, channel_state.short_to_chan_info, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE); + if is_permanent { + remove_channel!(self, channel_state, chan_entry); + break result; + } } } - } - if let Some(msg) = shutdown { - channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { - node_id: *counterparty_node_id, - msg, - }); - } + if let Some(msg) = shutdown { + channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { + node_id: *counterparty_node_id, + msg, + }); + } - break Ok(()); - }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) + break Ok(()); + }, + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) + } + } else { + return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) } }; for htlc_source in dropped_htlcs.drain(..) { @@ -4590,28 +4749,35 @@ impl ChannelMana let (tx, chan_option) = { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; - match channel_state.by_id.entry(msg.channel_id.clone()) { - hash_map::Entry::Occupied(mut chan_entry) => { - if chan_entry.get().get_counterparty_node_id() != *counterparty_node_id { - return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); - } - let (closing_signed, tx) = try_chan_entry!(self, chan_entry.get_mut().closing_signed(&self.fee_estimator, &msg), channel_state, chan_entry); - if let Some(msg) = closing_signed { - channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned { - node_id: counterparty_node_id.clone(), - msg, - }); - } - if tx.is_some() { - // We're done with this channel, we've got a signed closing transaction and - // will send the closing_signed back to the remote peer upon return. This - // also implies there are no pending HTLCs left on the channel, so we can - // fully delete it from tracking (the channel monitor is still around to - // watch for old state broadcasts)! 
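[Editor's note] Once closing_signed negotiation yields a fully signed closing transaction there are no HTLCs left, so the channel is removed from the per-peer map and the transaction handed back for broadcast; its ChannelMonitor keeps watching for old-state broadcasts. A minimal sketch with the same stand-in types and a hypothetical `finish_close` helper:

use std::collections::HashMap;

type ChannelId = [u8; 32];
struct Channel;
struct PeerState { channel_by_id: HashMap<ChannelId, Channel> }

fn finish_close(
    peer_state: &mut PeerState,
    channel_id: &ChannelId,
    signed_closing_tx: Option<Vec<u8>>, // raw closing tx bytes, if negotiation finished
) -> Option<(Vec<u8>, Channel)> {
    let tx = signed_closing_tx?;
    // Drop the channel from tracking entirely; the monitor stays around.
    let chan = peer_state.channel_by_id.remove(channel_id)?;
    Some((tx, chan))
}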
- (tx, Some(remove_channel!(self, channel_state, chan_entry))) - } else { (tx, None) } - }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) + let per_peer_state = self.per_peer_state.write().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + match peer_state.channel_by_id.entry(msg.channel_id.clone()) { + hash_map::Entry::Occupied(mut chan_entry) => { + if chan_entry.get().get_counterparty_node_id() != *counterparty_node_id { + return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); + } + let (closing_signed, tx) = try_chan_entry!(self, chan_entry.get_mut().closing_signed(&self.fee_estimator, &msg), channel_state, chan_entry); + if let Some(msg) = closing_signed { + channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned { + node_id: counterparty_node_id.clone(), + msg, + }); + } + if tx.is_some() { + // We're done with this channel, we've got a signed closing transaction and + // will send the closing_signed back to the remote peer upon return. This + // also implies there are no pending HTLCs left on the channel, so we can + // fully delete it from tracking (the channel monitor is still around to + // watch for old state broadcasts)! + (tx, Some(remove_channel!(self, channel_state, chan_entry))) + } else { (tx, None) } + }, + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) + } + } else { + return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) } }; if let Some(broadcast_tx) = tx { @@ -4643,37 +4809,44 @@ impl ChannelMana let (pending_forward_info, mut channel_state_lock) = self.decode_update_add_htlc_onion(msg); let channel_state = &mut *channel_state_lock; - match channel_state.by_id.entry(msg.channel_id) { - hash_map::Entry::Occupied(mut chan) => { - if chan.get().get_counterparty_node_id() != *counterparty_node_id { - return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); - } - - let create_pending_htlc_status = |chan: &Channel, pending_forward_info: PendingHTLCStatus, error_code: u16| { - // If the update_add is completely bogus, the call will Err and we will close, - // but if we've sent a shutdown and they haven't acknowledged it yet, we just - // want to reject the new HTLC and fail it backwards instead of forwarding. - match pending_forward_info { - PendingHTLCStatus::Forward(PendingHTLCInfo { ref incoming_shared_secret, .. 
}) => { - let reason = if (error_code & 0x1000) != 0 { - let (real_code, error_data) = self.get_htlc_inbound_temp_fail_err_and_data(error_code, chan); - onion_utils::build_first_hop_failure_packet(incoming_shared_secret, real_code, &error_data) - } else { - onion_utils::build_first_hop_failure_packet(incoming_shared_secret, error_code, &[]) - }; - let msg = msgs::UpdateFailHTLC { - channel_id: msg.channel_id, - htlc_id: msg.htlc_id, - reason - }; - PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msg)) - }, - _ => pending_forward_info + let per_peer_state = self.per_peer_state.write().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + match peer_state.channel_by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { + if chan.get().get_counterparty_node_id() != *counterparty_node_id { + return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); } - }; - try_chan_entry!(self, chan.get_mut().update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.logger), channel_state, chan); - }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) + + let create_pending_htlc_status = |chan: &Channel, pending_forward_info: PendingHTLCStatus, error_code: u16| { + // If the update_add is completely bogus, the call will Err and we will close, + // but if we've sent a shutdown and they haven't acknowledged it yet, we just + // want to reject the new HTLC and fail it backwards instead of forwarding. + match pending_forward_info { + PendingHTLCStatus::Forward(PendingHTLCInfo { ref incoming_shared_secret, .. 
}) => { + let reason = if (error_code & 0x1000) != 0 { + let (real_code, error_data) = self.get_htlc_inbound_temp_fail_err_and_data(error_code, chan); + onion_utils::build_first_hop_failure_packet(incoming_shared_secret, real_code, &error_data) + } else { + onion_utils::build_first_hop_failure_packet(incoming_shared_secret, error_code, &[]) + }; + let msg = msgs::UpdateFailHTLC { + channel_id: msg.channel_id, + htlc_id: msg.htlc_id, + reason + }; + PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msg)) + }, + _ => pending_forward_info + } + }; + try_chan_entry!(self, chan.get_mut().update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.logger), channel_state, chan); + }, + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) + } + } else { + return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) } Ok(()) } @@ -4682,14 +4855,21 @@ impl ChannelMana let mut channel_lock = self.channel_state.lock().unwrap(); let (htlc_source, forwarded_htlc_value) = { let channel_state = &mut *channel_lock; - match channel_state.by_id.entry(msg.channel_id) { - hash_map::Entry::Occupied(mut chan) => { - if chan.get().get_counterparty_node_id() != *counterparty_node_id { - return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); - } - try_chan_entry!(self, chan.get_mut().update_fulfill_htlc(&msg), channel_state, chan) - }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) + let per_peer_state = self.per_peer_state.write().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + match peer_state.channel_by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { + if chan.get().get_counterparty_node_id() != *counterparty_node_id { + return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); + } + try_chan_entry!(self, chan.get_mut().update_fulfill_htlc(&msg), channel_state, chan) + }, + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) + } + } else { + return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) } }; self.claim_funds_internal(channel_lock, htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, msg.channel_id); @@ -4699,14 +4879,21 @@ impl ChannelMana fn internal_update_fail_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), MsgHandleErrInternal> { let mut channel_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_lock; - match channel_state.by_id.entry(msg.channel_id) { - hash_map::Entry::Occupied(mut chan) => { - if chan.get().get_counterparty_node_id() != *counterparty_node_id { - return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); - } - try_chan_entry!(self, 
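[Editor's note] The `create_pending_htlc_status` closure above rewrites a would-be forward into a failure when we have already sent shutdown: failures with the UPDATE (0x1000) bit carry our latest channel_update data so the sender can retry with current fees and CLTV deltas, others carry an empty payload. A small sketch; `channel_update_data` stands in for what the real code fetches via `get_htlc_inbound_temp_fail_err_and_data` before building the first-hop failure packet:

const UPDATE: u16 = 0x1000;

fn failure_payload(error_code: u16, channel_update_data: &[u8]) -> (u16, Vec<u8>) {
    if error_code & UPDATE != 0 {
        // UPDATE-flagged failures include the encoded channel_update.
        (error_code, channel_update_data.to_vec())
    } else {
        (error_code, Vec::new())
    }
}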
chan.get_mut().update_fail_htlc(&msg, HTLCFailReason::LightningError { err: msg.reason.clone() }), channel_state, chan); - }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) + let per_peer_state = self.per_peer_state.write().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + match peer_state.channel_by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { + if chan.get().get_counterparty_node_id() != *counterparty_node_id { + return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); + } + try_chan_entry!(self, chan.get_mut().update_fail_htlc(&msg, HTLCFailReason::LightningError { err: msg.reason.clone() }), channel_state, chan); + }, + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) + } + } else { + return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)); } Ok(()) } @@ -4714,64 +4901,78 @@ impl ChannelMana fn internal_update_fail_malformed_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), MsgHandleErrInternal> { let mut channel_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_lock; - match channel_state.by_id.entry(msg.channel_id) { - hash_map::Entry::Occupied(mut chan) => { - if chan.get().get_counterparty_node_id() != *counterparty_node_id { - return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); - } - if (msg.failure_code & 0x8000) == 0 { - let chan_err: ChannelError = ChannelError::Close("Got update_fail_malformed_htlc with BADONION not set".to_owned()); - try_chan_entry!(self, Err(chan_err), channel_state, chan); - } - try_chan_entry!(self, chan.get_mut().update_fail_malformed_htlc(&msg, HTLCFailReason::Reason { failure_code: msg.failure_code, data: Vec::new() }), channel_state, chan); - Ok(()) - }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) + let per_peer_state = self.per_peer_state.write().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + match peer_state.channel_by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { + if chan.get().get_counterparty_node_id() != *counterparty_node_id { + return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); + } + if (msg.failure_code & 0x8000) == 0 { + let chan_err: ChannelError = ChannelError::Close("Got update_fail_malformed_htlc with BADONION not set".to_owned()); + try_chan_entry!(self, Err(chan_err), channel_state, chan); + } + try_chan_entry!(self, chan.get_mut().update_fail_malformed_htlc(&msg, HTLCFailReason::Reason { failure_code: msg.failure_code, data: Vec::new() }), channel_state, chan); + Ok(()) + }, + hash_map::Entry::Vacant(_) => return 
Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) + } + } else { + return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) } } fn internal_commitment_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(), MsgHandleErrInternal> { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; - match channel_state.by_id.entry(msg.channel_id) { - hash_map::Entry::Occupied(mut chan) => { - if chan.get().get_counterparty_node_id() != *counterparty_node_id { - return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); - } - let (revoke_and_ack, commitment_signed, monitor_update) = - match chan.get_mut().commitment_signed(&msg, &self.logger) { - Err((None, e)) => try_chan_entry!(self, Err(e), channel_state, chan), - Err((Some(update), e)) => { - assert!(chan.get().is_awaiting_monitor_update()); - let _ = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), update); - try_chan_entry!(self, Err(e), channel_state, chan); - unreachable!(); - }, - Ok(res) => res - }; - if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { - return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some()); - } - channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK { - node_id: counterparty_node_id.clone(), - msg: revoke_and_ack, - }); - if let Some(msg) = commitment_signed { - channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { + let per_peer_state = self.per_peer_state.write().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + match peer_state.channel_by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { + if chan.get().get_counterparty_node_id() != *counterparty_node_id { + return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); + } + let (revoke_and_ack, commitment_signed, monitor_update) = + match chan.get_mut().commitment_signed(&msg, &self.logger) { + Err((None, e)) => try_chan_entry!(self, Err(e), channel_state, chan), + Err((Some(update), e)) => { + assert!(chan.get().is_awaiting_monitor_update()); + let _ = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), update); + try_chan_entry!(self, Err(e), channel_state, chan); + unreachable!(); + }, + Ok(res) => res + }; + if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { + return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some()); + } + channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK { node_id: counterparty_node_id.clone(), - updates: msgs::CommitmentUpdate { - update_add_htlcs: Vec::new(), - update_fulfill_htlcs: Vec::new(), - update_fail_htlcs: Vec::new(), - update_fail_malformed_htlcs: Vec::new(), - update_fee: None, - commitment_signed: msg, - }, + msg: revoke_and_ack, }); - } - Ok(()) - }, - hash_map::Entry::Vacant(_) => 
return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) + if let Some(msg) = commitment_signed { + channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { + node_id: counterparty_node_id.clone(), + updates: msgs::CommitmentUpdate { + update_add_htlcs: Vec::new(), + update_fulfill_htlcs: Vec::new(), + update_fail_htlcs: Vec::new(), + update_fail_malformed_htlcs: Vec::new(), + update_fee: None, + commitment_signed: msg, + }, + }); + } + Ok(()) + }, + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) + } + } else { + return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) } } @@ -4818,45 +5019,52 @@ impl ChannelMana let res = loop { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; - match channel_state.by_id.entry(msg.channel_id) { - hash_map::Entry::Occupied(mut chan) => { - if chan.get().get_counterparty_node_id() != *counterparty_node_id { - break Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); - } - let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update(); - let raa_updates = break_chan_entry!(self, - chan.get_mut().revoke_and_ack(&msg, &self.logger), channel_state, chan); - htlcs_to_fail = raa_updates.holding_cell_failed_htlcs; - if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), raa_updates.monitor_update) { - if was_frozen_for_monitor { - assert!(raa_updates.commitment_update.is_none()); - assert!(raa_updates.accepted_htlcs.is_empty()); - assert!(raa_updates.failed_htlcs.is_empty()); - assert!(raa_updates.finalized_claimed_htlcs.is_empty()); - break Err(MsgHandleErrInternal::ignore_no_close("Previous monitor update failure prevented responses to RAA".to_owned())); - } else { - if let Err(e) = handle_monitor_err!(self, e, channel_state, chan, - RAACommitmentOrder::CommitmentFirst, false, - raa_updates.commitment_update.is_some(), false, - raa_updates.accepted_htlcs, raa_updates.failed_htlcs, - raa_updates.finalized_claimed_htlcs) { - break Err(e); - } else { unreachable!(); } + let per_peer_state = self.per_peer_state.write().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + match peer_state.channel_by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { + if chan.get().get_counterparty_node_id() != *counterparty_node_id { + break Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); } - } - if let Some(updates) = raa_updates.commitment_update { - channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { - node_id: counterparty_node_id.clone(), - updates, - }); - } - break Ok((raa_updates.accepted_htlcs, raa_updates.failed_htlcs, - raa_updates.finalized_claimed_htlcs, - chan.get().get_short_channel_id() - .unwrap_or(chan.get().outbound_scid_alias()), - chan.get().get_funding_txo().unwrap())) - }, - hash_map::Entry::Vacant(_) => break Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), 
msg.channel_id)) + let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update(); + let raa_updates = break_chan_entry!(self, + chan.get_mut().revoke_and_ack(&msg, &self.logger), channel_state, chan); + htlcs_to_fail = raa_updates.holding_cell_failed_htlcs; + if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), raa_updates.monitor_update) { + if was_frozen_for_monitor { + assert!(raa_updates.commitment_update.is_none()); + assert!(raa_updates.accepted_htlcs.is_empty()); + assert!(raa_updates.failed_htlcs.is_empty()); + assert!(raa_updates.finalized_claimed_htlcs.is_empty()); + break Err(MsgHandleErrInternal::ignore_no_close("Previous monitor update failure prevented responses to RAA".to_owned())); + } else { + if let Err(e) = handle_monitor_err!(self, e, channel_state, chan, + RAACommitmentOrder::CommitmentFirst, false, + raa_updates.commitment_update.is_some(), false, + raa_updates.accepted_htlcs, raa_updates.failed_htlcs, + raa_updates.finalized_claimed_htlcs) { + break Err(e); + } else { unreachable!(); } + } + } + if let Some(updates) = raa_updates.commitment_update { + channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { + node_id: counterparty_node_id.clone(), + updates, + }); + } + break Ok((raa_updates.accepted_htlcs, raa_updates.failed_htlcs, + raa_updates.finalized_claimed_htlcs, + chan.get().get_short_channel_id() + .unwrap_or(chan.get().outbound_scid_alias()), + chan.get().get_funding_txo().unwrap())) + }, + hash_map::Entry::Vacant(_) => break Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) + } + } else { + break Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) } }; self.fail_holding_cell_htlcs(htlcs_to_fail, msg.channel_id, counterparty_node_id); @@ -4878,14 +5086,21 @@ impl ChannelMana fn internal_update_fee(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), MsgHandleErrInternal> { let mut channel_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_lock; - match channel_state.by_id.entry(msg.channel_id) { - hash_map::Entry::Occupied(mut chan) => { - if chan.get().get_counterparty_node_id() != *counterparty_node_id { - return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); - } - try_chan_entry!(self, chan.get_mut().update_fee(&self.fee_estimator, &msg), channel_state, chan); - }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) + let per_peer_state = self.per_peer_state.write().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + match peer_state.channel_by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { + if chan.get().get_counterparty_node_id() != *counterparty_node_id { + return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); + } + try_chan_entry!(self, chan.get_mut().update_fee(&self.fee_estimator, &msg), channel_state, chan); + }, + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find 
corresponding channel".to_owned(), msg.channel_id)) + } + } else { + return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)); } Ok(()) } @@ -4894,24 +5109,31 @@ impl ChannelMana let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; - match channel_state.by_id.entry(msg.channel_id) { - hash_map::Entry::Occupied(mut chan) => { - if chan.get().get_counterparty_node_id() != *counterparty_node_id { - return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); - } - if !chan.get().is_usable() { - return Err(MsgHandleErrInternal::from_no_close(LightningError{err: "Got an announcement_signatures before we were ready for it".to_owned(), action: msgs::ErrorAction::IgnoreError})); - } + let per_peer_state = self.per_peer_state.write().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + match peer_state.channel_by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { + if chan.get().get_counterparty_node_id() != *counterparty_node_id { + return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); + } + if !chan.get().is_usable() { + return Err(MsgHandleErrInternal::from_no_close(LightningError{err: "Got an announcement_signatures before we were ready for it".to_owned(), action: msgs::ErrorAction::IgnoreError})); + } - channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement { - msg: try_chan_entry!(self, chan.get_mut().announcement_signatures( - self.get_our_node_id(), self.genesis_hash.clone(), self.best_block.read().unwrap().height(), msg), channel_state, chan), - // Note that announcement_signatures fails if the channel cannot be announced, - // so get_channel_update_for_broadcast will never fail by the time we get here. - update_msg: self.get_channel_update_for_broadcast(chan.get()).unwrap(), - }); - }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) + channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement { + msg: try_chan_entry!(self, chan.get_mut().announcement_signatures( + self.get_our_node_id(), self.genesis_hash.clone(), self.best_block.read().unwrap().height(), msg), channel_state, chan), + // Note that announcement_signatures fails if the channel cannot be announced, + // so get_channel_update_for_broadcast will never fail by the time we get here. 
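The handlers reworked in the hunks above (revoke_and_ack, update_fee, announcement_signatures) all share the same two-step lookup: find the peer's entry in per_peer_state, lock that peer's state, then look the channel up in its channel_by_id, with distinct errors for an unknown peer and an unknown channel. A minimal, self-contained sketch of that shape, using stand-in types (String node ids, [u8; 32] channel ids, a placeholder channel struct) rather than the real PublicKey/Channel types:

use std::collections::HashMap;
use std::sync::{Mutex, RwLock};

type NodeId = String;
type ChannelId = [u8; 32];

struct FakeChannel { feerate: u32 }

struct PeerState { channel_by_id: HashMap<ChannelId, FakeChannel> }

struct Manager { per_peer_state: RwLock<HashMap<NodeId, Mutex<PeerState>>> }

impl Manager {
    // Mirrors the handlers' control flow: a missing peer and a missing channel are
    // reported separately, and the channel is only touched under the peer's lock.
    fn with_channel_mut<R>(
        &self, counterparty_node_id: &NodeId, channel_id: &ChannelId,
        f: impl FnOnce(&mut FakeChannel) -> R,
    ) -> Result<R, String> {
        // Several handlers in the patch take the write lock here; a read lock is
        // enough for this sketch since no peer entry is added or removed.
        let per_peer_state = self.per_peer_state.read().unwrap();
        let peer_state_mutex = per_peer_state.get(counterparty_node_id)
            .ok_or_else(|| format!("Can't find a peer with node_id {}", counterparty_node_id))?;
        let mut peer_state = peer_state_mutex.lock().unwrap();
        let chan = peer_state.channel_by_id.get_mut(channel_id)
            .ok_or_else(|| "Failed to find corresponding channel".to_owned())?;
        Ok(f(chan))
    }
}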
+ update_msg: self.get_channel_update_for_broadcast(chan.get()).unwrap(), + }); + }, + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) + } + } else { + return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)); } Ok(()) } @@ -4920,33 +5142,38 @@ impl ChannelMana fn internal_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) -> Result { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; - let chan_id = match channel_state.short_to_chan_info.get(&msg.contents.short_channel_id) { - Some((_cp_id, chan_id)) => chan_id.clone(), + let (chan_counterparty_node_id, chan_id) = match channel_state.short_to_chan_info.get(&msg.contents.short_channel_id) { + Some((cp_id, chan_id)) => (cp_id, chan_id.clone()), None => { // It's not a local channel return Ok(NotifyOption::SkipPersist) } }; - match channel_state.by_id.entry(chan_id) { - hash_map::Entry::Occupied(mut chan) => { - if chan.get().get_counterparty_node_id() != *counterparty_node_id { - if chan.get().should_announce() { - // If the announcement is about a channel of ours which is public, some - // other peer may simply be forwarding all its gossip to us. Don't provide - // a scary-looking error message and return Ok instead. + let per_peer_state = self.per_peer_state.write().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(chan_counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + match peer_state.channel_by_id.entry(chan_id) { + hash_map::Entry::Occupied(mut chan) => { + if chan.get().get_counterparty_node_id() != *counterparty_node_id { + if chan.get().should_announce() { + // If the announcement is about a channel of ours which is public, some + // other peer may simply be forwarding all its gossip to us. Don't provide + // a scary-looking error message and return Ok instead. + return Ok(NotifyOption::SkipPersist); + } + return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a channel_update for a channel from the wrong node - it shouldn't know about our private channels!".to_owned(), chan_id)); + } + let were_node_one = self.get_our_node_id().serialize()[..] < chan.get().get_counterparty_node_id().serialize()[..]; + let msg_from_node_one = msg.contents.flags & 1 == 0; + if were_node_one == msg_from_node_one { return Ok(NotifyOption::SkipPersist); + } else { + try_chan_entry!(self, chan.get_mut().channel_update(&msg), channel_state, chan); } - return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a channel_update for a channel from the wrong node - it shouldn't know about our private channels!".to_owned(), chan_id)); - } - let were_node_one = self.get_our_node_id().serialize()[..] 
< chan.get().get_counterparty_node_id().serialize()[..]; - let msg_from_node_one = msg.contents.flags & 1 == 0; - if were_node_one == msg_from_node_one { - return Ok(NotifyOption::SkipPersist); - } else { - try_chan_entry!(self, chan.get_mut().channel_update(&msg), channel_state, chan); - } - }, - hash_map::Entry::Vacant(_) => unreachable!() + }, + hash_map::Entry::Vacant(_) => unreachable!() + } } Ok(NotifyOption::DoPersist) } @@ -4957,45 +5184,52 @@ impl ChannelMana let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; - match channel_state.by_id.entry(msg.channel_id) { - hash_map::Entry::Occupied(mut chan) => { - if chan.get().get_counterparty_node_id() != *counterparty_node_id { - return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); - } - // Currently, we expect all holding cell update_adds to be dropped on peer - // disconnect, so Channel's reestablish will never hand us any holding cell - // freed HTLCs to fail backwards. If in the future we no longer drop pending - // add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here. - let responses = try_chan_entry!(self, chan.get_mut().channel_reestablish( - msg, &self.logger, self.our_network_pubkey.clone(), self.genesis_hash, - &*self.best_block.read().unwrap()), channel_state, chan); - let mut channel_update = None; - if let Some(msg) = responses.shutdown_msg { - channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { - node_id: counterparty_node_id.clone(), - msg, - }); - } else if chan.get().is_usable() { - // If the channel is in a usable state (ie the channel is not being shut - // down), send a unicast channel_update to our counterparty to make sure - // they have the latest channel parameters. - if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) { - channel_update = Some(events::MessageSendEvent::SendChannelUpdate { - node_id: chan.get().get_counterparty_node_id(), + let per_peer_state = self.per_peer_state.write().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + match peer_state.channel_by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { + if chan.get().get_counterparty_node_id() != *counterparty_node_id { + return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); + } + // Currently, we expect all holding cell update_adds to be dropped on peer + // disconnect, so Channel's reestablish will never hand us any holding cell + // freed HTLCs to fail backwards. If in the future we no longer drop pending + // add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here. 
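internal_channel_update above resolves the incoming SCID through short_to_chan_info, which now carries the counterparty's node id next to the channel id, and only then takes that peer's lock. A rough sketch of the resolution step under the same simplified stand-in types (the real code keeps short_to_chan_info inside the channel_state lock rather than behind its own Mutex):

use std::collections::HashMap;
use std::sync::{Mutex, RwLock};

type NodeId = String;
type ChannelId = [u8; 32];
type Scid = u64;

struct PeerState { channel_by_id: HashMap<ChannelId, ()> }

struct Manager {
    // SCID (or outbound SCID alias) -> (counterparty_node_id, channel_id).
    short_to_chan_info: Mutex<HashMap<Scid, (NodeId, ChannelId)>>,
    per_peer_state: RwLock<HashMap<NodeId, Mutex<PeerState>>>,
}

impl Manager {
    // Ok(false) stands in for the SkipPersist path taken when the SCID is not ours.
    fn handle_scid_update(&self, scid: Scid) -> Result<bool, String> {
        let (cp_id, chan_id) = match self.short_to_chan_info.lock().unwrap().get(&scid) {
            Some((cp_id, chan_id)) => (cp_id.clone(), *chan_id),
            None => return Ok(false), // not a local channel
        };
        let per_peer_state = self.per_peer_state.read().unwrap();
        let peer_state_mutex = per_peer_state.get(&cp_id)
            .ok_or_else(|| format!("no peer state for {}", cp_id))?;
        let peer_state = peer_state_mutex.lock().unwrap();
        // A channel referenced by short_to_chan_info is expected to exist in its
        // peer's bucket; the handler above treats a miss as unreachable.
        Ok(peer_state.channel_by_id.contains_key(&chan_id))
    }
}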
+ let responses = try_chan_entry!(self, chan.get_mut().channel_reestablish( + msg, &self.logger, self.our_network_pubkey.clone(), self.genesis_hash, + &*self.best_block.read().unwrap()), channel_state, chan); + let mut channel_update = None; + if let Some(msg) = responses.shutdown_msg { + channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { + node_id: counterparty_node_id.clone(), msg, }); + } else if chan.get().is_usable() { + // If the channel is in a usable state (ie the channel is not being shut + // down), send a unicast channel_update to our counterparty to make sure + // they have the latest channel parameters. + if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) { + channel_update = Some(events::MessageSendEvent::SendChannelUpdate { + node_id: chan.get().get_counterparty_node_id(), + msg, + }); + } } - } - let need_lnd_workaround = chan.get_mut().workaround_lnd_bug_4006.take(); - chan_restoration_res = handle_chan_restoration_locked!( - self, channel_state_lock, channel_state, chan, responses.raa, responses.commitment_update, responses.order, - responses.mon_update, Vec::new(), None, responses.channel_ready, responses.announcement_sigs); - if let Some(upd) = channel_update { - channel_state.pending_msg_events.push(upd); - } - (responses.holding_cell_failed_htlcs, need_lnd_workaround) - }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) + let need_lnd_workaround = chan.get_mut().workaround_lnd_bug_4006.take(); + chan_restoration_res = handle_chan_restoration_locked!( + self, channel_state_lock, channel_state, chan, responses.raa, responses.commitment_update, responses.order, + responses.mon_update, Vec::new(), None, responses.channel_ready, responses.announcement_sigs); + if let Some(upd) = channel_update { + channel_state.pending_msg_events.push(upd); + } + (responses.holding_cell_failed_htlcs, need_lnd_workaround) + }, + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) + } + } else { + return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)); } }; post_handle_chan_restoration!(self, chan_restoration_res); @@ -5028,28 +5262,38 @@ impl ChannelMana MonitorEvent::UpdateFailed(funding_outpoint) => { let mut channel_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_lock; - let by_id = &mut channel_state.by_id; - let pending_msg_events = &mut channel_state.pending_msg_events; - if let hash_map::Entry::Occupied(chan_entry) = by_id.entry(funding_outpoint.to_channel_id()) { - let mut chan = remove_channel!(self, channel_state, chan_entry); - failed_channels.push(chan.force_shutdown(false)); - if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { - pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { - msg: update - }); + let counterparty_node_id_opt = { + let id_to_peer = self.id_to_peer.lock().unwrap(); + id_to_peer.get(&funding_outpoint.to_channel_id()).cloned() + }; + if let Some(counterparty_node_id) = counterparty_node_id_opt { + let per_peer_state = self.per_peer_state.write().unwrap(); + let pending_msg_events = &mut channel_state.pending_msg_events; + if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) { + let mut peer_state_lock 
= peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + if let hash_map::Entry::Occupied(chan_entry) = peer_state.channel_by_id.entry(funding_outpoint.to_channel_id()) { + let mut chan = remove_channel!(self, channel_state, chan_entry); + failed_channels.push(chan.force_shutdown(false)); + if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { + pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + msg: update + }); + } + let reason = if let MonitorEvent::UpdateFailed(_) = monitor_event { + ClosureReason::ProcessingError { err: "Failed to persist ChannelMonitor update during chain sync".to_string() } + } else { + ClosureReason::CommitmentTxConfirmed + }; + self.issue_channel_close_events(&chan, reason); + pending_msg_events.push(events::MessageSendEvent::HandleError { + node_id: chan.get_counterparty_node_id(), + action: msgs::ErrorAction::SendErrorMessage { + msg: msgs::ErrorMessage { channel_id: chan.channel_id(), data: "Channel force-closed".to_owned() } + }, + }); + } } - let reason = if let MonitorEvent::UpdateFailed(_) = monitor_event { - ClosureReason::ProcessingError { err: "Failed to persist ChannelMonitor update during chain sync".to_string() } - } else { - ClosureReason::CommitmentTxConfirmed - }; - self.issue_channel_close_events(&chan, reason); - pending_msg_events.push(events::MessageSendEvent::HandleError { - node_id: chan.get_counterparty_node_id(), - action: msgs::ErrorAction::SendErrorMessage { - msg: msgs::ErrorMessage { channel_id: chan.channel_id(), data: "Channel force-closed".to_owned() } - }, - }); } }, MonitorEvent::UpdateCompleted { funding_txo, monitor_update_id } => { @@ -5089,43 +5333,46 @@ impl ChannelMana { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; - let by_id = &mut channel_state.by_id; let short_to_chan_info = &mut channel_state.short_to_chan_info; let pending_msg_events = &mut channel_state.pending_msg_events; - - by_id.retain(|channel_id, chan| { - match chan.maybe_free_holding_cell_htlcs(&self.logger) { - Ok((commitment_opt, holding_cell_failed_htlcs)) => { - if !holding_cell_failed_htlcs.is_empty() { - failed_htlcs.push(( - holding_cell_failed_htlcs, - *channel_id, - chan.get_counterparty_node_id() - )); - } - if let Some((commitment_update, monitor_update)) = commitment_opt { - if let Err(e) = self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) { - has_monitor_update = true; - let (res, close_channel) = handle_monitor_err!(self, e, short_to_chan_info, chan, RAACommitmentOrder::CommitmentFirst, channel_id, COMMITMENT_UPDATE_ONLY); - handle_errors.push((chan.get_counterparty_node_id(), res)); - if close_channel { return false; } - } else { - pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { - node_id: chan.get_counterparty_node_id(), - updates: commitment_update, - }); + let per_peer_state = self.per_peer_state.read().unwrap(); + for (_cp_id, peer_state_mutex) in per_peer_state.iter() { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + peer_state.channel_by_id.retain(|channel_id, chan| { + match chan.maybe_free_holding_cell_htlcs(&self.logger) { + Ok((commitment_opt, holding_cell_failed_htlcs)) => { + if !holding_cell_failed_htlcs.is_empty() { + failed_htlcs.push(( + holding_cell_failed_htlcs, + *channel_id, + chan.get_counterparty_node_id() + )); } + if let Some((commitment_update, monitor_update)) = commitment_opt { + if let 
Err(e) = self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) { + has_monitor_update = true; + let (res, close_channel) = handle_monitor_err!(self, e, short_to_chan_info, chan, RAACommitmentOrder::CommitmentFirst, channel_id, COMMITMENT_UPDATE_ONLY); + handle_errors.push((chan.get_counterparty_node_id(), res)); + if close_channel { return false; } + } else { + pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { + node_id: chan.get_counterparty_node_id(), + updates: commitment_update, + }); + } + } + true + }, + Err(e) => { + let (close_channel, res) = convert_chan_err!(self, e, short_to_chan_info, chan, channel_id); + handle_errors.push((chan.get_counterparty_node_id(), Err(res))); + // ChannelClosed event is generated by handle_error for us + !close_channel } - true - }, - Err(e) => { - let (close_channel, res) = convert_chan_err!(self, e, short_to_chan_info, chan, channel_id); - handle_errors.push((chan.get_counterparty_node_id(), Err(res))); - // ChannelClosed event is generated by handle_error for us - !close_channel } - } - }); + }); + } } let has_update = has_monitor_update || !failed_htlcs.is_empty() || !handle_errors.is_empty(); @@ -5149,44 +5396,48 @@ impl ChannelMana { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; - let by_id = &mut channel_state.by_id; let short_to_chan_info = &mut channel_state.short_to_chan_info; let pending_msg_events = &mut channel_state.pending_msg_events; - by_id.retain(|channel_id, chan| { - match chan.maybe_propose_closing_signed(&self.fee_estimator, &self.logger) { - Ok((msg_opt, tx_opt)) => { - if let Some(msg) = msg_opt { - has_update = true; - pending_msg_events.push(events::MessageSendEvent::SendClosingSigned { - node_id: chan.get_counterparty_node_id(), msg, - }); - } - if let Some(tx) = tx_opt { - // We're done with this channel. We got a closing_signed and sent back - // a closing_signed with a closing transaction to broadcast. - if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { - pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { - msg: update + let per_peer_state = self.per_peer_state.read().unwrap(); + for (_cp_id, peer_state_mutex) in per_peer_state.iter() { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + peer_state.channel_by_id.retain(|channel_id, chan| { + match chan.maybe_propose_closing_signed(&self.fee_estimator, &self.logger) { + Ok((msg_opt, tx_opt)) => { + if let Some(msg) = msg_opt { + has_update = true; + pending_msg_events.push(events::MessageSendEvent::SendClosingSigned { + node_id: chan.get_counterparty_node_id(), msg, }); } + if let Some(tx) = tx_opt { + // We're done with this channel. We got a closing_signed and sent back + // a closing_signed with a closing transaction to broadcast. 
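Both sweeps above, freeing holding-cell HTLCs and proposing closing_signed, used to run one retain over channel_state.by_id; they now iterate every peer bucket under the outer read lock and retain within each channel_by_id. The iteration shape, reduced to placeholder types:

use std::collections::HashMap;
use std::sync::{Mutex, RwLock};

type NodeId = String;
type ChannelId = [u8; 32];

struct FakeChannel { keep: bool }

struct PeerState { channel_by_id: HashMap<ChannelId, FakeChannel> }

fn retain_across_peers(per_peer_state: &RwLock<HashMap<NodeId, Mutex<PeerState>>>) {
    // Read lock on the outer map: peers are neither added nor removed here, only
    // channels inside each per-peer bucket are dropped.
    let per_peer_state = per_peer_state.read().unwrap();
    for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
        let mut peer_state = peer_state_mutex.lock().unwrap();
        peer_state.channel_by_id.retain(|_channel_id, chan| {
            // Per-channel decision, exactly where the holding-cell and
            // closing_signed sweeps above do their work.
            chan.keep
        });
    }
}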
+ if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { + pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + msg: update + }); + } - self.issue_channel_close_events(chan, ClosureReason::CooperativeClosure); + self.issue_channel_close_events(chan, ClosureReason::CooperativeClosure); - log_info!(self.logger, "Broadcasting {}", log_tx!(tx)); - self.tx_broadcaster.broadcast_transaction(&tx); - update_maps_on_chan_removal!(self, short_to_chan_info, chan); - false - } else { true } - }, - Err(e) => { - has_update = true; - let (close_channel, res) = convert_chan_err!(self, e, short_to_chan_info, chan, channel_id); - handle_errors.push((chan.get_counterparty_node_id(), Err(res))); - !close_channel + log_info!(self.logger, "Broadcasting {}", log_tx!(tx)); + self.tx_broadcaster.broadcast_transaction(&tx); + update_maps_on_chan_removal!(self, short_to_chan_info, chan); + false + } else { true } + }, + Err(e) => { + has_update = true; + let (close_channel, res) = convert_chan_err!(self, e, short_to_chan_info, chan, channel_id); + handle_errors.push((chan.get_counterparty_node_id(), Err(res))); + !close_channel + } } - } - }); + }); + } } for (counterparty_node_id, err) in handle_errors.drain(..) { @@ -5612,9 +5863,13 @@ where fn get_relevant_txids(&self) -> Vec { let channel_state = self.channel_state.lock().unwrap(); let mut res = Vec::with_capacity(channel_state.short_to_chan_info.len()); - for chan in channel_state.by_id.values() { - if let Some(funding_txo) = chan.get_funding_txo() { - res.push(funding_txo.txid); + for (_, peer_state_mutex) in self.per_peer_state.read().unwrap().iter() { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + for chan in peer_state.channel_by_id.values() { + if let Some(funding_txo) = chan.get_funding_txo() { + res.push(funding_txo.txid); + } } } res @@ -5656,83 +5911,88 @@ where let channel_state = &mut *channel_lock; let short_to_chan_info = &mut channel_state.short_to_chan_info; let pending_msg_events = &mut channel_state.pending_msg_events; - channel_state.by_id.retain(|_, channel| { - let res = f(channel); - if let Ok((channel_ready_opt, mut timed_out_pending_htlcs, announcement_sigs)) = res { - for (source, payment_hash) in timed_out_pending_htlcs.drain(..) { - let (failure_code, data) = self.get_htlc_inbound_temp_fail_err_and_data(0x1000|14 /* expiry_too_soon */, &channel); - timed_out_htlcs.push((source, payment_hash, HTLCFailReason::Reason { - failure_code, data, - })); - } - if let Some(channel_ready) = channel_ready_opt { - send_channel_ready!(short_to_chan_info, pending_msg_events, channel, channel_ready); - if channel.is_usable() { - log_trace!(self.logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", log_bytes!(channel.channel_id())); - if let Ok(msg) = self.get_channel_update_for_unicast(channel) { - pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate { - node_id: channel.get_counterparty_node_id(), - msg, - }); + let per_peer_state = self.per_peer_state.read().unwrap(); + for (_cp_id, peer_state_mutex) in per_peer_state.iter() { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + peer_state.channel_by_id.retain(|_, channel| { + let res = f(channel); + if let Ok((channel_ready_opt, mut timed_out_pending_htlcs, announcement_sigs)) = res { + for (source, payment_hash) in timed_out_pending_htlcs.drain(..) 
{ + let (failure_code, data) = self.get_htlc_inbound_temp_fail_err_and_data(0x1000|14 /* expiry_too_soon */, &channel); + timed_out_htlcs.push((source, payment_hash, HTLCFailReason::Reason { + failure_code, data, + })); + } + if let Some(channel_ready) = channel_ready_opt { + send_channel_ready!(short_to_chan_info, pending_msg_events, channel, channel_ready); + if channel.is_usable() { + log_trace!(self.logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", log_bytes!(channel.channel_id())); + if let Ok(msg) = self.get_channel_update_for_unicast(channel) { + pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate { + node_id: channel.get_counterparty_node_id(), + msg, + }); + } + } else { + log_trace!(self.logger, "Sending channel_ready WITHOUT channel_update for {}", log_bytes!(channel.channel_id())); } - } else { - log_trace!(self.logger, "Sending channel_ready WITHOUT channel_update for {}", log_bytes!(channel.channel_id())); } - } - if let Some(announcement_sigs) = announcement_sigs { - log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(channel.channel_id())); - pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures { - node_id: channel.get_counterparty_node_id(), - msg: announcement_sigs, - }); - if let Some(height) = height_opt { - if let Some(announcement) = channel.get_signed_channel_announcement(self.get_our_node_id(), self.genesis_hash, height) { - pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement { - msg: announcement, - // Note that announcement_signatures fails if the channel cannot be announced, - // so get_channel_update_for_broadcast will never fail by the time we get here. - update_msg: self.get_channel_update_for_broadcast(channel).unwrap(), - }); + if let Some(announcement_sigs) = announcement_sigs { + log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(channel.channel_id())); + pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures { + node_id: channel.get_counterparty_node_id(), + msg: announcement_sigs, + }); + if let Some(height) = height_opt { + if let Some(announcement) = channel.get_signed_channel_announcement(self.get_our_node_id(), self.genesis_hash, height) { + pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement { + msg: announcement, + // Note that announcement_signatures fails if the channel cannot be announced, + // so get_channel_update_for_broadcast will never fail by the time we get here. + update_msg: self.get_channel_update_for_broadcast(channel).unwrap(), + }); + } } } - } - if channel.is_our_channel_ready() { - if let Some(real_scid) = channel.get_short_channel_id() { - // If we sent a 0conf channel_ready, and now have an SCID, we add it - // to the short_to_chan_info map here. Note that we check whether we - // can relay using the real SCID at relay-time (i.e. - // enforce option_scid_alias then), and if the funding tx is ever - // un-confirmed we force-close the channel, ensuring short_to_chan_info - // is always consistent. 
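When the funding transaction reaches its confirmation depth, the block-connection path above inserts the channel's real SCID into short_to_chan_info as a (counterparty_node_id, channel_id) pair, asserting that any existing entry already points at the same pair. A small sketch of that insert-and-assert with stand-in types:

use std::collections::HashMap;

type NodeId = String;
type ChannelId = [u8; 32];

fn record_real_scid(
    short_to_chan_info: &mut HashMap<u64, (NodeId, ChannelId)>,
    real_scid: u64, counterparty_node_id: NodeId, channel_id: ChannelId,
) {
    let prev = short_to_chan_info.insert(real_scid, (counterparty_node_id.clone(), channel_id));
    // Re-inserting the same pair is harmless; an SCID that previously pointed at a
    // different channel indicates a bug, so the code above asserts on it.
    assert!(prev.is_none() || prev.unwrap() == (counterparty_node_id, channel_id),
        "SCIDs should never collide");
}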
- let scid_insert = short_to_chan_info.insert(real_scid, (channel.get_counterparty_node_id(), channel.channel_id())); - assert!(scid_insert.is_none() || scid_insert.unwrap() == (channel.get_counterparty_node_id(), channel.channel_id()), - "SCIDs should never collide - ensure you weren't behind by a full {} blocks when creating channels", - fake_scid::MAX_SCID_BLOCKS_FROM_NOW); + if channel.is_our_channel_ready() { + if let Some(real_scid) = channel.get_short_channel_id() { + // If we sent a 0conf channel_ready, and now have an SCID, we add it + // to the short_to_chan_info map here. Note that we check whether we + // can relay using the real SCID at relay-time (i.e. + // enforce option_scid_alias then), and if the funding tx is ever + // un-confirmed we force-close the channel, ensuring short_to_chan_info + // is always consistent. + let scid_insert = short_to_chan_info.insert(real_scid, (channel.get_counterparty_node_id(), channel.channel_id())); + assert!(scid_insert.is_none() || scid_insert.unwrap() == (channel.get_counterparty_node_id(), channel.channel_id()), + "SCIDs should never collide - ensure you weren't behind by a full {} blocks when creating channels", + fake_scid::MAX_SCID_BLOCKS_FROM_NOW); + } } - } - } else if let Err(reason) = res { - update_maps_on_chan_removal!(self, short_to_chan_info, channel); - // It looks like our counterparty went on-chain or funding transaction was - // reorged out of the main chain. Close the channel. - failed_channels.push(channel.force_shutdown(true)); - if let Ok(update) = self.get_channel_update_for_broadcast(&channel) { - pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { - msg: update + } else if let Err(reason) = res { + update_maps_on_chan_removal!(self, short_to_chan_info, channel); + // It looks like our counterparty went on-chain or funding transaction was + // reorged out of the main chain. Close the channel. + failed_channels.push(channel.force_shutdown(true)); + if let Ok(update) = self.get_channel_update_for_broadcast(&channel) { + pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + msg: update + }); + } + let reason_message = format!("{}", reason); + self.issue_channel_close_events(channel, reason); + pending_msg_events.push(events::MessageSendEvent::HandleError { + node_id: channel.get_counterparty_node_id(), + action: msgs::ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage { + channel_id: channel.channel_id(), + data: reason_message, + } }, }); + return false; } - let reason_message = format!("{}", reason); - self.issue_channel_close_events(channel, reason); - pending_msg_events.push(events::MessageSendEvent::HandleError { - node_id: channel.get_counterparty_node_id(), - action: msgs::ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage { - channel_id: channel.channel_id(), - data: reason_message, - } }, - }); - return false; - } - true - }); + true + }); + } if let Some(height) = height_opt { channel_state.claimable_htlcs.retain(|payment_hash, (_, htlcs)| { @@ -5905,19 +6165,24 @@ impl let short_to_chan_info = &mut channel_state.short_to_chan_info; log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates. 
We believe we {} make future connections to this peer.", log_pubkey!(counterparty_node_id), if no_connection_possible { "cannot" } else { "can" }); - channel_state.by_id.retain(|_, chan| { - if chan.get_counterparty_node_id() == *counterparty_node_id { - chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger); - if chan.is_shutdown() { - update_maps_on_chan_removal!(self, short_to_chan_info, chan); - self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer); - return false; - } else { - no_channels_remain = false; + let per_peer_state = self.per_peer_state.read().unwrap(); + for (_cp_id, peer_state_mutex) in per_peer_state.iter() { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + peer_state.channel_by_id.retain(|_, chan| { + if chan.get_counterparty_node_id() == *counterparty_node_id { + chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger); + if chan.is_shutdown() { + update_maps_on_chan_removal!(self, short_to_chan_info, chan); + self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer); + return false; + } else { + no_channels_remain = false; + } } - } - true - }); + true + }); + } pending_msg_events.retain(|msg| { match msg { &events::MessageSendEvent::SendAcceptChannel { ref node_id, .. } => node_id != counterparty_node_id, @@ -5962,6 +6227,7 @@ impl match peer_state_lock.entry(counterparty_node_id.clone()) { hash_map::Entry::Vacant(e) => { e.insert(Mutex::new(PeerState { + channel_by_id: HashMap::new(), latest_features: init_msg.features.clone(), })); }, @@ -5974,23 +6240,28 @@ impl let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; let pending_msg_events = &mut channel_state.pending_msg_events; - channel_state.by_id.retain(|_, chan| { - if chan.get_counterparty_node_id() == *counterparty_node_id { - if !chan.have_received_message() { - // If we created this (outbound) channel while we were disconnected from the - // peer we probably failed to send the open_channel message, which is now - // lost. We can't have had anything pending related to this channel, so we just - // drop it. - false - } else { - pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish { - node_id: chan.get_counterparty_node_id(), - msg: chan.get_channel_reestablish(&self.logger), - }); - true - } - } else { true } - }); + let per_peer_state = self.per_peer_state.read().unwrap(); + for (_cp_id, peer_state_mutex) in per_peer_state.iter() { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + peer_state.channel_by_id.retain(|_, chan| { + if chan.get_counterparty_node_id() == *counterparty_node_id { + if !chan.have_received_message() { + // If we created this (outbound) channel while we were disconnected from the + // peer we probably failed to send the open_channel message, which is now + // lost. We can't have had anything pending related to this channel, so we just + // drop it. + false + } else { + pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish { + node_id: chan.get_counterparty_node_id(), + msg: chan.get_channel_reestablish(&self.logger), + }); + true + } + } else { true } + }); + } //TODO: Also re-broadcast announcement_signatures } @@ -6008,17 +6279,19 @@ impl { // First check if we can advance the channel type and try again. 
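peer_connected above seeds each newly tracked peer with an empty channel_by_id next to its feature set, so later message handlers can rely on a bucket existing for any connected peer. A sketch of that initialization; note the occupied arm (refreshing features on reconnect) is an assumption about code outside the quoted hunk:

use std::collections::{hash_map, HashMap};
use std::sync::{Mutex, RwLock};

type NodeId = String;
type ChannelId = [u8; 32];

struct PeerState {
    channel_by_id: HashMap<ChannelId, ()>,
    latest_features: Vec<u8>, // stand-in for the real InitFeatures
}

fn on_peer_connected(
    per_peer_state: &RwLock<HashMap<NodeId, Mutex<PeerState>>>,
    counterparty_node_id: NodeId, features: Vec<u8>,
) {
    let mut peers = per_peer_state.write().unwrap();
    match peers.entry(counterparty_node_id) {
        hash_map::Entry::Vacant(e) => {
            e.insert(Mutex::new(PeerState {
                channel_by_id: HashMap::new(),
                latest_features: features,
            }));
        },
        hash_map::Entry::Occupied(mut e) => {
            // Assumed behavior on reconnection: keep existing channels, refresh features.
            e.get_mut().get_mut().unwrap().latest_features = features;
        },
    }
}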
let mut channel_state = self.channel_state.lock().unwrap(); - if let Some(chan) = channel_state.by_id.get_mut(&msg.channel_id) { - if chan.get_counterparty_node_id() != *counterparty_node_id { - return; - } - if let Ok(msg) = chan.maybe_handle_error_without_close(self.genesis_hash) { - channel_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel { - node_id: *counterparty_node_id, - msg, - }); - return; - } + let per_peer_state = self.per_peer_state.write().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + if let Some(chan) = peer_state.channel_by_id.get_mut(&msg.channel_id) { + if let Ok(msg) = chan.maybe_handle_error_without_close(self.genesis_hash) { + channel_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel { + node_id: *counterparty_node_id, + msg, + }); + return; + } + } else { return; } } } @@ -6457,45 +6730,60 @@ impl Writeable f } let channel_state = self.channel_state.lock().unwrap(); - let mut unfunded_channels = 0; - for (_, channel) in channel_state.by_id.iter() { - if !channel.is_funding_initiated() { - unfunded_channels += 1; + let mut htlc_purposes: Vec<&events::PaymentPurpose> = Vec::new(); + { + let per_peer_state = self.per_peer_state.write().unwrap(); + let mut unfunded_channels = 0; + let mut number_of_channels = 0; + for (_, peer_state_mutex) in per_peer_state.iter() { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + number_of_channels += peer_state.channel_by_id.len(); + for (_, channel) in peer_state.channel_by_id.iter() { + if !channel.is_funding_initiated() { + unfunded_channels += 1; + } + } } - } - ((channel_state.by_id.len() - unfunded_channels) as u64).write(writer)?; - for (_, channel) in channel_state.by_id.iter() { - if channel.is_funding_initiated() { - channel.write(writer)?; + + ((number_of_channels - unfunded_channels) as u64).write(writer)?; + + for (_, peer_state_mutex) in per_peer_state.iter() { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + for (_, channel) in peer_state.channel_by_id.iter() { + if channel.is_funding_initiated() { + channel.write(writer)?; + } + } } - } - (channel_state.forward_htlcs.len() as u64).write(writer)?; - for (short_channel_id, pending_forwards) in channel_state.forward_htlcs.iter() { - short_channel_id.write(writer)?; - (pending_forwards.len() as u64).write(writer)?; - for forward in pending_forwards { - forward.write(writer)?; + (channel_state.forward_htlcs.len() as u64).write(writer)?; + for (short_channel_id, pending_forwards) in channel_state.forward_htlcs.iter() { + short_channel_id.write(writer)?; + (pending_forwards.len() as u64).write(writer)?; + for forward in pending_forwards { + forward.write(writer)?; + } } - } - let mut htlc_purposes: Vec<&events::PaymentPurpose> = Vec::new(); - (channel_state.claimable_htlcs.len() as u64).write(writer)?; - for (payment_hash, (purpose, previous_hops)) in channel_state.claimable_htlcs.iter() { - payment_hash.write(writer)?; - (previous_hops.len() as u64).write(writer)?; - for htlc in previous_hops.iter() { - htlc.write(writer)?; + (channel_state.claimable_htlcs.len() as u64).write(writer)?; + for (payment_hash, (purpose, previous_hops)) in channel_state.claimable_htlcs.iter() { + payment_hash.write(writer)?; + (previous_hops.len() as u64).write(writer)?; + for htlc in 
previous_hops.iter() { + htlc.write(writer)?; + } + htlc_purposes.push(purpose); } - htlc_purposes.push(purpose); - } - let per_peer_state = self.per_peer_state.write().unwrap(); - (per_peer_state.len() as u64).write(writer)?; - for (peer_pubkey, peer_state_mutex) in per_peer_state.iter() { - peer_pubkey.write(writer)?; - let peer_state = peer_state_mutex.lock().unwrap(); - peer_state.latest_features.write(writer)?; + (per_peer_state.len() as u64).write(writer)?; + for (peer_pubkey, peer_state_mutex) in per_peer_state.iter() { + peer_pubkey.write(writer)?; + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + peer_state.latest_features.write(writer)?; + } } let pending_inbound_payments = self.pending_inbound_payments.lock().unwrap(); @@ -6707,8 +6995,8 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> let channel_count: u64 = Readable::read(reader)?; let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128)); - let mut by_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128)); let mut id_to_peer = HashMap::with_capacity(cmp::min(channel_count as usize, 128)); + let mut peer_channels: HashMap>> = HashMap::with_capacity(cmp::min(channel_count as usize, 128)); let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128)); let mut channel_closures = Vec::new(); for _ in 0..channel_count { @@ -6751,10 +7039,18 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> if let Some(short_channel_id) = channel.get_short_channel_id() { short_to_chan_info.insert(short_channel_id, (channel.get_counterparty_node_id(), channel.channel_id())); } - if channel.is_funding_initiated() { - id_to_peer.insert(channel.channel_id(), channel.get_counterparty_node_id()); + id_to_peer.insert(channel.channel_id(), channel.get_counterparty_node_id()); + match peer_channels.entry(channel.get_counterparty_node_id()) { + hash_map::Entry::Occupied(mut entry) => { + let by_id_map = entry.get_mut(); + by_id_map.insert(channel.channel_id(), channel); + }, + hash_map::Entry::Vacant(entry) => { + let mut by_id_map = HashMap::new(); + by_id_map.insert(channel.channel_id(), channel); + entry.insert(by_id_map); + } } - by_id.insert(channel.channel_id(), channel); } } else { log_error!(args.logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", log_bytes!(channel.channel_id())); @@ -6799,10 +7095,11 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> } let peer_count: u64 = Readable::read(reader)?; - let mut per_peer_state = HashMap::with_capacity(cmp::min(peer_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex)>())); + let mut per_peer_state = HashMap::with_capacity(cmp::min(peer_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex>)>())); for _ in 0..peer_count { let peer_pubkey = Readable::read(reader)?; let peer_state = PeerState { + channel_by_id: peer_channels.remove(&peer_pubkey).unwrap_or(HashMap::new()), latest_features: Readable::read(reader)?, }; per_peer_state.insert(peer_pubkey, Mutex::new(peer_state)); @@ -6895,7 +7192,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> // We only rebuild the pending payments map if we were most recently serialized by // 0.0.102+ for (_, monitor) in args.channel_monitors.iter() { - if by_id.get(&monitor.get_funding_txo().0.to_channel_id()).is_none() { + if id_to_peer.get(&monitor.get_funding_txo().0.to_channel_id()).is_none() { 
for (htlc_source, htlc) in monitor.get_pending_outbound_htlcs() { if let HTLCSource::OutboundRoute { payment_id, session_priv, path, payment_secret, .. } = htlc_source { if path.is_empty() { @@ -6995,28 +7292,32 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> } let mut outbound_scid_aliases = HashSet::new(); - for (chan_id, chan) in by_id.iter_mut() { - if chan.outbound_scid_alias() == 0 { - let mut outbound_scid_alias; - loop { - outbound_scid_alias = fake_scid::Namespace::OutboundAlias - .get_fake_scid(best_block_height, &genesis_hash, fake_scid_rand_bytes.as_ref().unwrap(), &args.keys_manager); - if outbound_scid_aliases.insert(outbound_scid_alias) { break; } - } - chan.set_outbound_scid_alias(outbound_scid_alias); - } else if !outbound_scid_aliases.insert(chan.outbound_scid_alias()) { - // Note that in rare cases its possible to hit this while reading an older - // channel if we just happened to pick a colliding outbound alias above. - log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.outbound_scid_alias()); - return Err(DecodeError::InvalidValue); - } - if chan.is_usable() { - if short_to_chan_info.insert(chan.outbound_scid_alias(), (chan.get_counterparty_node_id(), *chan_id)).is_some() { + for (_peer_node_id, peer_state_mutex) in per_peer_state.iter_mut() { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + for (chan_id, chan) in peer_state.channel_by_id.iter_mut() { + if chan.outbound_scid_alias() == 0 { + let mut outbound_scid_alias; + loop { + outbound_scid_alias = fake_scid::Namespace::OutboundAlias + .get_fake_scid(best_block_height, &genesis_hash, fake_scid_rand_bytes.as_ref().unwrap(), &args.keys_manager); + if outbound_scid_aliases.insert(outbound_scid_alias) { break; } + } + chan.set_outbound_scid_alias(outbound_scid_alias); + } else if !outbound_scid_aliases.insert(chan.outbound_scid_alias()) { // Note that in rare cases its possible to hit this while reading an older // channel if we just happened to pick a colliding outbound alias above. log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.outbound_scid_alias()); return Err(DecodeError::InvalidValue); } + if chan.is_usable() { + if short_to_chan_info.insert(chan.outbound_scid_alias(), (chan.get_counterparty_node_id(), *chan_id)).is_some() { + // Note that in rare cases its possible to hit this while reading an older + // channel if we just happened to pick a colliding outbound alias above. + log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.outbound_scid_alias()); + return Err(DecodeError::InvalidValue); + } + } } } @@ -7044,8 +7345,13 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> // without the new monitor persisted - we'll end up right back here on // restart. 
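On the read side above, channels deserialized from disk are indexed in id_to_peer and bucketed by counterparty before per_peer_state is rebuilt with each peer's channel_by_id. A sketch of that grouping step over a simplified on-disk record:

use std::collections::{hash_map, HashMap};

type NodeId = String;
type ChannelId = [u8; 32];

struct ReadChannel { counterparty: NodeId, channel_id: ChannelId }

fn bucket_channels(channels: Vec<ReadChannel>)
    -> (HashMap<ChannelId, NodeId>, HashMap<NodeId, HashMap<ChannelId, ReadChannel>>)
{
    let mut id_to_peer = HashMap::new();
    let mut peer_channels: HashMap<NodeId, HashMap<ChannelId, ReadChannel>> = HashMap::new();
    for channel in channels {
        // channel_id -> counterparty, used later when only a channel id (or funding
        // outpoint) is known, e.g. in the monitor-event and claim-replay paths.
        id_to_peer.insert(channel.channel_id, channel.counterparty.clone());
        match peer_channels.entry(channel.counterparty.clone()) {
            hash_map::Entry::Occupied(mut entry) => {
                entry.get_mut().insert(channel.channel_id, channel);
            },
            hash_map::Entry::Vacant(entry) => {
                let mut by_id = HashMap::new();
                by_id.insert(channel.channel_id, channel);
                entry.insert(by_id);
            },
        }
    }
    (id_to_peer, peer_channels)
}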
let previous_channel_id = claimable_htlc.prev_hop.outpoint.to_channel_id(); - if let Some(channel) = by_id.get_mut(&previous_channel_id) { - channel.claim_htlc_while_disconnected_dropping_mon_update(claimable_htlc.prev_hop.htlc_id, payment_preimage, &args.logger); + if let Some(peer_node_id) = id_to_peer.get(&previous_channel_id){ + let peer_state_mutex = per_peer_state.get(peer_node_id).unwrap(); + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + if let Some(channel) = peer_state.channel_by_id.get_mut(&previous_channel_id) { + channel.claim_htlc_while_disconnected_dropping_mon_update(claimable_htlc.prev_hop.htlc_id, payment_preimage, &args.logger); + } } if let Some(previous_hop_monitor) = args.channel_monitors.get(&claimable_htlc.prev_hop.outpoint) { previous_hop_monitor.provide_payment_preimage(&payment_hash, &payment_preimage, &args.tx_broadcaster, &args.fee_estimator, &args.logger); @@ -7069,7 +7375,6 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> best_block: RwLock::new(BestBlock::new(best_block_hash, best_block_height)), channel_state: Mutex::new(ChannelHolder { - by_id, short_to_chan_info, forward_htlcs, claimable_htlcs, diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 5eda6166f96..a32b10f0864 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -510,10 +510,9 @@ macro_rules! get_htlc_update_msgs { #[cfg(test)] macro_rules! get_channel_ref { - ($node: expr, $lock: ident, $channel_id: expr) => { + ($node: expr, $peer_state_lock: ident, $channel_id: expr) => { { - $lock = $node.node.channel_state.lock().unwrap(); - $lock.by_id.get_mut(&$channel_id).unwrap() + $peer_state_lock.channel_by_id.get_mut(&$channel_id).unwrap() } } } @@ -522,8 +521,9 @@ macro_rules! get_channel_ref { macro_rules! get_feerate { ($node: expr, $counterparty_node: expr, $channel_id: expr) => { { - let mut lock; - let chan = get_channel_ref!($node, lock, $channel_id); + let per_peer_lock = $node.node.per_peer_state.write().unwrap(); + let mut peer_state_lock = per_peer_lock.get(&$counterparty_node.node.get_our_node_id()).unwrap().lock().unwrap(); + let chan = get_channel_ref!($node, peer_state_lock, $channel_id); chan.get_feerate() } } @@ -533,8 +533,9 @@ macro_rules! get_feerate { macro_rules! 
get_opt_anchors { ($node: expr, $counterparty_node: expr, $channel_id: expr) => { { - let mut lock; - let chan = get_channel_ref!($node, lock, $channel_id); + let per_peer_lock = $node.node.per_peer_state.write().unwrap(); + let mut peer_state_lock = per_peer_lock.get(&$counterparty_node.node.get_our_node_id()).unwrap().lock().unwrap(); + let chan = get_channel_ref!($node, peer_state_lock, $channel_id); chan.opt_anchors() } } @@ -1689,8 +1690,8 @@ pub fn do_claim_payment_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, ($node: expr, $prev_node: expr, $next_node: expr, $new_msgs: expr) => { { $node.node.handle_update_fulfill_htlc(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0); - let fee = $node.node.channel_state.lock().unwrap() - .by_id.get(&next_msgs.as_ref().unwrap().0.channel_id).unwrap() + let fee = $node.node.per_peer_state.read().unwrap().get(&$prev_node.node.get_our_node_id()) + .unwrap().lock().unwrap().channel_by_id.get(&next_msgs.as_ref().unwrap().0.channel_id).unwrap() .config.options.forwarding_fee_base_msat; expect_payment_forwarded!($node, $next_node, $prev_node, Some(fee as u64), false, false); expected_total_fee_msat += fee as u64; @@ -2182,8 +2183,9 @@ pub fn get_announce_close_broadcast_events<'a, 'b, 'c>(nodes: &Vec {{ - let chan_lock = $node.node.channel_state.lock().unwrap(); - let chan = chan_lock.by_id.get(&$channel_id).unwrap(); + let peer_state_lock = $node.node.per_peer_state.read().unwrap(); + let chan_lock = peer_state_lock.get(&$counterparty_node.node.get_our_node_id()).unwrap().lock().unwrap(); + let chan = chan_lock.channel_by_id.get(&$channel_id).unwrap(); chan.get_value_stat() }} } diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 034b5fd9c87..26a1ad2ebd4 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -169,8 +169,11 @@ fn do_test_counterparty_no_reserve(send_from_initiator: bool) { } nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_channel_message); { - let mut lock; - let mut chan = get_channel_ref!(if send_from_initiator { &nodes[1] } else { &nodes[0] }, lock, temp_channel_id); + let sender_node = if send_from_initiator { &nodes[1] } else { &nodes[0] }; + let counterparty_node = if send_from_initiator { &nodes[0] } else { &nodes[1] }; + let per_peer_lock = sender_node.node.per_peer_state.write().unwrap(); + let mut peer_state_lock = per_peer_lock.get(&counterparty_node.node.get_our_node_id()).unwrap().lock().unwrap(); + let mut chan = get_channel_ref!(sender_node, peer_state_lock, temp_channel_id); chan.holder_selected_channel_reserve_satoshis = 0; chan.holder_max_htlc_value_in_flight_msat = 100_000_000; } @@ -678,16 +681,18 @@ fn test_update_fee_that_funder_cannot_afford() { // Get the EnforcingSigner for each channel, which will be used to (1) get the keys // needed to sign the new commitment tx and (2) sign the new commitment tx. 
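The test-utility and functional-test hunks above replace the old single channel_state lock with the chain per_peer_state.read() → per-peer Mutex → channel_by_id, spelled out at each call site. For readability, the same lookup as a small hypothetical helper (not something the patch itself adds):

use std::collections::HashMap;
use std::sync::{Mutex, RwLock};

type NodeId = String;
type ChannelId = [u8; 32];

struct PeerState { channel_by_id: HashMap<ChannelId, u64 /* e.g. a feerate or dust limit */> }

// Mirrors what get_feerate!/get_opt_anchors! and the direct test lookups now do:
// read the outer map, lock the counterparty's bucket, read one field off the channel.
fn read_channel_field(
    per_peer_state: &RwLock<HashMap<NodeId, Mutex<PeerState>>>,
    counterparty_node_id: &NodeId, channel_id: &ChannelId,
) -> u64 {
    let per_peer_lock = per_peer_state.read().unwrap();
    let peer_state = per_peer_lock.get(counterparty_node_id).unwrap().lock().unwrap();
    *peer_state.channel_by_id.get(channel_id).unwrap()
}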
let (local_revocation_basepoint, local_htlc_basepoint, local_funding) = { - let chan_lock = nodes[0].node.channel_state.lock().unwrap(); - let local_chan = chan_lock.by_id.get(&chan.2).unwrap(); + let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); + let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); + let local_chan = chan_lock.channel_by_id.get(&chan.2).unwrap(); let chan_signer = local_chan.get_signer(); let pubkeys = chan_signer.pubkeys(); (pubkeys.revocation_basepoint, pubkeys.htlc_basepoint, pubkeys.funding_pubkey) }; let (remote_delayed_payment_basepoint, remote_htlc_basepoint,remote_point, remote_funding) = { - let chan_lock = nodes[1].node.channel_state.lock().unwrap(); - let remote_chan = chan_lock.by_id.get(&chan.2).unwrap(); + let per_peer_state = nodes[1].node.per_peer_state.read().unwrap(); + let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap(); + let remote_chan = chan_lock.channel_by_id.get(&chan.2).unwrap(); let chan_signer = remote_chan.get_signer(); let pubkeys = chan_signer.pubkeys(); (pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint, @@ -700,8 +705,9 @@ fn test_update_fee_that_funder_cannot_afford() { &remote_htlc_basepoint, &local_revocation_basepoint, &local_htlc_basepoint).unwrap(); let res = { - let local_chan_lock = nodes[0].node.channel_state.lock().unwrap(); - let local_chan = local_chan_lock.by_id.get(&chan.2).unwrap(); + let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); + let local_chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); + let local_chan = local_chan_lock.channel_by_id.get(&chan.2).unwrap(); let local_chan_signer = local_chan.get_signer(); let mut htlcs: Vec<(HTLCOutputInCommitment, ())> = vec![]; let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data( @@ -1415,8 +1421,9 @@ fn test_fee_spike_violation_fails_htlc() { // Get the EnforcingSigner for each channel, which will be used to (1) get the keys // needed to sign the new commitment tx and (2) sign the new commitment tx. 
let (local_revocation_basepoint, local_htlc_basepoint, local_secret, next_local_point, local_funding) = { - let chan_lock = nodes[0].node.channel_state.lock().unwrap(); - let local_chan = chan_lock.by_id.get(&chan.2).unwrap(); + let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); + let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); + let local_chan = chan_lock.channel_by_id.get(&chan.2).unwrap(); let chan_signer = local_chan.get_signer(); // Make the signer believe we validated another commitment, so we can release the secret chan_signer.get_enforcement_state().last_holder_commitment -= 1; @@ -1428,8 +1435,9 @@ fn test_fee_spike_violation_fails_htlc() { chan_signer.pubkeys().funding_pubkey) }; let (remote_delayed_payment_basepoint, remote_htlc_basepoint, remote_point, remote_funding) = { - let chan_lock = nodes[1].node.channel_state.lock().unwrap(); - let remote_chan = chan_lock.by_id.get(&chan.2).unwrap(); + let per_peer_state = nodes[1].node.per_peer_state.read().unwrap(); + let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap(); + let remote_chan = chan_lock.channel_by_id.get(&chan.2).unwrap(); let chan_signer = remote_chan.get_signer(); let pubkeys = chan_signer.pubkeys(); (pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint, @@ -1456,8 +1464,9 @@ fn test_fee_spike_violation_fails_htlc() { let commitment_number = INITIAL_COMMITMENT_NUMBER - 1; let res = { - let local_chan_lock = nodes[0].node.channel_state.lock().unwrap(); - let local_chan = local_chan_lock.by_id.get(&chan.2).unwrap(); + let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); + let local_chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); + let local_chan = local_chan_lock.channel_by_id.get(&chan.2).unwrap(); let local_chan_signer = local_chan.get_signer(); let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data( commitment_number, @@ -3098,7 +3107,8 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use let value = if use_dust { // The dust limit applied to HTLC outputs considers the fee of the HTLC transaction as // well, so HTLCs at exactly the dust limit will not be included in commitment txn. 
- nodes[2].node.channel_state.lock().unwrap().by_id.get(&chan_2.2).unwrap().holder_dust_limit_satoshis * 1000 + nodes[2].node.per_peer_state.read().unwrap().get(&nodes[1].node.get_our_node_id()) + .unwrap().lock().unwrap().channel_by_id.get(&chan_2.2).unwrap().holder_dust_limit_satoshis * 1000 } else { 3000000 }; let (_, first_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value); @@ -5503,7 +5513,8 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno send_payment(&nodes[1], &[&nodes[2], &nodes[3], &nodes[5]], 500000); assert_eq!(get_local_commitment_txn!(nodes[3], chan.2)[0].output.len(), 2); - let ds_dust_limit = nodes[3].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().holder_dust_limit_satoshis; + let ds_dust_limit = nodes[3].node.per_peer_state.read().unwrap().get(&nodes[2].node.get_our_node_id()) + .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().holder_dust_limit_satoshis; // 0th HTLC: let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee // 1st HTLC: @@ -6582,7 +6593,8 @@ fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increment() let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0, InitFeatures::known(), InitFeatures::known()); - let max_accepted_htlcs = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().counterparty_max_accepted_htlcs as u64; + let max_accepted_htlcs = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id()) + .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().counterparty_max_accepted_htlcs as u64; for i in 0..max_accepted_htlcs { let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000); @@ -6651,8 +6663,9 @@ fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() { let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known()); let htlc_minimum_msat: u64; { - let chan_lock = nodes[0].node.channel_state.lock().unwrap(); - let channel = chan_lock.by_id.get(&chan.2).unwrap(); + let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); + let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); + let channel = chan_lock.channel_by_id.get(&chan.2).unwrap(); htlc_minimum_msat = channel.get_holder_htlc_minimum_msat(); } @@ -7151,7 +7164,8 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) { let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan =create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - let bs_dust_limit = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().holder_dust_limit_satoshis; + let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id()) + .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().holder_dust_limit_satoshis; // We route 2 dust-HTLCs between A and B let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000); @@ -7242,7 +7256,8 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) { let nodes = create_network(3, &node_cfgs, 
&node_chanmgrs); let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - let bs_dust_limit = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().holder_dust_limit_satoshis; + let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id()) + .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().holder_dust_limit_satoshis; let (_payment_preimage_1, dust_hash, _payment_secret_1) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000); let (_payment_preimage_2, non_dust_hash, _payment_secret_2) = route_payment(&nodes[0], &[&nodes[1]], 1000000); @@ -8087,8 +8102,9 @@ fn test_counterparty_raa_skip_no_crash() { let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2; - let mut guard = nodes[0].node.channel_state.lock().unwrap(); - let keys = guard.by_id.get_mut(&channel_id).unwrap().get_signer(); + let mut per_peer_state = nodes[0].node.per_peer_state.write().unwrap(); + let mut guard = per_peer_state.get_mut(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); + let keys = guard.channel_by_id.get_mut(&channel_id).unwrap().get_signer(); const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1; @@ -8445,8 +8461,9 @@ fn test_reject_funding_before_inbound_channel_accepted() { // `handle_accept_channel`, which is required in order for `create_funding_transaction` to // succeed when `nodes[0]` is passed to it. { - let mut lock; - let channel = get_channel_ref!(&nodes[1], lock, temp_channel_id); + let per_peer_lock = nodes[1].node.per_peer_state.write().unwrap(); + let mut peer_state_lock = per_peer_lock.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap(); + let channel = get_channel_ref!(&nodes[1], peer_state_lock, temp_channel_id); let accept_chan_msg = channel.get_accept_channel_message(); nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_chan_msg); } @@ -8788,7 +8805,9 @@ fn test_update_err_monitor_lockdown() { let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert_eq!(updates.update_fulfill_htlcs.len(), 1); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); - if let Some(ref mut channel) = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan_1.2) { + if let Some(ref mut channel) = nodes[0].node.per_peer_state.write().unwrap().get_mut(&nodes[1].node.get_our_node_id()) + .unwrap().lock().unwrap().channel_by_id.get_mut(&chan_1.2) + { if let Ok((_, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) { if let Err(_) = watchtower.chain_monitor.update_channel(outpoint, update.clone()) {} else { assert!(false); } if let Ok(_) = nodes[0].chain_monitor.update_channel(outpoint, update) {} else { assert!(false); } @@ -8879,7 +8898,9 @@ fn test_concurrent_monitor_claim() { let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert_eq!(updates.update_add_htlcs.len(), 1); nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &updates.update_add_htlcs[0]); - if let Some(ref mut channel) = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan_1.2) { + if let Some(ref mut channel) = nodes[0].node.per_peer_state.write().unwrap().get_mut(&nodes[1].node.get_our_node_id()) + 
.unwrap().lock().unwrap().channel_by_id.get_mut(&chan_1.2) + { if let Ok((_, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) { // Watchtower Alice should already have seen the block and reject the update if let Err(_) = watchtower_alice.chain_monitor.update_channel(outpoint, update.clone()) {} else { assert!(false); } @@ -9287,12 +9308,13 @@ fn test_duplicate_chan_id() { create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42); // Get and check the FundingGenerationReady event let funding_created = { - let mut a_channel_lock = nodes[0].node.channel_state.lock().unwrap(); + let mut per_peer_state = nodes[0].node.per_peer_state.write().unwrap(); + let mut a_channel_lock = per_peer_state.get_mut(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); // Once we call `get_outbound_funding_created` the channel has a duplicate channel_id as // another channel in the ChannelManager - an invalid state. Thus, we'd panic later when we // try to create another channel. Instead, we drop the channel entirely here (leaving the // channelmanager in a possibly nonsense state instead). - let mut as_chan = a_channel_lock.by_id.remove(&open_chan_2_msg.temporary_channel_id).unwrap(); + let mut as_chan = a_channel_lock.channel_by_id.remove(&open_chan_2_msg.temporary_channel_id).unwrap(); let logger = test_utils::TestLogger::new(); as_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).unwrap() }; @@ -10203,7 +10225,9 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42); if on_holder_tx { - if let Some(mut chan) = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&temporary_channel_id) { + if let Some(mut chan) = nodes[0].node.per_peer_state.write().unwrap().get_mut(&nodes[1].node.get_our_node_id()) + .unwrap().lock().unwrap().channel_by_id.get_mut(&temporary_channel_id) + { chan.holder_dust_limit_satoshis = 546; } } @@ -10220,8 +10244,9 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &as_update, &bs_update); let dust_buffer_feerate = { - let chan_lock = nodes[0].node.channel_state.lock().unwrap(); - let chan = chan_lock.by_id.get(&channel_id).unwrap(); + let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); + let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); + let chan = chan_lock.channel_by_id.get(&channel_id).unwrap(); chan.get_dust_buffer_feerate(None) as u64 }; let dust_outbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(opt_anchors) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000; diff --git a/lightning/src/ln/onion_route_tests.rs b/lightning/src/ln/onion_route_tests.rs index 1e340d4a15e..d3740691010 100644 --- a/lightning/src/ln/onion_route_tests.rs +++ b/lightning/src/ln/onion_route_tests.rs @@ -490,7 +490,9 @@ fn test_onion_failure() { Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent:true}), Some(short_channel_id)); let short_channel_id = channels[1].0.contents.short_channel_id; - let amt_to_forward = nodes[1].node.channel_state.lock().unwrap().by_id.get(&channels[1].2).unwrap().get_counterparty_htlc_minimum_msat() - 1; + let amt_to_forward = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[2].node.get_our_node_id()) + 
.unwrap().lock().unwrap().channel_by_id.get(&channels[1].2).unwrap() + .get_counterparty_htlc_minimum_msat() - 1; let mut bogus_route = route.clone(); let route_len = bogus_route.paths[0].len(); bogus_route.paths[0][route_len-1].fee_msat = amt_to_forward; diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index 935cb8f9ba4..b231dececbc 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -531,7 +531,8 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { // Update the fee on the middle hop to ensure PaymentSent events have the correct (retried) fee // and not the original fee. We also update node[1]'s relevant config as // do_claim_payment_along_route expects us to never overpay. - nodes[1].node.channel_state.lock().unwrap().by_id.get_mut(&chan_id_2).unwrap() + nodes[1].node.per_peer_state.write().unwrap().get_mut(&nodes[2].node.get_our_node_id()) + .unwrap().lock().unwrap().channel_by_id.get_mut(&chan_id_2).unwrap() .config.options.forwarding_fee_base_msat += 100_000; new_route.paths[0][0].fee_msat += 100_000; diff --git a/lightning/src/ln/reorg_tests.rs b/lightning/src/ln/reorg_tests.rs index 07b059d3981..667b0ca30d1 100644 --- a/lightning/src/ln/reorg_tests.rs +++ b/lightning/src/ln/reorg_tests.rs @@ -27,7 +27,6 @@ use bitcoin::hash_types::BlockHash; use bitcoin::secp256k1::Secp256k1; use prelude::*; -use core::mem; use ln::functional_test_utils::*; @@ -200,10 +199,13 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_ let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - let channel_state = nodes[0].node.channel_state.lock().unwrap(); - assert_eq!(channel_state.by_id.len(), 1); - assert_eq!(channel_state.short_to_chan_info.len(), 2); - mem::drop(channel_state); + { + let channel_state = nodes[0].node.channel_state.lock().unwrap(); + let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); + let peer_state = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); + assert_eq!(peer_state.channel_by_id.len(), 1); + assert_eq!(channel_state.short_to_chan_info.len(), 2); + } if !reorg_after_reload { if use_funding_unconfirmed { @@ -221,7 +223,9 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_ check_added_monitors!(nodes[1], 1); { let channel_state = nodes[0].node.channel_state.lock().unwrap(); - assert_eq!(channel_state.by_id.len(), 0); + let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); + let peer_state = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); + assert_eq!(peer_state.channel_by_id.len(), 0); assert_eq!(channel_state.short_to_chan_info.len(), 0); } } @@ -289,7 +293,9 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_ check_added_monitors!(nodes[1], 1); { let channel_state = nodes[0].node.channel_state.lock().unwrap(); - assert_eq!(channel_state.by_id.len(), 0); + let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); + let peer_state = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); + assert_eq!(peer_state.channel_by_id.len(), 0); assert_eq!(channel_state.short_to_chan_info.len(), 0); } } diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs index 3e9f7e343db..f3603adaea8 100644 --- a/lightning/src/ln/shutdown_tests.rs +++ b/lightning/src/ln/shutdown_tests.rs @@ -762,8 +762,9 @@ fn 
do_test_closing_signed_reinit_timeout(timeout_step: TimeoutStep) { // nodes[1] should happily accept and respond to. node_0_closing_signed.fee_range.as_mut().unwrap().max_fee_satoshis *= 10; { - let mut lock; - get_channel_ref!(nodes[0], lock, chan_id).closing_fee_limits.as_mut().unwrap().1 *= 10; + let per_peer_lock = nodes[0].node.per_peer_state.write().unwrap(); + let mut peer_state_lock = per_peer_lock.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); + get_channel_ref!(nodes[0], peer_state_lock, chan_id).closing_fee_limits.as_mut().unwrap().1 *= 10; } nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed); let node_1_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id()); From 519fcb4d3806125e50713191ebba16084f66a4d9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Tigerstr=C3=B6m?= <11711198+ViktorTigerstrom@users.noreply.github.com> Date: Fri, 10 Jun 2022 00:29:49 +0200 Subject: [PATCH 07/21] f - Store per peer: acquire locks inside get_channel_ref --- lightning/src/ln/chanmon_update_fail_tests.rs | 12 ++++++------ lightning/src/ln/functional_test_utils.rs | 16 +++++++++------- lightning/src/ln/functional_tests.rs | 12 ++++++------ lightning/src/ln/shutdown_tests.rs | 6 +++--- 4 files changed, 24 insertions(+), 22 deletions(-) diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index 0424624b467..094c9bc38d1 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -1413,12 +1413,12 @@ fn monitor_failed_no_reestablish_response() { let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2; { - let node_0_per_peer_lock = nodes[0].node.per_peer_state.write().unwrap(); - let mut node_0_peer_state_lock = node_0_per_peer_lock.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); - let node_1_per_peer_lock = nodes[1].node.per_peer_state.write().unwrap(); - let mut node_1_peer_state_lock = node_1_per_peer_lock.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap(); - get_channel_ref!(nodes[0], node_0_peer_state_lock, channel_id).announcement_sigs_state = AnnouncementSigsState::PeerReceived; - get_channel_ref!(nodes[1], node_1_peer_state_lock, channel_id).announcement_sigs_state = AnnouncementSigsState::PeerReceived; + let mut node_0_per_peer_lock; + let mut node_0_peer_state_lock; + let mut node_1_per_peer_lock; + let mut node_1_peer_state_lock; + get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, channel_id).announcement_sigs_state = AnnouncementSigsState::PeerReceived; + get_channel_ref!(nodes[1], nodes[0], node_1_per_peer_lock, node_1_peer_state_lock, channel_id).announcement_sigs_state = AnnouncementSigsState::PeerReceived; } // Route the payment and deliver the initial commitment_signed (with a monitor update failure diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index a32b10f0864..9ee19437e61 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -510,8 +510,10 @@ macro_rules! get_htlc_update_msgs { #[cfg(test)] macro_rules!
get_channel_ref { - ($node: expr, $peer_state_lock: ident, $channel_id: expr) => { + ($node: expr, $counterparty_node: expr, $per_peer_state_lock: ident, $peer_state_lock: ident, $channel_id: expr) => { { + $per_peer_state_lock = $node.node.per_peer_state.write().unwrap(); + $peer_state_lock = $per_peer_state_lock.get(&$counterparty_node.node.get_our_node_id()).unwrap().lock().unwrap(); $peer_state_lock.channel_by_id.get_mut(&$channel_id).unwrap() } } @@ -521,9 +523,9 @@ macro_rules! get_channel_ref { macro_rules! get_feerate { ($node: expr, $counterparty_node: expr, $channel_id: expr) => { { - let per_peer_lock = $node.node.per_peer_state.write().unwrap(); - let mut peer_state_lock = per_peer_lock.get(&$counterparty_node.node.get_our_node_id()).unwrap().lock().unwrap(); - let chan = get_channel_ref!($node, peer_state_lock, $channel_id); + let mut per_peer_state_lock; + let mut peer_state_lock; + let chan = get_channel_ref!($node, $counterparty_node, per_peer_state_lock, peer_state_lock, $channel_id); chan.get_feerate() } } @@ -533,9 +535,9 @@ macro_rules! get_feerate { macro_rules! get_opt_anchors { ($node: expr, $counterparty_node: expr, $channel_id: expr) => { { - let per_peer_lock = $node.node.per_peer_state.write().unwrap(); - let mut peer_state_lock = per_peer_lock.get(&$counterparty_node.node.get_our_node_id()).unwrap().lock().unwrap(); - let chan = get_channel_ref!($node, peer_state_lock, $channel_id); + let mut per_peer_state_lock; + let mut peer_state_lock; + let chan = get_channel_ref!($node, $counterparty_node, per_peer_state_lock, peer_state_lock, $channel_id); chan.opt_anchors() } } diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 26a1ad2ebd4..0806fa8cc78 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -171,9 +171,9 @@ fn do_test_counterparty_no_reserve(send_from_initiator: bool) { { let sender_node = if send_from_initiator { &nodes[1] } else { &nodes[0] }; let counterparty_node = if send_from_initiator { &nodes[0] } else { &nodes[1] }; - let per_peer_lock = sender_node.node.per_peer_state.write().unwrap(); - let mut peer_state_lock = per_peer_lock.get(&counterparty_node.node.get_our_node_id()).unwrap().lock().unwrap(); - let mut chan = get_channel_ref!(sender_node, peer_state_lock, temp_channel_id); + let mut sender_node_per_peer_lock; + let mut sender_node_peer_state_lock; + let mut chan = get_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id); chan.holder_selected_channel_reserve_satoshis = 0; chan.holder_max_htlc_value_in_flight_msat = 100_000_000; } @@ -8461,9 +8461,9 @@ fn test_reject_funding_before_inbound_channel_accepted() { // `handle_accept_channel`, which is required in order for `create_funding_transaction` to // succeed when `nodes[0]` is passed to it. 
{ - let per_peer_lock = nodes[1].node.per_peer_state.write().unwrap(); - let mut peer_state_lock = per_peer_lock.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap(); - let channel = get_channel_ref!(&nodes[1], peer_state_lock, temp_channel_id); + let mut node_1_per_peer_lock; + let mut node_1_peer_state_lock; + let channel = get_channel_ref!(&nodes[1], nodes[0], node_1_per_peer_lock, node_1_peer_state_lock, temp_channel_id); let accept_chan_msg = channel.get_accept_channel_message(); nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_chan_msg); } diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs index f3603adaea8..e1b7b20d45a 100644 --- a/lightning/src/ln/shutdown_tests.rs +++ b/lightning/src/ln/shutdown_tests.rs @@ -762,9 +762,9 @@ fn do_test_closing_signed_reinit_timeout(timeout_step: TimeoutStep) { // nodes[1] should happily accept and respond to. node_0_closing_signed.fee_range.as_mut().unwrap().max_fee_satoshis *= 10; { - let per_peer_lock = nodes[0].node.per_peer_state.write().unwrap(); - let mut peer_state_lock = per_peer_lock.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); - get_channel_ref!(nodes[0], peer_state_lock, chan_id).closing_fee_limits.as_mut().unwrap().1 *= 10; + let mut node_0_per_peer_lock; + let mut node_0_peer_state_lock; + get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_id).closing_fee_limits.as_mut().unwrap().1 *= 10; } nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed); let node_1_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id()); From c37b9847e9f943ba11a927f90db23a703ca21f61 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Tigerstr=C3=B6m?= <11711198+ViktorTigerstrom@users.noreply.github.com> Date: Mon, 6 Jun 2022 21:39:12 +0200 Subject: [PATCH 08/21] f - Make per_peer_state a FairRwLock --- lightning/src/ln/channelmanager.rs | 76 +++++++++++++++--------------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index a886cf6d2f4..91678673b9a 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -64,7 +64,7 @@ use prelude::*; use core::{cmp, mem}; use core::cell::RefCell; use io::Read; -use sync::{Arc, Condvar, Mutex, MutexGuard, RwLock, RwLockReadGuard}; +use sync::{Arc, Condvar, Mutex, MutexGuard, RwLock, RwLockReadGuard, FairRwLock}; use core::sync::atomic::{AtomicUsize, Ordering}; use core::time::Duration; use core::ops::Deref; @@ -795,9 +795,9 @@ pub struct ChannelManager>>>, + per_peer_state: FairRwLock>>>, #[cfg(any(test, feature = "_test_utils"))] - pub(super) per_peer_state: RwLock>>>, + pub(super) per_peer_state: FairRwLock>>>, pending_events: Mutex>, pending_background_events: Mutex>, @@ -1630,7 +1630,7 @@ impl ChannelMana last_node_announcement_serial: AtomicUsize::new(0), highest_seen_timestamp: AtomicUsize::new(0), - per_peer_state: RwLock::new(HashMap::new()), + per_peer_state: FairRwLock::new(HashMap::new()), pending_events: Mutex::new(Vec::new()), pending_background_events: Mutex::new(Vec::new()), @@ -1728,7 +1728,7 @@ impl ChannelMana let temporary_channel_id = channel.channel_id(); let mut channel_state = self.channel_state.lock().unwrap(); - let per_peer_state = self.per_peer_state.write().unwrap(); + let per_peer_state = self.per_peer_state.read().unwrap(); if let 
Some(peer_state_mutex) = per_peer_state.get(&their_network_key){ let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -1865,7 +1865,7 @@ impl ChannelMana let result: Result<(), _> = loop { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; - let per_peer_state = self.per_peer_state.write().unwrap(); + let per_peer_state = self.per_peer_state.read().unwrap(); if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -1986,7 +1986,7 @@ impl ChannelMana let mut chan = { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; - let per_peer_state = self.per_peer_state.write().unwrap(); + let per_peer_state = self.per_peer_state.read().unwrap(); if let Some(peer_state_mutex) = per_peer_state.get(peer_node_id) { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -2281,7 +2281,7 @@ impl ChannelMana Some((cp_id, id)) => Some((cp_id.clone(), id.clone())), }; let (chan_update_opt, forwardee_cltv_expiry_delta) = if let Some((counterparty_node_id, forwarding_id)) = forwarding_chan_info_opt { - let per_peer_state = self.per_peer_state.write().unwrap(); + let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(&counterparty_node_id).unwrap(); let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -2482,7 +2482,7 @@ impl ChannelMana } let channel_state = &mut *channel_lock; - let per_peer_state = self.per_peer_state.write().unwrap(); + let per_peer_state = self.per_peer_state.read().unwrap(); if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -2792,7 +2792,7 @@ impl ChannelMana ) -> Result<(), APIError> { let (chan, msg) = { let (res, chan) = { - let per_peer_state = self.per_peer_state.write().unwrap(); + let per_peer_state = self.per_peer_state.read().unwrap(); if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -2828,7 +2828,7 @@ impl ChannelMana node_id: chan.get_counterparty_node_id(), msg, }); - let per_peer_state = self.per_peer_state.write().unwrap(); + let per_peer_state = self.per_peer_state.read().unwrap(); let chan_id = chan.channel_id(); if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); @@ -3017,7 +3017,7 @@ impl ChannelMana { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; - let per_peer_state = self.per_peer_state.write().unwrap(); + let per_peer_state = self.per_peer_state.read().unwrap(); for (short_chan_id, mut pending_forwards) in channel_state.forward_htlcs.drain() { if short_chan_id != 0 { @@ -3729,7 +3729,7 @@ impl ChannelMana HTLCSource::PreviousHopData(HTLCPreviousHopData { .. 
}) => { let channel_state = self.channel_state.lock().unwrap(); let (failure_code, onion_failure_data) = { - let per_peer_state = self.per_peer_state.write().unwrap(); + let per_peer_state = self.per_peer_state.read().unwrap(); if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -4085,7 +4085,7 @@ impl ChannelMana } }; - let per_peer_state = self.per_peer_state.write().unwrap(); + let per_peer_state = self.per_peer_state.read().unwrap(); if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -4292,7 +4292,7 @@ impl ChannelMana if counterparty_node_id.is_none() { return } - let per_peer_state = self.per_peer_state.write().unwrap(); + let per_peer_state = self.per_peer_state.read().unwrap(); let mut peer_state_lock; let mut channel = { if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id.unwrap()) { @@ -4382,7 +4382,7 @@ impl ChannelMana let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; - let per_peer_state = self.per_peer_state.write().unwrap(); + let per_peer_state = self.per_peer_state.read().unwrap(); if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -4445,7 +4445,7 @@ impl ChannelMana }; let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; - let per_peer_state = self.per_peer_state.write().unwrap(); + let per_peer_state = self.per_peer_state.read().unwrap(); if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -4489,7 +4489,7 @@ impl ChannelMana let (value, output_script, user_id) = { let mut channel_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_lock; - let per_peer_state = self.per_peer_state.write().unwrap(); + let per_peer_state = self.per_peer_state.read().unwrap(); if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -4523,7 +4523,7 @@ impl ChannelMana let best_block = *self.best_block.read().unwrap(); let mut channel_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_lock; - let per_peer_state = self.per_peer_state.write().unwrap(); + let per_peer_state = self.per_peer_state.read().unwrap(); if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -4568,7 +4568,7 @@ impl ChannelMana } let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; - let peer_state_lock = self.per_peer_state.write().unwrap(); + let peer_state_lock = self.per_peer_state.read().unwrap(); let channel_id = funding_msg.channel_id; if let Some(peer_state_mutex) = peer_state_lock.get(counterparty_node_id) { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); @@ -4601,7 +4601,7 @@ impl ChannelMana let best_block = *self.best_block.read().unwrap(); let mut channel_lock = self.channel_state.lock().unwrap(); let channel_state = 
&mut *channel_lock; - let per_peer_state = self.per_peer_state.write().unwrap(); + let per_peer_state = self.per_peer_state.read().unwrap(); if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -4645,7 +4645,7 @@ impl ChannelMana fn internal_channel_ready(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReady) -> Result<(), MsgHandleErrInternal> { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; - let per_peer_state = self.per_peer_state.write().unwrap(); + let per_peer_state = self.per_peer_state.read().unwrap(); if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -4691,7 +4691,7 @@ impl ChannelMana let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; - let per_peer_state = self.per_peer_state.write().unwrap(); + let per_peer_state = self.per_peer_state.read().unwrap(); if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -4749,7 +4749,7 @@ impl ChannelMana let (tx, chan_option) = { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; - let per_peer_state = self.per_peer_state.write().unwrap(); + let per_peer_state = self.per_peer_state.read().unwrap(); if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -4809,7 +4809,7 @@ impl ChannelMana let (pending_forward_info, mut channel_state_lock) = self.decode_update_add_htlc_onion(msg); let channel_state = &mut *channel_state_lock; - let per_peer_state = self.per_peer_state.write().unwrap(); + let per_peer_state = self.per_peer_state.read().unwrap(); if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -4855,7 +4855,7 @@ impl ChannelMana let mut channel_lock = self.channel_state.lock().unwrap(); let (htlc_source, forwarded_htlc_value) = { let channel_state = &mut *channel_lock; - let per_peer_state = self.per_peer_state.write().unwrap(); + let per_peer_state = self.per_peer_state.read().unwrap(); if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -4879,7 +4879,7 @@ impl ChannelMana fn internal_update_fail_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), MsgHandleErrInternal> { let mut channel_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_lock; - let per_peer_state = self.per_peer_state.write().unwrap(); + let per_peer_state = self.per_peer_state.read().unwrap(); if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -4901,7 +4901,7 @@ impl ChannelMana fn internal_update_fail_malformed_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), MsgHandleErrInternal> { let mut channel_lock = 
self.channel_state.lock().unwrap(); let channel_state = &mut *channel_lock; - let per_peer_state = self.per_peer_state.write().unwrap(); + let per_peer_state = self.per_peer_state.read().unwrap(); if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -4927,7 +4927,7 @@ impl ChannelMana fn internal_commitment_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(), MsgHandleErrInternal> { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; - let per_peer_state = self.per_peer_state.write().unwrap(); + let per_peer_state = self.per_peer_state.read().unwrap(); if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -5019,7 +5019,7 @@ impl ChannelMana let res = loop { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; - let per_peer_state = self.per_peer_state.write().unwrap(); + let per_peer_state = self.per_peer_state.read().unwrap(); if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -5086,7 +5086,7 @@ impl ChannelMana fn internal_update_fee(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), MsgHandleErrInternal> { let mut channel_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_lock; - let per_peer_state = self.per_peer_state.write().unwrap(); + let per_peer_state = self.per_peer_state.read().unwrap(); if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -5109,7 +5109,7 @@ impl ChannelMana let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; - let per_peer_state = self.per_peer_state.write().unwrap(); + let per_peer_state = self.per_peer_state.read().unwrap(); if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -5149,7 +5149,7 @@ impl ChannelMana return Ok(NotifyOption::SkipPersist) } }; - let per_peer_state = self.per_peer_state.write().unwrap(); + let per_peer_state = self.per_peer_state.read().unwrap(); if let Some(peer_state_mutex) = per_peer_state.get(chan_counterparty_node_id) { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -5184,7 +5184,7 @@ impl ChannelMana let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; - let per_peer_state = self.per_peer_state.write().unwrap(); + let per_peer_state = self.per_peer_state.read().unwrap(); if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -5267,7 +5267,7 @@ impl ChannelMana id_to_peer.get(&funding_outpoint.to_channel_id()).cloned() }; if let Some(counterparty_node_id) = counterparty_node_id_opt { - let per_peer_state = self.per_peer_state.write().unwrap(); + let per_peer_state = self.per_peer_state.read().unwrap(); let 
pending_msg_events = &mut channel_state.pending_msg_events; if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); @@ -6279,7 +6279,7 @@ impl { // First check if we can advance the channel type and try again. let mut channel_state = self.channel_state.lock().unwrap(); - let per_peer_state = self.per_peer_state.write().unwrap(); + let per_peer_state = self.per_peer_state.read().unwrap(); if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -6732,7 +6732,7 @@ impl Writeable f let channel_state = self.channel_state.lock().unwrap(); let mut htlc_purposes: Vec<&events::PaymentPurpose> = Vec::new(); { - let per_peer_state = self.per_peer_state.write().unwrap(); + let per_peer_state = self.per_peer_state.read().unwrap(); let mut unfunded_channels = 0; let mut number_of_channels = 0; for (_, peer_state_mutex) in per_peer_state.iter() { @@ -7395,7 +7395,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> last_node_announcement_serial: AtomicUsize::new(last_node_announcement_serial as usize), highest_seen_timestamp: AtomicUsize::new(highest_seen_timestamp as usize), - per_peer_state: RwLock::new(per_peer_state), + per_peer_state: FairRwLock::new(per_peer_state), pending_events: Mutex::new(pending_events_read), pending_background_events: Mutex::new(pending_background_events_read), From f7b51d24cd55be85cf9554e70131aa85c96f702e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Tigerstr=C3=B6m?= <11711198+ViktorTigerstrom@users.noreply.github.com> Date: Tue, 7 Jun 2022 00:55:41 +0200 Subject: [PATCH 09/21] f - Remove unreachable branches that can be reached --- lightning/src/ln/channelmanager.rs | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 91678673b9a..1ccdff22364 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -2533,8 +2533,8 @@ impl ChannelMana }, None => { insert_outbound_payment!(); }, } - } else { unreachable!(); } - } else { unreachable!(); } + } else { return Err(APIError::ChannelUnavailable{err: format!("No such channel for the counterparty_node_id {}, as indicated by the short_to_id map", counterparty_node_id) })} + } else { return Err(APIError::ChannelUnavailable{err: format!("No such counterparty_node_id {}, as indicated by the short_to_id map", counterparty_node_id) })} return Ok(()); }; @@ -3218,7 +3218,8 @@ impl ChannelMana }); } } else { - unreachable!(); + let err = Err(MsgHandleErrInternal::send_err_msg_no_close(format!("No such channel for the counterparty_node_id {}, as indicated by the short_to_id map", counterparty_node_id), forward_chan_id)); + handle_errors.push((counterparty_node_id, err)); } } else { @@ -3740,7 +3741,9 @@ impl ChannelMana hash_map::Entry::Vacant(_) => (0x4000|10, Vec::new()) } } else { - unreachable!(); + // Peer must have been dropped before acquiring the per_peer_state + // read lock again in this function. + (0x4000|10, Vec::new()) } }; self.fail_htlc_backwards_internal(channel_state, @@ -4078,6 +4081,7 @@ impl ChannelMana fn claim_funds_from_hop(&self, channel_state_lock: &mut MutexGuard, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage) -> ClaimFundsFromHop { //TODO: Delay the claimed_funds relaying just like we do outbound relay!
let channel_state = &mut **channel_state_lock; + let per_peer_state = self.per_peer_state.read().unwrap(); let (counterparty_node_id, chan_id) = match channel_state.short_to_chan_info.get(&prev_hop.short_channel_id) { Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()), None => { @@ -4085,7 +4089,6 @@ impl ChannelMana } }; - let per_peer_state = self.per_peer_state.read().unwrap(); if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -4137,7 +4140,7 @@ impl ChannelMana return ClaimFundsFromHop::MonitorUpdateFail(counterparty_node_id, res, None); }, } - } else { unreachable!(); } + } else { return ClaimFundsFromHop::PrevHopForceClosed } } else { unreachable!(); } } @@ -4592,7 +4595,7 @@ impl ChannelMana } } } - } else { unreachable!() } + } else { return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("The peer with node_id {}, was dropped before the signed funding tx was sent to the peer", counterparty_node_id), channel_id)) } Ok(()) } From 2c112d7d3614463906f16a9d790bc6d413ee47bd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Tigerstr=C3=B6m?= <11711198+ViktorTigerstrom@users.noreply.github.com> Date: Wed, 8 Jun 2022 00:49:39 +0200 Subject: [PATCH 10/21] f - Store channels per-peer: Update per_peer_state docs --- lightning/src/ln/channelmanager.rs | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 1ccdff22364..1a9d45ec16a 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -790,8 +790,13 @@ pub struct ChannelManager Date: Fri, 10 Jun 2022 02:01:36 +0200 Subject: [PATCH 11/21] Remove unnecessary `per_peer_state` branch Now that `channels` are stored in the `per_peer_state`, some logic can be simplified and extra accesses of the `per_peer_state` can be removed.
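As a rough sketch of the shape this enables (the types below are simplified stand-ins, not the actual `ChannelManager` fields or APIs), the counterparty's `latest_features` can be read under the same per-peer lock that already guards its channels, so no second pass over `per_peer_state` is needed:

use std::collections::HashMap;
use std::sync::{Mutex, RwLock};

// Simplified stand-ins; only the locking shape matters for this sketch.
// `NodeId` stands in for the counterparty node id (a real `PublicKey` in LDK).
type NodeId = u64;
#[derive(Clone)]
struct InitFeatures(Vec<u8>);
struct Channel;
struct PeerState {
	latest_features: InitFeatures,
	channel_by_id: HashMap<[u8; 32], Channel>,
}
struct ChannelDetails {
	channel_id: [u8; 32],
	counterparty_features: InitFeatures,
}

fn list_channels(per_peer_state: &RwLock<HashMap<NodeId, Mutex<PeerState>>>) -> Vec<ChannelDetails> {
	let mut res = Vec::new();
	let per_peer = per_peer_state.read().unwrap();
	for (_node_id, peer_mutex) in per_peer.iter() {
		let peer_state = peer_mutex.lock().unwrap();
		for (channel_id, _chan) in peer_state.channel_by_id.iter() {
			// The peer's features live under the same lock as its channels, so they
			// can be cloned here instead of re-reading `per_peer_state` afterwards.
			res.push(ChannelDetails {
				channel_id: *channel_id,
				counterparty_features: peer_state.latest_features.clone(),
			});
		}
	}
	res
}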
--- lightning/src/ln/channelmanager.rs | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 1a9d45ec16a..7cc01a1d1ae 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -1779,7 +1779,7 @@ impl ChannelMana channel_id: (*channel_id).clone(), counterparty: ChannelCounterparty { node_id: channel.get_counterparty_node_id(), - features: InitFeatures::empty(), + features: peer_state.latest_features.clone(), unspendable_punishment_reserve: to_remote_reserve_satoshis, forwarding_info: channel.counterparty_forwarding_info(), // Ensures that we have actually received the `htlc_minimum_msat` value @@ -1817,12 +1817,6 @@ impl ChannelMana } } } - let per_peer_state = self.per_peer_state.read().unwrap(); - for chan in res.iter_mut() { - if let Some(peer_state) = per_peer_state.get(&chan.counterparty.node_id) { - chan.counterparty.features = peer_state.lock().unwrap().latest_features.clone(); - } - } res } From 7e9ac7b2607d60abc03b99d21ed63e9105d0b717 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Tigerstr=C3=B6m?= <11711198+ViktorTigerstrom@users.noreply.github.com> Date: Mon, 6 Jun 2022 22:54:18 +0200 Subject: [PATCH 12/21] Avoid retaking locks --- lightning/src/ln/channelmanager.rs | 121 ++++++++++++++--------------- 1 file changed, 57 insertions(+), 64 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 7cc01a1d1ae..1a5e9999129 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -1703,12 +1703,18 @@ impl ChannelMana return Err(APIError::APIMisuseError { err: format!("Channel value must be at least 1000 satoshis. It was {}", channel_value_satoshis) }); } - let channel = { - let per_peer_state = self.per_peer_state.read().unwrap(); - match per_peer_state.get(&their_network_key) { - Some(peer_state) => { + let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); + // We want to make sure the lock is actually acquired by PersistenceNotifierGuard. + debug_assert!(&self.total_consistency_lock.try_write().is_err()); + + let mut channel_state = self.channel_state.lock().unwrap(); + let per_peer_state = self.per_peer_state.read().unwrap(); + match per_peer_state.get(&their_network_key) { + None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", their_network_key) }), + Some(peer_state) => { + let mut peer_state = peer_state.lock().unwrap(); + let channel = { let outbound_scid_alias = self.create_and_insert_outbound_scid_alias(); - let peer_state = peer_state.lock().unwrap(); let their_features = &peer_state.latest_features; let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration }; match Channel::new_outbound(&self.fee_estimator, &self.keys_manager, their_network_key, @@ -1721,38 +1727,29 @@ impl ChannelMana return Err(e); }, } - }, - None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", their_network_key) }), - } - }; - let res = channel.get_open_channel(self.genesis_hash.clone()); + }; + let res = channel.get_open_channel(self.genesis_hash.clone()); - let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); - // We want to make sure the lock is actually acquired by PersistenceNotifierGuard. 
- debug_assert!(&self.total_consistency_lock.try_write().is_err()); + let temporary_channel_id = channel.channel_id(); - let temporary_channel_id = channel.channel_id(); - let mut channel_state = self.channel_state.lock().unwrap(); - let per_peer_state = self.per_peer_state.read().unwrap(); - if let Some(peer_state_mutex) = per_peer_state.get(&their_network_key){ - let mut peer_state_lock = peer_state_mutex.lock().unwrap(); - let peer_state = &mut *peer_state_lock; - match peer_state.channel_by_id.entry(temporary_channel_id) { - hash_map::Entry::Occupied(_) => { - if cfg!(fuzzing) { - return Err(APIError::APIMisuseError { err: "Fuzzy bad RNG".to_owned() }); - } else { - panic!("RNG is bad???"); - } - }, - hash_map::Entry::Vacant(entry) => { entry.insert(channel); } + match peer_state.channel_by_id.entry(temporary_channel_id) { + hash_map::Entry::Occupied(_) => { + if cfg!(fuzzing) { + return Err(APIError::APIMisuseError { err: "Fuzzy bad RNG".to_owned() }); + } else { + panic!("RNG is bad???"); + } + }, + hash_map::Entry::Vacant(entry) => { entry.insert(channel); } + } + + channel_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel { + node_id: their_network_key, + msg: res, + }); + Ok(temporary_channel_id) } - } else { unreachable!() } - channel_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel { - node_id: their_network_key, - msg: res, - }); - Ok(temporary_channel_id) + } } fn list_channels_with_filter)) -> bool + Copy>(&self, f: Fn) -> Vec { @@ -2789,12 +2786,15 @@ impl ChannelMana fn funding_transaction_generated_intern, &Transaction) -> Result>( &self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, funding_transaction: Transaction, find_funding_output: FundingOutput ) -> Result<(), APIError> { - let (chan, msg) = { - let (res, chan) = { - let per_peer_state = self.per_peer_state.read().unwrap(); - if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { - let mut peer_state_lock = peer_state_mutex.lock().unwrap(); - let peer_state = &mut *peer_state_lock; + let mut channel_state_lock = self.channel_state.lock().unwrap(); + let channel_state = &mut *channel_state_lock; + let per_peer_state = self.per_peer_state.read().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + + let (chan, msg) = { + let (res, chan) = { match peer_state.channel_by_id.remove(temporary_channel_id) { Some(mut chan) => { let funding_txo = find_funding_output(&chan, &funding_transaction)?; @@ -2807,31 +2807,22 @@ impl ChannelMana }, None => { return Err(APIError::ChannelUnavailable { err: "No such channel".to_owned() }) }, } - } else { - return Err(APIError::APIMisuseError { err: format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id) }) + }; + match handle_error!(self, res, chan.get_counterparty_node_id()) { + Ok(funding_msg) => { + (chan, funding_msg) + }, + Err(_) => { return Err(APIError::ChannelUnavailable { + err: "Error deriving keys or signing initial commitment transactions - either our RNG or our counterparty's RNG is broken or the Signer refused to sign".to_owned() + }) }, } }; - match handle_error!(self, res, chan.get_counterparty_node_id()) { - Ok(funding_msg) => { - (chan, funding_msg) - }, - Err(_) => { return Err(APIError::ChannelUnavailable { - err: "Error deriving keys or signing initial commitment transactions - either our 
RNG or our counterparty's RNG is broken or the Signer refused to sign".to_owned() - }) }, - } - }; - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_state_lock; - channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingCreated { - node_id: chan.get_counterparty_node_id(), - msg, - }); - let per_peer_state = self.per_peer_state.read().unwrap(); - let chan_id = chan.channel_id(); - if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { - let mut peer_state_lock = peer_state_mutex.lock().unwrap(); - let peer_state = &mut *peer_state_lock; + channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingCreated { + node_id: chan.get_counterparty_node_id(), + msg, + }); + let chan_id = chan.channel_id(); match peer_state.channel_by_id.entry(chan_id) { hash_map::Entry::Occupied(_) => { panic!("Generated duplicate funding txid?"); @@ -2844,8 +2835,10 @@ impl ChannelMana } } } - } else { unreachable!(); } - Ok(()) + Ok(()) + } else { + return Err(APIError::APIMisuseError { err: format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id) }) + } } #[cfg(test)] From 6c4b80d76160a27a78a6ff3bfd1402322c61d6b4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Tigerstr=C3=B6m?= <11711198+ViktorTigerstrom@users.noreply.github.com> Date: Mon, 6 Jun 2022 23:57:24 +0200 Subject: [PATCH 13/21] Update failure to query `Channel` error messages --- lightning/src/ln/channelmanager.rs | 96 ++++++---------------------- lightning/src/ln/functional_tests.rs | 8 +-- lightning/src/ln/payment_tests.rs | 2 +- lightning/src/ln/shutdown_tests.rs | 2 +- 4 files changed, 27 insertions(+), 81 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 1a5e9999129..b94dd5e98c9 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -1710,7 +1710,7 @@ impl ChannelMana let mut channel_state = self.channel_state.lock().unwrap(); let per_peer_state = self.per_peer_state.read().unwrap(); match per_peer_state.get(&their_network_key) { - None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", their_network_key) }), + None => return Err(APIError::APIMisuseError { err: format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", their_network_key) }), Some(peer_state) => { let mut peer_state = peer_state.lock().unwrap(); let channel = { @@ -1867,9 +1867,6 @@ impl ChannelMana let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(channel_id.clone()) { hash_map::Entry::Occupied(mut chan_entry) => { - if *counterparty_node_id != chan_entry.get().get_counterparty_node_id(){ - return Err(APIError::APIMisuseError { err: "The passed counterparty_node_id doesn't match the channel's counterparty node_id".to_owned() }); - } let (shutdown_msg, monitor_update, htlcs) = chan_entry.get_mut().get_shutdown(&self.keys_manager, &peer_state.latest_features, target_feerate_sats_per_1000_weight)?; failed_htlcs = htlcs; @@ -1901,10 +1898,10 @@ impl ChannelMana } break Ok(()); }, - hash_map::Entry::Vacant(_) => return Err(APIError::ChannelUnavailable { err: "No such channel".to_owned() }) + hash_map::Entry::Vacant(_) => return Err(APIError::ChannelUnavailable{err: format!("No such channel for the passed counterparty_node_id {}", counterparty_node_id) }) } } else { - return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: 
{}", counterparty_node_id) }); + return Err(APIError::APIMisuseError { err: format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id) }); } }; @@ -1987,9 +1984,6 @@ impl ChannelMana let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; if let hash_map::Entry::Occupied(chan) = peer_state.channel_by_id.entry(channel_id.clone()) { - if chan.get().get_counterparty_node_id() != *peer_node_id { - return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()}); - } if let Some(peer_msg) = peer_msg { self.issue_channel_close_events(chan.get(),ClosureReason::CounterpartyForceClosed { peer_msg: peer_msg.to_string() }); } else { @@ -1997,7 +1991,7 @@ impl ChannelMana } remove_channel!(self, channel_state, chan) } else { - return Err(APIError::ChannelUnavailable{ err: "No such channel".to_owned() }); + return Err(APIError::ChannelUnavailable{ err: format!("No such channel for the passed counterparty_node_id {}", peer_node_id) }); } } else { return Err(APIError::APIMisuseError{ err: format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", peer_node_id) }); @@ -2805,7 +2799,7 @@ impl ChannelMana } else { unreachable!(); }) , chan) }, - None => { return Err(APIError::ChannelUnavailable { err: "No such channel".to_owned() }) }, + None => { return Err(APIError::ChannelUnavailable { err: format!("No such channel for the passed counterparty_node_id {}", counterparty_node_id) }) }, } }; match handle_error!(self, res, chan.get_counterparty_node_id()) { @@ -4386,9 +4380,6 @@ impl ChannelMana if !channel.get().inbound_is_awaiting_accept() { return Err(APIError::APIMisuseError { err: "The channel isn't currently awaiting to be accepted.".to_owned() }); } - if *counterparty_node_id != channel.get().get_counterparty_node_id() { - return Err(APIError::APIMisuseError { err: "The passed counterparty_node_id doesn't match the channel's counterparty node_id".to_owned() }); - } if accept_0conf { channel.get_mut().set_0conf(); } else if channel.get().get_channel_type().requires_zero_conf() { @@ -4409,7 +4400,7 @@ impl ChannelMana }); } hash_map::Entry::Vacant(_) => { - return Err(APIError::ChannelUnavailable { err: "Can't accept a channel that doesn't exist".to_owned() }); + return Err(APIError::APIMisuseError { err: format!("No such channel for the passed counterparty_node_id {}", counterparty_node_id) }); } } } else { @@ -4490,13 +4481,10 @@ impl ChannelMana let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.temporary_channel_id) { hash_map::Entry::Occupied(mut chan) => { - if chan.get().get_counterparty_node_id() != *counterparty_node_id { - return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.temporary_channel_id)); - } try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration.channel_handshake_limits, &their_features), channel_state, chan); (chan.get().get_value_satoshis(), chan.get().get_funding_redeemscript().to_v0_p2wsh(), chan.get().get_user_id()) }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.temporary_channel_id)) + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! 
No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id)) } } else { return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id)) @@ -4524,12 +4512,9 @@ impl ChannelMana let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.temporary_channel_id.clone()) { hash_map::Entry::Occupied(mut chan) => { - if chan.get().get_counterparty_node_id() != *counterparty_node_id { - return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.temporary_channel_id)); - } (try_chan_entry!(self, chan.get_mut().funding_created(msg, best_block, &self.logger), channel_state, chan), chan.remove()) }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.temporary_channel_id)) + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id)) } } else { return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id)) @@ -4602,9 +4587,6 @@ impl ChannelMana let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan) => { - if chan.get().get_counterparty_node_id() != *counterparty_node_id { - return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); - } let (monitor, funding_tx, channel_ready) = match chan.get_mut().funding_signed(&msg, best_block, &self.logger) { Ok(update) => update, Err(e) => try_chan_entry!(self, Err(e), channel_state, chan), @@ -4626,7 +4608,7 @@ impl ChannelMana } funding_tx }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! 
No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) } } else { return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) @@ -4646,9 +4628,6 @@ impl ChannelMana let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan) => { - if chan.get().get_counterparty_node_id() != *counterparty_node_id { - return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); - } let announcement_sigs_opt = try_chan_entry!(self, chan.get_mut().channel_ready(&msg, self.get_our_node_id(), self.genesis_hash.clone(), &self.best_block.read().unwrap(), &self.logger), channel_state, chan); if let Some(announcement_sigs) = announcement_sigs_opt { @@ -4673,7 +4652,7 @@ impl ChannelMana } Ok(()) }, - hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) + hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) } } else { Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) @@ -4692,9 +4671,6 @@ impl ChannelMana let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id.clone()) { hash_map::Entry::Occupied(mut chan_entry) => { - if chan_entry.get().get_counterparty_node_id() != *counterparty_node_id { - return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); - } if !chan_entry.get().received_shutdown() { log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.", @@ -4726,7 +4702,7 @@ impl ChannelMana break Ok(()); }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! 
No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) } } else { return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) @@ -4750,9 +4726,6 @@ impl ChannelMana let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id.clone()) { hash_map::Entry::Occupied(mut chan_entry) => { - if chan_entry.get().get_counterparty_node_id() != *counterparty_node_id { - return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); - } let (closing_signed, tx) = try_chan_entry!(self, chan_entry.get_mut().closing_signed(&self.fee_estimator, &msg), channel_state, chan_entry); if let Some(msg) = closing_signed { channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned { @@ -4769,7 +4742,7 @@ impl ChannelMana (tx, Some(remove_channel!(self, channel_state, chan_entry))) } else { (tx, None) } }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) } } else { return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) @@ -4810,9 +4783,6 @@ impl ChannelMana let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan) => { - if chan.get().get_counterparty_node_id() != *counterparty_node_id { - return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); - } let create_pending_htlc_status = |chan: &Channel, pending_forward_info: PendingHTLCStatus, error_code: u16| { // If the update_add is completely bogus, the call will Err and we will close, @@ -4838,7 +4808,7 @@ impl ChannelMana }; try_chan_entry!(self, chan.get_mut().update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.logger), channel_state, chan); }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! 
No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) } } else { return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) @@ -4856,12 +4826,9 @@ impl ChannelMana let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan) => { - if chan.get().get_counterparty_node_id() != *counterparty_node_id { - return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); - } try_chan_entry!(self, chan.get_mut().update_fulfill_htlc(&msg), channel_state, chan) }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) } } else { return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) @@ -4880,12 +4847,9 @@ impl ChannelMana let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan) => { - if chan.get().get_counterparty_node_id() != *counterparty_node_id { - return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); - } try_chan_entry!(self, chan.get_mut().update_fail_htlc(&msg, HTLCFailReason::LightningError { err: msg.reason.clone() }), channel_state, chan); }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! 
No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) } } else { return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)); @@ -4902,9 +4866,6 @@ impl ChannelMana let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan) => { - if chan.get().get_counterparty_node_id() != *counterparty_node_id { - return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); - } if (msg.failure_code & 0x8000) == 0 { let chan_err: ChannelError = ChannelError::Close("Got update_fail_malformed_htlc with BADONION not set".to_owned()); try_chan_entry!(self, Err(chan_err), channel_state, chan); @@ -4912,7 +4873,7 @@ impl ChannelMana try_chan_entry!(self, chan.get_mut().update_fail_malformed_htlc(&msg, HTLCFailReason::Reason { failure_code: msg.failure_code, data: Vec::new() }), channel_state, chan); Ok(()) }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) } } else { return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) @@ -4928,9 +4889,6 @@ impl ChannelMana let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan) => { - if chan.get().get_counterparty_node_id() != *counterparty_node_id { - return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); - } let (revoke_and_ack, commitment_signed, monitor_update) = match chan.get_mut().commitment_signed(&msg, &self.logger) { Err((None, e)) => try_chan_entry!(self, Err(e), channel_state, chan), @@ -4964,7 +4922,7 @@ impl ChannelMana } Ok(()) }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! 
No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) } } else { return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) @@ -5020,9 +4978,6 @@ impl ChannelMana let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan) => { - if chan.get().get_counterparty_node_id() != *counterparty_node_id { - break Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); - } let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update(); let raa_updates = break_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.logger), channel_state, chan); @@ -5056,7 +5011,7 @@ impl ChannelMana .unwrap_or(chan.get().outbound_scid_alias()), chan.get().get_funding_txo().unwrap())) }, - hash_map::Entry::Vacant(_) => break Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) + hash_map::Entry::Vacant(_) => break Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) } } else { break Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) @@ -5087,12 +5042,9 @@ impl ChannelMana let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan) => { - if chan.get().get_counterparty_node_id() != *counterparty_node_id { - return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); - } try_chan_entry!(self, chan.get_mut().update_fee(&self.fee_estimator, &msg), channel_state, chan); }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! 
No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) } } else { return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)); @@ -5110,9 +5062,6 @@ impl ChannelMana let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan) => { - if chan.get().get_counterparty_node_id() != *counterparty_node_id { - return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); - } if !chan.get().is_usable() { return Err(MsgHandleErrInternal::from_no_close(LightningError{err: "Got an announcement_signatures before we were ready for it".to_owned(), action: msgs::ErrorAction::IgnoreError})); } @@ -5125,7 +5074,7 @@ impl ChannelMana update_msg: self.get_channel_update_for_broadcast(chan.get()).unwrap(), }); }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) } } else { return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)); @@ -5185,9 +5134,6 @@ impl ChannelMana let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan) => { - if chan.get().get_counterparty_node_id() != *counterparty_node_id { - return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); - } // Currently, we expect all holding cell update_adds to be dropped on peer // disconnect, so Channel's reestablish will never hand us any holding cell // freed HTLCs to fail backwards. If in the future we no longer drop pending @@ -5221,7 +5167,7 @@ impl ChannelMana } (responses.holding_cell_failed_htlcs, need_lnd_workaround) }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) } } else { return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)); diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 0806fa8cc78..bf03742aeaa 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -7535,7 +7535,7 @@ fn test_data_loss_protect() { if let MessageSendEvent::HandleError { ref action, .. } = msg { match action { &ErrorAction::SendErrorMessage { ref msg } => { - assert_eq!(msg.data, "Failed to find corresponding channel"); + assert_eq!(msg.data, format!("Got a message for a channel from the wrong node! 
No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id())); err_msgs_0.push(msg.clone()); }, _ => panic!("Unexpected event!"), @@ -7548,7 +7548,7 @@ fn test_data_loss_protect() { nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), &err_msgs_0[0]); assert!(nodes[1].node.list_usable_channels().is_empty()); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: "Failed to find corresponding channel".to_owned() }); + check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id()) }); check_closed_broadcast!(nodes[1], false); } @@ -8548,8 +8548,8 @@ fn test_can_not_accept_unknown_inbound_channel() { let unknown_channel_id = [0; 32]; let api_res = nodes[0].node.accept_inbound_channel(&unknown_channel_id, &nodes[1].node.get_our_node_id(), 0); match api_res { - Err(APIError::ChannelUnavailable { err }) => { - assert_eq!(err, "Can't accept a channel that doesn't exist"); + Err(APIError::APIMisuseError { err }) => { + assert_eq!(err, format!("No such channel for the passed counterparty_node_id {}", nodes[1].node.get_our_node_id())); }, Ok(_) => panic!("It shouldn't be possible to accept an unkown channel"), Err(_) => panic!("Unexpected Error"), diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index b231dececbc..9a156632019 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -463,7 +463,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::SendErrorMessage { ref msg } } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()); nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), msg); - check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: "Failed to find corresponding channel".to_string() }); + check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id()) }); check_added_monitors!(nodes[1], 1); assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1); }, diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs index e1b7b20d45a..496f64ee97b 100644 --- a/lightning/src/ln/shutdown_tests.rs +++ b/lightning/src/ln/shutdown_tests.rs @@ -381,7 +381,7 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) { // closing_signed so we do it ourselves check_closed_broadcast!(nodes[1], false); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: "Failed to find corresponding channel".to_string() }); + check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: format!("Got a message for a channel from the wrong node! 
No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id()) }); } assert!(nodes[0].node.list_channels().is_empty()); From 499145637c7066b29a3b8119c092b6d65c6472e7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Tigerstr=C3=B6m?= <11711198+ViktorTigerstrom@users.noreply.github.com> Date: Tue, 31 May 2022 21:20:55 +0200 Subject: [PATCH 14/21] Add duplicate temporary_channel_id for 2 peers test --- lightning/src/ln/functional_tests.rs | 50 ++++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index bf03742aeaa..0a592f774d7 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -9220,6 +9220,56 @@ fn test_onchain_htlc_settlement_after_close() { do_test_onchain_htlc_settlement_after_close(false, false); } +#[test] +fn test_duplicate_temporary_channel_id_from_different_peers() { + // Tests that we can accept two different `OpenChannel` requests with the same + // `temporary_channel_id`, as long as they are from different peers. + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + // Create the first channel + nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 100000, 10001, 42, None).unwrap(); + let mut open_chan_msg_chan_1_0 = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id()); + + // Create the second channel + nodes[2].node.create_channel(nodes[0].node.get_our_node_id(), 100000, 10001, 43, None).unwrap(); + let mut open_chan_msg_chan_2_0 = get_event_msg!(nodes[2], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id()); + + // Modify the `OpenChannel` from `nodes[2]` to `nodes[0]` to ensure that it uses the same + // `temporary_channel_id` as the `OpenChannel` from nodes[1] to nodes[0]. + open_chan_msg_chan_2_0.temporary_channel_id = open_chan_msg_chan_1_0.temporary_channel_id; + + // Assert that `nodes[0]` can accept both `OpenChannel` requests, even though they use the same + // `temporary_channel_id` as they are from different peers.
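A rough sketch of why this works: in this series a channel is looked up per peer (see the `peer_state.channel_by_id` accesses in the handler changes above), so the same `temporary_channel_id` can exist under two different counterparties without colliding. The types and names below are illustrative only, not the actual `ChannelManager` fields:

use std::collections::HashMap;

/// Stand-in for the real `Channel` type; illustrative only.
struct Chan;

/// Hypothetical per-peer channel storage: the outer key is the counterparty node_id
/// (serialized), the inner key is the (temporary) channel_id.
struct PerPeerChannels {
    by_peer: HashMap<[u8; 33], HashMap<[u8; 32], Chan>>,
}

impl PerPeerChannels {
    fn insert(&mut self, peer: [u8; 33], chan_id: [u8; 32], chan: Chan) -> Option<Chan> {
        // The same chan_id never collides across peers, because each peer owns its own inner map.
        self.by_peer.entry(peer).or_insert_with(HashMap::new).insert(chan_id, chan)
    }
}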
+ nodes[0].node.handle_open_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &open_chan_msg_chan_1_0); + { + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match &events[0] { + MessageSendEvent::SendAcceptChannel { node_id, msg } => { + assert_eq!(node_id, &nodes[1].node.get_our_node_id()); + assert_eq!(msg.temporary_channel_id, open_chan_msg_chan_1_0.temporary_channel_id); + }, + _ => panic!("Unexpected event"), + } + } + + nodes[0].node.handle_open_channel(&nodes[2].node.get_our_node_id(), InitFeatures::known(), &open_chan_msg_chan_2_0); + { + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match &events[0] { + MessageSendEvent::SendAcceptChannel { node_id, msg } => { + assert_eq!(node_id, &nodes[2].node.get_our_node_id()); + assert_eq!(msg.temporary_channel_id, open_chan_msg_chan_1_0.temporary_channel_id); + }, + _ => panic!("Unexpected event"), + } + } +} + #[test] fn test_duplicate_chan_id() { // Test that if a given peer tries to open a channel with the same channel_id as one that is From ae665ce35facbb56a4723d6c0ca5a6373252b8f5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Tigerstr=C3=B6m?= <11711198+ViktorTigerstrom@users.noreply.github.com> Date: Mon, 13 Jun 2022 21:24:51 +0200 Subject: [PATCH 15/21] Add handle unkown peer test --- lightning/src/ln/channelmanager.rs | 211 ++++++++++++++++++++++++++++- 1 file changed, 210 insertions(+), 1 deletion(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index b94dd5e98c9..c771bf94614 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -7363,6 +7363,12 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> mod tests { use bitcoin::hashes::Hash; use bitcoin::hashes::sha256::Hash as Sha256; + use bitcoin::hashes::hex::FromHex; + use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey}; + use bitcoin::secp256k1::ecdsa::Signature; + use bitcoin::secp256k1::ffi::Signature as FFISignature; + use bitcoin::blockdata::script::Script; + use bitcoin::Txid; use core::time::Duration; use core::sync::atomic::Ordering; use ln::{PaymentPreimage, PaymentHash, PaymentSecret}; @@ -7371,7 +7377,7 @@ mod tests { use ln::features::InitFeatures; use ln::functional_test_utils::*; use ln::msgs; - use ln::msgs::ChannelMessageHandler; + use ln::msgs::{ChannelMessageHandler, OptionalField}; use routing::router::{PaymentParameters, RouteParameters, find_route}; use util::errors::APIError; use util::events::{Event, MessageSendEvent, MessageSendEventsProvider}; @@ -7980,6 +7986,209 @@ mod tests { nodes[0].node.get_and_clear_pending_events(); nodes[1].node.get_and_clear_pending_events(); } + + fn check_unkown_peer_msg_event<'a, 'b: 'a, 'c: 'b>(node: &Node<'a, 'b, 'c>, closing_node_id: PublicKey, closed_channel_id: [u8; 32]){ + let close_msg_ev = node.node.get_and_clear_pending_msg_events(); + let expected_error_str = format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", closing_node_id); + match close_msg_ev[0] { + MessageSendEvent::HandleError { + ref node_id, action: msgs::ErrorAction::SendErrorMessage { + msg: msgs::ErrorMessage { ref channel_id, ref data } + } + } => { + assert_eq!(*node_id, closing_node_id); + assert_eq!(*data, expected_error_str); + assert_eq!(*channel_id, closed_channel_id); + } + _ => panic!("Unexpected event"), + } + } + + fn check_unkown_peer_error(res_err: Result, expected_public_key: PublicKey) { 
+ match res_err { + Err(APIError::APIMisuseError { err }) => { + assert_eq!(err, format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", expected_public_key)); + }, + Ok(_) => panic!("Unexpected Ok"), + Err(_) => panic!("Unexpected Error"), + } + } + + #[test] + fn test_api_calls_with_unkown_counterparty_node() { + // Tests that our API functions and message handlers that expects a `counterparty_node_id` + // as input, behaves as expected if the `counterparty_node_id` is an unkown peer in the + // `ChannelManager::per_peer_state` map. + let chanmon_cfg = create_chanmon_cfgs(2); + let node_cfg = create_node_cfgs(2, &chanmon_cfg); + let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[None, None]); + let nodes = create_network(2, &node_cfg, &node_chanmgr); + + // Boilerplate code to produce `open_channel` and `accept_channel` msgs more densly than + // creating dummy ones. + nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None).unwrap(); + let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_channel_msg); + let accept_channel_msg = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()); + + // Dummy values + let channel_id = [4; 32]; + let signature = Signature::from(unsafe { FFISignature::new() }); + let unkown_public_key = PublicKey::from_secret_key(&Secp256k1::signing_only(), &SecretKey::from_slice(&[42; 32]).unwrap()); + + // Dummy msgs + let funding_created_msg = msgs::FundingCreated { + temporary_channel_id: open_channel_msg.temporary_channel_id, + funding_txid: Txid::from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap(), + funding_output_index: 0, + signature: signature, + }; + + let funding_signed_msg = msgs::FundingSigned { + channel_id: channel_id, + signature: signature, + }; + + let channel_ready_msg = msgs::ChannelReady { + channel_id: channel_id, + next_per_commitment_point: unkown_public_key, + short_channel_id_alias: None, + }; + + let announcement_signatures_msg = msgs::AnnouncementSignatures { + channel_id: channel_id, + short_channel_id: 0, + node_signature: signature, + bitcoin_signature: signature, + }; + + let channel_reestablish_msg = msgs::ChannelReestablish { + channel_id: channel_id, + next_local_commitment_number: 0, + next_remote_commitment_number: 0, + data_loss_protect: OptionalField::Absent, + }; + + let closing_signed_msg = msgs::ClosingSigned { + channel_id: channel_id, + fee_satoshis: 1000, + signature: signature, + fee_range: None, + }; + + let shutdown_msg = msgs::Shutdown { + channel_id: channel_id, + scriptpubkey: Script::new(), + }; + + let onion_routing_packet = msgs::OnionPacket { + version: 255, + public_key: Ok(unkown_public_key), + hop_data: [1; 20*65], + hmac: [2; 32] + }; + + let update_add_htlc_msg = msgs::UpdateAddHTLC { + channel_id: channel_id, + htlc_id: 0, + amount_msat: 1000000, + payment_hash: PaymentHash([1; 32]), + cltv_expiry: 821716, + onion_routing_packet + }; + + let commitment_signed_msg = msgs::CommitmentSigned { + channel_id: channel_id, + signature: signature, + htlc_signatures: Vec::new(), + }; + + let update_fee_msg = msgs::UpdateFee { + channel_id: channel_id, + feerate_per_kw: 1000, + }; + + let malformed_update_msg = msgs::UpdateFailMalformedHTLC{ + channel_id: channel_id, + htlc_id: 0, + sha256_of_onion: [1; 32], + 
failure_code: 0x8000, + }; + + let fulfill_update_msg = msgs::UpdateFulfillHTLC{ + channel_id: channel_id, + htlc_id: 0, + payment_preimage: PaymentPreimage([1; 32]), + }; + + let fail_update_msg = msgs::UpdateFailHTLC{ + channel_id: channel_id, + htlc_id: 0, + reason: msgs::OnionErrorPacket { data: Vec::new()}, + }; + + let revoke_and_ack_msg = msgs::RevokeAndACK { + channel_id: channel_id, + per_commitment_secret: [1; 32], + next_per_commitment_point: unkown_public_key, + }; + + // Test the API functions and message handlers. + check_unkown_peer_error(nodes[0].node.create_channel(unkown_public_key, 1_000_000, 500_000_000, 42, None), unkown_public_key); + + nodes[1].node.handle_open_channel(&unkown_public_key, InitFeatures::known(), &open_channel_msg); + check_unkown_peer_msg_event(&nodes[1], unkown_public_key, open_channel_msg.temporary_channel_id); + + nodes[0].node.handle_accept_channel(&unkown_public_key, InitFeatures::known(), &accept_channel_msg); + check_unkown_peer_msg_event(&nodes[0], unkown_public_key, open_channel_msg.temporary_channel_id); + + check_unkown_peer_error(nodes[0].node.accept_inbound_channel(&open_channel_msg.temporary_channel_id, &unkown_public_key, 42), unkown_public_key); + nodes[1].node.handle_funding_created(&unkown_public_key, &funding_created_msg); + check_unkown_peer_msg_event(&nodes[1], unkown_public_key, open_channel_msg.temporary_channel_id); + + nodes[0].node.handle_funding_signed(&unkown_public_key, &funding_signed_msg); + check_unkown_peer_msg_event(&nodes[0], unkown_public_key, channel_id); + + nodes[0].node.handle_channel_ready(&unkown_public_key, &channel_ready_msg); + check_unkown_peer_msg_event(&nodes[0], unkown_public_key, channel_id); + + nodes[1].node.handle_announcement_signatures(&unkown_public_key, &announcement_signatures_msg); + check_unkown_peer_msg_event(&nodes[1], unkown_public_key, channel_id); + + check_unkown_peer_error(nodes[0].node.close_channel(&channel_id, &unkown_public_key), unkown_public_key); + + check_unkown_peer_error(nodes[0].node.force_close_channel(&channel_id, &unkown_public_key), unkown_public_key); + + nodes[0].node.handle_shutdown(&unkown_public_key, &InitFeatures::known(), &shutdown_msg); + check_unkown_peer_msg_event(&nodes[0], unkown_public_key, channel_id); + + nodes[1].node.handle_closing_signed(&unkown_public_key, &closing_signed_msg); + check_unkown_peer_msg_event(&nodes[1], unkown_public_key, channel_id); + + nodes[0].node.handle_channel_reestablish(&unkown_public_key, &channel_reestablish_msg); + check_unkown_peer_msg_event(&nodes[0], unkown_public_key, channel_id); + + nodes[1].node.handle_update_add_htlc(&unkown_public_key, &update_add_htlc_msg); + check_unkown_peer_msg_event(&nodes[1], unkown_public_key, channel_id); + + nodes[1].node.handle_commitment_signed(&unkown_public_key, &commitment_signed_msg); + check_unkown_peer_msg_event(&nodes[1], unkown_public_key, channel_id); + + nodes[1].node.handle_update_fail_malformed_htlc(&unkown_public_key, &malformed_update_msg); + check_unkown_peer_msg_event(&nodes[1], unkown_public_key, channel_id); + + nodes[1].node.handle_update_fail_htlc(&unkown_public_key, &fail_update_msg); + check_unkown_peer_msg_event(&nodes[1], unkown_public_key, channel_id); + + nodes[1].node.handle_update_fulfill_htlc(&unkown_public_key, &fulfill_update_msg); + check_unkown_peer_msg_event(&nodes[1], unkown_public_key, channel_id); + + nodes[1].node.handle_revoke_and_ack(&unkown_public_key, &revoke_and_ack_msg); + check_unkown_peer_msg_event(&nodes[1], unkown_public_key, channel_id); + + 
nodes[1].node.handle_update_fee(&unkown_public_key, &update_fee_msg); + check_unkown_peer_msg_event(&nodes[1], unkown_public_key, channel_id); + } } #[cfg(all(any(test, feature = "_test_utils"), feature = "_bench_unstable"))] From ef4107a696d9aa2f6afd4d6bf182c14c969bed4d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Tigerstr=C3=B6m?= <11711198+ViktorTigerstrom@users.noreply.github.com> Date: Tue, 21 Jun 2022 01:46:08 +0200 Subject: [PATCH 16/21] Enable htlc forwarding over substitute channels Attempt to forward htlcs over alternative channels to the same peer, in case forwarding over the channel specified in the onion payload fails. --- lightning/src/ln/channelmanager.rs | 184 ++++++++++++++++++----------- 1 file changed, 118 insertions(+), 66 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index c771bf94614..4aac12c318e 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3081,9 +3081,36 @@ impl ChannelMana if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; - if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(forward_chan_id) { - let mut add_htlc_msgs = Vec::new(); - let mut fail_htlc_msgs = Vec::new(); + if peer_state.channel_by_id.contains_key(&forward_chan_id) { + let mut htlcs_msgs_by_id: HashMap<[u8; 32], (Vec, Vec)> = HashMap::new(); + + macro_rules! add_channel_key { + ($channel_id: expr) => {{ + if !htlcs_msgs_by_id.contains_key(&$channel_id){ + htlcs_msgs_by_id.insert($channel_id, (Vec::new(), Vec::new())); + } + }} + } + + macro_rules! add_update_add_htlc { + ($add_htlc_msg: expr, $channel_id: expr) => {{ + add_channel_key!($channel_id); + if let hash_map::Entry::Occupied(mut entry) = htlcs_msgs_by_id.entry($channel_id) { + let msgs_entry = entry.get_mut(); + msgs_entry.0.push($add_htlc_msg); + } + }} + } + + macro_rules! add_update_fail_htlc { + ($fail_htlc_msg: expr, $channel_id: expr) => {{ + add_channel_key!($channel_id); + if let hash_map::Entry::Occupied(mut entry) = htlcs_msgs_by_id.entry($channel_id) { + let msgs_entry = entry.get_mut(); + msgs_entry.1.push($fail_htlc_msg); + } + }} + } for forward_info in pending_forwards.drain(..) { match forward_info { HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info: PendingHTLCInfo { @@ -3100,41 +3127,64 @@ impl ChannelMana // Phantom payments are only PendingHTLCRouting::Receive. phantom_shared_secret: None, }); - match chan.get_mut().send_htlc(amt_to_forward, payment_hash, outgoing_cltv_value, htlc_source.clone(), onion_packet, &self.logger) { - Err(e) => { - if let ChannelError::Ignore(msg) = e { - log_trace!(self.logger, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(payment_hash.0), msg); - } else { - panic!("Stated return value requirements in send_htlc() were not met"); - } - let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, chan.get()); - failed_forwards.push((htlc_source, payment_hash, - HTLCFailReason::Reason { failure_code, data } - )); - continue; - }, - Ok(update_add) => { - match update_add { - Some(msg) => { add_htlc_msgs.push(msg); }, - None => { - // Nothing to do here...we're waiting on a remote - // revoke_and_ack before we can add anymore HTLCs. The Channel - // will automatically handle building the update_add_htlc and - // commitment_signed messages when we can. 
- // TODO: Do some kind of timer to set the channel as !is_live() - // as we don't really want others relying on us relaying through - // this channel currently :/. + + // Attempt to forward the HTLC over all available channels + // to the peer, but attempt to forward the HTLC over the + // channel specified in the onion payload first. + let mut counterparty_channel_ids = peer_state.channel_by_id.keys() + .filter(|chan_id| **chan_id != forward_chan_id) + .map(|chan_id| *chan_id).collect::>(); + counterparty_channel_ids.insert(0, forward_chan_id); + let mut send_succeeded = false; + for chan_id in counterparty_channel_ids { + match peer_state.channel_by_id.get_mut(&chan_id).unwrap().send_htlc(amt_to_forward, payment_hash, outgoing_cltv_value, htlc_source.clone(), onion_packet.clone(), &self.logger) { + Err(e) => { + if let ChannelError::Ignore(msg) = e { + log_trace!( + self.logger, + "Could not forward HTLC with payment_hash {}, over channel {} to peer {}. Will attempt to forward the HTLC over a substitute channel instead if possible. Reason: {}", + log_bytes!(payment_hash.0), log_bytes!(chan_id), counterparty_node_id, msg + ); + } else { + panic!("Stated return value requirements in send_htlc() were not met"); + } + }, + Ok(update_add) => { + match update_add { + Some(msg) => { + log_info!(self.logger, "Will forward HTLC with payment_hash {}, over channel {}", log_bytes!(payment_hash.0), log_bytes!(chan_id)); + add_update_add_htlc!(msg, chan_id); + }, + None => { + // Nothing to do here...we're waiting on a remote + // revoke_and_ack before we can add anymore HTLCs. The Channel + // will automatically handle building the update_add_htlc and + // commitment_signed messages when we can. + // TODO: Do some kind of timer to set the channel as !is_live() + // as we don't really want others relying on us relaying through + // this channel currently :/. + } } + send_succeeded = true; + break; } } } + if !send_succeeded { + log_trace!(self.logger, "Failed to forward HTLC with payment_hash {} over all of the available channels to the peer", log_bytes!(payment_hash.0)); + let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, peer_state.channel_by_id.get(&forward_chan_id).unwrap()); + failed_forwards.push((htlc_source, payment_hash, + HTLCFailReason::Reason { failure_code, data } + )); + continue; + } }, HTLCForwardInfo::AddHTLC { .. } => { panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward"); }, HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => { log_trace!(self.logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id); - match chan.get_mut().get_update_fail_htlc(htlc_id, err_packet, &self.logger) { + match peer_state.channel_by_id.get_mut(&forward_chan_id).unwrap().get_update_fail_htlc(htlc_id, err_packet, &self.logger) { Err(e) => { if let ChannelError::Ignore(msg) = e { log_trace!(self.logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg); @@ -3146,7 +3196,7 @@ impl ChannelMana // the chain and sending the HTLC-Timeout is their problem. 
continue; }, - Ok(Some(msg)) => { fail_htlc_msgs.push(msg); }, + Ok(Some(msg)) => { add_update_fail_htlc!(msg, forward_chan_id); }, Ok(None) => { // Nothing to do here...we're waiting on a remote // revoke_and_ack before we can update the commitment @@ -3162,46 +3212,48 @@ impl ChannelMana } } - if !add_htlc_msgs.is_empty() || !fail_htlc_msgs.is_empty() { - let (commitment_msg, monitor_update) = match chan.get_mut().send_commitment(&self.logger) { - Ok(res) => res, - Err(e) => { - // We surely failed send_commitment due to bad keys, in that case - // close channel and then send error message to peer. - let counterparty_node_id = chan.get().get_counterparty_node_id(); - let err: Result<(), _> = match e { - ChannelError::Ignore(_) | ChannelError::Warn(_) => { - panic!("Stated return value requirements in send_commitment() were not met"); - } - ChannelError::Close(msg) => { - log_trace!(self.logger, "Closing channel {} due to Close-required error: {}", log_bytes!(chan.key()[..]), msg); - let mut channel = remove_channel!(self, channel_state, chan); - // ChannelClosed event is generated by handle_error for us. - Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel.channel_id(), channel.get_user_id(), channel.force_shutdown(true), self.get_channel_update_for_broadcast(&channel).ok())) - }, - ChannelError::CloseDelayBroadcast(_) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); } - }; - handle_errors.push((counterparty_node_id, err)); + for (chan_id, (add_htlc_msgs, fail_htlc_msgs)) in htlcs_msgs_by_id.into_iter() { + if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(chan_id) { + let (commitment_msg, monitor_update) = match chan.get_mut().send_commitment(&self.logger) { + Ok(res) => res, + Err(e) => { + // We surely failed send_commitment due to bad keys, in that case + // close channel and then send error message to peer. + let counterparty_node_id = chan.get().get_counterparty_node_id(); + let err: Result<(), _> = match e { + ChannelError::Ignore(_) | ChannelError::Warn(_) => { + panic!("Stated return value requirements in send_commitment() were not met"); + } + ChannelError::Close(msg) => { + log_trace!(self.logger, "Closing channel {} due to Close-required error: {}", log_bytes!(chan.key()[..]), msg); + let mut channel = remove_channel!(self, channel_state, chan); + // ChannelClosed event is generated by handle_error for us. 
+ Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel.channel_id(), channel.get_user_id(), channel.force_shutdown(true), self.get_channel_update_for_broadcast(&channel).ok())) + }, + ChannelError::CloseDelayBroadcast(_) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); } + }; + handle_errors.push((counterparty_node_id, err)); + continue; + } + }; + if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { + handle_errors.push((chan.get().get_counterparty_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true))); continue; } - }; - if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { - handle_errors.push((chan.get().get_counterparty_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true))); - continue; + log_debug!(self.logger, "Forwarding HTLCs resulted in a commitment update with {} HTLCs added and {} HTLCs failed for channel {}", + add_htlc_msgs.len(), fail_htlc_msgs.len(), log_bytes!(chan.get().channel_id())); + channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { + node_id: chan.get().get_counterparty_node_id(), + updates: msgs::CommitmentUpdate { + update_add_htlcs: add_htlc_msgs, + update_fulfill_htlcs: Vec::new(), + update_fail_htlcs: fail_htlc_msgs, + update_fail_malformed_htlcs: Vec::new(), + update_fee: None, + commitment_signed: commitment_msg, + }, + }); } - log_debug!(self.logger, "Forwarding HTLCs resulted in a commitment update with {} HTLCs added and {} HTLCs failed for channel {}", - add_htlc_msgs.len(), fail_htlc_msgs.len(), log_bytes!(chan.get().channel_id())); - channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { - node_id: chan.get().get_counterparty_node_id(), - updates: msgs::CommitmentUpdate { - update_add_htlcs: add_htlc_msgs, - update_fulfill_htlcs: Vec::new(), - update_fail_htlcs: fail_htlc_msgs, - update_fail_malformed_htlcs: Vec::new(), - update_fee: None, - commitment_signed: commitment_msg, - }, - }); } } else { let err = Err(MsgHandleErrInternal::send_err_msg_no_close(format!("No such channel for the counterparty_node_id {}, as indicated by the short_to_id map", counterparty_node_id), forward_chan_id)); From 1d44945fee11d83ddd67b2ceeaaac36d1bd1e53f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Tigerstr=C3=B6m?= <11711198+ViktorTigerstrom@users.noreply.github.com> Date: Tue, 21 Jun 2022 01:46:25 +0200 Subject: [PATCH 17/21] Test htlc forwarding over substitute channels --- lightning/src/ln/functional_test_utils.rs | 10 +++- lightning/src/ln/functional_tests.rs | 61 ++++++++++++++++++++++- 2 files changed, 67 insertions(+), 4 deletions(-) diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 9ee19437e61..25feeddeaf2 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -1561,12 +1561,18 @@ pub fn send_along_route_with_secret<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, payment_id } -pub fn do_pass_along_path<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_path: &[&Node<'a, 'b, 'c>], recv_value: u64, our_payment_hash: PaymentHash, our_payment_secret: Option, ev: MessageSendEvent, payment_received_expected: bool, clear_recipient_events: bool, expected_preimage: Option) { +pub fn 
do_pass_along_path<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_path: &[&Node<'a, 'b, 'c>], recv_value: u64, our_payment_hash: PaymentHash, our_payment_secret: Option, ev: MessageSendEvent, payment_received_expected: bool, clear_recipient_events: bool, expected_preimage: Option, expected_channels: Option<&Vec<[u8; 32]>>) { let mut payment_event = SendEvent::from_event(ev); let mut prev_node = origin_node; + if let Some(channel_ids) = expected_channels { + assert_eq!(expected_path.len(), channel_ids.len()); + } for (idx, &node) in expected_path.iter().enumerate() { assert_eq!(node.node.get_our_node_id(), payment_event.node_id); + if let Some(channel_ids) = expected_channels { + assert_eq!(payment_event.msgs[0].channel_id, channel_ids[idx]); + } node.node.handle_update_add_htlc(&prev_node.node.get_our_node_id(), &payment_event.msgs[0]); check_added_monitors!(node, 0); @@ -1611,7 +1617,7 @@ pub fn do_pass_along_path<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_p } pub fn pass_along_path<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_path: &[&Node<'a, 'b, 'c>], recv_value: u64, our_payment_hash: PaymentHash, our_payment_secret: Option, ev: MessageSendEvent, payment_received_expected: bool, expected_preimage: Option) { - do_pass_along_path(origin_node, expected_path, recv_value, our_payment_hash, our_payment_secret, ev, payment_received_expected, true, expected_preimage); + do_pass_along_path(origin_node, expected_path, recv_value, our_payment_hash, our_payment_secret, ev, payment_received_expected, true, expected_preimage, None); } pub fn pass_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_route: &[&[&Node<'a, 'b, 'c>]], recv_value: u64, our_payment_hash: PaymentHash, our_payment_secret: PaymentSecret) { diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 0a592f774d7..f8c3d8b26af 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -1100,6 +1100,63 @@ fn fake_network_test() { check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure); } +#[test] +fn test_forward_through_substitute_channel() { + // Test that we forward htlcs over substitute channels if we receive an onion payload that + // specifies that we should forward an htlc over a channel where we have insufficient + // liquidity, but an alternative channel to the same node with enough liquidity exists. 
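The selection order this test exercises (try the channel named in the onion payload first, then any other channel to the same peer) can be sketched as a free-standing helper. The function below is hypothetical and only illustrates the ordering; the patch itself builds the equivalent list inline where `channel_state.forward_htlcs` is drained:

/// Hypothetical helper, not part of the patch: returns candidate channels to the same
/// counterparty, with the channel requested by the onion payload tried first.
fn candidate_channels(forward_chan_id: [u8; 32], peer_channels: &[[u8; 32]]) -> Vec<[u8; 32]> {
    core::iter::once(forward_chan_id)
        .chain(peer_channels.iter().copied().filter(|id| *id != forward_chan_id))
        .collect()
}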
+ let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let chan_0_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001, InitFeatures::known(), InitFeatures::known()); + let chan_1_2a = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 10000, 1001, InitFeatures::known(), InitFeatures::known()); + let chan_1_2b = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1000000, 100001, InitFeatures::known(), InitFeatures::known()); + + let mut hops = Vec::with_capacity(2); + hops.push(RouteHop { + pubkey: nodes[1].node.get_our_node_id(), + node_features: NodeFeatures::known(), + short_channel_id: chan_0_1.0.contents.short_channel_id, + channel_features: ChannelFeatures::known(), + fee_msat: 1000, + cltv_expiry_delta: chan_1_2a.0.contents.cltv_expiry_delta as u32 + }); + hops.push(RouteHop { + pubkey: nodes[2].node.get_our_node_id(), + node_features: NodeFeatures::known(), + short_channel_id: chan_1_2a.0.contents.short_channel_id, + channel_features: ChannelFeatures::known(), + fee_msat: 2000000, + cltv_expiry_delta: TEST_FINAL_CLTV + }); + + let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 2000000); + // Explicitly specify that `chan_1_2a` is the last hop in the `route`, which doesn't have + // enough liquidity to forward 2000000 msat from `nodes[1]` to `nodes[2]`. + route = Route { paths: vec![hops], payment_params: None }; + + nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap(); + check_added_monitors!(nodes[0], 1); + + let mut send_events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(send_events.len(), 1); + + // Ensure that `chan_1_2b` is used instead of `chan_1_2a` when the htlc is actually forwarded. + let expected_channels = vec![chan_0_1.2, chan_1_2b.2]; + do_pass_along_path(&nodes[0], &[&nodes[1], &nodes[2]], 2000000, payment_hash, Some(payment_secret), send_events[0].clone(), true, false, None, Some(&expected_channels)); + + expect_payment_received!(nodes[2], payment_hash, payment_secret, 2000000); + + nodes[2].node.claim_funds(payment_preimage); + check_added_monitors!(nodes[2], 1); + expect_payment_claimed!(nodes[2], payment_hash, 2000000); + + // Clear `nodes[2]`'s pending `UpdateFulfillHTLC` msg, as handling it isn't relevant for this + // test.
+ nodes[2].node.get_and_clear_pending_msg_events(); + } + #[test] fn holding_cell_htlc_counting() { // Tests that HTLCs in the holding cell count towards the pending HTLC limits on outbound HTLCs @@ -10080,8 +10137,8 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) { // Send the payment through to nodes[3] *without* clearing the PaymentReceived event let mut send_events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(send_events.len(), 2); - do_pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), send_events[0].clone(), true, false, None); - do_pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), send_events[1].clone(), true, false, None); + do_pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), send_events[0].clone(), true, false, None, None); + do_pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), send_events[1].clone(), true, false, None, None); // Now that we have an MPP payment pending, get the latest encoded copies of nodes[3]'s // monitors and ChannelManager, for use later, if we don't want to persist both monitors. From 1dec1f974d8897ef5455cfa2c0f8cdcb5ecfe16d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Tigerstr=C3=B6m?= <11711198+ViktorTigerstrom@users.noreply.github.com> Date: Mon, 27 Jun 2022 22:37:23 +0200 Subject: [PATCH 18/21] f - Move creation of CommitmentUpdate structs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As we now may forward HTLCs over other channels than the channels specified in channel_state.forward_htlcs, we move the creation of CommitmentUpdate outside of the loop for the channel_state.forward_htlcs to ensure that we don't create multiple CommitmentUpdate structs for the same channels. --- lightning/src/ln/channelmanager.rs | 154 +++++++++++++++-------------- 1 file changed, 79 insertions(+), 75 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 4aac12c318e..b879710f572 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3005,6 +3005,36 @@ impl ChannelMana let channel_state = &mut *channel_state_lock; let per_peer_state = self.per_peer_state.read().unwrap(); + let mut htlcs_msgs_by_id: HashMap<[u8; 32], (Vec, Vec, PublicKey)> = HashMap::new(); + + macro_rules! add_channel_key { + ($channel_id: expr, $counterparty_node_id: expr) => {{ + if !htlcs_msgs_by_id.contains_key(&$channel_id){ + htlcs_msgs_by_id.insert($channel_id, (Vec::new(), Vec::new(), $counterparty_node_id)); + } + }} + } + + macro_rules! add_update_add_htlc { + ($add_htlc_msg: expr, $channel_id: expr, $counterparty_node_id: expr) => {{ + add_channel_key!($channel_id, $counterparty_node_id); + if let hash_map::Entry::Occupied(mut entry) = htlcs_msgs_by_id.entry($channel_id) { + let msgs_entry = entry.get_mut(); + msgs_entry.0.push($add_htlc_msg); + } + }} + } + + macro_rules! 
add_update_fail_htlc { + ($fail_htlc_msg: expr, $channel_id: expr, $counterparty_node_id: expr) => {{ + add_channel_key!($channel_id, $counterparty_node_id); + if let hash_map::Entry::Occupied(mut entry) = htlcs_msgs_by_id.entry($channel_id) { + let msgs_entry = entry.get_mut(); + msgs_entry.1.push($fail_htlc_msg); + } + }} + } + for (short_chan_id, mut pending_forwards) in channel_state.forward_htlcs.drain() { if short_chan_id != 0 { let (counterparty_node_id, forward_chan_id) = match channel_state.short_to_chan_info.get(&short_chan_id) { @@ -3082,35 +3112,6 @@ impl ChannelMana let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; if peer_state.channel_by_id.contains_key(&forward_chan_id) { - let mut htlcs_msgs_by_id: HashMap<[u8; 32], (Vec, Vec)> = HashMap::new(); - - macro_rules! add_channel_key { - ($channel_id: expr) => {{ - if !htlcs_msgs_by_id.contains_key(&$channel_id){ - htlcs_msgs_by_id.insert($channel_id, (Vec::new(), Vec::new())); - } - }} - } - - macro_rules! add_update_add_htlc { - ($add_htlc_msg: expr, $channel_id: expr) => {{ - add_channel_key!($channel_id); - if let hash_map::Entry::Occupied(mut entry) = htlcs_msgs_by_id.entry($channel_id) { - let msgs_entry = entry.get_mut(); - msgs_entry.0.push($add_htlc_msg); - } - }} - } - - macro_rules! add_update_fail_htlc { - ($fail_htlc_msg: expr, $channel_id: expr) => {{ - add_channel_key!($channel_id); - if let hash_map::Entry::Occupied(mut entry) = htlcs_msgs_by_id.entry($channel_id) { - let msgs_entry = entry.get_mut(); - msgs_entry.1.push($fail_htlc_msg); - } - }} - } for forward_info in pending_forwards.drain(..) { match forward_info { HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info: PendingHTLCInfo { @@ -3153,7 +3154,7 @@ impl ChannelMana match update_add { Some(msg) => { log_info!(self.logger, "Will forward HTLC with payment_hash {}, over channel {}", log_bytes!(payment_hash.0), log_bytes!(chan_id)); - add_update_add_htlc!(msg, chan_id); + add_update_add_htlc!(msg, chan_id, counterparty_node_id); }, None => { // Nothing to do here...we're waiting on a remote @@ -3196,7 +3197,7 @@ impl ChannelMana // the chain and sending the HTLC-Timeout is their problem. continue; }, - Ok(Some(msg)) => { add_update_fail_htlc!(msg, forward_chan_id); }, + Ok(Some(msg)) => { add_update_fail_htlc!(msg, forward_chan_id, counterparty_node_id); }, Ok(None) => { // Nothing to do here...we're waiting on a remote // revoke_and_ack before we can update the commitment @@ -3211,50 +3212,6 @@ impl ChannelMana }, } } - - for (chan_id, (add_htlc_msgs, fail_htlc_msgs)) in htlcs_msgs_by_id.into_iter() { - if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(chan_id) { - let (commitment_msg, monitor_update) = match chan.get_mut().send_commitment(&self.logger) { - Ok(res) => res, - Err(e) => { - // We surely failed send_commitment due to bad keys, in that case - // close channel and then send error message to peer. - let counterparty_node_id = chan.get().get_counterparty_node_id(); - let err: Result<(), _> = match e { - ChannelError::Ignore(_) | ChannelError::Warn(_) => { - panic!("Stated return value requirements in send_commitment() were not met"); - } - ChannelError::Close(msg) => { - log_trace!(self.logger, "Closing channel {} due to Close-required error: {}", log_bytes!(chan.key()[..]), msg); - let mut channel = remove_channel!(self, channel_state, chan); - // ChannelClosed event is generated by handle_error for us. 
- Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel.channel_id(), channel.get_user_id(), channel.force_shutdown(true), self.get_channel_update_for_broadcast(&channel).ok())) - }, - ChannelError::CloseDelayBroadcast(_) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); } - }; - handle_errors.push((counterparty_node_id, err)); - continue; - } - }; - if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { - handle_errors.push((chan.get().get_counterparty_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true))); - continue; - } - log_debug!(self.logger, "Forwarding HTLCs resulted in a commitment update with {} HTLCs added and {} HTLCs failed for channel {}", - add_htlc_msgs.len(), fail_htlc_msgs.len(), log_bytes!(chan.get().channel_id())); - channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { - node_id: chan.get().get_counterparty_node_id(), - updates: msgs::CommitmentUpdate { - update_add_htlcs: add_htlc_msgs, - update_fulfill_htlcs: Vec::new(), - update_fail_htlcs: fail_htlc_msgs, - update_fail_malformed_htlcs: Vec::new(), - update_fee: None, - commitment_signed: commitment_msg, - }, - }); - } - } } else { let err = Err(MsgHandleErrInternal::send_err_msg_no_close(format!("No such channel for the counterparty_node_id {}, as indicated by the short_to_id map", counterparty_node_id), forward_chan_id)); handle_errors.push((counterparty_node_id, err)); @@ -3438,6 +3395,53 @@ impl ChannelMana } } } + for (chan_id, (add_htlc_msgs, fail_htlc_msgs, counterparty_node_id)) in htlcs_msgs_by_id.into_iter() { + if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(chan_id) { + let (commitment_msg, monitor_update) = match chan.get_mut().send_commitment(&self.logger) { + Ok(res) => res, + Err(e) => { + // We surely failed send_commitment due to bad keys, in that case + // close channel and then send error message to peer. + let counterparty_node_id = chan.get().get_counterparty_node_id(); + let err: Result<(), _> = match e { + ChannelError::Ignore(_) | ChannelError::Warn(_) => { + panic!("Stated return value requirements in send_commitment() were not met"); + } + ChannelError::Close(msg) => { + log_trace!(self.logger, "Closing channel {} due to Close-required error: {}", log_bytes!(chan.key()[..]), msg); + let mut channel = remove_channel!(self, channel_state, chan); + // ChannelClosed event is generated by handle_error for us. 
+ Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel.channel_id(), channel.get_user_id(), channel.force_shutdown(true), self.get_channel_update_for_broadcast(&channel).ok())) + }, + ChannelError::CloseDelayBroadcast(_) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); } + }; + handle_errors.push((counterparty_node_id, err)); + continue; + } + }; + if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { + handle_errors.push((chan.get().get_counterparty_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true))); + continue; + } + log_debug!(self.logger, "Forwarding HTLCs resulted in a commitment update with {} HTLCs added and {} HTLCs failed for channel {}", + add_htlc_msgs.len(), fail_htlc_msgs.len(), log_bytes!(chan.get().channel_id())); + channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { + node_id: chan.get().get_counterparty_node_id(), + updates: msgs::CommitmentUpdate { + update_add_htlcs: add_htlc_msgs, + update_fulfill_htlcs: Vec::new(), + update_fail_htlcs: fail_htlc_msgs, + update_fail_malformed_htlcs: Vec::new(), + update_fee: None, + commitment_signed: commitment_msg, + }, + }); + } + } + } } for (htlc_source, payment_hash, failure_reason) in failed_forwards.drain(..) { From 6b9a261e458d4c01acc29204075290170b28bab6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Tigerstr=C3=B6m?= <11711198+ViktorTigerstrom@users.noreply.github.com> Date: Tue, 28 Jun 2022 00:13:08 +0200 Subject: [PATCH 19/21] -f DRY up htlc_msg macros --- lightning/src/ln/channelmanager.rs | 27 ++++++++++++--------------- 1 file changed, 12 insertions(+), 15 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index b879710f572..091b894ff20 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -71,6 +71,7 @@ use core::ops::Deref; #[cfg(any(test, feature = "std"))] use std::time::Instant; +use std::any::Any; use util::crypto::sign; // We hold various information about HTLC relay in the HTLC objects in Channel itself: @@ -3015,22 +3016,18 @@ impl ChannelMana }} } - macro_rules! add_update_add_htlc { - ($add_htlc_msg: expr, $channel_id: expr, $counterparty_node_id: expr) => {{ + macro_rules! add_update_htlc_msg { + ($htlc_msg: expr, $channel_id: expr, $counterparty_node_id: expr) => {{ add_channel_key!($channel_id, $counterparty_node_id); if let hash_map::Entry::Occupied(mut entry) = htlcs_msgs_by_id.entry($channel_id) { let msgs_entry = entry.get_mut(); - msgs_entry.0.push($add_htlc_msg); - } - }} - } - - macro_rules! 
add_update_fail_htlc { - ($fail_htlc_msg: expr, $channel_id: expr, $counterparty_node_id: expr) => {{ - add_channel_key!($channel_id, $counterparty_node_id); - if let hash_map::Entry::Occupied(mut entry) = htlcs_msgs_by_id.entry($channel_id) { - let msgs_entry = entry.get_mut(); - msgs_entry.1.push($fail_htlc_msg); + if let Some(msg) = (&$htlc_msg as &Any).downcast_ref::() { + msgs_entry.0.push(msg.clone()); + } else if let Some(msg) = (&$htlc_msg as &Any).downcast_ref::() { + msgs_entry.1.push(msg.clone()); + } else { + panic!("Only UpdateAddHTLC or UpdateFailHTLC msgs supported for add_update_htlc_msg"); + } } }} } @@ -3154,7 +3151,7 @@ impl ChannelMana match update_add { Some(msg) => { log_info!(self.logger, "Will forward HTLC with payment_hash {}, over channel {}", log_bytes!(payment_hash.0), log_bytes!(chan_id)); - add_update_add_htlc!(msg, chan_id, counterparty_node_id); + add_update_htlc_msg!(msg, chan_id, counterparty_node_id); }, None => { // Nothing to do here...we're waiting on a remote @@ -3197,7 +3194,7 @@ impl ChannelMana // the chain and sending the HTLC-Timeout is their problem. continue; }, - Ok(Some(msg)) => { add_update_fail_htlc!(msg, forward_chan_id, counterparty_node_id); }, + Ok(Some(msg)) => { add_update_htlc_msg!(msg, forward_chan_id, counterparty_node_id); }, Ok(None) => { // Nothing to do here...we're waiting on a remote // revoke_and_ack before we can update the commitment From 13dbfe30d7829a78a76f92c3c391d87e6df801f0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Tigerstr=C3=B6m?= <11711198+ViktorTigerstrom@users.noreply.github.com> Date: Wed, 29 Jun 2022 12:21:36 +0200 Subject: [PATCH 20/21] f - cleanup channel selection order --- lightning/src/ln/channelmanager.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 091b894ff20..c4178d0e3ee 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3129,10 +3129,11 @@ impl ChannelMana // Attempt to forward the HTLC over all available channels // to the peer, but attempt to forward the HTLC over the // channel specified in the onion payload first. 
- let mut counterparty_channel_ids = peer_state.channel_by_id.keys() - .filter(|chan_id| **chan_id != forward_chan_id) - .map(|chan_id| *chan_id).collect::>(); - counterparty_channel_ids.insert(0, forward_chan_id); + let counterparty_channel_ids = core::iter::once(forward_chan_id) + .chain(peer_state.channel_by_id.keys() + .filter(|&chan_id| *chan_id != forward_chan_id) + .map(|&chan_id| chan_id)) + .collect::>(); let mut send_succeeded = false; for chan_id in counterparty_channel_ids { match peer_state.channel_by_id.get_mut(&chan_id).unwrap().send_htlc(amt_to_forward, payment_hash, outgoing_cltv_value, htlc_source.clone(), onion_packet.clone(), &self.logger) { From 92a067cc95786624484c15fb965c2bbd9c02ce5b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20Tigerstr=C3=B6m?= <11711198+ViktorTigerstrom@users.noreply.github.com> Date: Wed, 29 Jun 2022 12:22:42 +0200 Subject: [PATCH 21/21] f - cleanup tests --- lightning/src/ln/functional_tests.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index f8c3d8b26af..1a89186ad4b 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -1131,10 +1131,10 @@ fn test_forward_through_substitute_channel() { cltv_expiry_delta: TEST_FINAL_CLTV }); - let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 2000000); + let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[2], Some(2000000)); // Explicitly specify that `chan_1_2a` is the last hop in the `route`, which doesn't have // enough liquidity to forward 2000000 msat from `nodes[1]` to `nodes[2]`. - route = Route { paths: vec![hops], payment_params: None }; + let route = Route { paths: vec![hops], payment_params: None }; nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap(); check_added_monitors!(nodes[0], 1);