Skip to content

Commit

Permalink
Refuse to send and forward OMs to disconnected peers
Browse files Browse the repository at this point in the history
We also refuse to connect to peers that don't advertise onion message
forwarding support.

However, we don't always enforce this in fuzzing, to maximize coverage.
  • Loading branch information
valentinewallace committed Aug 31, 2022
1 parent f49e04d commit 366662e
Show file tree
Hide file tree
Showing 5 changed files with 71 additions and 14 deletions.
7 changes: 6 additions & 1 deletion lightning/src/ln/features.rs
Original file line number Diff line number Diff line change
Expand Up @@ -433,6 +433,11 @@ mod sealed {
define_feature!(27, ShutdownAnySegwit, [InitContext, NodeContext],
"Feature flags for `opt_shutdown_anysegwit`.", set_shutdown_any_segwit_optional,
set_shutdown_any_segwit_required, supports_shutdown_anysegwit, requires_shutdown_anysegwit);
// We do not yet advertise the onion messages feature bit (39), but we need to detect when
// peers support it so that we only connect to, and forward onion messages to, peers that
// can handle them.
define_feature!(39, OnionMessages, [InitContext, NodeContext],
	"Feature flags for `option_onion_messages`.", set_onion_messages_optional,
	set_onion_messages_required, supports_onion_messages, requires_onion_messages);
define_feature!(45, ChannelType, [InitContext, NodeContext],
"Feature flags for `option_channel_type`.", set_channel_type_optional,
set_channel_type_required, supports_channel_type, requires_channel_type);
Expand Down Expand Up @@ -767,7 +772,7 @@ impl<T: sealed::GossipQueries> Features<T> {

impl<T: sealed::InitialRoutingSync> Features<T> {
// We are no longer setting initial_routing_sync now that gossip_queries
// is enabled. This feature is ignored by a peer when gossip_queries has
// is enabled. This feature is ignored by a peer when gossip_queries has
// been negotiated.
#[cfg(test)]
pub(crate) fn clear_initial_routing_sync(&mut self) {
Expand Down
6 changes: 6 additions & 0 deletions lightning/src/ln/msgs.rs
Original file line number Diff line number Diff line change
Expand Up @@ -949,6 +949,12 @@ pub trait RoutingMessageHandler : MessageSendEventsProvider {
/// A handler for received [`OnionMessage`]s and for providing generated ones to send.
pub trait OnionMessageHandler : OnionMessageProvider {
	/// Handle an incoming onion_message message from the given peer.
	fn handle_onion_message(&self, peer_node_id: &PublicKey, msg: &OnionMessage);
	/// Called when a connection is established with a peer. Can be used to track which peers
	/// advertise onion message support and are online.
	fn peer_connected(&self, their_node_id: &PublicKey, init: &Init);
	/// Indicates a connection to the peer failed/an existing connection was lost. Allows handlers to
	/// drop and refuse to forward onion messages to this peer.
	fn peer_disconnected(&self, their_node_id: &PublicKey, no_connection_possible: bool);
}

mod fuzzy_internal_msgs {
Expand Down
10 changes: 9 additions & 1 deletion lightning/src/ln/peer_handler.rs
Original file line number Diff line number Diff line change
Expand Up @@ -81,6 +81,8 @@ impl OnionMessageProvider for IgnoringMessageHandler {
}
// An OnionMessageHandler that drops all onion messages and ignores peer (dis)connection
// events, for users that don't want to process onion messages at all.
impl OnionMessageHandler for IgnoringMessageHandler {
	fn handle_onion_message(&self, _their_node_id: &PublicKey, _msg: &msgs::OnionMessage) {}
	fn peer_connected(&self, _their_node_id: &PublicKey, _init: &msgs::Init) {}
	fn peer_disconnected(&self, _their_node_id: &PublicKey, _no_connection_possible: bool) {}
}
impl Deref for IgnoringMessageHandler {
type Target = IgnoringMessageHandler;
Expand Down Expand Up @@ -1173,8 +1175,9 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
}

self.message_handler.route_handler.peer_connected(&their_node_id, &msg);

self.message_handler.chan_handler.peer_connected(&their_node_id, &msg);
self.message_handler.onion_message_handler.peer_connected(&their_node_id, &msg);

peer_lock.their_features = Some(msg.features);
return Ok(None);
} else if peer_lock.their_features.is_none() {
Expand Down Expand Up @@ -1729,6 +1732,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
}
descriptor.disconnect_socket();
self.message_handler.chan_handler.peer_disconnected(&node_id, false);
self.message_handler.onion_message_handler.peer_disconnected(&node_id, false);
}
}
}
Expand Down Expand Up @@ -1756,6 +1760,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
log_pubkey!(node_id), if no_connection_possible { "no " } else { "" });
self.node_id_to_descriptor.lock().unwrap().remove(&node_id);
self.message_handler.chan_handler.peer_disconnected(&node_id, no_connection_possible);
self.message_handler.onion_message_handler.peer_disconnected(&node_id, no_connection_possible);
}
}
};
Expand All @@ -1776,6 +1781,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
log_trace!(self.logger, "Disconnecting peer with id {} due to client request", node_id);
peers_lock.remove(&descriptor);
self.message_handler.chan_handler.peer_disconnected(&node_id, no_connection_possible);
self.message_handler.onion_message_handler.peer_disconnected(&node_id, no_connection_possible);
descriptor.disconnect_socket();
}
}
Expand All @@ -1791,6 +1797,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
if let Some(node_id) = peer.lock().unwrap().their_node_id {
log_trace!(self.logger, "Disconnecting peer with id {} due to client request to disconnect all peers", node_id);
self.message_handler.chan_handler.peer_disconnected(&node_id, false);
self.message_handler.onion_message_handler.peer_disconnected(&node_id, false);
}
descriptor.disconnect_socket();
}
Expand Down Expand Up @@ -1881,6 +1888,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
log_trace!(self.logger, "Disconnecting peer with id {} due to ping timeout", node_id);
self.node_id_to_descriptor.lock().unwrap().remove(&node_id);
self.message_handler.chan_handler.peer_disconnected(&node_id, false);
self.message_handler.onion_message_handler.peer_disconnected(&node_id, false);
}
}
}
Expand Down
27 changes: 16 additions & 11 deletions lightning/src/onion_message/functional_tests.rs
Original file line number Diff line number Diff line change
Expand Up @@ -10,13 +10,14 @@
//! Onion message testing and test utilities live here.
use chain::keysinterface::{KeysInterface, Recipient};
use ln::msgs::OnionMessageHandler;
use ln::features::InitFeatures;
use ln::msgs::{self, OnionMessageHandler};
use super::{BlindedRoute, Destination, OnionMessenger, SendError};
use util::enforcing_trait_impls::EnforcingSigner;
use util::test_utils;

use bitcoin::network::constants::Network;
use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
use bitcoin::secp256k1::{PublicKey, Secp256k1};

use sync::Arc;

Expand All @@ -34,26 +35,33 @@ impl MessengerNode {
}

fn create_nodes(num_messengers: u8) -> Vec<MessengerNode> {
let mut res = Vec::new();
let mut nodes = Vec::new();
for i in 0..num_messengers {
let logger = Arc::new(test_utils::TestLogger::with_id(format!("node {}", i)));
let seed = [i as u8; 32];
let keys_manager = Arc::new(test_utils::TestKeysInterface::new(&seed, Network::Testnet));
res.push(MessengerNode {
nodes.push(MessengerNode {
keys_manager: keys_manager.clone(),
messenger: OnionMessenger::new(keys_manager, logger.clone()),
logger,
});
}
res
for idx in 0..num_messengers - 1 {
let i = idx as usize;
let mut features = InitFeatures::known();
features.set_onion_messages_optional();
let init_msg = msgs::Init { features, remote_network_address: None };
nodes[i].messenger.peer_connected(&nodes[i + 1].get_node_pk(), &init_msg.clone());
nodes[i + 1].messenger.peer_connected(&nodes[i].get_node_pk(), &init_msg.clone());
}
nodes
}

fn pass_along_path(path: &Vec<MessengerNode>, expected_path_id: Option<[u8; 32]>) {
let mut prev_node = &path[0];
let num_nodes = path.len();
for (idx, node) in path.into_iter().skip(1).enumerate() {
let events = prev_node.messenger.release_pending_msgs();
assert_eq!(events.len(), 1);
let onion_msg = {
let msgs = events.get(&node.get_node_pk()).unwrap();
assert_eq!(msgs.len(), 1);
Expand Down Expand Up @@ -110,12 +118,9 @@ fn three_blinded_hops() {
#[test]
fn too_big_packet_error() {
// Make sure we error as expected if a packet is too big to send.
let nodes = create_nodes(1);

let hop_secret = SecretKey::from_slice(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap()[..]).unwrap();
let secp_ctx = Secp256k1::new();
let hop_node_id = PublicKey::from_secret_key(&secp_ctx, &hop_secret);
let nodes = create_nodes(2);

let hop_node_id = nodes[1].get_node_pk();
let hops = [hop_node_id; 400];
let err = nodes[0].messenger.send_onion_message(&hops, Destination::Node(hop_node_id), None).unwrap_err();
assert_eq!(err, SendError::TooBigPacket);
Expand Down
35 changes: 34 additions & 1 deletion lightning/src/onion_message/messenger.rs
Original file line number Diff line number Diff line change
Expand Up @@ -120,6 +120,8 @@ pub enum SendError {
/// The provided [`Destination`] was an invalid [`BlindedRoute`], due to having fewer than two
/// blinded hops.
TooFewBlindedHops,
/// Our next-hop peer was offline or does not support onion message forwarding.
InvalidFirstHop,
}

impl<Signer: Sign, K: Deref, L: Deref> OnionMessenger<Signer, K, L>
Expand Down Expand Up @@ -158,6 +160,9 @@ impl<Signer: Sign, K: Deref, L: Deref> OnionMessenger<Signer, K, L>
(introduction_node_id, blinding_point),
}
};
if !self.peer_is_connected(&introduction_node_id) {
return Err(SendError::InvalidFirstHop)
}
let (packet_payloads, packet_keys) = packet_payloads_and_keys(
&self.secp_ctx, intermediate_nodes, destination, reply_path, &blinding_secret)
.map_err(|e| SendError::Secp256k1(e))?;
Expand All @@ -177,11 +182,20 @@ impl<Signer: Sign, K: Deref, L: Deref> OnionMessenger<Signer, K, L>
Ok(())
}

/// Whether we currently hold a pending-message buffer for this peer. A buffer exists only
/// for peers that connected advertising onion message support and have not disconnected
/// since (see `peer_connected`/`peer_disconnected`).
fn peer_is_connected(&self, peer_node_id: &PublicKey) -> bool {
	let buffers = self.pending_messages.lock().unwrap();
	buffers.contains_key(peer_node_id)
}

#[cfg(test)]
/// Drains and returns each connected peer's queued onion messages.
///
/// We don't want to "disconnect" peers by removing their entries from the original map
/// (an entry's presence is how `peer_is_connected` tracks connectedness), so we take each
/// peer's message buffer individually, leaving an empty buffer behind.
pub(super) fn release_pending_msgs(&self) -> HashMap<PublicKey, VecDeque<msgs::OnionMessage>> {
	let mut pending_msgs = self.pending_messages.lock().unwrap();
	let mut msgs = HashMap::new();
	for (peer_node_id, pending_messages) in &mut *pending_msgs {
		// `mem::take` swaps out the queued messages and leaves a fresh empty VecDeque in place.
		msgs.insert(*peer_node_id, core::mem::take(pending_messages));
	}
	msgs
}
}
Expand Down Expand Up @@ -230,6 +244,13 @@ impl<Signer: Sign, K: Deref, L: Deref> OnionMessageHandler for OnionMessenger<Si
Ok((Payload::Forward(ForwardControlTlvs::Unblinded(ForwardTlvs {
next_node_id, next_blinding_override
})), Some((next_hop_hmac, new_packet_bytes)))) => {
if !self.peer_is_connected(&next_node_id) {
#[cfg(not(fuzzing))]
{
log_trace!(self.logger, "Dropping onion message to disconnected peer {:?}", next_node_id);
return
}
}
// TODO: we need to check whether `next_node_id` is our node, in which case this is a dummy
// blinded hop and this onion message is destined for us. In this situation, we should keep
// unwrapping the onion layers to get to the final payload. Since we don't have the option
Expand Down Expand Up @@ -285,6 +306,18 @@ impl<Signer: Sign, K: Deref, L: Deref> OnionMessageHandler for OnionMessenger<Si
},
};
}

/// Track the newly-connected peer by creating an empty pending-message buffer for it, but
/// only when it advertises onion message support — peers without the feature never get a
/// buffer and so will never be selected as a send or forward target.
fn peer_connected(&self, their_node_id: &PublicKey, init: &msgs::Init) {
	if !init.features.supports_onion_messages() { return }
	let mut peers = self.pending_messages.lock().unwrap();
	peers.insert(their_node_id.clone(), VecDeque::new());
}

/// Forget the peer entirely: removing its entry both drops any onion messages still queued
/// for it and marks it disconnected, so we refuse to send/forward to it from now on.
fn peer_disconnected(&self, their_node_id: &PublicKey, _no_connection_possible: bool) {
	self.pending_messages.lock().unwrap().remove(their_node_id);
}
}

impl<Signer: Sign, K: Deref, L: Deref> OnionMessageProvider for OnionMessenger<Signer, K, L>
Expand Down

0 comments on commit 366662e

Please sign in to comment.