From df9f3c0f49db9bd1b6cd42f6b0a44b3d7fe4cb42 Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Thu, 7 Mar 2024 15:12:27 -0800 Subject: [PATCH 1/2] Add expected_htlc_destination argument to run_onion_failure_test This argument will be asserted on in a future commit to ensure we obtain the intended `HTLCHandlingFailed::failed_next_destination` per HTLC failure. --- lightning/src/ln/onion_route_tests.rs | 72 ++++++++++++++------------- 1 file changed, 37 insertions(+), 35 deletions(-) diff --git a/lightning/src/ln/onion_route_tests.rs b/lightning/src/ln/onion_route_tests.rs index abd930a9a91..1736103def0 100644 --- a/lightning/src/ln/onion_route_tests.rs +++ b/lightning/src/ln/onion_route_tests.rs @@ -44,11 +44,11 @@ use bitcoin::hex::FromHex; use crate::ln::functional_test_utils::*; -fn run_onion_failure_test<F1,F2>(_name: &str, test_case: u8, nodes: &Vec<Node>, route: &Route, payment_hash: &PaymentHash, payment_secret: &PaymentSecret, callback_msg: F1, callback_node: F2, expected_retryable: bool, expected_error_code: Option<u16>, expected_channel_update: Option<NetworkUpdate>, expected_short_channel_id: Option<u64>) +fn run_onion_failure_test<F1,F2>(_name: &str, test_case: u8, nodes: &Vec<Node>, route: &Route, payment_hash: &PaymentHash, payment_secret: &PaymentSecret, callback_msg: F1, callback_node: F2, expected_retryable: bool, expected_error_code: Option<u16>, expected_channel_update: Option<NetworkUpdate>, expected_short_channel_id: Option<u64>, expected_htlc_destination: Option<HTLCDestination>) where F1: for <'a> FnMut(&'a mut msgs::UpdateAddHTLC), F2: FnMut(), { - run_onion_failure_test_with_fail_intercept(_name, test_case, nodes, route, payment_hash, payment_secret, callback_msg, |_|{}, callback_node, expected_retryable, expected_error_code, expected_channel_update, expected_short_channel_id); + run_onion_failure_test_with_fail_intercept(_name, test_case, nodes, route, payment_hash, payment_secret, callback_msg, |_|{}, callback_node, expected_retryable, expected_error_code, expected_channel_update, expected_short_channel_id, expected_htlc_destination); } // test_case @@ -62,7 +62,8 @@ fn run_onion_failure_test_with_fail_intercept<F1,F2,F3>( _name: &str, test_case: u8, nodes: &Vec<Node>, route: &Route, payment_hash: &PaymentHash, payment_secret: &PaymentSecret, mut callback_msg: F1, mut callback_fail: F2, mut callback_node: F3, expected_retryable: bool, expected_error_code: Option<u16>, - expected_channel_update: Option<NetworkUpdate>, expected_short_channel_id: Option<u64> + expected_channel_update: Option<NetworkUpdate>, expected_short_channel_id: Option<u64>, + expected_htlc_destination: Option<HTLCDestination>, ) where F1: for <'a> FnMut(&'a mut msgs::UpdateAddHTLC), F2: for <'a> FnMut(&'a mut msgs::UpdateFailHTLC), F3: FnMut(), @@ -302,7 +303,7 @@ fn test_fee_failures() { let short_channel_id = channels[1].0.contents.short_channel_id; run_onion_failure_test("fee_insufficient", 0, &nodes, &route, &payment_hash, &payment_secret, |msg| { msg.amount_msat -= 1; - }, || {}, true, Some(UPDATE|12), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false}), Some(short_channel_id)); + }, || {}, true, Some(UPDATE|12), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false}), Some(short_channel_id), None); // In an earlier version, we spuriously failed to forward payments if the expected feerate // changed between the channel open and the payment. @@ -365,7 +366,7 @@ fn test_onion_failure() { // describing a length-1 TLV payload, which is obviously bogus.
new_payloads[0].data[0] = 1; msg.onion_routing_packet = onion_utils::construct_onion_packet_with_writable_hopdata(new_payloads, onion_keys, [0; 32], &payment_hash).unwrap(); - }, ||{}, true, Some(PERM|22), Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent: true}), Some(short_channel_id)); + }, ||{}, true, Some(PERM|22), Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent: true}), Some(short_channel_id), None); // final node failure let short_channel_id = channels[1].0.contents.short_channel_id; @@ -384,7 +385,7 @@ fn test_onion_failure() { // length-1 TLV payload, which is obviously bogus. new_payloads[1].data[0] = 1; msg.onion_routing_packet = onion_utils::construct_onion_packet_with_writable_hopdata(new_payloads, onion_keys, [0; 32], &payment_hash).unwrap(); - }, ||{}, false, Some(PERM|22), Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent: true}), Some(short_channel_id)); + }, ||{}, false, Some(PERM|22), Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent: true}), Some(short_channel_id), None); // the following three with run_onion_failure_test_with_fail_intercept() test only the origin node // receiving simulated fail messages @@ -397,7 +398,7 @@ fn test_onion_failure() { let session_priv = SecretKey::from_slice(&[3; 32]).unwrap(); let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap(); msg.reason = onion_utils::build_first_hop_failure_packet(onion_keys[0].shared_secret.as_ref(), NODE|2, &[0;0]); - }, ||{}, true, Some(NODE|2), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[0].pubkey, is_permanent: false}), Some(route.paths[0].hops[0].short_channel_id)); + }, ||{}, true, Some(NODE|2), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[0].pubkey, is_permanent: false}), Some(route.paths[0].hops[0].short_channel_id), None); // final node failure run_onion_failure_test_with_fail_intercept("temporary_node_failure", 200, &nodes, &route, &payment_hash, &payment_secret, |_msg| {}, |msg| { @@ -407,7 +408,7 @@ fn test_onion_failure() { msg.reason = onion_utils::build_first_hop_failure_packet(onion_keys[1].shared_secret.as_ref(), NODE|2, &[0;0]); }, ||{ nodes[2].node.fail_htlc_backwards(&payment_hash); - }, true, Some(NODE|2), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[1].pubkey, is_permanent: false}), Some(route.paths[0].hops[1].short_channel_id)); + }, true, Some(NODE|2), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[1].pubkey, is_permanent: false}), Some(route.paths[0].hops[1].short_channel_id), None); let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[2]); // intermediate node failure @@ -417,7 +418,7 @@ fn test_onion_failure() { let session_priv = SecretKey::from_slice(&[3; 32]).unwrap(); let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap(); msg.reason = onion_utils::build_first_hop_failure_packet(onion_keys[0].shared_secret.as_ref(), PERM|NODE|2, &[0;0]); - }, ||{}, true, Some(PERM|NODE|2), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[0].pubkey, is_permanent: true}), Some(route.paths[0].hops[0].short_channel_id)); + }, ||{}, true, Some(PERM|NODE|2), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[0].pubkey, is_permanent: true}), Some(route.paths[0].hops[0].short_channel_id), None); // final node failure run_onion_failure_test_with_fail_intercept("permanent_node_failure", 200, &nodes, &route, &payment_hash, 
&payment_secret, |_msg| {}, |msg| { @@ -426,7 +427,7 @@ fn test_onion_failure() { msg.reason = onion_utils::build_first_hop_failure_packet(onion_keys[1].shared_secret.as_ref(), PERM|NODE|2, &[0;0]); }, ||{ nodes[2].node.fail_htlc_backwards(&payment_hash); - }, false, Some(PERM|NODE|2), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[1].pubkey, is_permanent: true}), Some(route.paths[0].hops[1].short_channel_id)); + }, false, Some(PERM|NODE|2), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[1].pubkey, is_permanent: true}), Some(route.paths[0].hops[1].short_channel_id), None); let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[2]); // intermediate node failure @@ -438,7 +439,7 @@ fn test_onion_failure() { msg.reason = onion_utils::build_first_hop_failure_packet(onion_keys[0].shared_secret.as_ref(), PERM|NODE|3, &[0;0]); }, ||{ nodes[2].node.fail_htlc_backwards(&payment_hash); - }, true, Some(PERM|NODE|3), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[0].pubkey, is_permanent: true}), Some(route.paths[0].hops[0].short_channel_id)); + }, true, Some(PERM|NODE|3), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[0].pubkey, is_permanent: true}), Some(route.paths[0].hops[0].short_channel_id), None); // final node failure run_onion_failure_test_with_fail_intercept("required_node_feature_missing", 200, &nodes, &route, &payment_hash, &payment_secret, |_msg| {}, |msg| { @@ -447,20 +448,20 @@ fn test_onion_failure() { msg.reason = onion_utils::build_first_hop_failure_packet(onion_keys[1].shared_secret.as_ref(), PERM|NODE|3, &[0;0]); }, ||{ nodes[2].node.fail_htlc_backwards(&payment_hash); - }, false, Some(PERM|NODE|3), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[1].pubkey, is_permanent: true}), Some(route.paths[0].hops[1].short_channel_id)); + }, false, Some(PERM|NODE|3), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[1].pubkey, is_permanent: true}), Some(route.paths[0].hops[1].short_channel_id), None); let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[2]); // Our immediate peer sent UpdateFailMalformedHTLC because it couldn't understand the onion in // the UpdateAddHTLC that we sent. 
let short_channel_id = channels[0].0.contents.short_channel_id; run_onion_failure_test("invalid_onion_version", 0, &nodes, &route, &payment_hash, &payment_secret, |msg| { msg.onion_routing_packet.version = 1; }, ||{}, true, - Some(BADONION|PERM|4), None, Some(short_channel_id)); + Some(BADONION|PERM|4), None, Some(short_channel_id), None); run_onion_failure_test("invalid_onion_hmac", 0, &nodes, &route, &payment_hash, &payment_secret, |msg| { msg.onion_routing_packet.hmac = [3; 32]; }, ||{}, true, - Some(BADONION|PERM|5), None, Some(short_channel_id)); + Some(BADONION|PERM|5), None, Some(short_channel_id), None); run_onion_failure_test("invalid_onion_key", 0, &nodes, &route, &payment_hash, &payment_secret, |msg| { msg.onion_routing_packet.public_key = Err(secp256k1::Error::InvalidPublicKey);}, ||{}, true, - Some(BADONION|PERM|6), None, Some(short_channel_id)); + Some(BADONION|PERM|6), None, Some(short_channel_id), None); let short_channel_id = channels[1].0.contents.short_channel_id; let chan_update = ChannelUpdate::dummy(short_channel_id); @@ -477,7 +478,7 @@ fn test_onion_failure() { msg.reason = onion_utils::build_first_hop_failure_packet(onion_keys[0].shared_secret.as_ref(), UPDATE|7, &err_data); }, ||{}, true, Some(UPDATE|7), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false }), - Some(short_channel_id)); + Some(short_channel_id), None); // Check we can still handle onion failures that include channel updates without a type prefix let err_data_without_type = chan_update.encode_with_len(); @@ -489,7 +490,7 @@ fn test_onion_failure() { msg.reason = onion_utils::build_first_hop_failure_packet(onion_keys[0].shared_secret.as_ref(), UPDATE|7, &err_data_without_type); }, ||{}, true, Some(UPDATE|7), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false }), - Some(short_channel_id)); + Some(short_channel_id), None); let short_channel_id = channels[1].0.contents.short_channel_id; run_onion_failure_test_with_fail_intercept("permanent_channel_failure", 100, &nodes, &route, &payment_hash, &payment_secret, |msg| { @@ -499,7 +500,7 @@ fn test_onion_failure() { let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap(); msg.reason = onion_utils::build_first_hop_failure_packet(onion_keys[0].shared_secret.as_ref(), PERM|8, &[0;0]); // short_channel_id from the processing node - }, ||{}, true, Some(PERM|8), Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent: true}), Some(short_channel_id)); + }, ||{}, true, Some(PERM|8), Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent: true}), Some(short_channel_id), None); let short_channel_id = channels[1].0.contents.short_channel_id; run_onion_failure_test_with_fail_intercept("required_channel_feature_missing", 100, &nodes, &route, &payment_hash, &payment_secret, |msg| { @@ -509,13 +510,13 @@ fn test_onion_failure() { let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap(); msg.reason = onion_utils::build_first_hop_failure_packet(onion_keys[0].shared_secret.as_ref(), PERM|9, &[0;0]); // short_channel_id from the processing node - }, ||{}, true, Some(PERM|9), Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent: true}), Some(short_channel_id)); + }, ||{}, true, Some(PERM|9), Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent: true}), Some(short_channel_id), None); let mut bogus_route = route.clone(); bogus_route.paths[0].hops[1].short_channel_id -= 1; let 
short_channel_id = bogus_route.paths[0].hops[1].short_channel_id; run_onion_failure_test("unknown_next_peer", 0, &nodes, &bogus_route, &payment_hash, &payment_secret, |_| {}, ||{}, true, Some(PERM|10), - Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent:true}), Some(short_channel_id)); + Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent:true}), Some(short_channel_id), None); let short_channel_id = channels[1].0.contents.short_channel_id; let amt_to_forward = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[2].node.get_our_node_id()) @@ -526,7 +527,7 @@ fn test_onion_failure() { bogus_route.paths[0].hops[route_len-1].fee_msat = amt_to_forward; run_onion_failure_test("amount_below_minimum", 0, &nodes, &bogus_route, &payment_hash, &payment_secret, |_| {}, ||{}, true, Some(UPDATE|11), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false }), - Some(short_channel_id)); + Some(short_channel_id), None); // Clear pending payments so that the following positive test has the correct payment hash. for node in nodes.iter() { @@ -543,13 +544,13 @@ fn test_onion_failure() { let short_channel_id = channels[1].0.contents.short_channel_id; run_onion_failure_test("fee_insufficient", 0, &nodes, &route, &payment_hash, &payment_secret, |msg| { msg.amount_msat -= 1; - }, || {}, true, Some(UPDATE|12), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false}), Some(short_channel_id)); + }, || {}, true, Some(UPDATE|12), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false}), Some(short_channel_id), None); let short_channel_id = channels[1].0.contents.short_channel_id; run_onion_failure_test("incorrect_cltv_expiry", 0, &nodes, &route, &payment_hash, &payment_secret, |msg| { // need to violate: cltv_expiry - cltv_expiry_delta >= outgoing_cltv_value msg.cltv_expiry -= 1; - }, || {}, true, Some(UPDATE|13), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false}), Some(short_channel_id)); + }, || {}, true, Some(UPDATE|13), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false}), Some(short_channel_id), None); let short_channel_id = channels[1].0.contents.short_channel_id; run_onion_failure_test("expiry_too_soon", 0, &nodes, &route, &payment_hash, &payment_secret, |msg| { @@ -559,11 +560,11 @@ fn test_onion_failure() { connect_blocks(&nodes[2], height - nodes[2].best_block_info().1); }, ||{}, true, Some(UPDATE|14), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false }), - Some(short_channel_id)); + Some(short_channel_id), None); run_onion_failure_test("unknown_payment_hash", 2, &nodes, &route, &payment_hash, &payment_secret, |_| {}, || { nodes[2].node.fail_htlc_backwards(&payment_hash); - }, false, Some(PERM|15), None, None); + }, false, Some(PERM|15), None, None, None); let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[2]); run_onion_failure_test("final_expiry_too_soon", 1, &nodes, &route, &payment_hash, &payment_secret, |msg| { @@ -571,7 +572,7 @@ fn test_onion_failure() { connect_blocks(&nodes[0], height - nodes[0].best_block_info().1); connect_blocks(&nodes[1], height - nodes[1].best_block_info().1); connect_blocks(&nodes[2], height - nodes[2].best_block_info().1); - }, || {}, false, Some(0x4000 | 15), None, None); + }, || {}, false, Some(0x4000 | 15), None, None, None); run_onion_failure_test("final_incorrect_cltv_expiry", 1, &nodes, &route, &payment_hash, &payment_secret, |_| {}, || { for (_, pending_forwards) in 
nodes[1].node.forward_htlcs.lock().unwrap().iter_mut() { @@ -583,7 +584,7 @@ fn test_onion_failure() { } } } - }, true, Some(18), None, Some(channels[1].0.contents.short_channel_id)); + }, true, Some(18), None, Some(channels[1].0.contents.short_channel_id), None); run_onion_failure_test("final_incorrect_htlc_amount", 1, &nodes, &route, &payment_hash, &payment_secret, |_| {}, || { // violate amt_to_forward > msg.amount_msat @@ -596,7 +597,7 @@ fn test_onion_failure() { } } } - }, true, Some(19), None, Some(channels[1].0.contents.short_channel_id)); + }, true, Some(19), None, Some(channels[1].0.contents.short_channel_id), None); let short_channel_id = channels[1].0.contents.short_channel_id; run_onion_failure_test("channel_disabled", 0, &nodes, &route, &payment_hash, &payment_secret, |_| {}, || { @@ -605,7 +606,7 @@ fn test_onion_failure() { nodes[2].node.peer_disconnected(nodes[1].node.get_our_node_id()); }, true, Some(UPDATE|7), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false }), - Some(short_channel_id)); + Some(short_channel_id), None); run_onion_failure_test("channel_disabled", 0, &nodes, &route, &payment_hash, &payment_secret, |_| {}, || { // disconnect event to the channel between nodes[1] ~ nodes[2] for _ in 0..DISABLE_GOSSIP_TICKS + 1 { @@ -616,7 +617,7 @@ fn test_onion_failure() { nodes[2].node.get_and_clear_pending_msg_events(); }, true, Some(UPDATE|20), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false }), - Some(short_channel_id)); + Some(short_channel_id), None); reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[2])); run_onion_failure_test("expiry_too_far", 0, &nodes, &route, &payment_hash, &payment_secret, |msg| { @@ -631,7 +632,7 @@ fn test_onion_failure() { let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap(); msg.cltv_expiry = htlc_cltv; msg.onion_routing_packet = onion_packet; - }, ||{}, true, Some(21), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[0].pubkey, is_permanent: true}), Some(route.paths[0].hops[0].short_channel_id)); + }, ||{}, true, Some(21), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[0].pubkey, is_permanent: true}), Some(route.paths[0].hops[0].short_channel_id), None); run_onion_failure_test_with_fail_intercept("mpp_timeout", 200, &nodes, &route, &payment_hash, &payment_secret, |_msg| {}, |msg| { // Tamper returning error message @@ -640,7 +641,7 @@ fn test_onion_failure() { msg.reason = onion_utils::build_first_hop_failure_packet(onion_keys[1].shared_secret.as_ref(), 23, &[0;0]); }, ||{ nodes[2].node.fail_htlc_backwards(&payment_hash); - }, true, Some(23), None, None); + }, true, Some(23), None, None, None); run_onion_failure_test_with_fail_intercept("bogus err packet with valid hmac", 200, &nodes, &route, &payment_hash, &payment_secret, |_msg| {}, |msg| { @@ -659,7 +660,7 @@ fn test_onion_failure() { &onion_keys[1].shared_secret.as_ref(), &decoded_err_packet.encode()[..]) }, || nodes[2].node.fail_htlc_backwards(&payment_hash), false, None, Some(NetworkUpdate::NodeFailure { node_id: route.paths[0].hops[1].pubkey, is_permanent: true }), - Some(channels[1].0.contents.short_channel_id)); + Some(channels[1].0.contents.short_channel_id), None); run_onion_failure_test_with_fail_intercept("0-length channel update in intermediate node UPDATE onion failure", 100, &nodes, &route, &payment_hash, &payment_secret, |msg| { msg.amount_msat -= 1; @@ -685,7 +686,7 @@ fn test_onion_failure() { short_channel_id: 
channels[1].0.contents.short_channel_id, is_permanent: false, }), - Some(channels[1].0.contents.short_channel_id)); + Some(channels[1].0.contents.short_channel_id), None); run_onion_failure_test_with_fail_intercept("0-length channel update in final node UPDATE onion failure", 200, &nodes, &route, &payment_hash, &payment_secret, |_msg| {}, |msg| { let session_priv = SecretKey::from_slice(&[3; 32]).unwrap(); @@ -709,7 +710,7 @@ fn test_onion_failure() { short_channel_id: channels[1].0.contents.short_channel_id, is_permanent: false, }), - Some(channels[1].0.contents.short_channel_id)); + Some(channels[1].0.contents.short_channel_id), None); } #[test] @@ -861,6 +862,7 @@ fn do_test_onion_failure_stale_channel_update(announce_for_forwarding: bool) { run_onion_failure_test( name, 0, &nodes, &route, &payment_hash, &payment_secret, |_| {}, || {}, true, Some(error_code), Some(network_update), Some(short_channel_id), + None, ); }; From d8d9dc7d781a9e82bd3ac485903254c0f3570399 Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Fri, 8 Mar 2024 01:43:07 -0800 Subject: [PATCH 2/2] Enable decoding new incoming HTLC onions when fully committed This commit ensures that all new incoming HTLCs going forward will have their onion decoded once they become fully committed, at which point we decide how to proceed with each one. As a result, we'll obtain `HTLCHandlingFailed` events for _any_ failed HTLC that comes across a channel. Previously, we would evaluate the incoming HTLC within `can_accept_incoming_htlc` upon receipt, before it was committed, so we always had to account for it manually when checking certain HTLC limits. With this change, we no longer need to do so, as the HTLC will already be accounted for within the pending HTLC stats computation. We will now start writing channels with the new serialization version (4), and we will still be able to downgrade back to the commit that introduced it, since reading version 4 is supported. Note that existing pending inbound HTLCs may already have their resolution computed if they were received on a previous version of LDK. We must support those until we no longer allow downgrading beyond this commit.
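As an illustration of the new flow, consider a node receiving a bogus HTLC (a sketch only, using the functional-test harness helpers exercised throughout this patch; the node indices, the `payment_event` received from the sender, and the `InvalidOnion` destination are assumptions borrowed from the tests updated below):

    // The onion is no longer evaluated upon receipt, so the commitment
    // dance for a bogus HTLC now completes cleanly...
    nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
    do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, true, true);
    // ...and the failure only surfaces once the HTLC is fully committed and
    // pending forwards are processed, yielding an `HTLCHandlingFailed` event
    // plus a monitor update for the resulting `update_fail_htlc`.
    expect_pending_htlcs_forwardable!(nodes[1]);
    expect_htlc_handling_failed_destinations!(
        nodes[1].node.get_and_clear_pending_events(),
        &[HTLCDestination::InvalidOnion]
    );
    check_added_monitors(&nodes[1], 1);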
--- fuzz/src/full_stack.rs | 24 +-- lightning/src/events/mod.rs | 10 -- lightning/src/ln/blinded_payment_tests.rs | 68 ++++++-- lightning/src/ln/channel.rs | 96 ++++------- lightning/src/ln/channelmanager.rs | 84 +--------- lightning/src/ln/functional_test_utils.rs | 17 +- lightning/src/ln/functional_tests.rs | 24 ++- lightning/src/ln/onion_route_tests.rs | 119 +++++++++----- lightning/src/ln/payment_tests.rs | 190 +++++++++++++--------- lightning/src/ln/priv_short_conf_tests.rs | 19 +++ lightning/src/ln/reload_tests.rs | 9 +- lightning/src/ln/shutdown_tests.rs | 6 + 12 files changed, 356 insertions(+), 310 deletions(-) diff --git a/fuzz/src/full_stack.rs b/fuzz/src/full_stack.rs index c1f2dd11b1e..1b3e576da6c 100644 --- a/fuzz/src/full_stack.rs +++ b/fuzz/src/full_stack.rs @@ -1338,8 +1338,8 @@ mod tests { // end of update_add_htlc from 0 to 1 via client and mac ext_from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff ab00000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000", &mut test); - // Two feerate requests to check dust exposure - ext_from_hex("00fd00fd", &mut test); + // One feerate request to check dust exposure + ext_from_hex("00fd", &mut test); // inbound read from peer id 0 of len 18 ext_from_hex("030012", &mut test); @@ -1362,8 +1362,8 @@ mod tests { // process the now-pending HTLC forward ext_from_hex("07", &mut test); - // Three feerate requests to check dust exposure - ext_from_hex("00fd00fd00fd", &mut test); + // Four feerate requests to check dust exposure while forwarding the HTLC + ext_from_hex("00fd00fd00fd00fd", &mut test); // client now sends id 1 update_add_htlc and commitment_signed (CHECK 7: UpdateHTLCs event for node 03020000 with 1 HTLCs for channel 3f000000) // we respond with commitment_signed then revoke_and_ack (a weird, but valid, order) @@ -1439,8 +1439,8 @@ mod tests { // end of update_add_htlc from 0 to 1 via client and mac ext_from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff ab00000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000", &mut test); - // Two feerate requests to check dust exposure - ext_from_hex("00fd00fd", &mut test); + // One feerate request to check dust exposure + ext_from_hex("00fd", &mut test); // now respond to the update_fulfill_htlc+commitment_signed messages the client sent to peer 0 // inbound read from peer id 0 of len 18 @@ -1474,8 +1474,8 @@ mod tests { // process the now-pending HTLC forward ext_from_hex("07", &mut test); - // Three feerate requests to check dust exposure - ext_from_hex("00fd00fd00fd", &mut test); + // Four feerate requests to check dust exposure while forwarding the HTLC + ext_from_hex("00fd00fd00fd00fd", &mut test); // client now sends id 1 update_add_htlc and commitment_signed (CHECK 7 duplicate) // we respond with revoke_and_ack, then commitment_signed, then update_fail_htlc @@ -1574,8 +1574,8 @@ mod tests { // end of update_add_htlc from 0 to 1 via client and mac 
ext_from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff 5300000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000", &mut test); - // Two feerate requests to check dust exposure - ext_from_hex("00fd00fd", &mut test); + // One feerate request to check dust exposure + ext_from_hex("00fd", &mut test); // inbound read from peer id 0 of len 18 ext_from_hex("030012", &mut test); @@ -1598,8 +1598,8 @@ mod tests { // process the now-pending HTLC forward ext_from_hex("07", &mut test); - // Three feerate requests to check dust exposure - ext_from_hex("00fd00fd00fd", &mut test); + // Four feerate requests to check dust exposure while forwarding the HTLC + ext_from_hex("00fd00fd00fd00fd", &mut test); // client now sends id 1 update_add_htlc and commitment_signed (CHECK 7 duplicate) // connect a block with one transaction of len 125 diff --git a/lightning/src/events/mod.rs b/lightning/src/events/mod.rs index 5bc446f9724..10583e82b31 100644 --- a/lightning/src/events/mod.rs +++ b/lightning/src/events/mod.rs @@ -1426,16 +1426,6 @@ pub enum Event { /// Indicates that the HTLC was accepted, but could not be processed when or after attempting to /// forward it. /// - /// Some scenarios where this event may be sent include: - /// * Insufficient capacity in the outbound channel - /// * While waiting to forward the HTLC, the channel it is meant to be forwarded through closes - /// * When an unknown SCID is requested for forwarding a payment. - /// * Expected MPP amount has already been reached - /// * The HTLC has timed out - /// - /// This event, however, does not get generated if an HTLC fails to meet the forwarding - /// requirements (i.e. insufficient fees paid, or a CLTV that is too soon). - /// /// # Failure Behavior and Persistence /// This event will eventually be replayed after failures-to-handle (i.e., the event handler /// returning `Err(ReplayEvent ())`) and will be persisted across restarts. diff --git a/lightning/src/ln/blinded_payment_tests.rs b/lightning/src/ln/blinded_payment_tests.rs index c1fad65c14f..d27a7c8a235 100644 --- a/lightning/src/ln/blinded_payment_tests.rs +++ b/lightning/src/ln/blinded_payment_tests.rs @@ -309,8 +309,10 @@ fn do_forward_checks_failure(check: ForwardCheckFail, intro_fails: bool) { // We need the session priv to construct a bogus onion packet later. 
*nodes[0].keys_manager.override_random_bytes.lock().unwrap() = Some([3; 32]); create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0); - let chan_upd_1_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0).0.contents; - let chan_upd_2_3 = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 1_000_000, 0).0.contents; + let chan_1_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0); + let chan_upd_1_2 = chan_1_2.0.contents; + let chan_2_3 = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 1_000_000, 0); + let chan_upd_2_3 = chan_2_3.0.contents; let amt_msat = 5000; let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[3], Some(amt_msat), None); @@ -364,18 +366,27 @@ fn do_forward_checks_failure(check: ForwardCheckFail, intro_fails: bool) { check_added_monitors!(nodes[1], 0); do_commitment_signed_dance(&nodes[1], &nodes[0], &updates_0_1.commitment_signed, true, true); + expect_pending_htlcs_forwardable!(nodes[1]); + check_added_monitors!(nodes[1], 1); + if intro_fails { let mut updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false); + let failed_destination = match check { + ForwardCheckFail::InboundOnionCheck => HTLCDestination::InvalidOnion, + ForwardCheckFail::ForwardPayloadEncodedAsReceive => HTLCDestination::FailedPayment { payment_hash }, + ForwardCheckFail::OutboundChannelCheck => + HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_1_2.2 }, + }; + expect_htlc_handling_failed_destinations!( + nodes[1].node.get_and_clear_pending_events(), &[failed_destination.clone()] + ); expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new().expected_htlc_error_data(INVALID_ONION_BLINDING, &[0; 32])); return } - expect_pending_htlcs_forwardable!(nodes[1]); - check_added_monitors!(nodes[1], 1); - let mut updates_1_2 = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id()); let mut update_add = &mut updates_1_2.update_add_htlcs[0]; @@ -385,6 +396,17 @@ fn do_forward_checks_failure(check: ForwardCheckFail, intro_fails: bool) { check_added_monitors!(nodes[2], 0); do_commitment_signed_dance(&nodes[2], &nodes[1], &updates_1_2.commitment_signed, true, true); + expect_pending_htlcs_forwardable!(nodes[2]); + let failed_destination = match check { + ForwardCheckFail::InboundOnionCheck|ForwardCheckFail::ForwardPayloadEncodedAsReceive => HTLCDestination::InvalidOnion, + ForwardCheckFail::OutboundChannelCheck => + HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }, + }; + expect_htlc_handling_failed_destinations!( + nodes[2].node.get_and_clear_pending_events(), &[failed_destination.clone()] + ); + check_added_monitors!(nodes[2], 1); + let mut updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); let update_malformed = &mut updates.update_fail_malformed_htlcs[0]; assert_eq!(update_malformed.failure_code, INVALID_ONION_BLINDING); @@ -444,7 +466,10 @@ fn failed_backwards_to_intro_node() { nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event.msgs[0]); check_added_monitors!(nodes[2], 0); do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event.commitment_msg, true, true); - 
nodes[2].node.process_pending_htlc_forwards(); + + expect_pending_htlcs_forwardable!(nodes[2]); + expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCDestination::InvalidOnion]); + check_added_monitors(&nodes[2], 1); let mut updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); let mut update_malformed = &mut updates.update_fail_malformed_htlcs[0]; @@ -521,7 +546,7 @@ fn do_forward_fail_in_process_pending_htlc_fwds(check: ProcessPendingHTLCsCheck, // intro node will error backwards. $curr_node.node.peer_disconnected($next_node.node.get_our_node_id()); expect_pending_htlcs_forwardable!($curr_node); - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!($curr_node, + expect_htlc_handling_failed_destinations!($curr_node.node.get_and_clear_pending_events(), vec![HTLCDestination::NextHopChannel { node_id: Some($next_node.node.get_our_node_id()), channel_id: $failed_chan_id }]); }, ProcessPendingHTLCsCheck::FwdChannelClosed => { @@ -537,12 +562,12 @@ fn do_forward_fail_in_process_pending_htlc_fwds(check: ProcessPendingHTLCsCheck, crate::events::Event::ChannelClosed { .. } => {}, _ => panic!("Unexpected event {:?}", events), } + check_closed_broadcast(&$curr_node, 1, true); + check_added_monitors!($curr_node, 1); $curr_node.node.process_pending_htlc_forwards(); - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!($curr_node, + expect_htlc_handling_failed_destinations!($curr_node.node.get_and_clear_pending_events(), vec![HTLCDestination::UnknownNextHop { requested_forward_scid: $failed_scid }]); - check_closed_broadcast(&$curr_node, 1, true); - check_added_monitors!($curr_node, 1); $curr_node.node.process_pending_htlc_forwards(); }, } @@ -628,6 +653,7 @@ fn do_blinded_intercept_payment(intercept_node_fails: bool) { }; nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], &payment_event.commitment_msg, false, true); + expect_pending_htlcs_forwardable!(nodes[1]); let events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); @@ -933,6 +959,9 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), update_add); check_added_monitors!(nodes[2], 0); do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true); + expect_pending_htlcs_forwardable!(nodes[2]); + expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCDestination::InvalidOnion]); + check_added_monitors(&nodes[2], 1); }, ReceiveCheckFail::ReceiveRequirements => { let update_add = &mut payment_event_1_2.msgs[0]; @@ -940,6 +969,9 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), update_add); check_added_monitors!(nodes[2], 0); do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true); + expect_pending_htlcs_forwardable!(nodes[2]); + expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCDestination::FailedPayment { payment_hash }]); + check_added_monitors(&nodes[2], 1); }, ReceiveCheckFail::ChannelCheck => { nodes[2].node.close_channel(&chan_id_1_2, &nodes[1].node.get_our_node_id()).unwrap(); @@ -953,6 +985,9 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { nodes[2].node.handle_shutdown(nodes[1].node.get_our_node_id(), &node_1_shutdown); 
commitment_signed_dance!(nodes[2], nodes[1], (), false, true, false, false); + expect_pending_htlcs_forwardable!(nodes[2]); + expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCDestination::FailedPayment { payment_hash }]); + check_added_monitors(&nodes[2], 1); }, ReceiveCheckFail::ProcessPendingHTLCsCheck => { assert_eq!(payment_event_1_2.msgs[0].cltv_expiry, nodes[0].best_block_info().1 + 1 + excess_final_cltv_delta_opt.unwrap() as u32 + TEST_FINAL_CLTV); @@ -968,6 +1003,9 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event_1_2.msgs[0]); check_added_monitors!(nodes[2], 0); do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true); + expect_pending_htlcs_forwardable!(nodes[2]); + expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCDestination::FailedPayment { payment_hash }]); + check_added_monitors(&nodes[2], 1); } } @@ -1168,6 +1206,12 @@ fn min_htlc() { nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event_0_1.msgs[0]); check_added_monitors!(nodes[1], 0); do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event_0_1.commitment_msg, true, true); + expect_pending_htlcs_forwardable!(nodes[1]); + expect_htlc_handling_failed_destinations!( + nodes[1].node.get_and_clear_pending_events(), + &[HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_1_2.2 }] + ); + check_added_monitors(&nodes[1], 1); let mut updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false); @@ -1497,9 +1541,11 @@ fn fails_receive_tlvs_authentication() { let mut payment_event = SendEvent::from_event(ev); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - check_added_monitors!(nodes[1], 0); do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, true, true); + expect_pending_htlcs_forwardable!(nodes[1]); nodes[1].node.process_pending_htlc_forwards(); + check_added_monitors!(nodes[1], 1); + expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCDestination::InvalidOnion]); let mut update_fail = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert!(update_fail.update_fail_htlcs.len() == 1); diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index b092a437fe9..2dae3fb619d 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -3736,7 +3736,7 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider { } let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered); - let max_reserved_commit_tx_fee_msat = context.next_remote_commit_tx_fee_msat(htlc_above_dust, None); + let max_reserved_commit_tx_fee_msat = context.next_remote_commit_tx_fee_msat(Some(htlc_above_dust), None); let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000; let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat) @@ -3942,7 +3942,9 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider { /// second allows for creating a buffer to ensure a further HTLC can always be
accepted/added. /// /// Dust HTLCs are excluded. - fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 { + fn next_remote_commit_tx_fee_msat(&self, htlc: Option<HTLCCandidate>, fee_spike_buffer_htlc: Option<()>) -> u64 { + debug_assert!(htlc.is_some() || fee_spike_buffer_htlc.is_some(), "At least one of the options must be set"); + let context = &self; assert!(!context.is_outbound()); @@ -3957,15 +3959,17 @@ let mut addl_htlcs = 0; if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; } - match htlc.origin { - HTLCInitiator::LocalOffered => { - if htlc.amount_msat / 1000 >= real_dust_limit_success_sat { - addl_htlcs += 1; - } - }, - HTLCInitiator::RemoteOffered => { - if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat { - addl_htlcs += 1; + if let Some(htlc) = &htlc { + match htlc.origin { + HTLCInitiator::LocalOffered => { + if htlc.amount_msat / 1000 >= real_dust_limit_success_sat { + addl_htlcs += 1; + } + }, + HTLCInitiator::RemoteOffered => { + if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat { + addl_htlcs += 1; + } } } } @@ -3998,7 +4002,7 @@ let num_htlcs = included_htlcs + addl_htlcs; let res = commit_tx_fee_sat(context.feerate_per_kw, num_htlcs, &context.channel_type) * 1000; #[cfg(any(test, fuzzing))] - { + if let Some(htlc) = &htlc { let mut fee = res; if fee_spike_buffer_htlc.is_some() { fee = commit_tx_fee_sat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type) * 1000; } @@ -5014,8 +5018,7 @@ impl<SP: Deref> FundedChannel<SP> where } pub fn update_add_htlc<F: Deref>( - &mut self, msg: &msgs::UpdateAddHTLC, pending_forward_status: PendingHTLCStatus, - fee_estimator: &LowerBoundedFeeEstimator<F>, + &mut self, msg: &msgs::UpdateAddHTLC, fee_estimator: &LowerBoundedFeeEstimator<F>, ) -> Result<(), ChannelError> where F::Target: FeeEstimator { if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) { return Err(ChannelError::close("Got add HTLC message when channel was not in an operational state".to_owned())); } @@ -5080,7 +5083,7 @@ impl<SP: Deref> FundedChannel<SP> where { let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else { let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered); - self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations + self.context.next_remote_commit_tx_fee_msat(Some(htlc_candidate), None) // Don't include the extra fee spike buffer HTLC in calculations }; let anchor_outputs_value_msat = if !self.context.is_outbound() && self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000 @@ -5115,12 +5118,6 @@ impl<SP: Deref> FundedChannel<SP> where return Err(ChannelError::close("Remote provided CLTV expiry in seconds instead of block height".to_owned())); } - if self.context.channel_state.is_local_shutdown_sent() { - if let PendingHTLCStatus::Forward(_) = pending_forward_status { - panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing"); - } - } - // Now update local state: self.context.next_counterparty_htlc_id += 1; self.context.pending_inbound_htlcs.push(InboundHTLCOutput { htlc_id: msg.htlc_id, amount_msat: msg.amount_msat, payment_hash: msg.payment_hash, cltv_expiry: msg.cltv_expiry, - state: InboundHTLCState::RemoteAnnounced(InboundHTLCResolution::Resolved { - pending_htlc_status:
pending_forward_status + state: InboundHTLCState::RemoteAnnounced(InboundHTLCResolution::Pending { + update_add_htlc: msg.clone(), }), }); Ok(()) @@ -7242,7 +7239,7 @@ impl<SP: Deref> FundedChannel<SP> where }; let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis; if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats { - let on_counterparty_tx_dust_htlc_exposure_msat = htlc_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat; + let on_counterparty_tx_dust_htlc_exposure_msat = htlc_stats.on_counterparty_tx_dust_exposure_msat; if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat { log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx", on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat); @@ -7262,7 +7259,7 @@ impl<SP: Deref> FundedChannel<SP> where let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis; if msg.amount_msat / 1000 < exposure_dust_limit_success_sats { - let on_holder_tx_dust_htlc_exposure_msat = htlc_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat; + let on_holder_tx_dust_htlc_exposure_msat = htlc_stats.on_holder_tx_dust_exposure_msat; if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat { log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat); @@ -7295,12 +7292,14 @@ impl<SP: Deref> FundedChannel<SP> where // the spec because the fee spike buffer requirement doesn't exist on the receiver's // side, only on the sender's. Note that with anchor outputs we are no longer as // sensitive to fee spikes, so we need to account for them. - let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered); - let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(())); + // + // A `None` `HTLCCandidate` is used in this case because we're already accounting for + // the incoming HTLC, as it has been fully committed by both sides. + let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(None, Some(())); if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() { remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE; } - if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat { + if pending_remote_value_msat.saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat { log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}.
Rebalancing is required.", &self.context.channel_id()); return Err(("Fee spike buffer violation", 0x1000|7)); } @@ -9380,7 +9379,7 @@ fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) } const SERIALIZATION_VERSION: u8 = 4; -const MIN_SERIALIZATION_VERSION: u8 = 3; +const MIN_SERIALIZATION_VERSION: u8 = 4; impl_writeable_tlv_based_enum_legacy!(InboundHTLCRemovalReason,; (0, FailRelay), @@ -9441,18 +9440,7 @@ impl<SP: Deref> Writeable for FundedChannel<SP> where SP::Target: SignerProvider // Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been // called. - let version_to_write = if self.context.pending_inbound_htlcs.iter().any(|htlc| match htlc.state { - InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_resolution)| - InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_resolution) => { - matches!(htlc_resolution, InboundHTLCResolution::Pending { .. }) - }, - _ => false, - }) { - SERIALIZATION_VERSION - } else { - MIN_SERIALIZATION_VERSION - }; - write_ver_prefix!(writer, version_to_write, MIN_SERIALIZATION_VERSION); + write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION); // `user_id` used to be a single u64 value. In order to remain backwards compatible with // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We write @@ -9510,27 +9498,11 @@ impl<SP: Deref> Writeable for FundedChannel<SP> where SP::Target: SignerProvider &InboundHTLCState::RemoteAnnounced(_) => unreachable!(), &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_resolution) => { 1u8.write(writer)?; - if version_to_write <= 3 { - if let InboundHTLCResolution::Resolved { pending_htlc_status } = htlc_resolution { - pending_htlc_status.write(writer)?; - } else { - panic!(); - } - } else { - htlc_resolution.write(writer)?; - } + htlc_resolution.write(writer)?; }, &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_resolution) => { 2u8.write(writer)?; - if version_to_write <= 3 { - if let InboundHTLCResolution::Resolved { pending_htlc_status } = htlc_resolution { - pending_htlc_status.write(writer)?; - } else { - panic!(); - } - } else { - htlc_resolution.write(writer)?; - } + htlc_resolution.write(writer)?; }, &InboundHTLCState::Committed => { 3u8.write(writer)?; @@ -10678,7 +10650,7 @@ mod tests { node_a_chan.context.channel_transaction_parameters.is_outbound_from_holder = false; let remote_commit_fee_3_htlcs = commit_tx_fee_sat(node_a_chan.context.feerate_per_kw, 3, node_a_chan.context.get_channel_type()) * 1000; let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered); - let remote_commit_tx_fee = node_a_chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None); + let remote_commit_tx_fee = node_a_chan.context.next_remote_commit_tx_fee_msat(Some(htlc_candidate), None); assert_eq!(remote_commit_tx_fee, remote_commit_fee_3_htlcs); } @@ -10720,13 +10692,13 @@ mod tests { // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis + 1) * 1000; let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_above_timeout, HTLCInitiator::LocalOffered); - let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None); + let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(Some(htlc_candidate), None); assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs); // If swapped: this HTLC would be counted as dust when it shouldn't be. let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis - 1) * 1000; let htlc_candidate = HTLCCandidate::new(htlc_amt_below_success, HTLCInitiator::RemoteOffered); - let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None); + let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(Some(htlc_candidate), None); assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc); } diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 06fe0ee33a4..a7d6500aa08 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -4344,34 +4344,6 @@ where }) } - fn decode_update_add_htlc_onion( - &self, msg: &msgs::UpdateAddHTLC, counterparty_node_id: &PublicKey, - ) -> Result< - (onion_utils::Hop, [u8; 32], Option<Result<PublicKey, secp256k1::Error>>), HTLCFailureMsg - > { - let (next_hop, shared_secret, next_packet_details_opt) = decode_incoming_update_add_htlc_onion( - msg, &*self.node_signer, &*self.logger, &self.secp_ctx - )?; - - let next_packet_details = match next_packet_details_opt { - Some(next_packet_details) => next_packet_details, - // it is a receive, so no need for outbound checks - None => return Ok((next_hop, shared_secret, None)), - }; - - // Perform outbound checks here instead of in [`Self::construct_pending_htlc_info`] because we - // can't hold the outbound peer state lock at the same time as the inbound peer state lock. - self.can_forward_htlc(&msg, &next_packet_details).map_err(|e| { - let (err_msg, err_code) = e; - self.htlc_failure_from_update_add_err( - msg, counterparty_node_id, err_msg, err_code, - next_hop.is_intro_node_blinded_forward(), &shared_secret - ) - })?; - - Ok((next_hop, shared_secret, Some(next_packet_details.next_packet_pubkey))) - } - fn construct_pending_htlc_status<'a>( &self, msg: &msgs::UpdateAddHTLC, counterparty_node_id: &PublicKey, shared_secret: [u8; 32], decoded_hop: onion_utils::Hop, allow_underpay: bool, @@ -5581,7 +5553,7 @@ where Ok(()) } - fn process_pending_update_add_htlcs(&self) { + pub(crate) fn process_pending_update_add_htlcs(&self) { let mut decode_update_add_htlcs = new_hash_map(); mem::swap(&mut decode_update_add_htlcs, &mut self.decode_update_add_htlcs.lock().unwrap()); @@ -8659,7 +8631,6 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // Note that the ChannelManager is NOT re-persisted on disk after this (unless we error // closing a channel), so any changes are likely to be lost on restart! - let decoded_hop_res = self.decode_update_add_htlc_onion(msg, counterparty_node_id); let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id) .ok_or_else(|| { @@ -8671,53 +8642,7 @@ This indicates a bug inside LDK.
Please report this error at https://github.com/ match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan_phase_entry) => { if let Some(chan) = chan_phase_entry.get_mut().as_funded_mut() { - let mut pending_forward_info = match decoded_hop_res { - Ok((next_hop, shared_secret, next_packet_pk_opt)) => - self.construct_pending_htlc_status( - msg, counterparty_node_id, shared_secret, next_hop, - chan.context.config().accept_underpaying_htlcs, next_packet_pk_opt, - ), - Err(e) => PendingHTLCStatus::Fail(e) - }; - let logger = WithChannelContext::from(&self.logger, &chan.context, Some(msg.payment_hash)); - // If the update_add is completely bogus, the call will Err and we will close, - // but if we've sent a shutdown and they haven't acknowledged it yet, we just - // want to reject the new HTLC and fail it backwards instead of forwarding. - if let Err((_, error_code)) = chan.can_accept_incoming_htlc(&msg, &self.fee_estimator, &logger) { - if msg.blinding_point.is_some() { - pending_forward_info = PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed( - msgs::UpdateFailMalformedHTLC { - channel_id: msg.channel_id, - htlc_id: msg.htlc_id, - sha256_of_onion: [0; 32], - failure_code: INVALID_ONION_BLINDING, - } - )) - } else { - match pending_forward_info { - PendingHTLCStatus::Forward(PendingHTLCInfo { - ref incoming_shared_secret, ref routing, .. - }) => { - let reason = if routing.blinded_failure().is_some() { - HTLCFailReason::reason(INVALID_ONION_BLINDING, vec![0; 32]) - } else if (error_code & 0x1000) != 0 { - let error_data = self.get_htlc_inbound_temp_fail_data(error_code); - HTLCFailReason::reason(error_code, error_data) - } else { - HTLCFailReason::from_failure_code(error_code) - }.get_encrypted_failure_packet(incoming_shared_secret, &None); - let msg = msgs::UpdateFailHTLC { - channel_id: msg.channel_id, - htlc_id: msg.htlc_id, - reason - }; - pending_forward_info = PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msg)); - }, - _ => {}, - } - } - } - try_chan_phase_entry!(self, peer_state, chan.update_add_htlc(&msg, pending_forward_info, &self.fee_estimator), chan_phase_entry); + try_chan_phase_entry!(self, peer_state, chan.update_add_htlc(&msg, &self.fee_estimator), chan_phase_entry); } else { return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close( "Got an update_add_htlc message for an unfunded channel!".into())), chan_phase_entry); @@ -14727,6 +14652,11 @@ mod tests { assert!(updates.update_fail_malformed_htlcs.is_empty()); assert!(updates.update_fee.is_none()); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); + commitment_signed_dance!(nodes[1], nodes[0], &updates.commitment_signed, false); + expect_pending_htlcs_forwardable!(nodes[1]); + expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCDestination::FailedPayment { payment_hash: mismatch_payment_hash }]); + check_added_monitors(&nodes[1], 1); + let _ = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Payment preimage didn't match payment hash", 1); } diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index e95b3334850..200248dc978 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -1941,15 +1941,18 @@ macro_rules! expect_pending_htlcs_forwardable_conditions { #[macro_export] macro_rules! 
expect_htlc_handling_failed_destinations { ($events: expr, $expected_failures: expr) => {{ + let mut num_expected_failures = $expected_failures.len(); for event in $events { match event { $crate::events::Event::PendingHTLCsForwardable { .. } => { }, $crate::events::Event::HTLCHandlingFailed { ref failed_next_destination, .. } => { - assert!($expected_failures.contains(&failed_next_destination)) + assert!($expected_failures.contains(&failed_next_destination)); + num_expected_failures -= 1; }, _ => panic!("Unexpected destination"), } } + assert_eq!(num_expected_failures, 0); }} } @@ -2718,6 +2721,8 @@ pub fn do_pass_along_path<'a, 'b, 'c>(args: PassAlongPathArgs) -> Option<Event> if is_last_hop && is_probe { commitment_signed_dance!(node, prev_node, payment_event.commitment_msg, true, true); + expect_pending_htlcs_forwardable!(node); + check_added_monitors(node, 1); } else { commitment_signed_dance!(node, prev_node, payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(node); @@ -2801,22 +2806,26 @@ pub fn pass_along_path<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_path do_pass_along_path(args) } -pub fn send_probe_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_route: &[&[&Node<'a, 'b, 'c>]]) { +pub fn send_probe_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_route: &[(&[&Node<'a, 'b, 'c>], PaymentHash)]) { let mut events = origin_node.node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), expected_route.len()); check_added_monitors!(origin_node, expected_route.len()); - for path in expected_route.iter() { + for (path, payment_hash) in expected_route.iter() { let ev = remove_first_msg_event_to_node(&path[0].node.get_our_node_id(), &mut events); - do_pass_along_path(PassAlongPathArgs::new(origin_node, path, 0, PaymentHash([0_u8; 32]), ev) + do_pass_along_path(PassAlongPathArgs::new(origin_node, path, 0, *payment_hash, ev) .is_probe() .without_clearing_recipient_events()); let nodes_to_fail_payment: Vec<_> = vec![origin_node].into_iter().chain(path.iter().cloned()).collect(); fail_payment_along_path(nodes_to_fail_payment.as_slice()); + expect_htlc_handling_failed_destinations!( + path.last().unwrap().node.get_and_clear_pending_events(), + &[HTLCDestination::FailedPayment { payment_hash: *payment_hash }] + ); } } diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 5a7f870078a..4723901b945 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -1544,6 +1544,8 @@ fn test_fee_spike_violation_fails_htlc() { next_local_nonce: None, }; nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &raa_msg); + expect_pending_htlcs_forwardable!(nodes[1]); + expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCDestination::FailedPayment { payment_hash }]); let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -1558,7 +1560,7 @@ fn test_fee_spike_violation_fails_htlc() { nodes[1].logger.assert_log("lightning::ln::channel", format!("Attempting to fail HTLC due to fee spike buffer violation in channel {}.
Rebalancing is required.", raa_msg.channel_id), 1); - check_added_monitors!(nodes[1], 2); + check_added_monitors!(nodes[1], 3); } #[test] @@ -6798,6 +6800,9 @@ fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_messag nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); check_added_monitors!(nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[0], updates.commitment_signed, false, true); + expect_pending_htlcs_forwardable!(nodes[1]); + expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCDestination::InvalidOnion]); + check_added_monitors(&nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_msg_events(); @@ -6862,6 +6867,9 @@ fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_upda nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event.msgs[0]); check_added_monitors!(nodes[2], 0); commitment_signed_dance!(nodes[2], nodes[1], payment_event.commitment_msg, false, true); + expect_pending_htlcs_forwardable!(nodes[2]); + expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCDestination::InvalidOnion]); + check_added_monitors(&nodes[2], 1); let events_3 = nodes[2].node.get_and_clear_pending_msg_events(); assert_eq!(events_3.len(), 1); @@ -6933,6 +6941,9 @@ fn test_channel_failed_after_message_with_badonion_node_perm_bits_set() { nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event.msgs[0]); check_added_monitors!(nodes[2], 0); commitment_signed_dance!(nodes[2], nodes[1], payment_event.commitment_msg, false, true); + expect_pending_htlcs_forwardable!(nodes[2]); + expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCDestination::InvalidOnion]); + check_added_monitors(&nodes[2], 1); let events_3 = nodes[2].node.get_and_clear_pending_msg_events(); assert_eq!(events_3.len(), 1); @@ -9987,9 +9998,7 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e // Outbound dust balance: 4372 sats // Note, we need sent payment to be above outbound dust threshold on counterparty_tx of 2132 sats for _ in 0..dust_outbound_htlc_on_holder_tx { - let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], dust_outbound_htlc_on_holder_tx_msat); - nodes[0].node.send_payment_with_route(route, payment_hash, - RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); + route_payment(&nodes[0], &[&nodes[1]], dust_outbound_htlc_on_holder_tx_msat); } } else { // Inbound dust threshold: 2324 sats (`dust_buffer_feerate` * HTLC_SUCCESS_TX_WEIGHT / 1000 + holder's `dust_limit_satoshis`) @@ -10004,9 +10013,7 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e // Outbound dust threshold: 2132 sats (`dust_buffer_feerate` * HTLC_TIMEOUT_TX_WEIGHT / 1000 + counterparty's `dust_limit_satoshis`) // Outbound dust balance: 5000 sats for _ in 0..dust_htlc_on_counterparty_tx - 1 { - let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], dust_htlc_on_counterparty_tx_msat); - nodes[0].node.send_payment_with_route(route, payment_hash, - RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); + route_payment(&nodes[0], &[&nodes[1]], dust_htlc_on_counterparty_tx_msat); } } else { // Inbound dust threshold: 2031 sats (`dust_buffer_feerate` * HTLC_TIMEOUT_TX_WEIGHT /
1000 + counterparty's `dust_limit_satoshis`) @@ -10039,6 +10046,9 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.remove(0)); nodes[0].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event.msgs[0]); + commitment_signed_dance!(nodes[0], nodes[1], payment_event.commitment_msg, false); + expect_pending_htlcs_forwardable!(nodes[0]); + expect_htlc_handling_failed_destinations!(nodes[0].node.get_and_clear_pending_events(), &[HTLCDestination::FailedPayment { payment_hash }]); // With default dust exposure: 5000 sats if on_holder_tx { // Outbound dust balance: 6399 sats diff --git a/lightning/src/ln/onion_route_tests.rs b/lightning/src/ln/onion_route_tests.rs index 1736103def0..c919a9e1a42 100644 --- a/lightning/src/ln/onion_route_tests.rs +++ b/lightning/src/ln/onion_route_tests.rs @@ -105,6 +105,9 @@ fn run_onion_failure_test_with_fail_intercept( let update_1_0 = match test_case { 0|100 => { // intermediate node failure; fail backward to 0 + expect_pending_htlcs_forwardable!(nodes[1]); + expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[expected_htlc_destination.clone().unwrap()]); + check_added_monitors(&nodes[1], 1); let update_1_0 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert!(update_1_0.update_fail_htlcs.len()+update_1_0.update_fail_malformed_htlcs.len()==1 && (update_1_0.update_fail_htlcs.len()==1 || update_1_0.update_fail_malformed_htlcs.len()==1)); update_1_0 @@ -135,12 +138,13 @@ fn run_onion_failure_test_with_fail_intercept( expect_event!(&nodes[2], Event::PaymentClaimable); callback_node(); expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }]); + } else if test_case == 1 || test_case == 3 { + expect_htlc_forward!(&nodes[2]); + expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), vec![expected_htlc_destination.clone().unwrap()]); } + check_added_monitors!(&nodes[2], 1); let update_2_1 = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); - if test_case == 2 || test_case == 200 { - check_added_monitors!(&nodes[2], 1); - } assert!(update_2_1.update_fail_htlcs.len() == 1); let mut fail_msg = update_2_1.update_fail_htlcs[0].clone(); @@ -301,9 +305,10 @@ fn test_fee_failures() { // because we ignore channel update contents, we will still blame the 2nd channel. let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[2]); let short_channel_id = channels[1].0.contents.short_channel_id; - run_onion_failure_test("fee_insufficient", 0, &nodes, &route, &payment_hash, &payment_secret, |msg| { + run_onion_failure_test("fee_insufficient", 100, &nodes, &route, &payment_hash, &payment_secret, |msg| { msg.amount_msat -= 1; - }, || {}, true, Some(UPDATE|12), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false}), Some(short_channel_id), None); + }, || {}, true, Some(UPDATE|12), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false}), Some(short_channel_id), + Some(HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channels[1].2 })); // In an earlier version, we spuriously failed to forward payments if the expected feerate // changed between the channel open and the payment.
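To make the new `expected_htlc_destination` argument concrete: in the intermediate-node cases (`test_case` 0 or 100) the helper now drains the forwarding node's events and asserts which `HTLCDestination` the `HTLCHandlingFailed` event blames before collecting the fail-back update. A minimal sketch of that pattern follows, reusing only names from the hunks above (`nodes`, `channels`, and the harness macros); it is an illustration, not an additional hunk:

let next_hop_failure = HTLCDestination::NextHopChannel {
	node_id: Some(nodes[2].node.get_our_node_id()),
	channel_id: channels[1].2,
};
// Let the forwarding node process the deferred update_add batch...
expect_pending_htlcs_forwardable!(nodes[1]);
// ...assert the destination reported by the HTLCHandlingFailed event...
expect_htlc_handling_failed_destinations!(
	nodes[1].node.get_and_clear_pending_events(),
	&[next_hop_failure.clone()]
);
// ...and account for the monitor update generated by the fail-back.
check_added_monitors(&nodes[1], 1);
let update_1_0 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());

Note that with the exact-count check added to `expect_htlc_handling_failed_destinations!` above, a missing `HTLCHandlingFailed` event now fails the test instead of passing silently.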
@@ -349,6 +354,8 @@ fn test_onion_failure() { // positive case send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 40000); + let next_hop_failure = HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channels[1].2 }; + // intermediate node failure let short_channel_id = channels[1].0.contents.short_channel_id; run_onion_failure_test("invalid_realm", 0, &nodes, &route, &payment_hash, &payment_secret, |msg| { @@ -366,7 +373,7 @@ fn test_onion_failure() { // describing a length-1 TLV payload, which is obviously bogus. new_payloads[0].data[0] = 1; msg.onion_routing_packet = onion_utils::construct_onion_packet_with_writable_hopdata(new_payloads, onion_keys, [0; 32], &payment_hash).unwrap(); - }, ||{}, true, Some(PERM|22), Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent: true}), Some(short_channel_id), None); + }, ||{}, true, Some(PERM|22), Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent: true}), Some(short_channel_id), Some(HTLCDestination::InvalidOnion)); // final node failure let short_channel_id = channels[1].0.contents.short_channel_id; @@ -385,7 +392,7 @@ fn test_onion_failure() { // length-1 TLV payload, which is obviously bogus. new_payloads[1].data[0] = 1; msg.onion_routing_packet = onion_utils::construct_onion_packet_with_writable_hopdata(new_payloads, onion_keys, [0; 32], &payment_hash).unwrap(); - }, ||{}, false, Some(PERM|22), Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent: true}), Some(short_channel_id), None); + }, ||{}, false, Some(PERM|22), Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent: true}), Some(short_channel_id), Some(HTLCDestination::InvalidOnion)); // the following three with run_onion_failure_test_with_fail_intercept() test only the origin node // receiving simulated fail messages @@ -398,7 +405,7 @@ fn test_onion_failure() { let session_priv = SecretKey::from_slice(&[3; 32]).unwrap(); let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap(); msg.reason = onion_utils::build_first_hop_failure_packet(onion_keys[0].shared_secret.as_ref(), NODE|2, &[0;0]); - }, ||{}, true, Some(NODE|2), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[0].pubkey, is_permanent: false}), Some(route.paths[0].hops[0].short_channel_id), None); + }, ||{}, true, Some(NODE|2), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[0].pubkey, is_permanent: false}), Some(route.paths[0].hops[0].short_channel_id), Some(next_hop_failure.clone())); // final node failure run_onion_failure_test_with_fail_intercept("temporary_node_failure", 200, &nodes, &route, &payment_hash, &payment_secret, |_msg| {}, |msg| { @@ -418,7 +425,7 @@ fn test_onion_failure() { let session_priv = SecretKey::from_slice(&[3; 32]).unwrap(); let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap(); msg.reason = onion_utils::build_first_hop_failure_packet(onion_keys[0].shared_secret.as_ref(), PERM|NODE|2, &[0;0]); - }, ||{}, true, Some(PERM|NODE|2), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[0].pubkey, is_permanent: true}), Some(route.paths[0].hops[0].short_channel_id), None); + }, ||{}, true, Some(PERM|NODE|2), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[0].pubkey, is_permanent: true}), Some(route.paths[0].hops[0].short_channel_id), Some(next_hop_failure.clone())); // final node failure run_onion_failure_test_with_fail_intercept("permanent_node_failure", 
200, &nodes, &route, &payment_hash, &payment_secret, |_msg| {}, |msg| { @@ -439,7 +446,7 @@ fn test_onion_failure() { msg.reason = onion_utils::build_first_hop_failure_packet(onion_keys[0].shared_secret.as_ref(), PERM|NODE|3, &[0;0]); }, ||{ nodes[2].node.fail_htlc_backwards(&payment_hash); - }, true, Some(PERM|NODE|3), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[0].pubkey, is_permanent: true}), Some(route.paths[0].hops[0].short_channel_id), None); + }, true, Some(PERM|NODE|3), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[0].pubkey, is_permanent: true}), Some(route.paths[0].hops[0].short_channel_id), Some(next_hop_failure.clone())); // final node failure run_onion_failure_test_with_fail_intercept("required_node_feature_missing", 200, &nodes, &route, &payment_hash, &payment_secret, |_msg| {}, |msg| { @@ -455,13 +462,13 @@ fn test_onion_failure() { // the UpdateAddHTLC that we sent. let short_channel_id = channels[0].0.contents.short_channel_id; run_onion_failure_test("invalid_onion_version", 0, &nodes, &route, &payment_hash, &payment_secret, |msg| { msg.onion_routing_packet.version = 1; }, ||{}, true, - Some(BADONION|PERM|4), None, Some(short_channel_id), None); + Some(BADONION|PERM|4), None, Some(short_channel_id), Some(HTLCDestination::InvalidOnion)); run_onion_failure_test("invalid_onion_hmac", 0, &nodes, &route, &payment_hash, &payment_secret, |msg| { msg.onion_routing_packet.hmac = [3; 32]; }, ||{}, true, - Some(BADONION|PERM|5), None, Some(short_channel_id), None); + Some(BADONION|PERM|5), None, Some(short_channel_id), Some(HTLCDestination::InvalidOnion)); run_onion_failure_test("invalid_onion_key", 0, &nodes, &route, &payment_hash, &payment_secret, |msg| { msg.onion_routing_packet.public_key = Err(secp256k1::Error::InvalidPublicKey);}, ||{}, true, - Some(BADONION|PERM|6), None, Some(short_channel_id), None); + Some(BADONION|PERM|6), None, Some(short_channel_id), Some(HTLCDestination::InvalidOnion)); let short_channel_id = channels[1].0.contents.short_channel_id; let chan_update = ChannelUpdate::dummy(short_channel_id); @@ -478,7 +485,7 @@ fn test_onion_failure() { msg.reason = onion_utils::build_first_hop_failure_packet(onion_keys[0].shared_secret.as_ref(), UPDATE|7, &err_data); }, ||{}, true, Some(UPDATE|7), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false }), - Some(short_channel_id), None); + Some(short_channel_id), Some(next_hop_failure.clone())); // Check we can still handle onion failures that include channel updates without a type prefix let err_data_without_type = chan_update.encode_with_len(); @@ -490,7 +497,7 @@ fn test_onion_failure() { msg.reason = onion_utils::build_first_hop_failure_packet(onion_keys[0].shared_secret.as_ref(), UPDATE|7, &err_data_without_type); }, ||{}, true, Some(UPDATE|7), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false }), - Some(short_channel_id), None); + Some(short_channel_id), Some(next_hop_failure.clone())); let short_channel_id = channels[1].0.contents.short_channel_id; run_onion_failure_test_with_fail_intercept("permanent_channel_failure", 100, &nodes, &route, &payment_hash, &payment_secret, |msg| { @@ -500,7 +507,7 @@ fn test_onion_failure() { let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap(); msg.reason = onion_utils::build_first_hop_failure_packet(onion_keys[0].shared_secret.as_ref(), PERM|8, &[0;0]); // short_channel_id from the processing node - }, ||{}, true, Some(PERM|8), 
Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent: true}), Some(short_channel_id), None); + }, ||{}, true, Some(PERM|8), Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent: true}), Some(short_channel_id), Some(next_hop_failure.clone())); let short_channel_id = channels[1].0.contents.short_channel_id; run_onion_failure_test_with_fail_intercept("required_channel_feature_missing", 100, &nodes, &route, &payment_hash, &payment_secret, |msg| { @@ -510,13 +517,13 @@ fn test_onion_failure() { let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap(); msg.reason = onion_utils::build_first_hop_failure_packet(onion_keys[0].shared_secret.as_ref(), PERM|9, &[0;0]); // short_channel_id from the processing node - }, ||{}, true, Some(PERM|9), Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent: true}), Some(short_channel_id), None); + }, ||{}, true, Some(PERM|9), Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent: true}), Some(short_channel_id), Some(next_hop_failure.clone())); let mut bogus_route = route.clone(); bogus_route.paths[0].hops[1].short_channel_id -= 1; let short_channel_id = bogus_route.paths[0].hops[1].short_channel_id; - run_onion_failure_test("unknown_next_peer", 0, &nodes, &bogus_route, &payment_hash, &payment_secret, |_| {}, ||{}, true, Some(PERM|10), - Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent:true}), Some(short_channel_id), None); + run_onion_failure_test("unknown_next_peer", 100, &nodes, &bogus_route, &payment_hash, &payment_secret, |_| {}, ||{}, true, Some(PERM|10), + Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent:true}), Some(short_channel_id), Some(HTLCDestination::UnknownNextHop { requested_forward_scid: short_channel_id })); let short_channel_id = channels[1].0.contents.short_channel_id; let amt_to_forward = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[2].node.get_our_node_id()) @@ -525,9 +532,9 @@ fn test_onion_failure() { let mut bogus_route = route.clone(); let route_len = bogus_route.paths[0].hops.len(); bogus_route.paths[0].hops[route_len-1].fee_msat = amt_to_forward; - run_onion_failure_test("amount_below_minimum", 0, &nodes, &bogus_route, &payment_hash, &payment_secret, |_| {}, ||{}, true, Some(UPDATE|11), + run_onion_failure_test("amount_below_minimum", 100, &nodes, &bogus_route, &payment_hash, &payment_secret, |_| {}, ||{}, true, Some(UPDATE|11), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false }), - Some(short_channel_id), None); + Some(short_channel_id), Some(next_hop_failure.clone())); // Clear pending payments so that the following positive test has the correct payment hash. for node in nodes.iter() { @@ -542,25 +549,25 @@ fn test_onion_failure() { // We ignore channel update contents in onion errors, so will blame the 2nd channel even though // the first node is the one that messed up. 
let short_channel_id = channels[1].0.contents.short_channel_id; - run_onion_failure_test("fee_insufficient", 0, &nodes, &route, &payment_hash, &payment_secret, |msg| { + run_onion_failure_test("fee_insufficient", 100, &nodes, &route, &payment_hash, &payment_secret, |msg| { msg.amount_msat -= 1; - }, || {}, true, Some(UPDATE|12), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false}), Some(short_channel_id), None); + }, || {}, true, Some(UPDATE|12), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false}), Some(short_channel_id), Some(next_hop_failure.clone())); let short_channel_id = channels[1].0.contents.short_channel_id; - run_onion_failure_test("incorrect_cltv_expiry", 0, &nodes, &route, &payment_hash, &payment_secret, |msg| { + run_onion_failure_test("incorrect_cltv_expiry", 100, &nodes, &route, &payment_hash, &payment_secret, |msg| { // need to violate: cltv_expiry - cltv_expiry_delta >= outgoing_cltv_value msg.cltv_expiry -= 1; - }, || {}, true, Some(UPDATE|13), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false}), Some(short_channel_id), None); + }, || {}, true, Some(UPDATE|13), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false}), Some(short_channel_id), Some(next_hop_failure.clone())); let short_channel_id = channels[1].0.contents.short_channel_id; - run_onion_failure_test("expiry_too_soon", 0, &nodes, &route, &payment_hash, &payment_secret, |msg| { + run_onion_failure_test("expiry_too_soon", 100, &nodes, &route, &payment_hash, &payment_secret, |msg| { let height = msg.cltv_expiry - CLTV_CLAIM_BUFFER - LATENCY_GRACE_PERIOD_BLOCKS + 1; connect_blocks(&nodes[0], height - nodes[0].best_block_info().1); connect_blocks(&nodes[1], height - nodes[1].best_block_info().1); connect_blocks(&nodes[2], height - nodes[2].best_block_info().1); }, ||{}, true, Some(UPDATE|14), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false }), - Some(short_channel_id), None); + Some(short_channel_id), Some(next_hop_failure.clone())); run_onion_failure_test("unknown_payment_hash", 2, &nodes, &route, &payment_hash, &payment_secret, |_| {}, || { nodes[2].node.fail_htlc_backwards(&payment_hash); @@ -572,9 +579,10 @@ fn test_onion_failure() { connect_blocks(&nodes[0], height - nodes[0].best_block_info().1); connect_blocks(&nodes[1], height - nodes[1].best_block_info().1); connect_blocks(&nodes[2], height - nodes[2].best_block_info().1); - }, || {}, false, Some(0x4000 | 15), None, None, None); + }, || {}, false, Some(0x4000 | 15), None, None, Some(HTLCDestination::FailedPayment { payment_hash })); run_onion_failure_test("final_incorrect_cltv_expiry", 1, &nodes, &route, &payment_hash, &payment_secret, |_| {}, || { + nodes[1].node.process_pending_update_add_htlcs(); for (_, pending_forwards) in nodes[1].node.forward_htlcs.lock().unwrap().iter_mut() { for f in pending_forwards.iter_mut() { match f { @@ -584,9 +592,10 @@ fn test_onion_failure() { } } } - }, true, Some(18), None, Some(channels[1].0.contents.short_channel_id), None); + }, true, Some(18), None, Some(channels[1].0.contents.short_channel_id), Some(HTLCDestination::FailedPayment { payment_hash })); run_onion_failure_test("final_incorrect_htlc_amount", 1, &nodes, &route, &payment_hash, &payment_secret, |_| {}, || { + nodes[1].node.process_pending_update_add_htlcs(); // violate amt_to_forward > msg.amount_msat for (_, pending_forwards) in nodes[1].node.forward_htlcs.lock().unwrap().iter_mut() { for f in pending_forwards.iter_mut() { @@ -597,17 
+606,17 @@ fn test_onion_failure() { } } } - }, true, Some(19), None, Some(channels[1].0.contents.short_channel_id), None); + }, true, Some(19), None, Some(channels[1].0.contents.short_channel_id), Some(HTLCDestination::FailedPayment { payment_hash })); let short_channel_id = channels[1].0.contents.short_channel_id; - run_onion_failure_test("channel_disabled", 0, &nodes, &route, &payment_hash, &payment_secret, |_| {}, || { + run_onion_failure_test("channel_disabled", 100, &nodes, &route, &payment_hash, &payment_secret, |_| {}, || { // disconnect event to the channel between nodes[1] ~ nodes[2] nodes[1].node.peer_disconnected(nodes[2].node.get_our_node_id()); nodes[2].node.peer_disconnected(nodes[1].node.get_our_node_id()); }, true, Some(UPDATE|7), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false }), - Some(short_channel_id), None); - run_onion_failure_test("channel_disabled", 0, &nodes, &route, &payment_hash, &payment_secret, |_| {}, || { + Some(short_channel_id), Some(next_hop_failure.clone())); + run_onion_failure_test("channel_disabled", 100, &nodes, &route, &payment_hash, &payment_secret, |_| {}, || { // disconnect event to the channel between nodes[1] ~ nodes[2] for _ in 0..DISABLE_GOSSIP_TICKS + 1 { nodes[1].node.timer_tick_occurred(); @@ -617,10 +626,10 @@ fn test_onion_failure() { nodes[2].node.get_and_clear_pending_msg_events(); }, true, Some(UPDATE|20), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false }), - Some(short_channel_id), None); + Some(short_channel_id), Some(next_hop_failure.clone())); reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[2])); - run_onion_failure_test("expiry_too_far", 0, &nodes, &route, &payment_hash, &payment_secret, |msg| { + run_onion_failure_test("expiry_too_far", 100, &nodes, &route, &payment_hash, &payment_secret, |msg| { let session_priv = SecretKey::from_slice(&[3; 32]).unwrap(); let mut route = route.clone(); let height = nodes[2].best_block_info().1; @@ -632,7 +641,7 @@ fn test_onion_failure() { let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap(); msg.cltv_expiry = htlc_cltv; msg.onion_routing_packet = onion_packet; - }, ||{}, true, Some(21), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[0].pubkey, is_permanent: true}), Some(route.paths[0].hops[0].short_channel_id), None); + }, ||{}, true, Some(21), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[0].pubkey, is_permanent: true}), Some(route.paths[0].hops[0].short_channel_id), Some(next_hop_failure.clone())); run_onion_failure_test_with_fail_intercept("mpp_timeout", 200, &nodes, &route, &payment_hash, &payment_secret, |_msg| {}, |msg| { // Tamper returning error message @@ -686,7 +695,7 @@ fn test_onion_failure() { short_channel_id: channels[1].0.contents.short_channel_id, is_permanent: false, }), - Some(channels[1].0.contents.short_channel_id), None); + Some(channels[1].0.contents.short_channel_id), Some(next_hop_failure.clone())); run_onion_failure_test_with_fail_intercept("0-length channel update in final node UPDATE onion failure", 200, &nodes, &route, &payment_hash, &payment_secret, |_msg| {}, |msg| { let session_priv = SecretKey::from_slice(&[3; 32]).unwrap(); @@ -860,9 +869,9 @@ fn do_test_onion_failure_stale_channel_update(announce_for_forwarding: bool) { let short_channel_id = channel_to_update.1; let network_update = NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false }; run_onion_failure_test( - name, 0, &nodes, &route, 
&payment_hash, &payment_secret, |_| {}, || {}, true, + name, 100, &nodes, &route, &payment_hash, &payment_secret, |_| {}, || {}, true, Some(error_code), Some(network_update), Some(short_channel_id), - None, + Some(HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channel_to_update.0 }), ); }; @@ -1143,6 +1152,8 @@ fn test_phantom_onion_hmac_failure() { nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &update_add); commitment_signed_dance!(nodes[1], nodes[0], &update_0.commitment_signed, false, true); + expect_pending_htlcs_forwardable_ignore!(nodes[1]); + nodes[1].node.process_pending_update_add_htlcs(); // Modify the payload so the phantom hop's HMAC is bogus. let sha256_of_onion = { @@ -1161,7 +1172,6 @@ fn test_phantom_onion_hmac_failure() { _ => panic!("Unexpected forward"), } }; - expect_pending_htlcs_forwardable_ignore!(nodes[1]); nodes[1].node.process_pending_htlc_forwards(); expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]); nodes[1].node.process_pending_htlc_forwards(); @@ -1205,6 +1215,8 @@ fn test_phantom_invalid_onion_payload() { nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &update_add); commitment_signed_dance!(nodes[1], nodes[0], &update_0.commitment_signed, false, true); + expect_pending_htlcs_forwardable_ignore!(nodes[1]); + nodes[1].node.process_pending_update_add_htlcs(); // Modify the onion packet to have an invalid payment amount. for (_, pending_forwards) in nodes[1].node.forward_htlcs.lock().unwrap().iter_mut() { @@ -1237,7 +1249,6 @@ fn test_phantom_invalid_onion_payload() { } } } - expect_pending_htlcs_forwardable_ignore!(nodes[1]); nodes[1].node.process_pending_htlc_forwards(); expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]); nodes[1].node.process_pending_htlc_forwards(); @@ -1280,6 +1291,8 @@ fn test_phantom_final_incorrect_cltv_expiry() { nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &update_add); commitment_signed_dance!(nodes[1], nodes[0], &update_0.commitment_signed, false, true); + expect_pending_htlcs_forwardable_ignore!(nodes[1]); + nodes[1].node.process_pending_update_add_htlcs(); // Modify the payload so the phantom hop's HMAC is bogus. 
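+ // With update_add decoding now deferred, this HTLC only appears in
+ // `forward_htlcs` once `process_pending_update_add_htlcs` has run, so the
+ // tampering below must come after that call.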
for (_, pending_forwards) in nodes[1].node.forward_htlcs.lock().unwrap().iter_mut() { @@ -1294,7 +1307,6 @@ fn test_phantom_final_incorrect_cltv_expiry() { } } } - expect_pending_htlcs_forwardable_ignore!(nodes[1]); nodes[1].node.process_pending_htlc_forwards(); expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]); nodes[1].node.process_pending_htlc_forwards(); @@ -1391,6 +1403,12 @@ fn test_phantom_failure_modified_cltv() { nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &update_add); commitment_signed_dance!(nodes[1], nodes[0], &update_0.commitment_signed, false, true); + expect_pending_htlcs_forwardable!(nodes[1]); + expect_htlc_handling_failed_destinations!( + nodes[1].node.get_and_clear_pending_events(), + &[HTLCDestination::UnknownNextHop { requested_forward_scid: phantom_scid }] + ); + check_added_monitors(&nodes[1], 1); let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert!(update_1.update_fail_htlcs.len() == 1); @@ -1434,6 +1452,12 @@ fn test_phantom_failure_expires_too_soon() { connect_blocks(&nodes[1], CLTV_FAR_FAR_AWAY); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &update_add); commitment_signed_dance!(nodes[1], nodes[0], &update_0.commitment_signed, false, true); + expect_pending_htlcs_forwardable!(nodes[1]); + expect_htlc_handling_failed_destinations!( + nodes[1].node.get_and_clear_pending_events(), + &[HTLCDestination::UnknownNextHop { requested_forward_scid: phantom_scid }] + ); + check_added_monitors(&nodes[1], 1); let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert!(update_1.update_fail_htlcs.len() == 1); @@ -1522,7 +1546,7 @@ fn do_test_phantom_dust_exposure_failure(multiplier_dust_limit: bool) { // Get the route with an amount exceeding the dust exposure threshold of nodes[1]. let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[1], Some(max_dust_exposure + 1)); - let (mut route, _) = get_phantom_route!(nodes, max_dust_exposure + 1, channel); + let (mut route, phantom_scid) = get_phantom_route!(nodes, max_dust_exposure + 1, channel); // Route the HTLC through to the destination. nodes[0].node.send_payment_with_route(route.clone(), payment_hash, @@ -1533,6 +1557,12 @@ fn do_test_phantom_dust_exposure_failure(multiplier_dust_limit: bool) { nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &update_add); commitment_signed_dance!(nodes[1], nodes[0], &update_0.commitment_signed, false, true); + expect_pending_htlcs_forwardable!(nodes[1]); + expect_htlc_handling_failed_destinations!( + nodes[1].node.get_and_clear_pending_events(), + &[HTLCDestination::UnknownNextHop { requested_forward_scid: phantom_scid }] + ); + check_added_monitors(&nodes[1], 1); let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert!(update_1.update_fail_htlcs.len() == 1); @@ -1543,8 +1573,7 @@ fn do_test_phantom_dust_exposure_failure(multiplier_dust_limit: bool) { // Ensure the payment fails with the expected error. 
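+ // The blamed SCID is now the phantom SCID itself: nodes[1] rejects the
+ // HTLC while processing the forward over the phantom "channel", so no
+ // real last hop is involved in the failure.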
let err_data = 0u16.to_be_bytes(); let mut fail_conditions = PaymentFailedConditions::new() - .blamed_scid(route.paths[0].hops.last().as_ref().unwrap().short_channel_id) - .blamed_chan_closed(false) + .blamed_scid(phantom_scid) .expected_htlc_error_data(0x1000 | 7, &err_data); expect_payment_failed_conditions(&nodes[0], payment_hash, false, fail_conditions); } diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index 0c9c5d0e920..430f9014703 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -494,6 +494,8 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() { let update_add_1 = update_1.update_add_htlcs[0].clone(); nodes[3].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &update_add_1); commitment_signed_dance!(nodes[3], nodes[1], update_1.commitment_signed, false, true); + expect_pending_htlcs_forwardable_ignore!(nodes[3]); + nodes[3].node.process_pending_update_add_htlcs(); assert!(nodes[3].node.get_and_clear_pending_msg_events().is_empty()); for (_, pending_forwards) in nodes[3].node.forward_htlcs.lock().unwrap().iter_mut() { @@ -514,7 +516,7 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() { } } } - expect_pending_htlcs_forwardable!(nodes[3]); + nodes[3].node.process_pending_htlc_forwards(); // Pay along nodes[2] route.paths[0].hops[0].pubkey = nodes[2].node.get_our_node_id(); @@ -540,6 +542,8 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() { let update_add_3 = update_3.update_add_htlcs[0].clone(); nodes[3].node.handle_update_add_htlc(nodes[2].node.get_our_node_id(), &update_add_3); commitment_signed_dance!(nodes[3], nodes[2], update_3.commitment_signed, false, true); + expect_pending_htlcs_forwardable_ignore!(nodes[3]); + nodes[3].node.process_pending_update_add_htlcs(); assert!(nodes[3].node.get_and_clear_pending_msg_events().is_empty()); for (_, pending_forwards) in nodes[3].node.forward_htlcs.lock().unwrap().iter_mut() { @@ -560,7 +564,8 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() { } } } - expect_pending_htlcs_forwardable!(nodes[3]); + nodes[3].node.process_pending_htlc_forwards(); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], vec![HTLCDestination::FailedPayment { payment_hash }]); check_added_monitors!(nodes[3], 1); // Fail back along nodes[2] @@ -575,7 +580,6 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() { commitment_signed_dance!(nodes[0], nodes[2], update_fail_1.commitment_signed, false); expect_payment_failed_conditions(&nodes[0], payment_hash, true, PaymentFailedConditions::new()); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], vec![HTLCDestination::FailedPayment { payment_hash }]); } @@ -656,6 +660,12 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true); + expect_pending_htlcs_forwardable!(nodes[1]); + expect_htlc_handling_failed_destinations!( + nodes[1].node.get_and_clear_pending_events(), + &[HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2}] + ); + check_added_monitors(&nodes[1], 1); // nodes[1] now immediately fails the HTLC as the next-hop channel is disconnected let _ = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -1281,7 +1291,7 @@ fn successful_probe_yields_event() { let res = 
nodes[0].node.send_probe(route.paths[0].clone()).unwrap(); - let expected_route: &[&[&Node]] = &[&[&nodes[1], &nodes[2]]]; + let expected_route: &[(&[&Node], PaymentHash)] = &[(&[&nodes[1], &nodes[2]], res.0)]; send_probe_along_route(&nodes[0], expected_route); @@ -1422,7 +1432,7 @@ fn preflight_probes_yield_event_skip_private_hop() { let route_params = RouteParameters::from_payment_params_and_value(payment_params, recv_value); let res = nodes[0].node.send_preflight_probes(route_params, None).unwrap(); - let expected_route: &[&[&Node]] = &[&[&nodes[1], &nodes[2], &nodes[3]]]; + let expected_route: &[(&[&Node], PaymentHash)] = &[(&[&nodes[1], &nodes[2], &nodes[3]], res[0].0)]; assert_eq!(res.len(), expected_route.len()); @@ -1468,7 +1478,7 @@ fn preflight_probes_yield_event() { let route_params = RouteParameters::from_payment_params_and_value(payment_params, recv_value); let res = nodes[0].node.send_preflight_probes(route_params, None).unwrap(); - let expected_route: &[&[&Node]] = &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]]; + let expected_route: &[(&[&Node], PaymentHash)] = &[(&[&nodes[1], &nodes[3]], res[0].0), (&[&nodes[2], &nodes[3]], res[1].0)]; assert_eq!(res.len(), expected_route.len()); @@ -1515,7 +1525,7 @@ fn preflight_probes_yield_event_and_skip() { let route_params = RouteParameters::from_payment_params_and_value(payment_params, recv_value); let res = nodes[0].node.send_preflight_probes(route_params, None).unwrap(); - let expected_route : &[&[&Node]] = &[&[&nodes[1], &nodes[2], &nodes[4]]]; + let expected_route: &[(&[&Node], PaymentHash)] = &[(&[&nodes[1], &nodes[2], &nodes[4]], res[0].0)]; // We check that only one probe was sent, the other one was skipped due to limited liquidity. assert_eq!(res.len(), 1); @@ -1902,6 +1912,7 @@ fn do_test_intercepted_payment(test: InterceptTest) { }; nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], &payment_event.commitment_msg, false, true); + expect_pending_htlcs_forwardable!(nodes[1]); // Check that we generate the PaymentIntercepted event when an intercept forward is detected. 
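+ // The intercept is only detected, and the PaymentIntercepted event only
+ // generated, once the deferred update_add batch has been processed, hence
+ // the forwardable round-trip above.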
let events = nodes[1].node.get_and_clear_pending_events(); @@ -2086,6 +2097,7 @@ fn do_accept_underpaying_htlcs_config(num_mpp_parts: usize) { for (idx, ev) in events.into_iter().enumerate() { nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &ev.msgs[0]); do_commitment_signed_dance(&nodes[1], &nodes[0], &ev.commitment_msg, false, true); + expect_pending_htlcs_forwardable!(nodes[1]); let events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); @@ -2894,8 +2906,10 @@ fn no_extra_retries_on_back_to_back_fail() { let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); - let chan_1_scid = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 0).0.contents.short_channel_id; - let chan_2_scid = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 10_000_000, 0).0.contents.short_channel_id; + let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 0); + let chan_1_scid = chan_1.0.contents.short_channel_id; + let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 10_000_000, 0); + let chan_2_scid = chan_2.0.contents.short_channel_id; let amt_msat = 200_000_000; let (_, payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], amt_msat); @@ -2964,66 +2978,58 @@ fn no_extra_retries_on_back_to_back_fail() { route.route_params = Some(retry_params.clone()); nodes[0].router.expect_find_route(retry_params, Ok(route.clone())); + // We can't use the commitment_signed_dance macro helper because in this test we'll be sending + // two HTLCs back-to-back on the same channel, and the macro only expects to handle one at a + // time. nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap(); - let htlc_updates = SendEvent::from_node(&nodes[0]); + + let first_htlc_updates = SendEvent::from_node(&nodes[0]); check_added_monitors!(nodes[0], 1); - assert_eq!(htlc_updates.msgs.len(), 1); + assert_eq!(first_htlc_updates.msgs.len(), 1); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &htlc_updates.msgs[0]); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &htlc_updates.commitment_msg); + nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &first_htlc_updates.msgs[0]); + nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &first_htlc_updates.commitment_msg); check_added_monitors!(nodes[1], 1); - let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_first_raa); check_added_monitors!(nodes[0], 1); + let second_htlc_updates = SendEvent::from_node(&nodes[0]); + assert_eq!(second_htlc_updates.msgs.len(), 1); nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_first_cs); check_added_monitors!(nodes[0], 1); + let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_first_raa); + check_added_monitors!(nodes[1], 1); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &second_htlc_updates.msgs[0]); 
nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &second_htlc_updates.commitment_msg); check_added_monitors!(nodes[1], 1); - let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); - - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_first_raa); - check_added_monitors!(nodes[1], 1); - let bs_fail_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let (bs_second_raa, bs_second_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_second_raa); check_added_monitors!(nodes[0], 1); - - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &bs_fail_update.update_fail_htlcs[0]); - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_fail_update.commitment_signed); + nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_second_cs); check_added_monitors!(nodes[0], 1); - let (as_second_raa, as_third_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_second_raa); check_added_monitors!(nodes[1], 1); - let bs_second_fail_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &as_third_cs); - check_added_monitors!(nodes[1], 1); - let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &bs_second_fail_update.update_fail_htlcs[0]); - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_second_fail_update.commitment_signed); - check_added_monitors!(nodes[0], 1); - - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_third_raa); - check_added_monitors!(nodes[0], 1); - let (as_third_raa, as_fourth_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); - - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_third_raa); - check_added_monitors!(nodes[1], 1); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &as_fourth_cs); - check_added_monitors!(nodes[1], 1); - let bs_fourth_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + expect_pending_htlcs_forwardable!(nodes[1]); + let next_hop_failure = HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }; + expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[next_hop_failure.clone(), next_hop_failure.clone()]); + check_added_monitors(&nodes[1], 1); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_fourth_raa); - check_added_monitors!(nodes[0], 1); + let bs_fail_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + assert_eq!(bs_fail_update.update_fail_htlcs.len(), 2); + nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &bs_fail_update.update_fail_htlcs[0]); + nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &bs_fail_update.update_fail_htlcs[1]); + commitment_signed_dance!(nodes[0], nodes[1], 
bs_fail_update.commitment_signed, false); // At this point A has sent two HTLCs which both failed due to lack of fee. It now has two // pending `PaymentPathFailed` events, one with `all_paths_failed` unset, and the second @@ -3064,6 +3070,10 @@ fn no_extra_retries_on_back_to_back_fail() { nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &retry_htlc_updates.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], &retry_htlc_updates.commitment_msg, false, true); + expect_pending_htlcs_forwardable!(nodes[1]); + expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[next_hop_failure.clone()]); + check_added_monitors(&nodes[1], 1); + let bs_fail_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &bs_fail_update.update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], &bs_fail_update.commitment_signed, false, true); @@ -3099,8 +3109,10 @@ fn test_simple_partial_retry() { let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); - let chan_1_scid = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 0).0.contents.short_channel_id; - let chan_2_scid = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 10_000_000, 0).0.contents.short_channel_id; + let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 0); + let chan_1_scid = chan_1.0.contents.short_channel_id; + let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 10_000_000, 0); + let chan_2_scid = chan_2.0.contents.short_channel_id; let amt_msat = 200_000_000; let (_, payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[2], amt_msat); @@ -3169,52 +3181,64 @@ fn test_simple_partial_retry() { route.route_params = Some(retry_params.clone()); nodes[0].router.expect_find_route(retry_params, Ok(route.clone())); + // We can't use the commitment_signed_dance macro helper because in this test we'll be sending + // two HTLCs back-to-back on the same channel, and the macro only expects to handle one at a + // time. 
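+ // Manual sequence: drive the first add's commitment dance by hand (B's
+ // revoke_and_ack releases A's second pending HTLC), after which the second
+ // add can go through the usual commitment_signed_dance below.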
nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap(); - let htlc_updates = SendEvent::from_node(&nodes[0]); + let first_htlc_updates = SendEvent::from_node(&nodes[0]); check_added_monitors!(nodes[0], 1); - assert_eq!(htlc_updates.msgs.len(), 1); + assert_eq!(first_htlc_updates.msgs.len(), 1); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &htlc_updates.msgs[0]); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &htlc_updates.commitment_msg); + nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &first_htlc_updates.msgs[0]); + nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &first_htlc_updates.commitment_msg); check_added_monitors!(nodes[1], 1); - let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_first_raa); check_added_monitors!(nodes[0], 1); + let second_htlc_updates = SendEvent::from_node(&nodes[0]); + assert_eq!(second_htlc_updates.msgs.len(), 1); nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_first_cs); check_added_monitors!(nodes[0], 1); - let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); - - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &second_htlc_updates.msgs[0]); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &second_htlc_updates.commitment_msg); - check_added_monitors!(nodes[1], 1); - let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_first_raa); check_added_monitors!(nodes[1], 1); - let bs_fail_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_second_raa); - check_added_monitors!(nodes[0], 1); - - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &bs_fail_update.update_fail_htlcs[0]); - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_fail_update.commitment_signed); - check_added_monitors!(nodes[0], 1); - let (as_second_raa, as_third_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_second_raa); - check_added_monitors!(nodes[1], 1); - - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &as_third_cs); - check_added_monitors!(nodes[1], 1); + nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &second_htlc_updates.msgs[0]); + commitment_signed_dance!(nodes[1], nodes[0], second_htlc_updates.commitment_msg, false); - let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + expect_pending_htlcs_forwardable!(nodes[1]); + let next_hop_failure = HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }; + 
expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[next_hop_failure.clone()]); + check_added_monitors(&nodes[1], 2); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_third_raa); - check_added_monitors!(nodes[0], 1); + { + let mut msg_events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), 2); + let mut handle_update_htlcs = |event: MessageSendEvent| { + if let MessageSendEvent::UpdateHTLCs { node_id, updates } = event { + if node_id == nodes[0].node.get_our_node_id() { + assert_eq!(updates.update_fail_htlcs.len(), 1); + nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); + commitment_signed_dance!(nodes[0], nodes[1], &updates.commitment_signed, false); + } else if node_id == nodes[2].node.get_our_node_id() { + assert_eq!(updates.update_add_htlcs.len(), 1); + nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &updates.update_add_htlcs[0]); + commitment_signed_dance!(nodes[2], nodes[1], &updates.commitment_signed, false); + } else { + panic!("Unexpected node_id for UpdateHTLCs send"); + } + } else { + panic!("Unexpected event"); + } + }; + handle_update_htlcs(msg_events.remove(0)); + handle_update_htlcs(msg_events.remove(0)); + } let mut events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 2); @@ -3240,10 +3264,9 @@ fn test_simple_partial_retry() { expect_pending_htlcs_forwardable!(nodes[1]); check_added_monitors!(nodes[1], 1); - let bs_forward_update = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id()); - nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &bs_forward_update.update_add_htlcs[0]); - nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &bs_forward_update.update_add_htlcs[1]); - commitment_signed_dance!(nodes[2], nodes[1], &bs_forward_update.commitment_signed, false); + let bs_second_forward_update = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id()); + nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &bs_second_forward_update.update_add_htlcs[0]); + commitment_signed_dance!(nodes[2], nodes[1], &bs_second_forward_update.commitment_signed, false); expect_pending_htlcs_forwardable!(nodes[2]); expect_payment_claimable!(nodes[2], payment_hash, payment_secret, amt_msat); @@ -3378,6 +3401,13 @@ fn test_threaded_payment_retries() { nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &send_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true); + expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); + expect_htlc_handling_failed_destinations!( + nodes[1].node.get_and_clear_pending_events(), + &[HTLCDestination::UnknownNextHop { requested_forward_scid: route.paths[0].hops[1].short_channel_id }] + ); + check_added_monitors(&nodes[1], 1); // Note that we only push one route into `expect_find_route` at a time, because that's all // the retries (should) need. 
If the bug is reintroduced "real" routes may be selected, but @@ -4055,6 +4085,12 @@ fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) { nodes[2].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &c_recv_ev.msgs[0]); commitment_signed_dance!(nodes[2], nodes[0], c_recv_ev.commitment_msg, false, true); + expect_pending_htlcs_forwardable!(nodes[2]); + expect_htlc_handling_failed_destinations!( + nodes[2].node.get_and_clear_pending_events(), + &[HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_id_cd }] + ); + check_added_monitors(&nodes[2], 1); let cs_fail = get_htlc_update_msgs(&nodes[2], &nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fail_htlc(nodes[2].node.get_our_node_id(), &cs_fail.update_fail_htlcs[0]); diff --git a/lightning/src/ln/priv_short_conf_tests.rs b/lightning/src/ln/priv_short_conf_tests.rs index 55e97e06894..5c45498753b 100644 --- a/lightning/src/ln/priv_short_conf_tests.rs +++ b/lightning/src/ln/priv_short_conf_tests.rs @@ -71,6 +71,12 @@ fn test_priv_forwarding_rejection() { let payment_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true); + expect_pending_htlcs_forwardable!(nodes[1]); + expect_htlc_handling_failed_destinations!( + nodes[1].node.get_and_clear_pending_events(), + &[HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }] + ); + check_added_monitors(&nodes[1], 1); let htlc_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert!(htlc_fail_updates.update_add_htlcs.is_empty()); @@ -435,6 +441,12 @@ fn test_inbound_scid_privacy() { assert_eq!(nodes[1].node.get_our_node_id(), payment_event.node_id); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, true, true); + expect_pending_htlcs_forwardable!(nodes[1]); + expect_htlc_handling_failed_destinations!( + nodes[1].node.get_and_clear_pending_events(), + &[HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: last_hop[0].channel_id }] + ); + check_added_monitors(&nodes[1], 1); nodes[1].logger.assert_log_regex("lightning::ln::channelmanager", regex::Regex::new(r"Refusing to forward over real channel SCID as our counterparty requested").unwrap(), 1); @@ -514,6 +526,13 @@ fn test_scid_alias_returned() { nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &as_updates.update_add_htlcs[0]); commitment_signed_dance!(nodes[1], nodes[0], &as_updates.commitment_signed, false, true); + expect_pending_htlcs_forwardable!(nodes[1]); + expect_htlc_handling_failed_destinations!( + nodes[1].node.get_and_clear_pending_events(), + &[HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan.0.channel_id }] + ); + check_added_monitors(&nodes[1], 1); + let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &bs_updates.update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false, true); diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index 
28465a09660..039fe85197f 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -744,10 +744,6 @@ fn test_forwardable_regen() { nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); - // There is already a PendingHTLCsForwardable event "pending" so another one will not be - // generated - assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); - // Now restart nodes[1] and make sure it regenerates a single PendingHTLCsForwardable nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); nodes[2].node.peer_disconnected(nodes[1].node.get_our_node_id()); @@ -1017,9 +1013,12 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht // present when we serialized. let node_encoded = nodes[1].node.encode(); + expect_pending_htlcs_forwardable_ignore!(nodes[1]); + let mut intercept_id = None; let mut expected_outbound_amount_msat = None; if use_intercept { + nodes[1].node.process_pending_update_add_htlcs(); let events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); match events[0] { @@ -1033,7 +1032,7 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht nodes[2].node.get_our_node_id(), expected_outbound_amount_msat.unwrap()).unwrap(); } - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); let payment_event = SendEvent::from_node(&nodes[1]); nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event.msgs[0]); diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs index 9fd428329af..ba682f68375 100644 --- a/lightning/src/ln/shutdown_tests.rs +++ b/lightning/src/ln/shutdown_tests.rs @@ -465,6 +465,12 @@ fn do_htlc_fail_async_shutdown(blinded_recipient: bool) { check_added_monitors!(nodes[1], 1); nodes[1].node.handle_shutdown(nodes[0].node.get_our_node_id(), &node_0_shutdown); commitment_signed_dance!(nodes[1], nodes[0], (), false, true, false, false); + expect_pending_htlcs_forwardable!(nodes[1]); + expect_htlc_handling_failed_destinations!( + nodes[1].node.get_and_clear_pending_events(), + &[HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }] + ); + check_added_monitors(&nodes[1], 1); let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert!(updates_2.update_add_htlcs.is_empty());