diff --git a/fuzz/src/bin/chanmon_consistency_target.rs b/fuzz/src/bin/chanmon_consistency_target.rs
index c01eb57b2b0..c480b3d17c1 100644
--- a/fuzz/src/bin/chanmon_consistency_target.rs
+++ b/fuzz/src/bin/chanmon_consistency_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/chanmon_deser_target.rs b/fuzz/src/bin/chanmon_deser_target.rs
index 8f426c402f8..8b57874cf6c 100644
--- a/fuzz/src/bin/chanmon_deser_target.rs
+++ b/fuzz/src/bin/chanmon_deser_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/full_stack_target.rs b/fuzz/src/bin/full_stack_target.rs
index 527fac217d7..7dae655316b 100644
--- a/fuzz/src/bin/full_stack_target.rs
+++ b/fuzz/src/bin/full_stack_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_accept_channel_target.rs b/fuzz/src/bin/msg_accept_channel_target.rs
index 8e36dbbb64a..d28895a0fe9 100644
--- a/fuzz/src/bin/msg_accept_channel_target.rs
+++ b/fuzz/src/bin/msg_accept_channel_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_announcement_signatures_target.rs b/fuzz/src/bin/msg_announcement_signatures_target.rs
index 1b15d2fd3e9..dcfc006eb0c 100644
--- a/fuzz/src/bin/msg_announcement_signatures_target.rs
+++ b/fuzz/src/bin/msg_announcement_signatures_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_channel_announcement_target.rs b/fuzz/src/bin/msg_channel_announcement_target.rs
index 5363423076d..0efc0b41fc6 100644
--- a/fuzz/src/bin/msg_channel_announcement_target.rs
+++ b/fuzz/src/bin/msg_channel_announcement_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_channel_reestablish_target.rs b/fuzz/src/bin/msg_channel_reestablish_target.rs
index 5d8d028684c..0111d2b2754 100644
--- a/fuzz/src/bin/msg_channel_reestablish_target.rs
+++ b/fuzz/src/bin/msg_channel_reestablish_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_channel_update_target.rs b/fuzz/src/bin/msg_channel_update_target.rs
index beac3f0ce56..4bfb9860c8c 100644
--- a/fuzz/src/bin/msg_channel_update_target.rs
+++ b/fuzz/src/bin/msg_channel_update_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_closing_signed_target.rs b/fuzz/src/bin/msg_closing_signed_target.rs
index dfb9c18df89..12dbb31cd20 100644
--- a/fuzz/src/bin/msg_closing_signed_target.rs
+++ b/fuzz/src/bin/msg_closing_signed_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_commitment_signed_target.rs b/fuzz/src/bin/msg_commitment_signed_target.rs
index 585d97aaad4..407f0288cf5 100644
--- a/fuzz/src/bin/msg_commitment_signed_target.rs
+++ b/fuzz/src/bin/msg_commitment_signed_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_decoded_onion_error_packet_target.rs b/fuzz/src/bin/msg_decoded_onion_error_packet_target.rs
index 0be01b95cf3..9f76e69679a 100644
--- a/fuzz/src/bin/msg_decoded_onion_error_packet_target.rs
+++ b/fuzz/src/bin/msg_decoded_onion_error_packet_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_error_message_target.rs b/fuzz/src/bin/msg_error_message_target.rs
index dbe6590ab42..dc418c93d69 100644
--- a/fuzz/src/bin/msg_error_message_target.rs
+++ b/fuzz/src/bin/msg_error_message_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_funding_created_target.rs b/fuzz/src/bin/msg_funding_created_target.rs
index 8f174cce5d8..6fb87c4082a 100644
--- a/fuzz/src/bin/msg_funding_created_target.rs
+++ b/fuzz/src/bin/msg_funding_created_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_funding_locked_target.rs b/fuzz/src/bin/msg_funding_locked_target.rs
index 47ce4a905b8..7b8f21f8a0b 100644
--- a/fuzz/src/bin/msg_funding_locked_target.rs
+++ b/fuzz/src/bin/msg_funding_locked_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_funding_signed_target.rs b/fuzz/src/bin/msg_funding_signed_target.rs
index c1b756fe673..ea05acdcee1 100644
--- a/fuzz/src/bin/msg_funding_signed_target.rs
+++ b/fuzz/src/bin/msg_funding_signed_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_gossip_timestamp_filter_target.rs b/fuzz/src/bin/msg_gossip_timestamp_filter_target.rs
index 1ebd769035a..1aba4e3fefe 100644
--- a/fuzz/src/bin/msg_gossip_timestamp_filter_target.rs
+++ b/fuzz/src/bin/msg_gossip_timestamp_filter_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_init_target.rs b/fuzz/src/bin/msg_init_target.rs
index 2a39565fbbf..dd7b197a42e 100644
--- a/fuzz/src/bin/msg_init_target.rs
+++ b/fuzz/src/bin/msg_init_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_node_announcement_target.rs b/fuzz/src/bin/msg_node_announcement_target.rs
index 1c150beb0bd..63b23bd669d 100644
--- a/fuzz/src/bin/msg_node_announcement_target.rs
+++ b/fuzz/src/bin/msg_node_announcement_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_onion_hop_data_target.rs b/fuzz/src/bin/msg_onion_hop_data_target.rs
index 73b7d5f27e6..493817c7eea 100644
--- a/fuzz/src/bin/msg_onion_hop_data_target.rs
+++ b/fuzz/src/bin/msg_onion_hop_data_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_open_channel_target.rs b/fuzz/src/bin/msg_open_channel_target.rs
index 515cba9e87c..d6c9de48f28 100644
--- a/fuzz/src/bin/msg_open_channel_target.rs
+++ b/fuzz/src/bin/msg_open_channel_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_ping_target.rs b/fuzz/src/bin/msg_ping_target.rs
index 2a02fac5638..e739cd58052 100644
--- a/fuzz/src/bin/msg_ping_target.rs
+++ b/fuzz/src/bin/msg_ping_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_pong_target.rs b/fuzz/src/bin/msg_pong_target.rs
index 20304c4530e..73113472c33 100644
--- a/fuzz/src/bin/msg_pong_target.rs
+++ b/fuzz/src/bin/msg_pong_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_query_channel_range_target.rs b/fuzz/src/bin/msg_query_channel_range_target.rs
index b7e242467b9..db22fa525b8 100644
--- a/fuzz/src/bin/msg_query_channel_range_target.rs
+++ b/fuzz/src/bin/msg_query_channel_range_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_query_short_channel_ids_target.rs b/fuzz/src/bin/msg_query_short_channel_ids_target.rs
index ad18f21e2c0..6c470e3b12e 100644
--- a/fuzz/src/bin/msg_query_short_channel_ids_target.rs
+++ b/fuzz/src/bin/msg_query_short_channel_ids_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_reply_channel_range_target.rs b/fuzz/src/bin/msg_reply_channel_range_target.rs
index 283ac0d4662..68651347b6f 100644
--- a/fuzz/src/bin/msg_reply_channel_range_target.rs
+++ b/fuzz/src/bin/msg_reply_channel_range_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_reply_short_channel_ids_end_target.rs b/fuzz/src/bin/msg_reply_short_channel_ids_end_target.rs
index c8b0e7804db..de3e9f1cce3 100644
--- a/fuzz/src/bin/msg_reply_short_channel_ids_end_target.rs
+++ b/fuzz/src/bin/msg_reply_short_channel_ids_end_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_revoke_and_ack_target.rs b/fuzz/src/bin/msg_revoke_and_ack_target.rs
index 34df3579d7d..e0f781986d4 100644
--- a/fuzz/src/bin/msg_revoke_and_ack_target.rs
+++ b/fuzz/src/bin/msg_revoke_and_ack_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_shutdown_target.rs b/fuzz/src/bin/msg_shutdown_target.rs
index f682e2afaa8..6220fc92803 100644
--- a/fuzz/src/bin/msg_shutdown_target.rs
+++ b/fuzz/src/bin/msg_shutdown_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_update_add_htlc_target.rs b/fuzz/src/bin/msg_update_add_htlc_target.rs
index a8b3d4bab32..dffdb830939 100644
--- a/fuzz/src/bin/msg_update_add_htlc_target.rs
+++ b/fuzz/src/bin/msg_update_add_htlc_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_update_fail_htlc_target.rs b/fuzz/src/bin/msg_update_fail_htlc_target.rs
index ee0b5206789..2828b396ad4 100644
--- a/fuzz/src/bin/msg_update_fail_htlc_target.rs
+++ b/fuzz/src/bin/msg_update_fail_htlc_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_update_fail_malformed_htlc_target.rs b/fuzz/src/bin/msg_update_fail_malformed_htlc_target.rs
index 1b5435c6789..8bf82a517cf 100644
--- a/fuzz/src/bin/msg_update_fail_malformed_htlc_target.rs
+++ b/fuzz/src/bin/msg_update_fail_malformed_htlc_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_update_fee_target.rs b/fuzz/src/bin/msg_update_fee_target.rs
index b06d958ede6..e9e63f28a8c 100644
--- a/fuzz/src/bin/msg_update_fee_target.rs
+++ b/fuzz/src/bin/msg_update_fee_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/msg_update_fulfill_htlc_target.rs b/fuzz/src/bin/msg_update_fulfill_htlc_target.rs
index 26f53cfcf54..4820b0adf9f 100644
--- a/fuzz/src/bin/msg_update_fulfill_htlc_target.rs
+++ b/fuzz/src/bin/msg_update_fulfill_htlc_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/peer_crypt_target.rs b/fuzz/src/bin/peer_crypt_target.rs
index e0fc97b5798..8f62626983c 100644
--- a/fuzz/src/bin/peer_crypt_target.rs
+++ b/fuzz/src/bin/peer_crypt_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/router_target.rs b/fuzz/src/bin/router_target.rs
index c2ef438bb8d..4df8b2a0065 100644
--- a/fuzz/src/bin/router_target.rs
+++ b/fuzz/src/bin/router_target.rs
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
diff --git a/fuzz/src/bin/target_template.txt b/fuzz/src/bin/target_template.txt
index f73fc6d83d3..7db3bc80b9b 100644
--- a/fuzz/src/bin/target_template.txt
+++ b/fuzz/src/bin/target_template.txt
@@ -93,10 +93,18 @@ fn run_test_cases() {
 			}
 		}
 	}
+	let mut failed_outputs = Vec::new();
 	for (test, thread) in threads.drain(..) {
 		if let Some(output) = thread.join().unwrap() {
-			println!("Output of {}:\n{}", test, output);
-			panic!();
+			println!("\nOutput of {}:\n{}\n", test, output);
+			failed_outputs.push(test);
 		}
 	}
+	if !failed_outputs.is_empty() {
+		println!("Test cases which failed: ");
+		for case in failed_outputs {
+			println!("{}", case);
+		}
+		panic!();
+	}
 }
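Every fuzz binary above (and the target_template.txt they are generated from) receives the same hunk: instead of panicking at the first failing case, the runner now prints each failing thread's output as it is joined, remembers the failing case names, and panics exactly once at the end with a summary list. A minimal standalone sketch of that pattern follows; run_case and the case names are invented for illustration and are not part of the patch:

```rust
use std::thread;

// Stand-in for one fuzz test case: Some(output) on failure, None on success.
fn run_case(name: &'static str) -> Option<String> {
	if name.ends_with("_bad") { Some(format!("{} failed an assertion", name)) } else { None }
}

fn main() {
	let mut threads = Vec::new();
	for name in ["case_a", "case_b_bad", "case_c_bad"] {
		threads.push((name, thread::spawn(move || run_case(name))));
	}

	// Join every worker before deciding whether to panic, so one failure
	// cannot hide the output of cases that are still running.
	let mut failed_outputs = Vec::new();
	for (test, thread) in threads.drain(..) {
		if let Some(output) = thread.join().unwrap() {
			println!("\nOutput of {}:\n{}\n", test, output);
			failed_outputs.push(test);
		}
	}
	if !failed_outputs.is_empty() {
		println!("Test cases which failed: ");
		for case in failed_outputs {
			println!("{}", case);
		}
		panic!();
	}
}
```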
diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs
index e180805f236..810b51066a6 100644
--- a/fuzz/src/chanmon_consistency.rs
+++ b/fuzz/src/chanmon_consistency.rs
@@ -29,15 +29,14 @@ use bitcoin::hashes::sha256::Hash as Sha256;
 use bitcoin::hash_types::{BlockHash, WPubkeyHash};
 
 use lightning::chain;
-use lightning::chain::chainmonitor;
-use lightning::chain::channelmonitor;
+use lightning::chain::{chainmonitor, channelmonitor, Watch};
 use lightning::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, MonitorEvent};
 use lightning::chain::transaction::OutPoint;
 use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator};
 use lightning::chain::keysinterface::{KeysInterface, InMemorySigner};
 use lightning::ln::channelmanager::{ChannelManager, PaymentHash, PaymentPreimage, PaymentSecret, PaymentSendFailure, ChannelManagerReadArgs};
 use lightning::ln::features::{ChannelFeatures, InitFeatures, NodeFeatures};
-use lightning::ln::msgs::{CommitmentUpdate, ChannelMessageHandler, DecodeError, ErrorAction, UpdateAddHTLC, Init};
+use lightning::ln::msgs::{CommitmentUpdate, ChannelMessageHandler, DecodeError, UpdateAddHTLC, Init};
 use lightning::util::enforcing_trait_impls::{EnforcingSigner, INITIAL_REVOKED_COMMITMENT_NUMBER};
 use lightning::util::errors::APIError;
 use lightning::util::events;
@@ -45,7 +44,6 @@ use lightning::util::logger::Logger;
 use lightning::util::config::UserConfig;
 use lightning::util::events::{EventsProvider, MessageSendEventsProvider};
 use lightning::util::ser::{Readable, ReadableArgs, Writeable, Writer};
-use lightning::util::test_utils::OnlyReadsKeysInterface;
 
 use lightning::routing::router::{Route, RouteHop};
@@ -87,6 +85,7 @@ impl Writer for VecWriter {
 
 struct TestChainMonitor {
 	pub logger: Arc<dyn Logger>,
+	pub keys: Arc<KeyProvider>,
 	pub chain_monitor: Arc<chainmonitor::ChainMonitor<EnforcingSigner, Arc<dyn chain::Filter>, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>, Arc<TestPersister>>>,
 	pub update_ret: Mutex<Result<(), channelmonitor::ChannelMonitorUpdateErr>>,
 	// If we reload a node with an old copy of ChannelMonitors, the ChannelManager deserialization
@@ -98,17 +97,18 @@ struct TestChainMonitor {
 	pub should_update_manager: atomic::AtomicBool,
 }
 impl TestChainMonitor {
-	pub fn new(broadcaster: Arc<TestBroadcaster>, logger: Arc<dyn Logger>, feeest: Arc<FuzzEstimator>, persister: Arc<TestPersister>) -> Self {
+	pub fn new(broadcaster: Arc<TestBroadcaster>, logger: Arc<dyn Logger>, feeest: Arc<FuzzEstimator>, persister: Arc<TestPersister>, keys: Arc<KeyProvider>) -> Self {
 		Self {
 			chain_monitor: Arc::new(chainmonitor::ChainMonitor::new(None, broadcaster, logger.clone(), feeest, persister)),
 			logger,
+			keys,
 			update_ret: Mutex::new(Ok(())),
 			latest_monitors: Mutex::new(HashMap::new()),
 			should_update_manager: atomic::AtomicBool::new(false),
 		}
 	}
 }
-impl chain::Watch<EnforcingSigner> for TestChainMonitor {
+impl Watch<EnforcingSigner> for TestChainMonitor {
 	fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingSigner>) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
 		let mut ser = VecWriter(Vec::new());
 		monitor.write(&mut ser).unwrap();
@@ -127,12 +127,13 @@ impl chain::Watch<EnforcingSigner> for TestChainMonitor {
 			hash_map::Entry::Vacant(_) => panic!("Didn't have monitor on update call"),
 		};
 		let mut deserialized_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::
-			read(&mut Cursor::new(&map_entry.get().1), &OnlyReadsKeysInterface {}).unwrap().1;
+			read(&mut Cursor::new(&map_entry.get().1), &*self.keys).unwrap().1;
 		deserialized_monitor.update_monitor(&update, &&TestBroadcaster{}, &&FuzzEstimator{}, &self.logger).unwrap();
 		let mut ser = VecWriter(Vec::new());
 		deserialized_monitor.write(&mut ser).unwrap();
 		map_entry.insert((update.update_id, ser.0));
 		self.should_update_manager.store(true, atomic::Ordering::Relaxed);
+		assert!(self.chain_monitor.update_channel(funding_txo, update).is_ok());
 		self.update_ret.lock().unwrap().clone()
 	}
@@ -311,9 +312,9 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
 	macro_rules! make_node {
 		($node_id: expr) => { {
 			let logger: Arc<dyn Logger> = Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone()));
-			let monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), fee_est.clone(), Arc::new(TestPersister{})));
-
 			let keys_manager = Arc::new(KeyProvider { node_id: $node_id, rand_bytes_id: atomic::AtomicU8::new(0), revoked_commitments: Mutex::new(HashMap::new()) });
+			let monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), fee_est.clone(), Arc::new(TestPersister{}), Arc::clone(&keys_manager)));
+
 			let mut config = UserConfig::default();
 			config.channel_options.fee_proportional_millionths = 0;
 			config.channel_options.announced_channel = true;
@@ -327,7 +328,7 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
 		($ser: expr, $node_id: expr, $old_monitors: expr, $keys_manager: expr) => { {
 			let keys_manager = Arc::clone(& $keys_manager);
 			let logger: Arc<dyn Logger> = Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone()));
-			let chain_monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), fee_est.clone(), Arc::new(TestPersister{})));
+			let chain_monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), fee_est.clone(), Arc::new(TestPersister{}), Arc::clone(& $keys_manager)));
 
 			let mut config = UserConfig::default();
 			config.channel_options.fee_proportional_millionths = 0;
@@ -337,7 +338,7 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
 			let mut monitors = HashMap::new();
 			let mut old_monitors = $old_monitors.latest_monitors.lock().unwrap();
 			for (outpoint, (update_id, monitor_ser)) in old_monitors.drain() {
-				monitors.insert(outpoint, <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut Cursor::new(&monitor_ser), &OnlyReadsKeysInterface {}).expect("Failed to read monitor").1);
+				monitors.insert(outpoint, <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut Cursor::new(&monitor_ser), &*$keys_manager).expect("Failed to read monitor").1);
 				chain_monitor.latest_monitors.lock().unwrap().insert(outpoint, (update_id, monitor_ser));
 			}
 			let mut monitor_refs = HashMap::new();
@@ -355,7 +356,11 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
 				channel_monitors: monitor_refs,
 			};
 
-			(<(BlockHash, ChanMan)>::read(&mut Cursor::new(&$ser.0), read_args).expect("Failed to read manager").1, chain_monitor)
+			let res = (<(BlockHash, ChanMan)>::read(&mut Cursor::new(&$ser.0), read_args).expect("Failed to read manager").1, chain_monitor.clone());
+			for (funding_txo, mon) in monitors.drain() {
+				assert!(chain_monitor.chain_monitor.watch_channel(funding_txo, mon).is_ok());
+			}
+			res
 		} }
 	}
@@ -624,6 +629,9 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
 						events::MessageSendEvent::SendFundingLocked { .. } => {
 							// Can be generated as a reestablish response
 						},
+						events::MessageSendEvent::SendAnnouncementSignatures { .. } => {
+							// Can be generated as a reestablish response
+						},
 						events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {
 							// Can be generated due to a payment forward being rejected due to a
 							// channel having previously failed a monitor update
@@ -644,8 +652,8 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
 						events::MessageSendEvent::SendRevokeAndACK { .. } => {},
 						events::MessageSendEvent::SendChannelReestablish { .. } => {},
 						events::MessageSendEvent::SendFundingLocked { .. } => {},
+						events::MessageSendEvent::SendAnnouncementSignatures { .. } => {},
 						events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {},
-						events::MessageSendEvent::HandleError { action: ErrorAction::IgnoreError, .. } => {},
 						_ => panic!("Unhandled message event"),
 					}
 				}
@@ -657,8 +665,8 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
 						events::MessageSendEvent::SendRevokeAndACK { .. } => {},
 						events::MessageSendEvent::SendChannelReestablish { .. } => {},
 						events::MessageSendEvent::SendFundingLocked { .. } => {},
+						events::MessageSendEvent::SendAnnouncementSignatures { .. } => {},
 						events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {},
-						events::MessageSendEvent::HandleError { action: ErrorAction::IgnoreError, .. } => {},
 						_ => panic!("Unhandled message event"),
 					}
 				}
@@ -679,8 +687,8 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
 						events::MessageSendEvent::SendChannelReestablish { ref node_id, .. } => {
 							if *node_id != drop_node_id { true } else { false }
 						},
 						events::MessageSendEvent::SendFundingLocked { .. } => false,
+						events::MessageSendEvent::SendAnnouncementSignatures { .. } => false,
 						events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => false,
-						events::MessageSendEvent::HandleError { action: ErrorAction::IgnoreError, .. } => false,
 						_ => panic!("Unhandled message event"),
 					};
 					if push { msg_sink.push(event); }
@@ -748,23 +756,27 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
 			0x06 => *monitor_c.update_ret.lock().unwrap() = Ok(()),
 
 			0x08 => {
-				if let Some((id, _)) = monitor_a.latest_monitors.lock().unwrap().get(&chan_1_funding) {
-					nodes[0].channel_monitor_updated(&chan_1_funding, *id);
+				let mon_id = monitor_a.latest_monitors.lock().unwrap().get(&chan_1_funding).map(|(id, _)| *id);
+				if let Some(id) = mon_id {
+					nodes[0].channel_monitor_updated(&chan_1_funding, id);
 				}
 			},
 			0x09 => {
-				if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_1_funding) {
-					nodes[1].channel_monitor_updated(&chan_1_funding, *id);
+				let mon_id = monitor_b.latest_monitors.lock().unwrap().get(&chan_1_funding).map(|(id, _)| *id);
+				if let Some(id) = mon_id {
+					nodes[1].channel_monitor_updated(&chan_1_funding, id);
 				}
 			},
 			0x0a => {
-				if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_2_funding) {
-					nodes[1].channel_monitor_updated(&chan_2_funding, *id);
+				let mon_id = monitor_b.latest_monitors.lock().unwrap().get(&chan_2_funding).map(|(id, _)| *id);
+				if let Some(id) = mon_id {
+					nodes[1].channel_monitor_updated(&chan_2_funding, id);
 				}
 			},
 			0x0b => {
-				if let Some((id, _)) = monitor_c.latest_monitors.lock().unwrap().get(&chan_2_funding) {
-					nodes[2].channel_monitor_updated(&chan_2_funding, *id);
+				let mon_id = monitor_c.latest_monitors.lock().unwrap().get(&chan_2_funding).map(|(id, _)| *id);
+				if let Some(id) = mon_id {
+					nodes[2].channel_monitor_updated(&chan_2_funding, id);
 				}
 			},
@@ -919,17 +931,29 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
 				*monitor_b.update_ret.lock().unwrap() = Ok(());
 				*monitor_c.update_ret.lock().unwrap() = Ok(());
 
-				if let Some((id, _)) = monitor_a.latest_monitors.lock().unwrap().get(&chan_1_funding) {
-					nodes[0].channel_monitor_updated(&chan_1_funding, *id);
+				{
+					let mon_id = monitor_a.latest_monitors.lock().unwrap().get(&chan_1_funding).map(|(id, _)| *id);
+					if let Some(id) = mon_id {
+						nodes[0].channel_monitor_updated(&chan_1_funding, id);
+					}
 				}
-				if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_1_funding) {
-					nodes[1].channel_monitor_updated(&chan_1_funding, *id);
+				{
+					let mon_id = monitor_b.latest_monitors.lock().unwrap().get(&chan_1_funding).map(|(id, _)| *id);
+					if let Some(id) = mon_id {
+						nodes[1].channel_monitor_updated(&chan_1_funding, id);
+					}
 				}
-				if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_2_funding) {
-					nodes[1].channel_monitor_updated(&chan_2_funding, *id);
+				{
+					let mon_id = monitor_b.latest_monitors.lock().unwrap().get(&chan_2_funding).map(|(id, _)| *id);
+					if let Some(id) = mon_id {
+						nodes[1].channel_monitor_updated(&chan_2_funding, id);
+					}
 				}
-				if let Some((id, _)) = monitor_c.latest_monitors.lock().unwrap().get(&chan_2_funding) {
-					nodes[2].channel_monitor_updated(&chan_2_funding, *id);
+				{
+					let mon_id = monitor_c.latest_monitors.lock().unwrap().get(&chan_2_funding).map(|(id, _)| *id);
+					if let Some(id) = mon_id {
+						nodes[2].channel_monitor_updated(&chan_2_funding, id);
+					}
 				}
 
 				// Next, make sure peers are all connected to each other
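One recurring rewrite in the chanmon_consistency changes deserves a note: replacing `if let Some((id, _)) = ...latest_monitors.lock().unwrap().get(..)` with a copied-out `mon_id` is a lock-scope fix, not a style choice. Now that `TestChainMonitor::update_channel` forwards into the real `ChainMonitor` and touches `latest_monitors`, calling `channel_monitor_updated` while the guard from the `if let` scrutinee is still alive can re-enter the same mutex. A minimal sketch of the hazard, with invented stand-in types (and noting the temporary-lifetime rule shown is the pre-2024-edition behavior):

```rust
use std::collections::HashMap;
use std::sync::Mutex;

struct Monitors {
	latest: Mutex<HashMap<u32, (u64, Vec<u8>)>>,
}

impl Monitors {
	// Stand-in for a callback that re-acquires the same lock, the way
	// TestChainMonitor::update_channel locks latest_monitors.
	fn updated(&self, _id: u64) {
		let _guard = self.latest.lock().unwrap();
	}

	fn notify_bad(&self, key: u32) {
		// Deadlocks (in Rust editions before 2024): the scrutinee's MutexGuard
		// lives until the end of the `if let` body, where updated() locks again.
		if let Some((id, _)) = self.latest.lock().unwrap().get(&key) {
			self.updated(*id);
		}
	}

	fn notify_good(&self, key: u32) {
		// Copy the id out: the temporary guard is dropped at the end of this
		// statement, before updated() runs.
		let mon_id = self.latest.lock().unwrap().get(&key).map(|(id, _)| *id);
		if let Some(id) = mon_id {
			self.updated(id);
		}
	}
}

fn main() {
	let m = Monitors { latest: Mutex::new(HashMap::from([(1, (7, Vec::new()))])) };
	m.notify_good(1);
	// m.notify_bad(1); // would hang on a std::sync::Mutex
}
```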
diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs
index 8fc773f68ae..1a89a5cb92b 100644
--- a/lightning/src/ln/chanmon_update_fail_tests.rs
+++ b/lightning/src/ln/chanmon_update_fail_tests.rs
@@ -18,11 +18,12 @@ use bitcoin::network::constants::Network;
 use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr};
 use chain::transaction::OutPoint;
 use chain::Watch;
-use ln::channelmanager::{RAACommitmentOrder, PaymentPreimage, PaymentHash, PaymentSecret, PaymentSendFailure};
+use ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, RAACommitmentOrder, PaymentPreimage, PaymentHash, PaymentSecret, PaymentSendFailure};
 use ln::features::InitFeatures;
 use ln::msgs;
 use ln::msgs::{ChannelMessageHandler, ErrorAction, RoutingMessageHandler};
 use routing::router::get_route;
+use util::config::UserConfig;
 use util::enforcing_trait_impls::EnforcingSigner;
 use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
 use util::errors::APIError;
@@ -35,6 +36,8 @@ use ln::functional_test_utils::*;
 
 use util::test_utils;
 
+use std::collections::HashMap;
+
 // If persister_fail is true, we have the persister return a PermanentFailure
 // instead of the higher-level ChainMonitor.
 fn do_test_simple_monitor_permanent_update_fail(persister_fail: bool) {
@@ -1972,3 +1975,199 @@ fn test_path_paused_mpp() {
 
 	claim_payment_along_route_with_secret(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage, Some(payment_secret), 200_000);
 }
+
+fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
+	// Tests that, when we serialize a channel with AddHTLC entries in the holding cell, we
+	// properly free them on reconnect. We previously failed such HTLCs upon serialization, but
+	// that behavior was both somewhat unexpected and also broken (there was a debug assertion
+	// which failed in such a case).
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let persister: test_utils::TestPersister;
+	let new_chain_monitor: test_utils::TestChainMonitor;
+	let nodes_0_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
+	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	let chan_id = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 15_000_000, 7_000_000_000, InitFeatures::known(), InitFeatures::known()).2;
+	let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(&nodes[0]);
+	let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(&nodes[0]);
+
+	// Do a really complicated dance to get an HTLC into the holding cell, with MonitorUpdateFailed
+	// set but AwaitingRemoteRevoke unset. When this test was written, any attempts to send an HTLC
+	// while MonitorUpdateFailed is set are immediately failed-backwards. Thus, the only way to get
+	// an AddHTLC into the holding cell is to add it while AwaitingRemoteRevoke is set but
+	// MonitorUpdateFailed is unset, and then swap the flags.
+	//
+	// We do this by:
+	// a) routing a payment from node B to node A,
+	// b) sending a payment from node A to node B without delivering any of the generated messages,
+	//    putting node A in AwaitingRemoteRevoke,
+	// c) sending a second payment from node A to node B, which is immediately placed in the
+	//    holding cell,
+	// d) claiming the first payment from B, allowing us to fail the monitor update which occurs
+	//    when we try to persist the payment preimage,
+	// e) delivering A's commitment_signed from (b) and the resulting B revoke_and_ack message,
+	//    clearing AwaitingRemoteRevoke on node A.
+	//
+	// Note that because, at the end, MonitorUpdateFailed is still set, the HTLC generated in (c)
+	// will not be freed from the holding cell.
+	let (payment_preimage_0, payment_hash_0) = route_payment(&nodes[1], &[&nodes[0]], 100000);
+
+	let route = {
+		let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
+		get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), None, &Vec::new(), 100000, TEST_FINAL_CLTV, nodes[0].logger).unwrap()
+	};
+
+	nodes[0].node.send_payment(&route, payment_hash_1, &None).unwrap();
+	check_added_monitors!(nodes[0], 1);
+	let send = SendEvent::from_node(&nodes[0]);
+	assert_eq!(send.msgs.len(), 1);
+
+	nodes[0].node.send_payment(&route, payment_hash_2, &None).unwrap();
+	check_added_monitors!(nodes[0], 0);
+
+	*nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+	assert!(nodes[0].node.claim_funds(payment_preimage_0, &None, 100000));
+	check_added_monitors!(nodes[0], 1);
+
+	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send.msgs[0]);
+	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send.commitment_msg);
+	check_added_monitors!(nodes[1], 1);
+
+	let (raa, cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+
+	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa);
+	check_added_monitors!(nodes[0], 1);
+
+	if disconnect {
+		// Optionally reload nodes[0] entirely through a serialization roundtrip, otherwise just
+		// disconnect the peers. Note that the fuzzer originally found this issue because
+		// deserializing a ChannelManager in this state causes an assertion failure.
+		if reload_a {
+			let nodes_0_serialized = nodes[0].node.encode();
+			let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
+			nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write(&mut chan_0_monitor_serialized).unwrap();
+
+			persister = test_utils::TestPersister::new();
+			let keys_manager = &chanmon_cfgs[0].keys_manager;
+			new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), nodes[0].logger, node_cfgs[0].fee_estimator, &persister, keys_manager);
+			nodes[0].chain_monitor = &new_chain_monitor;
+			let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
+			let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
+				&mut chan_0_monitor_read, keys_manager).unwrap();
+			assert!(chan_0_monitor_read.is_empty());
+
+			let mut nodes_0_read = &nodes_0_serialized[..];
+			let config = UserConfig::default();
+			nodes_0_deserialized = {
+				let mut channel_monitors = HashMap::new();
+				channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor);
+				<(BlockHash, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
+					default_config: config,
+					keys_manager,
+					fee_estimator: node_cfgs[0].fee_estimator,
+					chain_monitor: nodes[0].chain_monitor,
+					tx_broadcaster: nodes[0].tx_broadcaster.clone(),
+					logger: nodes[0].logger,
+					channel_monitors,
+				}).unwrap().1
+			};
+			nodes[0].node = &nodes_0_deserialized;
+			assert!(nodes_0_read.is_empty());
+
+			nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0.clone(), chan_0_monitor).unwrap();
+			check_added_monitors!(nodes[0], 1);
+		} else {
+			nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+		}
+		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+		// Now reconnect the two
+		nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+		let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
+		assert_eq!(reestablish_1.len(), 1);
+		nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+		let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
+		assert_eq!(reestablish_2.len(), 1);
+
+		nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
+		let resp_1 = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
+		check_added_monitors!(nodes[1], 0);
+
+		nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
+		let resp_0 = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
+
+		assert!(resp_0.0.is_none());
+		assert!(resp_0.1.is_none());
+		assert!(resp_0.2.is_none());
+		assert!(resp_1.0.is_none());
+		assert!(resp_1.1.is_none());
+
+		// Check that the freshly-generated cs is equal to the original (which we will deliver in a
+		// moment).
+		if let Some(pending_cs) = resp_1.2 {
+			assert!(pending_cs.update_add_htlcs.is_empty());
+			assert!(pending_cs.update_fail_htlcs.is_empty());
+			assert!(pending_cs.update_fulfill_htlcs.is_empty());
+			assert_eq!(pending_cs.commitment_signed, cs);
+		} else { panic!(); }
+
+		// There should be no monitor updates as we are still pending awaiting a failed one.
+		check_added_monitors!(nodes[0], 0);
+		check_added_monitors!(nodes[1], 0);
+	}
+
+	// If we finish updating the monitor, we should free the holding cell right away (this did
+	// not occur prior to #756).
+	*nodes[0].chain_monitor.update_ret.lock().unwrap() = None;
+	let (funding_txo, mon_id) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone();
+	nodes[0].node.channel_monitor_updated(&funding_txo, mon_id);
+
+	// New outbound messages should be generated immediately.
+	check_added_monitors!(nodes[0], 1);
+	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+	assert_eq!(events.len(), 1);
+
+	// Deliver the pending in-flight CS
+	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &cs);
+	check_added_monitors!(nodes[0], 1);
+
+	let commitment_msg = match events.pop().unwrap() {
+		MessageSendEvent::UpdateHTLCs { node_id, updates } => {
+			assert!(updates.update_fail_htlcs.is_empty());
+			assert!(updates.update_fail_malformed_htlcs.is_empty());
+			assert!(updates.update_fee.is_none());
+			assert_eq!(updates.update_fulfill_htlcs.len(), 1);
+			nodes[1].node.handle_update_fulfill_htlc(&nodes[0].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+			expect_payment_sent!(nodes[1], payment_preimage_0);
+			assert_eq!(updates.update_add_htlcs.len(), 1);
+			nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
+			updates.commitment_signed
+		},
+		_ => panic!("Unexpected event type!"),
+	};
+
+	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_msg);
+	check_added_monitors!(nodes[1], 1);
+
+	let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
+	expect_pending_htlcs_forwardable!(nodes[1]);
+	expect_payment_received!(nodes[1], payment_hash_1, 100000);
+	check_added_monitors!(nodes[1], 1);
+
+	commitment_signed_dance!(nodes[1], nodes[0], (), false, true, false);
+
+	expect_pending_htlcs_forwardable!(nodes[1]);
+	expect_payment_received!(nodes[1], payment_hash_2, 100000);
+
+	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1, 100000);
+	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2, 100000);
+}
+#[test]
+fn channel_holding_cell_serialize() {
+	do_channel_holding_cell_serialize(true, true);
+	do_channel_holding_cell_serialize(true, false);
+	do_channel_holding_cell_serialize(false, true); // last arg doesn't matter
+}
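The (a)-(e) dance in the new test exists to get an update_add stuck in the holding cell while the monitor-update-failed flag is set, which is exactly the state the old code mishandled: disconnect/serialization dropped (failed backwards) holding-cell adds, and reestablish even debug-asserted that none existed. A toy model of the rule this PR changes, with invented names that do not match lightning::ln::channel internals:

```rust
// Toy holding cell: updates queued while we wait on the counterparty's
// revoke_and_ack (or while a monitor update is pending).
enum HeldUpdate {
	Add { payment_hash: u8 },
	Claim { preimage: u8 },
}

struct ToyChannel {
	holding_cell: Vec<HeldUpdate>,
}

impl ToyChannel {
	// Pre-#756 behavior: on disconnect, queued adds were drained and failed
	// backwards; only claims/fails survived in the cell.
	fn disconnect_old(&mut self) -> Vec<u8> {
		let mut dropped = Vec::new();
		self.holding_cell.retain(|upd| match upd {
			HeldUpdate::Add { payment_hash } => { dropped.push(*payment_hash); false },
			HeldUpdate::Claim { .. } => true,
		});
		dropped
	}

	// Post-#756 behavior: disconnect leaves the cell untouched; queued adds
	// are freed (re-sent) later, when the monitor update completes or the
	// channel is reestablished.
	fn disconnect_new(&mut self) {}
}

fn main() {
	let mut old_chan = ToyChannel { holding_cell: vec![HeldUpdate::Add { payment_hash: 1 }, HeldUpdate::Claim { preimage: 2 }] };
	assert_eq!(old_chan.disconnect_old(), vec![1]); // the add was failed backwards
	assert_eq!(old_chan.holding_cell.len(), 1);

	let mut new_chan = ToyChannel { holding_cell: vec![HeldUpdate::Add { payment_hash: 1 }, HeldUpdate::Claim { preimage: 2 }] };
	new_chan.disconnect_new();
	assert_eq!(new_chan.holding_cell.len(), 2); // the add survives for later freeing
}
```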
/// TheirFundingLocked and OurFundingLocked then get set on FundingSent, and when both are set we @@ -2668,13 +2683,11 @@ impl Channel { /// implicitly dropping) and the payment_hashes of HTLCs we tried to add but are dropping. /// No further message handling calls may be made until a channel_reestablish dance has /// completed. - pub fn remove_uncommitted_htlcs_and_mark_paused(&mut self, logger: &L) -> Vec<(HTLCSource, PaymentHash)> where L::Target: Logger { - let mut outbound_drops = Vec::new(); - + pub fn remove_uncommitted_htlcs_and_mark_paused(&mut self, logger: &L) where L::Target: Logger { assert_eq!(self.channel_state & ChannelState::ShutdownComplete as u32, 0); if self.channel_state < ChannelState::FundingSent as u32 { self.channel_state = ChannelState::ShutdownComplete as u32; - return outbound_drops; + return; } // Upon reconnect we have to start the closing_signed dance over, but shutdown messages // will be retransmitted. @@ -2717,23 +2730,8 @@ impl Channel { } } - self.holding_cell_htlc_updates.retain(|htlc_update| { - match htlc_update { - // Note that currently on channel reestablish we assert that there are - // no holding cell HTLC update_adds, so if in the future we stop - // dropping added HTLCs here and failing them backwards, then there will - // need to be corresponding changes made in the Channel's re-establish - // logic. - &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => { - outbound_drops.push((source.clone(), payment_hash.clone())); - false - }, - &HTLCUpdateAwaitingACK::ClaimHTLC {..} | &HTLCUpdateAwaitingACK::FailHTLC {..} => true, - } - }); self.channel_state |= ChannelState::PeerDisconnected as u32; - log_debug!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops and {} waiting-to-locally-announced HTLC drops on channel {}", outbound_drops.len(), inbound_drop_count, log_bytes!(self.channel_id())); - outbound_drops + log_debug!(logger, "Peer disconnection resulted in {} waiting-to-locally-announced HTLC drops on channel {}", inbound_drop_count, log_bytes!(self.channel_id())); } /// Indicates that a ChannelMonitor update failed to be stored by the client and further @@ -2756,7 +2754,7 @@ impl Channel { /// Indicates that the latest ChannelMonitor update has been committed by the client /// successfully and we should restore normal operation. Returns messages which should be sent /// to the remote side. 
- pub fn monitor_updating_restored(&mut self, logger: &L) -> (Option, Option, RAACommitmentOrder, Vec<(PendingHTLCInfo, u64)>, Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, bool, Option) where L::Target: Logger { + pub fn monitor_updating_restored(&mut self, logger: &L) -> ChannelRestoredUpdates where L::Target: Logger { assert_eq!(self.channel_state & ChannelState::MonitorUpdateFailed as u32, ChannelState::MonitorUpdateFailed as u32); self.channel_state &= !(ChannelState::MonitorUpdateFailed as u32); @@ -2778,33 +2776,70 @@ impl Channel { }) } else { None }; - let mut forwards = Vec::new(); - mem::swap(&mut forwards, &mut self.monitor_pending_forwards); - let mut failures = Vec::new(); - mem::swap(&mut failures, &mut self.monitor_pending_failures); + let mut pending_forwards = Vec::new(); + mem::swap(&mut pending_forwards, &mut self.monitor_pending_forwards); + let mut pending_failures = Vec::new(); + mem::swap(&mut pending_failures, &mut self.monitor_pending_failures); if self.channel_state & (ChannelState::PeerDisconnected as u32) != 0 { self.monitor_pending_revoke_and_ack = false; self.monitor_pending_commitment_signed = false; - return (None, None, RAACommitmentOrder::RevokeAndACKFirst, forwards, failures, needs_broadcast_safe, funding_locked); + return ChannelRestoredUpdates { + revoke_and_ack: None, + commitment_update: None, + raa_commitment_order: RAACommitmentOrder::RevokeAndACKFirst, + chanmon_update: None, + pending_forwards, + pending_failures, + forwarding_failures: Vec::new(), + needs_broadcast_safe, + funding_locked, + shutdown: None, + } } - let raa = if self.monitor_pending_revoke_and_ack { + let revoke_and_ack = if self.monitor_pending_revoke_and_ack { Some(self.get_last_revoke_and_ack()) } else { None }; - let commitment_update = if self.monitor_pending_commitment_signed { + let mut commitment_update = if self.monitor_pending_commitment_signed { Some(self.get_last_commitment_update(logger)) } else { None }; + let mut raa_commitment_order = self.resend_order.clone(); self.monitor_pending_revoke_and_ack = false; self.monitor_pending_commitment_signed = false; - let order = self.resend_order.clone(); + + let mut forwarding_failures = Vec::new(); + let mut chanmon_update = None; + if commitment_update.is_none() && self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32) == 0 { + raa_commitment_order = RAACommitmentOrder::RevokeAndACKFirst; + + let (update_opt, mut failed_htlcs) = self.free_holding_cell_htlcs(logger).unwrap(); + forwarding_failures.append(&mut failed_htlcs); + if let Some((com_update, mon_update)) = update_opt { + commitment_update = Some(com_update); + chanmon_update = Some(mon_update); + } + } + log_trace!(logger, "Restored monitor updating resulting in {}{} commitment update and {} RAA, with {} first", if needs_broadcast_safe { "a funding broadcast safe, " } else { "" }, if commitment_update.is_some() { "a" } else { "no" }, - if raa.is_some() { "an" } else { "no" }, - match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"}); - (raa, commitment_update, order, forwards, failures, needs_broadcast_safe, funding_locked) + if revoke_and_ack.is_some() { "an" } else { "no" }, + match raa_commitment_order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"}); + + ChannelRestoredUpdates { + revoke_and_ack, + commitment_update, + raa_commitment_order, + chanmon_update, + pending_forwards, + pending_failures, + forwarding_failures, + 
needs_broadcast_safe, + funding_locked, + shutdown: None, + } } pub fn update_fee(&mut self, fee_estimator: &F, msg: &msgs::UpdateFee) -> Result<(), ChannelError> @@ -2891,7 +2926,7 @@ impl Channel { /// May panic if some calls other than message-handling calls (which will all Err immediately) /// have been called between remove_uncommitted_htlcs_and_mark_paused and this call. - pub fn channel_reestablish(&mut self, msg: &msgs::ChannelReestablish, logger: &L) -> Result<(Option, Option, Option, Option, RAACommitmentOrder, Option), ChannelError> where L::Target: Logger { + pub fn channel_reestablish(&mut self, msg: &msgs::ChannelReestablish, logger: &L) -> Result where L::Target: Logger { if self.channel_state & (ChannelState::PeerDisconnected as u32) == 0 { // While BOLT 2 doesn't indicate explicitly we should error this channel here, it // almost certainly indicates we are going to end up out-of-sync in some way, so we @@ -2942,15 +2977,38 @@ impl Channel { return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent funding_locked yet".to_owned())); } // Short circuit the whole handler as there is nothing we can resend them - return Ok((None, None, None, None, RAACommitmentOrder::CommitmentFirst, shutdown_msg)); + return Ok(ChannelRestoredUpdates { + revoke_and_ack: None, + commitment_update: None, + raa_commitment_order: RAACommitmentOrder::CommitmentFirst, + chanmon_update: None, + pending_forwards: Vec::new(), + pending_failures: Vec::new(), + forwarding_failures: Vec::new(), + needs_broadcast_safe: false, + funding_locked: None, + shutdown: shutdown_msg, + }); } // We have OurFundingLocked set! let next_per_commitment_point = self.holder_signer.get_per_commitment_point(self.cur_holder_commitment_transaction_number, &self.secp_ctx); - return Ok((Some(msgs::FundingLocked { - channel_id: self.channel_id(), - next_per_commitment_point, - }), None, None, None, RAACommitmentOrder::CommitmentFirst, shutdown_msg)); + + return Ok(ChannelRestoredUpdates { + revoke_and_ack: None, + commitment_update: None, + raa_commitment_order: RAACommitmentOrder::CommitmentFirst, + chanmon_update: None, + pending_forwards: Vec::new(), + pending_failures: Vec::new(), + forwarding_failures: Vec::new(), + needs_broadcast_safe: false, + funding_locked: Some(msgs::FundingLocked { + channel_id: self.channel_id(), + next_per_commitment_point, + }), + shutdown: shutdown_msg, + }); } let required_revoke = if msg.next_remote_commitment_number + 1 == INITIAL_COMMITMENT_NUMBER - self.cur_holder_commitment_transaction_number { @@ -2991,14 +3049,6 @@ impl Channel { } if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateFailed as u32)) == 0 { - // Note that if in the future we no longer drop holding cell update_adds on peer - // disconnect, this logic will need to be updated. - for htlc_update in self.holding_cell_htlc_updates.iter() { - if let &HTLCUpdateAwaitingACK::AddHTLC { .. } = htlc_update { - debug_assert!(false, "There shouldn't be any add-HTLCs in the holding cell now because they should have been dropped on peer disconnect. Panic here because said HTLCs won't be handled correctly."); - } - } - // We're up-to-date and not waiting on a remote revoke (if we are our // channel_reestablish should result in them sending a revoke_and_ack), but we may // have received some updates while we were disconnected. 
@@ -3007,20 +3057,47 @@ impl Channel {
 					Err(ChannelError::Close(msg)) => return Err(ChannelError::Close(msg)),
 					Err(ChannelError::Ignore(_)) | Err(ChannelError::CloseDelayBroadcast(_)) => panic!("Got non-channel-failing result from free_holding_cell_htlcs"),
 					Ok((Some((commitment_update, monitor_update)), htlcs_to_fail)) => {
-						// If in the future we no longer drop holding cell update_adds on peer
-						// disconnect, we may be handed some HTLCs to fail backwards here.
-						assert!(htlcs_to_fail.is_empty());
-						return Ok((resend_funding_locked, required_revoke, Some(commitment_update), Some(monitor_update), self.resend_order.clone(), shutdown_msg));
+						return Ok(ChannelRestoredUpdates {
+							revoke_and_ack: required_revoke,
+							commitment_update: Some(commitment_update),
+							raa_commitment_order: self.resend_order.clone(),
+							chanmon_update: Some(monitor_update),
+							pending_forwards: Vec::new(),
+							pending_failures: Vec::new(),
+							forwarding_failures: htlcs_to_fail,
+							needs_broadcast_safe: false,
+							funding_locked: resend_funding_locked,
+							shutdown: shutdown_msg,
+						});
 					},
 					Ok((None, htlcs_to_fail)) => {
-						// If in the future we no longer drop holding cell update_adds on peer
-						// disconnect, we may be handed some HTLCs to fail backwards here.
-						assert!(htlcs_to_fail.is_empty());
-						return Ok((resend_funding_locked, required_revoke, None, None, self.resend_order.clone(), shutdown_msg));
+						return Ok(ChannelRestoredUpdates {
+							revoke_and_ack: required_revoke,
+							commitment_update: None,
+							raa_commitment_order: self.resend_order.clone(),
+							chanmon_update: None,
+							pending_forwards: Vec::new(),
+							pending_failures: Vec::new(),
+							forwarding_failures: htlcs_to_fail,
+							needs_broadcast_safe: false,
+							funding_locked: resend_funding_locked,
+							shutdown: shutdown_msg,
+						});
 					},
 				}
 			} else {
-				return Ok((resend_funding_locked, required_revoke, None, None, self.resend_order.clone(), shutdown_msg));
+				return Ok(ChannelRestoredUpdates {
+					revoke_and_ack: required_revoke,
+					commitment_update: None,
+					raa_commitment_order: self.resend_order.clone(),
+					chanmon_update: None,
+					pending_forwards: Vec::new(),
+					pending_failures: Vec::new(),
+					forwarding_failures: Vec::new(),
+					needs_broadcast_safe: false,
+					funding_locked: resend_funding_locked,
+					shutdown: shutdown_msg,
+				});
 			}
 		} else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
 			if required_revoke.is_some() {
@@ -3031,10 +3108,32 @@ impl Channel {
 			if self.channel_state & (ChannelState::MonitorUpdateFailed as u32) != 0 {
 				self.monitor_pending_commitment_signed = true;
-				return Ok((resend_funding_locked, None, None, None, self.resend_order.clone(), shutdown_msg));
+				return Ok(ChannelRestoredUpdates {
+					revoke_and_ack: None,
+					commitment_update: None,
+					raa_commitment_order: self.resend_order.clone(),
+					chanmon_update: None,
+					pending_forwards: Vec::new(),
+					pending_failures: Vec::new(),
+					forwarding_failures: Vec::new(),
+					needs_broadcast_safe: false,
+					funding_locked: resend_funding_locked,
+					shutdown: shutdown_msg,
+				});
 			}
-			return Ok((resend_funding_locked, required_revoke, Some(self.get_last_commitment_update(logger)), None, self.resend_order.clone(), shutdown_msg));
+			return Ok(ChannelRestoredUpdates {
+				revoke_and_ack: required_revoke,
+				commitment_update: Some(self.get_last_commitment_update(logger)),
+				raa_commitment_order: self.resend_order.clone(),
+				chanmon_update: None,
+				pending_forwards: Vec::new(),
+				pending_failures: Vec::new(),
+				forwarding_failures: Vec::new(),
+				needs_broadcast_safe: false,
+				funding_locked: resend_funding_locked,
+				shutdown: shutdown_msg,
+			});
 		} else {
 			return Err(ChannelError::Close("Peer attempted to reestablish channel with a very old remote commitment transaction".to_owned()));
 		}
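
One readability note on the hunks above: the `ChannelRestoredUpdates` literals differ in only a field or two each. A constructor like the following (hypothetical, not part of this diff) plus struct-update syntax would shrink them considerably:

    // Hypothetical helper, not in this diff: a mostly-empty baseline for the
    // early-return sites in channel_reestablish and monitor_updating_restored.
    impl ChannelRestoredUpdates {
        fn empty(raa_commitment_order: RAACommitmentOrder, shutdown: Option<msgs::Shutdown>) -> Self {
            ChannelRestoredUpdates {
                revoke_and_ack: None,
                commitment_update: None,
                raa_commitment_order,
                chanmon_update: None,
                pending_forwards: Vec::new(),
                pending_failures: Vec::new(),
                forwarding_failures: Vec::new(),
                needs_broadcast_safe: false,
                funding_locked: None,
                shutdown,
            }
        }
    }

Each return site would then read, for example, `return Ok(ChannelRestoredUpdates { funding_locked: resend_funding_locked, ..ChannelRestoredUpdates::empty(self.resend_order.clone(), shutdown_msg) });`.
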
@@ -4272,7 +4371,7 @@ impl Readable for InboundHTLCRemovalReason {
 impl<Signer: Sign> Writeable for Channel<Signer> {
 	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
 		// Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been
-		// called but include holding cell updates (and obviously we don't modify self).
+		// called.
 
 		writer.write_all(&[SERIALIZATION_VERSION; 1])?;
 		writer.write_all(&[MIN_SERIALIZATION_VERSION; 1])?;
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 4b10340f4c3..2315a21f614 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -744,6 +744,114 @@ macro_rules! maybe_break_monitor_err {
 	}
 }
 
+macro_rules! handle_chan_restoration_locked {
+	($self: ident, $channel_lock: expr, $channel_state: expr, $channel_entry: expr, $updates: expr) => { {
+		let mut htlc_forwards = None;
+		let mut funding_broadcast_safe = None;
+		let counterparty_node_id = $channel_entry.get().get_counterparty_node_id();
+		let channel_id = $channel_entry.get().channel_id();
+
+		let res = loop {
+			if let Some(msg) = $updates.shutdown {
+				$channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
+					node_id: counterparty_node_id.clone(),
+					msg,
+				});
+			}
+
+			let forwards: Vec<(PendingHTLCInfo, u64)> = $updates.pending_forwards; // Force type-checking to resolve
+			if !forwards.is_empty() {
+				htlc_forwards = Some(($channel_entry.get().get_short_channel_id().expect("We can't have pending forwards before funding confirmation"),
+					$channel_entry.get().get_funding_txo().unwrap(), forwards));
+			}
+			if $updates.chanmon_update.is_some() {
+				assert!($updates.commitment_update.is_some());
+				assert!($updates.funding_locked.is_none());
+			}
+
+			if let Some(msg) = $updates.funding_locked {
+				$channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingLocked {
+					node_id: counterparty_node_id,
+					msg,
+				});
+				if let Some(announcement_sigs) = $self.get_announcement_sigs($channel_entry.get()) {
+					$channel_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
+						node_id: counterparty_node_id,
+						msg: announcement_sigs,
+					});
+				}
+				$channel_state.short_to_id.insert($channel_entry.get().get_short_channel_id().unwrap(), channel_id);
+			}
+
+			macro_rules! handle_cs { () => {
+				if let Some(monitor_update) = $updates.chanmon_update {
+					assert!($updates.raa_commitment_order == RAACommitmentOrder::RevokeAndACKFirst);
+					assert!(!$updates.needs_broadcast_safe);
+					assert!($updates.commitment_update.is_some());
+					if let Err(e) = $self.chain_monitor.update_channel($channel_entry.get().get_funding_txo().unwrap(), monitor_update) {
+						break handle_monitor_err!($self, e, $channel_state, $channel_entry, RAACommitmentOrder::CommitmentFirst, false, true);
+					}
+				}
+				if let Some(update) = $updates.commitment_update {
+					$channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+						node_id: counterparty_node_id,
+						updates: update,
+					});
+				}
+			} }
+			macro_rules! handle_raa { () => {
+				if let Some(revoke_and_ack) = $updates.revoke_and_ack {
+					$channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
+						node_id: counterparty_node_id,
+						msg: revoke_and_ack,
+					});
+				}
+			} }
+			match $updates.raa_commitment_order {
+				RAACommitmentOrder::CommitmentFirst => {
+					handle_cs!();
+					handle_raa!();
+				},
+				RAACommitmentOrder::RevokeAndACKFirst => {
+					handle_raa!();
+					handle_cs!();
+				},
+			}
+			if $updates.needs_broadcast_safe {
+				funding_broadcast_safe = Some(events::Event::FundingBroadcastSafe {
+					funding_txo: $channel_entry.get().get_funding_txo().unwrap(),
+					user_channel_id: $channel_entry.get().get_user_id(),
+				});
+			}
+			break Ok(());
+		};
+
+		($updates.pending_failures, $updates.forwarding_failures, htlc_forwards,
+			funding_broadcast_safe, res, channel_id, counterparty_node_id)
+	} }
+}
+
+macro_rules! post_handle_chan_restoration {
+	($self: ident, $locked_res: expr) => { {
+		let (mut pending_failures, forwarding_failures, htlc_forwards, funding_broadcast_safe,
+			res, channel_id, counterparty_node_id) = $locked_res;
+
+		let _ = handle_error!($self, res, counterparty_node_id);
+
+		if let Some(ev) = funding_broadcast_safe {
+			$self.pending_events.lock().unwrap().push(ev);
+		}
+
+		$self.fail_holding_cell_htlcs(forwarding_failures, channel_id);
+		for failure in pending_failures.drain(..) {
+			$self.fail_htlc_backwards_internal($self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2);
+		}
+		if let Some(forwards) = htlc_forwards {
+			$self.forward_htlcs(&mut [forwards][..]);
+		}
+	} }
+}
+
 impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<Signer, M, T, K, F, L>
 	where M::Target: chain::Watch<Signer>,
 	      T::Target: BroadcasterInterface,
@@ -2251,6 +2359,12 @@ impl ChannelMana
 	/// ChannelMonitorUpdateErr::TemporaryFailures is fine. The highest_applied_update_id field
 	/// exists largely only to prevent races between this and concurrent update_monitor calls.
 	///
+	/// In some cases, this may generate a monitor update, resulting in a call to the
+	/// `chain::Watch`'s `update_channel` method for the same channel monitor which is being
+	/// notified of a successful update here. Because of this, please be very careful with
+	/// reentrancy bugs! It is incredibly easy to write an implementation of `update_channel` which
+	/// will take a lock that is also held when calling this method.
+	///
 	/// Thus, the anticipated use is, at a high level:
 	///  1) You register a chain::Watch with this ChannelManager,
 	///  2) it stores each update to disk, and begins updating any remote (eg watchtower) copies of
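
The new doc paragraph above warns about reentrancy; concretely, the hazard looks something like this (names and structure hypothetical, for illustration only):

    use std::sync::Mutex;

    // Hypothetical persister, not from this diff: it serializes monitor updates
    // behind one lock and notifies the ChannelManager when a flush completes.
    struct NaivePersister {
        store: Mutex<Vec<u8>>, // stands in for some durable state
    }

    impl NaivePersister {
        // Called from chain::Watch::update_channel: takes the store lock.
        fn persist_update(&self, serialized_update: &[u8]) {
            let mut store = self.store.lock().unwrap();
            store.extend_from_slice(serialized_update);
        }

        // BUG (the one the doc comment warns about): if channel_monitor_updated()
        // is invoked while `store` is still held, it can free holding cell HTLCs,
        // generate a fresh monitor update, and re-enter persist_update() above,
        // deadlocking on `store`.
        fn flush_complete(&self /* , manager: &ChannelManager<...> */) {
            let _store = self.store.lock().unwrap();
            // manager.channel_monitor_updated(&funding_txo, update_id); // deadlock!
        }
    }
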
@@ -2262,87 +2376,21 @@ impl ChannelMana
 	pub fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64) {
 		let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
 
-		let mut close_results = Vec::new();
-		let mut htlc_forwards = Vec::new();
-		let mut htlc_failures = Vec::new();
-		let mut pending_events = Vec::new();
-
-		{
+		let chan_restoration_res = {
 			let mut channel_lock = self.channel_state.lock().unwrap();
 			let channel_state = &mut *channel_lock;
-			let short_to_id = &mut channel_state.short_to_id;
-			let pending_msg_events = &mut channel_state.pending_msg_events;
-			let channel = match channel_state.by_id.get_mut(&funding_txo.to_channel_id()) {
-				Some(chan) => chan,
-				None => return,
+			let mut channel = match channel_state.by_id.entry(funding_txo.to_channel_id()) {
+				hash_map::Entry::Occupied(chan) => chan,
+				hash_map::Entry::Vacant(_) => return,
 			};
-			if !channel.is_awaiting_monitor_update() || channel.get_latest_monitor_update_id() != highest_applied_update_id {
+			if !channel.get().is_awaiting_monitor_update() || channel.get().get_latest_monitor_update_id() != highest_applied_update_id {
 				return;
 			}
-			let (raa, commitment_update, order, pending_forwards, mut pending_failures, needs_broadcast_safe, funding_locked) = channel.monitor_updating_restored(&self.logger);
-			if !pending_forwards.is_empty() {
-				htlc_forwards.push((channel.get_short_channel_id().expect("We can't have pending forwards before funding confirmation"), funding_txo.clone(), pending_forwards));
-			}
-			htlc_failures.append(&mut pending_failures);
-
-			macro_rules! handle_cs { () => {
-				if let Some(update) = commitment_update {
-					pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-						node_id: channel.get_counterparty_node_id(),
-						updates: update,
-					});
-				}
-			} }
-			macro_rules! handle_raa { () => {
-				if let Some(revoke_and_ack) = raa {
-					pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
-						node_id: channel.get_counterparty_node_id(),
-						msg: revoke_and_ack,
-					});
-				}
-			} }
-			match order {
-				RAACommitmentOrder::CommitmentFirst => {
-					handle_cs!();
-					handle_raa!();
-				},
-				RAACommitmentOrder::RevokeAndACKFirst => {
-					handle_raa!();
-					handle_cs!();
-				},
-			}
-			if needs_broadcast_safe {
-				pending_events.push(events::Event::FundingBroadcastSafe {
-					funding_txo: channel.get_funding_txo().unwrap(),
-					user_channel_id: channel.get_user_id(),
-				});
-			}
-			if let Some(msg) = funding_locked {
-				pending_msg_events.push(events::MessageSendEvent::SendFundingLocked {
-					node_id: channel.get_counterparty_node_id(),
-					msg,
-				});
-				if let Some(announcement_sigs) = self.get_announcement_sigs(channel) {
-					pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
-						node_id: channel.get_counterparty_node_id(),
-						msg: announcement_sigs,
-					});
-				}
-				short_to_id.insert(channel.get_short_channel_id().unwrap(), channel.channel_id());
-			}
-		}
-
-		self.pending_events.lock().unwrap().append(&mut pending_events);
-
-		for failure in htlc_failures.drain(..) {
-			self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2);
-		}
-		self.forward_htlcs(&mut htlc_forwards[..]);
-
-		for res in close_results.drain(..) {
-			self.finish_force_close_channel(res);
-		}
+			let updates = channel.get_mut().monitor_updating_restored(&self.logger);
+			handle_chan_restoration_locked!(self, channel_lock, channel_state, channel, updates)
+		};
+		post_handle_chan_restoration!(self, chan_restoration_res);
 	}
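
The block structure here is the point of the two macros: everything that needs channel_state held runs inside handle_chan_restoration_locked!, its results come back as a plain tuple so the lock guard drops at the end of the block, and post_handle_chan_restoration! then does the work that re-acquires the lock (fail_htlc_backwards_internal and forward_htlcs both take self.channel_state again). Condensed, with comments marking the lock boundaries (this restates the pattern above, it is not new code):

    let chan_restoration_res = {
        let mut channel_lock = self.channel_state.lock().unwrap();
        // ...look up the channel, queue messages, maybe push a monitor update...
        handle_chan_restoration_locked!(self, channel_lock, channel_state, channel, updates)
    }; // channel_state guard dropped here
    post_handle_chan_restoration!(self, chan_restoration_res); // re-locks internally
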
 
 	fn internal_open_channel(&self, counterparty_node_id: &PublicKey, their_features: InitFeatures, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> {
@@ -2931,77 +2979,27 @@ impl ChannelMana
 	}
 
 	fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> {
-		let mut channel_state_lock = self.channel_state.lock().unwrap();
-		let channel_state = &mut *channel_state_lock;
+		let chan_restoration_res = {
+			let mut channel_state_lock = self.channel_state.lock().unwrap();
+			let channel_state = &mut *channel_state_lock;
 
-		match channel_state.by_id.entry(msg.channel_id) {
-			hash_map::Entry::Occupied(mut chan) => {
-				if chan.get().get_counterparty_node_id() != *counterparty_node_id {
-					return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
-				}
-				// Currently, we expect all holding cell update_adds to be dropped on peer
-				// disconnect, so Channel's reestablish will never hand us any holding cell
-				// freed HTLCs to fail backwards. If in the future we no longer drop pending
-				// add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here.
-				let (funding_locked, revoke_and_ack, commitment_update, monitor_update_opt, mut order, shutdown) =
-					try_chan_entry!(self, chan.get_mut().channel_reestablish(msg, &self.logger), channel_state, chan);
-				if let Some(monitor_update) = monitor_update_opt {
-					if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
-						// channel_reestablish doesn't guarantee the order it returns is sensical
-						// for the messages it returns, but if we're setting what messages to
-						// re-transmit on monitor update success, we need to make sure it is sane.
-						if revoke_and_ack.is_none() {
-							order = RAACommitmentOrder::CommitmentFirst;
-						}
-						if commitment_update.is_none() {
-							order = RAACommitmentOrder::RevokeAndACKFirst;
-						}
-						return_monitor_err!(self, e, channel_state, chan, order, revoke_and_ack.is_some(), commitment_update.is_some());
-						//TODO: Resend the funding_locked if needed once we get the monitor running again
-					}
-				}
-				if let Some(msg) = funding_locked {
-					channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingLocked {
-						node_id: counterparty_node_id.clone(),
-						msg
-					});
-				}
-				macro_rules! send_raa { () => {
-					if let Some(msg) = revoke_and_ack {
-						channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
-							node_id: counterparty_node_id.clone(),
-							msg
-						});
-					}
-				} }
-				macro_rules! send_cu { () => {
-					if let Some(updates) = commitment_update {
-						channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-							node_id: counterparty_node_id.clone(),
-							updates
-						});
+			match channel_state.by_id.entry(msg.channel_id) {
+				hash_map::Entry::Occupied(mut chan) => {
+					if chan.get().get_counterparty_node_id() != *counterparty_node_id {
+						return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
 					}
-				} }
-				match order {
-					RAACommitmentOrder::RevokeAndACKFirst => {
-						send_raa!();
-						send_cu!();
-					},
-					RAACommitmentOrder::CommitmentFirst => {
-						send_cu!();
-						send_raa!();
-					},
-				}
-				if let Some(msg) = shutdown {
-					channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
-						node_id: counterparty_node_id.clone(),
-						msg,
-					});
-				}
-				Ok(())
-			},
-			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
-		}
+					// Currently, we expect all holding cell update_adds to be dropped on peer
+					// disconnect, so Channel's reestablish will never hand us any holding cell
+					// freed HTLCs to fail backwards. If in the future we no longer drop pending
+					// add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here.
+					let updates = try_chan_entry!(self, chan.get_mut().channel_reestablish(msg, &self.logger), channel_state, chan);
+					handle_chan_restoration_locked!(self, channel_state_lock, channel_state, chan, updates)
+				},
+				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
+			}
+		};
+		post_handle_chan_restoration!(self, chan_restoration_res);
+		Ok(())
 	}
 
 	/// Begin Update fee process. Allowed only on an outbound channel.
@@ -3428,7 +3426,6 @@ impl