From 5b3302d29984a0512ac03eafd34c352a4b118904 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 21 Apr 2026 16:45:06 +0200 Subject: [PATCH 01/29] Extract chanmon bootstrap helpers Extract the repeated peer-connection and channel-funding setup into small helpers. This leaves the fuzz scenario setup behavior unchanged while making later harness refactors easier to review. --- fuzz/src/chanmon_consistency.rs | 579 +++++++++++++++++--------------- 1 file changed, 306 insertions(+), 273 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 655fb76200b..3d4c4bbd865 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -208,9 +208,7 @@ impl ChainState { fn is_outpoint_spent(&self, outpoint: &bitcoin::OutPoint) -> bool { self.blocks.iter().any(|(_, txs)| { - txs.iter().any(|tx| { - tx.input.iter().any(|input| input.previous_output == *outpoint) - }) + txs.iter().any(|tx| tx.input.iter().any(|input| input.previous_output == *outpoint)) }) } @@ -938,12 +936,240 @@ fn assert_action_timeout_awaiting_response(action: &msgs::ErrorAction) { ); } +#[derive(Copy, Clone)] enum ChanType { Legacy, KeyedAnchors, ZeroFeeCommitments, } +fn build_node_config(chan_type: ChanType) -> UserConfig { + let mut config = UserConfig::default(); + config.channel_config.forwarding_fee_proportional_millionths = 0; + config.channel_handshake_config.announce_for_forwarding = true; + config.reject_inbound_splices = false; + match chan_type { + ChanType::Legacy => { + config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = false; + config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = false; + }, + ChanType::KeyedAnchors => { + config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; + config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = false; + }, + ChanType::ZeroFeeCommitments => { + config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = 
false; + config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = true; + }, + } + config +} + +fn complete_all_pending_monitor_updates(monitor: &Arc) { + for (channel_id, state) in monitor.latest_monitors.lock().unwrap().iter_mut() { + for (id, data) in state.pending_monitors.drain(..) { + monitor.chain_monitor.channel_monitor_updated(*channel_id, id).unwrap(); + if id >= state.persisted_monitor_id { + state.persisted_monitor_id = id; + state.persisted_monitor = data; + } + } + } +} + +fn connect_peers(source: &ChanMan<'_>, dest: &ChanMan<'_>) { + let init_dest = + Init { features: dest.init_features(), networks: None, remote_network_address: None }; + source.peer_connected(dest.get_our_node_id(), &init_dest, true).unwrap(); + let init_src = + Init { features: source.init_features(), networks: None, remote_network_address: None }; + dest.peer_connected(source.get_our_node_id(), &init_src, false).unwrap(); +} + +fn make_channel( + source: &ChanMan<'_>, dest: &ChanMan<'_>, source_monitor: &Arc, + dest_monitor: &Arc, dest_keys_manager: &Arc, chan_id: i32, + trusted_open: bool, trusted_accept: bool, chain_state: &mut ChainState, +) { + if trusted_open { + source + .create_channel_to_trusted_peer_0reserve( + dest.get_our_node_id(), + 100_000, + 42, + 0, + None, + None, + ) + .unwrap(); + } else { + source.create_channel(dest.get_our_node_id(), 100_000, 42, 0, None, None).unwrap(); + } + let open_channel = { + let events = source.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + if let MessageSendEvent::SendOpenChannel { ref msg, .. } = events[0] { + msg.clone() + } else { + panic!("Wrong event type"); + } + }; + + dest.handle_open_channel(source.get_our_node_id(), &open_channel); + let accept_channel = { + let events = dest.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + if let events::Event::OpenChannelRequest { + ref temporary_channel_id, + ref counterparty_node_id, + .. 
+ } = events[0] + { + let mut random_bytes = [0u8; 16]; + random_bytes.copy_from_slice(&dest_keys_manager.get_secure_random_bytes()[..16]); + let user_channel_id = u128::from_be_bytes(random_bytes); + if trusted_accept { + dest.accept_inbound_channel_from_trusted_peer( + temporary_channel_id, + counterparty_node_id, + user_channel_id, + TrustedChannelFeatures::ZeroReserve, + None, + ) + .unwrap(); + } else { + dest.accept_inbound_channel( + temporary_channel_id, + counterparty_node_id, + user_channel_id, + None, + ) + .unwrap(); + } + } else { + panic!("Wrong event type"); + } + let events = dest.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + if let MessageSendEvent::SendAcceptChannel { ref msg, .. } = events[0] { + msg.clone() + } else { + panic!("Wrong event type"); + } + }; + + source.handle_accept_channel(dest.get_our_node_id(), &accept_channel); + { + let mut events = source.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + if let events::Event::FundingGenerationReady { + temporary_channel_id, + channel_value_satoshis, + output_script, + .. + } = events.pop().unwrap() + { + let tx = Transaction { + version: Version(chan_id), + lock_time: LockTime::ZERO, + input: Vec::new(), + output: vec![TxOut { + value: Amount::from_sat(channel_value_satoshis), + script_pubkey: output_script, + }], + }; + source + .funding_transaction_generated( + temporary_channel_id, + dest.get_our_node_id(), + tx.clone(), + ) + .unwrap(); + chain_state.confirm_tx(tx); + } else { + panic!("Wrong event type"); + } + } + + let funding_created = { + let events = source.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + if let MessageSendEvent::SendFundingCreated { ref msg, .. } = events[0] { + msg.clone() + } else { + panic!("Wrong event type"); + } + }; + dest.handle_funding_created(source.get_our_node_id(), &funding_created); + // Complete any pending monitor updates for dest after watch_channel. 
+ complete_all_pending_monitor_updates(dest_monitor); + + let (funding_signed, channel_id) = { + let events = dest.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + if let MessageSendEvent::SendFundingSigned { ref msg, .. } = events[0] { + (msg.clone(), msg.channel_id) + } else { + panic!("Wrong event type"); + } + }; + let events = dest.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + if let events::Event::ChannelPending { ref counterparty_node_id, .. } = events[0] { + assert_eq!(counterparty_node_id, &source.get_our_node_id()); + } else { + panic!("Wrong event type"); + } + + source.handle_funding_signed(dest.get_our_node_id(), &funding_signed); + // Complete any pending monitor updates for source after watch_channel. + complete_all_pending_monitor_updates(source_monitor); + + let events = source.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + if let events::Event::ChannelPending { + ref counterparty_node_id, + channel_id: ref event_channel_id, + .. + } = events[0] + { + assert_eq!(counterparty_node_id, &dest.get_our_node_id()); + assert_eq!(*event_channel_id, channel_id); + } else { + panic!("Wrong event type"); + } +} + +fn lock_fundings(nodes: &[ChanMan<'_>; 3]) { + let mut node_events = Vec::new(); + for node in nodes.iter() { + node_events.push(node.get_and_clear_pending_msg_events()); + } + for (idx, node_event) in node_events.iter().enumerate() { + for event in node_event { + if let MessageSendEvent::SendChannelReady { ref node_id, ref msg } = event { + for node in nodes.iter() { + if node.get_our_node_id() == *node_id { + node.handle_channel_ready(nodes[idx].get_our_node_id(), msg); + } + } + } else { + panic!("Wrong event type"); + } + } + } + + for node in nodes.iter() { + let events = node.get_and_clear_pending_msg_events(); + for event in events { + if let MessageSendEvent::SendAnnouncementSignatures { .. 
} = event { + } else { + panic!("Wrong event type"); + } + } + } +} + #[inline] pub fn do_test(data: &[u8], out: Out) { let broadcast_a = Arc::new(TestBroadcaster { txn_broadcasted: RefCell::new(Vec::new()) }); @@ -1007,27 +1233,10 @@ pub fn do_test(data: &[u8], out: Out) { Arc::clone(&keys_manager), )); - let mut config = UserConfig::default(); - config.channel_config.forwarding_fee_proportional_millionths = 0; - config.channel_handshake_config.announce_for_forwarding = true; - config.reject_inbound_splices = false; - match chan_type { - ChanType::Legacy => { - config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = false; - config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = false; - }, - ChanType::KeyedAnchors => { - config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; - config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = false; - }, - ChanType::ZeroFeeCommitments => { - config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = false; - config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = true; - }, - } let network = Network::Bitcoin; let best_block_timestamp = genesis_block(network).header.time; - let params = ChainParameters { network, best_block: BlockLocator::from_network(network) }; + let params = + ChainParameters { network, best_block: BlockLocator::from_network(network) }; ( ChannelManager::new( $fee_estimator.clone(), @@ -1039,7 +1248,7 @@ pub fn do_test(data: &[u8], out: Out) { keys_manager.clone(), keys_manager.clone(), keys_manager.clone(), - config, + build_node_config(chan_type), params, best_block_timestamp, ), @@ -1070,25 +1279,6 @@ pub fn do_test(data: &[u8], out: Out) { Arc::clone(keys), )); - let mut config = UserConfig::default(); - config.channel_config.forwarding_fee_proportional_millionths = 0; - config.channel_handshake_config.announce_for_forwarding = true; - config.reject_inbound_splices = false; - match chan_type { - 
ChanType::Legacy => { - config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = false; - config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = false; - }, - ChanType::KeyedAnchors => { - config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; - config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = false; - }, - ChanType::ZeroFeeCommitments => { - config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = false; - config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = true; - }, - } - let mut monitors = new_hash_map(); let mut old_monitors = old_monitors.latest_monitors.lock().unwrap(); for (channel_id, mut prev_state) in old_monitors.drain() { @@ -1138,12 +1328,12 @@ pub fn do_test(data: &[u8], out: Out) { router: &router, message_router: &router, logger, - config, + config: build_node_config(chan_type), channel_monitors: monitor_refs, }; - let manager = - <(BlockLocator, ChanMan)>::read(&mut &ser[..], read_args).expect("Failed to read manager"); + let manager = <(BlockLocator, ChanMan)>::read(&mut &ser[..], read_args) + .expect("Failed to read manager"); let res = (manager.1, chain_monitor.clone()); for (channel_id, mon) in monitors.drain() { assert_eq!( @@ -1155,224 +1345,6 @@ pub fn do_test(data: &[u8], out: Out) { res }; - macro_rules! complete_all_pending_monitor_updates { - ($monitor: expr) => {{ - for (channel_id, state) in $monitor.latest_monitors.lock().unwrap().iter_mut() { - for (id, data) in state.pending_monitors.drain(..) { - $monitor.chain_monitor.channel_monitor_updated(*channel_id, id).unwrap(); - if id >= state.persisted_monitor_id { - state.persisted_monitor_id = id; - state.persisted_monitor = data; - } - } - } - }}; - } - macro_rules! 
connect_peers { - ($source: expr, $dest: expr) => {{ - let init_dest = Init { - features: $dest.init_features(), - networks: None, - remote_network_address: None, - }; - $source.peer_connected($dest.get_our_node_id(), &init_dest, true).unwrap(); - let init_src = Init { - features: $source.init_features(), - networks: None, - remote_network_address: None, - }; - $dest.peer_connected($source.get_our_node_id(), &init_src, false).unwrap(); - }}; - } - macro_rules! make_channel { - ($source: expr, $dest: expr, $source_monitor: expr, $dest_monitor: expr, $dest_keys_manager: expr, $chan_id: expr, $trusted_open: expr, $trusted_accept: expr) => {{ - if $trusted_open { - $source - .create_channel_to_trusted_peer_0reserve( - $dest.get_our_node_id(), - 100_000, - 42, - 0, - None, - None, - ) - .unwrap(); - } else { - $source - .create_channel($dest.get_our_node_id(), 100_000, 42, 0, None, None) - .unwrap(); - } - let open_channel = { - let events = $source.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - if let MessageSendEvent::SendOpenChannel { ref msg, .. } = events[0] { - msg.clone() - } else { - panic!("Wrong event type"); - } - }; - - $dest.handle_open_channel($source.get_our_node_id(), &open_channel); - let accept_channel = { - let events = $dest.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - if let events::Event::OpenChannelRequest { - ref temporary_channel_id, - ref counterparty_node_id, - .. 
- } = events[0] - { - let mut random_bytes = [0u8; 16]; - random_bytes - .copy_from_slice(&$dest_keys_manager.get_secure_random_bytes()[..16]); - let user_channel_id = u128::from_be_bytes(random_bytes); - if $trusted_accept { - $dest - .accept_inbound_channel_from_trusted_peer( - temporary_channel_id, - counterparty_node_id, - user_channel_id, - TrustedChannelFeatures::ZeroReserve, - None, - ) - .unwrap(); - } else { - $dest - .accept_inbound_channel( - temporary_channel_id, - counterparty_node_id, - user_channel_id, - None, - ) - .unwrap(); - } - } else { - panic!("Wrong event type"); - } - let events = $dest.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - if let MessageSendEvent::SendAcceptChannel { ref msg, .. } = events[0] { - msg.clone() - } else { - panic!("Wrong event type"); - } - }; - - $source.handle_accept_channel($dest.get_our_node_id(), &accept_channel); - { - let mut events = $source.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - if let events::Event::FundingGenerationReady { - temporary_channel_id, - channel_value_satoshis, - output_script, - .. - } = events.pop().unwrap() - { - let tx = Transaction { - version: Version($chan_id), - lock_time: LockTime::ZERO, - input: Vec::new(), - output: vec![TxOut { - value: Amount::from_sat(channel_value_satoshis), - script_pubkey: output_script, - }], - }; - $source - .funding_transaction_generated( - temporary_channel_id, - $dest.get_our_node_id(), - tx.clone(), - ) - .unwrap(); - chain_state.confirm_tx(tx); - } else { - panic!("Wrong event type"); - } - } - - let funding_created = { - let events = $source.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - if let MessageSendEvent::SendFundingCreated { ref msg, .. 
} = events[0] { - msg.clone() - } else { - panic!("Wrong event type"); - } - }; - $dest.handle_funding_created($source.get_our_node_id(), &funding_created); - // Complete any pending monitor updates for dest after watch_channel - complete_all_pending_monitor_updates!($dest_monitor); - - let (funding_signed, channel_id) = { - let events = $dest.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - if let MessageSendEvent::SendFundingSigned { ref msg, .. } = events[0] { - (msg.clone(), msg.channel_id.clone()) - } else { - panic!("Wrong event type"); - } - }; - let events = $dest.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - if let events::Event::ChannelPending { ref counterparty_node_id, .. } = events[0] { - assert_eq!(counterparty_node_id, &$source.get_our_node_id()); - } else { - panic!("Wrong event type"); - } - - $source.handle_funding_signed($dest.get_our_node_id(), &funding_signed); - // Complete any pending monitor updates for source after watch_channel - complete_all_pending_monitor_updates!($source_monitor); - - let events = $source.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - if let events::Event::ChannelPending { - ref counterparty_node_id, - channel_id: ref event_channel_id, - .. - } = events[0] - { - assert_eq!(counterparty_node_id, &$dest.get_our_node_id()); - assert_eq!(*event_channel_id, channel_id); - } else { - panic!("Wrong event type"); - } - }}; - } - - macro_rules! 
lock_fundings { - ($nodes: expr) => {{ - let mut node_events = Vec::new(); - for node in $nodes.iter() { - node_events.push(node.get_and_clear_pending_msg_events()); - } - for (idx, node_event) in node_events.iter().enumerate() { - for event in node_event { - if let MessageSendEvent::SendChannelReady { ref node_id, ref msg } = event { - for node in $nodes.iter() { - if node.get_our_node_id() == *node_id { - node.handle_channel_ready($nodes[idx].get_our_node_id(), msg); - } - } - } else { - panic!("Wrong event type"); - } - } - } - - for node in $nodes.iter() { - let events = node.get_and_clear_pending_msg_events(); - for event in events { - if let MessageSendEvent::SendAnnouncementSignatures { .. } = event { - } else { - panic!("Wrong event type"); - } - } - } - }}; - } - let wallet_a = TestWalletSource::new(SecretKey::from_slice(&[1; 32]).unwrap()); let wallet_b = TestWalletSource::new(SecretKey::from_slice(&[2; 32]).unwrap()); let wallet_c = TestWalletSource::new(SecretKey::from_slice(&[3; 32]).unwrap()); @@ -1414,8 +1386,8 @@ pub fn do_test(data: &[u8], out: Out) { let fee_estimators = [Arc::clone(&fee_est_a), Arc::clone(&fee_est_b), Arc::clone(&fee_est_c)]; // Connect peers first, then create channels - connect_peers!(nodes[0], nodes[1]); - connect_peers!(nodes[1], nodes[2]); + connect_peers(&nodes[0], &nodes[1]); + connect_peers(&nodes[1], &nodes[2]); // Create 3 channels between A-B and 3 channels between B-C (6 total). // @@ -1423,14 +1395,74 @@ pub fn do_test(data: &[u8], out: Out) { // txid and funding outpoint. 
// A-B: channel 2 A and B have 0-reserve (trusted open + trusted accept), // channel 3 A has 0-reserve (trusted accept) - make_channel!(nodes[0], nodes[1], monitor_a, monitor_b, keys_manager_b, 1, false, false); - make_channel!(nodes[0], nodes[1], monitor_a, monitor_b, keys_manager_b, 2, true, true); - make_channel!(nodes[0], nodes[1], monitor_a, monitor_b, keys_manager_b, 3, false, true); + make_channel( + &nodes[0], + &nodes[1], + &monitor_a, + &monitor_b, + &keys_manager_b, + 1, + false, + false, + &mut chain_state, + ); + make_channel( + &nodes[0], + &nodes[1], + &monitor_a, + &monitor_b, + &keys_manager_b, + 2, + true, + true, + &mut chain_state, + ); + make_channel( + &nodes[0], + &nodes[1], + &monitor_a, + &monitor_b, + &keys_manager_b, + 3, + false, + true, + &mut chain_state, + ); // B-C: channel 4 B has 0-reserve (via trusted accept), // channel 5 C has 0-reserve (via trusted open) - make_channel!(nodes[1], nodes[2], monitor_b, monitor_c, keys_manager_c, 4, false, true); - make_channel!(nodes[1], nodes[2], monitor_b, monitor_c, keys_manager_c, 5, true, false); - make_channel!(nodes[1], nodes[2], monitor_b, monitor_c, keys_manager_c, 6, false, false); + make_channel( + &nodes[1], + &nodes[2], + &monitor_b, + &monitor_c, + &keys_manager_c, + 4, + false, + true, + &mut chain_state, + ); + make_channel( + &nodes[1], + &nodes[2], + &monitor_b, + &monitor_c, + &keys_manager_c, + 5, + true, + false, + &mut chain_state, + ); + make_channel( + &nodes[1], + &nodes[2], + &monitor_b, + &monitor_c, + &keys_manager_c, + 6, + false, + false, + &mut chain_state, + ); // Wipe the transactions-broadcasted set to make sure we don't broadcast any transactions // during normal operation in `test_return`. 
@@ -1464,7 +1496,7 @@ pub fn do_test(data: &[u8], out: Out) { sync_with_chain_state(&mut chain_state, &nodes[1], &mut node_height_b, None); sync_with_chain_state(&mut chain_state, &nodes[2], &mut node_height_c, None); - lock_fundings!(nodes); + lock_fundings(&nodes); // Get channel IDs for all A-B channels (from node A's perspective) let chan_ab_ids = { @@ -2106,7 +2138,8 @@ pub fn do_test(data: &[u8], out: Out) { }, events::Event::SpliceFailed { .. } => {}, events::Event::DiscardFunding { - funding_info: events::FundingInfo::Contribution { .. } + funding_info: + events::FundingInfo::Contribution { .. } | events::FundingInfo::Tx { .. }, .. } => {}, From fe9811e355bd3e747a34c6bbb9991c0c868edfd6 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 21 Apr 2026 17:31:38 +0200 Subject: [PATCH 02/29] Wrap chanmon nodes in HarnessNode Introduce a small wrapper around each channel manager and its test resources. This keeps node-local state together before moving more operations onto the harness. --- fuzz/src/chanmon_consistency.rs | 249 ++++++++++++++------------------ 1 file changed, 110 insertions(+), 139 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 3d4c4bbd865..5fdddaad6c4 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -943,6 +943,38 @@ enum ChanType { ZeroFeeCommitments, } +struct HarnessNode<'a> { + node: ChanMan<'a>, + monitor: Arc, + keys_manager: Arc, +} + +impl<'a> std::ops::Deref for HarnessNode<'a> { + type Target = ChanMan<'a>; + + fn deref(&self) -> &Self::Target { + &self.node + } +} + +impl<'a> HarnessNode<'a> { + fn our_node_id(&self) -> PublicKey { + self.node.get_our_node_id() + } + + fn complete_all_pending_monitor_updates(&self) { + for (channel_id, state) in self.monitor.latest_monitors.lock().unwrap().iter_mut() { + for (id, data) in state.pending_monitors.drain(..) 
{ + self.monitor.chain_monitor.channel_monitor_updated(*channel_id, id).unwrap(); + if id >= state.persisted_monitor_id { + state.persisted_monitor_id = id; + state.persisted_monitor = data; + } + } + } + } +} + fn build_node_config(chan_type: ChanType) -> UserConfig { let mut config = UserConfig::default(); config.channel_config.forwarding_fee_proportional_millionths = 0; @@ -965,18 +997,6 @@ fn build_node_config(chan_type: ChanType) -> UserConfig { config } -fn complete_all_pending_monitor_updates(monitor: &Arc) { - for (channel_id, state) in monitor.latest_monitors.lock().unwrap().iter_mut() { - for (id, data) in state.pending_monitors.drain(..) { - monitor.chain_monitor.channel_monitor_updated(*channel_id, id).unwrap(); - if id >= state.persisted_monitor_id { - state.persisted_monitor_id = id; - state.persisted_monitor = data; - } - } - } -} - fn connect_peers(source: &ChanMan<'_>, dest: &ChanMan<'_>) { let init_dest = Init { features: dest.init_features(), networks: None, remote_network_address: None }; @@ -987,26 +1007,19 @@ fn connect_peers(source: &ChanMan<'_>, dest: &ChanMan<'_>) { } fn make_channel( - source: &ChanMan<'_>, dest: &ChanMan<'_>, source_monitor: &Arc, - dest_monitor: &Arc, dest_keys_manager: &Arc, chan_id: i32, - trusted_open: bool, trusted_accept: bool, chain_state: &mut ChainState, + source: &HarnessNode<'_>, dest: &HarnessNode<'_>, chan_id: i32, trusted_open: bool, + trusted_accept: bool, chain_state: &mut ChainState, ) { if trusted_open { source - .create_channel_to_trusted_peer_0reserve( - dest.get_our_node_id(), - 100_000, - 42, - 0, - None, - None, - ) + .node + .create_channel_to_trusted_peer_0reserve(dest.our_node_id(), 100_000, 42, 0, None, None) .unwrap(); } else { - source.create_channel(dest.get_our_node_id(), 100_000, 42, 0, None, None).unwrap(); + source.node.create_channel(dest.our_node_id(), 100_000, 42, 0, None, None).unwrap(); } let open_channel = { - let events = source.get_and_clear_pending_msg_events(); + let events = 
source.node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); if let MessageSendEvent::SendOpenChannel { ref msg, .. } = events[0] { msg.clone() @@ -1015,9 +1028,9 @@ fn make_channel( } }; - dest.handle_open_channel(source.get_our_node_id(), &open_channel); + dest.node.handle_open_channel(source.our_node_id(), &open_channel); let accept_channel = { - let events = dest.get_and_clear_pending_events(); + let events = dest.node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); if let events::Event::OpenChannelRequest { ref temporary_channel_id, @@ -1026,30 +1039,32 @@ fn make_channel( } = events[0] { let mut random_bytes = [0u8; 16]; - random_bytes.copy_from_slice(&dest_keys_manager.get_secure_random_bytes()[..16]); + random_bytes.copy_from_slice(&dest.keys_manager.get_secure_random_bytes()[..16]); let user_channel_id = u128::from_be_bytes(random_bytes); if trusted_accept { - dest.accept_inbound_channel_from_trusted_peer( - temporary_channel_id, - counterparty_node_id, - user_channel_id, - TrustedChannelFeatures::ZeroReserve, - None, - ) - .unwrap(); + dest.node + .accept_inbound_channel_from_trusted_peer( + temporary_channel_id, + counterparty_node_id, + user_channel_id, + TrustedChannelFeatures::ZeroReserve, + None, + ) + .unwrap(); } else { - dest.accept_inbound_channel( - temporary_channel_id, - counterparty_node_id, - user_channel_id, - None, - ) - .unwrap(); + dest.node + .accept_inbound_channel( + temporary_channel_id, + counterparty_node_id, + user_channel_id, + None, + ) + .unwrap(); } } else { panic!("Wrong event type"); } - let events = dest.get_and_clear_pending_msg_events(); + let events = dest.node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); if let MessageSendEvent::SendAcceptChannel { ref msg, .. 
} = events[0] { msg.clone() @@ -1058,9 +1073,9 @@ fn make_channel( } }; - source.handle_accept_channel(dest.get_our_node_id(), &accept_channel); + source.node.handle_accept_channel(dest.our_node_id(), &accept_channel); { - let mut events = source.get_and_clear_pending_events(); + let mut events = source.node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); if let events::Event::FundingGenerationReady { temporary_channel_id, @@ -1079,11 +1094,8 @@ fn make_channel( }], }; source - .funding_transaction_generated( - temporary_channel_id, - dest.get_our_node_id(), - tx.clone(), - ) + .node + .funding_transaction_generated(temporary_channel_id, dest.our_node_id(), tx.clone()) .unwrap(); chain_state.confirm_tx(tx); } else { @@ -1092,7 +1104,7 @@ fn make_channel( } let funding_created = { - let events = source.get_and_clear_pending_msg_events(); + let events = source.node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); if let MessageSendEvent::SendFundingCreated { ref msg, .. } = events[0] { msg.clone() @@ -1100,12 +1112,12 @@ fn make_channel( panic!("Wrong event type"); } }; - dest.handle_funding_created(source.get_our_node_id(), &funding_created); + dest.node.handle_funding_created(source.our_node_id(), &funding_created); // Complete any pending monitor updates for dest after watch_channel. - complete_all_pending_monitor_updates(dest_monitor); + dest.complete_all_pending_monitor_updates(); let (funding_signed, channel_id) = { - let events = dest.get_and_clear_pending_msg_events(); + let events = dest.node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); if let MessageSendEvent::SendFundingSigned { ref msg, .. 
} = events[0] { (msg.clone(), msg.channel_id) @@ -1113,19 +1125,19 @@ fn make_channel( panic!("Wrong event type"); } }; - let events = dest.get_and_clear_pending_events(); + let events = dest.node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); if let events::Event::ChannelPending { ref counterparty_node_id, .. } = events[0] { - assert_eq!(counterparty_node_id, &source.get_our_node_id()); + assert_eq!(counterparty_node_id, &source.our_node_id()); } else { panic!("Wrong event type"); } - source.handle_funding_signed(dest.get_our_node_id(), &funding_signed); + source.node.handle_funding_signed(dest.our_node_id(), &funding_signed); // Complete any pending monitor updates for source after watch_channel. - complete_all_pending_monitor_updates(source_monitor); + source.complete_all_pending_monitor_updates(); - let events = source.get_and_clear_pending_events(); + let events = source.node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); if let events::Event::ChannelPending { ref counterparty_node_id, @@ -1133,24 +1145,24 @@ fn make_channel( .. 
} = events[0] { - assert_eq!(counterparty_node_id, &dest.get_our_node_id()); + assert_eq!(counterparty_node_id, &dest.our_node_id()); assert_eq!(*event_channel_id, channel_id); } else { panic!("Wrong event type"); } } -fn lock_fundings(nodes: &[ChanMan<'_>; 3]) { +fn lock_fundings(nodes: &[HarnessNode<'_>; 3]) { let mut node_events = Vec::new(); for node in nodes.iter() { - node_events.push(node.get_and_clear_pending_msg_events()); + node_events.push(node.node.get_and_clear_pending_msg_events()); } for (idx, node_event) in node_events.iter().enumerate() { for event in node_event { if let MessageSendEvent::SendChannelReady { ref node_id, ref msg } = event { for node in nodes.iter() { - if node.get_our_node_id() == *node_id { - node.handle_channel_ready(nodes[idx].get_our_node_id(), msg); + if node.our_node_id() == *node_id { + node.node.handle_channel_ready(nodes[idx].our_node_id(), msg); } } } else { @@ -1160,7 +1172,7 @@ fn lock_fundings(nodes: &[ChanMan<'_>; 3]) { } for node in nodes.iter() { - let events = node.get_and_clear_pending_msg_events(); + let events = node.node.get_and_clear_pending_msg_events(); for event in events { if let MessageSendEvent::SendAnnouncementSignatures { .. 
} = event { } else { @@ -1379,7 +1391,23 @@ pub fn do_test(data: &[u8], out: Out) { let (node_b, mut monitor_b, keys_manager_b, logger_b) = make_node!(1, fee_est_b, broadcast_b); let (node_c, mut monitor_c, keys_manager_c, logger_c) = make_node!(2, fee_est_c, broadcast_c); - let mut nodes = [node_a, node_b, node_c]; + let mut nodes = [ + HarnessNode { + node: node_a, + monitor: Arc::clone(&monitor_a), + keys_manager: Arc::clone(&keys_manager_a), + }, + HarnessNode { + node: node_b, + monitor: Arc::clone(&monitor_b), + keys_manager: Arc::clone(&keys_manager_b), + }, + HarnessNode { + node: node_c, + monitor: Arc::clone(&monitor_c), + keys_manager: Arc::clone(&keys_manager_c), + }, + ]; #[allow(unused_variables)] let loggers = [logger_a, logger_b, logger_c]; #[allow(unused_variables)] @@ -1395,74 +1423,14 @@ pub fn do_test(data: &[u8], out: Out) { // txid and funding outpoint. // A-B: channel 2 A and B have 0-reserve (trusted open + trusted accept), // channel 3 A has 0-reserve (trusted accept) - make_channel( - &nodes[0], - &nodes[1], - &monitor_a, - &monitor_b, - &keys_manager_b, - 1, - false, - false, - &mut chain_state, - ); - make_channel( - &nodes[0], - &nodes[1], - &monitor_a, - &monitor_b, - &keys_manager_b, - 2, - true, - true, - &mut chain_state, - ); - make_channel( - &nodes[0], - &nodes[1], - &monitor_a, - &monitor_b, - &keys_manager_b, - 3, - false, - true, - &mut chain_state, - ); + make_channel(&nodes[0], &nodes[1], 1, false, false, &mut chain_state); + make_channel(&nodes[0], &nodes[1], 2, true, true, &mut chain_state); + make_channel(&nodes[0], &nodes[1], 3, false, true, &mut chain_state); // B-C: channel 4 B has 0-reserve (via trusted accept), // channel 5 C has 0-reserve (via trusted open) - make_channel( - &nodes[1], - &nodes[2], - &monitor_b, - &monitor_c, - &keys_manager_c, - 4, - false, - true, - &mut chain_state, - ); - make_channel( - &nodes[1], - &nodes[2], - &monitor_b, - &monitor_c, - &keys_manager_c, - 5, - true, - false, - &mut 
chain_state, - ); - make_channel( - &nodes[1], - &nodes[2], - &monitor_b, - &monitor_c, - &keys_manager_c, - 6, - false, - false, - &mut chain_state, - ); + make_channel(&nodes[1], &nodes[2], 4, false, true, &mut chain_state); + make_channel(&nodes[1], &nodes[2], 5, true, false, &mut chain_state); + make_channel(&nodes[1], &nodes[2], 6, false, false, &mut chain_state); // Wipe the transactions-broadcasted set to make sure we don't broadcast any transactions // during normal operation in `test_return`. @@ -2656,8 +2624,9 @@ pub fn do_test(data: &[u8], out: Out) { &fee_est_a, broadcast_a.clone(), ); - nodes[0] = new_node_a; - monitor_a = new_monitor_a; + nodes[0].node = new_node_a; + monitor_a = Arc::clone(&new_monitor_a); + nodes[0].monitor = new_monitor_a; }, 0xb3..=0xbb => { // Restart node B, picking among the in-flight `ChannelMonitor`s to use based on @@ -2685,8 +2654,9 @@ pub fn do_test(data: &[u8], out: Out) { &fee_est_b, broadcast_b.clone(), ); - nodes[1] = new_node_b; - monitor_b = new_monitor_b; + nodes[1].node = new_node_b; + monitor_b = Arc::clone(&new_monitor_b); + nodes[1].monitor = new_monitor_b; }, 0xbc | 0xbd | 0xbe => { // Restart node C, picking among the in-flight `ChannelMonitor`s to use based on @@ -2710,8 +2680,9 @@ pub fn do_test(data: &[u8], out: Out) { &fee_est_c, broadcast_c.clone(), ); - nodes[2] = new_node_c; - monitor_c = new_monitor_c; + nodes[2].node = new_node_c; + monitor_c = Arc::clone(&new_monitor_c); + nodes[2].monitor = new_monitor_c; }, 0xc0 => keys_manager_a.disable_supported_ops_for_all_signers(), From c0de5836bb837162fde1cb3e54026847e143b1f6 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 21 Apr 2026 18:00:00 +0200 Subject: [PATCH 03/29] Build chanmon node resources Move construction of loggers, keys, monitors, broadcasters, wallets, and fee estimators into node resource setup. This removes ad hoc local closures while preserving the deterministic test inputs used by the fuzzer. 
--- fuzz/src/chanmon_consistency.rs | 669 ++++++++++++++++---------------- 1 file changed, 345 insertions(+), 324 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 5fdddaad6c4..4abbb9665fd 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -947,6 +947,10 @@ struct HarnessNode<'a> { node: ChanMan<'a>, monitor: Arc, keys_manager: Arc, + logger: Arc, + broadcaster: Arc, + fee_estimator: Arc, + wallet: TestWalletSource, } impl<'a> std::ops::Deref for HarnessNode<'a> { @@ -958,6 +962,72 @@ impl<'a> std::ops::Deref for HarnessNode<'a> { } impl<'a> HarnessNode<'a> { + fn build_loggers( + node_id: u8, out: &Out, + ) -> (Arc, Arc) { + let raw_logger = Arc::new(test_logger::TestLogger::new(node_id.to_string(), out.clone())); + let logger_for_monitor: Arc = raw_logger.clone(); + let logger: Arc = raw_logger; + (logger_for_monitor, logger) + } + + fn build_chain_monitor( + broadcaster: &Arc, fee_estimator: &Arc, + keys_manager: &Arc, logger_for_monitor: Arc, + persistence_style: ChannelMonitorUpdateStatus, + ) -> Arc { + Arc::new(TestChainMonitor::new( + Arc::clone(broadcaster), + logger_for_monitor, + Arc::clone(fee_estimator), + Arc::new(TestPersister { update_ret: Mutex::new(persistence_style) }), + Arc::clone(keys_manager), + )) + } + + fn new( + node_id: u8, wallet: TestWalletSource, fee_estimator: Arc, + broadcaster: Arc, persistence_style: ChannelMonitorUpdateStatus, + out: &Out, router: &'a FuzzRouter, chan_type: ChanType, + ) -> Self { + let (logger_for_monitor, logger) = Self::build_loggers(node_id, out); + let node_secret = SecretKey::from_slice(&[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1, node_id, + ]) + .unwrap(); + let keys_manager = Arc::new(KeyProvider { + node_secret, + rand_bytes_id: atomic::AtomicU32::new(0), + enforcement_states: Mutex::new(new_hash_map()), + }); + let monitor = Self::build_chain_monitor( + &broadcaster, 
+ &fee_estimator, + &keys_manager, + logger_for_monitor, + persistence_style, + ); + let network = Network::Bitcoin; + let best_block_timestamp = genesis_block(network).header.time; + let params = ChainParameters { network, best_block: BlockLocator::from_network(network) }; + let node = ChannelManager::new( + Arc::clone(&fee_estimator), + Arc::clone(&monitor), + Arc::clone(&broadcaster), + router, + router, + Arc::clone(&logger), + Arc::clone(&keys_manager), + Arc::clone(&keys_manager), + Arc::clone(&keys_manager), + build_node_config(chan_type), + params, + best_block_timestamp, + ); + Self { node, monitor, keys_manager, logger, broadcaster, fee_estimator, wallet } + } + fn our_node_id(&self) -> PublicKey { self.node.get_our_node_id() } @@ -997,6 +1067,17 @@ fn build_node_config(chan_type: ChanType) -> UserConfig { config } +fn assert_test_invariants(nodes: &[HarnessNode<'_>; 3]) { + assert_eq!(nodes[0].list_channels().len(), 3); + assert_eq!(nodes[1].list_channels().len(), 6); + assert_eq!(nodes[2].list_channels().len(), 3); + + // All broadcasters should be empty. Broadcast transactions are handled explicitly. 
+ assert!(nodes[0].broadcaster.txn_broadcasted.borrow().is_empty()); + assert!(nodes[1].broadcaster.txn_broadcasted.borrow().is_empty()); + assert!(nodes[2].broadcaster.txn_broadcasted.borrow().is_empty()); +} + fn connect_peers(source: &ChanMan<'_>, dest: &ChanMan<'_>) { let init_dest = Init { features: dest.init_features(), networks: None, remote_network_address: None }; @@ -1184,9 +1265,6 @@ fn lock_fundings(nodes: &[HarnessNode<'_>; 3]) { #[inline] pub fn do_test(data: &[u8], out: Out) { - let broadcast_a = Arc::new(TestBroadcaster { txn_broadcasted: RefCell::new(Vec::new()) }); - let broadcast_b = Arc::new(TestBroadcaster { txn_broadcasted: RefCell::new(Vec::new()) }); - let broadcast_c = Arc::new(TestBroadcaster { txn_broadcasted: RefCell::new(Vec::new()) }); let router = FuzzRouter {}; // Read initial monitor styles and channel type from fuzz input byte 0: @@ -1220,163 +1298,26 @@ pub fn do_test(data: &[u8], out: Out) { let mut node_height_a: u32 = 0; let mut node_height_b: u32 = 0; let mut node_height_c: u32 = 0; - - macro_rules! 
make_node { - ($node_id: expr, $fee_estimator: expr, $broadcaster: expr) => {{ - let logger: Arc = - Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone())); - let node_secret = SecretKey::from_slice(&[ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1, $node_id, - ]) - .unwrap(); - let keys_manager = Arc::new(KeyProvider { - node_secret, - rand_bytes_id: atomic::AtomicU32::new(0), - enforcement_states: Mutex::new(new_hash_map()), - }); - let monitor = Arc::new(TestChainMonitor::new( - $broadcaster.clone(), - logger.clone(), - $fee_estimator.clone(), - Arc::new(TestPersister { - update_ret: Mutex::new(mon_style[$node_id as usize].borrow().clone()), - }), - Arc::clone(&keys_manager), - )); - - let network = Network::Bitcoin; - let best_block_timestamp = genesis_block(network).header.time; - let params = - ChainParameters { network, best_block: BlockLocator::from_network(network) }; - ( - ChannelManager::new( - $fee_estimator.clone(), - monitor.clone(), - $broadcaster.clone(), - &router, - &router, - Arc::clone(&logger), - keys_manager.clone(), - keys_manager.clone(), - keys_manager.clone(), - build_node_config(chan_type), - params, - best_block_timestamp, - ), - monitor, - keys_manager, - logger, - ) - }}; - } - - let reload_node = |ser: &Vec, - node_id: u8, - old_monitors: &TestChainMonitor, - mut use_old_mons, - keys, - fee_estimator, - broadcaster: Arc| { - let keys_manager = Arc::clone(keys); - let logger: Arc = - Arc::new(test_logger::TestLogger::new(node_id.to_string(), out.clone())); - let chain_monitor = Arc::new(TestChainMonitor::new( - broadcaster.clone(), - logger.clone(), - Arc::clone(fee_estimator), - Arc::new(TestPersister { - update_ret: Mutex::new(ChannelMonitorUpdateStatus::Completed), - }), - Arc::clone(keys), - )); - - let mut monitors = new_hash_map(); - let mut old_monitors = old_monitors.latest_monitors.lock().unwrap(); - for (channel_id, mut prev_state) in old_monitors.drain() { - 
let (mon_id, serialized_mon) = if use_old_mons % 3 == 0 { - // Reload with the oldest `ChannelMonitor` (the one that we already told - // `ChannelManager` we finished persisting). - (prev_state.persisted_monitor_id, prev_state.persisted_monitor) - } else if use_old_mons % 3 == 1 { - // Reload with the second-oldest `ChannelMonitor` - let old_mon = (prev_state.persisted_monitor_id, prev_state.persisted_monitor); - prev_state.pending_monitors.drain(..).next().unwrap_or(old_mon) - } else { - // Reload with the newest `ChannelMonitor` - let old_mon = (prev_state.persisted_monitor_id, prev_state.persisted_monitor); - prev_state.pending_monitors.pop().unwrap_or(old_mon) - }; - // Use a different value of `use_old_mons` if we have another monitor (only for node B) - // by shifting `use_old_mons` one in base-3. - use_old_mons /= 3; - let mon = <(BlockLocator, ChannelMonitor)>::read( - &mut &serialized_mon[..], - (&**keys, &**keys), - ) - .expect("Failed to read monitor"); - monitors.insert(channel_id, mon.1); - // Update the latest `ChannelMonitor` state to match what we just told LDK. - prev_state.persisted_monitor = serialized_mon; - prev_state.persisted_monitor_id = mon_id; - // Wipe any `ChannelMonitor`s which we never told LDK we finished persisting, - // considering them discarded. LDK should replay these for us as they're stored in - // the `ChannelManager`. 
- prev_state.pending_monitors.clear(); - chain_monitor.latest_monitors.lock().unwrap().insert(channel_id, prev_state); - } - let mut monitor_refs = new_hash_map(); - for (channel_id, monitor) in monitors.iter() { - monitor_refs.insert(*channel_id, monitor); - } - - let read_args = ChannelManagerReadArgs { - entropy_source: Arc::clone(&keys_manager), - node_signer: Arc::clone(&keys_manager), - signer_provider: keys_manager, - fee_estimator: Arc::clone(fee_estimator), - chain_monitor: chain_monitor.clone(), - tx_broadcaster: broadcaster, - router: &router, - message_router: &router, - logger, - config: build_node_config(chan_type), - channel_monitors: monitor_refs, - }; - - let manager = <(BlockLocator, ChanMan)>::read(&mut &ser[..], read_args) - .expect("Failed to read manager"); - let res = (manager.1, chain_monitor.clone()); - for (channel_id, mon) in monitors.drain() { - assert_eq!( - chain_monitor.chain_monitor.watch_channel(channel_id, mon), - Ok(ChannelMonitorUpdateStatus::Completed) - ); - } - *chain_monitor.persister.update_ret.lock().unwrap() = *mon_style[node_id as usize].borrow(); - res - }; - let wallet_a = TestWalletSource::new(SecretKey::from_slice(&[1; 32]).unwrap()); let wallet_b = TestWalletSource::new(SecretKey::from_slice(&[2; 32]).unwrap()); let wallet_c = TestWalletSource::new(SecretKey::from_slice(&[3; 32]).unwrap()); - let wallets = vec![wallet_a, wallet_b, wallet_c]; + let wallets = [&wallet_a, &wallet_b, &wallet_c]; let coinbase_tx = bitcoin::Transaction { version: bitcoin::transaction::Version::TWO, lock_time: bitcoin::absolute::LockTime::ZERO, input: vec![bitcoin::TxIn { ..Default::default() }], output: wallets .iter() - .map(|w| TxOut { + .map(|wallet| TxOut { value: Amount::from_sat(100_000), - script_pubkey: w.get_change_script().unwrap(), + script_pubkey: wallet.get_change_script().unwrap(), }) .collect(), }; - wallets.iter().enumerate().for_each(|(i, w)| { - w.add_utxo(coinbase_tx.clone(), i as u32); - }); + for (idx, wallet) in 
wallets.iter().enumerate() {
+        wallet.add_utxo(coinbase_tx.clone(), idx as u32);
+    }
 
     let fee_est_a = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) });
     let mut last_htlc_clear_fee_a = 253;
@@ -1384,34 +1325,50 @@ pub fn do_test(data: &[u8], out: Out) {
     let mut last_htlc_clear_fee_b = 253;
     let fee_est_c = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) });
     let mut last_htlc_clear_fee_c = 253;
+    let broadcast_a = Arc::new(TestBroadcaster { txn_broadcasted: RefCell::new(Vec::new()) });
+    let broadcast_b = Arc::new(TestBroadcaster { txn_broadcasted: RefCell::new(Vec::new()) });
+    let broadcast_c = Arc::new(TestBroadcaster { txn_broadcasted: RefCell::new(Vec::new()) });
 
     // 3 nodes is enough to hit all the possible cases, notably unknown-source-unknown-dest
     // forwarding.
-    let (node_a, mut monitor_a, keys_manager_a, logger_a) = make_node!(0, fee_est_a, broadcast_a);
-    let (node_b, mut monitor_b, keys_manager_b, logger_b) = make_node!(1, fee_est_b, broadcast_b);
-    let (node_c, mut monitor_c, keys_manager_c, logger_c) = make_node!(2, fee_est_c, broadcast_c);
-    let mut nodes = [
-        HarnessNode {
-            node: node_a,
-            monitor: Arc::clone(&monitor_a),
-            keys_manager: Arc::clone(&keys_manager_a),
-        },
-        HarnessNode {
-            node: node_b,
-            monitor: Arc::clone(&monitor_b),
-            keys_manager: Arc::clone(&keys_manager_b),
-        },
-        HarnessNode {
-            node: node_c,
-            monitor: Arc::clone(&monitor_c),
-            keys_manager: Arc::clone(&keys_manager_c),
-        },
+    let mut nodes = [
+        HarnessNode::new(
+            0,
+            wallet_a,
+            Arc::clone(&fee_est_a),
+            Arc::clone(&broadcast_a),
+            mon_style[0].borrow().clone(),
+            &out,
+            &router,
+            chan_type,
+        ),
+        HarnessNode::new(
+            1,
+            wallet_b,
+            Arc::clone(&fee_est_b),
+            Arc::clone(&broadcast_b),
+            mon_style[1].borrow().clone(),
+            &out,
+            &router,
+            chan_type,
+        ),
+        HarnessNode::new(
+            2,
+            wallet_c,
+            Arc::clone(&fee_est_c),
+            Arc::clone(&broadcast_c),
+            mon_style[2].borrow().clone(),
+            &out,
+            &router,
+            chan_type,
+        ),
     ];
-    #[allow(unused_variables)]
-    let loggers = 
[logger_a, logger_b, logger_c]; - #[allow(unused_variables)] - let fee_estimators = [Arc::clone(&fee_est_a), Arc::clone(&fee_est_b), Arc::clone(&fee_est_c)]; + let mut monitor_a = Arc::clone(&nodes[0].monitor); + let mut monitor_b = Arc::clone(&nodes[1].monitor); + let mut monitor_c = Arc::clone(&nodes[2].monitor); + let keys_manager_a = Arc::clone(&nodes[0].keys_manager); + let keys_manager_b = Arc::clone(&nodes[1].keys_manager); + let keys_manager_c = Arc::clone(&nodes[2].keys_manager); // Connect peers first, then create channels connect_peers(&nodes[0], &nodes[1]); @@ -1434,12 +1391,12 @@ pub fn do_test(data: &[u8], out: Out) { // Wipe the transactions-broadcasted set to make sure we don't broadcast any transactions // during normal operation in `test_return`. - broadcast_a.txn_broadcasted.borrow_mut().clear(); - broadcast_b.txn_broadcasted.borrow_mut().clear(); - broadcast_c.txn_broadcasted.borrow_mut().clear(); + nodes[0].broadcaster.txn_broadcasted.borrow_mut().clear(); + nodes[1].broadcaster.txn_broadcasted.borrow_mut().clear(); + nodes[2].broadcaster.txn_broadcasted.borrow_mut().clear(); let sync_with_chain_state = |chain_state: &ChainState, - node: &ChannelManager<_, _, _, _, _, _, _, _, _>, + node: &HarnessNode<'_>, node_height: &mut u32, num_blocks: Option| { let target_height = if let Some(num_blocks) = num_blocks { @@ -1447,7 +1404,6 @@ pub fn do_test(data: &[u8], out: Out) { } else { chain_state.tip_height() }; - while *node_height < target_height { *node_height += 1; let (header, txn) = chain_state.block_at(*node_height); @@ -1460,9 +1416,9 @@ pub fn do_test(data: &[u8], out: Out) { }; // Sync all nodes to tip to lock the funding. 
- sync_with_chain_state(&mut chain_state, &nodes[0], &mut node_height_a, None); - sync_with_chain_state(&mut chain_state, &nodes[1], &mut node_height_b, None); - sync_with_chain_state(&mut chain_state, &nodes[2], &mut node_height_c, None); + sync_with_chain_state(&chain_state, &nodes[0], &mut node_height_a, None); + sync_with_chain_state(&chain_state, &nodes[1], &mut node_height_b, None); + sync_with_chain_state(&chain_state, &nodes[2], &mut node_height_c, None); lock_fundings(&nodes); @@ -1502,20 +1458,93 @@ pub fn do_test(data: &[u8], out: Out) { macro_rules! test_return { () => {{ - assert_eq!(nodes[0].list_channels().len(), 3); - assert_eq!(nodes[1].list_channels().len(), 6); - assert_eq!(nodes[2].list_channels().len(), 3); - - // All broadcasters should be empty (all broadcast transactions should be handled - // explicitly). - assert!(broadcast_a.txn_broadcasted.borrow().is_empty()); - assert!(broadcast_b.txn_broadcasted.borrow().is_empty()); - assert!(broadcast_c.txn_broadcasted.borrow().is_empty()); - + assert_test_invariants(&nodes); return; }}; } + let reload_node = |ser: &Vec, + node_id: u8, + old_monitors: &TestChainMonitor, + mut use_old_mons, + keys: &Arc, + fee_estimator: &Arc, + broadcaster: Arc| { + let keys_manager = Arc::clone(keys); + let (logger_for_monitor, logger) = HarnessNode::build_loggers(node_id, &out); + let chain_monitor = HarnessNode::build_chain_monitor( + &broadcaster, + fee_estimator, + &keys_manager, + logger_for_monitor, + ChannelMonitorUpdateStatus::Completed, + ); + + let mut monitors = new_hash_map(); + let mut old_monitors = old_monitors.latest_monitors.lock().unwrap(); + for (channel_id, mut prev_state) in old_monitors.drain() { + let (mon_id, serialized_mon) = if use_old_mons % 3 == 0 { + // Reload with the oldest `ChannelMonitor` (the one that we already told + // `ChannelManager` we finished persisting). 
+ (prev_state.persisted_monitor_id, prev_state.persisted_monitor) + } else if use_old_mons % 3 == 1 { + // Reload with the second-oldest `ChannelMonitor` + let old_mon = (prev_state.persisted_monitor_id, prev_state.persisted_monitor); + prev_state.pending_monitors.drain(..).next().unwrap_or(old_mon) + } else { + // Reload with the newest `ChannelMonitor` + let old_mon = (prev_state.persisted_monitor_id, prev_state.persisted_monitor); + prev_state.pending_monitors.pop().unwrap_or(old_mon) + }; + // Use a different value of `use_old_mons` if we have another monitor (only for node B) + // by shifting `use_old_mons` one in base-3. + use_old_mons /= 3; + let mon = <(BlockLocator, ChannelMonitor)>::read( + &mut &serialized_mon[..], + (&*keys_manager, &*keys_manager), + ) + .expect("Failed to read monitor"); + monitors.insert(channel_id, mon.1); + // Update the latest `ChannelMonitor` state to match what we just told LDK. + prev_state.persisted_monitor = serialized_mon; + prev_state.persisted_monitor_id = mon_id; + // Wipe any `ChannelMonitor`s which we never told LDK we finished persisting, + // considering them discarded. LDK should replay these for us as they're stored in + // the `ChannelManager`. 
+ prev_state.pending_monitors.clear(); + chain_monitor.latest_monitors.lock().unwrap().insert(channel_id, prev_state); + } + let mut monitor_refs = new_hash_map(); + for (channel_id, monitor) in monitors.iter() { + monitor_refs.insert(*channel_id, monitor); + } + + let read_args = ChannelManagerReadArgs { + entropy_source: Arc::clone(&keys_manager), + node_signer: Arc::clone(&keys_manager), + signer_provider: Arc::clone(&keys_manager), + fee_estimator: Arc::clone(fee_estimator), + chain_monitor: chain_monitor.clone(), + tx_broadcaster: broadcaster, + router: &router, + message_router: &router, + logger: Arc::clone(&logger), + config: build_node_config(chan_type), + channel_monitors: monitor_refs, + }; + + let manager = <(BlockLocator, ChanMan)>::read(&mut &ser[..], read_args) + .expect("Failed to read manager"); + for (channel_id, mon) in monitors.drain() { + assert_eq!( + chain_monitor.chain_monitor.watch_channel(channel_id, mon), + Ok(ChannelMonitorUpdateStatus::Completed) + ); + } + *chain_monitor.persister.update_ret.lock().unwrap() = *mon_style[node_id as usize].borrow(); + (manager.1, chain_monitor, logger) + }; + let mut read_pos = 1; // First byte was consumed for initial config (mon_style + chan_type) macro_rules! 
get_slice { ($len: expr) => {{ @@ -1528,82 +1557,6 @@ pub fn do_test(data: &[u8], out: Out) { }}; } - let splice_channel = - |node: &ChanMan, - counterparty_node_id: &PublicKey, - channel_id: &ChannelId, - f: &dyn Fn(FundingTemplate) -> Result| { - match node.splice_channel(channel_id, counterparty_node_id) { - Ok(funding_template) => { - if let Ok(contribution) = f(funding_template) { - let _ = node.funding_contributed( - channel_id, - counterparty_node_id, - contribution, - None, - ); - } - }, - Err(e) => { - assert!( - matches!(e, APIError::APIMisuseError { ref err } if err.contains("splice")), - "{:?}", - e - ); - }, - } - }; - - let splice_in = - |node: &ChanMan, - counterparty_node_id: &PublicKey, - channel_id: &ChannelId, - wallet: &WalletSync<&TestWalletSource, Arc>, - funding_feerate_sat_per_kw: FeeRate| { - splice_channel( - node, - counterparty_node_id, - channel_id, - &move |funding_template: FundingTemplate| { - let feerate = - funding_template.min_rbf_feerate().unwrap_or(funding_feerate_sat_per_kw); - funding_template.splice_in_sync( - Amount::from_sat(10_000), - feerate, - FeeRate::MAX, - wallet, - ) - }, - ); - }; - - let splice_out = |node: &ChanMan, - counterparty_node_id: &PublicKey, - channel_id: &ChannelId, - wallet: &TestWalletSource, - funding_feerate_sat_per_kw: FeeRate| { - // We conditionally splice out `MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS` only when the node - // has double the balance required to send a payment upon a `0xff` byte. We do this to - // ensure there's always liquidity available for a payment to succeed then. 
- let outbound_capacity_msat = node - .list_channels() - .iter() - .find(|chan| chan.channel_id == *channel_id) - .map(|chan| chan.outbound_capacity_msat) - .unwrap(); - if outbound_capacity_msat < 20_000_000 { - return; - } - splice_channel(node, counterparty_node_id, channel_id, &move |funding_template| { - let feerate = funding_template.min_rbf_feerate().unwrap_or(funding_feerate_sat_per_kw); - let outputs = vec![TxOut { - value: Amount::from_sat(MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS), - script_pubkey: wallet.get_change_script().unwrap(), - }]; - funding_template.splice_out(outputs, feerate, FeeRate::MAX) - }); - }; - loop { // Push any events from Node B onto ba_events and bc_events macro_rules! push_excess_b_events { @@ -2083,7 +2036,8 @@ pub fn do_test(data: &[u8], out: Out) { unsigned_transaction, .. } => { - let signed_tx = wallets[$node].sign_tx(unsigned_transaction).unwrap(); + let signed_tx = + nodes[$node].wallet.sign_tx(unsigned_transaction).unwrap(); nodes[$node] .funding_transaction_signed( &channel_id, @@ -2093,12 +2047,7 @@ pub fn do_test(data: &[u8], out: Out) { .unwrap(); }, events::Event::SplicePending { new_funding_txo, .. 
} => { - let broadcaster = match $node { - 0 => &broadcast_a, - 1 => &broadcast_b, - _ => &broadcast_c, - }; - let mut txs = broadcaster.txn_broadcasted.borrow_mut(); + let mut txs = nodes[$node].broadcaster.txn_broadcasted.borrow_mut(); assert!(txs.len() >= 1); let splice_tx = txs.remove(0); assert_eq!(new_funding_txo.txid, splice_tx.compute_txid()); @@ -2148,7 +2097,6 @@ pub fn do_test(data: &[u8], out: Out) { } } }; - let complete_all_monitor_updates = |monitor: &Arc, chan_id| { if let Some(state) = monitor.latest_monitors.lock().unwrap().get_mut(chan_id) { assert!( @@ -2165,6 +2113,85 @@ pub fn do_test(data: &[u8], out: Out) { } }; + let splice_channel = + |node: &HarnessNode<'_>, + counterparty_node_id: &PublicKey, + channel_id: &ChannelId, + f: &dyn Fn( + FundingTemplate, + ) -> Result| { + match node.splice_channel(channel_id, counterparty_node_id) { + Ok(funding_template) => { + if let Ok(contribution) = f(funding_template) { + let _ = node.funding_contributed( + channel_id, + counterparty_node_id, + contribution, + None, + ); + } + }, + Err(e) => { + assert!( + matches!(e, APIError::APIMisuseError { ref err } if err.contains("splice")), + "{:?}", + e + ); + }, + } + }; + + let splice_in = |node: &HarnessNode<'_>, + counterparty_node_id: &PublicKey, + channel_id: &ChannelId| { + let wallet = WalletSync::new(&node.wallet, Arc::clone(&node.logger)); + let funding_feerate_sat_per_kw = node.fee_estimator.feerate_sat_per_kw(); + splice_channel( + node, + counterparty_node_id, + channel_id, + &move |funding_template: FundingTemplate| { + let feerate = + funding_template.min_rbf_feerate().unwrap_or(funding_feerate_sat_per_kw); + funding_template.splice_in_sync( + Amount::from_sat(10_000), + feerate, + FeeRate::MAX, + &wallet, + ) + }, + ); + }; + + let splice_out = |node: &HarnessNode<'_>, + counterparty_node_id: &PublicKey, + channel_id: &ChannelId| { + let outbound_capacity_msat = node + .list_channels() + .iter() + .find(|chan| chan.channel_id == *channel_id) + 
.map(|chan| chan.outbound_capacity_msat) + .unwrap(); + if outbound_capacity_msat < 20_000_000 { + return; + } + let funding_feerate_sat_per_kw = node.fee_estimator.feerate_sat_per_kw(); + splice_channel( + node, + counterparty_node_id, + channel_id, + &move |funding_template: FundingTemplate| { + let feerate = + funding_template.min_rbf_feerate().unwrap_or(funding_feerate_sat_per_kw); + let outputs = vec![TxOut { + value: Amount::from_sat(MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS), + script_pubkey: node.wallet.get_change_script().unwrap(), + }]; + funding_template.splice_out(outputs, feerate, FeeRate::MAX) + }, + ); + }; + let send = |source_idx: usize, dest_idx: usize, dest_chan_id, amt, payment_ctr: &mut u64| { let source = &nodes[source_idx]; @@ -2461,43 +2488,47 @@ pub fn do_test(data: &[u8], out: Out) { if matches!(chan_type, ChanType::Legacy) { max_feerate *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE as u32; } - if fee_est_a.ret_val.fetch_add(250, atomic::Ordering::AcqRel) + 250 > max_feerate { - fee_est_a.ret_val.store(max_feerate, atomic::Ordering::Release); + if nodes[0].fee_estimator.ret_val.fetch_add(250, atomic::Ordering::AcqRel) + 250 + > max_feerate + { + nodes[0].fee_estimator.ret_val.store(max_feerate, atomic::Ordering::Release); } nodes[0].timer_tick_occurred(); }, 0x81 => { - fee_est_a.ret_val.store(253, atomic::Ordering::Release); + nodes[0].fee_estimator.ret_val.store(253, atomic::Ordering::Release); nodes[0].timer_tick_occurred(); }, - 0x84 => { let mut max_feerate = last_htlc_clear_fee_b; if matches!(chan_type, ChanType::Legacy) { max_feerate *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE as u32; } - if fee_est_b.ret_val.fetch_add(250, atomic::Ordering::AcqRel) + 250 > max_feerate { - fee_est_b.ret_val.store(max_feerate, atomic::Ordering::Release); + if nodes[1].fee_estimator.ret_val.fetch_add(250, atomic::Ordering::AcqRel) + 250 + > max_feerate + { + nodes[1].fee_estimator.ret_val.store(max_feerate, atomic::Ordering::Release); } 
nodes[1].timer_tick_occurred(); }, 0x85 => { - fee_est_b.ret_val.store(253, atomic::Ordering::Release); + nodes[1].fee_estimator.ret_val.store(253, atomic::Ordering::Release); nodes[1].timer_tick_occurred(); }, - 0x88 => { let mut max_feerate = last_htlc_clear_fee_c; if matches!(chan_type, ChanType::Legacy) { max_feerate *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE as u32; } - if fee_est_c.ret_val.fetch_add(250, atomic::Ordering::AcqRel) + 250 > max_feerate { - fee_est_c.ret_val.store(max_feerate, atomic::Ordering::Release); + if nodes[2].fee_estimator.ret_val.fetch_add(250, atomic::Ordering::AcqRel) + 250 + > max_feerate + { + nodes[2].fee_estimator.ret_val.store(max_feerate, atomic::Ordering::Release); } nodes[2].timer_tick_occurred(); }, 0x89 => { - fee_est_c.ret_val.store(253, atomic::Ordering::Release); + nodes[2].fee_estimator.ret_val.store(253, atomic::Ordering::Release); nodes[2].timer_tick_occurred(); }, @@ -2506,36 +2537,28 @@ pub fn do_test(data: &[u8], out: Out) { test_return!(); } let cp_node_id = nodes[1].get_our_node_id(); - let wallet = WalletSync::new(&wallets[0], Arc::clone(&loggers[0])); - let feerate_sat_per_kw = fee_estimators[0].feerate_sat_per_kw(); - splice_in(&nodes[0], &cp_node_id, &chan_a_id, &wallet, feerate_sat_per_kw); + splice_in(&nodes[0], &cp_node_id, &chan_a_id); }, 0xa1 => { if !cfg!(splicing) { test_return!(); } let cp_node_id = nodes[0].get_our_node_id(); - let wallet = WalletSync::new(&wallets[1], Arc::clone(&loggers[1])); - let feerate_sat_per_kw = fee_estimators[1].feerate_sat_per_kw(); - splice_in(&nodes[1], &cp_node_id, &chan_a_id, &wallet, feerate_sat_per_kw); + splice_in(&nodes[1], &cp_node_id, &chan_a_id); }, 0xa2 => { if !cfg!(splicing) { test_return!(); } let cp_node_id = nodes[2].get_our_node_id(); - let wallet = WalletSync::new(&wallets[1], Arc::clone(&loggers[1])); - let feerate_sat_per_kw = fee_estimators[1].feerate_sat_per_kw(); - splice_in(&nodes[1], &cp_node_id, &chan_b_id, &wallet, feerate_sat_per_kw); + 
splice_in(&nodes[1], &cp_node_id, &chan_b_id); }, 0xa3 => { if !cfg!(splicing) { test_return!(); } let cp_node_id = nodes[1].get_our_node_id(); - let wallet = WalletSync::new(&wallets[2], Arc::clone(&loggers[2])); - let feerate_sat_per_kw = fee_estimators[2].feerate_sat_per_kw(); - splice_in(&nodes[2], &cp_node_id, &chan_b_id, &wallet, feerate_sat_per_kw); + splice_in(&nodes[2], &cp_node_id, &chan_b_id); }, 0xa4 => { @@ -2543,63 +2566,55 @@ pub fn do_test(data: &[u8], out: Out) { test_return!(); } let cp_node_id = nodes[1].get_our_node_id(); - let wallet = &wallets[0]; - let feerate_sat_per_kw = fee_estimators[0].feerate_sat_per_kw(); - splice_out(&nodes[0], &cp_node_id, &chan_a_id, wallet, feerate_sat_per_kw); + splice_out(&nodes[0], &cp_node_id, &chan_a_id); }, 0xa5 => { if !cfg!(splicing) { test_return!(); } let cp_node_id = nodes[0].get_our_node_id(); - let wallet = &wallets[1]; - let feerate_sat_per_kw = fee_estimators[1].feerate_sat_per_kw(); - splice_out(&nodes[1], &cp_node_id, &chan_a_id, wallet, feerate_sat_per_kw); + splice_out(&nodes[1], &cp_node_id, &chan_a_id); }, 0xa6 => { if !cfg!(splicing) { test_return!(); } let cp_node_id = nodes[2].get_our_node_id(); - let wallet = &wallets[1]; - let feerate_sat_per_kw = fee_estimators[1].feerate_sat_per_kw(); - splice_out(&nodes[1], &cp_node_id, &chan_b_id, wallet, feerate_sat_per_kw); + splice_out(&nodes[1], &cp_node_id, &chan_b_id); }, 0xa7 => { if !cfg!(splicing) { test_return!(); } let cp_node_id = nodes[1].get_our_node_id(); - let wallet = &wallets[2]; - let feerate_sat_per_kw = fee_estimators[2].feerate_sat_per_kw(); - splice_out(&nodes[2], &cp_node_id, &chan_b_id, wallet, feerate_sat_per_kw); + splice_out(&nodes[2], &cp_node_id, &chan_b_id); }, // Sync node by 1 block to cover confirmation of a transaction. 
0xa8 => { chain_state.confirm_pending_txs(); - sync_with_chain_state(&mut chain_state, &nodes[0], &mut node_height_a, Some(1)); + sync_with_chain_state(&chain_state, &nodes[0], &mut node_height_a, Some(1)); }, 0xa9 => { chain_state.confirm_pending_txs(); - sync_with_chain_state(&mut chain_state, &nodes[1], &mut node_height_b, Some(1)); + sync_with_chain_state(&chain_state, &nodes[1], &mut node_height_b, Some(1)); }, 0xaa => { chain_state.confirm_pending_txs(); - sync_with_chain_state(&mut chain_state, &nodes[2], &mut node_height_c, Some(1)); + sync_with_chain_state(&chain_state, &nodes[2], &mut node_height_c, Some(1)); }, // Sync node to chain tip to cover confirmation of a transaction post-reorg-risk. 0xab => { chain_state.confirm_pending_txs(); - sync_with_chain_state(&mut chain_state, &nodes[0], &mut node_height_a, None); + sync_with_chain_state(&chain_state, &nodes[0], &mut node_height_a, None); }, 0xac => { chain_state.confirm_pending_txs(); - sync_with_chain_state(&mut chain_state, &nodes[1], &mut node_height_b, None); + sync_with_chain_state(&chain_state, &nodes[1], &mut node_height_b, None); }, 0xad => { chain_state.confirm_pending_txs(); - sync_with_chain_state(&mut chain_state, &nodes[2], &mut node_height_c, None); + sync_with_chain_state(&chain_state, &nodes[2], &mut node_height_c, None); }, 0xb0 | 0xb1 | 0xb2 => { @@ -2615,18 +2630,19 @@ pub fn do_test(data: &[u8], out: Out) { ab_events.clear(); ba_events.clear(); } - let (new_node_a, new_monitor_a) = reload_node( + let (new_node_a, new_monitor_a, new_logger_a) = reload_node( &node_a_ser, 0, &monitor_a, v, &keys_manager_a, &fee_est_a, - broadcast_a.clone(), + Arc::clone(&broadcast_a), ); nodes[0].node = new_node_a; monitor_a = Arc::clone(&new_monitor_a); nodes[0].monitor = new_monitor_a; + nodes[0].logger = new_logger_a; }, 0xb3..=0xbb => { // Restart node B, picking among the in-flight `ChannelMonitor`s to use based on @@ -2645,18 +2661,19 @@ pub fn do_test(data: &[u8], out: Out) { bc_events.clear(); 
cb_events.clear(); } - let (new_node_b, new_monitor_b) = reload_node( + let (new_node_b, new_monitor_b, new_logger_b) = reload_node( &node_b_ser, 1, &monitor_b, v, &keys_manager_b, &fee_est_b, - broadcast_b.clone(), + Arc::clone(&broadcast_b), ); nodes[1].node = new_node_b; monitor_b = Arc::clone(&new_monitor_b); nodes[1].monitor = new_monitor_b; + nodes[1].logger = new_logger_b; }, 0xbc | 0xbd | 0xbe => { // Restart node C, picking among the in-flight `ChannelMonitor`s to use based on @@ -2671,18 +2688,19 @@ pub fn do_test(data: &[u8], out: Out) { bc_events.clear(); cb_events.clear(); } - let (new_node_c, new_monitor_c) = reload_node( + let (new_node_c, new_monitor_c, new_logger_c) = reload_node( &node_c_ser, 2, &monitor_c, v, &keys_manager_c, &fee_est_c, - broadcast_c.clone(), + Arc::clone(&broadcast_c), ); nodes[2].node = new_node_c; monitor_c = Arc::clone(&new_monitor_c); nodes[2].monitor = new_monitor_c; + nodes[2].logger = new_logger_c; }, 0xc0 => keys_manager_a.disable_supported_ops_for_all_signers(), @@ -2957,20 +2975,23 @@ pub fn do_test(data: &[u8], out: Out) { ); } - last_htlc_clear_fee_a = fee_est_a.ret_val.load(atomic::Ordering::Acquire); - last_htlc_clear_fee_b = fee_est_b.ret_val.load(atomic::Ordering::Acquire); - last_htlc_clear_fee_c = fee_est_c.ret_val.load(atomic::Ordering::Acquire); + last_htlc_clear_fee_a = + nodes[0].fee_estimator.ret_val.load(atomic::Ordering::Acquire); + last_htlc_clear_fee_b = + nodes[1].fee_estimator.ret_val.load(atomic::Ordering::Acquire); + last_htlc_clear_fee_c = + nodes[2].fee_estimator.ret_val.load(atomic::Ordering::Acquire); }, _ => test_return!(), } - if nodes[0].get_and_clear_needs_persistence() == true { + if nodes[0].get_and_clear_needs_persistence() { node_a_ser = nodes[0].encode(); } - if nodes[1].get_and_clear_needs_persistence() == true { + if nodes[1].get_and_clear_needs_persistence() { node_b_ser = nodes[1].encode(); } - if nodes[2].get_and_clear_needs_persistence() == true { + if 
nodes[2].get_and_clear_needs_persistence() { node_c_ser = nodes[2].encode(); } } From db0ea51f5e746e306a97a234602a6ff1b91d53a5 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 21 Apr 2026 18:00:24 +0200 Subject: [PATCH 04/29] Extract chanmon harness nodes Centralize creation of the three chanmon harness nodes. The fuzzer now initializes the node array through one path, which reduces duplicated setup before the event and payment helpers are split out. --- fuzz/src/chanmon_consistency.rs | 155 +++++++++++++------------------- 1 file changed, 63 insertions(+), 92 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 4abbb9665fd..905d989bfdb 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -1363,12 +1363,6 @@ pub fn do_test(data: &[u8], out: Out) { chan_type, ), ]; - let mut monitor_a = Arc::clone(&nodes[0].monitor); - let mut monitor_b = Arc::clone(&nodes[1].monitor); - let mut monitor_c = Arc::clone(&nodes[2].monitor); - let keys_manager_a = Arc::clone(&nodes[0].keys_manager); - let keys_manager_b = Arc::clone(&nodes[1].keys_manager); - let keys_manager_c = Arc::clone(&nodes[2].keys_manager); // Connect peers first, then create channels connect_peers(&nodes[0], &nodes[1]); @@ -1463,25 +1457,18 @@ pub fn do_test(data: &[u8], out: Out) { }}; } - let reload_node = |ser: &Vec, - node_id: u8, - old_monitors: &TestChainMonitor, - mut use_old_mons, - keys: &Arc, - fee_estimator: &Arc, - broadcaster: Arc| { - let keys_manager = Arc::clone(keys); + let reload_node = |ser: &Vec, node_id: u8, old_node: &HarnessNode<'_>, mut use_old_mons| { let (logger_for_monitor, logger) = HarnessNode::build_loggers(node_id, &out); let chain_monitor = HarnessNode::build_chain_monitor( - &broadcaster, - fee_estimator, - &keys_manager, + &old_node.broadcaster, + &old_node.fee_estimator, + &old_node.keys_manager, logger_for_monitor, ChannelMonitorUpdateStatus::Completed, ); let mut monitors = new_hash_map(); - 
let mut old_monitors = old_monitors.latest_monitors.lock().unwrap(); + let mut old_monitors = old_node.monitor.latest_monitors.lock().unwrap(); for (channel_id, mut prev_state) in old_monitors.drain() { let (mon_id, serialized_mon) = if use_old_mons % 3 == 0 { // Reload with the oldest `ChannelMonitor` (the one that we already told @@ -1501,7 +1488,7 @@ pub fn do_test(data: &[u8], out: Out) { use_old_mons /= 3; let mon = <(BlockLocator, ChannelMonitor)>::read( &mut &serialized_mon[..], - (&*keys_manager, &*keys_manager), + (&*old_node.keys_manager, &*old_node.keys_manager), ) .expect("Failed to read monitor"); monitors.insert(channel_id, mon.1); @@ -1520,12 +1507,12 @@ pub fn do_test(data: &[u8], out: Out) { } let read_args = ChannelManagerReadArgs { - entropy_source: Arc::clone(&keys_manager), - node_signer: Arc::clone(&keys_manager), - signer_provider: Arc::clone(&keys_manager), - fee_estimator: Arc::clone(fee_estimator), + entropy_source: Arc::clone(&old_node.keys_manager), + node_signer: Arc::clone(&old_node.keys_manager), + signer_provider: Arc::clone(&old_node.keys_manager), + fee_estimator: Arc::clone(&old_node.fee_estimator), chain_monitor: chain_monitor.clone(), - tx_broadcaster: broadcaster, + tx_broadcaster: Arc::clone(&old_node.broadcaster), router: &router, message_router: &router, logger: Arc::clone(&logger), @@ -2312,22 +2299,22 @@ pub fn do_test(data: &[u8], out: Out) { 0x08 => { for id in &chan_ab_ids { - complete_all_monitor_updates(&monitor_a, id); + complete_all_monitor_updates(&nodes[0].monitor, id); } }, 0x09 => { for id in &chan_ab_ids { - complete_all_monitor_updates(&monitor_b, id); + complete_all_monitor_updates(&nodes[1].monitor, id); } }, 0x0a => { for id in &chan_bc_ids { - complete_all_monitor_updates(&monitor_b, id); + complete_all_monitor_updates(&nodes[1].monitor, id); } }, 0x0b => { for id in &chan_bc_ids { - complete_all_monitor_updates(&monitor_c, id); + complete_all_monitor_updates(&nodes[2].monitor, id); } }, @@ -2630,17 
+2617,9 @@ pub fn do_test(data: &[u8], out: Out) { ab_events.clear(); ba_events.clear(); } - let (new_node_a, new_monitor_a, new_logger_a) = reload_node( - &node_a_ser, - 0, - &monitor_a, - v, - &keys_manager_a, - &fee_est_a, - Arc::clone(&broadcast_a), - ); + let (new_node_a, new_monitor_a, new_logger_a) = + reload_node(&node_a_ser, 0, &nodes[0], v); nodes[0].node = new_node_a; - monitor_a = Arc::clone(&new_monitor_a); nodes[0].monitor = new_monitor_a; nodes[0].logger = new_logger_a; }, @@ -2661,17 +2640,9 @@ pub fn do_test(data: &[u8], out: Out) { bc_events.clear(); cb_events.clear(); } - let (new_node_b, new_monitor_b, new_logger_b) = reload_node( - &node_b_ser, - 1, - &monitor_b, - v, - &keys_manager_b, - &fee_est_b, - Arc::clone(&broadcast_b), - ); + let (new_node_b, new_monitor_b, new_logger_b) = + reload_node(&node_b_ser, 1, &nodes[1], v); nodes[1].node = new_node_b; - monitor_b = Arc::clone(&new_monitor_b); nodes[1].monitor = new_monitor_b; nodes[1].logger = new_logger_b; }, @@ -2688,140 +2659,140 @@ pub fn do_test(data: &[u8], out: Out) { bc_events.clear(); cb_events.clear(); } - let (new_node_c, new_monitor_c, new_logger_c) = reload_node( - &node_c_ser, - 2, - &monitor_c, - v, - &keys_manager_c, - &fee_est_c, - Arc::clone(&broadcast_c), - ); + let (new_node_c, new_monitor_c, new_logger_c) = + reload_node(&node_c_ser, 2, &nodes[2], v); nodes[2].node = new_node_c; - monitor_c = Arc::clone(&new_monitor_c); nodes[2].monitor = new_monitor_c; nodes[2].logger = new_logger_c; }, - 0xc0 => keys_manager_a.disable_supported_ops_for_all_signers(), - 0xc1 => keys_manager_b.disable_supported_ops_for_all_signers(), - 0xc2 => keys_manager_c.disable_supported_ops_for_all_signers(), + 0xc0 => nodes[0].keys_manager.disable_supported_ops_for_all_signers(), + 0xc1 => nodes[1].keys_manager.disable_supported_ops_for_all_signers(), + 0xc2 => nodes[2].keys_manager.disable_supported_ops_for_all_signers(), 0xc3 => { - 
keys_manager_a.enable_op_for_all_signers(SignerOp::SignCounterpartyCommitment); + nodes[0] + .keys_manager + .enable_op_for_all_signers(SignerOp::SignCounterpartyCommitment); nodes[0].signer_unblocked(None); }, 0xc4 => { - keys_manager_b.enable_op_for_all_signers(SignerOp::SignCounterpartyCommitment); + nodes[1] + .keys_manager + .enable_op_for_all_signers(SignerOp::SignCounterpartyCommitment); let filter = Some((nodes[0].get_our_node_id(), chan_a_id)); nodes[1].signer_unblocked(filter); }, 0xc5 => { - keys_manager_b.enable_op_for_all_signers(SignerOp::SignCounterpartyCommitment); + nodes[1] + .keys_manager + .enable_op_for_all_signers(SignerOp::SignCounterpartyCommitment); let filter = Some((nodes[2].get_our_node_id(), chan_b_id)); nodes[1].signer_unblocked(filter); }, 0xc6 => { - keys_manager_c.enable_op_for_all_signers(SignerOp::SignCounterpartyCommitment); + nodes[2] + .keys_manager + .enable_op_for_all_signers(SignerOp::SignCounterpartyCommitment); nodes[2].signer_unblocked(None); }, 0xc7 => { - keys_manager_a.enable_op_for_all_signers(SignerOp::GetPerCommitmentPoint); + nodes[0].keys_manager.enable_op_for_all_signers(SignerOp::GetPerCommitmentPoint); nodes[0].signer_unblocked(None); }, 0xc8 => { - keys_manager_b.enable_op_for_all_signers(SignerOp::GetPerCommitmentPoint); + nodes[1].keys_manager.enable_op_for_all_signers(SignerOp::GetPerCommitmentPoint); let filter = Some((nodes[0].get_our_node_id(), chan_a_id)); nodes[1].signer_unblocked(filter); }, 0xc9 => { - keys_manager_b.enable_op_for_all_signers(SignerOp::GetPerCommitmentPoint); + nodes[1].keys_manager.enable_op_for_all_signers(SignerOp::GetPerCommitmentPoint); let filter = Some((nodes[2].get_our_node_id(), chan_b_id)); nodes[1].signer_unblocked(filter); }, 0xca => { - keys_manager_c.enable_op_for_all_signers(SignerOp::GetPerCommitmentPoint); + nodes[2].keys_manager.enable_op_for_all_signers(SignerOp::GetPerCommitmentPoint); nodes[2].signer_unblocked(None); }, 0xcb => { - 
keys_manager_a.enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); + nodes[0].keys_manager.enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); nodes[0].signer_unblocked(None); }, 0xcc => { - keys_manager_b.enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); + nodes[1].keys_manager.enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); let filter = Some((nodes[0].get_our_node_id(), chan_a_id)); nodes[1].signer_unblocked(filter); }, 0xcd => { - keys_manager_b.enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); + nodes[1].keys_manager.enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); let filter = Some((nodes[2].get_our_node_id(), chan_b_id)); nodes[1].signer_unblocked(filter); }, 0xce => { - keys_manager_c.enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); + nodes[2].keys_manager.enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); nodes[2].signer_unblocked(None); }, 0xf0 => { for id in &chan_ab_ids { - complete_monitor_update(&monitor_a, id, &complete_first); + complete_monitor_update(&nodes[0].monitor, id, &complete_first); } }, 0xf1 => { for id in &chan_ab_ids { - complete_monitor_update(&monitor_a, id, &complete_second); + complete_monitor_update(&nodes[0].monitor, id, &complete_second); } }, 0xf2 => { for id in &chan_ab_ids { - complete_monitor_update(&monitor_a, id, &Vec::pop); + complete_monitor_update(&nodes[0].monitor, id, &Vec::pop); } }, 0xf4 => { for id in &chan_ab_ids { - complete_monitor_update(&monitor_b, id, &complete_first); + complete_monitor_update(&nodes[1].monitor, id, &complete_first); } }, 0xf5 => { for id in &chan_ab_ids { - complete_monitor_update(&monitor_b, id, &complete_second); + complete_monitor_update(&nodes[1].monitor, id, &complete_second); } }, 0xf6 => { for id in &chan_ab_ids { - complete_monitor_update(&monitor_b, id, &Vec::pop); + complete_monitor_update(&nodes[1].monitor, id, &Vec::pop); } }, 0xf8 => { for id in &chan_bc_ids { - 
complete_monitor_update(&monitor_b, id, &complete_first); + complete_monitor_update(&nodes[1].monitor, id, &complete_first); } }, 0xf9 => { for id in &chan_bc_ids { - complete_monitor_update(&monitor_b, id, &complete_second); + complete_monitor_update(&nodes[1].monitor, id, &complete_second); } }, 0xfa => { for id in &chan_bc_ids { - complete_monitor_update(&monitor_b, id, &Vec::pop); + complete_monitor_update(&nodes[1].monitor, id, &Vec::pop); } }, 0xfc => { for id in &chan_bc_ids { - complete_monitor_update(&monitor_c, id, &complete_first); + complete_monitor_update(&nodes[2].monitor, id, &complete_first); } }, 0xfd => { for id in &chan_bc_ids { - complete_monitor_update(&monitor_c, id, &complete_second); + complete_monitor_update(&nodes[2].monitor, id, &complete_second); } }, 0xfe => { for id in &chan_bc_ids { - complete_monitor_update(&monitor_c, id, &Vec::pop); + complete_monitor_update(&nodes[2].monitor, id, &Vec::pop); } }, @@ -2862,9 +2833,9 @@ pub fn do_test(data: &[u8], out: Out) { } for op in SUPPORTED_SIGNER_OPS { - keys_manager_a.enable_op_for_all_signers(op); - keys_manager_b.enable_op_for_all_signers(op); - keys_manager_c.enable_op_for_all_signers(op); + nodes[0].keys_manager.enable_op_for_all_signers(op); + nodes[1].keys_manager.enable_op_for_all_signers(op); + nodes[2].keys_manager.enable_op_for_all_signers(op); } nodes[0].signer_unblocked(None); nodes[1].signer_unblocked(None); @@ -2879,12 +2850,12 @@ pub fn do_test(data: &[u8], out: Out) { } // Next, make sure no monitor updates are pending for id in &chan_ab_ids { - complete_all_monitor_updates(&monitor_a, id); - complete_all_monitor_updates(&monitor_b, id); + complete_all_monitor_updates(&nodes[0].monitor, id); + complete_all_monitor_updates(&nodes[1].monitor, id); } for id in &chan_bc_ids { - complete_all_monitor_updates(&monitor_b, id); - complete_all_monitor_updates(&monitor_c, id); + complete_all_monitor_updates(&nodes[1].monitor, id); + complete_all_monitor_updates(&nodes[2].monitor, id); 
} // Then, make sure any current forwards make their way to their destination if process_msg_events!(0, false, ProcessMessages::AllMessages) { From bd4ee1da8c9c0db9b4a09d6f751519b9de58325a Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 21 Apr 2026 17:24:01 +0200 Subject: [PATCH 05/29] Extract chanmon harness node lifecycle Move persistence, reload, and chain sync state onto each harness node. Keeping serialized managers and heights with the node makes restarts and block updates easier to reason about. --- fuzz/src/chanmon_consistency.rs | 337 ++++++++++++++++---------------- 1 file changed, 167 insertions(+), 170 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 905d989bfdb..de6a824eace 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -944,6 +944,7 @@ enum ChanType { } struct HarnessNode<'a> { + node_id: u8, node: ChanMan<'a>, monitor: Arc, keys_manager: Arc, @@ -951,6 +952,10 @@ struct HarnessNode<'a> { broadcaster: Arc, fee_estimator: Arc, wallet: TestWalletSource, + persistence_style: ChannelMonitorUpdateStatus, + serialized_manager: Vec, + height: u32, + last_htlc_clear_fee: u32, } impl<'a> std::ops::Deref for HarnessNode<'a> { @@ -1025,13 +1030,30 @@ impl<'a> HarnessNode<'a> { params, best_block_timestamp, ); - Self { node, monitor, keys_manager, logger, broadcaster, fee_estimator, wallet } + Self { + node_id, + node, + monitor, + keys_manager, + logger, + broadcaster, + fee_estimator, + wallet, + persistence_style, + serialized_manager: Vec::new(), + height: 0, + last_htlc_clear_fee: 253, + } } fn our_node_id(&self) -> PublicKey { self.node.get_our_node_id() } + fn set_persistence_style(&mut self, style: ChannelMonitorUpdateStatus) { + self.persistence_style = style; + } + fn complete_all_pending_monitor_updates(&self) { for (channel_id, state) in self.monitor.latest_monitors.lock().unwrap().iter_mut() { for (id, data) in state.pending_monitors.drain(..) 
{ @@ -1043,6 +1065,94 @@ impl<'a> HarnessNode<'a> { } } } + + fn refresh_serialized_manager(&mut self) { + if self.node.get_and_clear_needs_persistence() { + self.serialized_manager = self.node.encode(); + } + } + + fn reload( + &mut self, use_old_mons: u8, out: &Out, router: &'a FuzzRouter, chan_type: ChanType, + ) { + let (logger_for_monitor, logger) = Self::build_loggers(self.node_id, out); + let chain_monitor = Self::build_chain_monitor( + &self.broadcaster, + &self.fee_estimator, + &self.keys_manager, + logger_for_monitor, + ChannelMonitorUpdateStatus::Completed, + ); + + let mut monitors = new_hash_map(); + let mut use_old_mons = use_old_mons; + { + let mut old_monitors = self.monitor.latest_monitors.lock().unwrap(); + for (channel_id, mut prev_state) in old_monitors.drain() { + let (mon_id, serialized_mon) = if use_old_mons % 3 == 0 { + // Reload with the oldest `ChannelMonitor` (the one that we already told + // `ChannelManager` we finished persisting). + (prev_state.persisted_monitor_id, prev_state.persisted_monitor) + } else if use_old_mons % 3 == 1 { + // Reload with the second-oldest `ChannelMonitor`. + let old_mon = (prev_state.persisted_monitor_id, prev_state.persisted_monitor); + prev_state.pending_monitors.drain(..).next().unwrap_or(old_mon) + } else { + // Reload with the newest `ChannelMonitor`. + let old_mon = (prev_state.persisted_monitor_id, prev_state.persisted_monitor); + prev_state.pending_monitors.pop().unwrap_or(old_mon) + }; + // Use a different value of `use_old_mons` if we have another monitor + // (only for node B) by shifting `use_old_mons` one in base-3. + use_old_mons /= 3; + let mon = <(BlockLocator, ChannelMonitor)>::read( + &mut &serialized_mon[..], + (&*self.keys_manager, &*self.keys_manager), + ) + .expect("Failed to read monitor"); + monitors.insert(channel_id, mon.1); + // Update the latest `ChannelMonitor` state to match what we just told LDK. 
+ prev_state.persisted_monitor = serialized_mon; + prev_state.persisted_monitor_id = mon_id; + // Wipe any `ChannelMonitor`s which we never told LDK we finished persisting, + // considering them discarded. LDK should replay these for us as they're stored in + // the `ChannelManager`. + prev_state.pending_monitors.clear(); + chain_monitor.latest_monitors.lock().unwrap().insert(channel_id, prev_state); + } + } + let mut monitor_refs = new_hash_map(); + for (channel_id, monitor) in monitors.iter() { + monitor_refs.insert(*channel_id, monitor); + } + + let read_args = ChannelManagerReadArgs { + entropy_source: Arc::clone(&self.keys_manager), + node_signer: Arc::clone(&self.keys_manager), + signer_provider: Arc::clone(&self.keys_manager), + fee_estimator: Arc::clone(&self.fee_estimator), + chain_monitor: Arc::clone(&chain_monitor), + tx_broadcaster: Arc::clone(&self.broadcaster), + router, + message_router: router, + logger: Arc::clone(&logger), + config: build_node_config(chan_type), + channel_monitors: monitor_refs, + }; + + let manager = <(BlockLocator, ChanMan)>::read(&mut &self.serialized_manager[..], read_args) + .expect("Failed to read manager"); + for (channel_id, mon) in monitors.drain() { + assert_eq!( + chain_monitor.chain_monitor.watch_channel(channel_id, mon), + Ok(ChannelMonitorUpdateStatus::Completed) + ); + } + *chain_monitor.persister.update_ret.lock().unwrap() = self.persistence_style; + self.node = manager.1; + self.monitor = chain_monitor; + self.logger = logger; + } } fn build_node_config(chan_type: ChanType) -> UserConfig { @@ -1276,28 +1386,25 @@ pub fn do_test(data: &[u8], out: Out) { 1 => ChanType::KeyedAnchors, _ => ChanType::ZeroFeeCommitments, }; - let mon_style = [ - RefCell::new(if config_byte & 0b01 != 0 { + let persistence_styles = [ + if config_byte & 0b01 != 0 { ChannelMonitorUpdateStatus::InProgress } else { ChannelMonitorUpdateStatus::Completed - }), - RefCell::new(if config_byte & 0b10 != 0 { + }, + if config_byte & 0b10 != 0 { 
ChannelMonitorUpdateStatus::InProgress } else { ChannelMonitorUpdateStatus::Completed - }), - RefCell::new(if config_byte & 0b100 != 0 { + }, + if config_byte & 0b100 != 0 { ChannelMonitorUpdateStatus::InProgress } else { ChannelMonitorUpdateStatus::Completed - }), + }, ]; let mut chain_state = ChainState::new(); - let mut node_height_a: u32 = 0; - let mut node_height_b: u32 = 0; - let mut node_height_c: u32 = 0; let wallet_a = TestWalletSource::new(SecretKey::from_slice(&[1; 32]).unwrap()); let wallet_b = TestWalletSource::new(SecretKey::from_slice(&[2; 32]).unwrap()); let wallet_c = TestWalletSource::new(SecretKey::from_slice(&[3; 32]).unwrap()); @@ -1320,11 +1427,8 @@ pub fn do_test(data: &[u8], out: Out) { } let fee_est_a = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }); - let mut last_htlc_clear_fee_a = 253; let fee_est_b = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }); - let mut last_htlc_clear_fee_b = 253; let fee_est_c = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }); - let mut last_htlc_clear_fee_c = 253; let broadcast_a = Arc::new(TestBroadcaster { txn_broadcasted: RefCell::new(Vec::new()) }); let broadcast_b = Arc::new(TestBroadcaster { txn_broadcasted: RefCell::new(Vec::new()) }); let broadcast_c = Arc::new(TestBroadcaster { txn_broadcasted: RefCell::new(Vec::new()) }); @@ -1337,7 +1441,7 @@ pub fn do_test(data: &[u8], out: Out) { wallet_a, Arc::clone(&fee_est_a), Arc::clone(&broadcast_a), - mon_style[0].borrow().clone(), + persistence_styles[0], &out, &router, chan_type, @@ -1347,7 +1451,7 @@ pub fn do_test(data: &[u8], out: Out) { wallet_b, Arc::clone(&fee_est_b), Arc::clone(&broadcast_b), - mon_style[1].borrow().clone(), + persistence_styles[1], &out, &router, chan_type, @@ -1357,7 +1461,7 @@ pub fn do_test(data: &[u8], out: Out) { wallet_c, Arc::clone(&fee_est_c), Arc::clone(&broadcast_c), - mon_style[2].borrow().clone(), + persistence_styles[2], &out, &router, chan_type, @@ -1389,30 +1493,28 @@ 
pub fn do_test(data: &[u8], out: Out) { nodes[1].broadcaster.txn_broadcasted.borrow_mut().clear(); nodes[2].broadcaster.txn_broadcasted.borrow_mut().clear(); - let sync_with_chain_state = |chain_state: &ChainState, - node: &HarnessNode<'_>, - node_height: &mut u32, - num_blocks: Option| { - let target_height = if let Some(num_blocks) = num_blocks { - std::cmp::min(*node_height + num_blocks, chain_state.tip_height()) - } else { - chain_state.tip_height() - }; - while *node_height < target_height { - *node_height += 1; - let (header, txn) = chain_state.block_at(*node_height); - let txdata: Vec<_> = txn.iter().enumerate().map(|(i, tx)| (i + 1, tx)).collect(); - if !txdata.is_empty() { - node.transactions_confirmed(header, &txdata, *node_height); + let sync_with_chain_state = + |node: &mut HarnessNode<'_>, chain_state: &ChainState, num_blocks: Option| { + let target_height = if let Some(num_blocks) = num_blocks { + std::cmp::min(node.height + num_blocks, chain_state.tip_height()) + } else { + chain_state.tip_height() + }; + while node.height < target_height { + node.height += 1; + let (header, txn) = chain_state.block_at(node.height); + let txdata: Vec<_> = txn.iter().enumerate().map(|(i, tx)| (i + 1, tx)).collect(); + if !txdata.is_empty() { + node.transactions_confirmed(header, &txdata, node.height); + } + node.best_block_updated(header, node.height); } - node.best_block_updated(header, *node_height); - } - }; + }; // Sync all nodes to tip to lock the funding. 
- sync_with_chain_state(&chain_state, &nodes[0], &mut node_height_a, None); - sync_with_chain_state(&chain_state, &nodes[1], &mut node_height_b, None); - sync_with_chain_state(&chain_state, &nodes[2], &mut node_height_c, None); + sync_with_chain_state(&mut nodes[0], &chain_state, None); + sync_with_chain_state(&mut nodes[1], &chain_state, None); + sync_with_chain_state(&mut nodes[2], &chain_state, None); lock_fundings(&nodes); @@ -1439,9 +1541,9 @@ pub fn do_test(data: &[u8], out: Out) { let mut bc_events = Vec::new(); let mut cb_events = Vec::new(); - let mut node_a_ser = nodes[0].encode(); - let mut node_b_ser = nodes[1].encode(); - let mut node_c_ser = nodes[2].encode(); + for node in &mut nodes { + node.serialized_manager = node.encode(); + } let pending_payments = RefCell::new([Vec::new(), Vec::new(), Vec::new()]); let resolved_payments: RefCell<[HashMap>; 3]> = @@ -1457,82 +1559,7 @@ pub fn do_test(data: &[u8], out: Out) { }}; } - let reload_node = |ser: &Vec, node_id: u8, old_node: &HarnessNode<'_>, mut use_old_mons| { - let (logger_for_monitor, logger) = HarnessNode::build_loggers(node_id, &out); - let chain_monitor = HarnessNode::build_chain_monitor( - &old_node.broadcaster, - &old_node.fee_estimator, - &old_node.keys_manager, - logger_for_monitor, - ChannelMonitorUpdateStatus::Completed, - ); - - let mut monitors = new_hash_map(); - let mut old_monitors = old_node.monitor.latest_monitors.lock().unwrap(); - for (channel_id, mut prev_state) in old_monitors.drain() { - let (mon_id, serialized_mon) = if use_old_mons % 3 == 0 { - // Reload with the oldest `ChannelMonitor` (the one that we already told - // `ChannelManager` we finished persisting). 
- (prev_state.persisted_monitor_id, prev_state.persisted_monitor) - } else if use_old_mons % 3 == 1 { - // Reload with the second-oldest `ChannelMonitor` - let old_mon = (prev_state.persisted_monitor_id, prev_state.persisted_monitor); - prev_state.pending_monitors.drain(..).next().unwrap_or(old_mon) - } else { - // Reload with the newest `ChannelMonitor` - let old_mon = (prev_state.persisted_monitor_id, prev_state.persisted_monitor); - prev_state.pending_monitors.pop().unwrap_or(old_mon) - }; - // Use a different value of `use_old_mons` if we have another monitor (only for node B) - // by shifting `use_old_mons` one in base-3. - use_old_mons /= 3; - let mon = <(BlockLocator, ChannelMonitor)>::read( - &mut &serialized_mon[..], - (&*old_node.keys_manager, &*old_node.keys_manager), - ) - .expect("Failed to read monitor"); - monitors.insert(channel_id, mon.1); - // Update the latest `ChannelMonitor` state to match what we just told LDK. - prev_state.persisted_monitor = serialized_mon; - prev_state.persisted_monitor_id = mon_id; - // Wipe any `ChannelMonitor`s which we never told LDK we finished persisting, - // considering them discarded. LDK should replay these for us as they're stored in - // the `ChannelManager`. 
- prev_state.pending_monitors.clear(); - chain_monitor.latest_monitors.lock().unwrap().insert(channel_id, prev_state); - } - let mut monitor_refs = new_hash_map(); - for (channel_id, monitor) in monitors.iter() { - monitor_refs.insert(*channel_id, monitor); - } - - let read_args = ChannelManagerReadArgs { - entropy_source: Arc::clone(&old_node.keys_manager), - node_signer: Arc::clone(&old_node.keys_manager), - signer_provider: Arc::clone(&old_node.keys_manager), - fee_estimator: Arc::clone(&old_node.fee_estimator), - chain_monitor: chain_monitor.clone(), - tx_broadcaster: Arc::clone(&old_node.broadcaster), - router: &router, - message_router: &router, - logger: Arc::clone(&logger), - config: build_node_config(chan_type), - channel_monitors: monitor_refs, - }; - - let manager = <(BlockLocator, ChanMan)>::read(&mut &ser[..], read_args) - .expect("Failed to read manager"); - for (channel_id, mon) in monitors.drain() { - assert_eq!( - chain_monitor.chain_monitor.watch_channel(channel_id, mon), - Ok(ChannelMonitorUpdateStatus::Completed) - ); - } - *chain_monitor.persister.update_ret.lock().unwrap() = *mon_style[node_id as usize].borrow(); - (manager.1, chain_monitor, logger) - }; - - let mut read_pos = 1; // First byte was consumed for initial config (mon_style + chan_type) + let mut read_pos = 1; // First byte was consumed for initial config (persistence styles + chan_type) macro_rules! get_slice { ($len: expr) => {{ let slice_len = $len as usize; @@ -2278,24 +2305,12 @@ pub fn do_test(data: &[u8], out: Out) { // In general, we keep related message groups close together in binary form, allowing // bit-twiddling mutations to have similar effects. This is probably overkill, but no // harm in doing so. 
- 0x00 => { - *mon_style[0].borrow_mut() = ChannelMonitorUpdateStatus::InProgress; - }, - 0x01 => { - *mon_style[1].borrow_mut() = ChannelMonitorUpdateStatus::InProgress; - }, - 0x02 => { - *mon_style[2].borrow_mut() = ChannelMonitorUpdateStatus::InProgress; - }, - 0x04 => { - *mon_style[0].borrow_mut() = ChannelMonitorUpdateStatus::Completed; - }, - 0x05 => { - *mon_style[1].borrow_mut() = ChannelMonitorUpdateStatus::Completed; - }, - 0x06 => { - *mon_style[2].borrow_mut() = ChannelMonitorUpdateStatus::Completed; - }, + 0x00 => nodes[0].set_persistence_style(ChannelMonitorUpdateStatus::InProgress), + 0x01 => nodes[1].set_persistence_style(ChannelMonitorUpdateStatus::InProgress), + 0x02 => nodes[2].set_persistence_style(ChannelMonitorUpdateStatus::InProgress), + 0x04 => nodes[0].set_persistence_style(ChannelMonitorUpdateStatus::Completed), + 0x05 => nodes[1].set_persistence_style(ChannelMonitorUpdateStatus::Completed), + 0x06 => nodes[2].set_persistence_style(ChannelMonitorUpdateStatus::Completed), 0x08 => { for id in &chan_ab_ids { @@ -2471,7 +2486,7 @@ pub fn do_test(data: &[u8], out: Out) { }, 0x80 => { - let mut max_feerate = last_htlc_clear_fee_a; + let mut max_feerate = nodes[0].last_htlc_clear_fee; if matches!(chan_type, ChanType::Legacy) { max_feerate *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE as u32; } @@ -2487,7 +2502,7 @@ pub fn do_test(data: &[u8], out: Out) { nodes[0].timer_tick_occurred(); }, 0x84 => { - let mut max_feerate = last_htlc_clear_fee_b; + let mut max_feerate = nodes[1].last_htlc_clear_fee; if matches!(chan_type, ChanType::Legacy) { max_feerate *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE as u32; } @@ -2503,7 +2518,7 @@ pub fn do_test(data: &[u8], out: Out) { nodes[1].timer_tick_occurred(); }, 0x88 => { - let mut max_feerate = last_htlc_clear_fee_c; + let mut max_feerate = nodes[2].last_htlc_clear_fee; if matches!(chan_type, ChanType::Legacy) { max_feerate *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE as u32; } @@ -2580,28 +2595,28 @@ pub fn 
do_test(data: &[u8], out: Out) { // Sync node by 1 block to cover confirmation of a transaction. 0xa8 => { chain_state.confirm_pending_txs(); - sync_with_chain_state(&chain_state, &nodes[0], &mut node_height_a, Some(1)); + sync_with_chain_state(&mut nodes[0], &chain_state, Some(1)); }, 0xa9 => { chain_state.confirm_pending_txs(); - sync_with_chain_state(&chain_state, &nodes[1], &mut node_height_b, Some(1)); + sync_with_chain_state(&mut nodes[1], &chain_state, Some(1)); }, 0xaa => { chain_state.confirm_pending_txs(); - sync_with_chain_state(&chain_state, &nodes[2], &mut node_height_c, Some(1)); + sync_with_chain_state(&mut nodes[2], &chain_state, Some(1)); }, // Sync node to chain tip to cover confirmation of a transaction post-reorg-risk. 0xab => { chain_state.confirm_pending_txs(); - sync_with_chain_state(&chain_state, &nodes[0], &mut node_height_a, None); + sync_with_chain_state(&mut nodes[0], &chain_state, None); }, 0xac => { chain_state.confirm_pending_txs(); - sync_with_chain_state(&chain_state, &nodes[1], &mut node_height_b, None); + sync_with_chain_state(&mut nodes[1], &chain_state, None); }, 0xad => { chain_state.confirm_pending_txs(); - sync_with_chain_state(&chain_state, &nodes[2], &mut node_height_c, None); + sync_with_chain_state(&mut nodes[2], &chain_state, None); }, 0xb0 | 0xb1 | 0xb2 => { @@ -2617,11 +2632,7 @@ pub fn do_test(data: &[u8], out: Out) { ab_events.clear(); ba_events.clear(); } - let (new_node_a, new_monitor_a, new_logger_a) = - reload_node(&node_a_ser, 0, &nodes[0], v); - nodes[0].node = new_node_a; - nodes[0].monitor = new_monitor_a; - nodes[0].logger = new_logger_a; + nodes[0].reload(v, &out, &router, chan_type); }, 0xb3..=0xbb => { // Restart node B, picking among the in-flight `ChannelMonitor`s to use based on @@ -2640,11 +2651,7 @@ pub fn do_test(data: &[u8], out: Out) { bc_events.clear(); cb_events.clear(); } - let (new_node_b, new_monitor_b, new_logger_b) = - reload_node(&node_b_ser, 1, &nodes[1], v); - nodes[1].node = new_node_b; 
- nodes[1].monitor = new_monitor_b; - nodes[1].logger = new_logger_b; + nodes[1].reload(v, &out, &router, chan_type); }, 0xbc | 0xbd | 0xbe => { // Restart node C, picking among the in-flight `ChannelMonitor`s to use based on @@ -2659,11 +2666,7 @@ pub fn do_test(data: &[u8], out: Out) { bc_events.clear(); cb_events.clear(); } - let (new_node_c, new_monitor_c, new_logger_c) = - reload_node(&node_c_ser, 2, &nodes[2], v); - nodes[2].node = new_node_c; - nodes[2].monitor = new_monitor_c; - nodes[2].logger = new_logger_c; + nodes[2].reload(v, &out, &router, chan_type); }, 0xc0 => nodes[0].keys_manager.disable_supported_ops_for_all_signers(), @@ -2946,24 +2949,18 @@ pub fn do_test(data: &[u8], out: Out) { ); } - last_htlc_clear_fee_a = + nodes[0].last_htlc_clear_fee = nodes[0].fee_estimator.ret_val.load(atomic::Ordering::Acquire); - last_htlc_clear_fee_b = + nodes[1].last_htlc_clear_fee = nodes[1].fee_estimator.ret_val.load(atomic::Ordering::Acquire); - last_htlc_clear_fee_c = + nodes[2].last_htlc_clear_fee = nodes[2].fee_estimator.ret_val.load(atomic::Ordering::Acquire); }, _ => test_return!(), } - if nodes[0].get_and_clear_needs_persistence() { - node_a_ser = nodes[0].encode(); - } - if nodes[1].get_and_clear_needs_persistence() { - node_b_ser = nodes[1].encode(); - } - if nodes[2].get_and_clear_needs_persistence() { - node_c_ser = nodes[2].encode(); + for node in &mut nodes { + node.refresh_serialized_manager(); } } } From a49834ec8e75a6a346d9ab395192d4c3c2aeaeb4 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 21 Apr 2026 16:32:31 +0200 Subject: [PATCH 06/29] Extract chanmon harness node action helpers Lift monitor update, splice, and chain sync actions into named helper functions. This keeps the byte-dispatch loop focused on choosing actions rather than spelling out each operation. 
--- fuzz/src/chanmon_consistency.rs | 299 ++++++++++++++++---------------- 1 file changed, 153 insertions(+), 146 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index de6a824eace..86b89a8f95f 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -56,7 +56,6 @@ use lightning::ln::channelmanager::{ TrustedChannelFeatures, }; use lightning::ln::functional_test_utils::*; -use lightning::ln::funding::{FundingContribution, FundingContributionError, FundingTemplate}; use lightning::ln::inbound_payment::ExpandedKey; use lightning::ln::msgs::{ self, BaseMessageHandler, ChannelMessageHandler, CommitmentUpdate, Init, MessageSendEvent, @@ -1155,6 +1154,147 @@ impl<'a> HarnessNode<'a> { } } +#[derive(Copy, Clone)] +enum MonitorUpdateSelector { + First, + Second, + Last, +} + +fn complete_monitor_update( + monitor: &Arc, chan_id: &ChannelId, selector: MonitorUpdateSelector, +) { + if let Some(state) = monitor.latest_monitors.lock().unwrap().get_mut(chan_id) { + assert!( + state.pending_monitors.windows(2).all(|pair| pair[0].0 < pair[1].0), + "updates should be sorted by id" + ); + let update = match selector { + MonitorUpdateSelector::First => { + if state.pending_monitors.is_empty() { + None + } else { + Some(state.pending_monitors.remove(0)) + } + }, + MonitorUpdateSelector::Second => { + if state.pending_monitors.len() > 1 { + Some(state.pending_monitors.remove(1)) + } else { + None + } + }, + MonitorUpdateSelector::Last => state.pending_monitors.pop(), + }; + if let Some((id, data)) = update { + monitor.chain_monitor.channel_monitor_updated(*chan_id, id).unwrap(); + if id > state.persisted_monitor_id { + state.persisted_monitor_id = id; + state.persisted_monitor = data; + } + } + } +} + +fn complete_all_monitor_updates(monitor: &Arc, chan_id: &ChannelId) { + if let Some(state) = monitor.latest_monitors.lock().unwrap().get_mut(chan_id) { + assert!( + state.pending_monitors.windows(2).all(|pair| 
pair[0].0 < pair[1].0), + "updates should be sorted by id" + ); + for (id, data) in state.pending_monitors.drain(..) { + monitor.chain_monitor.channel_monitor_updated(*chan_id, id).unwrap(); + if id > state.persisted_monitor_id { + state.persisted_monitor_id = id; + state.persisted_monitor = data; + } + } + } +} + +fn sync_with_chain_state( + node: &mut HarnessNode<'_>, chain_state: &ChainState, num_blocks: Option, +) { + let target_height = if let Some(num_blocks) = num_blocks { + std::cmp::min(node.height + num_blocks, chain_state.tip_height()) + } else { + chain_state.tip_height() + }; + while node.height < target_height { + node.height += 1; + let (header, txn) = chain_state.block_at(node.height); + let txdata: Vec<_> = txn.iter().enumerate().map(|(i, tx)| (i + 1, tx)).collect(); + if !txdata.is_empty() { + node.transactions_confirmed(header, &txdata, node.height); + } + node.best_block_updated(header, node.height); + } +} + +fn splice_in(node: &HarnessNode<'_>, counterparty_node_id: &PublicKey, channel_id: &ChannelId) { + let wallet = WalletSync::new(&node.wallet, Arc::clone(&node.logger)); + match node.splice_channel(channel_id, counterparty_node_id) { + Ok(funding_template) => { + let feerate = funding_template + .min_rbf_feerate() + .unwrap_or(node.fee_estimator.feerate_sat_per_kw()); + if let Ok(contribution) = funding_template.splice_in_sync( + Amount::from_sat(10_000), + feerate, + FeeRate::MAX, + &wallet, + ) { + let _ = + node.funding_contributed(channel_id, counterparty_node_id, contribution, None); + } + }, + Err(e) => { + assert!( + matches!(e, APIError::APIMisuseError { ref err } if err.contains("splice")), + "{:?}", + e + ); + }, + } +} + +fn splice_out(node: &HarnessNode<'_>, counterparty_node_id: &PublicKey, channel_id: &ChannelId) { + // We conditionally splice out `MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS` only when the node + // has double the balance required to send a payment upon a `0xff` byte. 
We do this to + // ensure there's always liquidity available for a payment to succeed then. + let outbound_capacity_msat = node + .list_channels() + .iter() + .find(|chan| chan.channel_id == *channel_id) + .map(|chan| chan.outbound_capacity_msat) + .unwrap(); + if outbound_capacity_msat < 20_000_000 { + return; + } + match node.splice_channel(channel_id, counterparty_node_id) { + Ok(funding_template) => { + let feerate = funding_template + .min_rbf_feerate() + .unwrap_or(node.fee_estimator.feerate_sat_per_kw()); + let outputs = vec![TxOut { + value: Amount::from_sat(MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS), + script_pubkey: node.wallet.get_change_script().unwrap(), + }]; + if let Ok(contribution) = funding_template.splice_out(outputs, feerate, FeeRate::MAX) { + let _ = + node.funding_contributed(channel_id, counterparty_node_id, contribution, None); + } + }, + Err(e) => { + assert!( + matches!(e, APIError::APIMisuseError { ref err } if err.contains("splice")), + "{:?}", + e + ); + }, + } +} + fn build_node_config(chan_type: ChanType) -> UserConfig { let mut config = UserConfig::default(); config.channel_config.forwarding_fee_proportional_millionths = 0; @@ -1493,24 +1633,6 @@ pub fn do_test(data: &[u8], out: Out) { nodes[1].broadcaster.txn_broadcasted.borrow_mut().clear(); nodes[2].broadcaster.txn_broadcasted.borrow_mut().clear(); - let sync_with_chain_state = - |node: &mut HarnessNode<'_>, chain_state: &ChainState, num_blocks: Option| { - let target_height = if let Some(num_blocks) = num_blocks { - std::cmp::min(node.height + num_blocks, chain_state.tip_height()) - } else { - chain_state.tip_height() - }; - while node.height < target_height { - node.height += 1; - let (header, txn) = chain_state.block_at(node.height); - let txdata: Vec<_> = txn.iter().enumerate().map(|(i, tx)| (i + 1, tx)).collect(); - if !txdata.is_empty() { - node.transactions_confirmed(header, &txdata, node.height); - } - node.best_block_updated(header, node.height); - } - }; - // Sync all nodes to 
tip to lock the funding. sync_with_chain_state(&mut nodes[0], &chain_state, None); sync_with_chain_state(&mut nodes[1], &chain_state, None); @@ -2091,121 +2213,6 @@ pub fn do_test(data: &[u8], out: Out) { }}; } - let complete_first = |v: &mut Vec<_>| if !v.is_empty() { Some(v.remove(0)) } else { None }; - let complete_second = |v: &mut Vec<_>| if v.len() > 1 { Some(v.remove(1)) } else { None }; - let complete_monitor_update = - |monitor: &Arc, - chan_funding, - compl_selector: &dyn Fn(&mut Vec<(u64, Vec)>) -> Option<(u64, Vec)>| { - if let Some(state) = monitor.latest_monitors.lock().unwrap().get_mut(chan_funding) { - assert!( - state.pending_monitors.windows(2).all(|pair| pair[0].0 < pair[1].0), - "updates should be sorted by id" - ); - if let Some((id, data)) = compl_selector(&mut state.pending_monitors) { - monitor.chain_monitor.channel_monitor_updated(*chan_funding, id).unwrap(); - if id > state.persisted_monitor_id { - state.persisted_monitor_id = id; - state.persisted_monitor = data; - } - } - } - }; - let complete_all_monitor_updates = |monitor: &Arc, chan_id| { - if let Some(state) = monitor.latest_monitors.lock().unwrap().get_mut(chan_id) { - assert!( - state.pending_monitors.windows(2).all(|pair| pair[0].0 < pair[1].0), - "updates should be sorted by id" - ); - for (id, data) in state.pending_monitors.drain(..) 
{ - monitor.chain_monitor.channel_monitor_updated(*chan_id, id).unwrap(); - if id > state.persisted_monitor_id { - state.persisted_monitor_id = id; - state.persisted_monitor = data; - } - } - } - }; - - let splice_channel = - |node: &HarnessNode<'_>, - counterparty_node_id: &PublicKey, - channel_id: &ChannelId, - f: &dyn Fn( - FundingTemplate, - ) -> Result| { - match node.splice_channel(channel_id, counterparty_node_id) { - Ok(funding_template) => { - if let Ok(contribution) = f(funding_template) { - let _ = node.funding_contributed( - channel_id, - counterparty_node_id, - contribution, - None, - ); - } - }, - Err(e) => { - assert!( - matches!(e, APIError::APIMisuseError { ref err } if err.contains("splice")), - "{:?}", - e - ); - }, - } - }; - - let splice_in = |node: &HarnessNode<'_>, - counterparty_node_id: &PublicKey, - channel_id: &ChannelId| { - let wallet = WalletSync::new(&node.wallet, Arc::clone(&node.logger)); - let funding_feerate_sat_per_kw = node.fee_estimator.feerate_sat_per_kw(); - splice_channel( - node, - counterparty_node_id, - channel_id, - &move |funding_template: FundingTemplate| { - let feerate = - funding_template.min_rbf_feerate().unwrap_or(funding_feerate_sat_per_kw); - funding_template.splice_in_sync( - Amount::from_sat(10_000), - feerate, - FeeRate::MAX, - &wallet, - ) - }, - ); - }; - - let splice_out = |node: &HarnessNode<'_>, - counterparty_node_id: &PublicKey, - channel_id: &ChannelId| { - let outbound_capacity_msat = node - .list_channels() - .iter() - .find(|chan| chan.channel_id == *channel_id) - .map(|chan| chan.outbound_capacity_msat) - .unwrap(); - if outbound_capacity_msat < 20_000_000 { - return; - } - let funding_feerate_sat_per_kw = node.fee_estimator.feerate_sat_per_kw(); - splice_channel( - node, - counterparty_node_id, - channel_id, - &move |funding_template: FundingTemplate| { - let feerate = - funding_template.min_rbf_feerate().unwrap_or(funding_feerate_sat_per_kw); - let outputs = vec![TxOut { - value: 
Amount::from_sat(MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS), - script_pubkey: node.wallet.get_change_script().unwrap(), - }]; - funding_template.splice_out(outputs, feerate, FeeRate::MAX) - }, - ); - }; - let send = |source_idx: usize, dest_idx: usize, dest_chan_id, amt, payment_ctr: &mut u64| { let source = &nodes[source_idx]; @@ -2737,65 +2744,65 @@ pub fn do_test(data: &[u8], out: Out) { 0xf0 => { for id in &chan_ab_ids { - complete_monitor_update(&nodes[0].monitor, id, &complete_first); + complete_monitor_update(&nodes[0].monitor, id, MonitorUpdateSelector::First); } }, 0xf1 => { for id in &chan_ab_ids { - complete_monitor_update(&nodes[0].monitor, id, &complete_second); + complete_monitor_update(&nodes[0].monitor, id, MonitorUpdateSelector::Second); } }, 0xf2 => { for id in &chan_ab_ids { - complete_monitor_update(&nodes[0].monitor, id, &Vec::pop); + complete_monitor_update(&nodes[0].monitor, id, MonitorUpdateSelector::Last); } }, 0xf4 => { for id in &chan_ab_ids { - complete_monitor_update(&nodes[1].monitor, id, &complete_first); + complete_monitor_update(&nodes[1].monitor, id, MonitorUpdateSelector::First); } }, 0xf5 => { for id in &chan_ab_ids { - complete_monitor_update(&nodes[1].monitor, id, &complete_second); + complete_monitor_update(&nodes[1].monitor, id, MonitorUpdateSelector::Second); } }, 0xf6 => { for id in &chan_ab_ids { - complete_monitor_update(&nodes[1].monitor, id, &Vec::pop); + complete_monitor_update(&nodes[1].monitor, id, MonitorUpdateSelector::Last); } }, 0xf8 => { for id in &chan_bc_ids { - complete_monitor_update(&nodes[1].monitor, id, &complete_first); + complete_monitor_update(&nodes[1].monitor, id, MonitorUpdateSelector::First); } }, 0xf9 => { for id in &chan_bc_ids { - complete_monitor_update(&nodes[1].monitor, id, &complete_second); + complete_monitor_update(&nodes[1].monitor, id, MonitorUpdateSelector::Second); } }, 0xfa => { for id in &chan_bc_ids { - complete_monitor_update(&nodes[1].monitor, id, &Vec::pop); + 
complete_monitor_update(&nodes[1].monitor, id, MonitorUpdateSelector::Last); } }, 0xfc => { for id in &chan_bc_ids { - complete_monitor_update(&nodes[2].monitor, id, &complete_first); + complete_monitor_update(&nodes[2].monitor, id, MonitorUpdateSelector::First); } }, 0xfd => { for id in &chan_bc_ids { - complete_monitor_update(&nodes[2].monitor, id, &complete_second); + complete_monitor_update(&nodes[2].monitor, id, MonitorUpdateSelector::Second); } }, 0xfe => { for id in &chan_bc_ids { - complete_monitor_update(&nodes[2].monitor, id, &Vec::pop); + complete_monitor_update(&nodes[2].monitor, id, MonitorUpdateSelector::Last); } }, From 4bf18e92409d9beb0a60fd719b160a258bd74763 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 21 Apr 2026 16:20:57 +0200 Subject: [PATCH 07/29] Extract chanmon harness node operations Move the action helpers onto `HarnessNode` methods. Node-local operations now live with the state they mutate, which reduces argument threading through the fuzz loop. --- fuzz/src/chanmon_consistency.rs | 435 +++++++++++++++----------------- 1 file changed, 210 insertions(+), 225 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 86b89a8f95f..4872e23fca6 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -1053,6 +1053,22 @@ impl<'a> HarnessNode<'a> { self.persistence_style = style; } + fn complete_all_monitor_updates(&self, chan_id: &ChannelId) { + if let Some(state) = self.monitor.latest_monitors.lock().unwrap().get_mut(chan_id) { + assert!( + state.pending_monitors.windows(2).all(|pair| pair[0].0 < pair[1].0), + "updates should be sorted by id" + ); + for (id, data) in state.pending_monitors.drain(..) 
{ + self.monitor.chain_monitor.channel_monitor_updated(*chan_id, id).unwrap(); + if id > state.persisted_monitor_id { + state.persisted_monitor_id = id; + state.persisted_monitor = data; + } + } + } + } + fn complete_all_pending_monitor_updates(&self) { for (channel_id, state) in self.monitor.latest_monitors.lock().unwrap().iter_mut() { for (id, data) in state.pending_monitors.drain(..) { @@ -1065,12 +1081,160 @@ impl<'a> HarnessNode<'a> { } } + fn complete_monitor_update(&self, chan_id: &ChannelId, selector: MonitorUpdateSelector) { + if let Some(state) = self.monitor.latest_monitors.lock().unwrap().get_mut(chan_id) { + assert!( + state.pending_monitors.windows(2).all(|pair| pair[0].0 < pair[1].0), + "updates should be sorted by id" + ); + let update = match selector { + MonitorUpdateSelector::First => { + if state.pending_monitors.is_empty() { + None + } else { + Some(state.pending_monitors.remove(0)) + } + }, + MonitorUpdateSelector::Second => { + if state.pending_monitors.len() > 1 { + Some(state.pending_monitors.remove(1)) + } else { + None + } + }, + MonitorUpdateSelector::Last => state.pending_monitors.pop(), + }; + if let Some((id, data)) = update { + self.monitor.chain_monitor.channel_monitor_updated(*chan_id, id).unwrap(); + if id > state.persisted_monitor_id { + state.persisted_monitor_id = id; + state.persisted_monitor = data; + } + } + } + } + + fn sync_with_chain_state(&mut self, chain_state: &ChainState, num_blocks: Option) { + let target_height = if let Some(num_blocks) = num_blocks { + std::cmp::min(self.height + num_blocks, chain_state.tip_height()) + } else { + chain_state.tip_height() + }; + + while self.height < target_height { + self.height += 1; + let (header, txn) = chain_state.block_at(self.height); + let txdata: Vec<_> = txn.iter().enumerate().map(|(i, tx)| (i + 1, tx)).collect(); + if !txdata.is_empty() { + self.node.transactions_confirmed(header, &txdata, self.height); + } + self.node.best_block_updated(header, self.height); + } + } + fn 
refresh_serialized_manager(&mut self) { if self.node.get_and_clear_needs_persistence() { self.serialized_manager = self.node.encode(); } } + fn bump_fee_estimate(&mut self, chan_type: ChanType) { + let mut max_feerate = self.last_htlc_clear_fee; + if matches!(chan_type, ChanType::Legacy) { + max_feerate *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE as u32; + } + if self.fee_estimator.ret_val.fetch_add(250, atomic::Ordering::AcqRel) + 250 > max_feerate { + self.fee_estimator.ret_val.store(max_feerate, atomic::Ordering::Release); + } + self.node.timer_tick_occurred(); + } + + fn reset_fee_estimate(&self) { + self.fee_estimator.ret_val.store(253, atomic::Ordering::Release); + self.node.timer_tick_occurred(); + } + + fn current_feerate_sat_per_kw(&self) -> FeeRate { + self.fee_estimator.feerate_sat_per_kw() + } + + fn record_last_htlc_clear_fee(&mut self) { + self.last_htlc_clear_fee = self.fee_estimator.ret_val.load(atomic::Ordering::Acquire); + } + + fn splice_in(&self, counterparty_node_id: &PublicKey, channel_id: &ChannelId) { + let wallet = WalletSync::new(&self.wallet, Arc::clone(&self.logger)); + match self.node.splice_channel(channel_id, counterparty_node_id) { + Ok(funding_template) => { + let feerate = + funding_template.min_rbf_feerate().unwrap_or(self.current_feerate_sat_per_kw()); + if let Ok(contribution) = funding_template.splice_in_sync( + Amount::from_sat(10_000), + feerate, + FeeRate::MAX, + &wallet, + ) { + let _ = self.node.funding_contributed( + channel_id, + counterparty_node_id, + contribution, + None, + ); + } + }, + Err(e) => { + assert!( + matches!(e, APIError::APIMisuseError { ref err } if err.contains("splice")), + "{:?}", + e + ); + }, + } + } + + fn splice_out(&self, counterparty_node_id: &PublicKey, channel_id: &ChannelId) { + // We conditionally splice out `MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS` only when the node + // has double the balance required to send a payment upon a `0xff` byte. 
We do this to + // ensure there's always liquidity available for a payment to succeed then. + let outbound_capacity_msat = self + .node + .list_channels() + .iter() + .find(|chan| chan.channel_id == *channel_id) + .map(|chan| chan.outbound_capacity_msat) + .unwrap(); + if outbound_capacity_msat < 20_000_000 { + return; + } + match self.node.splice_channel(channel_id, counterparty_node_id) { + Ok(funding_template) => { + let feerate = + funding_template.min_rbf_feerate().unwrap_or(self.current_feerate_sat_per_kw()); + let outputs = vec![TxOut { + value: Amount::from_sat(MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS), + script_pubkey: self.wallet.get_change_script().unwrap(), + }]; + if let Ok(contribution) = + funding_template.splice_out(outputs, feerate, FeeRate::MAX) + { + let _ = self.node.funding_contributed( + channel_id, + counterparty_node_id, + contribution, + None, + ); + } + }, + Err(e) => { + assert!( + matches!(e, APIError::APIMisuseError { ref err } if err.contains("splice")), + "{:?}", + e + ); + }, + } + } + fn reload( &mut self, use_old_mons: u8, out: &Out, router: &'a FuzzRouter, chan_type: ChanType, ) { @@ -1161,140 +1325,6 @@ enum MonitorUpdateSelector { Last, } -fn complete_monitor_update( - monitor: &Arc, chan_id: &ChannelId, selector: MonitorUpdateSelector, -) { - if let Some(state) = monitor.latest_monitors.lock().unwrap().get_mut(chan_id) { - assert!( - state.pending_monitors.windows(2).all(|pair| pair[0].0 < pair[1].0), - "updates should be sorted by id" - ); - let update = match selector { - MonitorUpdateSelector::First => { - if state.pending_monitors.is_empty() { - None - } else { - Some(state.pending_monitors.remove(0)) - } - }, - MonitorUpdateSelector::Second => { - if state.pending_monitors.len() > 1 { - Some(state.pending_monitors.remove(1)) - } else { - None - } - }, - MonitorUpdateSelector::Last => state.pending_monitors.pop(), - }; - if let Some((id, data)) = update { - monitor.chain_monitor.channel_monitor_updated(*chan_id, id).unwrap(); - 
if id > state.persisted_monitor_id { - state.persisted_monitor_id = id; - state.persisted_monitor = data; - } - } - } -} - -fn complete_all_monitor_updates(monitor: &Arc, chan_id: &ChannelId) { - if let Some(state) = monitor.latest_monitors.lock().unwrap().get_mut(chan_id) { - assert!( - state.pending_monitors.windows(2).all(|pair| pair[0].0 < pair[1].0), - "updates should be sorted by id" - ); - for (id, data) in state.pending_monitors.drain(..) { - monitor.chain_monitor.channel_monitor_updated(*chan_id, id).unwrap(); - if id > state.persisted_monitor_id { - state.persisted_monitor_id = id; - state.persisted_monitor = data; - } - } - } -} - -fn sync_with_chain_state( - node: &mut HarnessNode<'_>, chain_state: &ChainState, num_blocks: Option, -) { - let target_height = if let Some(num_blocks) = num_blocks { - std::cmp::min(node.height + num_blocks, chain_state.tip_height()) - } else { - chain_state.tip_height() - }; - while node.height < target_height { - node.height += 1; - let (header, txn) = chain_state.block_at(node.height); - let txdata: Vec<_> = txn.iter().enumerate().map(|(i, tx)| (i + 1, tx)).collect(); - if !txdata.is_empty() { - node.transactions_confirmed(header, &txdata, node.height); - } - node.best_block_updated(header, node.height); - } -} - -fn splice_in(node: &HarnessNode<'_>, counterparty_node_id: &PublicKey, channel_id: &ChannelId) { - let wallet = WalletSync::new(&node.wallet, Arc::clone(&node.logger)); - match node.splice_channel(channel_id, counterparty_node_id) { - Ok(funding_template) => { - let feerate = funding_template - .min_rbf_feerate() - .unwrap_or(node.fee_estimator.feerate_sat_per_kw()); - if let Ok(contribution) = funding_template.splice_in_sync( - Amount::from_sat(10_000), - feerate, - FeeRate::MAX, - &wallet, - ) { - let _ = - node.funding_contributed(channel_id, counterparty_node_id, contribution, None); - } - }, - Err(e) => { - assert!( - matches!(e, APIError::APIMisuseError { ref err } if err.contains("splice")), - "{:?}", - e 
- ); - }, - } -} - -fn splice_out(node: &HarnessNode<'_>, counterparty_node_id: &PublicKey, channel_id: &ChannelId) { - // We conditionally splice out `MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS` only when the node - // has double the balance required to send a payment upon a `0xff` byte. We do this to - // ensure there's always liquidity available for a payment to succeed then. - let outbound_capacity_msat = node - .list_channels() - .iter() - .find(|chan| chan.channel_id == *channel_id) - .map(|chan| chan.outbound_capacity_msat) - .unwrap(); - if outbound_capacity_msat < 20_000_000 { - return; - } - match node.splice_channel(channel_id, counterparty_node_id) { - Ok(funding_template) => { - let feerate = funding_template - .min_rbf_feerate() - .unwrap_or(node.fee_estimator.feerate_sat_per_kw()); - let outputs = vec![TxOut { - value: Amount::from_sat(MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS), - script_pubkey: node.wallet.get_change_script().unwrap(), - }]; - if let Ok(contribution) = funding_template.splice_out(outputs, feerate, FeeRate::MAX) { - let _ = - node.funding_contributed(channel_id, counterparty_node_id, contribution, None); - } - }, - Err(e) => { - assert!( - matches!(e, APIError::APIMisuseError { ref err } if err.contains("splice")), - "{:?}", - e - ); - }, - } -} - fn build_node_config(chan_type: ChanType) -> UserConfig { let mut config = UserConfig::default(); config.channel_config.forwarding_fee_proportional_millionths = 0; @@ -1634,9 +1664,9 @@ pub fn do_test(data: &[u8], out: Out) { nodes[2].broadcaster.txn_broadcasted.borrow_mut().clear(); // Sync all nodes to tip to lock the funding. 
- sync_with_chain_state(&mut nodes[0], &chain_state, None); - sync_with_chain_state(&mut nodes[1], &chain_state, None); - sync_with_chain_state(&mut nodes[2], &chain_state, None); + nodes[0].sync_with_chain_state(&chain_state, None); + nodes[1].sync_with_chain_state(&chain_state, None); + nodes[2].sync_with_chain_state(&chain_state, None); lock_fundings(&nodes); @@ -2321,22 +2351,22 @@ pub fn do_test(data: &[u8], out: Out) { 0x08 => { for id in &chan_ab_ids { - complete_all_monitor_updates(&nodes[0].monitor, id); + nodes[0].complete_all_monitor_updates(id); } }, 0x09 => { for id in &chan_ab_ids { - complete_all_monitor_updates(&nodes[1].monitor, id); + nodes[1].complete_all_monitor_updates(id); } }, 0x0a => { for id in &chan_bc_ids { - complete_all_monitor_updates(&nodes[1].monitor, id); + nodes[1].complete_all_monitor_updates(id); } }, 0x0b => { for id in &chan_bc_ids { - complete_all_monitor_updates(&nodes[2].monitor, id); + nodes[2].complete_all_monitor_updates(id); } }, @@ -2492,82 +2522,40 @@ pub fn do_test(data: &[u8], out: Out) { send_mpp_direct(0, 1, &[chan_a_id, chan_a_id, chan_a_id], 1_000_000, &mut p_ctr) }, - 0x80 => { - let mut max_feerate = nodes[0].last_htlc_clear_fee; - if matches!(chan_type, ChanType::Legacy) { - max_feerate *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE as u32; - } - if nodes[0].fee_estimator.ret_val.fetch_add(250, atomic::Ordering::AcqRel) + 250 - > max_feerate - { - nodes[0].fee_estimator.ret_val.store(max_feerate, atomic::Ordering::Release); - } - nodes[0].timer_tick_occurred(); - }, - 0x81 => { - nodes[0].fee_estimator.ret_val.store(253, atomic::Ordering::Release); - nodes[0].timer_tick_occurred(); - }, - 0x84 => { - let mut max_feerate = nodes[1].last_htlc_clear_fee; - if matches!(chan_type, ChanType::Legacy) { - max_feerate *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE as u32; - } - if nodes[1].fee_estimator.ret_val.fetch_add(250, atomic::Ordering::AcqRel) + 250 - > max_feerate - { - nodes[1].fee_estimator.ret_val.store(max_feerate, 
atomic::Ordering::Release); - } - nodes[1].timer_tick_occurred(); - }, - 0x85 => { - nodes[1].fee_estimator.ret_val.store(253, atomic::Ordering::Release); - nodes[1].timer_tick_occurred(); - }, - 0x88 => { - let mut max_feerate = nodes[2].last_htlc_clear_fee; - if matches!(chan_type, ChanType::Legacy) { - max_feerate *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE as u32; - } - if nodes[2].fee_estimator.ret_val.fetch_add(250, atomic::Ordering::AcqRel) + 250 - > max_feerate - { - nodes[2].fee_estimator.ret_val.store(max_feerate, atomic::Ordering::Release); - } - nodes[2].timer_tick_occurred(); - }, - 0x89 => { - nodes[2].fee_estimator.ret_val.store(253, atomic::Ordering::Release); - nodes[2].timer_tick_occurred(); - }, + 0x80 => nodes[0].bump_fee_estimate(chan_type), + 0x81 => nodes[0].reset_fee_estimate(), + 0x84 => nodes[1].bump_fee_estimate(chan_type), + 0x85 => nodes[1].reset_fee_estimate(), + 0x88 => nodes[2].bump_fee_estimate(chan_type), + 0x89 => nodes[2].reset_fee_estimate(), 0xa0 => { if !cfg!(splicing) { test_return!(); } let cp_node_id = nodes[1].get_our_node_id(); - splice_in(&nodes[0], &cp_node_id, &chan_a_id); + nodes[0].splice_in(&cp_node_id, &chan_a_id); }, 0xa1 => { if !cfg!(splicing) { test_return!(); } let cp_node_id = nodes[0].get_our_node_id(); - splice_in(&nodes[1], &cp_node_id, &chan_a_id); + nodes[1].splice_in(&cp_node_id, &chan_a_id); }, 0xa2 => { if !cfg!(splicing) { test_return!(); } let cp_node_id = nodes[2].get_our_node_id(); - splice_in(&nodes[1], &cp_node_id, &chan_b_id); + nodes[1].splice_in(&cp_node_id, &chan_b_id); }, 0xa3 => { if !cfg!(splicing) { test_return!(); } let cp_node_id = nodes[1].get_our_node_id(); - splice_in(&nodes[2], &cp_node_id, &chan_b_id); + nodes[2].splice_in(&cp_node_id, &chan_b_id); }, 0xa4 => { @@ -2575,55 +2563,55 @@ pub fn do_test(data: &[u8], out: Out) { test_return!(); } let cp_node_id = nodes[1].get_our_node_id(); - splice_out(&nodes[0], &cp_node_id, &chan_a_id); + nodes[0].splice_out(&cp_node_id, &chan_a_id); 
}, 0xa5 => { if !cfg!(splicing) { test_return!(); } let cp_node_id = nodes[0].get_our_node_id(); - splice_out(&nodes[1], &cp_node_id, &chan_a_id); + nodes[1].splice_out(&cp_node_id, &chan_a_id); }, 0xa6 => { if !cfg!(splicing) { test_return!(); } let cp_node_id = nodes[2].get_our_node_id(); - splice_out(&nodes[1], &cp_node_id, &chan_b_id); + nodes[1].splice_out(&cp_node_id, &chan_b_id); }, 0xa7 => { if !cfg!(splicing) { test_return!(); } let cp_node_id = nodes[1].get_our_node_id(); - splice_out(&nodes[2], &cp_node_id, &chan_b_id); + nodes[2].splice_out(&cp_node_id, &chan_b_id); }, // Sync node by 1 block to cover confirmation of a transaction. 0xa8 => { chain_state.confirm_pending_txs(); - sync_with_chain_state(&mut nodes[0], &chain_state, Some(1)); + nodes[0].sync_with_chain_state(&chain_state, Some(1)); }, 0xa9 => { chain_state.confirm_pending_txs(); - sync_with_chain_state(&mut nodes[1], &chain_state, Some(1)); + nodes[1].sync_with_chain_state(&chain_state, Some(1)); }, 0xaa => { chain_state.confirm_pending_txs(); - sync_with_chain_state(&mut nodes[2], &chain_state, Some(1)); + nodes[2].sync_with_chain_state(&chain_state, Some(1)); }, // Sync node to chain tip to cover confirmation of a transaction post-reorg-risk. 
0xab => { chain_state.confirm_pending_txs(); - sync_with_chain_state(&mut nodes[0], &chain_state, None); + nodes[0].sync_with_chain_state(&chain_state, None); }, 0xac => { chain_state.confirm_pending_txs(); - sync_with_chain_state(&mut nodes[1], &chain_state, None); + nodes[1].sync_with_chain_state(&chain_state, None); }, 0xad => { chain_state.confirm_pending_txs(); - sync_with_chain_state(&mut nodes[2], &chain_state, None); + nodes[2].sync_with_chain_state(&chain_state, None); }, 0xb0 | 0xb1 | 0xb2 => { @@ -2744,65 +2732,65 @@ pub fn do_test(data: &[u8], out: Out) { 0xf0 => { for id in &chan_ab_ids { - complete_monitor_update(&nodes[0].monitor, id, MonitorUpdateSelector::First); + nodes[0].complete_monitor_update(id, MonitorUpdateSelector::First); } }, 0xf1 => { for id in &chan_ab_ids { - complete_monitor_update(&nodes[0].monitor, id, MonitorUpdateSelector::Second); + nodes[0].complete_monitor_update(id, MonitorUpdateSelector::Second); } }, 0xf2 => { for id in &chan_ab_ids { - complete_monitor_update(&nodes[0].monitor, id, MonitorUpdateSelector::Last); + nodes[0].complete_monitor_update(id, MonitorUpdateSelector::Last); } }, 0xf4 => { for id in &chan_ab_ids { - complete_monitor_update(&nodes[1].monitor, id, MonitorUpdateSelector::First); + nodes[1].complete_monitor_update(id, MonitorUpdateSelector::First); } }, 0xf5 => { for id in &chan_ab_ids { - complete_monitor_update(&nodes[1].monitor, id, MonitorUpdateSelector::Second); + nodes[1].complete_monitor_update(id, MonitorUpdateSelector::Second); } }, 0xf6 => { for id in &chan_ab_ids { - complete_monitor_update(&nodes[1].monitor, id, MonitorUpdateSelector::Last); + nodes[1].complete_monitor_update(id, MonitorUpdateSelector::Last); } }, 0xf8 => { for id in &chan_bc_ids { - complete_monitor_update(&nodes[1].monitor, id, MonitorUpdateSelector::First); + nodes[1].complete_monitor_update(id, MonitorUpdateSelector::First); } }, 0xf9 => { for id in &chan_bc_ids { - complete_monitor_update(&nodes[1].monitor, id, 
MonitorUpdateSelector::Second); + nodes[1].complete_monitor_update(id, MonitorUpdateSelector::Second); } }, 0xfa => { for id in &chan_bc_ids { - complete_monitor_update(&nodes[1].monitor, id, MonitorUpdateSelector::Last); + nodes[1].complete_monitor_update(id, MonitorUpdateSelector::Last); } }, 0xfc => { for id in &chan_bc_ids { - complete_monitor_update(&nodes[2].monitor, id, MonitorUpdateSelector::First); + nodes[2].complete_monitor_update(id, MonitorUpdateSelector::First); } }, 0xfd => { for id in &chan_bc_ids { - complete_monitor_update(&nodes[2].monitor, id, MonitorUpdateSelector::Second); + nodes[2].complete_monitor_update(id, MonitorUpdateSelector::Second); } }, 0xfe => { for id in &chan_bc_ids { - complete_monitor_update(&nodes[2].monitor, id, MonitorUpdateSelector::Last); + nodes[2].complete_monitor_update(id, MonitorUpdateSelector::Last); } }, @@ -2860,12 +2848,12 @@ pub fn do_test(data: &[u8], out: Out) { } // Next, make sure no monitor updates are pending for id in &chan_ab_ids { - complete_all_monitor_updates(&nodes[0].monitor, id); - complete_all_monitor_updates(&nodes[1].monitor, id); + nodes[0].complete_all_monitor_updates(id); + nodes[1].complete_all_monitor_updates(id); } for id in &chan_bc_ids { - complete_all_monitor_updates(&nodes[1].monitor, id); - complete_all_monitor_updates(&nodes[2].monitor, id); + nodes[1].complete_all_monitor_updates(id); + nodes[2].complete_all_monitor_updates(id); } // Then, make sure any current forwards make their way to their destination if process_msg_events!(0, false, ProcessMessages::AllMessages) { @@ -2956,12 +2944,9 @@ pub fn do_test(data: &[u8], out: Out) { ); } - nodes[0].last_htlc_clear_fee = - nodes[0].fee_estimator.ret_val.load(atomic::Ordering::Acquire); - nodes[1].last_htlc_clear_fee = - nodes[1].fee_estimator.ret_val.load(atomic::Ordering::Acquire); - nodes[2].last_htlc_clear_fee = - nodes[2].fee_estimator.ret_val.load(atomic::Ordering::Acquire); + nodes[0].record_last_htlc_clear_fee(); + 
nodes[1].record_last_htlc_clear_fee(); + nodes[2].record_last_htlc_clear_fee(); }, _ => test_return!(), } From 60bff763e91252f8ce2a80ff9b777f2f3eaaf8a4 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 28 Apr 2026 11:40:02 +0200 Subject: [PATCH 08/29] Route chanmon messages through EventQueues Replace the four directional message vectors with one queue owner. The fuzz loop now uses that owner at send, receive, drain, and reload sites while preserving the existing routing behavior. --- fuzz/src/chanmon_consistency.rs | 506 ++++++++++++++++++++++++-------- 1 file changed, 385 insertions(+), 121 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 4872e23fca6..67f5bf9db2e 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -1325,6 +1325,19 @@ enum MonitorUpdateSelector { Last, } +struct EventQueues { + ab: Vec, + ba: Vec, + bc: Vec, + cb: Vec, +} + +impl EventQueues { + fn new() -> Self { + Self { ab: Vec::new(), ba: Vec::new(), bc: Vec::new(), cb: Vec::new() } + } +} + fn build_node_config(chan_type: ChanType) -> UserConfig { let mut config = UserConfig::default(); config.channel_config.forwarding_fee_proportional_millionths = 0; @@ -1680,18 +1693,12 @@ pub fn do_test(data: &[u8], out: Out) { let node_c_chans = nodes[2].list_usable_channels(); [node_c_chans[0].channel_id, node_c_chans[1].channel_id, node_c_chans[2].channel_id] }; - // Keep old names for backward compatibility in existing code let chan_a_id = chan_ab_ids[0]; let chan_b_id = chan_bc_ids[0]; - - let mut p_ctr: u64 = 0; - let mut peers_ab_disconnected = false; let mut peers_bc_disconnected = false; - let mut ab_events = Vec::new(); - let mut ba_events = Vec::new(); - let mut bc_events = Vec::new(); - let mut cb_events = Vec::new(); + let mut queues = EventQueues::new(); + let mut p_ctr: u64 = 0; for node in &mut nodes { node.serialized_manager = node.encode(); @@ -1724,95 +1731,175 @@ pub fn do_test(data: &[u8], out: Out) 
{ } loop { - // Push any events from Node B onto ba_events and bc_events + // Push any events from Node B onto queues.ba and queues.bc macro_rules! push_excess_b_events { - ($excess_events: expr, $expect_drop_node: expr) => { { + ($excess_events: expr, $expect_drop_node: expr) => {{ let a_id = nodes[0].get_our_node_id(); let expect_drop_node: Option = $expect_drop_node; - let expect_drop_id = if let Some(id) = expect_drop_node { Some(nodes[id].get_our_node_id()) } else { None }; + let expect_drop_id = if let Some(id) = expect_drop_node { + Some(nodes[id].get_our_node_id()) + } else { + None + }; for event in $excess_events { let push_a = match event { MessageSendEvent::UpdateHTLCs { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } + if Some(*node_id) == expect_drop_id { + panic!( + "peer_disconnected should drop msgs bound for the disconnected peer" + ); + } *node_id == a_id }, MessageSendEvent::SendRevokeAndACK { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } + if Some(*node_id) == expect_drop_id { + panic!( + "peer_disconnected should drop msgs bound for the disconnected peer" + ); + } *node_id == a_id }, MessageSendEvent::SendChannelReestablish { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } + if Some(*node_id) == expect_drop_id { + panic!( + "peer_disconnected should drop msgs bound for the disconnected peer" + ); + } *node_id == a_id }, MessageSendEvent::SendStfu { ref node_id, .. 
} => { - if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } + if Some(*node_id) == expect_drop_id { + panic!( + "peer_disconnected should drop msgs bound for the disconnected peer" + ); + } *node_id == a_id }, MessageSendEvent::SendSpliceInit { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } + if Some(*node_id) == expect_drop_id { + panic!( + "peer_disconnected should drop msgs bound for the disconnected peer" + ); + } *node_id == a_id }, MessageSendEvent::SendSpliceAck { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } + if Some(*node_id) == expect_drop_id { + panic!( + "peer_disconnected should drop msgs bound for the disconnected peer" + ); + } *node_id == a_id }, MessageSendEvent::SendSpliceLocked { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } + if Some(*node_id) == expect_drop_id { + panic!( + "peer_disconnected should drop msgs bound for the disconnected peer" + ); + } *node_id == a_id }, MessageSendEvent::SendTxAddInput { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } + if Some(*node_id) == expect_drop_id { + panic!( + "peer_disconnected should drop msgs bound for the disconnected peer" + ); + } *node_id == a_id }, MessageSendEvent::SendTxAddOutput { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } + if Some(*node_id) == expect_drop_id { + panic!( + "peer_disconnected should drop msgs bound for the disconnected peer" + ); + } *node_id == a_id }, MessageSendEvent::SendTxRemoveInput { ref node_id, .. 
} => { - if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } + if Some(*node_id) == expect_drop_id { + panic!( + "peer_disconnected should drop msgs bound for the disconnected peer" + ); + } *node_id == a_id }, MessageSendEvent::SendTxRemoveOutput { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } + if Some(*node_id) == expect_drop_id { + panic!( + "peer_disconnected should drop msgs bound for the disconnected peer" + ); + } *node_id == a_id }, MessageSendEvent::SendTxComplete { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } + if Some(*node_id) == expect_drop_id { + panic!( + "peer_disconnected should drop msgs bound for the disconnected peer" + ); + } *node_id == a_id }, MessageSendEvent::SendTxAbort { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } + if Some(*node_id) == expect_drop_id { + panic!( + "peer_disconnected should drop msgs bound for the disconnected peer" + ); + } *node_id == a_id }, MessageSendEvent::SendTxInitRbf { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } + if Some(*node_id) == expect_drop_id { + panic!( + "peer_disconnected should drop msgs bound for the disconnected peer" + ); + } *node_id == a_id }, MessageSendEvent::SendTxAckRbf { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } + if Some(*node_id) == expect_drop_id { + panic!( + "peer_disconnected should drop msgs bound for the disconnected peer" + ); + } *node_id == a_id }, MessageSendEvent::SendTxSignatures { ref node_id, .. 
} => { - if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } + if Some(*node_id) == expect_drop_id { + panic!( + "peer_disconnected should drop msgs bound for the disconnected peer" + ); + } *node_id == a_id }, MessageSendEvent::SendChannelReady { .. } => continue, MessageSendEvent::SendAnnouncementSignatures { .. } => continue, MessageSendEvent::BroadcastChannelUpdate { .. } => continue, MessageSendEvent::SendChannelUpdate { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } + if Some(*node_id) == expect_drop_id { + panic!( + "peer_disconnected should drop msgs bound for the disconnected peer" + ); + } *node_id == a_id }, MessageSendEvent::HandleError { ref action, ref node_id } => { assert_action_timeout_awaiting_response(action); - if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } + if Some(*node_id) == expect_drop_id { + panic!( + "peer_disconnected should drop msgs bound for the disconnected peer" + ); + } *node_id == a_id }, _ => panic!("Unhandled message event {:?}", event), }; - if push_a { ba_events.push(event); } else { bc_events.push(event); } + if push_a { + queues.ba.push(event); + } else { + queues.bc.push(event); + } } - } } + }}; } // While delivering messages, we select across three possible message selection processes @@ -1833,20 +1920,20 @@ pub fn do_test(data: &[u8], out: Out) { } macro_rules! 
process_msg_events { - ($node: expr, $corrupt_forward: expr, $limit_events: expr) => { { + ($node: expr, $corrupt_forward: expr, $limit_events: expr) => {{ let mut events = if $node == 1 { let mut new_events = Vec::new(); - mem::swap(&mut new_events, &mut ba_events); - new_events.extend_from_slice(&bc_events[..]); - bc_events.clear(); + mem::swap(&mut new_events, &mut queues.ba); + new_events.extend_from_slice(&queues.bc[..]); + queues.bc.clear(); new_events } else if $node == 0 { let mut new_events = Vec::new(); - mem::swap(&mut new_events, &mut ab_events); + mem::swap(&mut new_events, &mut queues.ab); new_events } else { let mut new_events = Vec::new(); - mem::swap(&mut new_events, &mut cb_events); + mem::swap(&mut new_events, &mut queues.cb); new_events }; let mut new_events = Vec::new(); @@ -1859,13 +1946,35 @@ pub fn do_test(data: &[u8], out: Out) { for event in &mut events_iter { had_events = true; match event { - MessageSendEvent::UpdateHTLCs { node_id, channel_id, updates: CommitmentUpdate { update_add_htlcs, update_fail_htlcs, update_fulfill_htlcs, update_fail_malformed_htlcs, update_fee, commitment_signed } } => { + MessageSendEvent::UpdateHTLCs { + node_id, + channel_id, + updates: + CommitmentUpdate { + update_add_htlcs, + update_fail_htlcs, + update_fulfill_htlcs, + update_fail_malformed_htlcs, + update_fee, + commitment_signed, + }, + } => { for (idx, dest) in nodes.iter().enumerate() { if dest.get_our_node_id() == node_id { for update_add in update_add_htlcs.iter() { - out.locked_write(format!("Delivering update_add_htlc from node {} to node {}.\n", $node, idx).as_bytes()); + out.locked_write( + format!( + "Delivering update_add_htlc from node {} to node {}.\n", + $node, + idx + ) + .as_bytes(), + ); if !$corrupt_forward { - dest.handle_update_add_htlc(nodes[$node].get_our_node_id(), update_add); + dest.handle_update_add_htlc( + nodes[$node].get_our_node_id(), + update_add, + ); } else { // Corrupt the update_add_htlc message so that its HMAC // 
check will fail and we generate a @@ -1873,42 +1982,105 @@ pub fn do_test(data: &[u8], out: Out) { // update_fail_htlc as we do when we reject a payment. let mut msg_ser = update_add.encode(); msg_ser[1000] ^= 0xff; - let new_msg = UpdateAddHTLC::read_from_fixed_length_buffer(&mut &msg_ser[..]).unwrap(); - dest.handle_update_add_htlc(nodes[$node].get_our_node_id(), &new_msg); + let new_msg = + UpdateAddHTLC::read_from_fixed_length_buffer( + &mut &msg_ser[..], + ) + .unwrap(); + dest.handle_update_add_htlc( + nodes[$node].get_our_node_id(), + &new_msg, + ); } } - let processed_change = !update_add_htlcs.is_empty() || !update_fulfill_htlcs.is_empty() || - !update_fail_htlcs.is_empty() || !update_fail_malformed_htlcs.is_empty(); + let processed_change = !update_add_htlcs.is_empty() + || !update_fulfill_htlcs.is_empty() + || !update_fail_htlcs.is_empty() + || !update_fail_malformed_htlcs.is_empty(); for update_fulfill in update_fulfill_htlcs { - out.locked_write(format!("Delivering update_fulfill_htlc from node {} to node {}.\n", $node, idx).as_bytes()); - dest.handle_update_fulfill_htlc(nodes[$node].get_our_node_id(), update_fulfill); + out.locked_write( + format!( + "Delivering update_fulfill_htlc from node {} to node {}.\n", + $node, + idx + ) + .as_bytes(), + ); + dest.handle_update_fulfill_htlc( + nodes[$node].get_our_node_id(), + update_fulfill, + ); } for update_fail in update_fail_htlcs.iter() { - out.locked_write(format!("Delivering update_fail_htlc from node {} to node {}.\n", $node, idx).as_bytes()); - dest.handle_update_fail_htlc(nodes[$node].get_our_node_id(), update_fail); + out.locked_write( + format!( + "Delivering update_fail_htlc from node {} to node {}.\n", + $node, + idx + ) + .as_bytes(), + ); + dest.handle_update_fail_htlc( + nodes[$node].get_our_node_id(), + update_fail, + ); } for update_fail_malformed in update_fail_malformed_htlcs.iter() { - out.locked_write(format!("Delivering update_fail_malformed_htlc from node {} to node {}.\n", $node, 
idx).as_bytes()); - dest.handle_update_fail_malformed_htlc(nodes[$node].get_our_node_id(), update_fail_malformed); + out.locked_write( + format!( + "Delivering update_fail_malformed_htlc from node {} to node {}.\n", + $node, + idx + ) + .as_bytes(), + ); + dest.handle_update_fail_malformed_htlc( + nodes[$node].get_our_node_id(), + update_fail_malformed, + ); } if let Some(msg) = update_fee { - out.locked_write(format!("Delivering update_fee from node {} to node {}.\n", $node, idx).as_bytes()); + out.locked_write( + format!( + "Delivering update_fee from node {} to node {}.\n", + $node, + idx + ) + .as_bytes(), + ); dest.handle_update_fee(nodes[$node].get_our_node_id(), &msg); } - if $limit_events != ProcessMessages::AllMessages && processed_change { - // If we only want to process some messages, don't deliver the CS until later. - extra_ev = Some(MessageSendEvent::UpdateHTLCs { node_id, channel_id, updates: CommitmentUpdate { - update_add_htlcs: Vec::new(), - update_fail_htlcs: Vec::new(), - update_fulfill_htlcs: Vec::new(), - update_fail_malformed_htlcs: Vec::new(), - update_fee: None, - commitment_signed - } }); + if $limit_events != ProcessMessages::AllMessages + && processed_change + { + // If we only want to process some messages, don't deliver the + // CS until later. 
+ extra_ev = Some(MessageSendEvent::UpdateHTLCs { + node_id, + channel_id, + updates: CommitmentUpdate { + update_add_htlcs: Vec::new(), + update_fail_htlcs: Vec::new(), + update_fulfill_htlcs: Vec::new(), + update_fail_malformed_htlcs: Vec::new(), + update_fee: None, + commitment_signed, + }, + }); break; } - out.locked_write(format!("Delivering commitment_signed from node {} to node {}.\n", $node, idx).as_bytes()); - dest.handle_commitment_signed_batch_test(nodes[$node].get_our_node_id(), &commitment_signed); + out.locked_write( + format!( + "Delivering commitment_signed from node {} to node {}.\n", + $node, + idx + ) + .as_bytes(), + ); + dest.handle_commitment_signed_batch_test( + nodes[$node].get_our_node_id(), + &commitment_signed, + ); break; } } @@ -1916,7 +2088,14 @@ pub fn do_test(data: &[u8], out: Out) { MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => { for (idx, dest) in nodes.iter().enumerate() { if dest.get_our_node_id() == *node_id { - out.locked_write(format!("Delivering revoke_and_ack from node {} to node {}.\n", $node, idx).as_bytes()); + out.locked_write( + format!( + "Delivering revoke_and_ack from node {} to node {}.\n", + $node, + idx + ) + .as_bytes(), + ); dest.handle_revoke_and_ack(nodes[$node].get_our_node_id(), msg); } } @@ -1924,15 +2103,28 @@ pub fn do_test(data: &[u8], out: Out) { MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => { for (idx, dest) in nodes.iter().enumerate() { if dest.get_our_node_id() == *node_id { - out.locked_write(format!("Delivering channel_reestablish from node {} to node {}.\n", $node, idx).as_bytes()); - dest.handle_channel_reestablish(nodes[$node].get_our_node_id(), msg); + out.locked_write( + format!( + "Delivering channel_reestablish from node {} to node {}.\n", + $node, + idx + ) + .as_bytes(), + ); + dest.handle_channel_reestablish( + nodes[$node].get_our_node_id(), + msg, + ); } } }, MessageSendEvent::SendStfu { ref node_id, ref msg } => { for (idx, dest) in 
nodes.iter().enumerate() { if dest.get_our_node_id() == *node_id { - out.locked_write(format!("Delivering stfu from node {} to node {}.\n", $node, idx).as_bytes()); + out.locked_write( + format!("Delivering stfu from node {} to node {}.\n", $node, idx) + .as_bytes(), + ); dest.handle_stfu(nodes[$node].get_our_node_id(), msg); } } @@ -1940,7 +2132,14 @@ pub fn do_test(data: &[u8], out: Out) { MessageSendEvent::SendTxAddInput { ref node_id, ref msg } => { for (idx, dest) in nodes.iter().enumerate() { if dest.get_our_node_id() == *node_id { - out.locked_write(format!("Delivering tx_add_input from node {} to node {}.\n", $node, idx).as_bytes()); + out.locked_write( + format!( + "Delivering tx_add_input from node {} to node {}.\n", + $node, + idx + ) + .as_bytes(), + ); dest.handle_tx_add_input(nodes[$node].get_our_node_id(), msg); } } @@ -1948,7 +2147,14 @@ pub fn do_test(data: &[u8], out: Out) { MessageSendEvent::SendTxAddOutput { ref node_id, ref msg } => { for (idx, dest) in nodes.iter().enumerate() { if dest.get_our_node_id() == *node_id { - out.locked_write(format!("Delivering tx_add_output from node {} to node {}.\n", $node, idx).as_bytes()); + out.locked_write( + format!( + "Delivering tx_add_output from node {} to node {}.\n", + $node, + idx + ) + .as_bytes(), + ); dest.handle_tx_add_output(nodes[$node].get_our_node_id(), msg); } } @@ -1956,7 +2162,14 @@ pub fn do_test(data: &[u8], out: Out) { MessageSendEvent::SendTxRemoveInput { ref node_id, ref msg } => { for (idx, dest) in nodes.iter().enumerate() { if dest.get_our_node_id() == *node_id { - out.locked_write(format!("Delivering tx_remove_input from node {} to node {}.\n", $node, idx).as_bytes()); + out.locked_write( + format!( + "Delivering tx_remove_input from node {} to node {}.\n", + $node, + idx + ) + .as_bytes(), + ); dest.handle_tx_remove_input(nodes[$node].get_our_node_id(), msg); } } @@ -1964,7 +2177,14 @@ pub fn do_test(data: &[u8], out: Out) { MessageSendEvent::SendTxRemoveOutput { ref node_id, ref 
msg } => { for (idx, dest) in nodes.iter().enumerate() { if dest.get_our_node_id() == *node_id { - out.locked_write(format!("Delivering tx_remove_output from node {} to node {}.\n", $node, idx).as_bytes()); + out.locked_write( + format!( + "Delivering tx_remove_output from node {} to node {}.\n", + $node, + idx + ) + .as_bytes(), + ); dest.handle_tx_remove_output(nodes[$node].get_our_node_id(), msg); } } @@ -1972,7 +2192,14 @@ pub fn do_test(data: &[u8], out: Out) { MessageSendEvent::SendTxComplete { ref node_id, ref msg } => { for (idx, dest) in nodes.iter().enumerate() { if dest.get_our_node_id() == *node_id { - out.locked_write(format!("Delivering tx_complete from node {} to node {}.\n", $node, idx).as_bytes()); + out.locked_write( + format!( + "Delivering tx_complete from node {} to node {}.\n", + $node, + idx + ) + .as_bytes(), + ); dest.handle_tx_complete(nodes[$node].get_our_node_id(), msg); } } @@ -1980,7 +2207,14 @@ pub fn do_test(data: &[u8], out: Out) { MessageSendEvent::SendTxAbort { ref node_id, ref msg } => { for (idx, dest) in nodes.iter().enumerate() { if dest.get_our_node_id() == *node_id { - out.locked_write(format!("Delivering tx_abort from node {} to node {}.\n", $node, idx).as_bytes()); + out.locked_write( + format!( + "Delivering tx_abort from node {} to node {}.\n", + $node, + idx + ) + .as_bytes(), + ); dest.handle_tx_abort(nodes[$node].get_our_node_id(), msg); } } @@ -1988,7 +2222,14 @@ pub fn do_test(data: &[u8], out: Out) { MessageSendEvent::SendTxInitRbf { ref node_id, ref msg } => { for (idx, dest) in nodes.iter().enumerate() { if dest.get_our_node_id() == *node_id { - out.locked_write(format!("Delivering tx_init_rbf from node {} to node {}.\n", $node, idx).as_bytes()); + out.locked_write( + format!( + "Delivering tx_init_rbf from node {} to node {}.\n", + $node, + idx + ) + .as_bytes(), + ); dest.handle_tx_init_rbf(nodes[$node].get_our_node_id(), msg); } } @@ -1996,7 +2237,14 @@ pub fn do_test(data: &[u8], out: Out) { 
MessageSendEvent::SendTxAckRbf { ref node_id, ref msg } => { for (idx, dest) in nodes.iter().enumerate() { if dest.get_our_node_id() == *node_id { - out.locked_write(format!("Delivering tx_ack_rbf from node {} to node {}.\n", $node, idx).as_bytes()); + out.locked_write( + format!( + "Delivering tx_ack_rbf from node {} to node {}.\n", + $node, + idx + ) + .as_bytes(), + ); dest.handle_tx_ack_rbf(nodes[$node].get_our_node_id(), msg); } } @@ -2004,7 +2252,14 @@ pub fn do_test(data: &[u8], out: Out) { MessageSendEvent::SendTxSignatures { ref node_id, ref msg } => { for (idx, dest) in nodes.iter().enumerate() { if dest.get_our_node_id() == *node_id { - out.locked_write(format!("Delivering tx_signatures from node {} to node {}.\n", $node, idx).as_bytes()); + out.locked_write( + format!( + "Delivering tx_signatures from node {} to node {}.\n", + $node, + idx + ) + .as_bytes(), + ); dest.handle_tx_signatures(nodes[$node].get_our_node_id(), msg); } } @@ -2012,7 +2267,14 @@ pub fn do_test(data: &[u8], out: Out) { MessageSendEvent::SendSpliceInit { ref node_id, ref msg } => { for (idx, dest) in nodes.iter().enumerate() { if dest.get_our_node_id() == *node_id { - out.locked_write(format!("Delivering splice_init from node {} to node {}.\n", $node, idx).as_bytes()); + out.locked_write( + format!( + "Delivering splice_init from node {} to node {}.\n", + $node, + idx + ) + .as_bytes(), + ); dest.handle_splice_init(nodes[$node].get_our_node_id(), msg); } } @@ -2020,7 +2282,14 @@ pub fn do_test(data: &[u8], out: Out) { MessageSendEvent::SendSpliceAck { ref node_id, ref msg } => { for (idx, dest) in nodes.iter().enumerate() { if dest.get_our_node_id() == *node_id { - out.locked_write(format!("Delivering splice_ack from node {} to node {}.\n", $node, idx).as_bytes()); + out.locked_write( + format!( + "Delivering splice_ack from node {} to node {}.\n", + $node, + idx + ) + .as_bytes(), + ); dest.handle_splice_ack(nodes[$node].get_our_node_id(), msg); } } @@ -2028,7 +2297,14 @@ pub fn 
do_test(data: &[u8], out: Out) { MessageSendEvent::SendSpliceLocked { ref node_id, ref msg } => { for (idx, dest) in nodes.iter().enumerate() { if dest.get_our_node_id() == *node_id { - out.locked_write(format!("Delivering splice_locked from node {} to node {}.\n", $node, idx).as_bytes()); + out.locked_write( + format!( + "Delivering splice_locked from node {} to node {}.\n", + $node, + idx + ) + .as_bytes(), + ); dest.handle_splice_locked(nodes[$node].get_our_node_id(), msg); } } @@ -2058,14 +2334,22 @@ pub fn do_test(data: &[u8], out: Out) { if $node == 1 { push_excess_b_events!(extra_ev.into_iter().chain(events_iter), None); } else if $node == 0 { - if let Some(ev) = extra_ev { ab_events.push(ev); } - for event in events_iter { ab_events.push(event); } + if let Some(ev) = extra_ev { + queues.ab.push(ev); + } + for event in events_iter { + queues.ab.push(event); + } } else { - if let Some(ev) = extra_ev { cb_events.push(ev); } - for event in events_iter { cb_events.push(event); } + if let Some(ev) = extra_ev { + queues.cb.push(ev); + } + for event in events_iter { + queues.cb.push(event); + } } had_events - } } + }}; } macro_rules! 
process_msg_noret { @@ -2097,8 +2381,8 @@ pub fn do_test(data: &[u8], out: Out) { nodes[1].get_and_clear_pending_msg_events().drain(..), Some(0) ); - ab_events.clear(); - ba_events.clear(); + queues.ab.clear(); + queues.ba.clear(); } else { for event in nodes[2].get_and_clear_pending_msg_events() { match event { @@ -2120,8 +2404,8 @@ pub fn do_test(data: &[u8], out: Out) { nodes[1].get_and_clear_pending_msg_events().drain(..), Some(2) ); - bc_events.clear(); - cb_events.clear(); + queues.bc.clear(); + queues.cb.clear(); } }}; } @@ -2289,7 +2573,6 @@ pub fn do_test(data: &[u8], out: Out) { } }; - // Direct MPP payment (no hop) let send_mpp_direct = |source_idx: usize, dest_idx: usize, dest_chan_ids: &[ChannelId], @@ -2306,7 +2589,6 @@ pub fn do_test(data: &[u8], out: Out) { } }; - // MPP payment via hop - splits payment across multiple channels on either or both hops let send_mpp_hop = |source_idx: usize, middle_idx: usize, middle_chan_ids: &[ChannelId], @@ -2615,8 +2897,6 @@ pub fn do_test(data: &[u8], out: Out) { }, 0xb0 | 0xb1 | 0xb2 => { - // Restart node A, picking among the in-flight `ChannelMonitor`s to use based on - // the value of `v` we're matching. if !peers_ab_disconnected { nodes[1].peer_disconnected(nodes[0].get_our_node_id()); peers_ab_disconnected = true; @@ -2624,33 +2904,29 @@ pub fn do_test(data: &[u8], out: Out) { nodes[1].get_and_clear_pending_msg_events().drain(..), Some(0) ); - ab_events.clear(); - ba_events.clear(); + queues.ab.clear(); + queues.ba.clear(); } nodes[0].reload(v, &out, &router, chan_type); }, 0xb3..=0xbb => { - // Restart node B, picking among the in-flight `ChannelMonitor`s to use based on - // the value of `v` we're matching. 
if !peers_ab_disconnected { nodes[0].peer_disconnected(nodes[1].get_our_node_id()); peers_ab_disconnected = true; nodes[0].get_and_clear_pending_msg_events(); - ab_events.clear(); - ba_events.clear(); + queues.ab.clear(); + queues.ba.clear(); } if !peers_bc_disconnected { nodes[2].peer_disconnected(nodes[1].get_our_node_id()); peers_bc_disconnected = true; nodes[2].get_and_clear_pending_msg_events(); - bc_events.clear(); - cb_events.clear(); + queues.bc.clear(); + queues.cb.clear(); } nodes[1].reload(v, &out, &router, chan_type); }, 0xbc | 0xbd | 0xbe => { - // Restart node C, picking among the in-flight `ChannelMonitor`s to use based on - // the value of `v` we're matching. if !peers_bc_disconnected { nodes[1].peer_disconnected(nodes[2].get_our_node_id()); peers_bc_disconnected = true; @@ -2658,8 +2934,8 @@ pub fn do_test(data: &[u8], out: Out) { nodes[1].get_and_clear_pending_msg_events().drain(..), Some(2) ); - bc_events.clear(); - cb_events.clear(); + queues.bc.clear(); + queues.cb.clear(); } nodes[2].reload(v, &out, &router, chan_type); }, @@ -2798,7 +3074,6 @@ pub fn do_test(data: &[u8], out: Out) { // Test that no channel is in a stuck state where neither party can send funds even // after we resolve all pending events. - // First, make sure peers are all connected to each other if peers_ab_disconnected { let init_1 = Init { features: nodes[1].init_features(), @@ -2840,13 +3115,14 @@ pub fn do_test(data: &[u8], out: Out) { nodes[2].signer_unblocked(None); macro_rules! 
process_all_events { - () => { { + () => {{ let mut last_pass_no_updates = false; for i in 0..std::usize::MAX { if i == 100 { - panic!("It may take may iterations to settle the state, but it should not take forever"); + panic!( + "It may take may iterations to settle the state, but it should not take forever" + ); } - // Next, make sure no monitor updates are pending for id in &chan_ab_ids { nodes[0].complete_all_monitor_updates(id); nodes[1].complete_all_monitor_updates(id); @@ -2855,7 +3131,6 @@ pub fn do_test(data: &[u8], out: Out) { nodes[1].complete_all_monitor_updates(id); nodes[2].complete_all_monitor_updates(id); } - // Then, make sure any current forwards make their way to their destination if process_msg_events!(0, false, ProcessMessages::AllMessages) { last_pass_no_updates = false; continue; @@ -2868,7 +3143,6 @@ pub fn do_test(data: &[u8], out: Out) { last_pass_no_updates = false; continue; } - // ...making sure any payments are claimed. if process_events!(0, false) { last_pass_no_updates = false; continue; @@ -2882,18 +3156,11 @@ pub fn do_test(data: &[u8], out: Out) { continue; } if last_pass_no_updates { - // In some cases, we may generate a message to send in - // `process_msg_events`, but block sending until - // `complete_all_monitor_updates` gets called on the next - // iteration. - // - // Thus, we only exit if we manage two iterations with no messages - // or events to process. break; } last_pass_no_updates = true; } - } }; + }}; } process_all_events!(); @@ -2906,7 +3173,6 @@ pub fn do_test(data: &[u8], out: Out) { } process_all_events!(); - // Verify no payments are stuck - all should have resolved for (idx, pending) in pending_payments.borrow().iter().enumerate() { assert!( pending.is_empty(), @@ -2916,8 +3182,6 @@ pub fn do_test(data: &[u8], out: Out) { ); } - // Verify that every payment claimed by a receiver resulted in a - // PaymentSent event at the sender. 
let resolved = resolved_payments.borrow(); for hash in claimed_payment_hashes.borrow().iter() { let found = resolved.iter().any(|node_resolved| { From 4f0471c17c197012c1f3f1bff2180d7b4e989591 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 28 Apr 2026 11:40:36 +0200 Subject: [PATCH 09/29] Move chanmon queue routing into EventQueues Move per-node queue draining, middle-node routing, and disconnect cleanup into EventQueues. The fuzz loop now asks the queue owner to route remaining messages instead of mutating each directional vector directly. --- fuzz/src/chanmon_consistency.rs | 415 ++++++++++++-------------------- 1 file changed, 160 insertions(+), 255 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 67f5bf9db2e..8a5756321be 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -1336,6 +1336,145 @@ impl EventQueues { fn new() -> Self { Self { ab: Vec::new(), ba: Vec::new(), bc: Vec::new(), cb: Vec::new() } } + + fn take_for_node(&mut self, node_idx: usize) -> Vec { + match node_idx { + 0 => { + let mut events = Vec::new(); + mem::swap(&mut events, &mut self.ab); + events + }, + 1 => { + let mut events = Vec::new(); + mem::swap(&mut events, &mut self.ba); + events.extend_from_slice(&self.bc[..]); + self.bc.clear(); + events + }, + 2 => { + let mut events = Vec::new(); + mem::swap(&mut events, &mut self.cb); + events + }, + _ => panic!("invalid node index"), + } + } + + fn push_for_node(&mut self, node_idx: usize, event: MessageSendEvent) { + match node_idx { + 0 => self.ab.push(event), + 2 => self.cb.push(event), + _ => panic!("cannot directly queue messages for node {}", node_idx), + } + } + + fn extend_for_node>( + &mut self, node_idx: usize, events: I, + ) { + match node_idx { + 0 => self.ab.extend(events), + 2 => self.cb.extend(events), + _ => panic!("cannot directly queue messages for node {}", node_idx), + } + } + + fn route_from_middle<'a, I: IntoIterator>( + &mut self, 
excess_events: I, expect_drop_node: Option, nodes: &[HarnessNode<'a>; 3], + ) { + // Push any events from Node B onto queues.ba and queues.bc. + let a_id = nodes[0].our_node_id(); + let expect_drop_id = expect_drop_node.map(|id| nodes[id].our_node_id()); + for event in excess_events { + let push_a = match event { + MessageSendEvent::UpdateHTLCs { ref node_id, .. } + | MessageSendEvent::SendRevokeAndACK { ref node_id, .. } + | MessageSendEvent::SendChannelReestablish { ref node_id, .. } + | MessageSendEvent::SendStfu { ref node_id, .. } + | MessageSendEvent::SendSpliceInit { ref node_id, .. } + | MessageSendEvent::SendSpliceAck { ref node_id, .. } + | MessageSendEvent::SendSpliceLocked { ref node_id, .. } + | MessageSendEvent::SendTxAddInput { ref node_id, .. } + | MessageSendEvent::SendTxAddOutput { ref node_id, .. } + | MessageSendEvent::SendTxRemoveInput { ref node_id, .. } + | MessageSendEvent::SendTxRemoveOutput { ref node_id, .. } + | MessageSendEvent::SendTxComplete { ref node_id, .. } + | MessageSendEvent::SendTxAbort { ref node_id, .. } + | MessageSendEvent::SendTxInitRbf { ref node_id, .. } + | MessageSendEvent::SendTxAckRbf { ref node_id, .. } + | MessageSendEvent::SendTxSignatures { ref node_id, .. } + | MessageSendEvent::SendChannelUpdate { ref node_id, .. } => { + if Some(*node_id) == expect_drop_id { + panic!( + "peer_disconnected should drop msgs bound for the disconnected peer" + ); + } + *node_id == a_id + }, + MessageSendEvent::HandleError { ref action, ref node_id } => { + assert_action_timeout_awaiting_response(action); + if Some(*node_id) == expect_drop_id { + panic!( + "peer_disconnected should drop msgs bound for the disconnected peer" + ); + } + *node_id == a_id + }, + MessageSendEvent::SendChannelReady { .. } + | MessageSendEvent::SendAnnouncementSignatures { .. } + | MessageSendEvent::BroadcastChannelUpdate { .. 
} => continue, + _ => panic!("Unhandled message event {:?}", event), + }; + if push_a { + self.ba.push(event); + } else { + self.bc.push(event); + } + } + } + + fn drain_on_disconnect(&mut self, edge_node: usize, nodes: &[HarnessNode<'_>; 3]) { + match edge_node { + 0 => { + for event in nodes[0].get_and_clear_pending_msg_events() { + match event { + MessageSendEvent::UpdateHTLCs { .. } => {}, + MessageSendEvent::SendRevokeAndACK { .. } => {}, + MessageSendEvent::SendChannelReestablish { .. } => {}, + MessageSendEvent::SendStfu { .. } => {}, + MessageSendEvent::SendChannelReady { .. } => {}, + MessageSendEvent::SendAnnouncementSignatures { .. } => {}, + MessageSendEvent::BroadcastChannelUpdate { .. } => {}, + MessageSendEvent::SendChannelUpdate { .. } => {}, + MessageSendEvent::HandleError { ref action, .. } => { + assert_action_timeout_awaiting_response(action); + }, + _ => panic!("Unhandled message event"), + } + } + self.route_from_middle(nodes[1].get_and_clear_pending_msg_events(), Some(0), nodes); + }, + 2 => { + for event in nodes[2].get_and_clear_pending_msg_events() { + match event { + MessageSendEvent::UpdateHTLCs { .. } => {}, + MessageSendEvent::SendRevokeAndACK { .. } => {}, + MessageSendEvent::SendChannelReestablish { .. } => {}, + MessageSendEvent::SendStfu { .. } => {}, + MessageSendEvent::SendChannelReady { .. } => {}, + MessageSendEvent::SendAnnouncementSignatures { .. } => {}, + MessageSendEvent::BroadcastChannelUpdate { .. } => {}, + MessageSendEvent::SendChannelUpdate { .. } => {}, + MessageSendEvent::HandleError { ref action, .. 
} => { + assert_action_timeout_awaiting_response(action); + }, + _ => panic!("Unhandled message event"), + } + } + self.route_from_middle(nodes[1].get_and_clear_pending_msg_events(), Some(2), nodes); + }, + _ => panic!("unsupported disconnected edge"), + } + } } fn build_node_config(chan_type: ChanType) -> UserConfig { @@ -1731,177 +1870,6 @@ pub fn do_test(data: &[u8], out: Out) { } loop { - // Push any events from Node B onto queues.ba and queues.bc - macro_rules! push_excess_b_events { - ($excess_events: expr, $expect_drop_node: expr) => {{ - let a_id = nodes[0].get_our_node_id(); - let expect_drop_node: Option = $expect_drop_node; - let expect_drop_id = if let Some(id) = expect_drop_node { - Some(nodes[id].get_our_node_id()) - } else { - None - }; - for event in $excess_events { - let push_a = match event { - MessageSendEvent::UpdateHTLCs { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { - panic!( - "peer_disconnected should drop msgs bound for the disconnected peer" - ); - } - *node_id == a_id - }, - MessageSendEvent::SendRevokeAndACK { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { - panic!( - "peer_disconnected should drop msgs bound for the disconnected peer" - ); - } - *node_id == a_id - }, - MessageSendEvent::SendChannelReestablish { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { - panic!( - "peer_disconnected should drop msgs bound for the disconnected peer" - ); - } - *node_id == a_id - }, - MessageSendEvent::SendStfu { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { - panic!( - "peer_disconnected should drop msgs bound for the disconnected peer" - ); - } - *node_id == a_id - }, - MessageSendEvent::SendSpliceInit { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { - panic!( - "peer_disconnected should drop msgs bound for the disconnected peer" - ); - } - *node_id == a_id - }, - MessageSendEvent::SendSpliceAck { ref node_id, .. 
} => { - if Some(*node_id) == expect_drop_id { - panic!( - "peer_disconnected should drop msgs bound for the disconnected peer" - ); - } - *node_id == a_id - }, - MessageSendEvent::SendSpliceLocked { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { - panic!( - "peer_disconnected should drop msgs bound for the disconnected peer" - ); - } - *node_id == a_id - }, - MessageSendEvent::SendTxAddInput { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { - panic!( - "peer_disconnected should drop msgs bound for the disconnected peer" - ); - } - *node_id == a_id - }, - MessageSendEvent::SendTxAddOutput { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { - panic!( - "peer_disconnected should drop msgs bound for the disconnected peer" - ); - } - *node_id == a_id - }, - MessageSendEvent::SendTxRemoveInput { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { - panic!( - "peer_disconnected should drop msgs bound for the disconnected peer" - ); - } - *node_id == a_id - }, - MessageSendEvent::SendTxRemoveOutput { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { - panic!( - "peer_disconnected should drop msgs bound for the disconnected peer" - ); - } - *node_id == a_id - }, - MessageSendEvent::SendTxComplete { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { - panic!( - "peer_disconnected should drop msgs bound for the disconnected peer" - ); - } - *node_id == a_id - }, - MessageSendEvent::SendTxAbort { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { - panic!( - "peer_disconnected should drop msgs bound for the disconnected peer" - ); - } - *node_id == a_id - }, - MessageSendEvent::SendTxInitRbf { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { - panic!( - "peer_disconnected should drop msgs bound for the disconnected peer" - ); - } - *node_id == a_id - }, - MessageSendEvent::SendTxAckRbf { ref node_id, .. 
} => { - if Some(*node_id) == expect_drop_id { - panic!( - "peer_disconnected should drop msgs bound for the disconnected peer" - ); - } - *node_id == a_id - }, - MessageSendEvent::SendTxSignatures { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { - panic!( - "peer_disconnected should drop msgs bound for the disconnected peer" - ); - } - *node_id == a_id - }, - MessageSendEvent::SendChannelReady { .. } => continue, - MessageSendEvent::SendAnnouncementSignatures { .. } => continue, - MessageSendEvent::BroadcastChannelUpdate { .. } => continue, - MessageSendEvent::SendChannelUpdate { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { - panic!( - "peer_disconnected should drop msgs bound for the disconnected peer" - ); - } - *node_id == a_id - }, - MessageSendEvent::HandleError { ref action, ref node_id } => { - assert_action_timeout_awaiting_response(action); - if Some(*node_id) == expect_drop_id { - panic!( - "peer_disconnected should drop msgs bound for the disconnected peer" - ); - } - *node_id == a_id - }, - _ => panic!("Unhandled message event {:?}", event), - }; - if push_a { - queues.ba.push(event); - } else { - queues.bc.push(event); - } - } - }}; - } - // While delivering messages, we select across three possible message selection processes // to ensure we get as much coverage as possible. See the individual enum variants for more // details. @@ -1921,21 +1889,7 @@ pub fn do_test(data: &[u8], out: Out) { macro_rules! 
process_msg_events { ($node: expr, $corrupt_forward: expr, $limit_events: expr) => {{ - let mut events = if $node == 1 { - let mut new_events = Vec::new(); - mem::swap(&mut new_events, &mut queues.ba); - new_events.extend_from_slice(&queues.bc[..]); - queues.bc.clear(); - new_events - } else if $node == 0 { - let mut new_events = Vec::new(); - mem::swap(&mut new_events, &mut queues.ab); - new_events - } else { - let mut new_events = Vec::new(); - mem::swap(&mut new_events, &mut queues.cb); - new_events - }; + let mut events = queues.take_for_node($node); let mut new_events = Vec::new(); if $limit_events != ProcessMessages::OnePendingMessage { new_events = nodes[$node].get_and_clear_pending_msg_events(); @@ -2332,21 +2286,18 @@ pub fn do_test(data: &[u8], out: Out) { } } if $node == 1 { - push_excess_b_events!(extra_ev.into_iter().chain(events_iter), None); + let remaining = extra_ev.into_iter().chain(events_iter).collect::>(); + queues.route_from_middle(remaining, None, &nodes); } else if $node == 0 { if let Some(ev) = extra_ev { - queues.ab.push(ev); - } - for event in events_iter { - queues.ab.push(event); + queues.push_for_node(0, ev); } + queues.extend_for_node(0, events_iter); } else { if let Some(ev) = extra_ev { - queues.cb.push(ev); - } - for event in events_iter { - queues.cb.push(event); + queues.push_for_node(2, ev); } + queues.extend_for_node(2, events_iter); } had_events }}; @@ -2358,58 +2309,6 @@ pub fn do_test(data: &[u8], out: Out) { }}; } - macro_rules! drain_msg_events_on_disconnect { - ($counterparty_id: expr) => {{ - if $counterparty_id == 0 { - for event in nodes[0].get_and_clear_pending_msg_events() { - match event { - MessageSendEvent::UpdateHTLCs { .. } => {}, - MessageSendEvent::SendRevokeAndACK { .. } => {}, - MessageSendEvent::SendChannelReestablish { .. } => {}, - MessageSendEvent::SendStfu { .. } => {}, - MessageSendEvent::SendChannelReady { .. } => {}, - MessageSendEvent::SendAnnouncementSignatures { .. 
} => {}, - MessageSendEvent::BroadcastChannelUpdate { .. } => {}, - MessageSendEvent::SendChannelUpdate { .. } => {}, - MessageSendEvent::HandleError { ref action, .. } => { - assert_action_timeout_awaiting_response(action); - }, - _ => panic!("Unhandled message event"), - } - } - push_excess_b_events!( - nodes[1].get_and_clear_pending_msg_events().drain(..), - Some(0) - ); - queues.ab.clear(); - queues.ba.clear(); - } else { - for event in nodes[2].get_and_clear_pending_msg_events() { - match event { - MessageSendEvent::UpdateHTLCs { .. } => {}, - MessageSendEvent::SendRevokeAndACK { .. } => {}, - MessageSendEvent::SendChannelReestablish { .. } => {}, - MessageSendEvent::SendStfu { .. } => {}, - MessageSendEvent::SendChannelReady { .. } => {}, - MessageSendEvent::SendAnnouncementSignatures { .. } => {}, - MessageSendEvent::BroadcastChannelUpdate { .. } => {}, - MessageSendEvent::SendChannelUpdate { .. } => {}, - MessageSendEvent::HandleError { ref action, .. } => { - assert_action_timeout_awaiting_response(action); - }, - _ => panic!("Unhandled message event"), - } - } - push_excess_b_events!( - nodes[1].get_and_clear_pending_msg_events().drain(..), - Some(2) - ); - queues.bc.clear(); - queues.cb.clear(); - } - }}; - } - macro_rules! 
process_events { ($node: expr, $fail: expr) => {{ // Multiple HTLCs can resolve for the same payment hash, so deduplicate @@ -2657,7 +2556,9 @@ pub fn do_test(data: &[u8], out: Out) { nodes[0].peer_disconnected(nodes[1].get_our_node_id()); nodes[1].peer_disconnected(nodes[0].get_our_node_id()); peers_ab_disconnected = true; - drain_msg_events_on_disconnect!(0); + queues.drain_on_disconnect(0, &nodes); + queues.ab.clear(); + queues.ba.clear(); } }, 0x0d => { @@ -2665,7 +2566,9 @@ pub fn do_test(data: &[u8], out: Out) { nodes[1].peer_disconnected(nodes[2].get_our_node_id()); nodes[2].peer_disconnected(nodes[1].get_our_node_id()); peers_bc_disconnected = true; - drain_msg_events_on_disconnect!(2); + queues.drain_on_disconnect(2, &nodes); + queues.bc.clear(); + queues.cb.clear(); } }, 0x0e => { @@ -2900,9 +2803,10 @@ pub fn do_test(data: &[u8], out: Out) { if !peers_ab_disconnected { nodes[1].peer_disconnected(nodes[0].get_our_node_id()); peers_ab_disconnected = true; - push_excess_b_events!( - nodes[1].get_and_clear_pending_msg_events().drain(..), - Some(0) + queues.route_from_middle( + nodes[1].get_and_clear_pending_msg_events(), + Some(0), + &nodes, ); queues.ab.clear(); queues.ba.clear(); @@ -2930,9 +2834,10 @@ pub fn do_test(data: &[u8], out: Out) { if !peers_bc_disconnected { nodes[1].peer_disconnected(nodes[2].get_our_node_id()); peers_bc_disconnected = true; - push_excess_b_events!( - nodes[1].get_and_clear_pending_msg_events().drain(..), - Some(2) + queues.route_from_middle( + nodes[1].get_and_clear_pending_msg_events(), + Some(2), + &nodes, ); queues.bc.clear(); queues.cb.clear(); From f1631a3723802dfdfaa289bebdf67de3e518246a Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 21 Apr 2026 17:09:02 +0200 Subject: [PATCH 10/29] Extract chanmon harness message processing Pull message-event delivery into standalone helpers. This keeps the fuzz dispatch loop smaller while preserving the same corruption and one-message processing modes. 
--- fuzz/src/chanmon_consistency.rs | 711 +++++++++++++------------------- 1 file changed, 283 insertions(+), 428 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 8a5756321be..47b027c597f 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -942,6 +942,21 @@ enum ChanType { ZeroFeeCommitments, } +// While delivering messages, select across three possible message selection +// processes to maximize coverage. See the individual enum variants for details. +#[derive(Copy, Clone, PartialEq, Eq)] +enum ProcessMessages { + /// Deliver all available messages, including fetching any new messages from + /// `get_and_clear_pending_msg_events()` which may have side effects. + AllMessages, + /// Call `get_and_clear_pending_msg_events()` first, then deliver up to one + /// message, which may already be queued. + OneMessage, + /// Deliver up to one already-queued message. This avoids the side effects of + /// `get_and_clear_pending_msg_events()`, such as freeing the HTLC holding cell. 
+ OnePendingMessage, +} + struct HarnessNode<'a> { node_id: u8, node: ChanMan<'a>, @@ -1695,6 +1710,266 @@ fn lock_fundings(nodes: &[HarnessNode<'_>; 3]) { } } +fn process_msg_events_impl( + node_idx: usize, corrupt_forward: bool, limit_events: ProcessMessages, + nodes: &[HarnessNode<'_>; 3], out: &Out, queues: &mut EventQueues, +) -> bool { + fn find_destination_node(nodes: &[HarnessNode<'_>; 3], node_id: &PublicKey) -> usize { + nodes + .iter() + .position(|node| node.our_node_id() == *node_id) + .expect("message destination should be a known harness node") + } + + fn log_msg_delivery( + node_idx: usize, dest_idx: usize, msg_name: &str, out: &Out, + ) { + out.locked_write( + format!("Delivering {} from node {} to node {}.\n", msg_name, node_idx, dest_idx) + .as_bytes(), + ); + } + + fn log_peer_message( + node_idx: usize, node_id: &PublicKey, nodes: &[HarnessNode<'_>; 3], out: &Out, + msg_name: &str, + ) -> usize { + let dest_idx = find_destination_node(nodes, node_id); + log_msg_delivery(node_idx, dest_idx, msg_name, out); + dest_idx + } + + fn handle_update_add_htlc( + source_node_id: PublicKey, dest: &HarnessNode<'_>, update_add: &UpdateAddHTLC, + corrupt_forward: bool, + ) { + if !corrupt_forward { + dest.handle_update_add_htlc(source_node_id, update_add); + } else { + // Corrupt the update_add_htlc message so that its HMAC check will fail and we + // generate an update_fail_malformed_htlc instead of an update_fail_htlc as we do + // when we reject a payment. 
+ let mut msg_ser = update_add.encode(); + msg_ser[1000] ^= 0xff; + let new_msg = UpdateAddHTLC::read_from_fixed_length_buffer(&mut &msg_ser[..]).unwrap(); + dest.handle_update_add_htlc(source_node_id, &new_msg); + } + } + + fn handle_update_htlcs_event( + node_idx: usize, source_node_id: PublicKey, node_id: PublicKey, channel_id: ChannelId, + updates: CommitmentUpdate, corrupt_forward: bool, limit_events: ProcessMessages, + nodes: &[HarnessNode<'_>; 3], out: &Out, + ) -> Option { + let dest_idx = find_destination_node(nodes, &node_id); + let dest = &nodes[dest_idx]; + let CommitmentUpdate { + update_add_htlcs, + update_fail_htlcs, + update_fulfill_htlcs, + update_fail_malformed_htlcs, + update_fee, + commitment_signed, + } = updates; + + for update_add in update_add_htlcs.iter() { + log_msg_delivery(node_idx, dest_idx, "update_add_htlc", out); + handle_update_add_htlc(source_node_id, dest, update_add, corrupt_forward); + } + let processed_change = !update_add_htlcs.is_empty() + || !update_fulfill_htlcs.is_empty() + || !update_fail_htlcs.is_empty() + || !update_fail_malformed_htlcs.is_empty(); + for update_fulfill in update_fulfill_htlcs { + log_msg_delivery(node_idx, dest_idx, "update_fulfill_htlc", out); + dest.handle_update_fulfill_htlc(source_node_id, update_fulfill); + } + for update_fail in update_fail_htlcs.iter() { + log_msg_delivery(node_idx, dest_idx, "update_fail_htlc", out); + dest.handle_update_fail_htlc(source_node_id, update_fail); + } + for update_fail_malformed in update_fail_malformed_htlcs.iter() { + log_msg_delivery(node_idx, dest_idx, "update_fail_malformed_htlc", out); + dest.handle_update_fail_malformed_htlc(source_node_id, update_fail_malformed); + } + if let Some(msg) = update_fee { + log_msg_delivery(node_idx, dest_idx, "update_fee", out); + dest.handle_update_fee(source_node_id, &msg); + } + if limit_events != ProcessMessages::AllMessages && processed_change { + // If we only want to process some messages, don't deliver the CS until 
later. + return Some(MessageSendEvent::UpdateHTLCs { + node_id, + channel_id, + updates: CommitmentUpdate { + update_add_htlcs: Vec::new(), + update_fail_htlcs: Vec::new(), + update_fulfill_htlcs: Vec::new(), + update_fail_malformed_htlcs: Vec::new(), + update_fee: None, + commitment_signed, + }, + }); + } + log_msg_delivery(node_idx, dest_idx, "commitment_signed", out); + dest.handle_commitment_signed_batch_test(source_node_id, &commitment_signed); + None + } + + fn process_msg_event( + node_idx: usize, source_node_id: PublicKey, event: MessageSendEvent, corrupt_forward: bool, + limit_events: ProcessMessages, nodes: &[HarnessNode<'_>; 3], out: &Out, + ) -> Option { + match event { + MessageSendEvent::UpdateHTLCs { node_id, channel_id, updates } => { + handle_update_htlcs_event( + node_idx, + source_node_id, + node_id, + channel_id, + updates, + corrupt_forward, + limit_events, + nodes, + out, + ) + }, + MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => { + let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "revoke_and_ack"); + nodes[dest_idx].handle_revoke_and_ack(source_node_id, msg); + None + }, + MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => { + let dest_idx = + log_peer_message(node_idx, node_id, nodes, out, "channel_reestablish"); + nodes[dest_idx].handle_channel_reestablish(source_node_id, msg); + None + }, + MessageSendEvent::SendStfu { ref node_id, ref msg } => { + let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "stfu"); + nodes[dest_idx].handle_stfu(source_node_id, msg); + None + }, + MessageSendEvent::SendTxAddInput { ref node_id, ref msg } => { + let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "tx_add_input"); + nodes[dest_idx].handle_tx_add_input(source_node_id, msg); + None + }, + MessageSendEvent::SendTxAddOutput { ref node_id, ref msg } => { + let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "tx_add_output"); + 
nodes[dest_idx].handle_tx_add_output(source_node_id, msg); + None + }, + MessageSendEvent::SendTxRemoveInput { ref node_id, ref msg } => { + let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "tx_remove_input"); + nodes[dest_idx].handle_tx_remove_input(source_node_id, msg); + None + }, + MessageSendEvent::SendTxRemoveOutput { ref node_id, ref msg } => { + let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "tx_remove_output"); + nodes[dest_idx].handle_tx_remove_output(source_node_id, msg); + None + }, + MessageSendEvent::SendTxComplete { ref node_id, ref msg } => { + let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "tx_complete"); + nodes[dest_idx].handle_tx_complete(source_node_id, msg); + None + }, + MessageSendEvent::SendTxAbort { ref node_id, ref msg } => { + let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "tx_abort"); + nodes[dest_idx].handle_tx_abort(source_node_id, msg); + None + }, + MessageSendEvent::SendTxInitRbf { ref node_id, ref msg } => { + let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "tx_init_rbf"); + nodes[dest_idx].handle_tx_init_rbf(source_node_id, msg); + None + }, + MessageSendEvent::SendTxAckRbf { ref node_id, ref msg } => { + let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "tx_ack_rbf"); + nodes[dest_idx].handle_tx_ack_rbf(source_node_id, msg); + None + }, + MessageSendEvent::SendTxSignatures { ref node_id, ref msg } => { + let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "tx_signatures"); + nodes[dest_idx].handle_tx_signatures(source_node_id, msg); + None + }, + MessageSendEvent::SendSpliceInit { ref node_id, ref msg } => { + let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "splice_init"); + nodes[dest_idx].handle_splice_init(source_node_id, msg); + None + }, + MessageSendEvent::SendSpliceAck { ref node_id, ref msg } => { + let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "splice_ack"); + 
nodes[dest_idx].handle_splice_ack(source_node_id, msg); + None + }, + MessageSendEvent::SendSpliceLocked { ref node_id, ref msg } => { + let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "splice_locked"); + nodes[dest_idx].handle_splice_locked(source_node_id, msg); + None + }, + MessageSendEvent::HandleError { ref action, .. } => { + assert_action_timeout_awaiting_response(action); + None + }, + MessageSendEvent::SendChannelReady { .. } + | MessageSendEvent::SendAnnouncementSignatures { .. } + | MessageSendEvent::SendChannelUpdate { .. } => { + // Can be generated as a reestablish response. + None + }, + MessageSendEvent::BroadcastChannelUpdate { .. } => { + // Can be generated as a result of calling `timer_tick_occurred` enough + // times while peers are disconnected. + None + }, + _ => panic!("Unhandled message event {:?}", event), + } + } + + let mut events = queues.take_for_node(node_idx); + let mut new_events = Vec::new(); + if limit_events != ProcessMessages::OnePendingMessage { + new_events = nodes[node_idx].get_and_clear_pending_msg_events(); + } + let mut had_events = false; + let source_node_id = nodes[node_idx].our_node_id(); + let mut events_iter = events.drain(..).chain(new_events.drain(..)); + let mut extra_ev = None; + for event in &mut events_iter { + had_events = true; + extra_ev = process_msg_event( + node_idx, + source_node_id, + event, + corrupt_forward, + limit_events, + nodes, + out, + ); + if limit_events != ProcessMessages::AllMessages { + break; + } + } + if node_idx == 1 { + let remaining = extra_ev.into_iter().chain(events_iter).collect::>(); + queues.route_from_middle(remaining, None, nodes); + } else if node_idx == 0 { + if let Some(ev) = extra_ev { + queues.push_for_node(0, ev); + } + queues.extend_for_node(0, events_iter); + } else { + if let Some(ev) = extra_ev { + queues.push_for_node(2, ev); + } + queues.extend_for_node(2, events_iter); + } + had_events +} + #[inline] pub fn do_test(data: &[u8], out: Out) { let router 
= FuzzRouter {}; @@ -1870,436 +2145,16 @@ pub fn do_test(data: &[u8], out: Out) { } loop { - // While delivering messages, we select across three possible message selection processes - // to ensure we get as much coverage as possible. See the individual enum variants for more - // details. - #[derive(PartialEq)] - enum ProcessMessages { - /// Deliver all available messages, including fetching any new messages from - /// `get_and_clear_pending_msg_events()` (which may have side effects). - AllMessages, - /// Call `get_and_clear_pending_msg_events()` first, and then deliver up to one - /// message (which may already be queued). - OneMessage, - /// Deliver up to one already-queued message. This avoids any potential side-effects - /// of `get_and_clear_pending_msg_events()` (eg freeing the HTLC holding cell), which - /// provides potentially more coverage. - OnePendingMessage, - } - macro_rules! process_msg_events { ($node: expr, $corrupt_forward: expr, $limit_events: expr) => {{ - let mut events = queues.take_for_node($node); - let mut new_events = Vec::new(); - if $limit_events != ProcessMessages::OnePendingMessage { - new_events = nodes[$node].get_and_clear_pending_msg_events(); - } - let mut had_events = false; - let mut events_iter = events.drain(..).chain(new_events.drain(..)); - let mut extra_ev = None; - for event in &mut events_iter { - had_events = true; - match event { - MessageSendEvent::UpdateHTLCs { - node_id, - channel_id, - updates: - CommitmentUpdate { - update_add_htlcs, - update_fail_htlcs, - update_fulfill_htlcs, - update_fail_malformed_htlcs, - update_fee, - commitment_signed, - }, - } => { - for (idx, dest) in nodes.iter().enumerate() { - if dest.get_our_node_id() == node_id { - for update_add in update_add_htlcs.iter() { - out.locked_write( - format!( - "Delivering update_add_htlc from node {} to node {}.\n", - $node, - idx - ) - .as_bytes(), - ); - if !$corrupt_forward { - dest.handle_update_add_htlc( - nodes[$node].get_our_node_id(), - 
update_add, - ); - } else { - // Corrupt the update_add_htlc message so that its HMAC - // check will fail and we generate a - // update_fail_malformed_htlc instead of an - // update_fail_htlc as we do when we reject a payment. - let mut msg_ser = update_add.encode(); - msg_ser[1000] ^= 0xff; - let new_msg = - UpdateAddHTLC::read_from_fixed_length_buffer( - &mut &msg_ser[..], - ) - .unwrap(); - dest.handle_update_add_htlc( - nodes[$node].get_our_node_id(), - &new_msg, - ); - } - } - let processed_change = !update_add_htlcs.is_empty() - || !update_fulfill_htlcs.is_empty() - || !update_fail_htlcs.is_empty() - || !update_fail_malformed_htlcs.is_empty(); - for update_fulfill in update_fulfill_htlcs { - out.locked_write( - format!( - "Delivering update_fulfill_htlc from node {} to node {}.\n", - $node, - idx - ) - .as_bytes(), - ); - dest.handle_update_fulfill_htlc( - nodes[$node].get_our_node_id(), - update_fulfill, - ); - } - for update_fail in update_fail_htlcs.iter() { - out.locked_write( - format!( - "Delivering update_fail_htlc from node {} to node {}.\n", - $node, - idx - ) - .as_bytes(), - ); - dest.handle_update_fail_htlc( - nodes[$node].get_our_node_id(), - update_fail, - ); - } - for update_fail_malformed in update_fail_malformed_htlcs.iter() { - out.locked_write( - format!( - "Delivering update_fail_malformed_htlc from node {} to node {}.\n", - $node, - idx - ) - .as_bytes(), - ); - dest.handle_update_fail_malformed_htlc( - nodes[$node].get_our_node_id(), - update_fail_malformed, - ); - } - if let Some(msg) = update_fee { - out.locked_write( - format!( - "Delivering update_fee from node {} to node {}.\n", - $node, - idx - ) - .as_bytes(), - ); - dest.handle_update_fee(nodes[$node].get_our_node_id(), &msg); - } - if $limit_events != ProcessMessages::AllMessages - && processed_change - { - // If we only want to process some messages, don't deliver the - // CS until later. 
- extra_ev = Some(MessageSendEvent::UpdateHTLCs { - node_id, - channel_id, - updates: CommitmentUpdate { - update_add_htlcs: Vec::new(), - update_fail_htlcs: Vec::new(), - update_fulfill_htlcs: Vec::new(), - update_fail_malformed_htlcs: Vec::new(), - update_fee: None, - commitment_signed, - }, - }); - break; - } - out.locked_write( - format!( - "Delivering commitment_signed from node {} to node {}.\n", - $node, - idx - ) - .as_bytes(), - ); - dest.handle_commitment_signed_batch_test( - nodes[$node].get_our_node_id(), - &commitment_signed, - ); - break; - } - } - }, - MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => { - for (idx, dest) in nodes.iter().enumerate() { - if dest.get_our_node_id() == *node_id { - out.locked_write( - format!( - "Delivering revoke_and_ack from node {} to node {}.\n", - $node, - idx - ) - .as_bytes(), - ); - dest.handle_revoke_and_ack(nodes[$node].get_our_node_id(), msg); - } - } - }, - MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => { - for (idx, dest) in nodes.iter().enumerate() { - if dest.get_our_node_id() == *node_id { - out.locked_write( - format!( - "Delivering channel_reestablish from node {} to node {}.\n", - $node, - idx - ) - .as_bytes(), - ); - dest.handle_channel_reestablish( - nodes[$node].get_our_node_id(), - msg, - ); - } - } - }, - MessageSendEvent::SendStfu { ref node_id, ref msg } => { - for (idx, dest) in nodes.iter().enumerate() { - if dest.get_our_node_id() == *node_id { - out.locked_write( - format!("Delivering stfu from node {} to node {}.\n", $node, idx) - .as_bytes(), - ); - dest.handle_stfu(nodes[$node].get_our_node_id(), msg); - } - } - }, - MessageSendEvent::SendTxAddInput { ref node_id, ref msg } => { - for (idx, dest) in nodes.iter().enumerate() { - if dest.get_our_node_id() == *node_id { - out.locked_write( - format!( - "Delivering tx_add_input from node {} to node {}.\n", - $node, - idx - ) - .as_bytes(), - ); - dest.handle_tx_add_input(nodes[$node].get_our_node_id(), msg); 
- } - } - }, - MessageSendEvent::SendTxAddOutput { ref node_id, ref msg } => { - for (idx, dest) in nodes.iter().enumerate() { - if dest.get_our_node_id() == *node_id { - out.locked_write( - format!( - "Delivering tx_add_output from node {} to node {}.\n", - $node, - idx - ) - .as_bytes(), - ); - dest.handle_tx_add_output(nodes[$node].get_our_node_id(), msg); - } - } - }, - MessageSendEvent::SendTxRemoveInput { ref node_id, ref msg } => { - for (idx, dest) in nodes.iter().enumerate() { - if dest.get_our_node_id() == *node_id { - out.locked_write( - format!( - "Delivering tx_remove_input from node {} to node {}.\n", - $node, - idx - ) - .as_bytes(), - ); - dest.handle_tx_remove_input(nodes[$node].get_our_node_id(), msg); - } - } - }, - MessageSendEvent::SendTxRemoveOutput { ref node_id, ref msg } => { - for (idx, dest) in nodes.iter().enumerate() { - if dest.get_our_node_id() == *node_id { - out.locked_write( - format!( - "Delivering tx_remove_output from node {} to node {}.\n", - $node, - idx - ) - .as_bytes(), - ); - dest.handle_tx_remove_output(nodes[$node].get_our_node_id(), msg); - } - } - }, - MessageSendEvent::SendTxComplete { ref node_id, ref msg } => { - for (idx, dest) in nodes.iter().enumerate() { - if dest.get_our_node_id() == *node_id { - out.locked_write( - format!( - "Delivering tx_complete from node {} to node {}.\n", - $node, - idx - ) - .as_bytes(), - ); - dest.handle_tx_complete(nodes[$node].get_our_node_id(), msg); - } - } - }, - MessageSendEvent::SendTxAbort { ref node_id, ref msg } => { - for (idx, dest) in nodes.iter().enumerate() { - if dest.get_our_node_id() == *node_id { - out.locked_write( - format!( - "Delivering tx_abort from node {} to node {}.\n", - $node, - idx - ) - .as_bytes(), - ); - dest.handle_tx_abort(nodes[$node].get_our_node_id(), msg); - } - } - }, - MessageSendEvent::SendTxInitRbf { ref node_id, ref msg } => { - for (idx, dest) in nodes.iter().enumerate() { - if dest.get_our_node_id() == *node_id { - out.locked_write( - 
format!( - "Delivering tx_init_rbf from node {} to node {}.\n", - $node, - idx - ) - .as_bytes(), - ); - dest.handle_tx_init_rbf(nodes[$node].get_our_node_id(), msg); - } - } - }, - MessageSendEvent::SendTxAckRbf { ref node_id, ref msg } => { - for (idx, dest) in nodes.iter().enumerate() { - if dest.get_our_node_id() == *node_id { - out.locked_write( - format!( - "Delivering tx_ack_rbf from node {} to node {}.\n", - $node, - idx - ) - .as_bytes(), - ); - dest.handle_tx_ack_rbf(nodes[$node].get_our_node_id(), msg); - } - } - }, - MessageSendEvent::SendTxSignatures { ref node_id, ref msg } => { - for (idx, dest) in nodes.iter().enumerate() { - if dest.get_our_node_id() == *node_id { - out.locked_write( - format!( - "Delivering tx_signatures from node {} to node {}.\n", - $node, - idx - ) - .as_bytes(), - ); - dest.handle_tx_signatures(nodes[$node].get_our_node_id(), msg); - } - } - }, - MessageSendEvent::SendSpliceInit { ref node_id, ref msg } => { - for (idx, dest) in nodes.iter().enumerate() { - if dest.get_our_node_id() == *node_id { - out.locked_write( - format!( - "Delivering splice_init from node {} to node {}.\n", - $node, - idx - ) - .as_bytes(), - ); - dest.handle_splice_init(nodes[$node].get_our_node_id(), msg); - } - } - }, - MessageSendEvent::SendSpliceAck { ref node_id, ref msg } => { - for (idx, dest) in nodes.iter().enumerate() { - if dest.get_our_node_id() == *node_id { - out.locked_write( - format!( - "Delivering splice_ack from node {} to node {}.\n", - $node, - idx - ) - .as_bytes(), - ); - dest.handle_splice_ack(nodes[$node].get_our_node_id(), msg); - } - } - }, - MessageSendEvent::SendSpliceLocked { ref node_id, ref msg } => { - for (idx, dest) in nodes.iter().enumerate() { - if dest.get_our_node_id() == *node_id { - out.locked_write( - format!( - "Delivering splice_locked from node {} to node {}.\n", - $node, - idx - ) - .as_bytes(), - ); - dest.handle_splice_locked(nodes[$node].get_our_node_id(), msg); - } - } - }, - 
MessageSendEvent::HandleError { ref action, .. } => { - assert_action_timeout_awaiting_response(action); - }, - MessageSendEvent::SendChannelReady { .. } => { - // Can be generated as a reestablish response - }, - MessageSendEvent::SendAnnouncementSignatures { .. } => { - // Can be generated as a reestablish response - }, - MessageSendEvent::SendChannelUpdate { .. } => { - // Can be generated as a reestablish response - }, - MessageSendEvent::BroadcastChannelUpdate { .. } => { - // Can be generated as a result of calling `timer_tick_occurred` enough - // times while peers are disconnected - }, - _ => panic!("Unhandled message event {:?}", event), - } - if $limit_events != ProcessMessages::AllMessages { - break; - } - } - if $node == 1 { - let remaining = extra_ev.into_iter().chain(events_iter).collect::>(); - queues.route_from_middle(remaining, None, &nodes); - } else if $node == 0 { - if let Some(ev) = extra_ev { - queues.push_for_node(0, ev); - } - queues.extend_for_node(0, events_iter); - } else { - if let Some(ev) = extra_ev { - queues.push_for_node(2, ev); - } - queues.extend_for_node(2, events_iter); - } - had_events + process_msg_events_impl( + $node, + $corrupt_forward, + $limit_events, + &nodes, + &out, + &mut queues, + ) }}; } From 259ec4dff7eb9fc4d489dae6af5b04dc6904a126 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 21 Apr 2026 16:07:32 +0200 Subject: [PATCH 11/29] Extract chanmon harness peer links Represent each channel pair as a peer link with its channel ids and disconnect state. Link methods now own peer reconnect, disconnect, and monitor-update operations for that channel group. 
--- fuzz/src/chanmon_consistency.rs | 351 ++++++++++++++++---------------- 1 file changed, 173 insertions(+), 178 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 47b027c597f..4e498e131e5 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -1447,6 +1447,20 @@ impl EventQueues { } } + fn clear_link(&mut self, link: &PeerLink) { + match (link.node_a, link.node_b) { + (0, 1) | (1, 0) => { + self.ab.clear(); + self.ba.clear(); + }, + (1, 2) | (2, 1) => { + self.bc.clear(); + self.cb.clear(); + }, + _ => panic!("unsupported link"), + } + } + fn drain_on_disconnect(&mut self, edge_node: usize, nodes: &[HarnessNode<'_>; 3]) { match edge_node { 0 => { @@ -1492,6 +1506,110 @@ impl EventQueues { } } +struct PeerLink { + node_a: usize, + node_b: usize, + channel_ids: [ChannelId; 3], + disconnected: bool, +} + +impl PeerLink { + fn new(node_a: usize, node_b: usize, channel_ids: [ChannelId; 3]) -> Self { + Self { node_a, node_b, channel_ids, disconnected: false } + } + + fn first_channel_id(&self) -> ChannelId { + self.channel_ids[0] + } + + fn channel_ids(&self) -> &[ChannelId; 3] { + &self.channel_ids + } + + fn complete_all_monitor_updates(&self, nodes: &[HarnessNode<'_>; 3]) { + for id in &self.channel_ids { + nodes[self.node_a].complete_all_monitor_updates(id); + nodes[self.node_b].complete_all_monitor_updates(id); + } + } + + fn complete_monitor_updates_for_node( + &self, node_idx: usize, nodes: &[HarnessNode<'_>; 3], selector: MonitorUpdateSelector, + ) { + assert!(node_idx == self.node_a || node_idx == self.node_b); + for id in &self.channel_ids { + nodes[node_idx].complete_monitor_update(id, selector); + } + } + + fn disconnect(&mut self, nodes: &mut [HarnessNode<'_>; 3], queues: &mut EventQueues) { + if self.disconnected { + return; + } + let node_a_id = nodes[self.node_a].our_node_id(); + let node_b_id = nodes[self.node_b].our_node_id(); + 
nodes[self.node_a].peer_disconnected(node_b_id); + nodes[self.node_b].peer_disconnected(node_a_id); + self.disconnected = true; + let edge_node = if self.node_a == 1 { + self.node_b + } else if self.node_b == 1 { + self.node_a + } else { + panic!("unsupported link topology") + }; + queues.drain_on_disconnect(edge_node, nodes); + queues.clear_link(self); + } + + fn reconnect(&mut self, nodes: &mut [HarnessNode<'_>; 3]) { + if !self.disconnected { + return; + } + let node_a_id = nodes[self.node_a].our_node_id(); + let node_b_id = nodes[self.node_b].our_node_id(); + let init_b = Init { + features: nodes[self.node_b].init_features(), + networks: None, + remote_network_address: None, + }; + nodes[self.node_a].peer_connected(node_b_id, &init_b, true).unwrap(); + let init_a = Init { + features: nodes[self.node_a].init_features(), + networks: None, + remote_network_address: None, + }; + nodes[self.node_b].peer_connected(node_a_id, &init_a, false).unwrap(); + self.disconnected = false; + } + + fn disconnect_for_reload( + &mut self, restarted_node: usize, nodes: &mut [HarnessNode<'_>; 3], + queues: &mut EventQueues, + ) { + if self.disconnected { + return; + } + assert!(restarted_node == self.node_a || restarted_node == self.node_b); + + let remaining_node = if restarted_node == self.node_a { self.node_b } else { self.node_a }; + let restarted_node_id = nodes[restarted_node].our_node_id(); + nodes[remaining_node].peer_disconnected(restarted_node_id); + self.disconnected = true; + + if remaining_node == 1 { + queues.route_from_middle( + nodes[1].get_and_clear_pending_msg_events(), + Some(restarted_node), + nodes, + ); + } else { + nodes[remaining_node].get_and_clear_pending_msg_events(); + } + queues.clear_link(self); + } +} + fn build_node_config(chan_type: ChanType) -> UserConfig { let mut config = UserConfig::default(); config.channel_config.forwarding_fee_proportional_millionths = 0; @@ -2107,10 +2225,10 @@ pub fn do_test(data: &[u8], out: Out) { let node_c_chans = 
nodes[2].list_usable_channels(); [node_c_chans[0].channel_id, node_c_chans[1].channel_id, node_c_chans[2].channel_id] }; - let chan_a_id = chan_ab_ids[0]; - let chan_b_id = chan_bc_ids[0]; - let mut peers_ab_disconnected = false; - let mut peers_bc_disconnected = false; + let mut ab_link = PeerLink::new(0, 1, chan_ab_ids); + let mut bc_link = PeerLink::new(1, 2, chan_bc_ids); + let chan_a_id = ab_link.first_channel_id(); + let chan_b_id = bc_link.first_channel_id(); let mut queues = EventQueues::new(); let mut p_ctr: u64 = 0; @@ -2386,80 +2504,30 @@ pub fn do_test(data: &[u8], out: Out) { 0x06 => nodes[2].set_persistence_style(ChannelMonitorUpdateStatus::Completed), 0x08 => { - for id in &chan_ab_ids { + for id in ab_link.channel_ids() { nodes[0].complete_all_monitor_updates(id); } }, 0x09 => { - for id in &chan_ab_ids { + for id in ab_link.channel_ids() { nodes[1].complete_all_monitor_updates(id); } }, 0x0a => { - for id in &chan_bc_ids { + for id in bc_link.channel_ids() { nodes[1].complete_all_monitor_updates(id); } }, 0x0b => { - for id in &chan_bc_ids { + for id in bc_link.channel_ids() { nodes[2].complete_all_monitor_updates(id); } }, - 0x0c => { - if !peers_ab_disconnected { - nodes[0].peer_disconnected(nodes[1].get_our_node_id()); - nodes[1].peer_disconnected(nodes[0].get_our_node_id()); - peers_ab_disconnected = true; - queues.drain_on_disconnect(0, &nodes); - queues.ab.clear(); - queues.ba.clear(); - } - }, - 0x0d => { - if !peers_bc_disconnected { - nodes[1].peer_disconnected(nodes[2].get_our_node_id()); - nodes[2].peer_disconnected(nodes[1].get_our_node_id()); - peers_bc_disconnected = true; - queues.drain_on_disconnect(2, &nodes); - queues.bc.clear(); - queues.cb.clear(); - } - }, - 0x0e => { - if peers_ab_disconnected { - let init_1 = Init { - features: nodes[1].init_features(), - networks: None, - remote_network_address: None, - }; - nodes[0].peer_connected(nodes[1].get_our_node_id(), &init_1, true).unwrap(); - let init_0 = Init { - features: 
nodes[0].init_features(), - networks: None, - remote_network_address: None, - }; - nodes[1].peer_connected(nodes[0].get_our_node_id(), &init_0, false).unwrap(); - peers_ab_disconnected = false; - } - }, - 0x0f => { - if peers_bc_disconnected { - let init_2 = Init { - features: nodes[2].init_features(), - networks: None, - remote_network_address: None, - }; - nodes[1].peer_connected(nodes[2].get_our_node_id(), &init_2, true).unwrap(); - let init_1 = Init { - features: nodes[1].init_features(), - networks: None, - remote_network_address: None, - }; - nodes[2].peer_connected(nodes[1].get_our_node_id(), &init_1, false).unwrap(); - peers_bc_disconnected = false; - } - }, + 0x0c => ab_link.disconnect(&mut nodes, &mut queues), + 0x0d => bc_link.disconnect(&mut nodes, &mut queues), + 0x0e => ab_link.reconnect(&mut nodes), + 0x0f => bc_link.reconnect(&mut nodes), 0x10 => process_msg_noret!(0, true, ProcessMessages::AllMessages), 0x11 => process_msg_noret!(0, false, ProcessMessages::AllMessages), @@ -2655,48 +2723,16 @@ pub fn do_test(data: &[u8], out: Out) { }, 0xb0 | 0xb1 | 0xb2 => { - if !peers_ab_disconnected { - nodes[1].peer_disconnected(nodes[0].get_our_node_id()); - peers_ab_disconnected = true; - queues.route_from_middle( - nodes[1].get_and_clear_pending_msg_events(), - Some(0), - &nodes, - ); - queues.ab.clear(); - queues.ba.clear(); - } + ab_link.disconnect_for_reload(0, &mut nodes, &mut queues); nodes[0].reload(v, &out, &router, chan_type); }, 0xb3..=0xbb => { - if !peers_ab_disconnected { - nodes[0].peer_disconnected(nodes[1].get_our_node_id()); - peers_ab_disconnected = true; - nodes[0].get_and_clear_pending_msg_events(); - queues.ab.clear(); - queues.ba.clear(); - } - if !peers_bc_disconnected { - nodes[2].peer_disconnected(nodes[1].get_our_node_id()); - peers_bc_disconnected = true; - nodes[2].get_and_clear_pending_msg_events(); - queues.bc.clear(); - queues.cb.clear(); - } + ab_link.disconnect_for_reload(1, &mut nodes, &mut queues); + 
bc_link.disconnect_for_reload(1, &mut nodes, &mut queues); nodes[1].reload(v, &out, &router, chan_type); }, 0xbc | 0xbd | 0xbe => { - if !peers_bc_disconnected { - nodes[1].peer_disconnected(nodes[2].get_our_node_id()); - peers_bc_disconnected = true; - queues.route_from_middle( - nodes[1].get_and_clear_pending_msg_events(), - Some(2), - &nodes, - ); - queues.bc.clear(); - queues.cb.clear(); - } + bc_link.disconnect_for_reload(2, &mut nodes, &mut queues); nodes[2].reload(v, &out, &router, chan_type); }, @@ -2767,103 +2803,51 @@ pub fn do_test(data: &[u8], out: Out) { }, 0xf0 => { - for id in &chan_ab_ids { - nodes[0].complete_monitor_update(id, MonitorUpdateSelector::First); - } + ab_link.complete_monitor_updates_for_node(0, &nodes, MonitorUpdateSelector::First) }, 0xf1 => { - for id in &chan_ab_ids { - nodes[0].complete_monitor_update(id, MonitorUpdateSelector::Second); - } + ab_link.complete_monitor_updates_for_node(0, &nodes, MonitorUpdateSelector::Second) }, 0xf2 => { - for id in &chan_ab_ids { - nodes[0].complete_monitor_update(id, MonitorUpdateSelector::Last); - } + ab_link.complete_monitor_updates_for_node(0, &nodes, MonitorUpdateSelector::Last) }, 0xf4 => { - for id in &chan_ab_ids { - nodes[1].complete_monitor_update(id, MonitorUpdateSelector::First); - } + ab_link.complete_monitor_updates_for_node(1, &nodes, MonitorUpdateSelector::First) }, 0xf5 => { - for id in &chan_ab_ids { - nodes[1].complete_monitor_update(id, MonitorUpdateSelector::Second); - } + ab_link.complete_monitor_updates_for_node(1, &nodes, MonitorUpdateSelector::Second) }, 0xf6 => { - for id in &chan_ab_ids { - nodes[1].complete_monitor_update(id, MonitorUpdateSelector::Last); - } + ab_link.complete_monitor_updates_for_node(1, &nodes, MonitorUpdateSelector::Last) }, 0xf8 => { - for id in &chan_bc_ids { - nodes[1].complete_monitor_update(id, MonitorUpdateSelector::First); - } + bc_link.complete_monitor_updates_for_node(1, &nodes, MonitorUpdateSelector::First) }, 0xf9 => { - for id in 
&chan_bc_ids { - nodes[1].complete_monitor_update(id, MonitorUpdateSelector::Second); - } + bc_link.complete_monitor_updates_for_node(1, &nodes, MonitorUpdateSelector::Second) }, 0xfa => { - for id in &chan_bc_ids { - nodes[1].complete_monitor_update(id, MonitorUpdateSelector::Last); - } + bc_link.complete_monitor_updates_for_node(1, &nodes, MonitorUpdateSelector::Last) }, 0xfc => { - for id in &chan_bc_ids { - nodes[2].complete_monitor_update(id, MonitorUpdateSelector::First); - } + bc_link.complete_monitor_updates_for_node(2, &nodes, MonitorUpdateSelector::First) }, 0xfd => { - for id in &chan_bc_ids { - nodes[2].complete_monitor_update(id, MonitorUpdateSelector::Second); - } + bc_link.complete_monitor_updates_for_node(2, &nodes, MonitorUpdateSelector::Second) }, 0xfe => { - for id in &chan_bc_ids { - nodes[2].complete_monitor_update(id, MonitorUpdateSelector::Last); - } + bc_link.complete_monitor_updates_for_node(2, &nodes, MonitorUpdateSelector::Last) }, 0xff => { // Test that no channel is in a stuck state where neither party can send funds even // after we resolve all pending events. 
- if peers_ab_disconnected { - let init_1 = Init { - features: nodes[1].init_features(), - networks: None, - remote_network_address: None, - }; - nodes[0].peer_connected(nodes[1].get_our_node_id(), &init_1, true).unwrap(); - let init_0 = Init { - features: nodes[0].init_features(), - networks: None, - remote_network_address: None, - }; - nodes[1].peer_connected(nodes[0].get_our_node_id(), &init_0, false).unwrap(); - peers_ab_disconnected = false; - } - if peers_bc_disconnected { - let init_2 = Init { - features: nodes[2].init_features(), - networks: None, - remote_network_address: None, - }; - nodes[1].peer_connected(nodes[2].get_our_node_id(), &init_2, true).unwrap(); - let init_1 = Init { - features: nodes[1].init_features(), - networks: None, - remote_network_address: None, - }; - nodes[2].peer_connected(nodes[1].get_our_node_id(), &init_1, false).unwrap(); - peers_bc_disconnected = false; - } + ab_link.reconnect(&mut nodes); + bc_link.reconnect(&mut nodes); for op in SUPPORTED_SIGNER_OPS { nodes[0].keys_manager.enable_op_for_all_signers(op); @@ -2883,14 +2867,8 @@ pub fn do_test(data: &[u8], out: Out) { "It may take may iterations to settle the state, but it should not take forever" ); } - for id in &chan_ab_ids { - nodes[0].complete_all_monitor_updates(id); - nodes[1].complete_all_monitor_updates(id); - } - for id in &chan_bc_ids { - nodes[1].complete_all_monitor_updates(id); - nodes[2].complete_all_monitor_updates(id); - } + ab_link.complete_all_monitor_updates(&nodes); + bc_link.complete_all_monitor_updates(&nodes); if process_msg_events!(0, false, ProcessMessages::AllMessages) { last_pass_no_updates = false; continue; @@ -2955,16 +2933,33 @@ pub fn do_test(data: &[u8], out: Out) { } // Finally, make sure that at least one end of each channel can make a substantial payment - for &chan_id in &chan_ab_ids { + let send_after_settle = |source_idx: usize, + dest_idx: usize, + dest_chan_id, + amt, + payment_ctr: &mut u64| { + let source = &nodes[source_idx]; + let 
dest = &nodes[dest_idx]; + let (secret, hash) = + get_payment_secret_hash(dest, payment_ctr, &payment_preimages); + let mut id = PaymentId([0; 32]); + id.0[0..8].copy_from_slice(&payment_ctr.to_ne_bytes()); + let succeeded = send_payment(source, dest, dest_chan_id, amt, secret, hash, id); + if succeeded { + pending_payments.borrow_mut()[source_idx].push(id); + } + succeeded + }; + for &chan_id in ab_link.channel_ids() { assert!( - send(0, 1, chan_id, 10_000_000, &mut p_ctr) - || send(1, 0, chan_id, 10_000_000, &mut p_ctr) + send_after_settle(0, 1, chan_id, 10_000_000, &mut p_ctr) + || send_after_settle(1, 0, chan_id, 10_000_000, &mut p_ctr) ); } - for &chan_id in &chan_bc_ids { + for &chan_id in bc_link.channel_ids() { assert!( - send(1, 2, chan_id, 10_000_000, &mut p_ctr) - || send(2, 1, chan_id, 10_000_000, &mut p_ctr) + send_after_settle(1, 2, chan_id, 10_000_000, &mut p_ctr) + || send_after_settle(2, 1, chan_id, 10_000_000, &mut p_ctr) ); } From 8d5bd2798e364aaa07509f74002eca94f0c55334 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 21 Apr 2026 15:51:28 +0200 Subject: [PATCH 12/29] Extract chanmon harness payment helpers Move payment bookkeeping into a payment tracker. Sending, resolving, claiming, and stuck-payment assertions now share one state owner instead of borrowing several local maps. 
--- fuzz/src/chanmon_consistency.rs | 867 +++++++++++++++++++------------- 1 file changed, 514 insertions(+), 353 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 4e498e131e5..1554d670c4f 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -632,7 +632,7 @@ type ChanMan<'a> = ChannelManager< #[inline] fn get_payment_secret_hash( dest: &ChanMan, payment_ctr: &mut u64, - payment_preimages: &RefCell>, + payment_preimages: &mut HashMap, ) -> (PaymentSecret, PaymentHash) { *payment_ctr += 1; let mut payment_preimage = PaymentPreimage([0; 32]); @@ -641,7 +641,7 @@ fn get_payment_secret_hash( let payment_secret = dest .create_inbound_payment_for_hash(payment_hash, None, 3600, None) .expect("create_inbound_payment_for_hash failed"); - assert!(payment_preimages.borrow_mut().insert(payment_hash, payment_preimage).is_none()); + assert!(payment_preimages.insert(payment_hash, payment_preimage).is_none()); (payment_secret, payment_hash) } @@ -1610,6 +1610,179 @@ impl PeerLink { } } +struct PaymentTracker { + pending_payments: [Vec; 3], + resolved_payments: [HashMap>; 3], + claimed_payment_hashes: HashSet, + payment_preimages: HashMap, + payment_ctr: u64, +} + +impl PaymentTracker { + fn new() -> Self { + Self { + pending_payments: [Vec::new(), Vec::new(), Vec::new()], + resolved_payments: [new_hash_map(), new_hash_map(), new_hash_map()], + claimed_payment_hashes: HashSet::new(), + payment_preimages: new_hash_map(), + payment_ctr: 0, + } + } + + fn next_payment(&mut self, dest: &ChanMan) -> (PaymentSecret, PaymentHash, PaymentId) { + let (secret, hash) = + get_payment_secret_hash(dest, &mut self.payment_ctr, &mut self.payment_preimages); + let mut id = PaymentId([0; 32]); + id.0[0..8].copy_from_slice(&self.payment_ctr.to_ne_bytes()); + (secret, hash, id) + } + + fn send_direct( + &mut self, nodes: &[HarnessNode<'_>; 3], source_idx: usize, dest_idx: usize, + dest_chan_id: ChannelId, amt: u64, + ) -> 
bool { + let source = &nodes[source_idx]; + let dest = &nodes[dest_idx]; + let (secret, hash, id) = self.next_payment(dest); + let succeeded = send_payment(source, dest, dest_chan_id, amt, secret, hash, id); + if succeeded { + self.pending_payments[source_idx].push(id); + } + succeeded + } + + fn send_hop( + &mut self, nodes: &[HarnessNode<'_>; 3], source_idx: usize, middle_idx: usize, + middle_chan_id: ChannelId, dest_idx: usize, dest_chan_id: ChannelId, amt: u64, + ) { + let source = &nodes[source_idx]; + let middle = &nodes[middle_idx]; + let dest = &nodes[dest_idx]; + let (secret, hash, id) = self.next_payment(dest); + let succeeded = send_hop_payment( + source, + middle, + middle_chan_id, + dest, + dest_chan_id, + amt, + secret, + hash, + id, + ); + if succeeded { + self.pending_payments[source_idx].push(id); + } + } + + fn send_mpp_direct( + &mut self, nodes: &[HarnessNode<'_>; 3], source_idx: usize, dest_idx: usize, + dest_chan_ids: &[ChannelId], amt: u64, + ) { + let source = &nodes[source_idx]; + let dest = &nodes[dest_idx]; + let (secret, hash, id) = self.next_payment(dest); + let succeeded = send_mpp_payment(source, dest, dest_chan_ids, amt, secret, hash, id); + if succeeded { + self.pending_payments[source_idx].push(id); + } + } + + fn send_mpp_hop( + &mut self, nodes: &[HarnessNode<'_>; 3], source_idx: usize, middle_idx: usize, + middle_chan_ids: &[ChannelId], dest_idx: usize, dest_chan_ids: &[ChannelId], amt: u64, + ) { + let source = &nodes[source_idx]; + let middle = &nodes[middle_idx]; + let dest = &nodes[dest_idx]; + let (secret, hash, id) = self.next_payment(dest); + let succeeded = send_mpp_hop_payment( + source, + middle, + middle_chan_ids, + dest, + dest_chan_ids, + amt, + secret, + hash, + id, + ); + if succeeded { + self.pending_payments[source_idx].push(id); + } + } + + fn claim_payment(&mut self, node: &HarnessNode<'_>, payment_hash: PaymentHash, fail: bool) { + if fail { + node.fail_htlc_backwards(&payment_hash); + } else { + let 
payment_preimage = *self + .payment_preimages + .get(&payment_hash) + .expect("PaymentClaimable for unknown payment hash"); + node.claim_funds(payment_preimage); + self.claimed_payment_hashes.insert(payment_hash); + } + } + + fn mark_sent(&mut self, node_idx: usize, sent_id: PaymentId, payment_hash: PaymentHash) { + let idx_opt = self.pending_payments[node_idx].iter().position(|id| *id == sent_id); + if let Some(idx) = idx_opt { + self.pending_payments[node_idx].remove(idx); + self.resolved_payments[node_idx].insert(sent_id, Some(payment_hash)); + } else { + assert!(self.resolved_payments[node_idx].contains_key(&sent_id)); + } + } + + fn mark_resolved_without_hash(&mut self, node_idx: usize, payment_id: PaymentId) { + let idx_opt = self.pending_payments[node_idx].iter().position(|id| *id == payment_id); + if let Some(idx) = idx_opt { + self.pending_payments[node_idx].remove(idx); + self.resolved_payments[node_idx].insert(payment_id, None); + } else if !self.resolved_payments[node_idx].contains_key(&payment_id) { + // Some resolutions can arrive immediately, before the send helper records + // the payment as pending. Track them so later duplicate events are accepted. 
+ self.resolved_payments[node_idx].insert(payment_id, None); + } + } + + fn mark_successful_probe(&mut self, node_idx: usize, payment_id: PaymentId) { + let idx_opt = self.pending_payments[node_idx].iter().position(|id| *id == payment_id); + if let Some(idx) = idx_opt { + self.pending_payments[node_idx].remove(idx); + self.resolved_payments[node_idx].insert(payment_id, None); + } else { + assert!(self.resolved_payments[node_idx].contains_key(&payment_id)); + } + } + + fn assert_all_resolved(&self) { + for (idx, pending) in self.pending_payments.iter().enumerate() { + assert!( + pending.is_empty(), + "Node {} has {} stuck pending payments after settling all state", + idx, + pending.len() + ); + } + } + + fn assert_claims_reported(&self) { + for hash in self.claimed_payment_hashes.iter() { + let found = self + .resolved_payments + .iter() + .any(|node_resolved| node_resolved.values().any(|h| h.as_ref() == Some(hash))); + assert!( + found, + "Payment {:?} was claimed by receiver but sender never got PaymentSent", + hash + ); + } + } +} + fn build_node_config(chan_type: ChanType) -> UserConfig { let mut config = UserConfig::default(); config.channel_config.forwarding_fee_proportional_millionths = 0; @@ -2088,6 +2261,125 @@ fn process_msg_events_impl( had_events } +fn process_events_impl( + node_idx: usize, fail: bool, nodes: &[HarnessNode<'_>; 3], chain_state: &mut ChainState, + payments: &mut PaymentTracker, +) -> bool { + // Multiple HTLCs can resolve for the same payment hash, so deduplicate + // claim/fail handling per event batch. + let mut claim_set = new_hash_map(); + let mut events = nodes[node_idx].get_and_clear_pending_events(); + let had_events = !events.is_empty(); + for event in events.drain(..) { + match event { + events::Event::PaymentClaimable { payment_hash, .. } => { + if claim_set.insert(payment_hash.0, ()).is_none() { + payments.claim_payment(&nodes[node_idx], payment_hash, fail); + } + }, + events::Event::PaymentSent { payment_id, payment_hash, .. 
} => { + payments.mark_sent(node_idx, payment_id.unwrap(), payment_hash); + }, + // Even though we don't explicitly send probes, because probes are detected based on + // hashing the payment hash+preimage, it is rather trivial for the fuzzer to build + // payments that accidentally end up looking like probes. + events::Event::ProbeSuccessful { payment_id, .. } => { + payments.mark_successful_probe(node_idx, payment_id); + }, + events::Event::PaymentFailed { payment_id, .. } + | events::Event::ProbeFailed { payment_id, .. } => { + payments.mark_resolved_without_hash(node_idx, payment_id); + }, + events::Event::PaymentClaimed { .. } => {}, + events::Event::PaymentPathSuccessful { .. } => {}, + events::Event::PaymentPathFailed { .. } => {}, + events::Event::PaymentForwarded { .. } if node_idx == 1 => {}, + events::Event::ChannelReady { .. } => {}, + events::Event::HTLCHandlingFailed { .. } => {}, + events::Event::FundingTransactionReadyForSigning { + channel_id, + counterparty_node_id, + unsigned_transaction, + .. + } => { + let signed_tx = nodes[node_idx].wallet.sign_tx(unsigned_transaction).unwrap(); + nodes[node_idx] + .funding_transaction_signed(&channel_id, &counterparty_node_id, signed_tx) + .unwrap(); + }, + events::Event::SplicePending { new_funding_txo, .. } => { + let mut txs = nodes[node_idx].broadcaster.txn_broadcasted.borrow_mut(); + assert!(txs.len() >= 1); + let splice_tx = txs.remove(0); + assert_eq!(new_funding_txo.txid, splice_tx.compute_txid()); + chain_state.add_pending_tx(splice_tx); + }, + events::Event::SpliceFailed { .. } => {}, + events::Event::DiscardFunding { + funding_info: + events::FundingInfo::Contribution { .. } | events::FundingInfo::Tx { .. }, + .. 
+ } => {}, + _ => panic!("Unhandled event"), + } + } + while nodes[node_idx].needs_pending_htlc_processing() { + nodes[node_idx].process_pending_htlc_forwards(); + } + had_events +} + +fn process_all_events_impl( + nodes: &[HarnessNode<'_>; 3], out: &Out, ab_link: &PeerLink, bc_link: &PeerLink, + chain_state: &mut ChainState, payments: &mut PaymentTracker, queues: &mut EventQueues, +) { + let mut last_pass_no_updates = false; + for i in 0..std::usize::MAX { + if i == 100 { + panic!( + "It may take may iterations to settle the state, but it should not take forever" + ); + } + // First, make sure no monitor updates are pending. + ab_link.complete_all_monitor_updates(nodes); + bc_link.complete_all_monitor_updates(nodes); + // Then, make sure any current forwards make their way to their destination. + if process_msg_events_impl(0, false, ProcessMessages::AllMessages, nodes, out, queues) { + last_pass_no_updates = false; + continue; + } + if process_msg_events_impl(1, false, ProcessMessages::AllMessages, nodes, out, queues) { + last_pass_no_updates = false; + continue; + } + if process_msg_events_impl(2, false, ProcessMessages::AllMessages, nodes, out, queues) { + last_pass_no_updates = false; + continue; + } + // Finally, make sure any payments are claimed. + if process_events_impl(0, false, nodes, chain_state, payments) { + last_pass_no_updates = false; + continue; + } + if process_events_impl(1, false, nodes, chain_state, payments) { + last_pass_no_updates = false; + continue; + } + if process_events_impl(2, false, nodes, chain_state, payments) { + last_pass_no_updates = false; + continue; + } + if last_pass_no_updates { + // In some cases, `process_msg_events_impl` may generate a message to send, but + // block sending until `complete_all_monitor_updates` gets called on the next + // iteration. Thus, we only exit if we manage two iterations with no messages or + // events to process. 
+ break; + } + last_pass_no_updates = true; + } +} + #[inline] pub fn do_test(data: &[u8], out: Out) { let router = FuzzRouter {}; @@ -2230,19 +2522,12 @@ pub fn do_test(data: &[u8], out: Out) { let chan_a_id = ab_link.first_channel_id(); let chan_b_id = bc_link.first_channel_id(); let mut queues = EventQueues::new(); - let mut p_ctr: u64 = 0; + let mut payments = PaymentTracker::new(); for node in &mut nodes { node.serialized_manager = node.encode(); } - let pending_payments = RefCell::new([Vec::new(), Vec::new(), Vec::new()]); - let resolved_payments: RefCell<[HashMap>; 3]> = - RefCell::new([new_hash_map(), new_hash_map(), new_hash_map()]); - let claimed_payment_hashes: RefCell> = RefCell::new(HashSet::new()); - let payment_preimages: RefCell> = - RefCell::new(new_hash_map()); - macro_rules! test_return { () => {{ assert_test_invariants(&nodes); @@ -2284,112 +2569,7 @@ pub fn do_test(data: &[u8], out: Out) { macro_rules! process_events { ($node: expr, $fail: expr) => {{ - // Multiple HTLCs can resolve for the same payment hash, so deduplicate - // claim/fail handling per event batch. - let mut claim_set = new_hash_map(); - let mut events = nodes[$node].get_and_clear_pending_events(); - let had_events = !events.is_empty(); - let mut pending_payments = pending_payments.borrow_mut(); - let mut resolved_payments = resolved_payments.borrow_mut(); - for event in events.drain(..) { - match event { - events::Event::PaymentClaimable { payment_hash, .. } => { - if claim_set.insert(payment_hash.0, ()).is_none() { - if $fail { - nodes[$node].fail_htlc_backwards(&payment_hash); - } else { - let payment_preimage = *payment_preimages - .borrow() - .get(&payment_hash) - .expect("PaymentClaimable for unknown payment hash"); - nodes[$node].claim_funds(payment_preimage); - claimed_payment_hashes.borrow_mut().insert(payment_hash); - } - } - }, - events::Event::PaymentSent { payment_id, payment_hash, .. 
} => { - let sent_id = payment_id.unwrap(); - let idx_opt = - pending_payments[$node].iter().position(|id| *id == sent_id); - if let Some(idx) = idx_opt { - pending_payments[$node].remove(idx); - resolved_payments[$node].insert(sent_id, Some(payment_hash)); - } else { - assert!(resolved_payments[$node].contains_key(&sent_id)); - } - }, - // Even though we don't explicitly send probes, because probes are - // detected based on hashing the payment hash+preimage, its rather - // trivial for the fuzzer to build payments that accidentally end up - // looking like probes. - events::Event::ProbeSuccessful { payment_id, .. } => { - let idx_opt = - pending_payments[$node].iter().position(|id| *id == payment_id); - if let Some(idx) = idx_opt { - pending_payments[$node].remove(idx); - resolved_payments[$node].insert(payment_id, None); - } else { - assert!(resolved_payments[$node].contains_key(&payment_id)); - } - }, - events::Event::PaymentFailed { payment_id, .. } - | events::Event::ProbeFailed { payment_id, .. } => { - let idx_opt = - pending_payments[$node].iter().position(|id| *id == payment_id); - if let Some(idx) = idx_opt { - pending_payments[$node].remove(idx); - resolved_payments[$node].insert(payment_id, None); - } else if !resolved_payments[$node].contains_key(&payment_id) { - // Payment failed immediately on send, so it was never added to - // pending_payments. Add it to resolved_payments to track it. - resolved_payments[$node].insert(payment_id, None); - } - }, - events::Event::PaymentClaimed { .. } => {}, - events::Event::PaymentPathSuccessful { .. } => {}, - events::Event::PaymentPathFailed { .. } => {}, - events::Event::PaymentForwarded { .. } if $node == 1 => {}, - events::Event::ChannelReady { .. } => {}, - events::Event::HTLCHandlingFailed { .. } => {}, - - events::Event::FundingTransactionReadyForSigning { - channel_id, - counterparty_node_id, - unsigned_transaction, - .. 
- } => { - let signed_tx = - nodes[$node].wallet.sign_tx(unsigned_transaction).unwrap(); - nodes[$node] - .funding_transaction_signed( - &channel_id, - &counterparty_node_id, - signed_tx, - ) - .unwrap(); - }, - events::Event::SplicePending { new_funding_txo, .. } => { - let mut txs = nodes[$node].broadcaster.txn_broadcasted.borrow_mut(); - assert!(txs.len() >= 1); - let splice_tx = txs.remove(0); - assert_eq!(new_funding_txo.txid, splice_tx.compute_txid()); - chain_state.add_pending_tx(splice_tx); - }, - events::Event::SpliceFailed { .. } => {}, - events::Event::DiscardFunding { - funding_info: - events::FundingInfo::Contribution { .. } - | events::FundingInfo::Tx { .. }, - .. - } => {}, - - _ => panic!("Unhandled event: {:?}", event), - } - } - while nodes[$node].needs_pending_htlc_processing() { - nodes[$node].process_pending_htlc_forwards(); - } - had_events + process_events_impl($node, $fail, &nodes, &mut chain_state, &mut payments) }}; } @@ -2399,97 +2579,6 @@ pub fn do_test(data: &[u8], out: Out) { }}; } - let send = - |source_idx: usize, dest_idx: usize, dest_chan_id, amt, payment_ctr: &mut u64| { - let source = &nodes[source_idx]; - let dest = &nodes[dest_idx]; - let (secret, hash) = get_payment_secret_hash(dest, payment_ctr, &payment_preimages); - let mut id = PaymentId([0; 32]); - id.0[0..8].copy_from_slice(&payment_ctr.to_ne_bytes()); - let succeeded = send_payment(source, dest, dest_chan_id, amt, secret, hash, id); - if succeeded { - pending_payments.borrow_mut()[source_idx].push(id); - } - succeeded - }; - let send_noret = |source_idx, dest_idx, dest_chan_id, amt, payment_ctr: &mut u64| { - send(source_idx, dest_idx, dest_chan_id, amt, payment_ctr); - }; - - let send_hop_noret = |source_idx: usize, - middle_idx: usize, - middle_chan_id: ChannelId, - dest_idx: usize, - dest_chan_id: ChannelId, - amt: u64, - payment_ctr: &mut u64| { - let source = &nodes[source_idx]; - let middle = &nodes[middle_idx]; - let dest = &nodes[dest_idx]; - let (secret, hash) 
= get_payment_secret_hash(dest, payment_ctr, &payment_preimages); - let mut id = PaymentId([0; 32]); - id.0[0..8].copy_from_slice(&payment_ctr.to_ne_bytes()); - let succeeded = send_hop_payment( - source, - middle, - middle_chan_id, - dest, - dest_chan_id, - amt, - secret, - hash, - id, - ); - if succeeded { - pending_payments.borrow_mut()[source_idx].push(id); - } - }; - - let send_mpp_direct = |source_idx: usize, - dest_idx: usize, - dest_chan_ids: &[ChannelId], - amt: u64, - payment_ctr: &mut u64| { - let source = &nodes[source_idx]; - let dest = &nodes[dest_idx]; - let (secret, hash) = get_payment_secret_hash(dest, payment_ctr, &payment_preimages); - let mut id = PaymentId([0; 32]); - id.0[0..8].copy_from_slice(&payment_ctr.to_ne_bytes()); - let succeeded = send_mpp_payment(source, dest, dest_chan_ids, amt, secret, hash, id); - if succeeded { - pending_payments.borrow_mut()[source_idx].push(id); - } - }; - - let send_mpp_hop = |source_idx: usize, - middle_idx: usize, - middle_chan_ids: &[ChannelId], - dest_idx: usize, - dest_chan_ids: &[ChannelId], - amt: u64, - payment_ctr: &mut u64| { - let source = &nodes[source_idx]; - let middle = &nodes[middle_idx]; - let dest = &nodes[dest_idx]; - let (secret, hash) = get_payment_secret_hash(dest, payment_ctr, &payment_preimages); - let mut id = PaymentId([0; 32]); - id.0[0..8].copy_from_slice(&payment_ctr.to_ne_bytes()); - let succeeded = send_mpp_hop_payment( - source, - middle, - middle_chan_ids, - dest, - dest_chan_ids, - amt, - secret, - hash, - id, - ); - if succeeded { - pending_payments.borrow_mut()[source_idx].push(id); - } - }; - let v = get_slice!(1)[0]; out.locked_write(format!("READ A BYTE! 
HANDLING INPUT {:x}...........\n", v).as_bytes()); match v { @@ -2560,74 +2649,208 @@ pub fn do_test(data: &[u8], out: Out) { 0x27 => process_ev_noret!(2, false), // 1/10th the channel size: - 0x30 => send_noret(0, 1, chan_a_id, 10_000_000, &mut p_ctr), - 0x31 => send_noret(1, 0, chan_a_id, 10_000_000, &mut p_ctr), - 0x32 => send_noret(1, 2, chan_b_id, 10_000_000, &mut p_ctr), - 0x33 => send_noret(2, 1, chan_b_id, 10_000_000, &mut p_ctr), - 0x34 => send_hop_noret(0, 1, chan_a_id, 2, chan_b_id, 10_000_000, &mut p_ctr), - 0x35 => send_hop_noret(2, 1, chan_b_id, 0, chan_a_id, 10_000_000, &mut p_ctr), - - 0x38 => send_noret(0, 1, chan_a_id, 1_000_000, &mut p_ctr), - 0x39 => send_noret(1, 0, chan_a_id, 1_000_000, &mut p_ctr), - 0x3a => send_noret(1, 2, chan_b_id, 1_000_000, &mut p_ctr), - 0x3b => send_noret(2, 1, chan_b_id, 1_000_000, &mut p_ctr), - 0x3c => send_hop_noret(0, 1, chan_a_id, 2, chan_b_id, 1_000_000, &mut p_ctr), - 0x3d => send_hop_noret(2, 1, chan_b_id, 0, chan_a_id, 1_000_000, &mut p_ctr), - - 0x40 => send_noret(0, 1, chan_a_id, 100_000, &mut p_ctr), - 0x41 => send_noret(1, 0, chan_a_id, 100_000, &mut p_ctr), - 0x42 => send_noret(1, 2, chan_b_id, 100_000, &mut p_ctr), - 0x43 => send_noret(2, 1, chan_b_id, 100_000, &mut p_ctr), - 0x44 => send_hop_noret(0, 1, chan_a_id, 2, chan_b_id, 100_000, &mut p_ctr), - 0x45 => send_hop_noret(2, 1, chan_b_id, 0, chan_a_id, 100_000, &mut p_ctr), - - 0x48 => send_noret(0, 1, chan_a_id, 10_000, &mut p_ctr), - 0x49 => send_noret(1, 0, chan_a_id, 10_000, &mut p_ctr), - 0x4a => send_noret(1, 2, chan_b_id, 10_000, &mut p_ctr), - 0x4b => send_noret(2, 1, chan_b_id, 10_000, &mut p_ctr), - 0x4c => send_hop_noret(0, 1, chan_a_id, 2, chan_b_id, 10_000, &mut p_ctr), - 0x4d => send_hop_noret(2, 1, chan_b_id, 0, chan_a_id, 10_000, &mut p_ctr), - - 0x50 => send_noret(0, 1, chan_a_id, 1_000, &mut p_ctr), - 0x51 => send_noret(1, 0, chan_a_id, 1_000, &mut p_ctr), - 0x52 => send_noret(1, 2, chan_b_id, 1_000, &mut p_ctr), - 0x53 => 
send_noret(2, 1, chan_b_id, 1_000, &mut p_ctr), - 0x54 => send_hop_noret(0, 1, chan_a_id, 2, chan_b_id, 1_000, &mut p_ctr), - 0x55 => send_hop_noret(2, 1, chan_b_id, 0, chan_a_id, 1_000, &mut p_ctr), - - 0x58 => send_noret(0, 1, chan_a_id, 100, &mut p_ctr), - 0x59 => send_noret(1, 0, chan_a_id, 100, &mut p_ctr), - 0x5a => send_noret(1, 2, chan_b_id, 100, &mut p_ctr), - 0x5b => send_noret(2, 1, chan_b_id, 100, &mut p_ctr), - 0x5c => send_hop_noret(0, 1, chan_a_id, 2, chan_b_id, 100, &mut p_ctr), - 0x5d => send_hop_noret(2, 1, chan_b_id, 0, chan_a_id, 100, &mut p_ctr), - - 0x60 => send_noret(0, 1, chan_a_id, 10, &mut p_ctr), - 0x61 => send_noret(1, 0, chan_a_id, 10, &mut p_ctr), - 0x62 => send_noret(1, 2, chan_b_id, 10, &mut p_ctr), - 0x63 => send_noret(2, 1, chan_b_id, 10, &mut p_ctr), - 0x64 => send_hop_noret(0, 1, chan_a_id, 2, chan_b_id, 10, &mut p_ctr), - 0x65 => send_hop_noret(2, 1, chan_b_id, 0, chan_a_id, 10, &mut p_ctr), - - 0x68 => send_noret(0, 1, chan_a_id, 1, &mut p_ctr), - 0x69 => send_noret(1, 0, chan_a_id, 1, &mut p_ctr), - 0x6a => send_noret(1, 2, chan_b_id, 1, &mut p_ctr), - 0x6b => send_noret(2, 1, chan_b_id, 1, &mut p_ctr), - 0x6c => send_hop_noret(0, 1, chan_a_id, 2, chan_b_id, 1, &mut p_ctr), - 0x6d => send_hop_noret(2, 1, chan_b_id, 0, chan_a_id, 1, &mut p_ctr), + 0x30 => { + payments.send_direct(&nodes, 0, 1, chan_a_id, 10_000_000); + }, + 0x31 => { + payments.send_direct(&nodes, 1, 0, chan_a_id, 10_000_000); + }, + 0x32 => { + payments.send_direct(&nodes, 1, 2, chan_b_id, 10_000_000); + }, + 0x33 => { + payments.send_direct(&nodes, 2, 1, chan_b_id, 10_000_000); + }, + 0x34 => { + payments.send_hop(&nodes, 0, 1, chan_a_id, 2, chan_b_id, 10_000_000); + }, + 0x35 => { + payments.send_hop(&nodes, 2, 1, chan_b_id, 0, chan_a_id, 10_000_000); + }, + + 0x38 => { + payments.send_direct(&nodes, 0, 1, chan_a_id, 1_000_000); + }, + 0x39 => { + payments.send_direct(&nodes, 1, 0, chan_a_id, 1_000_000); + }, + 0x3a => { + payments.send_direct(&nodes, 1, 2, 
chan_b_id, 1_000_000); + }, + 0x3b => { + payments.send_direct(&nodes, 2, 1, chan_b_id, 1_000_000); + }, + 0x3c => { + payments.send_hop(&nodes, 0, 1, chan_a_id, 2, chan_b_id, 1_000_000); + }, + 0x3d => { + payments.send_hop(&nodes, 2, 1, chan_b_id, 0, chan_a_id, 1_000_000); + }, + + 0x40 => { + payments.send_direct(&nodes, 0, 1, chan_a_id, 100_000); + }, + 0x41 => { + payments.send_direct(&nodes, 1, 0, chan_a_id, 100_000); + }, + 0x42 => { + payments.send_direct(&nodes, 1, 2, chan_b_id, 100_000); + }, + 0x43 => { + payments.send_direct(&nodes, 2, 1, chan_b_id, 100_000); + }, + 0x44 => { + payments.send_hop(&nodes, 0, 1, chan_a_id, 2, chan_b_id, 100_000); + }, + 0x45 => { + payments.send_hop(&nodes, 2, 1, chan_b_id, 0, chan_a_id, 100_000); + }, + + 0x48 => { + payments.send_direct(&nodes, 0, 1, chan_a_id, 10_000); + }, + 0x49 => { + payments.send_direct(&nodes, 1, 0, chan_a_id, 10_000); + }, + 0x4a => { + payments.send_direct(&nodes, 1, 2, chan_b_id, 10_000); + }, + 0x4b => { + payments.send_direct(&nodes, 2, 1, chan_b_id, 10_000); + }, + 0x4c => { + payments.send_hop(&nodes, 0, 1, chan_a_id, 2, chan_b_id, 10_000); + }, + 0x4d => { + payments.send_hop(&nodes, 2, 1, chan_b_id, 0, chan_a_id, 10_000); + }, + + 0x50 => { + payments.send_direct(&nodes, 0, 1, chan_a_id, 1_000); + }, + 0x51 => { + payments.send_direct(&nodes, 1, 0, chan_a_id, 1_000); + }, + 0x52 => { + payments.send_direct(&nodes, 1, 2, chan_b_id, 1_000); + }, + 0x53 => { + payments.send_direct(&nodes, 2, 1, chan_b_id, 1_000); + }, + 0x54 => { + payments.send_hop(&nodes, 0, 1, chan_a_id, 2, chan_b_id, 1_000); + }, + 0x55 => { + payments.send_hop(&nodes, 2, 1, chan_b_id, 0, chan_a_id, 1_000); + }, + + 0x58 => { + payments.send_direct(&nodes, 0, 1, chan_a_id, 100); + }, + 0x59 => { + payments.send_direct(&nodes, 1, 0, chan_a_id, 100); + }, + 0x5a => { + payments.send_direct(&nodes, 1, 2, chan_b_id, 100); + }, + 0x5b => { + payments.send_direct(&nodes, 2, 1, chan_b_id, 100); + }, + 0x5c => { + 
payments.send_hop(&nodes, 0, 1, chan_a_id, 2, chan_b_id, 100); + }, + 0x5d => { + payments.send_hop(&nodes, 2, 1, chan_b_id, 0, chan_a_id, 100); + }, + + 0x60 => { + payments.send_direct(&nodes, 0, 1, chan_a_id, 10); + }, + 0x61 => { + payments.send_direct(&nodes, 1, 0, chan_a_id, 10); + }, + 0x62 => { + payments.send_direct(&nodes, 1, 2, chan_b_id, 10); + }, + 0x63 => { + payments.send_direct(&nodes, 2, 1, chan_b_id, 10); + }, + 0x64 => { + payments.send_hop(&nodes, 0, 1, chan_a_id, 2, chan_b_id, 10); + }, + 0x65 => { + payments.send_hop(&nodes, 2, 1, chan_b_id, 0, chan_a_id, 10); + }, + + 0x68 => { + payments.send_direct(&nodes, 0, 1, chan_a_id, 1); + }, + 0x69 => { + payments.send_direct(&nodes, 1, 0, chan_a_id, 1); + }, + 0x6a => { + payments.send_direct(&nodes, 1, 2, chan_b_id, 1); + }, + 0x6b => { + payments.send_direct(&nodes, 2, 1, chan_b_id, 1); + }, + 0x6c => { + payments.send_hop(&nodes, 0, 1, chan_a_id, 2, chan_b_id, 1); + }, + 0x6d => { + payments.send_hop(&nodes, 2, 1, chan_b_id, 0, chan_a_id, 1); + }, // MPP payments // 0x70: direct MPP from 0 to 1 (multi A-B channels) - 0x70 => send_mpp_direct(0, 1, &chan_ab_ids, 1_000_000, &mut p_ctr), + 0x70 => { + payments.send_mpp_direct(&nodes, 0, 1, ab_link.channel_ids(), 1_000_000); + }, // 0x71: MPP 0->1->2, multi channels on first hop (A-B) - 0x71 => send_mpp_hop(0, 1, &chan_ab_ids, 2, &[chan_b_id], 1_000_000, &mut p_ctr), + 0x71 => { + payments.send_mpp_hop( + &nodes, + 0, + 1, + ab_link.channel_ids(), + 2, + &[chan_b_id], + 1_000_000, + ); + }, // 0x72: MPP 0->1->2, multi channels on both hops (A-B and B-C) - 0x72 => send_mpp_hop(0, 1, &chan_ab_ids, 2, &chan_bc_ids, 1_000_000, &mut p_ctr), + 0x72 => { + payments.send_mpp_hop( + &nodes, + 0, + 1, + ab_link.channel_ids(), + 2, + bc_link.channel_ids(), + 1_000_000, + ); + }, // 0x73: MPP 0->1->2, multi channels on second hop (B-C) - 0x73 => send_mpp_hop(0, 1, &[chan_a_id], 2, &chan_bc_ids, 1_000_000, &mut p_ctr), + 0x73 => { + payments.send_mpp_hop( + 
&nodes, + 0, + 1, + &[chan_a_id], + 2, + bc_link.channel_ids(), + 1_000_000, + ); + }, // 0x74: direct MPP from 0 to 1, multi parts over single channel 0x74 => { - send_mpp_direct(0, 1, &[chan_a_id, chan_a_id, chan_a_id], 1_000_000, &mut p_ctr) + payments.send_mpp_direct( + &nodes, + 0, + 1, + &[chan_a_id, chan_a_id, chan_a_id], + 1_000_000, + ); }, 0x80 => nodes[0].bump_fee_estimate(chan_type), @@ -2858,50 +3081,15 @@ pub fn do_test(data: &[u8], out: Out) { nodes[1].signer_unblocked(None); nodes[2].signer_unblocked(None); - macro_rules! process_all_events { - () => {{ - let mut last_pass_no_updates = false; - for i in 0..std::usize::MAX { - if i == 100 { - panic!( - "It may take may iterations to settle the state, but it should not take forever" - ); - } - ab_link.complete_all_monitor_updates(&nodes); - bc_link.complete_all_monitor_updates(&nodes); - if process_msg_events!(0, false, ProcessMessages::AllMessages) { - last_pass_no_updates = false; - continue; - } - if process_msg_events!(1, false, ProcessMessages::AllMessages) { - last_pass_no_updates = false; - continue; - } - if process_msg_events!(2, false, ProcessMessages::AllMessages) { - last_pass_no_updates = false; - continue; - } - if process_events!(0, false) { - last_pass_no_updates = false; - continue; - } - if process_events!(1, false) { - last_pass_no_updates = false; - continue; - } - if process_events!(2, false) { - last_pass_no_updates = false; - continue; - } - if last_pass_no_updates { - break; - } - last_pass_no_updates = true; - } - }}; - } - - process_all_events!(); + process_all_events_impl( + &nodes, + &out, + &ab_link, + &bc_link, + &mut chain_state, + &mut payments, + &mut queues, + ); // Since MPP payments are supported, we wait until we fully settle the state of all // channels to see if we have any committed HTLC parts of an MPP payment that need @@ -2909,57 +3097,30 @@ pub fn do_test(data: &[u8], out: Out) { for node in &nodes { node.timer_tick_occurred(); } - process_all_events!(); - - 
for (idx, pending) in pending_payments.borrow().iter().enumerate() { - assert!( - pending.is_empty(), - "Node {} has {} stuck pending payments after settling all state", - idx, - pending.len() - ); - } + process_all_events_impl( + &nodes, + &out, + &ab_link, + &bc_link, + &mut chain_state, + &mut payments, + &mut queues, + ); - let resolved = resolved_payments.borrow(); - for hash in claimed_payment_hashes.borrow().iter() { - let found = resolved.iter().any(|node_resolved| { - node_resolved.values().any(|h| h.as_ref() == Some(hash)) - }); - assert!( - found, - "Payment {:?} was claimed by receiver but sender never got PaymentSent", - hash - ); - } + payments.assert_all_resolved(); + payments.assert_claims_reported(); // Finally, make sure that at least one end of each channel can make a substantial payment - let send_after_settle = |source_idx: usize, - dest_idx: usize, - dest_chan_id, - amt, - payment_ctr: &mut u64| { - let source = &nodes[source_idx]; - let dest = &nodes[dest_idx]; - let (secret, hash) = - get_payment_secret_hash(dest, payment_ctr, &payment_preimages); - let mut id = PaymentId([0; 32]); - id.0[0..8].copy_from_slice(&payment_ctr.to_ne_bytes()); - let succeeded = send_payment(source, dest, dest_chan_id, amt, secret, hash, id); - if succeeded { - pending_payments.borrow_mut()[source_idx].push(id); - } - succeeded - }; for &chan_id in ab_link.channel_ids() { assert!( - send_after_settle(0, 1, chan_id, 10_000_000, &mut p_ctr) - || send_after_settle(1, 0, chan_id, 10_000_000, &mut p_ctr) + payments.send_direct(&nodes, 0, 1, chan_id, 10_000_000) + || payments.send_direct(&nodes, 1, 0, chan_id, 10_000_000) ); } for &chan_id in bc_link.channel_ids() { assert!( - send_after_settle(1, 2, chan_id, 10_000_000, &mut p_ctr) - || send_after_settle(2, 1, chan_id, 10_000_000, &mut p_ctr) + payments.send_direct(&nodes, 1, 2, chan_id, 10_000_000) + || payments.send_direct(&nodes, 2, 1, chan_id, 10_000_000) ); } From 23734df9ec28ec6f981e17e4467e7041a5ec01fa Mon Sep 
17 00:00:00 2001 From: Joost Jager Date: Tue, 21 Apr 2026 17:46:44 +0200 Subject: [PATCH 13/29] Build chanmon harness setup Collect the node, link, queue, chain, and payment setup into a harness builder. This keeps the initial fuzz scenario construction together and leaves the action loop with a smaller state surface. --- fuzz/src/chanmon_consistency.rs | 593 +++++++++++++++++--------------- 1 file changed, 317 insertions(+), 276 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 1554d670c4f..d5e547ae185 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -972,14 +972,6 @@ struct HarnessNode<'a> { last_htlc_clear_fee: u32, } -impl<'a> std::ops::Deref for HarnessNode<'a> { - type Target = ChanMan<'a>; - - fn deref(&self) -> &Self::Target { - &self.node - } -} - impl<'a> HarnessNode<'a> { fn build_loggers( node_id: u8, out: &Out, @@ -1333,13 +1325,6 @@ impl<'a> HarnessNode<'a> { } } -#[derive(Copy, Clone)] -enum MonitorUpdateSelector { - First, - Second, - Last, -} - struct EventQueues { ab: Vec, ba: Vec, @@ -1464,7 +1449,7 @@ impl EventQueues { fn drain_on_disconnect(&mut self, edge_node: usize, nodes: &[HarnessNode<'_>; 3]) { match edge_node { 0 => { - for event in nodes[0].get_and_clear_pending_msg_events() { + for event in nodes[0].node.get_and_clear_pending_msg_events() { match event { MessageSendEvent::UpdateHTLCs { .. } => {}, MessageSendEvent::SendRevokeAndACK { .. } => {}, @@ -1480,10 +1465,14 @@ impl EventQueues { _ => panic!("Unhandled message event"), } } - self.route_from_middle(nodes[1].get_and_clear_pending_msg_events(), Some(0), nodes); + self.route_from_middle( + nodes[1].node.get_and_clear_pending_msg_events(), + Some(0), + nodes, + ); }, 2 => { - for event in nodes[2].get_and_clear_pending_msg_events() { + for event in nodes[2].node.get_and_clear_pending_msg_events() { match event { MessageSendEvent::UpdateHTLCs { .. } => {}, MessageSendEvent::SendRevokeAndACK { .. 
} => {}, @@ -1499,7 +1488,11 @@ impl EventQueues { _ => panic!("Unhandled message event"), } } - self.route_from_middle(nodes[1].get_and_clear_pending_msg_events(), Some(2), nodes); + self.route_from_middle( + nodes[1].node.get_and_clear_pending_msg_events(), + Some(2), + nodes, + ); }, _ => panic!("unsupported disconnected edge"), } @@ -1548,8 +1541,8 @@ impl PeerLink { } let node_a_id = nodes[self.node_a].our_node_id(); let node_b_id = nodes[self.node_b].our_node_id(); - nodes[self.node_a].peer_disconnected(node_b_id); - nodes[self.node_b].peer_disconnected(node_a_id); + nodes[self.node_a].node.peer_disconnected(node_b_id); + nodes[self.node_b].node.peer_disconnected(node_a_id); self.disconnected = true; let edge_node = if self.node_a == 1 { self.node_b @@ -1569,17 +1562,17 @@ impl PeerLink { let node_a_id = nodes[self.node_a].our_node_id(); let node_b_id = nodes[self.node_b].our_node_id(); let init_b = Init { - features: nodes[self.node_b].init_features(), + features: nodes[self.node_b].node.init_features(), networks: None, remote_network_address: None, }; - nodes[self.node_a].peer_connected(node_b_id, &init_b, true).unwrap(); + nodes[self.node_a].node.peer_connected(node_b_id, &init_b, true).unwrap(); let init_a = Init { - features: nodes[self.node_a].init_features(), + features: nodes[self.node_a].node.init_features(), networks: None, remote_network_address: None, }; - nodes[self.node_b].peer_connected(node_a_id, &init_a, false).unwrap(); + nodes[self.node_b].node.peer_connected(node_a_id, &init_a, false).unwrap(); self.disconnected = false; } @@ -1594,38 +1587,45 @@ impl PeerLink { let remaining_node = if restarted_node == self.node_a { self.node_b } else { self.node_a }; let restarted_node_id = nodes[restarted_node].our_node_id(); - nodes[remaining_node].peer_disconnected(restarted_node_id); + nodes[remaining_node].node.peer_disconnected(restarted_node_id); self.disconnected = true; if remaining_node == 1 { queues.route_from_middle( - 
nodes[1].get_and_clear_pending_msg_events(), + nodes[1].node.get_and_clear_pending_msg_events(), Some(restarted_node), nodes, ); } else { - nodes[remaining_node].get_and_clear_pending_msg_events(); + nodes[remaining_node].node.get_and_clear_pending_msg_events(); } queues.clear_link(self); } } +#[derive(Copy, Clone)] +enum MonitorUpdateSelector { + First, + Second, + Last, +} + struct PaymentTracker { + payment_ctr: u64, pending_payments: [Vec; 3], resolved_payments: [HashMap>; 3], claimed_payment_hashes: HashSet, payment_preimages: HashMap, - payment_ctr: u64, } impl PaymentTracker { fn new() -> Self { Self { + payment_ctr: 0, pending_payments: [Vec::new(), Vec::new(), Vec::new()], resolved_payments: [new_hash_map(), new_hash_map(), new_hash_map()], claimed_payment_hashes: HashSet::new(), payment_preimages: new_hash_map(), - payment_ctr: 0, } } @@ -1641,8 +1641,8 @@ impl PaymentTracker { &mut self, nodes: &[HarnessNode<'_>; 3], source_idx: usize, dest_idx: usize, dest_chan_id: ChannelId, amt: u64, ) -> bool { - let source = &nodes[source_idx]; - let dest = &nodes[dest_idx]; + let source = &nodes[source_idx].node; + let dest = &nodes[dest_idx].node; let (secret, hash, id) = self.next_payment(dest); let succeeded = send_payment(source, dest, dest_chan_id, amt, secret, hash, id); if succeeded { @@ -1655,9 +1655,9 @@ impl PaymentTracker { &mut self, nodes: &[HarnessNode<'_>; 3], source_idx: usize, middle_idx: usize, middle_chan_id: ChannelId, dest_idx: usize, dest_chan_id: ChannelId, amt: u64, ) { - let source = &nodes[source_idx]; - let middle = &nodes[middle_idx]; - let dest = &nodes[dest_idx]; + let source = &nodes[source_idx].node; + let middle = &nodes[middle_idx].node; + let dest = &nodes[dest_idx].node; let (secret, hash, id) = self.next_payment(dest); let succeeded = send_hop_payment( source, @@ -1679,8 +1679,8 @@ impl PaymentTracker { &mut self, nodes: &[HarnessNode<'_>; 3], source_idx: usize, dest_idx: usize, dest_chan_ids: &[ChannelId], amt: u64, ) { - let 
source = &nodes[source_idx]; - let dest = &nodes[dest_idx]; + let source = &nodes[source_idx].node; + let dest = &nodes[dest_idx].node; let (secret, hash, id) = self.next_payment(dest); let succeeded = send_mpp_payment(source, dest, dest_chan_ids, amt, secret, hash, id); if succeeded { @@ -1692,9 +1692,9 @@ impl PaymentTracker { &mut self, nodes: &[HarnessNode<'_>; 3], source_idx: usize, middle_idx: usize, middle_chan_ids: &[ChannelId], dest_idx: usize, dest_chan_ids: &[ChannelId], amt: u64, ) { - let source = &nodes[source_idx]; - let middle = &nodes[middle_idx]; - let dest = &nodes[dest_idx]; + let source = &nodes[source_idx].node; + let middle = &nodes[middle_idx].node; + let dest = &nodes[dest_idx].node; let (secret, hash, id) = self.next_payment(dest); let succeeded = send_mpp_hop_payment( source, @@ -1714,13 +1714,13 @@ impl PaymentTracker { fn claim_payment(&mut self, node: &HarnessNode<'_>, payment_hash: PaymentHash, fail: bool) { if fail { - node.fail_htlc_backwards(&payment_hash); + node.node.fail_htlc_backwards(&payment_hash); } else { let payment_preimage = *self .payment_preimages .get(&payment_hash) .expect("PaymentClaimable for unknown payment hash"); - node.claim_funds(payment_preimage); + node.node.claim_funds(payment_preimage); self.claimed_payment_hashes.insert(payment_hash); } } @@ -1783,6 +1783,18 @@ impl PaymentTracker { } } +struct Harness<'a, Out: Output + MaybeSend + MaybeSync> { + out: Out, + chan_type: ChanType, + chain_state: ChainState, + nodes: [HarnessNode<'a>; 3], + ab_link: PeerLink, + bc_link: PeerLink, + queues: EventQueues, + payments: PaymentTracker, + read_pos: usize, +} + fn build_node_config(chan_type: ChanType) -> UserConfig { let mut config = UserConfig::default(); config.channel_config.forwarding_fee_proportional_millionths = 0; @@ -1806,9 +1818,9 @@ fn build_node_config(chan_type: ChanType) -> UserConfig { } fn assert_test_invariants(nodes: &[HarnessNode<'_>; 3]) { - assert_eq!(nodes[0].list_channels().len(), 3); - 
assert_eq!(nodes[1].list_channels().len(), 6); - assert_eq!(nodes[2].list_channels().len(), 3); + assert_eq!(nodes[0].node.list_channels().len(), 3); + assert_eq!(nodes[1].node.list_channels().len(), 6); + assert_eq!(nodes[2].node.list_channels().len(), 3); // All broadcasters should be empty. Broadcast transactions are handled explicitly. assert!(nodes[0].broadcaster.txn_broadcasted.borrow().is_empty()); @@ -2001,6 +2013,159 @@ fn lock_fundings(nodes: &[HarnessNode<'_>; 3]) { } } +impl<'a, Out: Output + MaybeSend + MaybeSync> Harness<'a, Out> { + fn new(data: &[u8], out: Out, router: &'a FuzzRouter) -> Self { + // Read initial monitor styles and channel type from fuzz input byte 0: + // bits 0-2: monitor styles (1 bit per node) + // bits 3-4: channel type (0=Legacy, 1=KeyedAnchors, 2=ZeroFeeCommitments) + let config_byte = if !data.is_empty() { data[0] } else { 0 }; + let chan_type = match (config_byte >> 3) & 0b11 { + 0 => ChanType::Legacy, + 1 => ChanType::KeyedAnchors, + _ => ChanType::ZeroFeeCommitments, + }; + let persistence_styles = [ + if config_byte & 0b01 != 0 { + ChannelMonitorUpdateStatus::InProgress + } else { + ChannelMonitorUpdateStatus::Completed + }, + if config_byte & 0b10 != 0 { + ChannelMonitorUpdateStatus::InProgress + } else { + ChannelMonitorUpdateStatus::Completed + }, + if config_byte & 0b100 != 0 { + ChannelMonitorUpdateStatus::InProgress + } else { + ChannelMonitorUpdateStatus::Completed + }, + ]; + + let wallet_a = TestWalletSource::new(SecretKey::from_slice(&[1; 32]).unwrap()); + let wallet_b = TestWalletSource::new(SecretKey::from_slice(&[2; 32]).unwrap()); + let wallet_c = TestWalletSource::new(SecretKey::from_slice(&[3; 32]).unwrap()); + let wallets = [&wallet_a, &wallet_b, &wallet_c]; + let coinbase_tx = bitcoin::Transaction { + version: bitcoin::transaction::Version::TWO, + lock_time: bitcoin::absolute::LockTime::ZERO, + input: vec![bitcoin::TxIn { ..Default::default() }], + output: wallets + .iter() + .map(|wallet| TxOut { + 
value: Amount::from_sat(100_000), + script_pubkey: wallet.get_change_script().unwrap(), + }) + .collect(), + }; + for (idx, wallet) in wallets.iter().enumerate() { + wallet.add_utxo(coinbase_tx.clone(), idx as u32); + } + + let fee_est_a = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }); + let fee_est_b = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }); + let fee_est_c = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }); + let broadcast_a = Arc::new(TestBroadcaster { txn_broadcasted: RefCell::new(Vec::new()) }); + let broadcast_b = Arc::new(TestBroadcaster { txn_broadcasted: RefCell::new(Vec::new()) }); + let broadcast_c = Arc::new(TestBroadcaster { txn_broadcasted: RefCell::new(Vec::new()) }); + + // 3 nodes is enough to hit all the possible cases, notably + // unknown-source-unknown-dest forwarding. + let mut nodes = [ + HarnessNode::new( + 0, + wallet_a, + Arc::clone(&fee_est_a), + Arc::clone(&broadcast_a), + persistence_styles[0], + &out, + router, + chan_type, + ), + HarnessNode::new( + 1, + wallet_b, + Arc::clone(&fee_est_b), + Arc::clone(&broadcast_b), + persistence_styles[1], + &out, + router, + chan_type, + ), + HarnessNode::new( + 2, + wallet_c, + Arc::clone(&fee_est_c), + Arc::clone(&broadcast_c), + persistence_styles[2], + &out, + router, + chan_type, + ), + ]; + let mut chain_state = ChainState::new(); + + // Connect peers first, then create channels. + connect_peers(&nodes[0].node, &nodes[1].node); + connect_peers(&nodes[1].node, &nodes[2].node); + + // Create 3 channels between A-B and 3 channels between B-C (6 total). + // + // Use distinct version numbers for each funding transaction so each test + // channel gets its own txid and funding outpoint. + // A-B: channel 2 A and B have 0-reserve (trusted open + trusted accept), + // channel 3 A has 0-reserve (trusted accept). 
+ make_channel(&nodes[0], &nodes[1], 1, false, false, &mut chain_state); + make_channel(&nodes[0], &nodes[1], 2, true, true, &mut chain_state); + make_channel(&nodes[0], &nodes[1], 3, false, true, &mut chain_state); + // B-C: channel 4 B has 0-reserve (via trusted accept), + // channel 5 C has 0-reserve (via trusted open). + make_channel(&nodes[1], &nodes[2], 4, false, true, &mut chain_state); + make_channel(&nodes[1], &nodes[2], 5, true, false, &mut chain_state); + make_channel(&nodes[1], &nodes[2], 6, false, false, &mut chain_state); + + // Wipe the transactions-broadcasted set to make sure we don't broadcast + // any transactions during normal operation after setup. + nodes[0].broadcaster.txn_broadcasted.borrow_mut().clear(); + nodes[1].broadcaster.txn_broadcasted.borrow_mut().clear(); + nodes[2].broadcaster.txn_broadcasted.borrow_mut().clear(); + + // Sync all nodes to tip to lock the funding. + nodes[0].sync_with_chain_state(&chain_state, None); + nodes[1].sync_with_chain_state(&chain_state, None); + nodes[2].sync_with_chain_state(&chain_state, None); + + lock_fundings(&nodes); + + let chan_ab_ids = { + // Get channel IDs for all A-B channels (from node A's perspective). + let node_a_chans = nodes[0].node.list_usable_channels(); + [node_a_chans[0].channel_id, node_a_chans[1].channel_id, node_a_chans[2].channel_id] + }; + let chan_bc_ids = { + // Get channel IDs for all B-C channels (from node C's perspective). 
+ let node_c_chans = nodes[2].node.list_usable_channels(); + [node_c_chans[0].channel_id, node_c_chans[1].channel_id, node_c_chans[2].channel_id] + }; + + for node in &mut nodes { + node.serialized_manager = node.node.encode(); + } + + Self { + out, + chan_type, + chain_state, + nodes, + ab_link: PeerLink::new(0, 1, chan_ab_ids), + bc_link: PeerLink::new(1, 2, chan_bc_ids), + queues: EventQueues::new(), + payments: PaymentTracker::new(), + read_pos: 1, + } + } +} + fn process_msg_events_impl( node_idx: usize, corrupt_forward: bool, limit_events: ProcessMessages, nodes: &[HarnessNode<'_>; 3], out: &Out, queues: &mut EventQueues, @@ -2035,7 +2200,7 @@ fn process_msg_events_impl( corrupt_forward: bool, ) { if !corrupt_forward { - dest.handle_update_add_htlc(source_node_id, update_add); + dest.node.handle_update_add_htlc(source_node_id, update_add); } else { // Corrupt the update_add_htlc message so that its HMAC check will fail and we // generate an update_fail_malformed_htlc instead of an update_fail_htlc as we do @@ -2043,7 +2208,7 @@ fn process_msg_events_impl( let mut msg_ser = update_add.encode(); msg_ser[1000] ^= 0xff; let new_msg = UpdateAddHTLC::read_from_fixed_length_buffer(&mut &msg_ser[..]).unwrap(); - dest.handle_update_add_htlc(source_node_id, &new_msg); + dest.node.handle_update_add_htlc(source_node_id, &new_msg); } } @@ -2073,19 +2238,19 @@ fn process_msg_events_impl( || !update_fail_malformed_htlcs.is_empty(); for update_fulfill in update_fulfill_htlcs { log_msg_delivery(node_idx, dest_idx, "update_fulfill_htlc", out); - dest.handle_update_fulfill_htlc(source_node_id, update_fulfill); + dest.node.handle_update_fulfill_htlc(source_node_id, update_fulfill); } for update_fail in update_fail_htlcs.iter() { log_msg_delivery(node_idx, dest_idx, "update_fail_htlc", out); - dest.handle_update_fail_htlc(source_node_id, update_fail); + dest.node.handle_update_fail_htlc(source_node_id, update_fail); } for update_fail_malformed in 
update_fail_malformed_htlcs.iter() { log_msg_delivery(node_idx, dest_idx, "update_fail_malformed_htlc", out); - dest.handle_update_fail_malformed_htlc(source_node_id, update_fail_malformed); + dest.node.handle_update_fail_malformed_htlc(source_node_id, update_fail_malformed); } if let Some(msg) = update_fee { log_msg_delivery(node_idx, dest_idx, "update_fee", out); - dest.handle_update_fee(source_node_id, &msg); + dest.node.handle_update_fee(source_node_id, &msg); } if limit_events != ProcessMessages::AllMessages && processed_change { // If we only want to process some messages, don't deliver the CS until later. @@ -2103,7 +2268,7 @@ fn process_msg_events_impl( }); } log_msg_delivery(node_idx, dest_idx, "commitment_signed", out); - dest.handle_commitment_signed_batch_test(source_node_id, &commitment_signed); + dest.node.handle_commitment_signed_batch_test(source_node_id, &commitment_signed); None } @@ -2127,78 +2292,78 @@ fn process_msg_events_impl( }, MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => { let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "revoke_and_ack"); - nodes[dest_idx].handle_revoke_and_ack(source_node_id, msg); + nodes[dest_idx].node.handle_revoke_and_ack(source_node_id, msg); None }, MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => { let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "channel_reestablish"); - nodes[dest_idx].handle_channel_reestablish(source_node_id, msg); + nodes[dest_idx].node.handle_channel_reestablish(source_node_id, msg); None }, MessageSendEvent::SendStfu { ref node_id, ref msg } => { let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "stfu"); - nodes[dest_idx].handle_stfu(source_node_id, msg); + nodes[dest_idx].node.handle_stfu(source_node_id, msg); None }, MessageSendEvent::SendTxAddInput { ref node_id, ref msg } => { let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "tx_add_input"); - nodes[dest_idx].handle_tx_add_input(source_node_id, 
msg); + nodes[dest_idx].node.handle_tx_add_input(source_node_id, msg); None }, MessageSendEvent::SendTxAddOutput { ref node_id, ref msg } => { let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "tx_add_output"); - nodes[dest_idx].handle_tx_add_output(source_node_id, msg); + nodes[dest_idx].node.handle_tx_add_output(source_node_id, msg); None }, MessageSendEvent::SendTxRemoveInput { ref node_id, ref msg } => { let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "tx_remove_input"); - nodes[dest_idx].handle_tx_remove_input(source_node_id, msg); + nodes[dest_idx].node.handle_tx_remove_input(source_node_id, msg); None }, MessageSendEvent::SendTxRemoveOutput { ref node_id, ref msg } => { let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "tx_remove_output"); - nodes[dest_idx].handle_tx_remove_output(source_node_id, msg); + nodes[dest_idx].node.handle_tx_remove_output(source_node_id, msg); None }, MessageSendEvent::SendTxComplete { ref node_id, ref msg } => { let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "tx_complete"); - nodes[dest_idx].handle_tx_complete(source_node_id, msg); + nodes[dest_idx].node.handle_tx_complete(source_node_id, msg); None }, MessageSendEvent::SendTxAbort { ref node_id, ref msg } => { let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "tx_abort"); - nodes[dest_idx].handle_tx_abort(source_node_id, msg); + nodes[dest_idx].node.handle_tx_abort(source_node_id, msg); None }, MessageSendEvent::SendTxInitRbf { ref node_id, ref msg } => { let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "tx_init_rbf"); - nodes[dest_idx].handle_tx_init_rbf(source_node_id, msg); + nodes[dest_idx].node.handle_tx_init_rbf(source_node_id, msg); None }, MessageSendEvent::SendTxAckRbf { ref node_id, ref msg } => { let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "tx_ack_rbf"); - nodes[dest_idx].handle_tx_ack_rbf(source_node_id, msg); + nodes[dest_idx].node.handle_tx_ack_rbf(source_node_id, 
msg); None }, MessageSendEvent::SendTxSignatures { ref node_id, ref msg } => { let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "tx_signatures"); - nodes[dest_idx].handle_tx_signatures(source_node_id, msg); + nodes[dest_idx].node.handle_tx_signatures(source_node_id, msg); None }, MessageSendEvent::SendSpliceInit { ref node_id, ref msg } => { let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "splice_init"); - nodes[dest_idx].handle_splice_init(source_node_id, msg); + nodes[dest_idx].node.handle_splice_init(source_node_id, msg); None }, MessageSendEvent::SendSpliceAck { ref node_id, ref msg } => { let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "splice_ack"); - nodes[dest_idx].handle_splice_ack(source_node_id, msg); + nodes[dest_idx].node.handle_splice_ack(source_node_id, msg); None }, MessageSendEvent::SendSpliceLocked { ref node_id, ref msg } => { let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "splice_locked"); - nodes[dest_idx].handle_splice_locked(source_node_id, msg); + nodes[dest_idx].node.handle_splice_locked(source_node_id, msg); None }, MessageSendEvent::HandleError { ref action, .. } => { @@ -2223,7 +2388,7 @@ fn process_msg_events_impl( let mut events = queues.take_for_node(node_idx); let mut new_events = Vec::new(); if limit_events != ProcessMessages::OnePendingMessage { - new_events = nodes[node_idx].get_and_clear_pending_msg_events(); + new_events = nodes[node_idx].node.get_and_clear_pending_msg_events(); } let mut had_events = false; let source_node_id = nodes[node_idx].our_node_id(); @@ -2268,7 +2433,7 @@ fn process_events_impl( // Multiple HTLCs can resolve for the same payment hash, so deduplicate // claim/fail handling per event batch. let mut claim_set = new_hash_map(); - let mut events = nodes[node_idx].get_and_clear_pending_events(); + let mut events = nodes[node_idx].node.get_and_clear_pending_events(); let had_events = !events.is_empty(); for event in events.drain(..) 
{ match event { @@ -2304,6 +2469,7 @@ fn process_events_impl( } => { let signed_tx = nodes[node_idx].wallet.sign_tx(unsigned_transaction).unwrap(); nodes[node_idx] + .node .funding_transaction_signed(&channel_id, &counterparty_node_id, signed_tx) .unwrap(); }, @@ -2323,8 +2489,8 @@ fn process_events_impl( _ => panic!("Unhandled event"), } } - while nodes[node_idx].needs_pending_htlc_processing() { - nodes[node_idx].process_pending_htlc_forwards(); + while nodes[node_idx].node.needs_pending_htlc_processing() { + nodes[node_idx].node.process_pending_htlc_forwards(); } had_events } @@ -2383,150 +2549,19 @@ fn process_all_events_impl( #[inline] pub fn do_test(data: &[u8], out: Out) { let router = FuzzRouter {}; - - // Read initial monitor styles and channel type from fuzz input byte 0: - // bits 0-2: monitor styles (1 bit per node) - // bits 3-4: channel type (0=Legacy, 1=KeyedAnchors, 2=ZeroFeeCommitments) - let config_byte = if !data.is_empty() { data[0] } else { 0 }; - let chan_type = match (config_byte >> 3) & 0b11 { - 0 => ChanType::Legacy, - 1 => ChanType::KeyedAnchors, - _ => ChanType::ZeroFeeCommitments, - }; - let persistence_styles = [ - if config_byte & 0b01 != 0 { - ChannelMonitorUpdateStatus::InProgress - } else { - ChannelMonitorUpdateStatus::Completed - }, - if config_byte & 0b10 != 0 { - ChannelMonitorUpdateStatus::InProgress - } else { - ChannelMonitorUpdateStatus::Completed - }, - if config_byte & 0b100 != 0 { - ChannelMonitorUpdateStatus::InProgress - } else { - ChannelMonitorUpdateStatus::Completed - }, - ]; - - let mut chain_state = ChainState::new(); - let wallet_a = TestWalletSource::new(SecretKey::from_slice(&[1; 32]).unwrap()); - let wallet_b = TestWalletSource::new(SecretKey::from_slice(&[2; 32]).unwrap()); - let wallet_c = TestWalletSource::new(SecretKey::from_slice(&[3; 32]).unwrap()); - - let wallets = [&wallet_a, &wallet_b, &wallet_c]; - let coinbase_tx = bitcoin::Transaction { - version: bitcoin::transaction::Version::TWO, - lock_time: 
bitcoin::absolute::LockTime::ZERO, - input: vec![bitcoin::TxIn { ..Default::default() }], - output: wallets - .iter() - .map(|wallet| TxOut { - value: Amount::from_sat(100_000), - script_pubkey: wallet.get_change_script().unwrap(), - }) - .collect(), - }; - for (idx, wallet) in wallets.iter().enumerate() { - wallet.add_utxo(coinbase_tx.clone(), idx as u32); - } - - let fee_est_a = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }); - let fee_est_b = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }); - let fee_est_c = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }); - let broadcast_a = Arc::new(TestBroadcaster { txn_broadcasted: RefCell::new(Vec::new()) }); - let broadcast_b = Arc::new(TestBroadcaster { txn_broadcasted: RefCell::new(Vec::new()) }); - let broadcast_c = Arc::new(TestBroadcaster { txn_broadcasted: RefCell::new(Vec::new()) }); - - // 3 nodes is enough to hit all the possible cases, notably unknown-source-unknown-dest - // forwarding. - let mut nodes = [ - HarnessNode::new( - 0, - wallet_a, - Arc::clone(&fee_est_a), - Arc::clone(&broadcast_a), - persistence_styles[0], - &out, - &router, - chan_type, - ), - HarnessNode::new( - 1, - wallet_b, - Arc::clone(&fee_est_b), - Arc::clone(&broadcast_b), - persistence_styles[1], - &out, - &router, - chan_type, - ), - HarnessNode::new( - 2, - wallet_c, - Arc::clone(&fee_est_c), - Arc::clone(&broadcast_c), - persistence_styles[2], - &out, - &router, - chan_type, - ), - ]; - - // Connect peers first, then create channels - connect_peers(&nodes[0], &nodes[1]); - connect_peers(&nodes[1], &nodes[2]); - - // Create 3 channels between A-B and 3 channels between B-C (6 total). - // - // Use distinct version numbers for each funding transaction so each test channel gets its own - // txid and funding outpoint. 
- // A-B: channel 2 A and B have 0-reserve (trusted open + trusted accept), - // channel 3 A has 0-reserve (trusted accept) - make_channel(&nodes[0], &nodes[1], 1, false, false, &mut chain_state); - make_channel(&nodes[0], &nodes[1], 2, true, true, &mut chain_state); - make_channel(&nodes[0], &nodes[1], 3, false, true, &mut chain_state); - // B-C: channel 4 B has 0-reserve (via trusted accept), - // channel 5 C has 0-reserve (via trusted open) - make_channel(&nodes[1], &nodes[2], 4, false, true, &mut chain_state); - make_channel(&nodes[1], &nodes[2], 5, true, false, &mut chain_state); - make_channel(&nodes[1], &nodes[2], 6, false, false, &mut chain_state); - - // Wipe the transactions-broadcasted set to make sure we don't broadcast any transactions - // during normal operation in `test_return`. - nodes[0].broadcaster.txn_broadcasted.borrow_mut().clear(); - nodes[1].broadcaster.txn_broadcasted.borrow_mut().clear(); - nodes[2].broadcaster.txn_broadcasted.borrow_mut().clear(); - - // Sync all nodes to tip to lock the funding. 
- nodes[0].sync_with_chain_state(&chain_state, None); - nodes[1].sync_with_chain_state(&chain_state, None); - nodes[2].sync_with_chain_state(&chain_state, None); - - lock_fundings(&nodes); - - // Get channel IDs for all A-B channels (from node A's perspective) - let chan_ab_ids = { - let node_a_chans = nodes[0].list_usable_channels(); - [node_a_chans[0].channel_id, node_a_chans[1].channel_id, node_a_chans[2].channel_id] - }; - // Get channel IDs for all B-C channels (from node C's perspective) - let chan_bc_ids = { - let node_c_chans = nodes[2].list_usable_channels(); - [node_c_chans[0].channel_id, node_c_chans[1].channel_id, node_c_chans[2].channel_id] - }; - let mut ab_link = PeerLink::new(0, 1, chan_ab_ids); - let mut bc_link = PeerLink::new(1, 2, chan_bc_ids); + let Harness { + out, + chan_type, + mut chain_state, + mut nodes, + mut ab_link, + mut bc_link, + mut queues, + mut payments, + mut read_pos, + } = Harness::new(data, out, &router); let chan_a_id = ab_link.first_channel_id(); let chan_b_id = bc_link.first_channel_id(); - let mut queues = EventQueues::new(); - let mut payments = PaymentTracker::new(); - - for node in &mut nodes { - node.serialized_manager = node.encode(); - } macro_rules! test_return { () => {{ @@ -2535,18 +2570,6 @@ pub fn do_test(data: &[u8], out: Out) { }}; } - let mut read_pos = 1; // First byte was consumed for initial config (persistence styles + chan_type) - macro_rules! get_slice { - ($len: expr) => {{ - let slice_len = $len as usize; - if data.len() < read_pos + slice_len { - test_return!(); - } - read_pos += slice_len; - &data[read_pos - slice_len..read_pos] - }}; - } - loop { macro_rules! process_msg_events { ($node: expr, $corrupt_forward: expr, $limit_events: expr) => {{ @@ -2579,7 +2602,11 @@ pub fn do_test(data: &[u8], out: Out) { }}; } - let v = get_slice!(1)[0]; + if data.len() < read_pos + 1 { + test_return!(); + } + let v = data[read_pos]; + read_pos += 1; out.locked_write(format!("READ A BYTE! 
HANDLING INPUT {:x}...........\n", v).as_bytes()); match v { // In general, we keep related message groups close together in binary form, allowing @@ -2862,59 +2889,67 @@ pub fn do_test(data: &[u8], out: Out) { 0xa0 => { if !cfg!(splicing) { - test_return!(); + assert_test_invariants(&nodes); + return; } - let cp_node_id = nodes[1].get_our_node_id(); + let cp_node_id = nodes[1].our_node_id(); nodes[0].splice_in(&cp_node_id, &chan_a_id); }, 0xa1 => { if !cfg!(splicing) { - test_return!(); + assert_test_invariants(&nodes); + return; } - let cp_node_id = nodes[0].get_our_node_id(); + let cp_node_id = nodes[0].our_node_id(); nodes[1].splice_in(&cp_node_id, &chan_a_id); }, 0xa2 => { if !cfg!(splicing) { - test_return!(); + assert_test_invariants(&nodes); + return; } - let cp_node_id = nodes[2].get_our_node_id(); + let cp_node_id = nodes[2].our_node_id(); nodes[1].splice_in(&cp_node_id, &chan_b_id); }, 0xa3 => { if !cfg!(splicing) { - test_return!(); + assert_test_invariants(&nodes); + return; } - let cp_node_id = nodes[1].get_our_node_id(); + let cp_node_id = nodes[1].our_node_id(); nodes[2].splice_in(&cp_node_id, &chan_b_id); }, 0xa4 => { if !cfg!(splicing) { - test_return!(); + assert_test_invariants(&nodes); + return; } - let cp_node_id = nodes[1].get_our_node_id(); + let cp_node_id = nodes[1].our_node_id(); nodes[0].splice_out(&cp_node_id, &chan_a_id); }, 0xa5 => { if !cfg!(splicing) { - test_return!(); + assert_test_invariants(&nodes); + return; } - let cp_node_id = nodes[0].get_our_node_id(); + let cp_node_id = nodes[0].our_node_id(); nodes[1].splice_out(&cp_node_id, &chan_a_id); }, 0xa6 => { if !cfg!(splicing) { - test_return!(); + assert_test_invariants(&nodes); + return; } - let cp_node_id = nodes[2].get_our_node_id(); + let cp_node_id = nodes[2].our_node_id(); nodes[1].splice_out(&cp_node_id, &chan_b_id); }, 0xa7 => { if !cfg!(splicing) { - test_return!(); + assert_test_invariants(&nodes); + return; } - let cp_node_id = nodes[1].get_our_node_id(); + let 
cp_node_id = nodes[1].our_node_id(); nodes[2].splice_out(&cp_node_id, &chan_b_id); }, @@ -2946,15 +2981,21 @@ pub fn do_test(data: &[u8], out: Out) { }, 0xb0 | 0xb1 | 0xb2 => { + // Restart node A, picking among the in-flight `ChannelMonitor`s to use based on + // the value of `v` we're matching. ab_link.disconnect_for_reload(0, &mut nodes, &mut queues); nodes[0].reload(v, &out, &router, chan_type); }, 0xb3..=0xbb => { + // Restart node B, picking among the in-flight `ChannelMonitor`s to use based on + // the value of `v` we're matching. ab_link.disconnect_for_reload(1, &mut nodes, &mut queues); bc_link.disconnect_for_reload(1, &mut nodes, &mut queues); nodes[1].reload(v, &out, &router, chan_type); }, 0xbc | 0xbd | 0xbe => { + // Restart node C, picking among the in-flight `ChannelMonitor`s to use based on + // the value of `v` we're matching. bc_link.disconnect_for_reload(2, &mut nodes, &mut queues); nodes[2].reload(v, &out, &router, chan_type); }, @@ -2966,103 +3007,103 @@ pub fn do_test(data: &[u8], out: Out) { nodes[0] .keys_manager .enable_op_for_all_signers(SignerOp::SignCounterpartyCommitment); - nodes[0].signer_unblocked(None); + nodes[0].node.signer_unblocked(None); }, 0xc4 => { nodes[1] .keys_manager .enable_op_for_all_signers(SignerOp::SignCounterpartyCommitment); - let filter = Some((nodes[0].get_our_node_id(), chan_a_id)); - nodes[1].signer_unblocked(filter); + let filter = Some((nodes[0].our_node_id(), chan_a_id)); + nodes[1].node.signer_unblocked(filter); }, 0xc5 => { nodes[1] .keys_manager .enable_op_for_all_signers(SignerOp::SignCounterpartyCommitment); - let filter = Some((nodes[2].get_our_node_id(), chan_b_id)); - nodes[1].signer_unblocked(filter); + let filter = Some((nodes[2].our_node_id(), chan_b_id)); + nodes[1].node.signer_unblocked(filter); }, 0xc6 => { nodes[2] .keys_manager .enable_op_for_all_signers(SignerOp::SignCounterpartyCommitment); - nodes[2].signer_unblocked(None); + nodes[2].node.signer_unblocked(None); }, 0xc7 => { 
nodes[0].keys_manager.enable_op_for_all_signers(SignerOp::GetPerCommitmentPoint); - nodes[0].signer_unblocked(None); + nodes[0].node.signer_unblocked(None); }, 0xc8 => { nodes[1].keys_manager.enable_op_for_all_signers(SignerOp::GetPerCommitmentPoint); - let filter = Some((nodes[0].get_our_node_id(), chan_a_id)); - nodes[1].signer_unblocked(filter); + let filter = Some((nodes[0].our_node_id(), chan_a_id)); + nodes[1].node.signer_unblocked(filter); }, 0xc9 => { nodes[1].keys_manager.enable_op_for_all_signers(SignerOp::GetPerCommitmentPoint); - let filter = Some((nodes[2].get_our_node_id(), chan_b_id)); - nodes[1].signer_unblocked(filter); + let filter = Some((nodes[2].our_node_id(), chan_b_id)); + nodes[1].node.signer_unblocked(filter); }, 0xca => { nodes[2].keys_manager.enable_op_for_all_signers(SignerOp::GetPerCommitmentPoint); - nodes[2].signer_unblocked(None); + nodes[2].node.signer_unblocked(None); }, 0xcb => { nodes[0].keys_manager.enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); - nodes[0].signer_unblocked(None); + nodes[0].node.signer_unblocked(None); }, 0xcc => { nodes[1].keys_manager.enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); - let filter = Some((nodes[0].get_our_node_id(), chan_a_id)); - nodes[1].signer_unblocked(filter); + let filter = Some((nodes[0].our_node_id(), chan_a_id)); + nodes[1].node.signer_unblocked(filter); }, 0xcd => { nodes[1].keys_manager.enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); - let filter = Some((nodes[2].get_our_node_id(), chan_b_id)); - nodes[1].signer_unblocked(filter); + let filter = Some((nodes[2].our_node_id(), chan_b_id)); + nodes[1].node.signer_unblocked(filter); }, 0xce => { nodes[2].keys_manager.enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); - nodes[2].signer_unblocked(None); + nodes[2].node.signer_unblocked(None); }, 0xf0 => { - ab_link.complete_monitor_updates_for_node(0, &nodes, MonitorUpdateSelector::First) + ab_link.complete_monitor_updates_for_node(0, 
&nodes, MonitorUpdateSelector::First); }, 0xf1 => { - ab_link.complete_monitor_updates_for_node(0, &nodes, MonitorUpdateSelector::Second) + ab_link.complete_monitor_updates_for_node(0, &nodes, MonitorUpdateSelector::Second); }, 0xf2 => { - ab_link.complete_monitor_updates_for_node(0, &nodes, MonitorUpdateSelector::Last) + ab_link.complete_monitor_updates_for_node(0, &nodes, MonitorUpdateSelector::Last); }, 0xf4 => { - ab_link.complete_monitor_updates_for_node(1, &nodes, MonitorUpdateSelector::First) + ab_link.complete_monitor_updates_for_node(1, &nodes, MonitorUpdateSelector::First); }, 0xf5 => { - ab_link.complete_monitor_updates_for_node(1, &nodes, MonitorUpdateSelector::Second) + ab_link.complete_monitor_updates_for_node(1, &nodes, MonitorUpdateSelector::Second); }, 0xf6 => { - ab_link.complete_monitor_updates_for_node(1, &nodes, MonitorUpdateSelector::Last) + ab_link.complete_monitor_updates_for_node(1, &nodes, MonitorUpdateSelector::Last); }, 0xf8 => { - bc_link.complete_monitor_updates_for_node(1, &nodes, MonitorUpdateSelector::First) + bc_link.complete_monitor_updates_for_node(1, &nodes, MonitorUpdateSelector::First); }, 0xf9 => { - bc_link.complete_monitor_updates_for_node(1, &nodes, MonitorUpdateSelector::Second) + bc_link.complete_monitor_updates_for_node(1, &nodes, MonitorUpdateSelector::Second); }, 0xfa => { - bc_link.complete_monitor_updates_for_node(1, &nodes, MonitorUpdateSelector::Last) + bc_link.complete_monitor_updates_for_node(1, &nodes, MonitorUpdateSelector::Last); }, 0xfc => { - bc_link.complete_monitor_updates_for_node(2, &nodes, MonitorUpdateSelector::First) + bc_link.complete_monitor_updates_for_node(2, &nodes, MonitorUpdateSelector::First); }, 0xfd => { - bc_link.complete_monitor_updates_for_node(2, &nodes, MonitorUpdateSelector::Second) + bc_link.complete_monitor_updates_for_node(2, &nodes, MonitorUpdateSelector::Second); }, 0xfe => { - bc_link.complete_monitor_updates_for_node(2, &nodes, MonitorUpdateSelector::Last) + 
bc_link.complete_monitor_updates_for_node(2, &nodes, MonitorUpdateSelector::Last); }, 0xff => { @@ -3077,9 +3118,9 @@ pub fn do_test(data: &[u8], out: Out) { nodes[1].keys_manager.enable_op_for_all_signers(op); nodes[2].keys_manager.enable_op_for_all_signers(op); } - nodes[0].signer_unblocked(None); - nodes[1].signer_unblocked(None); - nodes[2].signer_unblocked(None); + nodes[0].node.signer_unblocked(None); + nodes[1].node.signer_unblocked(None); + nodes[2].node.signer_unblocked(None); process_all_events_impl( &nodes, @@ -3095,7 +3136,7 @@ pub fn do_test(data: &[u8], out: Out) { // channels to see if we have any committed HTLC parts of an MPP payment that need // to be failed back. for node in &nodes { - node.timer_tick_occurred(); + node.node.timer_tick_occurred(); } process_all_events_impl( &nodes, From b3752745c88242f563bb879face8320b99550d17 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 21 Apr 2026 17:03:43 +0200 Subject: [PATCH 14/29] Wrap chanmon consistency state in Harness Wrap the chanmon consistency state in a `Harness` struct. The fuzz loop now accesses nodes, links, queues, payments, and chain state through one owner while keeping the existing byte actions intact. 
--- fuzz/src/chanmon_consistency.rs | 594 +++++++++++++++++++------------- 1 file changed, 357 insertions(+), 237 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index d5e547ae185..553c3ec3aaa 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -1783,7 +1783,8 @@ impl PaymentTracker { } } -struct Harness<'a, Out: Output + MaybeSend + MaybeSync> { +struct Harness<'a, 'd, Out: Output + MaybeSend + MaybeSync> { + data: &'d [u8], out: Out, chan_type: ChanType, chain_state: ChainState, @@ -2013,8 +2014,8 @@ fn lock_fundings(nodes: &[HarnessNode<'_>; 3]) { } } -impl<'a, Out: Output + MaybeSend + MaybeSync> Harness<'a, Out> { - fn new(data: &[u8], out: Out, router: &'a FuzzRouter) -> Self { +impl<'a, 'd, Out: Output + MaybeSend + MaybeSync> Harness<'a, 'd, Out> { + fn new(data: &'d [u8], out: Out, router: &'a FuzzRouter) -> Self { // Read initial monitor styles and channel type from fuzz input byte 0: // bits 0-2: monitor styles (1 bit per node) // bits 3-4: channel type (0=Legacy, 1=KeyedAnchors, 2=ZeroFeeCommitments) @@ -2153,6 +2154,7 @@ impl<'a, Out: Output + MaybeSend + MaybeSync> Harness<'a, Out> { } Self { + data, out, chan_type, chain_state, @@ -2164,6 +2166,34 @@ impl<'a, Out: Output + MaybeSend + MaybeSync> Harness<'a, Out> { read_pos: 1, } } + + fn chan_a_id(&self) -> ChannelId { + self.ab_link.first_channel_id() + } + + fn chan_b_id(&self) -> ChannelId { + self.bc_link.first_channel_id() + } + + fn next_input_byte(&mut self) -> Option { + if self.data.len() < self.read_pos + 1 { + None + } else { + let value = self.data[self.read_pos]; + self.read_pos += 1; + Some(value) + } + } + + fn finish(&self) { + assert_test_invariants(&self.nodes); + } + + fn refresh_serialized_managers(&mut self) { + for node in &mut self.nodes { + node.refresh_serialized_manager(); + } + } } fn process_msg_events_impl( @@ -2549,23 +2579,13 @@ fn process_all_events_impl( #[inline] pub fn do_test(data: &[u8], 
out: Out) { let router = FuzzRouter {}; - let Harness { - out, - chan_type, - mut chain_state, - mut nodes, - mut ab_link, - mut bc_link, - mut queues, - mut payments, - mut read_pos, - } = Harness::new(data, out, &router); - let chan_a_id = ab_link.first_channel_id(); - let chan_b_id = bc_link.first_channel_id(); + let mut harness = Harness::new(data, out, &router); + let chan_a_id = harness.chan_a_id(); + let chan_b_id = harness.chan_b_id(); macro_rules! test_return { () => {{ - assert_test_invariants(&nodes); + harness.finish(); return; }}; } @@ -2577,9 +2597,9 @@ pub fn do_test(data: &[u8], out: Out) { $node, $corrupt_forward, $limit_events, - &nodes, - &out, - &mut queues, + &harness.nodes, + &harness.out, + &mut harness.queues, ) }}; } @@ -2592,7 +2612,13 @@ pub fn do_test(data: &[u8], out: Out) { macro_rules! process_events { ($node: expr, $fail: expr) => {{ - process_events_impl($node, $fail, &nodes, &mut chain_state, &mut payments) + process_events_impl( + $node, + $fail, + &harness.nodes, + &mut harness.chain_state, + &mut harness.payments, + ) }}; } @@ -2602,48 +2628,46 @@ pub fn do_test(data: &[u8], out: Out) { }}; } - if data.len() < read_pos + 1 { - test_return!(); - } - let v = data[read_pos]; - read_pos += 1; - out.locked_write(format!("READ A BYTE! HANDLING INPUT {:x}...........\n", v).as_bytes()); + let v = if let Some(value) = harness.next_input_byte() { value } else { test_return!() }; + harness + .out + .locked_write(format!("READ A BYTE! HANDLING INPUT {:x}...........\n", v).as_bytes()); match v { // In general, we keep related message groups close together in binary form, allowing // bit-twiddling mutations to have similar effects. This is probably overkill, but no // harm in doing so. 
- 0x00 => nodes[0].set_persistence_style(ChannelMonitorUpdateStatus::InProgress), - 0x01 => nodes[1].set_persistence_style(ChannelMonitorUpdateStatus::InProgress), - 0x02 => nodes[2].set_persistence_style(ChannelMonitorUpdateStatus::InProgress), - 0x04 => nodes[0].set_persistence_style(ChannelMonitorUpdateStatus::Completed), - 0x05 => nodes[1].set_persistence_style(ChannelMonitorUpdateStatus::Completed), - 0x06 => nodes[2].set_persistence_style(ChannelMonitorUpdateStatus::Completed), + 0x00 => harness.nodes[0].set_persistence_style(ChannelMonitorUpdateStatus::InProgress), + 0x01 => harness.nodes[1].set_persistence_style(ChannelMonitorUpdateStatus::InProgress), + 0x02 => harness.nodes[2].set_persistence_style(ChannelMonitorUpdateStatus::InProgress), + 0x04 => harness.nodes[0].set_persistence_style(ChannelMonitorUpdateStatus::Completed), + 0x05 => harness.nodes[1].set_persistence_style(ChannelMonitorUpdateStatus::Completed), + 0x06 => harness.nodes[2].set_persistence_style(ChannelMonitorUpdateStatus::Completed), 0x08 => { - for id in ab_link.channel_ids() { - nodes[0].complete_all_monitor_updates(id); + for id in harness.ab_link.channel_ids() { + harness.nodes[0].complete_all_monitor_updates(id); } }, 0x09 => { - for id in ab_link.channel_ids() { - nodes[1].complete_all_monitor_updates(id); + for id in harness.ab_link.channel_ids() { + harness.nodes[1].complete_all_monitor_updates(id); } }, 0x0a => { - for id in bc_link.channel_ids() { - nodes[1].complete_all_monitor_updates(id); + for id in harness.bc_link.channel_ids() { + harness.nodes[1].complete_all_monitor_updates(id); } }, 0x0b => { - for id in bc_link.channel_ids() { - nodes[2].complete_all_monitor_updates(id); + for id in harness.bc_link.channel_ids() { + harness.nodes[2].complete_all_monitor_updates(id); } }, - 0x0c => ab_link.disconnect(&mut nodes, &mut queues), - 0x0d => bc_link.disconnect(&mut nodes, &mut queues), - 0x0e => ab_link.reconnect(&mut nodes), - 0x0f => bc_link.reconnect(&mut nodes), + 0x0c => 
harness.ab_link.disconnect(&mut harness.nodes, &mut harness.queues), + 0x0d => harness.bc_link.disconnect(&mut harness.nodes, &mut harness.queues), + 0x0e => harness.ab_link.reconnect(&mut harness.nodes), + 0x0f => harness.bc_link.reconnect(&mut harness.nodes), 0x10 => process_msg_noret!(0, true, ProcessMessages::AllMessages), 0x11 => process_msg_noret!(0, false, ProcessMessages::AllMessages), @@ -2677,169 +2701,191 @@ pub fn do_test(data: &[u8], out: Out) { // 1/10th the channel size: 0x30 => { - payments.send_direct(&nodes, 0, 1, chan_a_id, 10_000_000); + harness.payments.send_direct(&harness.nodes, 0, 1, chan_a_id, 10_000_000); }, 0x31 => { - payments.send_direct(&nodes, 1, 0, chan_a_id, 10_000_000); + harness.payments.send_direct(&harness.nodes, 1, 0, chan_a_id, 10_000_000); }, 0x32 => { - payments.send_direct(&nodes, 1, 2, chan_b_id, 10_000_000); + harness.payments.send_direct(&harness.nodes, 1, 2, chan_b_id, 10_000_000); }, 0x33 => { - payments.send_direct(&nodes, 2, 1, chan_b_id, 10_000_000); + harness.payments.send_direct(&harness.nodes, 2, 1, chan_b_id, 10_000_000); }, 0x34 => { - payments.send_hop(&nodes, 0, 1, chan_a_id, 2, chan_b_id, 10_000_000); + harness.payments.send_hop( + &harness.nodes, + 0, + 1, + chan_a_id, + 2, + chan_b_id, + 10_000_000, + ); }, 0x35 => { - payments.send_hop(&nodes, 2, 1, chan_b_id, 0, chan_a_id, 10_000_000); + harness.payments.send_hop( + &harness.nodes, + 2, + 1, + chan_b_id, + 0, + chan_a_id, + 10_000_000, + ); }, 0x38 => { - payments.send_direct(&nodes, 0, 1, chan_a_id, 1_000_000); + harness.payments.send_direct(&harness.nodes, 0, 1, chan_a_id, 1_000_000); }, 0x39 => { - payments.send_direct(&nodes, 1, 0, chan_a_id, 1_000_000); + harness.payments.send_direct(&harness.nodes, 1, 0, chan_a_id, 1_000_000); }, 0x3a => { - payments.send_direct(&nodes, 1, 2, chan_b_id, 1_000_000); + harness.payments.send_direct(&harness.nodes, 1, 2, chan_b_id, 1_000_000); }, 0x3b => { - payments.send_direct(&nodes, 2, 1, chan_b_id, 1_000_000); + 
harness.payments.send_direct(&harness.nodes, 2, 1, chan_b_id, 1_000_000); }, 0x3c => { - payments.send_hop(&nodes, 0, 1, chan_a_id, 2, chan_b_id, 1_000_000); + harness.payments.send_hop(&harness.nodes, 0, 1, chan_a_id, 2, chan_b_id, 1_000_000); }, 0x3d => { - payments.send_hop(&nodes, 2, 1, chan_b_id, 0, chan_a_id, 1_000_000); + harness.payments.send_hop(&harness.nodes, 2, 1, chan_b_id, 0, chan_a_id, 1_000_000); }, 0x40 => { - payments.send_direct(&nodes, 0, 1, chan_a_id, 100_000); + harness.payments.send_direct(&harness.nodes, 0, 1, chan_a_id, 100_000); }, 0x41 => { - payments.send_direct(&nodes, 1, 0, chan_a_id, 100_000); + harness.payments.send_direct(&harness.nodes, 1, 0, chan_a_id, 100_000); }, 0x42 => { - payments.send_direct(&nodes, 1, 2, chan_b_id, 100_000); + harness.payments.send_direct(&harness.nodes, 1, 2, chan_b_id, 100_000); }, 0x43 => { - payments.send_direct(&nodes, 2, 1, chan_b_id, 100_000); + harness.payments.send_direct(&harness.nodes, 2, 1, chan_b_id, 100_000); }, 0x44 => { - payments.send_hop(&nodes, 0, 1, chan_a_id, 2, chan_b_id, 100_000); + harness.payments.send_hop(&harness.nodes, 0, 1, chan_a_id, 2, chan_b_id, 100_000); }, 0x45 => { - payments.send_hop(&nodes, 2, 1, chan_b_id, 0, chan_a_id, 100_000); + harness.payments.send_hop(&harness.nodes, 2, 1, chan_b_id, 0, chan_a_id, 100_000); }, 0x48 => { - payments.send_direct(&nodes, 0, 1, chan_a_id, 10_000); + harness.payments.send_direct(&harness.nodes, 0, 1, chan_a_id, 10_000); }, 0x49 => { - payments.send_direct(&nodes, 1, 0, chan_a_id, 10_000); + harness.payments.send_direct(&harness.nodes, 1, 0, chan_a_id, 10_000); }, 0x4a => { - payments.send_direct(&nodes, 1, 2, chan_b_id, 10_000); + harness.payments.send_direct(&harness.nodes, 1, 2, chan_b_id, 10_000); }, 0x4b => { - payments.send_direct(&nodes, 2, 1, chan_b_id, 10_000); + harness.payments.send_direct(&harness.nodes, 2, 1, chan_b_id, 10_000); }, 0x4c => { - payments.send_hop(&nodes, 0, 1, chan_a_id, 2, chan_b_id, 10_000); + 
harness.payments.send_hop(&harness.nodes, 0, 1, chan_a_id, 2, chan_b_id, 10_000); }, 0x4d => { - payments.send_hop(&nodes, 2, 1, chan_b_id, 0, chan_a_id, 10_000); + harness.payments.send_hop(&harness.nodes, 2, 1, chan_b_id, 0, chan_a_id, 10_000); }, 0x50 => { - payments.send_direct(&nodes, 0, 1, chan_a_id, 1_000); + harness.payments.send_direct(&harness.nodes, 0, 1, chan_a_id, 1_000); }, 0x51 => { - payments.send_direct(&nodes, 1, 0, chan_a_id, 1_000); + harness.payments.send_direct(&harness.nodes, 1, 0, chan_a_id, 1_000); }, 0x52 => { - payments.send_direct(&nodes, 1, 2, chan_b_id, 1_000); + harness.payments.send_direct(&harness.nodes, 1, 2, chan_b_id, 1_000); }, 0x53 => { - payments.send_direct(&nodes, 2, 1, chan_b_id, 1_000); + harness.payments.send_direct(&harness.nodes, 2, 1, chan_b_id, 1_000); }, 0x54 => { - payments.send_hop(&nodes, 0, 1, chan_a_id, 2, chan_b_id, 1_000); + harness.payments.send_hop(&harness.nodes, 0, 1, chan_a_id, 2, chan_b_id, 1_000); }, 0x55 => { - payments.send_hop(&nodes, 2, 1, chan_b_id, 0, chan_a_id, 1_000); + harness.payments.send_hop(&harness.nodes, 2, 1, chan_b_id, 0, chan_a_id, 1_000); }, 0x58 => { - payments.send_direct(&nodes, 0, 1, chan_a_id, 100); + harness.payments.send_direct(&harness.nodes, 0, 1, chan_a_id, 100); }, 0x59 => { - payments.send_direct(&nodes, 1, 0, chan_a_id, 100); + harness.payments.send_direct(&harness.nodes, 1, 0, chan_a_id, 100); }, 0x5a => { - payments.send_direct(&nodes, 1, 2, chan_b_id, 100); + harness.payments.send_direct(&harness.nodes, 1, 2, chan_b_id, 100); }, 0x5b => { - payments.send_direct(&nodes, 2, 1, chan_b_id, 100); + harness.payments.send_direct(&harness.nodes, 2, 1, chan_b_id, 100); }, 0x5c => { - payments.send_hop(&nodes, 0, 1, chan_a_id, 2, chan_b_id, 100); + harness.payments.send_hop(&harness.nodes, 0, 1, chan_a_id, 2, chan_b_id, 100); }, 0x5d => { - payments.send_hop(&nodes, 2, 1, chan_b_id, 0, chan_a_id, 100); + harness.payments.send_hop(&harness.nodes, 2, 1, chan_b_id, 0, chan_a_id, 
100); }, 0x60 => { - payments.send_direct(&nodes, 0, 1, chan_a_id, 10); + harness.payments.send_direct(&harness.nodes, 0, 1, chan_a_id, 10); }, 0x61 => { - payments.send_direct(&nodes, 1, 0, chan_a_id, 10); + harness.payments.send_direct(&harness.nodes, 1, 0, chan_a_id, 10); }, 0x62 => { - payments.send_direct(&nodes, 1, 2, chan_b_id, 10); + harness.payments.send_direct(&harness.nodes, 1, 2, chan_b_id, 10); }, 0x63 => { - payments.send_direct(&nodes, 2, 1, chan_b_id, 10); + harness.payments.send_direct(&harness.nodes, 2, 1, chan_b_id, 10); }, 0x64 => { - payments.send_hop(&nodes, 0, 1, chan_a_id, 2, chan_b_id, 10); + harness.payments.send_hop(&harness.nodes, 0, 1, chan_a_id, 2, chan_b_id, 10); }, 0x65 => { - payments.send_hop(&nodes, 2, 1, chan_b_id, 0, chan_a_id, 10); + harness.payments.send_hop(&harness.nodes, 2, 1, chan_b_id, 0, chan_a_id, 10); }, 0x68 => { - payments.send_direct(&nodes, 0, 1, chan_a_id, 1); + harness.payments.send_direct(&harness.nodes, 0, 1, chan_a_id, 1); }, 0x69 => { - payments.send_direct(&nodes, 1, 0, chan_a_id, 1); + harness.payments.send_direct(&harness.nodes, 1, 0, chan_a_id, 1); }, 0x6a => { - payments.send_direct(&nodes, 1, 2, chan_b_id, 1); + harness.payments.send_direct(&harness.nodes, 1, 2, chan_b_id, 1); }, 0x6b => { - payments.send_direct(&nodes, 2, 1, chan_b_id, 1); + harness.payments.send_direct(&harness.nodes, 2, 1, chan_b_id, 1); }, 0x6c => { - payments.send_hop(&nodes, 0, 1, chan_a_id, 2, chan_b_id, 1); + harness.payments.send_hop(&harness.nodes, 0, 1, chan_a_id, 2, chan_b_id, 1); }, 0x6d => { - payments.send_hop(&nodes, 2, 1, chan_b_id, 0, chan_a_id, 1); + harness.payments.send_hop(&harness.nodes, 2, 1, chan_b_id, 0, chan_a_id, 1); }, // MPP payments // 0x70: direct MPP from 0 to 1 (multi A-B channels) 0x70 => { - payments.send_mpp_direct(&nodes, 0, 1, ab_link.channel_ids(), 1_000_000); + harness.payments.send_mpp_direct( + &harness.nodes, + 0, + 1, + harness.ab_link.channel_ids(), + 1_000_000, + ); }, // 0x71: MPP 0->1->2, 
multi channels on first hop (A-B) 0x71 => { - payments.send_mpp_hop( - &nodes, + harness.payments.send_mpp_hop( + &harness.nodes, 0, 1, - ab_link.channel_ids(), + harness.ab_link.channel_ids(), 2, &[chan_b_id], 1_000_000, @@ -2847,32 +2893,32 @@ pub fn do_test(data: &[u8], out: Out) { }, // 0x72: MPP 0->1->2, multi channels on both hops (A-B and B-C) 0x72 => { - payments.send_mpp_hop( - &nodes, + harness.payments.send_mpp_hop( + &harness.nodes, 0, 1, - ab_link.channel_ids(), + harness.ab_link.channel_ids(), 2, - bc_link.channel_ids(), + harness.bc_link.channel_ids(), 1_000_000, ); }, // 0x73: MPP 0->1->2, multi channels on second hop (B-C) 0x73 => { - payments.send_mpp_hop( - &nodes, + harness.payments.send_mpp_hop( + &harness.nodes, 0, 1, &[chan_a_id], 2, - bc_link.channel_ids(), + harness.bc_link.channel_ids(), 1_000_000, ); }, // 0x74: direct MPP from 0 to 1, multi parts over single channel 0x74 => { - payments.send_mpp_direct( - &nodes, + harness.payments.send_mpp_direct( + &harness.nodes, 0, 1, &[chan_a_id, chan_a_id, chan_a_id], @@ -2880,301 +2926,375 @@ pub fn do_test(data: &[u8], out: Out) { ); }, - 0x80 => nodes[0].bump_fee_estimate(chan_type), - 0x81 => nodes[0].reset_fee_estimate(), - 0x84 => nodes[1].bump_fee_estimate(chan_type), - 0x85 => nodes[1].reset_fee_estimate(), - 0x88 => nodes[2].bump_fee_estimate(chan_type), - 0x89 => nodes[2].reset_fee_estimate(), + 0x80 => harness.nodes[0].bump_fee_estimate(harness.chan_type), + 0x81 => harness.nodes[0].reset_fee_estimate(), + 0x84 => harness.nodes[1].bump_fee_estimate(harness.chan_type), + 0x85 => harness.nodes[1].reset_fee_estimate(), + 0x88 => harness.nodes[2].bump_fee_estimate(harness.chan_type), + 0x89 => harness.nodes[2].reset_fee_estimate(), 0xa0 => { if !cfg!(splicing) { - assert_test_invariants(&nodes); + assert_test_invariants(&harness.nodes); return; } - let cp_node_id = nodes[1].our_node_id(); - nodes[0].splice_in(&cp_node_id, &chan_a_id); + let cp_node_id = harness.nodes[1].our_node_id(); + 
harness.nodes[0].splice_in(&cp_node_id, &harness.chan_a_id()); }, 0xa1 => { if !cfg!(splicing) { - assert_test_invariants(&nodes); + assert_test_invariants(&harness.nodes); return; } - let cp_node_id = nodes[0].our_node_id(); - nodes[1].splice_in(&cp_node_id, &chan_a_id); + let cp_node_id = harness.nodes[0].our_node_id(); + harness.nodes[1].splice_in(&cp_node_id, &harness.chan_a_id()); }, 0xa2 => { if !cfg!(splicing) { - assert_test_invariants(&nodes); + assert_test_invariants(&harness.nodes); return; } - let cp_node_id = nodes[2].our_node_id(); - nodes[1].splice_in(&cp_node_id, &chan_b_id); + let cp_node_id = harness.nodes[2].our_node_id(); + harness.nodes[1].splice_in(&cp_node_id, &harness.chan_b_id()); }, 0xa3 => { if !cfg!(splicing) { - assert_test_invariants(&nodes); + assert_test_invariants(&harness.nodes); return; } - let cp_node_id = nodes[1].our_node_id(); - nodes[2].splice_in(&cp_node_id, &chan_b_id); + let cp_node_id = harness.nodes[1].our_node_id(); + harness.nodes[2].splice_in(&cp_node_id, &harness.chan_b_id()); }, 0xa4 => { if !cfg!(splicing) { - assert_test_invariants(&nodes); + assert_test_invariants(&harness.nodes); return; } - let cp_node_id = nodes[1].our_node_id(); - nodes[0].splice_out(&cp_node_id, &chan_a_id); + let cp_node_id = harness.nodes[1].our_node_id(); + harness.nodes[0].splice_out(&cp_node_id, &harness.chan_a_id()); }, 0xa5 => { if !cfg!(splicing) { - assert_test_invariants(&nodes); + assert_test_invariants(&harness.nodes); return; } - let cp_node_id = nodes[0].our_node_id(); - nodes[1].splice_out(&cp_node_id, &chan_a_id); + let cp_node_id = harness.nodes[0].our_node_id(); + harness.nodes[1].splice_out(&cp_node_id, &harness.chan_a_id()); }, 0xa6 => { if !cfg!(splicing) { - assert_test_invariants(&nodes); + assert_test_invariants(&harness.nodes); return; } - let cp_node_id = nodes[2].our_node_id(); - nodes[1].splice_out(&cp_node_id, &chan_b_id); + let cp_node_id = harness.nodes[2].our_node_id(); + 
harness.nodes[1].splice_out(&cp_node_id, &harness.chan_b_id()); }, 0xa7 => { if !cfg!(splicing) { - assert_test_invariants(&nodes); + assert_test_invariants(&harness.nodes); return; } - let cp_node_id = nodes[1].our_node_id(); - nodes[2].splice_out(&cp_node_id, &chan_b_id); + let cp_node_id = harness.nodes[1].our_node_id(); + harness.nodes[2].splice_out(&cp_node_id, &harness.chan_b_id()); }, // Sync node by 1 block to cover confirmation of a transaction. 0xa8 => { - chain_state.confirm_pending_txs(); - nodes[0].sync_with_chain_state(&chain_state, Some(1)); + harness.chain_state.confirm_pending_txs(); + harness.nodes[0].sync_with_chain_state(&harness.chain_state, Some(1)); }, 0xa9 => { - chain_state.confirm_pending_txs(); - nodes[1].sync_with_chain_state(&chain_state, Some(1)); + harness.chain_state.confirm_pending_txs(); + harness.nodes[1].sync_with_chain_state(&harness.chain_state, Some(1)); }, 0xaa => { - chain_state.confirm_pending_txs(); - nodes[2].sync_with_chain_state(&chain_state, Some(1)); + harness.chain_state.confirm_pending_txs(); + harness.nodes[2].sync_with_chain_state(&harness.chain_state, Some(1)); }, // Sync node to chain tip to cover confirmation of a transaction post-reorg-risk. 
0xab => { - chain_state.confirm_pending_txs(); - nodes[0].sync_with_chain_state(&chain_state, None); + harness.chain_state.confirm_pending_txs(); + harness.nodes[0].sync_with_chain_state(&harness.chain_state, None); }, 0xac => { - chain_state.confirm_pending_txs(); - nodes[1].sync_with_chain_state(&chain_state, None); + harness.chain_state.confirm_pending_txs(); + harness.nodes[1].sync_with_chain_state(&harness.chain_state, None); }, 0xad => { - chain_state.confirm_pending_txs(); - nodes[2].sync_with_chain_state(&chain_state, None); + harness.chain_state.confirm_pending_txs(); + harness.nodes[2].sync_with_chain_state(&harness.chain_state, None); }, 0xb0 | 0xb1 | 0xb2 => { // Restart node A, picking among the in-flight `ChannelMonitor`s to use based on // the value of `v` we're matching. - ab_link.disconnect_for_reload(0, &mut nodes, &mut queues); - nodes[0].reload(v, &out, &router, chan_type); + harness.ab_link.disconnect_for_reload(0, &mut harness.nodes, &mut harness.queues); + harness.nodes[0].reload(v, &harness.out, &router, harness.chan_type); }, 0xb3..=0xbb => { // Restart node B, picking among the in-flight `ChannelMonitor`s to use based on // the value of `v` we're matching. - ab_link.disconnect_for_reload(1, &mut nodes, &mut queues); - bc_link.disconnect_for_reload(1, &mut nodes, &mut queues); - nodes[1].reload(v, &out, &router, chan_type); + harness.ab_link.disconnect_for_reload(1, &mut harness.nodes, &mut harness.queues); + harness.bc_link.disconnect_for_reload(1, &mut harness.nodes, &mut harness.queues); + harness.nodes[1].reload(v, &harness.out, &router, harness.chan_type); }, 0xbc | 0xbd | 0xbe => { // Restart node C, picking among the in-flight `ChannelMonitor`s to use based on // the value of `v` we're matching. 
- bc_link.disconnect_for_reload(2, &mut nodes, &mut queues); - nodes[2].reload(v, &out, &router, chan_type); + harness.bc_link.disconnect_for_reload(2, &mut harness.nodes, &mut harness.queues); + harness.nodes[2].reload(v, &harness.out, &router, harness.chan_type); }, - 0xc0 => nodes[0].keys_manager.disable_supported_ops_for_all_signers(), - 0xc1 => nodes[1].keys_manager.disable_supported_ops_for_all_signers(), - 0xc2 => nodes[2].keys_manager.disable_supported_ops_for_all_signers(), + 0xc0 => harness.nodes[0].keys_manager.disable_supported_ops_for_all_signers(), + 0xc1 => harness.nodes[1].keys_manager.disable_supported_ops_for_all_signers(), + 0xc2 => harness.nodes[2].keys_manager.disable_supported_ops_for_all_signers(), 0xc3 => { - nodes[0] + harness.nodes[0] .keys_manager .enable_op_for_all_signers(SignerOp::SignCounterpartyCommitment); - nodes[0].node.signer_unblocked(None); + harness.nodes[0].node.signer_unblocked(None); }, 0xc4 => { - nodes[1] + harness.nodes[1] .keys_manager .enable_op_for_all_signers(SignerOp::SignCounterpartyCommitment); - let filter = Some((nodes[0].our_node_id(), chan_a_id)); - nodes[1].node.signer_unblocked(filter); + let filter = Some((harness.nodes[0].our_node_id(), harness.chan_a_id())); + harness.nodes[1].node.signer_unblocked(filter); }, 0xc5 => { - nodes[1] + harness.nodes[1] .keys_manager .enable_op_for_all_signers(SignerOp::SignCounterpartyCommitment); - let filter = Some((nodes[2].our_node_id(), chan_b_id)); - nodes[1].node.signer_unblocked(filter); + let filter = Some((harness.nodes[2].our_node_id(), harness.chan_b_id())); + harness.nodes[1].node.signer_unblocked(filter); }, 0xc6 => { - nodes[2] + harness.nodes[2] .keys_manager .enable_op_for_all_signers(SignerOp::SignCounterpartyCommitment); - nodes[2].node.signer_unblocked(None); + harness.nodes[2].node.signer_unblocked(None); }, 0xc7 => { - nodes[0].keys_manager.enable_op_for_all_signers(SignerOp::GetPerCommitmentPoint); - nodes[0].node.signer_unblocked(None); + 
harness.nodes[0] + .keys_manager + .enable_op_for_all_signers(SignerOp::GetPerCommitmentPoint); + harness.nodes[0].node.signer_unblocked(None); }, 0xc8 => { - nodes[1].keys_manager.enable_op_for_all_signers(SignerOp::GetPerCommitmentPoint); - let filter = Some((nodes[0].our_node_id(), chan_a_id)); - nodes[1].node.signer_unblocked(filter); + harness.nodes[1] + .keys_manager + .enable_op_for_all_signers(SignerOp::GetPerCommitmentPoint); + let filter = Some((harness.nodes[0].our_node_id(), harness.chan_a_id())); + harness.nodes[1].node.signer_unblocked(filter); }, 0xc9 => { - nodes[1].keys_manager.enable_op_for_all_signers(SignerOp::GetPerCommitmentPoint); - let filter = Some((nodes[2].our_node_id(), chan_b_id)); - nodes[1].node.signer_unblocked(filter); + harness.nodes[1] + .keys_manager + .enable_op_for_all_signers(SignerOp::GetPerCommitmentPoint); + let filter = Some((harness.nodes[2].our_node_id(), harness.chan_b_id())); + harness.nodes[1].node.signer_unblocked(filter); }, 0xca => { - nodes[2].keys_manager.enable_op_for_all_signers(SignerOp::GetPerCommitmentPoint); - nodes[2].node.signer_unblocked(None); + harness.nodes[2] + .keys_manager + .enable_op_for_all_signers(SignerOp::GetPerCommitmentPoint); + harness.nodes[2].node.signer_unblocked(None); }, 0xcb => { - nodes[0].keys_manager.enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); - nodes[0].node.signer_unblocked(None); + harness.nodes[0] + .keys_manager + .enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); + harness.nodes[0].node.signer_unblocked(None); }, 0xcc => { - nodes[1].keys_manager.enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); - let filter = Some((nodes[0].our_node_id(), chan_a_id)); - nodes[1].node.signer_unblocked(filter); + harness.nodes[1] + .keys_manager + .enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); + let filter = Some((harness.nodes[0].our_node_id(), harness.chan_a_id())); + harness.nodes[1].node.signer_unblocked(filter); }, 0xcd => { - 
nodes[1].keys_manager.enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); - let filter = Some((nodes[2].our_node_id(), chan_b_id)); - nodes[1].node.signer_unblocked(filter); + harness.nodes[1] + .keys_manager + .enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); + let filter = Some((harness.nodes[2].our_node_id(), harness.chan_b_id())); + harness.nodes[1].node.signer_unblocked(filter); }, 0xce => { - nodes[2].keys_manager.enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); - nodes[2].node.signer_unblocked(None); + harness.nodes[2] + .keys_manager + .enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); + harness.nodes[2].node.signer_unblocked(None); }, 0xf0 => { - ab_link.complete_monitor_updates_for_node(0, &nodes, MonitorUpdateSelector::First); + harness.ab_link.complete_monitor_updates_for_node( + 0, + &harness.nodes, + MonitorUpdateSelector::First, + ); }, 0xf1 => { - ab_link.complete_monitor_updates_for_node(0, &nodes, MonitorUpdateSelector::Second); + harness.ab_link.complete_monitor_updates_for_node( + 0, + &harness.nodes, + MonitorUpdateSelector::Second, + ); }, 0xf2 => { - ab_link.complete_monitor_updates_for_node(0, &nodes, MonitorUpdateSelector::Last); + harness.ab_link.complete_monitor_updates_for_node( + 0, + &harness.nodes, + MonitorUpdateSelector::Last, + ); }, 0xf4 => { - ab_link.complete_monitor_updates_for_node(1, &nodes, MonitorUpdateSelector::First); + harness.ab_link.complete_monitor_updates_for_node( + 1, + &harness.nodes, + MonitorUpdateSelector::First, + ); }, 0xf5 => { - ab_link.complete_monitor_updates_for_node(1, &nodes, MonitorUpdateSelector::Second); + harness.ab_link.complete_monitor_updates_for_node( + 1, + &harness.nodes, + MonitorUpdateSelector::Second, + ); }, 0xf6 => { - ab_link.complete_monitor_updates_for_node(1, &nodes, MonitorUpdateSelector::Last); + harness.ab_link.complete_monitor_updates_for_node( + 1, + &harness.nodes, + MonitorUpdateSelector::Last, + ); }, 0xf8 => { - 
bc_link.complete_monitor_updates_for_node(1, &nodes, MonitorUpdateSelector::First); + harness.bc_link.complete_monitor_updates_for_node( + 1, + &harness.nodes, + MonitorUpdateSelector::First, + ); }, 0xf9 => { - bc_link.complete_monitor_updates_for_node(1, &nodes, MonitorUpdateSelector::Second); + harness.bc_link.complete_monitor_updates_for_node( + 1, + &harness.nodes, + MonitorUpdateSelector::Second, + ); }, 0xfa => { - bc_link.complete_monitor_updates_for_node(1, &nodes, MonitorUpdateSelector::Last); + harness.bc_link.complete_monitor_updates_for_node( + 1, + &harness.nodes, + MonitorUpdateSelector::Last, + ); }, 0xfc => { - bc_link.complete_monitor_updates_for_node(2, &nodes, MonitorUpdateSelector::First); + harness.bc_link.complete_monitor_updates_for_node( + 2, + &harness.nodes, + MonitorUpdateSelector::First, + ); }, 0xfd => { - bc_link.complete_monitor_updates_for_node(2, &nodes, MonitorUpdateSelector::Second); + harness.bc_link.complete_monitor_updates_for_node( + 2, + &harness.nodes, + MonitorUpdateSelector::Second, + ); }, 0xfe => { - bc_link.complete_monitor_updates_for_node(2, &nodes, MonitorUpdateSelector::Last); + harness.bc_link.complete_monitor_updates_for_node( + 2, + &harness.nodes, + MonitorUpdateSelector::Last, + ); }, 0xff => { // Test that no channel is in a stuck state where neither party can send funds even // after we resolve all pending events. 
- ab_link.reconnect(&mut nodes); - bc_link.reconnect(&mut nodes); + harness.ab_link.reconnect(&mut harness.nodes); + harness.bc_link.reconnect(&mut harness.nodes); for op in SUPPORTED_SIGNER_OPS { - nodes[0].keys_manager.enable_op_for_all_signers(op); - nodes[1].keys_manager.enable_op_for_all_signers(op); - nodes[2].keys_manager.enable_op_for_all_signers(op); + harness.nodes[0].keys_manager.enable_op_for_all_signers(op); + harness.nodes[1].keys_manager.enable_op_for_all_signers(op); + harness.nodes[2].keys_manager.enable_op_for_all_signers(op); } - nodes[0].node.signer_unblocked(None); - nodes[1].node.signer_unblocked(None); - nodes[2].node.signer_unblocked(None); + harness.nodes[0].node.signer_unblocked(None); + harness.nodes[1].node.signer_unblocked(None); + harness.nodes[2].node.signer_unblocked(None); process_all_events_impl( - &nodes, - &out, - &ab_link, - &bc_link, - &mut chain_state, - &mut payments, - &mut queues, + &harness.nodes, + &harness.out, + &harness.ab_link, + &harness.bc_link, + &mut harness.chain_state, + &mut harness.payments, + &mut harness.queues, ); // Since MPP payments are supported, we wait until we fully settle the state of all // channels to see if we have any committed HTLC parts of an MPP payment that need // to be failed back. 
- for node in &nodes { + for node in &harness.nodes { node.node.timer_tick_occurred(); } process_all_events_impl( - &nodes, - &out, - &ab_link, - &bc_link, - &mut chain_state, - &mut payments, - &mut queues, + &harness.nodes, + &harness.out, + &harness.ab_link, + &harness.bc_link, + &mut harness.chain_state, + &mut harness.payments, + &mut harness.queues, ); - payments.assert_all_resolved(); - payments.assert_claims_reported(); + harness.payments.assert_all_resolved(); + harness.payments.assert_claims_reported(); // Finally, make sure that at least one end of each channel can make a substantial payment - for &chan_id in ab_link.channel_ids() { + for &chan_id in harness.ab_link.channel_ids() { assert!( - payments.send_direct(&nodes, 0, 1, chan_id, 10_000_000) - || payments.send_direct(&nodes, 1, 0, chan_id, 10_000_000) + harness.payments.send_direct(&harness.nodes, 0, 1, chan_id, 10_000_000) + || harness.payments.send_direct( + &harness.nodes, + 1, + 0, + chan_id, + 10_000_000 + ) ); } - for &chan_id in bc_link.channel_ids() { + for &chan_id in harness.bc_link.channel_ids() { assert!( - payments.send_direct(&nodes, 1, 2, chan_id, 10_000_000) - || payments.send_direct(&nodes, 2, 1, chan_id, 10_000_000) + harness.payments.send_direct(&harness.nodes, 1, 2, chan_id, 10_000_000) + || harness.payments.send_direct( + &harness.nodes, + 2, + 1, + chan_id, + 10_000_000 + ) ); } - nodes[0].record_last_htlc_clear_fee(); - nodes[1].record_last_htlc_clear_fee(); - nodes[2].record_last_htlc_clear_fee(); + harness.nodes[0].record_last_htlc_clear_fee(); + harness.nodes[1].record_last_htlc_clear_fee(); + harness.nodes[2].record_last_htlc_clear_fee(); }, _ => test_return!(), } - for node in &mut nodes { - node.refresh_serialized_manager(); - } + harness.refresh_serialized_managers(); } } From 6feccc10937ac374d65f6f54bb532187f6ad2627 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 21 Apr 2026 16:00:30 +0200 Subject: [PATCH 15/29] Wrap chanmon consistency flow in Harness Move the 
main fuzz flow onto the harness. This completes the structural refactor so `do_test` is responsible for constructing and running the harness rather than managing the full scenario directly. Keep the final event-settling loop on the harness as well, avoiding an extra free function once the state it needs is already owned by `Harness`. --- fuzz/src/chanmon_consistency.rs | 660 +++++++++++++++++--------------- 1 file changed, 351 insertions(+), 309 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 553c3ec3aaa..8ca544294a7 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -2189,6 +2189,199 @@ impl<'a, 'd, Out: Output + MaybeSend + MaybeSync> Harness<'a, 'd, Out> { assert_test_invariants(&self.nodes); } + fn send_direct( + &mut self, source_idx: usize, dest_idx: usize, dest_chan_id: ChannelId, amt: u64, + ) -> bool { + self.payments.send_direct(&self.nodes, source_idx, dest_idx, dest_chan_id, amt) + } + + fn send_hop( + &mut self, source_idx: usize, middle_idx: usize, middle_chan_id: ChannelId, + dest_idx: usize, dest_chan_id: ChannelId, amt: u64, + ) { + self.payments.send_hop( + &self.nodes, + source_idx, + middle_idx, + middle_chan_id, + dest_idx, + dest_chan_id, + amt, + ); + } + + fn send_mpp_direct( + &mut self, source_idx: usize, dest_idx: usize, dest_chan_ids: &[ChannelId], amt: u64, + ) { + // Direct MPP payment with no hop. + self.payments.send_mpp_direct(&self.nodes, source_idx, dest_idx, dest_chan_ids, amt); + } + + fn send_mpp_hop( + &mut self, source_idx: usize, middle_idx: usize, middle_chan_ids: &[ChannelId], + dest_idx: usize, dest_chan_ids: &[ChannelId], amt: u64, + ) { + // MPP payment via hop, split across multiple channels on either or both hops. 
+ self.payments.send_mpp_hop( + &self.nodes, + source_idx, + middle_idx, + middle_chan_ids, + dest_idx, + dest_chan_ids, + amt, + ); + } + + fn process_msg_events( + &mut self, node_idx: usize, corrupt_forward: bool, limit_events: ProcessMessages, + ) -> bool { + process_msg_events_impl( + node_idx, + corrupt_forward, + limit_events, + &self.nodes, + &self.out, + &mut self.queues, + ) + } + + fn process_events(&mut self, node_idx: usize, fail: bool) -> bool { + process_events_impl(node_idx, fail, &self.nodes, &mut self.chain_state, &mut self.payments) + } + + fn process_all_events(&mut self) { + let mut last_pass_no_updates = false; + for i in 0..std::usize::MAX { + if i == 100 { + panic!( + "It may take may iterations to settle the state, but it should not take forever" + ); + } + // First, make sure no monitor updates are pending. + self.ab_link.complete_all_monitor_updates(&self.nodes); + self.bc_link.complete_all_monitor_updates(&self.nodes); + // Then, make sure any current forwards make their way to their destination. + if self.process_msg_events(0, false, ProcessMessages::AllMessages) { + last_pass_no_updates = false; + continue; + } + if self.process_msg_events(1, false, ProcessMessages::AllMessages) { + last_pass_no_updates = false; + continue; + } + if self.process_msg_events(2, false, ProcessMessages::AllMessages) { + last_pass_no_updates = false; + continue; + } + // Finally, make sure any payments are claimed. + if self.process_events(0, false) { + last_pass_no_updates = false; + continue; + } + if self.process_events(1, false) { + last_pass_no_updates = false; + continue; + } + if self.process_events(2, false) { + last_pass_no_updates = false; + continue; + } + if last_pass_no_updates { + // In some cases, `process_msg_events` may generate a message to send, but block + // sending until `complete_all_monitor_updates` gets called on the next iteration. + // Thus, we only exit if we manage two iterations with no messages or events to + // process. 
+ break; + } + last_pass_no_updates = true; + } + } + + fn disconnect_ab(&mut self) { + self.ab_link.disconnect(&mut self.nodes, &mut self.queues); + } + + fn disconnect_bc(&mut self) { + self.bc_link.disconnect(&mut self.nodes, &mut self.queues); + } + + fn reconnect_ab(&mut self) { + self.ab_link.reconnect(&mut self.nodes); + } + + fn reconnect_bc(&mut self) { + self.bc_link.reconnect(&mut self.nodes); + } + + fn restart_node(&mut self, node_idx: usize, v: u8, router: &'a FuzzRouter) { + match node_idx { + 0 => { + self.ab_link.disconnect_for_reload(0, &mut self.nodes, &mut self.queues); + }, + 1 => { + self.ab_link.disconnect_for_reload(1, &mut self.nodes, &mut self.queues); + self.bc_link.disconnect_for_reload(1, &mut self.nodes, &mut self.queues); + }, + 2 => { + self.bc_link.disconnect_for_reload(2, &mut self.nodes, &mut self.queues); + }, + _ => panic!("invalid node index"), + } + self.nodes[node_idx].reload(v, &self.out, router, self.chan_type); + } + + fn settle_all(&mut self) { + // First, make sure peers are all connected to each other. + self.reconnect_ab(); + self.reconnect_bc(); + + for op in SUPPORTED_SIGNER_OPS { + self.nodes[0].keys_manager.enable_op_for_all_signers(op); + self.nodes[1].keys_manager.enable_op_for_all_signers(op); + self.nodes[2].keys_manager.enable_op_for_all_signers(op); + } + self.nodes[0].node.signer_unblocked(None); + self.nodes[1].node.signer_unblocked(None); + self.nodes[2].node.signer_unblocked(None); + + self.process_all_events(); + + // Since MPP payments are supported, we wait until we fully settle the state of all + // channels to see if we have any committed HTLC parts of an MPP payment that need + // to be failed back. + for node in self.nodes.iter() { + node.node.timer_tick_occurred(); + } + self.process_all_events(); + + // Verify no payments are stuck, all should have resolved. 
+ self.payments.assert_all_resolved(); + // Verify that every payment claimed by a receiver resulted in a PaymentSent event at + // the sender. + self.payments.assert_claims_reported(); + + // Finally, make sure that at least one end of each channel can make a substantial payment. + let chan_ab_ids = self.ab_link.channel_ids().clone(); + let chan_bc_ids = self.bc_link.channel_ids().clone(); + for chan_id in chan_ab_ids { + assert!( + self.send_direct(0, 1, chan_id, 10_000_000) + || self.send_direct(1, 0, chan_id, 10_000_000) + ); + } + for chan_id in chan_bc_ids { + assert!( + self.send_direct(1, 2, chan_id, 10_000_000) + || self.send_direct(2, 1, chan_id, 10_000_000) + ); + } + + self.nodes[0].record_last_htlc_clear_fee(); + self.nodes[1].record_last_htlc_clear_fee(); + self.nodes[2].record_last_htlc_clear_fee(); + } + fn refresh_serialized_managers(&mut self) { for node in &mut self.nodes { node.refresh_serialized_manager(); @@ -2525,110 +2718,18 @@ fn process_events_impl( had_events } -fn process_all_events_impl( - nodes: &[HarnessNode<'_>; 3], out: &Out, ab_link: &PeerLink, bc_link: &PeerLink, - chain_state: &mut ChainState, payments: &mut PaymentTracker, queues: &mut EventQueues, -) { - let mut last_pass_no_updates = false; - for i in 0..std::usize::MAX { - if i == 100 { - panic!( - "It may take may iterations to settle the state, but it should not take forever" - ); - } - // First, make sure no monitor updates are pending. - ab_link.complete_all_monitor_updates(nodes); - bc_link.complete_all_monitor_updates(nodes); - // Then, make sure any current forwards make their way to their destination. 
- if process_msg_events_impl(0, false, ProcessMessages::AllMessages, nodes, out, queues) { - last_pass_no_updates = false; - continue; - } - if process_msg_events_impl(1, false, ProcessMessages::AllMessages, nodes, out, queues) { - last_pass_no_updates = false; - continue; - } - if process_msg_events_impl(2, false, ProcessMessages::AllMessages, nodes, out, queues) { - last_pass_no_updates = false; - continue; - } - // Finally, make sure any payments are claimed. - if process_events_impl(0, false, nodes, chain_state, payments) { - last_pass_no_updates = false; - continue; - } - if process_events_impl(1, false, nodes, chain_state, payments) { - last_pass_no_updates = false; - continue; - } - if process_events_impl(2, false, nodes, chain_state, payments) { - last_pass_no_updates = false; - continue; - } - if last_pass_no_updates { - // In some cases, `process_msg_events_impl` may generate a message to send, but - // block sending until `complete_all_monitor_updates` gets called on the next - // iteration. Thus, we only exit if we manage two iterations with no messages or - // events to process. - break; - } - last_pass_no_updates = true; - } -} - #[inline] pub fn do_test(data: &[u8], out: Out) { let router = FuzzRouter {}; let mut harness = Harness::new(data, out, &router); - let chan_a_id = harness.chan_a_id(); - let chan_b_id = harness.chan_b_id(); - macro_rules! test_return { - () => {{ + loop { + let v = if let Some(value) = harness.next_input_byte() { + value + } else { harness.finish(); return; - }}; - } - - loop { - macro_rules! process_msg_events { - ($node: expr, $corrupt_forward: expr, $limit_events: expr) => {{ - process_msg_events_impl( - $node, - $corrupt_forward, - $limit_events, - &harness.nodes, - &harness.out, - &mut harness.queues, - ) - }}; - } - - macro_rules! process_msg_noret { - ($node: expr, $corrupt_forward: expr, $limit_events: expr) => {{ - process_msg_events!($node, $corrupt_forward, $limit_events); - }}; - } - - macro_rules! 
process_events { - ($node: expr, $fail: expr) => {{ - process_events_impl( - $node, - $fail, - &harness.nodes, - &mut harness.chain_state, - &mut harness.payments, - ) - }}; - } - - macro_rules! process_ev_noret { - ($node: expr, $fail: expr) => {{ - process_events!($node, $fail); - }}; - } - - let v = if let Some(value) = harness.next_input_byte() { value } else { test_return!() }; + }; harness .out .locked_write(format!("READ A BYTE! HANDLING INPUT {:x}...........\n", v).as_bytes()); @@ -2664,266 +2765,278 @@ pub fn do_test(data: &[u8], out: Out) { } }, - 0x0c => harness.ab_link.disconnect(&mut harness.nodes, &mut harness.queues), - 0x0d => harness.bc_link.disconnect(&mut harness.nodes, &mut harness.queues), - 0x0e => harness.ab_link.reconnect(&mut harness.nodes), - 0x0f => harness.bc_link.reconnect(&mut harness.nodes), + 0x0c => { + harness.disconnect_ab(); + }, + 0x0d => { + harness.disconnect_bc(); + }, + 0x0e => { + harness.reconnect_ab(); + }, + 0x0f => { + harness.reconnect_bc(); + }, - 0x10 => process_msg_noret!(0, true, ProcessMessages::AllMessages), - 0x11 => process_msg_noret!(0, false, ProcessMessages::AllMessages), - 0x12 => process_msg_noret!(0, true, ProcessMessages::OneMessage), - 0x13 => process_msg_noret!(0, false, ProcessMessages::OneMessage), - 0x14 => process_msg_noret!(0, true, ProcessMessages::OnePendingMessage), - 0x15 => process_msg_noret!(0, false, ProcessMessages::OnePendingMessage), + 0x10 => { + harness.process_msg_events(0, true, ProcessMessages::AllMessages); + }, + 0x11 => { + harness.process_msg_events(0, false, ProcessMessages::AllMessages); + }, + 0x12 => { + harness.process_msg_events(0, true, ProcessMessages::OneMessage); + }, + 0x13 => { + harness.process_msg_events(0, false, ProcessMessages::OneMessage); + }, + 0x14 => { + harness.process_msg_events(0, true, ProcessMessages::OnePendingMessage); + }, + 0x15 => { + harness.process_msg_events(0, false, ProcessMessages::OnePendingMessage); + }, - 0x16 => process_ev_noret!(0, 
true), - 0x17 => process_ev_noret!(0, false), + 0x16 => { + harness.process_events(0, true); + }, + 0x17 => { + harness.process_events(0, false); + }, - 0x18 => process_msg_noret!(1, true, ProcessMessages::AllMessages), - 0x19 => process_msg_noret!(1, false, ProcessMessages::AllMessages), - 0x1a => process_msg_noret!(1, true, ProcessMessages::OneMessage), - 0x1b => process_msg_noret!(1, false, ProcessMessages::OneMessage), - 0x1c => process_msg_noret!(1, true, ProcessMessages::OnePendingMessage), - 0x1d => process_msg_noret!(1, false, ProcessMessages::OnePendingMessage), + 0x18 => { + harness.process_msg_events(1, true, ProcessMessages::AllMessages); + }, + 0x19 => { + harness.process_msg_events(1, false, ProcessMessages::AllMessages); + }, + 0x1a => { + harness.process_msg_events(1, true, ProcessMessages::OneMessage); + }, + 0x1b => { + harness.process_msg_events(1, false, ProcessMessages::OneMessage); + }, + 0x1c => { + harness.process_msg_events(1, true, ProcessMessages::OnePendingMessage); + }, + 0x1d => { + harness.process_msg_events(1, false, ProcessMessages::OnePendingMessage); + }, - 0x1e => process_ev_noret!(1, true), - 0x1f => process_ev_noret!(1, false), + 0x1e => { + harness.process_events(1, true); + }, + 0x1f => { + harness.process_events(1, false); + }, - 0x20 => process_msg_noret!(2, true, ProcessMessages::AllMessages), - 0x21 => process_msg_noret!(2, false, ProcessMessages::AllMessages), - 0x22 => process_msg_noret!(2, true, ProcessMessages::OneMessage), - 0x23 => process_msg_noret!(2, false, ProcessMessages::OneMessage), - 0x24 => process_msg_noret!(2, true, ProcessMessages::OnePendingMessage), - 0x25 => process_msg_noret!(2, false, ProcessMessages::OnePendingMessage), + 0x20 => { + harness.process_msg_events(2, true, ProcessMessages::AllMessages); + }, + 0x21 => { + harness.process_msg_events(2, false, ProcessMessages::AllMessages); + }, + 0x22 => { + harness.process_msg_events(2, true, ProcessMessages::OneMessage); + }, + 0x23 => { + 
harness.process_msg_events(2, false, ProcessMessages::OneMessage); + }, + 0x24 => { + harness.process_msg_events(2, true, ProcessMessages::OnePendingMessage); + }, + 0x25 => { + harness.process_msg_events(2, false, ProcessMessages::OnePendingMessage); + }, - 0x26 => process_ev_noret!(2, true), - 0x27 => process_ev_noret!(2, false), + 0x26 => { + harness.process_events(2, true); + }, + 0x27 => { + harness.process_events(2, false); + }, // 1/10th the channel size: 0x30 => { - harness.payments.send_direct(&harness.nodes, 0, 1, chan_a_id, 10_000_000); + harness.send_direct(0, 1, harness.chan_a_id(), 10_000_000); }, 0x31 => { - harness.payments.send_direct(&harness.nodes, 1, 0, chan_a_id, 10_000_000); + harness.send_direct(1, 0, harness.chan_a_id(), 10_000_000); }, 0x32 => { - harness.payments.send_direct(&harness.nodes, 1, 2, chan_b_id, 10_000_000); + harness.send_direct(1, 2, harness.chan_b_id(), 10_000_000); }, 0x33 => { - harness.payments.send_direct(&harness.nodes, 2, 1, chan_b_id, 10_000_000); + harness.send_direct(2, 1, harness.chan_b_id(), 10_000_000); }, 0x34 => { - harness.payments.send_hop( - &harness.nodes, - 0, - 1, - chan_a_id, - 2, - chan_b_id, - 10_000_000, - ); + harness.send_hop(0, 1, harness.chan_a_id(), 2, harness.chan_b_id(), 10_000_000); }, 0x35 => { - harness.payments.send_hop( - &harness.nodes, - 2, - 1, - chan_b_id, - 0, - chan_a_id, - 10_000_000, - ); + harness.send_hop(2, 1, harness.chan_b_id(), 0, harness.chan_a_id(), 10_000_000); }, 0x38 => { - harness.payments.send_direct(&harness.nodes, 0, 1, chan_a_id, 1_000_000); + harness.send_direct(0, 1, harness.chan_a_id(), 1_000_000); }, 0x39 => { - harness.payments.send_direct(&harness.nodes, 1, 0, chan_a_id, 1_000_000); + harness.send_direct(1, 0, harness.chan_a_id(), 1_000_000); }, 0x3a => { - harness.payments.send_direct(&harness.nodes, 1, 2, chan_b_id, 1_000_000); + harness.send_direct(1, 2, harness.chan_b_id(), 1_000_000); }, 0x3b => { - harness.payments.send_direct(&harness.nodes, 2, 1, 
chan_b_id, 1_000_000); + harness.send_direct(2, 1, harness.chan_b_id(), 1_000_000); }, 0x3c => { - harness.payments.send_hop(&harness.nodes, 0, 1, chan_a_id, 2, chan_b_id, 1_000_000); + harness.send_hop(0, 1, harness.chan_a_id(), 2, harness.chan_b_id(), 1_000_000); }, 0x3d => { - harness.payments.send_hop(&harness.nodes, 2, 1, chan_b_id, 0, chan_a_id, 1_000_000); + harness.send_hop(2, 1, harness.chan_b_id(), 0, harness.chan_a_id(), 1_000_000); }, 0x40 => { - harness.payments.send_direct(&harness.nodes, 0, 1, chan_a_id, 100_000); + harness.send_direct(0, 1, harness.chan_a_id(), 100_000); }, 0x41 => { - harness.payments.send_direct(&harness.nodes, 1, 0, chan_a_id, 100_000); + harness.send_direct(1, 0, harness.chan_a_id(), 100_000); }, 0x42 => { - harness.payments.send_direct(&harness.nodes, 1, 2, chan_b_id, 100_000); + harness.send_direct(1, 2, harness.chan_b_id(), 100_000); }, 0x43 => { - harness.payments.send_direct(&harness.nodes, 2, 1, chan_b_id, 100_000); + harness.send_direct(2, 1, harness.chan_b_id(), 100_000); }, 0x44 => { - harness.payments.send_hop(&harness.nodes, 0, 1, chan_a_id, 2, chan_b_id, 100_000); + harness.send_hop(0, 1, harness.chan_a_id(), 2, harness.chan_b_id(), 100_000); }, 0x45 => { - harness.payments.send_hop(&harness.nodes, 2, 1, chan_b_id, 0, chan_a_id, 100_000); + harness.send_hop(2, 1, harness.chan_b_id(), 0, harness.chan_a_id(), 100_000); }, 0x48 => { - harness.payments.send_direct(&harness.nodes, 0, 1, chan_a_id, 10_000); + harness.send_direct(0, 1, harness.chan_a_id(), 10_000); }, 0x49 => { - harness.payments.send_direct(&harness.nodes, 1, 0, chan_a_id, 10_000); + harness.send_direct(1, 0, harness.chan_a_id(), 10_000); }, 0x4a => { - harness.payments.send_direct(&harness.nodes, 1, 2, chan_b_id, 10_000); + harness.send_direct(1, 2, harness.chan_b_id(), 10_000); }, 0x4b => { - harness.payments.send_direct(&harness.nodes, 2, 1, chan_b_id, 10_000); + harness.send_direct(2, 1, harness.chan_b_id(), 10_000); }, 0x4c => { - 
harness.payments.send_hop(&harness.nodes, 0, 1, chan_a_id, 2, chan_b_id, 10_000); + harness.send_hop(0, 1, harness.chan_a_id(), 2, harness.chan_b_id(), 10_000); }, 0x4d => { - harness.payments.send_hop(&harness.nodes, 2, 1, chan_b_id, 0, chan_a_id, 10_000); + harness.send_hop(2, 1, harness.chan_b_id(), 0, harness.chan_a_id(), 10_000); }, 0x50 => { - harness.payments.send_direct(&harness.nodes, 0, 1, chan_a_id, 1_000); + harness.send_direct(0, 1, harness.chan_a_id(), 1_000); }, 0x51 => { - harness.payments.send_direct(&harness.nodes, 1, 0, chan_a_id, 1_000); + harness.send_direct(1, 0, harness.chan_a_id(), 1_000); }, 0x52 => { - harness.payments.send_direct(&harness.nodes, 1, 2, chan_b_id, 1_000); + harness.send_direct(1, 2, harness.chan_b_id(), 1_000); }, 0x53 => { - harness.payments.send_direct(&harness.nodes, 2, 1, chan_b_id, 1_000); + harness.send_direct(2, 1, harness.chan_b_id(), 1_000); }, 0x54 => { - harness.payments.send_hop(&harness.nodes, 0, 1, chan_a_id, 2, chan_b_id, 1_000); + harness.send_hop(0, 1, harness.chan_a_id(), 2, harness.chan_b_id(), 1_000); }, 0x55 => { - harness.payments.send_hop(&harness.nodes, 2, 1, chan_b_id, 0, chan_a_id, 1_000); + harness.send_hop(2, 1, harness.chan_b_id(), 0, harness.chan_a_id(), 1_000); }, 0x58 => { - harness.payments.send_direct(&harness.nodes, 0, 1, chan_a_id, 100); + harness.send_direct(0, 1, harness.chan_a_id(), 100); }, 0x59 => { - harness.payments.send_direct(&harness.nodes, 1, 0, chan_a_id, 100); + harness.send_direct(1, 0, harness.chan_a_id(), 100); }, 0x5a => { - harness.payments.send_direct(&harness.nodes, 1, 2, chan_b_id, 100); + harness.send_direct(1, 2, harness.chan_b_id(), 100); }, 0x5b => { - harness.payments.send_direct(&harness.nodes, 2, 1, chan_b_id, 100); + harness.send_direct(2, 1, harness.chan_b_id(), 100); }, 0x5c => { - harness.payments.send_hop(&harness.nodes, 0, 1, chan_a_id, 2, chan_b_id, 100); + harness.send_hop(0, 1, harness.chan_a_id(), 2, harness.chan_b_id(), 100); }, 0x5d => { - 
harness.payments.send_hop(&harness.nodes, 2, 1, chan_b_id, 0, chan_a_id, 100); + harness.send_hop(2, 1, harness.chan_b_id(), 0, harness.chan_a_id(), 100); }, 0x60 => { - harness.payments.send_direct(&harness.nodes, 0, 1, chan_a_id, 10); + harness.send_direct(0, 1, harness.chan_a_id(), 10); }, 0x61 => { - harness.payments.send_direct(&harness.nodes, 1, 0, chan_a_id, 10); + harness.send_direct(1, 0, harness.chan_a_id(), 10); }, 0x62 => { - harness.payments.send_direct(&harness.nodes, 1, 2, chan_b_id, 10); + harness.send_direct(1, 2, harness.chan_b_id(), 10); }, 0x63 => { - harness.payments.send_direct(&harness.nodes, 2, 1, chan_b_id, 10); + harness.send_direct(2, 1, harness.chan_b_id(), 10); }, 0x64 => { - harness.payments.send_hop(&harness.nodes, 0, 1, chan_a_id, 2, chan_b_id, 10); + harness.send_hop(0, 1, harness.chan_a_id(), 2, harness.chan_b_id(), 10); }, 0x65 => { - harness.payments.send_hop(&harness.nodes, 2, 1, chan_b_id, 0, chan_a_id, 10); + harness.send_hop(2, 1, harness.chan_b_id(), 0, harness.chan_a_id(), 10); }, 0x68 => { - harness.payments.send_direct(&harness.nodes, 0, 1, chan_a_id, 1); + harness.send_direct(0, 1, harness.chan_a_id(), 1); }, 0x69 => { - harness.payments.send_direct(&harness.nodes, 1, 0, chan_a_id, 1); + harness.send_direct(1, 0, harness.chan_a_id(), 1); }, 0x6a => { - harness.payments.send_direct(&harness.nodes, 1, 2, chan_b_id, 1); + harness.send_direct(1, 2, harness.chan_b_id(), 1); }, 0x6b => { - harness.payments.send_direct(&harness.nodes, 2, 1, chan_b_id, 1); + harness.send_direct(2, 1, harness.chan_b_id(), 1); }, 0x6c => { - harness.payments.send_hop(&harness.nodes, 0, 1, chan_a_id, 2, chan_b_id, 1); + harness.send_hop(0, 1, harness.chan_a_id(), 2, harness.chan_b_id(), 1); }, 0x6d => { - harness.payments.send_hop(&harness.nodes, 2, 1, chan_b_id, 0, chan_a_id, 1); + harness.send_hop(2, 1, harness.chan_b_id(), 0, harness.chan_a_id(), 1); }, // MPP payments // 0x70: direct MPP from 0 to 1 (multi A-B channels) 0x70 => { - 
harness.payments.send_mpp_direct( - &harness.nodes, - 0, - 1, - harness.ab_link.channel_ids(), - 1_000_000, - ); + let chan_ab_ids = harness.ab_link.channel_ids().clone(); + harness.send_mpp_direct(0, 1, &chan_ab_ids, 1_000_000); }, // 0x71: MPP 0->1->2, multi channels on first hop (A-B) 0x71 => { - harness.payments.send_mpp_hop( - &harness.nodes, - 0, - 1, - harness.ab_link.channel_ids(), - 2, - &[chan_b_id], - 1_000_000, - ); + let chan_ab_ids = harness.ab_link.channel_ids().clone(); + let chan_b_id = harness.chan_b_id(); + harness.send_mpp_hop(0, 1, &chan_ab_ids, 2, &[chan_b_id], 1_000_000); }, // 0x72: MPP 0->1->2, multi channels on both hops (A-B and B-C) 0x72 => { - harness.payments.send_mpp_hop( - &harness.nodes, - 0, - 1, - harness.ab_link.channel_ids(), - 2, - harness.bc_link.channel_ids(), - 1_000_000, - ); + let chan_ab_ids = harness.ab_link.channel_ids().clone(); + let chan_bc_ids = harness.bc_link.channel_ids().clone(); + harness.send_mpp_hop(0, 1, &chan_ab_ids, 2, &chan_bc_ids, 1_000_000); }, // 0x73: MPP 0->1->2, multi channels on second hop (B-C) 0x73 => { - harness.payments.send_mpp_hop( - &harness.nodes, - 0, - 1, - &[chan_a_id], - 2, - harness.bc_link.channel_ids(), - 1_000_000, - ); + let chan_a_id = harness.chan_a_id(); + let chan_bc_ids = harness.bc_link.channel_ids().clone(); + harness.send_mpp_hop(0, 1, &[chan_a_id], 2, &chan_bc_ids, 1_000_000); }, // 0x74: direct MPP from 0 to 1, multi parts over single channel 0x74 => { - harness.payments.send_mpp_direct( - &harness.nodes, - 0, - 1, - &[chan_a_id, chan_a_id, chan_a_id], - 1_000_000, - ); + let chan_a_id = harness.chan_a_id(); + harness.send_mpp_direct(0, 1, &[chan_a_id, chan_a_id, chan_a_id], 1_000_000); }, 0x80 => harness.nodes[0].bump_fee_estimate(harness.chan_type), @@ -3029,21 +3142,17 @@ pub fn do_test(data: &[u8], out: Out) { 0xb0 | 0xb1 | 0xb2 => { // Restart node A, picking among the in-flight `ChannelMonitor`s to use based on // the value of `v` we're matching. 
- harness.ab_link.disconnect_for_reload(0, &mut harness.nodes, &mut harness.queues); - harness.nodes[0].reload(v, &harness.out, &router, harness.chan_type); + harness.restart_node(0, v, &router); }, 0xb3..=0xbb => { // Restart node B, picking among the in-flight `ChannelMonitor`s to use based on // the value of `v` we're matching. - harness.ab_link.disconnect_for_reload(1, &mut harness.nodes, &mut harness.queues); - harness.bc_link.disconnect_for_reload(1, &mut harness.nodes, &mut harness.queues); - harness.nodes[1].reload(v, &harness.out, &router, harness.chan_type); + harness.restart_node(1, v, &router); }, 0xbc | 0xbd | 0xbe => { // Restart node C, picking among the in-flight `ChannelMonitor`s to use based on // the value of `v` we're matching. - harness.bc_link.disconnect_for_reload(2, &mut harness.nodes, &mut harness.queues); - harness.nodes[2].reload(v, &harness.out, &router, harness.chan_type); + harness.restart_node(2, v, &router); }, 0xc0 => harness.nodes[0].keys_manager.disable_supported_ops_for_all_signers(), @@ -3219,79 +3328,12 @@ pub fn do_test(data: &[u8], out: Out) { 0xff => { // Test that no channel is in a stuck state where neither party can send funds even // after we resolve all pending events. 
- - harness.ab_link.reconnect(&mut harness.nodes); - harness.bc_link.reconnect(&mut harness.nodes); - - for op in SUPPORTED_SIGNER_OPS { - harness.nodes[0].keys_manager.enable_op_for_all_signers(op); - harness.nodes[1].keys_manager.enable_op_for_all_signers(op); - harness.nodes[2].keys_manager.enable_op_for_all_signers(op); - } - harness.nodes[0].node.signer_unblocked(None); - harness.nodes[1].node.signer_unblocked(None); - harness.nodes[2].node.signer_unblocked(None); - - process_all_events_impl( - &harness.nodes, - &harness.out, - &harness.ab_link, - &harness.bc_link, - &mut harness.chain_state, - &mut harness.payments, - &mut harness.queues, - ); - - // Since MPP payments are supported, we wait until we fully settle the state of all - // channels to see if we have any committed HTLC parts of an MPP payment that need - // to be failed back. - for node in &harness.nodes { - node.node.timer_tick_occurred(); - } - process_all_events_impl( - &harness.nodes, - &harness.out, - &harness.ab_link, - &harness.bc_link, - &mut harness.chain_state, - &mut harness.payments, - &mut harness.queues, - ); - - harness.payments.assert_all_resolved(); - harness.payments.assert_claims_reported(); - - // Finally, make sure that at least one end of each channel can make a substantial payment - for &chan_id in harness.ab_link.channel_ids() { - assert!( - harness.payments.send_direct(&harness.nodes, 0, 1, chan_id, 10_000_000) - || harness.payments.send_direct( - &harness.nodes, - 1, - 0, - chan_id, - 10_000_000 - ) - ); - } - for &chan_id in harness.bc_link.channel_ids() { - assert!( - harness.payments.send_direct(&harness.nodes, 1, 2, chan_id, 10_000_000) - || harness.payments.send_direct( - &harness.nodes, - 2, - 1, - chan_id, - 10_000_000 - ) - ); - } - - harness.nodes[0].record_last_htlc_clear_fee(); - harness.nodes[1].record_last_htlc_clear_fee(); - harness.nodes[2].record_last_htlc_clear_fee(); + harness.settle_all(); + }, + _ => { + assert_test_invariants(&harness.nodes); + return; 
}, - _ => test_return!(), } harness.refresh_serialized_managers(); From df76016383a9d1c270f0926c983acc3233f5d36c Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 21 Apr 2026 10:27:34 +0200 Subject: [PATCH 16/29] [dropme] testcases and docs Collect local notes and fuzz inputs used while investigating the force-close chanmon_consistency work. These files are marked dropme so they can aid review and reduction without becoming part of the final patch set. --- fc-crashes.md | 162 +++++++++ fuzz/.gitignore | 1 + fuzz/FC-INFO.md | 107 ++++++ fuzz/ONCHAINTX-BUGS.md | 327 ++++++++++++++++++ fuzz/OPEN-ISSUES.md | 39 +++ fuzz/test_cases/base32/smoke | 1 - fuzz/test_cases/bech32_parse/smoke | 1 - ...h-02830a6ff7757f3570924b0c0fd9118a7cdd9770 | Bin 0 -> 24 bytes ...h-0473b0e767d9a98de62538ce5afcbbc2e6ec5af2 | 1 + ...h-05e175d40f60b823f730fa874d98dc10dd2bb6ad | 1 + ...h-07bdc4e56ee67bd2ffa409f76529199d748ab2d8 | 1 + ...h-096cc3008264dccaefb945f5a4b7a2d3c9f8e90c | 1 + ...h-09a17e06913dea74dba796940cec86cb4e2dd597 | 1 + ...h-09f5a41270b07f70a031884cbdfd081e8600923e | Bin 0 -> 22 bytes ...h-0b87d8b430697fe9d1781a38f41a68ebcf7b18c1 | 1 + ...h-0c3334736f5c55e44088d6140580354827026732 | 1 + ...h-0dcddb7aa2b729fa8de829e5ea82c38b5918acfa | 1 + ...h-0f0ca42c8b4c815495919663652db18483d5e846 | 1 + ...h-14a022e3e4d88420a08bc4c2d67193f74e4f8bdd | 1 + ...h-15b45517356c182051c2b334e09c00f4f9368e94 | 1 + ...h-18062bd37528e06c4921e7ef7df2b2c3e676823b | 1 + ...h-22125d8a200205d52723ec232f5aab710856f4b0 | Bin 0 -> 22 bytes ...h-228ea00412a2fab1e866fc6df32ffd00bbfe81ad | Bin 0 -> 24 bytes ...h-242de208110143401fcf4e1ebaa7d9d38fb93611 | 1 + ...h-24f1373b1cf51f95af854d6d8730336b77728007 | 1 + ...h-2923c14608fb259c21862cd71ffeb6ac74b0ba32 | 1 + ...h-2a0852bec1d75334538dacec26831db6995b6e33 | 1 + ...h-2d93541536e19c030d95d236e6be545352d98b80 | 1 + ...h-2e002fcfdc76c5981f5f93c0f842b548fb56c7a7 | 1 + ...h-2fad50c7fd20b250f0349887445af198124900df | Bin 0 -> 23 bytes 
...h-2fcd63b2ed709dfcd9c6a08dc673d1f896b6cdad | Bin 0 -> 26 bytes ...h-304db9c93d320420bdef656699ad1f49c37feaf7 | 1 + ...h-315119ea09b9febec156d212fe57020def4b5af4 | Bin 0 -> 25 bytes ...h-32a013d8bd38f3ba39d4a214ba0780edd41ccb85 | Bin 0 -> 23 bytes ...h-33c08a8f15f1c842df5da4fc92228d00606573f9 | 1 + ...h-33e77c2f720493e306bbfea79f151388ca7a04ea | Bin 0 -> 24 bytes ...h-37a18356d608c97415c0a1bef6a0f13fe04c8b97 | Bin 0 -> 22 bytes ...h-380ee6f8c1030828f4d80582154b0418fca58c90 | Bin 0 -> 20 bytes ...h-38192a6cb0500969f301c7a6742949ecd213bfae | 1 + ...h-387c18b4c7235aa1960400de5b0d5798202ec3b1 | 1 + ...h-3bb94b7b4397397caa5eb0e9ba6abb9a18028270 | Bin 0 -> 23 bytes ...h-3be4d9d7a75c8459b3ec349474c7fc206b00fe9c | Bin 0 -> 26 bytes ...h-3cda5b606ce05f4207207e8fd1480fe530a51b13 | 1 + ...h-3f8a6e5b806235b795ebea3d6998943a3ab6ff9d | Bin 0 -> 22 bytes ...h-41ffe016736ddfef0eb1d877b35a0c85bd5cfd5f | 1 + ...h-45240f379a3a24948c4b091fd658a9f0ef4d4963 | 1 + ...h-45872f91e28e4ed1e8814084bbf5ada6fe4963f0 | 1 + ...h-49e1240588c1b4507b24c4f07dae75faef02a639 | 1 + ...h-4da789d875488d8f244bccefaff4295ae801c745 | Bin 0 -> 21 bytes ...h-4e4b47b5a0f4c4689868a3003ae7d62e5ac78484 | 1 + ...h-53d6404dc8dee21adf112f3c909459f67e176301 | Bin 0 -> 24 bytes ...h-544eff2c026e0464aff1a9afaa4acd2912e93267 | 1 + ...h-54a3422e8e1c578813d5cfce1f8b732040fc668e | 1 + ...h-55fd3e4e7c2506a9ce067b0e0a468161db22dec0 | Bin 0 -> 20 bytes ...h-56271abf5206dd39ac1a1035d49d41f61ee0606e | 1 + ...h-5be7542ec7a98b835a2c3dca63e3d89a76050fe6 | 3 + ...h-5d2ca379ca5dabcbfae13c3eca104e48a4bf94c9 | 1 + ...h-63164e99d1a0561c352ea11be619b8505a83ceb4 | Bin 0 -> 20 bytes ...h-6aec66d5104839013b44f977a01915c29f2e6795 | 1 + ...h-6af2409d5c331f44f76e165e735cd2e9104aed9e | 1 + ...h-6b5c5549ee7ed6e7fcf9613d62c295fd65d100ce | 1 + ...h-6bd8c4ea12175b25bb1d239699622ba5485248cf | 1 + ...h-6bda1f46384cf85ae2d9ca8048619963a9416ddc | 1 + ...h-767cf8ac05cf878f93f55fe21f96a9e76b28c5f9 | 1 + ...h-7776698efb54442fa8170cb39b7c7bf72e515335 | 1 
+ ...h-78202f87ee8c211227082479a8bd67cd1e7f16e5 | Bin 0 -> 8 bytes ...h-79790f24a47ad8f39398df48800b946cd85fc3fe | 1 + ...h-7ab7fa1fb4303a91c57ec241fefdf5826d2b52aa | Bin 0 -> 26 bytes ...h-7b7826cea32794a2ab2c245cd3dc024355b07c78 | Bin 0 -> 16 bytes ...h-7c72226eeba2eb5192d9b7adfead405d3b93fdf9 | Bin 0 -> 24 bytes ...h-7cb0cf9df154821deb68a78001ce9c0e27f97b0a | 1 + ...h-815718bf6e59d981220f037f7509c9cfe5401485 | 1 + ...h-8453a4a3cf9dd9f60e5aa40fdce440b69f62869d | 1 + ...h-86ee8ae4c13784d3d750f6d4b970ec0852ea2bc3 | 1 + ...h-87093ec5446a84482f5a728fc65a51a15b6de843 | Bin 0 -> 27 bytes ...h-87f98b753291bd37f92795d32e2df4c3597dd6dd | Bin 0 -> 25 bytes ...h-8ab54f3642a60a239a7bb787838f3e5a6b6f4f41 | 1 + ...h-8ec6798103af6cedfdec68373991c0c0a73e3770 | 1 + ...h-8f5cc4f6de42f52dcb571b6c0f21df957eb25462 | Bin 0 -> 27 bytes ...h-8fb6d213ac7d14f6c62c09e7baf392f01e8688d0 | 1 + ...h-90c560825e852e3dfb64e09d6764b85cf9f7689d | 1 + ...h-91d8898837e425d607ef36ed73fa364b0fa58121 | Bin 0 -> 17 bytes ...h-91ebb8583ed7705e2601334e52428ea5eb80a681 | Bin 0 -> 23 bytes ...h-93c44c96a5c5e1d4532370b2c77bb372170bd59b | 1 + ...h-9c69d63a708c0a83d2d1fa60577a1a9270924ff2 | 1 + ...h-9c84f405725b7c171338f776b7ac7f3a3b010f34 | Bin 0 -> 17 bytes ...h-a235e98ab95f66315cef361c49eea5483ce2d91a | 1 + ...h-a6628891c34498ca2cb4122c2ee66fe4ba6cd01d | Bin 0 -> 26 bytes ...h-a8f59ca92bcc53e042fd759493c67a35f308721a | 1 + ...h-ace48b23767637be15eb3763e88170f7aab17cd4 | Bin 0 -> 23 bytes ...h-adf5f907d4bc584e6348b7188532f6fc08cda464 | 1 + ...h-af7499de68300f3346be7b69ff913c8da2394d23 | 1 + ...h-b2e70396bda55d716c022a683df49d72e28b5cae | Bin 0 -> 24 bytes ...h-b5aed7ccaeacb4347cc7599a258e28e1ccf3855b | 1 + ...h-ba5dd0ee55c764b2ae71543e95fd63c496d924bd | 1 + ...h-baa2cd71d1e22c966f3c2ddc44cf5b297da5d671 | 1 + ...h-c1fe932fa21c4382ba71ec745790386f010b939c | 1 + ...h-c29e58a510e698fc8205e4896a938adb92424105 | 1 + ...h-c7b166535d5d3591604aeb239b01592f24fff27b | 1 + ...h-cb39e58d20b35ceb4ecd9fc8dd91272e308f11a1 | 
1 + ...h-cc144c9fa2f889e3c3665b1e7c870ddd41cb3e15 | 1 + ...h-cec678efd9c2c03dccf92f62c20e9520566d130f | Bin 0 -> 25 bytes ...h-cedac69cfff63a360470d6f051164b149f74bc18 | Bin 0 -> 25 bytes ...h-cf44c3acf507cae6fd00e0bf331d18536c551ce1 | 1 + ...h-d09e9319d459f21b180f1c730fbf4e89840bd6c5 | Bin 0 -> 20 bytes ...h-d11e5e5259e57e32f120f0d005bc52aead73d099 | 1 + ...h-d3ee0bad80fbd14f1f62903fc6d23f26ed5eb405 | 1 + ...h-d5124444b5e39d9a67c395e6325d340fff97a159 | 1 + ...h-d87da6cc047b35d69808787157394a0ac7c9ff92 | Bin 0 -> 26 bytes ...h-d91352ebdfa46f3734403e7e041bf0faa559e97a | 1 + ...h-d9affe3db851b50c3b1186ff86f97710cfd115b0 | 1 + ...h-db2606af8c9a718bd0da6a6e03c51fd4c84909cd | Bin 0 -> 17 bytes ...h-dbf141642a66403570204baf8a310783885e081d | 1 + ...h-dd67d75d834201769b29d89a5243fdae7f6d8ad1 | 1 + ...h-df426fe2abe15519a7ad994034bd2711f26f80af | 1 + ...h-ea07f1a57bd66e8a0b48347a45f12a4e48fa4b02 | Bin 0 -> 25 bytes ...h-efc04dc2a68b17479ad445cce2b84a91a7d3e9b9 | 1 + ...h-f2e76e1926cc2604f35de1316e48cb7c8e2aee65 | 1 + ...h-f4567ec41df8f30f9c0975e2b9cb3bed9278df8c | Bin 0 -> 26 bytes ...h-f804080d84b3bfc7adfe563ad1ac9013733983f6 | Bin 0 -> 24 bytes ...h-f995b58793f0e17361d409df7ddb99d7c14873cd | 1 + ...h-fd80c35839107ef932a09d1fd63e34d2a6cd6451 | 1 + ...h-fda69e901e92ce81134859dfbd53ceec84393aeb | 1 + .../fc_advance_before_drain | Bin 0 -> 9 bytes .../fc_advance_before_drain_keyed_anchors | 1 + ..._advance_before_drain_zero_fee_commitments | 1 + .../fc_after_claim_before_forward | Bin 0 -> 18 bytes ...c_after_claim_before_forward_keyed_anchors | 1 + ..._claim_before_forward_zero_fee_commitments | 1 + .../chanmon_consistency/fc_after_disconnect | Bin 0 -> 10 bytes .../fc_after_disconnect_keyed_anchors | 1 + .../fc_after_disconnect_zero_fee_commitments | 1 + .../chanmon_consistency/fc_after_fee_update | Bin 0 -> 11 bytes .../fc_after_fee_update_keyed_anchors | 1 + .../fc_after_fee_update_zero_fee_commitments | 1 + .../chanmon_consistency/fc_after_timer_ticks | Bin 0 -> 13 bytes 
.../fc_after_timer_ticks_keyed_anchors | 1 + .../fc_after_timer_ticks_zero_fee_commitments | 1 + .../chanmon_consistency/fc_all_channels | Bin 0 -> 12 bytes .../fc_all_channels_keyed_anchors | 1 + .../fc_all_channels_zero_fee_commitments | 1 + .../fc_async_complete_after | 2 + .../fc_async_complete_after_keyed_anchors | 2 + ..._async_complete_after_zero_fee_commitments | 2 + .../fc_async_hop_middle_closes | 4 + .../fc_async_hop_middle_closes_keyed_anchors | 4 + ...ync_hop_middle_closes_zero_fee_commitments | 4 + .../chanmon_consistency/fc_async_many_pays | 2 + .../fc_async_many_pays_keyed_anchors | 2 + .../fc_async_many_pays_zero_fee_commitments | 2 + .../chanmon_consistency/fc_async_no_complete | 1 + .../fc_async_no_complete_keyed_anchors | 1 + .../fc_async_no_complete_zero_fee_commitments | 1 + .../fc_async_pending_never_complete | 1 + ...async_pending_never_complete_keyed_anchors | 1 + ...ending_never_complete_zero_fee_commitments | 1 + .../chanmon_consistency/fc_async_restart | 1 + .../fc_async_restart_keyed_anchors | 1 + .../fc_async_restart_zero_fee_commitments | 1 + .../chanmon_consistency/fc_b_closes_both | Bin 0 -> 10 bytes .../fc_b_closes_both_hop_inflight | Bin 0 -> 17 bytes ...c_b_closes_both_hop_inflight_keyed_anchors | 1 + ...ses_both_hop_inflight_zero_fee_commitments | 1 + .../fc_b_closes_both_keyed_anchors | 1 + .../fc_b_closes_both_zero_fee_commitments | 1 + .../fc_bc_during_hop_ab_only | Bin 0 -> 12 bytes .../fc_bc_during_hop_ab_only_keyed_anchors | 1 + ...bc_during_hop_ab_only_zero_fee_commitments | 1 + .../chanmon_consistency/fc_bc_while_ab_htlc | Bin 0 -> 15 bytes .../fc_bc_while_ab_htlc_keyed_anchors | 1 + .../fc_bc_while_ab_htlc_zero_fee_commitments | 1 + .../chanmon_consistency/fc_bidir_htlcs | Bin 0 -> 15 bytes .../fc_bidir_htlcs_keyed_anchors | 1 + .../fc_bidir_htlcs_zero_fee_commitments | 1 + .../fc_both_sides_same_chan | Bin 0 -> 10 bytes .../fc_both_sides_same_chan_keyed_anchors | 1 + ..._both_sides_same_chan_zero_fee_commitments | 1 + 
.../fc_bump_htlc_p2wpkh_fee_estimate | 1 + .../fc_c_initiates_b_restart | Bin 0 -> 10 bytes .../fc_c_initiates_b_restart_keyed_anchors | 1 + ...c_initiates_b_restart_zero_fee_commitments | 1 + .../chanmon_consistency/fc_cascade_c_then_b | Bin 0 -> 14 bytes .../fc_cascade_c_then_b_keyed_anchors | 1 + .../fc_cascade_c_then_b_zero_fee_commitments | 1 + .../fc_claimable_on_close_needs_confirmation | 1 + .../fc_claimed_dust_htlc_sender_fails | Bin 0 -> 8 bytes .../fc_claimed_mpp_dust_path_still_succeeds | 1 + .../fc_claimed_payment_sender_completion | 1 + .../fc_close_then_disconnect_all | Bin 0 -> 12 bytes ...fc_close_then_disconnect_all_keyed_anchors | 1 + ...e_then_disconnect_all_zero_fee_commitments | 1 + ..._retire_old_snapshots_zero_fee_commitments | Bin 0 -> 8 bytes .../fc_completed_update_retires_old_snapshot | Bin 0 -> 8 bytes ...tious_claim_stuck_after_force_close_218996 | 1 + ...tious_claim_stuck_after_force_close_36a22e | 1 + ...tious_claim_stuck_after_force_close_d7793e | 1 + .../chanmon_consistency/fc_direct_pay_claimed | Bin 0 -> 16 bytes .../fc_direct_pay_claimed_keyed_anchors | 1 + ...fc_direct_pay_claimed_zero_fee_commitments | 1 + .../chanmon_consistency/fc_disabled_signers | Bin 0 -> 13 bytes .../fc_disabled_signers_keyed_anchors | 1 + .../fc_disabled_signers_zero_fee_commitments | 1 + .../fc_disconnect_close_reconnect | Bin 0 -> 13 bytes ...c_disconnect_close_reconnect_keyed_anchors | 1 + ...nnect_close_reconnect_zero_fee_commitments | 1 + .../fc_disconnect_drain_reconnect | Bin 0 -> 13 bytes ...c_disconnect_drain_reconnect_keyed_anchors | 1 + ...nnect_drain_reconnect_zero_fee_commitments | 1 + .../chanmon_consistency/fc_drain_a_only | Bin 0 -> 7 bytes .../fc_drain_a_only_keyed_anchors | 1 + .../fc_drain_a_only_zero_fee_commitments | 1 + ...cate_pending_claim_event_after_force_close | 1 + ...ent_after_force_close_zero_fee_commitments | 1 + ...ing_claim_request_after_force_close_39b47f | 1 + ...ing_claim_request_after_force_close_ed278d | 1 + 
.../chanmon_consistency/fc_during_reconnect | Bin 0 -> 13 bytes .../fc_during_reconnect_keyed_anchors | 1 + .../fc_during_reconnect_zero_fee_commitments | 1 + .../chanmon_consistency/fc_dust_htlcs | Bin 0 -> 19 bytes .../fc_dust_htlcs_keyed_anchors | 1 + .../fc_dust_htlcs_zero_fee_commitments | 1 + ...t_path_claim_expected_fail_but_sent_5099d3 | 1 + ...t_path_claim_expected_fail_but_sent_595140 | 1 + ...t_path_claim_expected_fail_but_sent_7a4062 | 1 + ...t_path_claim_expected_fail_but_sent_9d7311 | 1 + ...t_path_claim_expected_fail_but_sent_b1281e | 1 + ...t_path_claim_expected_fail_but_sent_bf210c | 1 + .../fc_events_between_drains | Bin 0 -> 15 bytes .../fc_events_between_drains_keyed_anchors | 1 + ...events_between_drains_zero_fee_commitments | 1 + .../chanmon_consistency/fc_events_only | Bin 0 -> 12 bytes .../fc_events_only_keyed_anchors | 1 + .../fc_events_only_zero_fee_commitments | 1 + .../chanmon_consistency/fc_exact_cltv_height | Bin 0 -> 15 bytes .../fc_exact_cltv_height_keyed_anchors | 1 + .../fc_exact_cltv_height_zero_fee_commitments | 1 + .../chanmon_consistency/fc_hop_b_has_preimage | Bin 0 -> 20 bytes .../fc_hop_b_has_preimage_keyed_anchors | 1 + ...fc_hop_b_has_preimage_zero_fee_commitments | 1 + .../fc_hop_before_bc_commit | Bin 0 -> 13 bytes .../fc_hop_before_bc_commit_keyed_anchors | 1 + ..._hop_before_bc_commit_zero_fee_commitments | 1 + .../chanmon_consistency/fc_hop_mid_flight | Bin 0 -> 16 bytes .../fc_hop_mid_flight_keyed_anchors | 1 + .../fc_hop_mid_flight_zero_fee_commitments | 1 + .../chanmon_consistency/fc_htlc_late_signer | Bin 0 -> 17 bytes .../fc_htlc_late_signer_keyed_anchors | 1 + .../fc_htlc_late_signer_zero_fee_commitments | 1 + .../chanmon_consistency/fc_immediate_settle | Bin 0 -> 3 bytes .../fc_immediate_settle_keyed_anchors | 1 + .../fc_immediate_settle_zero_fee_commitments | 1 + .../fc_inprogress_monitors | Bin 0 -> 11 bytes .../fc_inprogress_monitors_keyed_anchors | Bin 0 -> 11 bytes 
...c_inprogress_monitors_zero_fee_commitments | Bin 0 -> 11 bytes .../fc_interleaved_channels | Bin 0 -> 17 bytes .../fc_interleaved_channels_keyed_anchors | 1 + ..._interleaved_channels_zero_fee_commitments | 1 + .../fc_large_payment_resolve | Bin 0 -> 16 bytes .../fc_large_payment_resolve_keyed_anchors | 1 + ...large_payment_resolve_zero_fee_commitments | 1 + .../chanmon_consistency/fc_many_htlcs | Bin 0 -> 15 bytes .../fc_many_htlcs_keyed_anchors | 1 + .../fc_many_htlcs_zero_fee_commitments | 1 + .../fc_mid_fulfill_propagation | Bin 0 -> 21 bytes .../fc_mid_fulfill_propagation_keyed_anchors | 1 + ...d_fulfill_propagation_zero_fee_commitments | 1 + ..._monitor_update_replay_out_of_order_dcbc86 | 1 + .../chanmon_consistency/fc_msgs_before_drain | Bin 0 -> 13 bytes .../fc_msgs_before_drain_keyed_anchors | 1 + .../fc_msgs_before_drain_zero_fee_commitments | 1 + .../chanmon_consistency/fc_multi_drain_rounds | Bin 0 -> 16 bytes .../fc_multi_drain_rounds_keyed_anchors | 1 + ...fc_multi_drain_rounds_zero_fee_commitments | 1 + .../chanmon_consistency/fc_no_settle | Bin 0 -> 3 bytes .../fc_no_settle_keyed_anchors | 1 + .../fc_no_settle_zero_fee_commitments | 1 + .../chanmon_consistency/fc_node_restart | Bin 0 -> 10 bytes .../fc_node_restart_keyed_anchors | 1 + .../fc_node_restart_zero_fee_commitments | 1 + .../chanmon_consistency/fc_one_msg_at_a_time | Bin 0 -> 13 bytes .../fc_one_msg_at_a_time_keyed_anchors | 1 + .../fc_one_msg_at_a_time_zero_fee_commitments | 1 + .../fc_pay_claim_close_pay | Bin 0 -> 17 bytes .../fc_pay_claim_close_pay_keyed_anchors | 1 + ...c_pay_claim_close_pay_zero_fee_commitments | 1 + ..._observable_lifecycle_zero_fee_commitments | 1 + .../chanmon_consistency/fc_pending_monitor | Bin 0 -> 15 bytes .../fc_pending_monitor_keyed_anchors | Bin 0 -> 15 bytes .../fc_pending_monitor_zero_fee_commitments | Bin 0 -> 15 bytes ...le_probe_skips_zero_outbound_limit_channel | 1 + ...ettle_probe_uses_advertised_sendable_range | 2 + 
.../chanmon_consistency/fc_rapid_fire | Bin 0 -> 12 bytes .../fc_rapid_fire_keyed_anchors | 1 + .../fc_rapid_fire_zero_fee_commitments | 1 + .../chanmon_consistency/fc_reconnect | Bin 0 -> 13 bytes .../fc_reconnect_broadcast_announcements | 1 + .../fc_reconnect_keyed_anchors | 1 + .../fc_reconnect_zero_fee_commitments | 1 + .../fc_repeated_same_channel | Bin 0 -> 11 bytes .../fc_repeated_same_channel_keyed_anchors | 1 + ...repeated_same_channel_zero_fee_commitments | 1 + ...start_claimed_payment_stale_monitor_replay | 1 + .../fc_restart_in_progress_chain_sync_replay | 1 + .../fc_restart_mid_resolve | Bin 0 -> 10 bytes .../fc_restart_mid_resolve_keyed_anchors | 1 + ...c_restart_mid_resolve_zero_fee_commitments | 1 + .../fc_restart_then_counterparty_closes | Bin 0 -> 10 bytes ...art_then_counterparty_closes_keyed_anchors | 1 + ...n_counterparty_closes_zero_fee_commitments | 1 + .../chanmon_consistency/fc_reverse_hop | Bin 0 -> 16 bytes .../fc_reverse_hop_keyed_anchors | 1 + .../fc_reverse_hop_zero_fee_commitments | 1 + .../fc_signer_disabled_holder | Bin 0 -> 14 bytes .../fc_signer_disabled_holder_keyed_anchors | 1 + ...igner_disabled_holder_zero_fee_commitments | 1 + .../fc_stale_monitor_restart | Bin 0 -> 13 bytes .../fc_stale_monitor_restart_keyed_anchors | 1 + ...stale_monitor_restart_zero_fee_commitments | 1 + .../chanmon_consistency/fc_sync_one_block | Bin 0 -> 14 bytes .../fc_sync_one_block_keyed_anchors | 1 + .../fc_sync_one_block_zero_fee_commitments | 1 + .../chanmon_consistency/fc_sync_to_tip | Bin 0 -> 7 bytes .../fc_sync_to_tip_keyed_anchors | 1 + .../fc_sync_to_tip_zero_fee_commitments | 1 + .../chanmon_consistency/fc_then_send | Bin 0 -> 10 bytes .../fc_then_send_keyed_anchors | 1 + .../fc_then_send_zero_fee_commitments | 1 + .../chanmon_consistency/fc_timer_tick_after | Bin 0 -> 15 bytes .../fc_timer_tick_after_keyed_anchors | 1 + .../fc_timer_tick_after_zero_fee_commitments | 1 + .../fc_unclaimed_mpp_timeout_variant_a | 1 + 
.../fc_unclaimed_mpp_timeout_variant_b | 1 + .../chanmon_consistency/force_close_basic | Bin 0 -> 9 bytes .../force_close_basic_async | 1 + .../force_close_basic_async_keyed_anchors | 1 + ...rce_close_basic_async_zero_fee_commitments | 1 + .../force_close_basic_keyed_anchors | 1 + .../force_close_basic_zero_fee_commitments | 1 + .../force_close_both_directions | Bin 0 -> 10 bytes .../force_close_both_directions_async | 1 + ..._close_both_directions_async_keyed_anchors | 1 + ...both_directions_async_zero_fee_commitments | 1 + .../force_close_both_directions_keyed_anchors | 1 + ...close_both_directions_zero_fee_commitments | 1 + .../force_close_htlc_needs_height | Bin 0 -> 11 bytes .../force_close_htlc_needs_height_async | 1 + ...lose_htlc_needs_height_async_keyed_anchors | 1 + ...lc_needs_height_async_zero_fee_commitments | 1 + ...orce_close_htlc_needs_height_keyed_anchors | 1 + ...ose_htlc_needs_height_zero_fee_commitments | 1 + .../force_close_htlc_resolved | Bin 0 -> 11 bytes .../force_close_htlc_resolved_async | 1 + ...ce_close_htlc_resolved_async_keyed_anchors | 1 + ...e_htlc_resolved_async_zero_fee_commitments | 1 + .../force_close_htlc_resolved_keyed_anchors | 1 + ...e_close_htlc_resolved_zero_fee_commitments | 1 + .../force_close_middle_node | Bin 0 -> 9 bytes .../force_close_middle_node_async | 1 + ...orce_close_middle_node_async_keyed_anchors | 1 + ...ose_middle_node_async_zero_fee_commitments | 1 + .../force_close_middle_node_keyed_anchors | 1 + ...rce_close_middle_node_zero_fee_commitments | 1 + .../force_close_no_confirm | Bin 0 -> 2 bytes .../force_close_no_confirm_async | 1 + ...force_close_no_confirm_async_keyed_anchors | 1 + ...lose_no_confirm_async_zero_fee_commitments | 1 + .../force_close_no_confirm_keyed_anchors | 1 + ...orce_close_no_confirm_zero_fee_commitments | 1 + .../force_close_three_node_preimage | Bin 0 -> 14 bytes .../force_close_three_node_preimage_async | 4 + ...se_three_node_preimage_async_keyed_anchors | 4 + 
...e_node_preimage_async_zero_fee_commitments | 4 + ...ce_close_three_node_preimage_keyed_anchors | 1 + ...e_three_node_preimage_zero_fee_commitments | 1 + .../ldk_crash_channelmanager_19484 | 1 + .../ldk_crash_channelmanager_9836 | Bin 0 -> 13 bytes .../ldk_crash_channelmonitor_2727 | 1 + .../ldk_crash_onchaintx_1025 | Bin 0 -> 14 bytes .../ldk_crash_onchaintx_913 | 1 + .../chanmon_consistency/ldk_crash_signer_395 | 1 + ...t-0103befb3dc5aa050668752668d04e85bd1fc14e | Bin 0 -> 24 bytes ...t-05fc1bb98f2a3b29e826a4de636474de0b23c895 | Bin 0 -> 25 bytes ...t-2bd72986b31d87f9260cf627e63971b5b8310a60 | Bin 0 -> 22 bytes ...t-3d9c399d0e2d915375da243fb57023df803a5dc6 | 1 + ...t-475eb92f6d72aed80ce7cdaed4181b99b11b2fcd | Bin 0 -> 26 bytes ...t-4c5cc7debdfdf2569e21b13b21c4270a9b558267 | Bin 0 -> 20 bytes ...t-505b331015cbe51169de31e09acc6d8330c8e385 | 2 + ...t-593eb3357e98be529b0ef35f21577ef6eede171b | Bin 0 -> 18 bytes ...t-6ee72dbba68dbca58038d2f9b8525e4d0df25f94 | 1 + ...t-76a82aa89161d0428192e725650324a74a710dca | 1 + ...t-7a7ea04ead9439ad7db2eefb23f6e242d547e459 | Bin 0 -> 25 bytes ...t-8250da1564cda1a1dde38a431859afab8ac2934d | Bin 0 -> 23 bytes ...t-838b7d436a92ae2a68aa9ad9badd88cbf96b407b | Bin 0 -> 24 bytes ...t-87b55c5b37383fe43420089fd3e8ccecbb034b44 | 1 + ...t-885f446335ae279baed408d42af8c398dfdb8c9b | 1 + ...t-8a81e4c066465a2975ef22625c0b91da6332a2c8 | Bin 0 -> 24 bytes ...t-8f2aebf3aeeb70d8edd39a886e30beb770f3b42b | Bin 0 -> 23 bytes ...t-91d97d9eea2bd59f746681ad822488262e832ff1 | Bin 0 -> 26 bytes ...t-95a90908391d3398084b77eb11ff5c9d7fdde008 | Bin 0 -> 24 bytes ...t-a31cdfc423211489c841a6ddd067f9e6cf5bed4b | Bin 0 -> 21 bytes ...t-b1c4840ea1279dd8d6080d79373ae55bbcad3061 | Bin 0 -> 25 bytes ...t-b6ef84eec94d70bbc385c98c4ab0bac77da00a2f | 1 + ...t-bae8693182b102dfebab143a0f48992dad76245d | Bin 0 -> 23 bytes ...t-bcab049322729e275e3bbdacebc633495da7643f | Bin 0 -> 26 bytes ...t-d5afdff02a253c9f2fbce95cbaf730eb210128fa | 1 + 
...t-d6494f068fb2b2d31f1ac8627752692b3c8b7d2f | 1 + ...t-edd3f8168217501dd93f3c24d09c2c095cdf7784 | Bin 0 -> 14 bytes ...t-fcbcc131184e33d5b000820b0972f6197b0801d2 | 1 + 408 files changed, 938 insertions(+), 2 deletions(-) create mode 100644 fc-crashes.md create mode 100644 fuzz/FC-INFO.md create mode 100644 fuzz/ONCHAINTX-BUGS.md create mode 100644 fuzz/OPEN-ISSUES.md delete mode 100644 fuzz/test_cases/base32/smoke delete mode 100644 fuzz/test_cases/bech32_parse/smoke create mode 100644 fuzz/test_cases/chanmon_consistency/crash-02830a6ff7757f3570924b0c0fd9118a7cdd9770 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-0473b0e767d9a98de62538ce5afcbbc2e6ec5af2 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-05e175d40f60b823f730fa874d98dc10dd2bb6ad create mode 100644 fuzz/test_cases/chanmon_consistency/crash-07bdc4e56ee67bd2ffa409f76529199d748ab2d8 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-096cc3008264dccaefb945f5a4b7a2d3c9f8e90c create mode 100644 fuzz/test_cases/chanmon_consistency/crash-09a17e06913dea74dba796940cec86cb4e2dd597 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-09f5a41270b07f70a031884cbdfd081e8600923e create mode 100644 fuzz/test_cases/chanmon_consistency/crash-0b87d8b430697fe9d1781a38f41a68ebcf7b18c1 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-0c3334736f5c55e44088d6140580354827026732 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-0dcddb7aa2b729fa8de829e5ea82c38b5918acfa create mode 100644 fuzz/test_cases/chanmon_consistency/crash-0f0ca42c8b4c815495919663652db18483d5e846 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-14a022e3e4d88420a08bc4c2d67193f74e4f8bdd create mode 100644 fuzz/test_cases/chanmon_consistency/crash-15b45517356c182051c2b334e09c00f4f9368e94 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-18062bd37528e06c4921e7ef7df2b2c3e676823b create mode 100644 
fuzz/test_cases/chanmon_consistency/crash-22125d8a200205d52723ec232f5aab710856f4b0 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-228ea00412a2fab1e866fc6df32ffd00bbfe81ad create mode 100644 fuzz/test_cases/chanmon_consistency/crash-242de208110143401fcf4e1ebaa7d9d38fb93611 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-24f1373b1cf51f95af854d6d8730336b77728007 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-2923c14608fb259c21862cd71ffeb6ac74b0ba32 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-2a0852bec1d75334538dacec26831db6995b6e33 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-2d93541536e19c030d95d236e6be545352d98b80 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-2e002fcfdc76c5981f5f93c0f842b548fb56c7a7 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-2fad50c7fd20b250f0349887445af198124900df create mode 100644 fuzz/test_cases/chanmon_consistency/crash-2fcd63b2ed709dfcd9c6a08dc673d1f896b6cdad create mode 100644 fuzz/test_cases/chanmon_consistency/crash-304db9c93d320420bdef656699ad1f49c37feaf7 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-315119ea09b9febec156d212fe57020def4b5af4 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-32a013d8bd38f3ba39d4a214ba0780edd41ccb85 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-33c08a8f15f1c842df5da4fc92228d00606573f9 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-33e77c2f720493e306bbfea79f151388ca7a04ea create mode 100644 fuzz/test_cases/chanmon_consistency/crash-37a18356d608c97415c0a1bef6a0f13fe04c8b97 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-380ee6f8c1030828f4d80582154b0418fca58c90 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-38192a6cb0500969f301c7a6742949ecd213bfae create mode 100644 fuzz/test_cases/chanmon_consistency/crash-387c18b4c7235aa1960400de5b0d5798202ec3b1 create mode 100644 
fuzz/test_cases/chanmon_consistency/crash-3bb94b7b4397397caa5eb0e9ba6abb9a18028270 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-3be4d9d7a75c8459b3ec349474c7fc206b00fe9c create mode 100644 fuzz/test_cases/chanmon_consistency/crash-3cda5b606ce05f4207207e8fd1480fe530a51b13 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-3f8a6e5b806235b795ebea3d6998943a3ab6ff9d create mode 100644 fuzz/test_cases/chanmon_consistency/crash-41ffe016736ddfef0eb1d877b35a0c85bd5cfd5f create mode 100644 fuzz/test_cases/chanmon_consistency/crash-45240f379a3a24948c4b091fd658a9f0ef4d4963 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-45872f91e28e4ed1e8814084bbf5ada6fe4963f0 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-49e1240588c1b4507b24c4f07dae75faef02a639 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-4da789d875488d8f244bccefaff4295ae801c745 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-4e4b47b5a0f4c4689868a3003ae7d62e5ac78484 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-53d6404dc8dee21adf112f3c909459f67e176301 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-544eff2c026e0464aff1a9afaa4acd2912e93267 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-54a3422e8e1c578813d5cfce1f8b732040fc668e create mode 100644 fuzz/test_cases/chanmon_consistency/crash-55fd3e4e7c2506a9ce067b0e0a468161db22dec0 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-56271abf5206dd39ac1a1035d49d41f61ee0606e create mode 100644 fuzz/test_cases/chanmon_consistency/crash-5be7542ec7a98b835a2c3dca63e3d89a76050fe6 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-5d2ca379ca5dabcbfae13c3eca104e48a4bf94c9 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-63164e99d1a0561c352ea11be619b8505a83ceb4 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-6aec66d5104839013b44f977a01915c29f2e6795 create mode 100644 
fuzz/test_cases/chanmon_consistency/crash-6af2409d5c331f44f76e165e735cd2e9104aed9e create mode 100644 fuzz/test_cases/chanmon_consistency/crash-6b5c5549ee7ed6e7fcf9613d62c295fd65d100ce create mode 100644 fuzz/test_cases/chanmon_consistency/crash-6bd8c4ea12175b25bb1d239699622ba5485248cf create mode 100644 fuzz/test_cases/chanmon_consistency/crash-6bda1f46384cf85ae2d9ca8048619963a9416ddc create mode 100644 fuzz/test_cases/chanmon_consistency/crash-767cf8ac05cf878f93f55fe21f96a9e76b28c5f9 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-7776698efb54442fa8170cb39b7c7bf72e515335 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-78202f87ee8c211227082479a8bd67cd1e7f16e5 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-79790f24a47ad8f39398df48800b946cd85fc3fe create mode 100644 fuzz/test_cases/chanmon_consistency/crash-7ab7fa1fb4303a91c57ec241fefdf5826d2b52aa create mode 100644 fuzz/test_cases/chanmon_consistency/crash-7b7826cea32794a2ab2c245cd3dc024355b07c78 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-7c72226eeba2eb5192d9b7adfead405d3b93fdf9 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-7cb0cf9df154821deb68a78001ce9c0e27f97b0a create mode 100644 fuzz/test_cases/chanmon_consistency/crash-815718bf6e59d981220f037f7509c9cfe5401485 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-8453a4a3cf9dd9f60e5aa40fdce440b69f62869d create mode 100644 fuzz/test_cases/chanmon_consistency/crash-86ee8ae4c13784d3d750f6d4b970ec0852ea2bc3 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-87093ec5446a84482f5a728fc65a51a15b6de843 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-87f98b753291bd37f92795d32e2df4c3597dd6dd create mode 100644 fuzz/test_cases/chanmon_consistency/crash-8ab54f3642a60a239a7bb787838f3e5a6b6f4f41 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-8ec6798103af6cedfdec68373991c0c0a73e3770 create mode 100644 
fuzz/test_cases/chanmon_consistency/crash-8f5cc4f6de42f52dcb571b6c0f21df957eb25462 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-8fb6d213ac7d14f6c62c09e7baf392f01e8688d0 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-90c560825e852e3dfb64e09d6764b85cf9f7689d create mode 100644 fuzz/test_cases/chanmon_consistency/crash-91d8898837e425d607ef36ed73fa364b0fa58121 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-91ebb8583ed7705e2601334e52428ea5eb80a681 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-93c44c96a5c5e1d4532370b2c77bb372170bd59b create mode 100644 fuzz/test_cases/chanmon_consistency/crash-9c69d63a708c0a83d2d1fa60577a1a9270924ff2 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-9c84f405725b7c171338f776b7ac7f3a3b010f34 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-a235e98ab95f66315cef361c49eea5483ce2d91a create mode 100644 fuzz/test_cases/chanmon_consistency/crash-a6628891c34498ca2cb4122c2ee66fe4ba6cd01d create mode 100644 fuzz/test_cases/chanmon_consistency/crash-a8f59ca92bcc53e042fd759493c67a35f308721a create mode 100644 fuzz/test_cases/chanmon_consistency/crash-ace48b23767637be15eb3763e88170f7aab17cd4 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-adf5f907d4bc584e6348b7188532f6fc08cda464 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-af7499de68300f3346be7b69ff913c8da2394d23 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-b2e70396bda55d716c022a683df49d72e28b5cae create mode 100644 fuzz/test_cases/chanmon_consistency/crash-b5aed7ccaeacb4347cc7599a258e28e1ccf3855b create mode 100644 fuzz/test_cases/chanmon_consistency/crash-ba5dd0ee55c764b2ae71543e95fd63c496d924bd create mode 100644 fuzz/test_cases/chanmon_consistency/crash-baa2cd71d1e22c966f3c2ddc44cf5b297da5d671 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-c1fe932fa21c4382ba71ec745790386f010b939c create mode 100644 
fuzz/test_cases/chanmon_consistency/crash-c29e58a510e698fc8205e4896a938adb92424105 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-c7b166535d5d3591604aeb239b01592f24fff27b create mode 100644 fuzz/test_cases/chanmon_consistency/crash-cb39e58d20b35ceb4ecd9fc8dd91272e308f11a1 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-cc144c9fa2f889e3c3665b1e7c870ddd41cb3e15 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-cec678efd9c2c03dccf92f62c20e9520566d130f create mode 100644 fuzz/test_cases/chanmon_consistency/crash-cedac69cfff63a360470d6f051164b149f74bc18 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-cf44c3acf507cae6fd00e0bf331d18536c551ce1 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-d09e9319d459f21b180f1c730fbf4e89840bd6c5 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-d11e5e5259e57e32f120f0d005bc52aead73d099 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-d3ee0bad80fbd14f1f62903fc6d23f26ed5eb405 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-d5124444b5e39d9a67c395e6325d340fff97a159 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-d87da6cc047b35d69808787157394a0ac7c9ff92 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-d91352ebdfa46f3734403e7e041bf0faa559e97a create mode 100644 fuzz/test_cases/chanmon_consistency/crash-d9affe3db851b50c3b1186ff86f97710cfd115b0 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-db2606af8c9a718bd0da6a6e03c51fd4c84909cd create mode 100644 fuzz/test_cases/chanmon_consistency/crash-dbf141642a66403570204baf8a310783885e081d create mode 100644 fuzz/test_cases/chanmon_consistency/crash-dd67d75d834201769b29d89a5243fdae7f6d8ad1 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-df426fe2abe15519a7ad994034bd2711f26f80af create mode 100644 fuzz/test_cases/chanmon_consistency/crash-ea07f1a57bd66e8a0b48347a45f12a4e48fa4b02 create mode 100644 
fuzz/test_cases/chanmon_consistency/crash-efc04dc2a68b17479ad445cce2b84a91a7d3e9b9 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-f2e76e1926cc2604f35de1316e48cb7c8e2aee65 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-f4567ec41df8f30f9c0975e2b9cb3bed9278df8c create mode 100644 fuzz/test_cases/chanmon_consistency/crash-f804080d84b3bfc7adfe563ad1ac9013733983f6 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-f995b58793f0e17361d409df7ddb99d7c14873cd create mode 100644 fuzz/test_cases/chanmon_consistency/crash-fd80c35839107ef932a09d1fd63e34d2a6cd6451 create mode 100644 fuzz/test_cases/chanmon_consistency/crash-fda69e901e92ce81134859dfbd53ceec84393aeb create mode 100644 fuzz/test_cases/chanmon_consistency/fc_advance_before_drain create mode 100644 fuzz/test_cases/chanmon_consistency/fc_advance_before_drain_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_advance_before_drain_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_after_claim_before_forward create mode 100644 fuzz/test_cases/chanmon_consistency/fc_after_claim_before_forward_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_after_claim_before_forward_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_after_disconnect create mode 100644 fuzz/test_cases/chanmon_consistency/fc_after_disconnect_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_after_disconnect_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_after_fee_update create mode 100644 fuzz/test_cases/chanmon_consistency/fc_after_fee_update_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_after_fee_update_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_after_timer_ticks create mode 100644 fuzz/test_cases/chanmon_consistency/fc_after_timer_ticks_keyed_anchors create mode 100644 
fuzz/test_cases/chanmon_consistency/fc_after_timer_ticks_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_all_channels create mode 100644 fuzz/test_cases/chanmon_consistency/fc_all_channels_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_all_channels_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_async_complete_after create mode 100644 fuzz/test_cases/chanmon_consistency/fc_async_complete_after_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_async_complete_after_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_async_hop_middle_closes create mode 100644 fuzz/test_cases/chanmon_consistency/fc_async_hop_middle_closes_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_async_hop_middle_closes_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_async_many_pays create mode 100644 fuzz/test_cases/chanmon_consistency/fc_async_many_pays_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_async_many_pays_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_async_no_complete create mode 100644 fuzz/test_cases/chanmon_consistency/fc_async_no_complete_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_async_no_complete_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_async_pending_never_complete create mode 100644 fuzz/test_cases/chanmon_consistency/fc_async_pending_never_complete_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_async_pending_never_complete_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_async_restart create mode 100644 fuzz/test_cases/chanmon_consistency/fc_async_restart_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_async_restart_zero_fee_commitments create mode 100644 
fuzz/test_cases/chanmon_consistency/fc_b_closes_both create mode 100644 fuzz/test_cases/chanmon_consistency/fc_b_closes_both_hop_inflight create mode 100644 fuzz/test_cases/chanmon_consistency/fc_b_closes_both_hop_inflight_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_b_closes_both_hop_inflight_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_b_closes_both_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_b_closes_both_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_bc_during_hop_ab_only create mode 100644 fuzz/test_cases/chanmon_consistency/fc_bc_during_hop_ab_only_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_bc_during_hop_ab_only_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_bc_while_ab_htlc create mode 100644 fuzz/test_cases/chanmon_consistency/fc_bc_while_ab_htlc_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_bc_while_ab_htlc_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_bidir_htlcs create mode 100644 fuzz/test_cases/chanmon_consistency/fc_bidir_htlcs_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_bidir_htlcs_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_both_sides_same_chan create mode 100644 fuzz/test_cases/chanmon_consistency/fc_both_sides_same_chan_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_both_sides_same_chan_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_bump_htlc_p2wpkh_fee_estimate create mode 100644 fuzz/test_cases/chanmon_consistency/fc_c_initiates_b_restart create mode 100644 fuzz/test_cases/chanmon_consistency/fc_c_initiates_b_restart_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_c_initiates_b_restart_zero_fee_commitments create mode 100644 
fuzz/test_cases/chanmon_consistency/fc_cascade_c_then_b create mode 100644 fuzz/test_cases/chanmon_consistency/fc_cascade_c_then_b_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_cascade_c_then_b_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_claimable_on_close_needs_confirmation create mode 100644 fuzz/test_cases/chanmon_consistency/fc_claimed_dust_htlc_sender_fails create mode 100644 fuzz/test_cases/chanmon_consistency/fc_claimed_mpp_dust_path_still_succeeds create mode 100644 fuzz/test_cases/chanmon_consistency/fc_claimed_payment_sender_completion create mode 100644 fuzz/test_cases/chanmon_consistency/fc_close_then_disconnect_all create mode 100644 fuzz/test_cases/chanmon_consistency/fc_close_then_disconnect_all_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_close_then_disconnect_all_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_completed_parallel_updates_retire_old_snapshots_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_completed_update_retires_old_snapshot create mode 100644 fuzz/test_cases/chanmon_consistency/fc_contentious_claim_stuck_after_force_close_218996 create mode 100644 fuzz/test_cases/chanmon_consistency/fc_contentious_claim_stuck_after_force_close_36a22e create mode 100644 fuzz/test_cases/chanmon_consistency/fc_contentious_claim_stuck_after_force_close_d7793e create mode 100644 fuzz/test_cases/chanmon_consistency/fc_direct_pay_claimed create mode 100644 fuzz/test_cases/chanmon_consistency/fc_direct_pay_claimed_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_direct_pay_claimed_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_disabled_signers create mode 100644 fuzz/test_cases/chanmon_consistency/fc_disabled_signers_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_disabled_signers_zero_fee_commitments create mode 100644 
fuzz/test_cases/chanmon_consistency/fc_disconnect_close_reconnect create mode 100644 fuzz/test_cases/chanmon_consistency/fc_disconnect_close_reconnect_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_disconnect_close_reconnect_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_disconnect_drain_reconnect create mode 100644 fuzz/test_cases/chanmon_consistency/fc_disconnect_drain_reconnect_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_disconnect_drain_reconnect_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_drain_a_only create mode 100644 fuzz/test_cases/chanmon_consistency/fc_drain_a_only_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_drain_a_only_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_duplicate_pending_claim_event_after_force_close create mode 100644 fuzz/test_cases/chanmon_consistency/fc_duplicate_pending_claim_event_after_force_close_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_duplicate_pending_claim_request_after_force_close_39b47f create mode 100644 fuzz/test_cases/chanmon_consistency/fc_duplicate_pending_claim_request_after_force_close_ed278d create mode 100644 fuzz/test_cases/chanmon_consistency/fc_during_reconnect create mode 100644 fuzz/test_cases/chanmon_consistency/fc_during_reconnect_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_during_reconnect_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_dust_htlcs create mode 100644 fuzz/test_cases/chanmon_consistency/fc_dust_htlcs_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_dust_htlcs_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_dust_path_claim_expected_fail_but_sent_5099d3 create mode 100644 fuzz/test_cases/chanmon_consistency/fc_dust_path_claim_expected_fail_but_sent_595140 create mode 
100644 fuzz/test_cases/chanmon_consistency/fc_dust_path_claim_expected_fail_but_sent_7a4062 create mode 100644 fuzz/test_cases/chanmon_consistency/fc_dust_path_claim_expected_fail_but_sent_9d7311 create mode 100644 fuzz/test_cases/chanmon_consistency/fc_dust_path_claim_expected_fail_but_sent_b1281e create mode 100644 fuzz/test_cases/chanmon_consistency/fc_dust_path_claim_expected_fail_but_sent_bf210c create mode 100644 fuzz/test_cases/chanmon_consistency/fc_events_between_drains create mode 100644 fuzz/test_cases/chanmon_consistency/fc_events_between_drains_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_events_between_drains_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_events_only create mode 100644 fuzz/test_cases/chanmon_consistency/fc_events_only_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_events_only_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_exact_cltv_height create mode 100644 fuzz/test_cases/chanmon_consistency/fc_exact_cltv_height_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_exact_cltv_height_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_hop_b_has_preimage create mode 100644 fuzz/test_cases/chanmon_consistency/fc_hop_b_has_preimage_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_hop_b_has_preimage_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_hop_before_bc_commit create mode 100644 fuzz/test_cases/chanmon_consistency/fc_hop_before_bc_commit_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_hop_before_bc_commit_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_hop_mid_flight create mode 100644 fuzz/test_cases/chanmon_consistency/fc_hop_mid_flight_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_hop_mid_flight_zero_fee_commitments create mode 100644 
fuzz/test_cases/chanmon_consistency/fc_htlc_late_signer create mode 100644 fuzz/test_cases/chanmon_consistency/fc_htlc_late_signer_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_htlc_late_signer_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_immediate_settle create mode 100644 fuzz/test_cases/chanmon_consistency/fc_immediate_settle_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_immediate_settle_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_inprogress_monitors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_inprogress_monitors_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_inprogress_monitors_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_interleaved_channels create mode 100644 fuzz/test_cases/chanmon_consistency/fc_interleaved_channels_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_interleaved_channels_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_large_payment_resolve create mode 100644 fuzz/test_cases/chanmon_consistency/fc_large_payment_resolve_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_large_payment_resolve_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_many_htlcs create mode 100644 fuzz/test_cases/chanmon_consistency/fc_many_htlcs_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_many_htlcs_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_mid_fulfill_propagation create mode 100644 fuzz/test_cases/chanmon_consistency/fc_mid_fulfill_propagation_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_mid_fulfill_propagation_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_monitor_update_replay_out_of_order_dcbc86 create mode 100644 
fuzz/test_cases/chanmon_consistency/fc_msgs_before_drain create mode 100644 fuzz/test_cases/chanmon_consistency/fc_msgs_before_drain_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_msgs_before_drain_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_multi_drain_rounds create mode 100644 fuzz/test_cases/chanmon_consistency/fc_multi_drain_rounds_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_multi_drain_rounds_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_no_settle create mode 100644 fuzz/test_cases/chanmon_consistency/fc_no_settle_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_no_settle_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_node_restart create mode 100644 fuzz/test_cases/chanmon_consistency/fc_node_restart_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_node_restart_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_one_msg_at_a_time create mode 100644 fuzz/test_cases/chanmon_consistency/fc_one_msg_at_a_time_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_one_msg_at_a_time_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_pay_claim_close_pay create mode 100644 fuzz/test_cases/chanmon_consistency/fc_pay_claim_close_pay_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_pay_claim_close_pay_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_pending_cache_without_observable_lifecycle_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_pending_monitor create mode 100644 fuzz/test_cases/chanmon_consistency/fc_pending_monitor_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_pending_monitor_zero_fee_commitments create mode 100644 
fuzz/test_cases/chanmon_consistency/fc_post_settle_probe_skips_zero_outbound_limit_channel create mode 100644 fuzz/test_cases/chanmon_consistency/fc_post_settle_probe_uses_advertised_sendable_range create mode 100644 fuzz/test_cases/chanmon_consistency/fc_rapid_fire create mode 100644 fuzz/test_cases/chanmon_consistency/fc_rapid_fire_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_rapid_fire_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_reconnect create mode 100644 fuzz/test_cases/chanmon_consistency/fc_reconnect_broadcast_announcements create mode 100644 fuzz/test_cases/chanmon_consistency/fc_reconnect_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_reconnect_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_repeated_same_channel create mode 100644 fuzz/test_cases/chanmon_consistency/fc_repeated_same_channel_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_repeated_same_channel_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_restart_claimed_payment_stale_monitor_replay create mode 100644 fuzz/test_cases/chanmon_consistency/fc_restart_in_progress_chain_sync_replay create mode 100644 fuzz/test_cases/chanmon_consistency/fc_restart_mid_resolve create mode 100644 fuzz/test_cases/chanmon_consistency/fc_restart_mid_resolve_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_restart_mid_resolve_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_restart_then_counterparty_closes create mode 100644 fuzz/test_cases/chanmon_consistency/fc_restart_then_counterparty_closes_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_restart_then_counterparty_closes_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_reverse_hop create mode 100644 fuzz/test_cases/chanmon_consistency/fc_reverse_hop_keyed_anchors create mode 100644 
fuzz/test_cases/chanmon_consistency/fc_reverse_hop_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_signer_disabled_holder create mode 100644 fuzz/test_cases/chanmon_consistency/fc_signer_disabled_holder_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_signer_disabled_holder_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_stale_monitor_restart create mode 100644 fuzz/test_cases/chanmon_consistency/fc_stale_monitor_restart_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_stale_monitor_restart_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_sync_one_block create mode 100644 fuzz/test_cases/chanmon_consistency/fc_sync_one_block_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_sync_one_block_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_sync_to_tip create mode 100644 fuzz/test_cases/chanmon_consistency/fc_sync_to_tip_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_sync_to_tip_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_then_send create mode 100644 fuzz/test_cases/chanmon_consistency/fc_then_send_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_then_send_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_timer_tick_after create mode 100644 fuzz/test_cases/chanmon_consistency/fc_timer_tick_after_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/fc_timer_tick_after_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/fc_unclaimed_mpp_timeout_variant_a create mode 100644 fuzz/test_cases/chanmon_consistency/fc_unclaimed_mpp_timeout_variant_b create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_basic create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_basic_async create mode 100644 
fuzz/test_cases/chanmon_consistency/force_close_basic_async_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_basic_async_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_basic_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_basic_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_both_directions create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_both_directions_async create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_both_directions_async_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_both_directions_async_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_both_directions_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_both_directions_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_htlc_needs_height create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_htlc_needs_height_async create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_htlc_needs_height_async_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_htlc_needs_height_async_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_htlc_needs_height_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_htlc_needs_height_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_htlc_resolved create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_htlc_resolved_async create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_htlc_resolved_async_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_htlc_resolved_async_zero_fee_commitments create mode 100644 
fuzz/test_cases/chanmon_consistency/force_close_htlc_resolved_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_htlc_resolved_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_middle_node create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_middle_node_async create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_middle_node_async_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_middle_node_async_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_middle_node_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_middle_node_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_no_confirm create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_no_confirm_async create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_no_confirm_async_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_no_confirm_async_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_no_confirm_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_no_confirm_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_three_node_preimage create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_three_node_preimage_async create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_three_node_preimage_async_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_three_node_preimage_async_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_three_node_preimage_keyed_anchors create mode 100644 fuzz/test_cases/chanmon_consistency/force_close_three_node_preimage_zero_fee_commitments create mode 100644 fuzz/test_cases/chanmon_consistency/ldk_crash_channelmanager_19484 create 
mode 100644 fuzz/test_cases/chanmon_consistency/ldk_crash_channelmanager_9836 create mode 100644 fuzz/test_cases/chanmon_consistency/ldk_crash_channelmonitor_2727 create mode 100644 fuzz/test_cases/chanmon_consistency/ldk_crash_onchaintx_1025 create mode 100644 fuzz/test_cases/chanmon_consistency/ldk_crash_onchaintx_913 create mode 100644 fuzz/test_cases/chanmon_consistency/ldk_crash_signer_395 create mode 100644 fuzz/test_cases/chanmon_consistency/timeout-0103befb3dc5aa050668752668d04e85bd1fc14e create mode 100644 fuzz/test_cases/chanmon_consistency/timeout-05fc1bb98f2a3b29e826a4de636474de0b23c895 create mode 100644 fuzz/test_cases/chanmon_consistency/timeout-2bd72986b31d87f9260cf627e63971b5b8310a60 create mode 100644 fuzz/test_cases/chanmon_consistency/timeout-3d9c399d0e2d915375da243fb57023df803a5dc6 create mode 100644 fuzz/test_cases/chanmon_consistency/timeout-475eb92f6d72aed80ce7cdaed4181b99b11b2fcd create mode 100644 fuzz/test_cases/chanmon_consistency/timeout-4c5cc7debdfdf2569e21b13b21c4270a9b558267 create mode 100644 fuzz/test_cases/chanmon_consistency/timeout-505b331015cbe51169de31e09acc6d8330c8e385 create mode 100644 fuzz/test_cases/chanmon_consistency/timeout-593eb3357e98be529b0ef35f21577ef6eede171b create mode 100644 fuzz/test_cases/chanmon_consistency/timeout-6ee72dbba68dbca58038d2f9b8525e4d0df25f94 create mode 100644 fuzz/test_cases/chanmon_consistency/timeout-76a82aa89161d0428192e725650324a74a710dca create mode 100644 fuzz/test_cases/chanmon_consistency/timeout-7a7ea04ead9439ad7db2eefb23f6e242d547e459 create mode 100644 fuzz/test_cases/chanmon_consistency/timeout-8250da1564cda1a1dde38a431859afab8ac2934d create mode 100644 fuzz/test_cases/chanmon_consistency/timeout-838b7d436a92ae2a68aa9ad9badd88cbf96b407b create mode 100644 fuzz/test_cases/chanmon_consistency/timeout-87b55c5b37383fe43420089fd3e8ccecbb034b44 create mode 100644 fuzz/test_cases/chanmon_consistency/timeout-885f446335ae279baed408d42af8c398dfdb8c9b create mode 100644 
fuzz/test_cases/chanmon_consistency/timeout-8a81e4c066465a2975ef22625c0b91da6332a2c8 create mode 100644 fuzz/test_cases/chanmon_consistency/timeout-8f2aebf3aeeb70d8edd39a886e30beb770f3b42b create mode 100644 fuzz/test_cases/chanmon_consistency/timeout-91d97d9eea2bd59f746681ad822488262e832ff1 create mode 100644 fuzz/test_cases/chanmon_consistency/timeout-95a90908391d3398084b77eb11ff5c9d7fdde008 create mode 100644 fuzz/test_cases/chanmon_consistency/timeout-a31cdfc423211489c841a6ddd067f9e6cf5bed4b create mode 100644 fuzz/test_cases/chanmon_consistency/timeout-b1c4840ea1279dd8d6080d79373ae55bbcad3061 create mode 100644 fuzz/test_cases/chanmon_consistency/timeout-b6ef84eec94d70bbc385c98c4ab0bac77da00a2f create mode 100644 fuzz/test_cases/chanmon_consistency/timeout-bae8693182b102dfebab143a0f48992dad76245d create mode 100644 fuzz/test_cases/chanmon_consistency/timeout-bcab049322729e275e3bbdacebc633495da7643f create mode 100644 fuzz/test_cases/chanmon_consistency/timeout-d5afdff02a253c9f2fbce95cbaf730eb210128fa create mode 100644 fuzz/test_cases/chanmon_consistency/timeout-d6494f068fb2b2d31f1ac8627752692b3c8b7d2f create mode 100644 fuzz/test_cases/chanmon_consistency/timeout-edd3f8168217501dd93f3c24d09c2c095cdf7784 create mode 100644 fuzz/test_cases/chanmon_consistency/timeout-fcbcc131184e33d5b000820b0972f6197b0801d2 diff --git a/fc-crashes.md b/fc-crashes.md new file mode 100644 index 00000000000..a257e7966ba --- /dev/null +++ b/fc-crashes.md @@ -0,0 +1,162 @@ +# Force-close fuzzer LDK crashes + +Minimized crash sequences found by the chanmon_consistency fuzzer with +force-close support. All crashes are `debug_assert` or `panic!` inside +LDK, not in the fuzzer harness. Byte 0 encodes monitor styles (bits +0-2) and channel type (bits 3-4: 0=Legacy, 1=KeyedAnchors). + +## 1. 
channelmonitor.rs:2727 - HTLC input not found in transaction + +``` +debug_assert!(htlc_input_idx_opt.is_some()); +``` + +When resolving an HTLC spend, the monitor searches for the HTLC +outpoint in the spending transaction's inputs but doesn't find it. +Falls back to index 0 in release mode, which would produce incorrect +tracking. + +Minimized (17 bytes): +``` +0x40 0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xdc 0xde 0xff +``` + +Byte 0 = 0x40: Legacy channels, no async monitors. The sequence is +mostly 0xff (settlement) repeated, with height advances (0xdc, 0xde) +near the end. This suggests the crash happens during settlement when +processing on-chain HTLC spends after repeated settlement attempts. + +## 2. onchaintx.rs:913 - Duplicate claim ID in pending requests + +``` +debug_assert!(self.pending_claim_requests.get(&claim_id).is_none()); +``` + +The OnchainTxHandler registers a claim event with a claim_id that +already exists in the pending_claim_requests map. + +Minimized (10 bytes): +``` +0x08 0xd2 0x70 0x70 0x71 0x70 0x10 0x19 0xde 0xff +``` + +Byte 0 = 0x08: KeyedAnchors channels, no async monitors. +- 0xd2: B force-closes the A-B channel +- 0x70/0x71: disconnect/reconnect peers +- 0x10, 0x19: process messages on nodes A and B +- 0xde: advance chain 200 blocks +- 0xff: settle + +B force-closes, peers disconnect and reconnect, messages are exchanged, +then height advances and settlement triggers the duplicate claim. + +## 3. onchaintx.rs:1025 - Inconsistent internal maps + +``` +panic!("Inconsistencies between pending_claim_requests map and claimable_outpoints map"); +``` + +The OnchainTxHandler detects that its `pending_claim_requests` and +`claimable_outpoints` maps are out of sync. + +Minimized (14 bytes): +``` +0x00 0x3c 0x11 0x19 0xd0 0xde 0xff 0xff 0x19 0x21 0x19 0xde 0x26 0xff +``` + +Byte 0 = 0x00: Legacy channels, all monitors completed. 
+- 0x3c: send hop payment A->B->C (1M msat) +- 0x11, 0x19: process messages to commit HTLC on A-B +- 0xd0: A force-closes A-B +- 0xde: advance 200 blocks +- 0xff: settle (first round) +- 0xff: settle again (second round, processes more messages) +- 0x19, 0x21, 0x19: continue processing B and C messages +- 0xde: advance 200 more blocks +- 0x26: process events on node C +- 0xff: settle (third round) + +A hop payment partially committed, then A force-closes. Multiple +settlement rounds with continued message processing in between triggers +the internal map inconsistency. + +## 4. test_channel_signer.rs:395 - Signing revoked commitment + +``` +panic!("can only sign the next two unrevoked commitment numbers, revoked={} vs requested={}") +``` + +The test channel signer is asked to sign an HTLC transaction for a +commitment number that has already been revoked. + +Minimized (18 bytes): +``` +0x22 0x71 0x71 0x71 0x71 0x71 0x71 0x71 0xff 0xff 0xff 0xff 0xff 0xff 0xde 0xde 0xb5 0xff +``` + +Byte 0 = 0x22: Legacy channels, async monitors on node B. +- 0x71: disconnect B-C peers (repeated, only first effective) +- 0xff: settle (repeated 6 times) +- 0xde 0xde: advance 400 blocks +- 0xb5: restart node B with alternate monitor state +- 0xff: settle + +Async monitors on B with peer disconnection, repeated settlements, +height advances, and a node restart with a different monitor state. +The stale monitor combined with the restart puts B's signer in a state +where it's asked to sign for an already-revoked commitment. + +## 5. channelmanager.rs:9836 - Payment blocker not found + +``` +debug_assert!(found_blocker); +``` + +During payment processing, the ChannelManager expects to find a +specific blocker entry for an in-flight payment but it's missing. + +Minimized (13 bytes): +``` +0x00 0x3c 0x11 0x19 0x11 0x1f 0x19 0x21 0x19 0x27 0x27 0xde 0xff +``` + +Byte 0 = 0x00: Legacy channels, all monitors completed. 
+- 0x3c: send hop A->B->C (1M msat) +- 0x11, 0x19, 0x11: commit HTLC on A-B +- 0x1f: B processes events (forwards HTLC to C) +- 0x19, 0x21, 0x19: commit HTLC on B-C +- 0x27, 0x27: C processes events (claims payment) +- 0xde: advance 200 blocks +- 0xff: settle + +A straightforward A->B->C hop payment that completes normally (C +claims), followed by a height advance and settlement. No force-close +in this sequence, so the height advance before settlement may cause +HTLC timeout processing that conflicts with the claim path. + +## 6. channelmanager.rs:19484 - Monitor update ID ordering violation + +``` +debug_assert!(update.update_id >= pending_update.update_id); +``` + +A ChannelMonitorUpdate has an update_id that is less than a pending +update's id, violating the expected monotonic ordering. + +Minimized (10 bytes): +``` +0x84 0x70 0x11 0x19 0x11 0x1f 0xd0 0x11 0x1f 0xba +``` + +Byte 0 = 0x84: Legacy channels, no async monitors, high bits set +(bits 3-4 = 0, bits 7 and 2 set). +- 0x70: disconnect A-B peers +- 0x11, 0x19, 0x11: process messages (likely reestablish after setup) +- 0x1f: process B events +- 0xd0: A force-closes A-B channel +- 0x11: process A messages +- 0x1f: process B events +- 0xba: restart node B with alternate monitor state + +A force-close followed by continued message/event processing and a +node B restart triggers a monitor update with an out-of-order ID. diff --git a/fuzz/.gitignore b/fuzz/.gitignore index e8dc6b6e08b..cc3f5f53040 100644 --- a/fuzz/.gitignore +++ b/fuzz/.gitignore @@ -2,3 +2,4 @@ hfuzz_target target hfuzz_workspace corpus +artifacts \ No newline at end of file diff --git a/fuzz/FC-INFO.md b/fuzz/FC-INFO.md new file mode 100644 index 00000000000..1293fcdcedb --- /dev/null +++ b/fuzz/FC-INFO.md @@ -0,0 +1,107 @@ +# Force-Close Fuzzing Notes + +This file records the current contract for `chanmon_consistency` force-close +coverage. It is intentionally short. Keep branch history and one-off debugging +notes elsewhere. 
+ +## Goal + +Force-close fuzzing here should: + +- exercise realistic off-chain to on-chain transitions +- keep force-close from changing the eventual outcome of claimed payments +- only allow claimed-payment sender failures when force-close dust touched a + used payment path +- allow unclaimed HTLCs to resolve by CLTV timeout +- drive the harness far enough that it observes real terminal outcomes +- avoid manufacturing timeout wins by starving message delivery or claim + propagation + +## Hard-Mode Invariant + +The current hard mode is: + +- once the harness calls `claim_funds`, that HTLC must eventually produce + `PaymentClaimed` at the receiver +- after that claim, the sender must eventually produce a terminal outcome, + `PaymentSent` or `PaymentFailed` +- if the sender produces `PaymentFailed` for a claimed payment, some used + force-close path for that payment must have been dust-trimmed +- force-close dust on a used path is not, by itself, enough to require + `PaymentFailed`; the payment may still end in `PaymentSent` +- if no used force-close path for the claimed payment was dust-trimmed, the + sender must eventually produce `PaymentSent` +- going on-chain does not create any broader exception than that dust case +- unclaimed HTLCs may still fail by CLTV expiry +- CSV waits on force-close outputs are normal and expected; they are not + payment outcome changes +- a payment disappearing from `list_recent_payments()` is not enough, the + harness must observe or drive the terminal outcome directly + +In this mode, the following are harness failures: + +- `HTLCHandlingFailed::Receive` after we already chose to claim the HTLC +- a receiver-side claim without the receiver later getting `PaymentClaimed` +- a claimed HTLC without any sender-side terminal event +- a claimed HTLC getting `PaymentFailed` without any dust-trimmed used + force-close path +- a claimed HTLC that should fulfill resolving by CLTV timeout instead +- cleanup stopping while live balances or 
other pending work still show that + more progress is possible + +## Timeouts + +Do not conflate CSV and CLTV: + +- CSV is normal force-close settlement latency +- CLTV expiry changes the HTLC outcome + +The harness should keep driving through CSV waits. It should only protect +claimed HTLCs that should still fulfill from CLTV-expiry resolution. + +## Harness Rules + +The main rules for preserving the invariant are: + +- advance large height jumps one block at a time, with bounded draining before + and after each block +- process queued messages and events before confirming newly broadcast + transactions, so preimages can propagate before timeout paths win +- keep sender-side payment bookkeeping independent of + `list_recent_payments()` +- track which channels each payment actually used, and when force-closing, + snapshot which used payment paths become dust-blocked on the closer's + commitment +- keep driving while `ClaimableOnChannelClose`, HTLC-related claimable balances, + queued messages, pending monitor updates, or pending broadcasts still show + unresolved work +- only stop before a CLTV boundary when crossing it would let a claimed HTLC + that has not yet reached a sender terminal event expire instead +- do not hide pending-payment state behind unrelated auto-driving before an + explicit force-close opcode; a bounded pre-close drain is acceptable when it + is only making already-queued work visible + +## Review Checklist + +When changing this harness, verify: + +- claimed HTLCs still require `PaymentClaimed` +- claimed HTLCs still require a sender-side terminal event +- claimed HTLCs only allow `PaymentFailed` when some used force-close path was + dust-trimmed +- claimed HTLCs without dust-trimmed used force-close paths still require + `PaymentSent` +- unclaimed HTLCs may still time out on-chain +- force-close opcodes still act on the currently pending state +- large synthetic height jumps do not become blind timeout buttons again +- sender-side obligations 
are not reconciled away through local caches + +## Verification + +The standard check is: + +```bash +~/repo/rl-tools/run_fuzz_runner.sh --timeout-secs 20 +``` + +Re-run the full corpus after any meaningful force-close harness change. diff --git a/fuzz/ONCHAINTX-BUGS.md b/fuzz/ONCHAINTX-BUGS.md new file mode 100644 index 00000000000..0cb1b397bbe --- /dev/null +++ b/fuzz/ONCHAINTX-BUGS.md @@ -0,0 +1,327 @@ +# Recent `OnchainTxHandler` Bugs And Fixes + +This note records the three `OnchainTxHandler` bugs that were fixed while +hardening the `chanmon_consistency` force-close corpus. + +All three bugs lived in `lightning/src/chain/onchaintx.rs`. All three were real +logic issues, not harness-only artifacts. All three now pass in targeted +reruns and in the full `chanmon_consistency` corpus sweep. + +Current green reference runs: + +- Targeted duplicate-claim rerun: + `fuzz/artifacts/chanmon_runner/run-1776537725/summary.txt` +- Targeted contentious-claim rerun: + `fuzz/artifacts/chanmon_runner/run-1776538115/summary.txt` +- Targeted duplicate pending-claim-event rerun: + `fuzz/artifacts/chanmon_runner/run-1776586956/summary.txt` +- Full corpus rerun: + `fuzz/artifacts/chanmon_runner/run-1776587008/summary.txt` + +Full-corpus result: + +- `392 ok / 0 failed / 0 timed_out / 0 spawn_errors` + +## 1. Duplicate pending claim request after force-close + +### Repro cases + +- `fc_duplicate_pending_claim_request_after_force_close_39b47f` + - bytes: `0fd37373d0b2ffd3` +- `fc_duplicate_pending_claim_request_after_force_close_ed278d` + - bytes: `08d37373d0b2ffd3` + +### What went wrong + +The failing shape was: + +1. A force-close created two single-outpoint claim requests. +2. Those requests were merged into one delayed package because their + timelock was still in the future. +3. A later replay of `update_claims_view_from_requests` at the same + logical state recreated the same two single-outpoint requests. +4.
The old dedupe logic only rejected a duplicate delayed claim if the + outpoint sets were exactly equal. +5. Because the existing delayed claim had already been merged into a + two-outpoint package, the new single-outpoint requests were not seen + as duplicates. +6. At the timelock height, the same aggregated delayed package was + restored twice and tried to register the same `ClaimId` twice. + +The crash was the debug assertion in `OnchainTxHandler`: + +- `assertion failed: self.pending_claim_requests.get(&claim_id).is_none()` + +Representative evidence from +`fuzz/artifacts/chanmon_runner/run-1776537612/logs/fc_duplicate_pending_claim_request_after_force_close_ed278d.log`: + +- line `1829`: `Updating claims view at height 61 with 2 claim requests` +- line `1830`: delayed until timelock `361` +- line `2077`: the same `2 claim requests` appear again +- line `17163`: delayed package restored at timelock `361` +- lines `17164` and `17167`: the same two-outpoint event is yielded twice +- line `17169`: assertion failure + +The same pattern appears in the sibling repro +`fc_duplicate_pending_claim_request_after_force_close_39b47f`. + +### Why the old logic was wrong + +Before the fix, delayed-claim dedupe effectively asked: + +- "Do I already have a delayed package with exactly the same outpoint + set as this new request?" + +That was too strict. Once two single-outpoint requests had already been +merged into one delayed package, replaying either single-outpoint +request should have been considered duplicate as well. + +The correct question is: + +- "Is every outpoint in this new request already covered by an existing + delayed package?" + +### The fix + +In `OnchainTxHandler::update_claims_view_from_requests`, the delayed +claim dedupe was changed from exact package equality to covering-package +detection. 
+ +Relevant code: + +- `lightning/src/chain/onchaintx.rs`, `timelocked_covering_package` +- log line for this path: + `Ignoring second claim for outpoint ..., we already have one which + we're waiting on a timelock at ...` + +In practical terms: + +- a fresh single-outpoint request is now ignored if a delayed package + already contains that outpoint +- replaying the same logical claim state no longer creates duplicate + delayed packages +- the delayed package is restored only once at the timelock height + +### Why this fix is correct + +This does not suppress any legitimate new claim. It only rejects a +request whose entire outpoint set is already represented in pending +delayed state. If a request introduces a truly new outpoint, it still +passes through. + +### Verification + +Targeted rerun: + +- `fuzz/artifacts/chanmon_runner/run-1776537725/summary.txt` +- result: `2 ok / 0 failed / 0 timed_out` + +## 2. Contentious claim reused an already resolved outpoint + +### Repro cases + +- `fc_contentious_claim_stuck_after_force_close_218996` + - bytes: `89ffde3d3dc0d3ff` +- `fc_contentious_claim_stuck_after_force_close_36a22e` + - bytes: `2cffde3d3dc0d3ff` +- `fc_contentious_claim_stuck_after_force_close_d7793e` + - bytes: `76ffde3d3dc0d1ff` + +### What went wrong + +The failing shape was: + +1. An HTLC output was claimed on-chain by a single-outpoint claim. +2. That claim matured past `ANTI_REORG_DELAY`. +3. `OnchainTxHandler` removed the pending claim tracking for that + outpoint. +4. A later preimage update arrived and built a fresh two-outpoint claim + that included the already-resolved outpoint again. +5. That new claim could never confirm, because one of its inputs had + already been definitively spent. +6. The handler kept RBF-bumping that impossible claim forever, leaving a + claimed payment stuck pending in the harness. 
+ +Representative evidence from +`fuzz/artifacts/chanmon_runner/run-1776537816/logs/fc_contentious_claim_stuck_after_force_close_d7793e.log`: + +- line `3173`: `Updating claims view at height 60 with 1 claim requests` +- line `3175`: registers claim for + `cc0e...:2` +- line `3282`: removes tracking for `cc0e...:2` after the claim package + matured +- line `3424`: `Updating claims view at height 66 with 2 claim requests` +- line `3425`: yields a new event spending + `cc0e...:1` and `cc0e...:2` +- lines `3426` and `3427`: registers both outpoints again +- lines `4438`, `5380`, `6322`, and many later lines: keeps yielding + RBF-bumped events for that same impossible two-input claim +- line `21640`: final harness failure, + `Node 2 has 1 stuck pending payments after settling all state` + +The same family reproduced in the other two named cases. + +### Why the old logic was wrong + +Removing an outpoint from `claimable_outpoints` after its claim matured +was not enough. That only said: + +- "we no longer need to actively track this pending request" + +It did not preserve the stronger fact: + +- "this outpoint is definitively spent and must never be re-claimed" + +Without that second fact, a later preimage could cause +`update_claims_view_from_requests` to resurrect an already-resolved +outpoint into a new claim package. + +### The fix + +`OnchainTxHandler` now maintains a restart-safe +`irrevocably_spent_outpoints: HashSet`. + +Relevant code paths: + +- field definition: + `lightning/src/chain/onchaintx.rs` +- serialization and deserialization: + the new optional TLV field in `write` and `read` +- request filtering: + `Ignoring claim for outpoint ..., it was already irrevocably spent by + a confirmed claim transaction` +- maturation handling: + outpoints are inserted into `irrevocably_spent_outpoints` when a claim + or contentious outpoint reaches the anti-reorg threshold + +This matters for restarts as well. 
The spent-outpoint memory is part of +the serialized `OnchainTxHandler` state, so a monitor reload does not +forget that the output was already definitively resolved. + +### Why this fix is correct + +Once a claim tx for an outpoint has reached `ANTI_REORG_DELAY`, the +handler should never generate a new claim for that same outpoint unless +the chain reorgs deep enough to invalidate the confirmation. That is +exactly the invariant the new set captures. + +The fix is intentionally narrow: + +- it does not suppress still-live outpoints +- it does not interfere with normal package splitting or merging +- it only blocks claim generation for outpoints that were already + irreversibly resolved + +### Verification + +Targeted rerun: + +- `fuzz/artifacts/chanmon_runner/run-1776538115/summary.txt` +- result: `3 ok / 0 failed / 0 timed_out` + +## 3. Duplicate pending claim event after force-close + +### Repro cases + +- `fc_duplicate_pending_claim_event_after_force_close` + - bytes: `2934ff3dc0d1b6ff` +- `fc_duplicate_pending_claim_event_after_force_close_zero_fee_commitments` + - bytes: `3f34ff3dc0d1b6ff` + +### What went wrong + +The failing shape was: + +1. A force-close path yielded an `OnchainClaim::Event`. +2. `OnchainTxHandler` inserted that event into `pending_claim_events` + under its `ClaimId`. +3. Before the original pending event was drained, the same logical claim + was rebuilt and yielded again. +4. The initial insertion path still assumed that duplicate `ClaimId` + entries could never happen there. +5. That path pushed a second entry with the same `ClaimId` and hit the + debug assertion that the count had to be zero. 
+ +The crash was the debug assertion in `OnchainTxHandler`: + +- `debug_assert_eq!(self.pending_claim_events.iter().filter(|entry| entry.0 == claim_id).count(), 0);` + +Representative evidence from +`fuzz/artifacts/chanmon_runner/run-1776584834/logs/crash-4b5e6aabf5bc0467bcd2163cced7d60241d24f17.log`: + +- line `3544`: yields an on-chain event spending the commitment output +- line `3545`: registers the associated claim request +- line `3679`: later rebuilds claims view with one fresh claim request +- line `3680`: yields another on-chain event for HTLC output + `513872...:2` +- line `3681`: assertion failure while inserting the second event with + the same `ClaimId` + +The sibling zero-fee-commitments repro follows the same shape in +`crash-a83289388ca2b4f52279218f3a70e0f1f0661a92.log`, with the same +panic at `onchaintx.rs:944`. + +### Why the old logic was wrong + +`pending_claim_events` was already being treated like a keyed queue in +other parts of `OnchainTxHandler`: + +- rebroadcast logic replaced existing entries by `ClaimId` +- bump logic replaced existing entries by `ClaimId` +- reorg logic replaced existing entries by `ClaimId` + +Only the initial insertion path still assumed uniqueness and pushed +blindly. That left the structure with inconsistent semantics depending +on which path happened to enqueue the event. + +The correct invariant is: + +- there is at most one pending event per `ClaimId` +- re-enqueuing the same logical claim should replace the older entry, + not panic + +### The fix + +In `OnchainTxHandler::update_claims_view_from_requests`, the initial +`OnchainClaim::Event` insertion now matches the other paths: + +- under debug builds it asserts the existing count is `0` or `1` +- it removes any existing `pending_claim_events` entry for that + `ClaimId` +- it then pushes the newest event + +This preserves insertion order for distinct claim ids while making +duplicate requeues idempotent. 
+ +### Why this fix is correct + +This does not hide a real conflict between distinct claims. Two +different claim packages should not share a `ClaimId`. If they do, they +represent the same logical event as far as the queue is concerned, and +the newest version should replace the old one. + +This also makes the queue semantics internally consistent. Every path +that mutates `pending_claim_events` now treats it as keyed by +`ClaimId`, rather than having one path act like a multimap. + +### Verification + +Targeted rerun: + +- `fuzz/artifacts/chanmon_runner/run-1776586956/summary.txt` +- result: `2 ok / 0 failed / 0 timed_out` + +## Final verification + +After all three fixes landed, the default corpus sweep passed: + +- `fuzz/artifacts/chanmon_runner/run-1776587008/summary.txt` +- result: `392 ok / 0 failed / 0 timed_out / 0 spawn_errors` + +This is the reference run showing that: + +- the duplicate delayed-claim family is fixed +- the contentious reused-outpoint family is fixed +- the duplicate pending-claim-event family is fixed +- neither change regressed the previously fixed dust, restart, or + sender-terminal-event invariants diff --git a/fuzz/OPEN-ISSUES.md b/fuzz/OPEN-ISSUES.md new file mode 100644 index 00000000000..e20c8e82390 --- /dev/null +++ b/fuzz/OPEN-ISSUES.md @@ -0,0 +1,39 @@ +# Open Issues + +There are no currently open `chanmon_consistency` crash families in this +branch. + +Latest green reference run: + +- Full corpus rerun: + [run-1776587008 summary](/Users/joost/repo/rust-lightning-fuzz-force-close/fuzz/artifacts/chanmon_runner/run-1776587008/summary.txt) + with `392 ok / 0 failed / 0 timed_out / 0 spawn_errors` + +Recently resolved: + +- Manager reload failed with `DangerousValue`. + Fixed in + [fuzz/src/chanmon_consistency.rs](/Users/joost/repo/rust-lightning-fuzz-force-close/fuzz/src/chanmon_consistency.rs) + by retiring every pending monitor blob at `<= completed_update_id` + once a later monitor update is acknowledged complete. 
+ This prevents restart selectors from reloading a stale older monitor + after the serialized `ChannelManager` has already dropped the + corresponding blocked updates. + Targeted verification is clean in + [run-1776585235 summary](/Users/joost/repo/rust-lightning-fuzz-force-close/fuzz/artifacts/chanmon_runner/run-1776585235/summary.txt) + with `8 ok / 0 failed / 0 timed_out`. + +- `OnchainTxHandler` could enqueue the same pending claim event twice. + Fixed in + [lightning/src/chain/onchaintx.rs](/Users/joost/repo/rust-lightning-fuzz-force-close/lightning/src/chain/onchaintx.rs) + by making the initial `pending_claim_events` insertion path replace an + existing entry with the same `ClaimId`, matching the keyed behavior + already used in the rebroadcast, bump, and reorg paths. + Representative repro cases are: + [fc_duplicate_pending_claim_event_after_force_close](/Users/joost/repo/rust-lightning-fuzz-force-close/fuzz/test_cases/chanmon_consistency/fc_duplicate_pending_claim_event_after_force_close) + with bytes `2934ff3dc0d1b6ff`, and + [fc_duplicate_pending_claim_event_after_force_close_zero_fee_commitments](/Users/joost/repo/rust-lightning-fuzz-force-close/fuzz/test_cases/chanmon_consistency/fc_duplicate_pending_claim_event_after_force_close_zero_fee_commitments) + with bytes `3f34ff3dc0d1b6ff`. + Targeted verification is clean in + [run-1776586956 summary](/Users/joost/repo/rust-lightning-fuzz-force-close/fuzz/artifacts/chanmon_runner/run-1776586956/summary.txt) + with `2 ok / 0 failed / 0 timed_out`. 
diff --git a/fuzz/test_cases/base32/smoke b/fuzz/test_cases/base32/smoke deleted file mode 100644 index 573541ac970..00000000000 --- a/fuzz/test_cases/base32/smoke +++ /dev/null @@ -1 +0,0 @@ -0 diff --git a/fuzz/test_cases/bech32_parse/smoke b/fuzz/test_cases/bech32_parse/smoke deleted file mode 100644 index 573541ac970..00000000000 --- a/fuzz/test_cases/bech32_parse/smoke +++ /dev/null @@ -1 +0,0 @@ -0 diff --git a/fuzz/test_cases/chanmon_consistency/crash-02830a6ff7757f3570924b0c0fd9118a7cdd9770 b/fuzz/test_cases/chanmon_consistency/crash-02830a6ff7757f3570924b0c0fd9118a7cdd9770 new file mode 100644 index 0000000000000000000000000000000000000000..57c626b8597071187a3bc4e90ce05655fd65b429 GIT binary patch literal 24 dcmZQL!C@mPDJUzcD0%V5&0F{G0l7ed{{UQa2#Npz literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/crash-0473b0e767d9a98de62538ce5afcbbc2e6ec5af2 b/fuzz/test_cases/chanmon_consistency/crash-0473b0e767d9a98de62538ce5afcbbc2e6ec5af2 new file mode 100644 index 00000000000..ba413134fbb --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/crash-0473b0e767d9a98de62538ce5afcbbc2e6ec5af2 @@ -0,0 +1 @@ +pppppp0ppp0ÀÞÚÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/crash-05e175d40f60b823f730fa874d98dc10dd2bb6ad b/fuzz/test_cases/chanmon_consistency/crash-05e175d40f60b823f730fa874d98dc10dd2bb6ad new file mode 100644 index 00000000000..cabed892750 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/crash-05e175d40f60b823f730fa874d98dc10dd2bb6ad @@ -0,0 +1 @@ +lls²ÿÿÿÿÝÝÝÝÝÿÿÿÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/crash-07bdc4e56ee67bd2ffa409f76529199d748ab2d8 b/fuzz/test_cases/chanmon_consistency/crash-07bdc4e56ee67bd2ffa409f76529199d748ab2d8 new file mode 100644 index 00000000000..eb3ac3716d2 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/crash-07bdc4e56ee67bd2ffa409f76529199d748ab2d8 @@ -0,0 +1 @@ +pppppp0ÀÐ%ÞÞÏØÙÚÿ \ No newline at end of file diff --git 
a/fuzz/test_cases/chanmon_consistency/crash-096cc3008264dccaefb945f5a4b7a2d3c9f8e90c b/fuzz/test_cases/chanmon_consistency/crash-096cc3008264dccaefb945f5a4b7a2d3c9f8e90c new file mode 100644 index 00000000000..f00662619a1 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/crash-096cc3008264dccaefb945f5a4b7a2d3c9f8e90c @@ -0,0 +1 @@ +<!''ÐØ¥!ÙÚÞºÿ³ÑºÓÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/crash-09a17e06913dea74dba796940cec86cb4e2dd597 b/fuzz/test_cases/chanmon_consistency/crash-09a17e06913dea74dba796940cec86cb4e2dd597 new file mode 100644 index 00000000000..30543451915 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/crash-09a17e06913dea74dba796940cec86cb4e2dd597 @@ -0,0 +1 @@ +<!ÑØÙÚÞÞÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/crash-09f5a41270b07f70a031884cbdfd081e8600923e b/fuzz/test_cases/chanmon_consistency/crash-09f5a41270b07f70a031884cbdfd081e8600923e new file mode 100644 index 0000000000000000000000000000000000000000..e0ff1832a4fa6f420610b625b63dfc2b521be628 GIT binary patch literal 22 ecmdi1 literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/crash-228ea00412a2fab1e866fc6df32ffd00bbfe81ad b/fuzz/test_cases/chanmon_consistency/crash-228ea00412a2fab1e866fc6df32ffd00bbfe81ad new file mode 100644 index 0000000000000000000000000000000000000000..4a6a76ade6c4327ec5b327290a1d158a6a649f45 GIT binary patch literal 24 fcmd-u6y(3j00f4D|Nr0PziD&p9>>4`|Na92UV95K literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/crash-242de208110143401fcf4e1ebaa7d9d38fb93611 b/fuzz/test_cases/chanmon_consistency/crash-242de208110143401fcf4e1ebaa7d9d38fb93611 new file mode 100644 index 00000000000..76da0f6debb --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/crash-242de208110143401fcf4e1ebaa7d9d38fb93611 @@ -0,0 +1 @@ +*ÿ¹¹¹þÿÿÒ¸ÿÞÞÞÞÿ¹¹¹¹¹¹¹ \ No newline at end of file diff --git 
a/fuzz/test_cases/chanmon_consistency/crash-24f1373b1cf51f95af854d6d8730336b77728007 b/fuzz/test_cases/chanmon_consistency/crash-24f1373b1cf51f95af854d6d8730336b77728007 new file mode 100644 index 00000000000..0064fa17f19 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/crash-24f1373b1cf51f95af854d6d8730336b77728007 @@ -0,0 +1 @@ +*ÿ¹tÿA2¹¹¹¹ÑØÙÚÞÿÿ¹¹ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/crash-2923c14608fb259c21862cd71ffeb6ac74b0ba32 b/fuzz/test_cases/chanmon_consistency/crash-2923c14608fb259c21862cd71ffeb6ac74b0ba32 new file mode 100644 index 00000000000..ff1549ef79f --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/crash-2923c14608fb259c21862cd71ffeb6ac74b0ba32 @@ -0,0 +1 @@ +p0p0ÀÞÞÏØ°Zÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/crash-2a0852bec1d75334538dacec26831db6995b6e33 b/fuzz/test_cases/chanmon_consistency/crash-2a0852bec1d75334538dacec26831db6995b6e33 new file mode 100644 index 00000000000..f5e273ff51f --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/crash-2a0852bec1d75334538dacec26831db6995b6e33 @@ -0,0 +1 @@ +p0ÀÞÞÏbÿÙÚÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/crash-2d93541536e19c030d95d236e6be545352d98b80 b/fuzz/test_cases/chanmon_consistency/crash-2d93541536e19c030d95d236e6be545352d98b80 new file mode 100644 index 00000000000..0c7432d7c20 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/crash-2d93541536e19c030d95d236e6be545352d98b80 @@ -0,0 +1 @@ +*ÿ¹tÿA¹¹¹¹ÑØÙÚÞÿÿ¹¹ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/crash-2e002fcfdc76c5981f5f93c0f842b548fb56c7a7 b/fuzz/test_cases/chanmon_consistency/crash-2e002fcfdc76c5981f5f93c0f842b548fb56c7a7 new file mode 100644 index 00000000000..bd5c0aab70a --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/crash-2e002fcfdc76c5981f5f93c0f842b548fb56c7a7 @@ -0,0 +1 @@ +p0t0ÀÞÞÏØÙZÿ \ No newline at end of file diff --git 
a/fuzz/test_cases/chanmon_consistency/crash-2fad50c7fd20b250f0349887445af198124900df b/fuzz/test_cases/chanmon_consistency/crash-2fad50c7fd20b250f0349887445af198124900df new file mode 100644 index 0000000000000000000000000000000000000000..44d0be6fc50646d858865e0158f9e4641b1e387d GIT binary patch literal 23 ecmd{{cHF2-W}q literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/crash-33c08a8f15f1c842df5da4fc92228d00606573f9 b/fuzz/test_cases/chanmon_consistency/crash-33c08a8f15f1c842df5da4fc92228d00606573f9 new file mode 100644 index 00000000000..391b9204000 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/crash-33c08a8f15f1c842df5da4fc92228d00606573f9 @@ -0,0 +1 @@ +<ˆ0sslqlqqÚÞÞÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/crash-33e77c2f720493e306bbfea79f151388ca7a04ea b/fuzz/test_cases/chanmon_consistency/crash-33e77c2f720493e306bbfea79f151388ca7a04ea new file mode 100644 index 0000000000000000000000000000000000000000..2c4a1c6cac69675d19dd6a25823cf46a586de0c6 GIT binary patch literal 24 dcmd-w;QIf+VM*$eB|z%lz5hFbVDtaY3;?vR50(G` literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/crash-37a18356d608c97415c0a1bef6a0f13fe04c8b97 b/fuzz/test_cases/chanmon_consistency/crash-37a18356d608c97415c0a1bef6a0f13fe04c8b97 new file mode 100644 index 0000000000000000000000000000000000000000..877a41dd6ae570a9dedab0719c6337030f68c2c7 GIT binary patch literal 22 dcmd07LEu=>Px# literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/crash-8ab54f3642a60a239a7bb787838f3e5a6b6f4f41 b/fuzz/test_cases/chanmon_consistency/crash-8ab54f3642a60a239a7bb787838f3e5a6b6f4f41 new file mode 100644 index 00000000000..50f706994ac --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/crash-8ab54f3642a60a239a7bb787838f3e5a6b6f4f41 @@ -0,0 +1 @@ +ÐÿÞ ØÙÚÌÜÜÿÿÿÿÿÿÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/crash-8ec6798103af6cedfdec68373991c0c0a73e3770 
b/fuzz/test_cases/chanmon_consistency/crash-8ec6798103af6cedfdec68373991c0c0a73e3770 new file mode 100644 index 00000000000..877960d1655 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/crash-8ec6798103af6cedfdec68373991c0c0a73e3770 @@ -0,0 +1 @@ +<:!''ÚÞÞÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/crash-8f5cc4f6de42f52dcb571b6c0f21df957eb25462 b/fuzz/test_cases/chanmon_consistency/crash-8f5cc4f6de42f52dcb571b6c0f21df957eb25462 new file mode 100644 index 0000000000000000000000000000000000000000..e64b0b71a133805d9d4ec6ce851113c8497e3757 GIT binary patch literal 27 Zcmd;(U|{$U2ZbUg}ZnD0{|yc2Q2^q literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/crash-91ebb8583ed7705e2601334e52428ea5eb80a681 b/fuzz/test_cases/chanmon_consistency/crash-91ebb8583ed7705e2601334e52428ea5eb80a681 new file mode 100644 index 0000000000000000000000000000000000000000..ba9c42d7059ce40516c0cc28ac235e92721a4a98 GIT binary patch literal 23 dcmd~25 literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/fc_advance_before_drain_keyed_anchors b/fuzz/test_cases/chanmon_consistency/fc_advance_before_drain_keyed_anchors new file mode 100644 index 00000000000..a9b7f9e59f4 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_advance_before_drain_keyed_anchors @@ -0,0 +1 @@ +ÐÜØÙÚÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_advance_before_drain_zero_fee_commitments b/fuzz/test_cases/chanmon_consistency/fc_advance_before_drain_zero_fee_commitments new file mode 100644 index 00000000000..5208796d906 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_advance_before_drain_zero_fee_commitments @@ -0,0 +1 @@ +ÐÜØÙÚÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_after_claim_before_forward b/fuzz/test_cases/chanmon_consistency/fc_after_claim_before_forward new file mode 100644 index 
0000000000000000000000000000000000000000..6ed4f13402c34dd77779be072fa58c2cb191520f GIT binary patch literal 18 acmZRu5tI~^msFHgSHE!M=B<18{sRCe_y81=B+z-?)(P;CAaO38!d-whW03k*O=Kufz literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/fc_direct_pay_claimed_keyed_anchors b/fuzz/test_cases/chanmon_consistency/fc_direct_pay_claimed_keyed_anchors new file mode 100644 index 00000000000..a3fb940a20a --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_direct_pay_claimed_keyed_anchors @@ -0,0 +1 @@ +0ÐØÙÚÞÞÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_direct_pay_claimed_zero_fee_commitments b/fuzz/test_cases/chanmon_consistency/fc_direct_pay_claimed_zero_fee_commitments new file mode 100644 index 00000000000..5363a35b90c --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_direct_pay_claimed_zero_fee_commitments @@ -0,0 +1 @@ +0ÐØÙÚÞÞÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_disabled_signers b/fuzz/test_cases/chanmon_consistency/fc_disabled_signers new file mode 100644 index 0000000000000000000000000000000000000000..e178523c474a4b925340f2c79d99e33af2b39852 GIT binary patch literal 13 VcmZQLaPY#Jvo~(ux^w5we*ixR3JCxJ literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/fc_disabled_signers_keyed_anchors b/fuzz/test_cases/chanmon_consistency/fc_disabled_signers_keyed_anchors new file mode 100644 index 00000000000..c3b358cff6e --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_disabled_signers_keyed_anchors @@ -0,0 +1 @@ +ÀÁÐÌÍØÙÚÜÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_disabled_signers_zero_fee_commitments b/fuzz/test_cases/chanmon_consistency/fc_disabled_signers_zero_fee_commitments new file mode 100644 index 00000000000..7268091c3a6 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_disabled_signers_zero_fee_commitments @@ -0,0 +1 @@ +ÀÁÐÌÍØÙÚÜÜÜÿ \ No newline at end of file diff --git 
a/fuzz/test_cases/chanmon_consistency/fc_disconnect_close_reconnect b/fuzz/test_cases/chanmon_consistency/fc_disconnect_close_reconnect new file mode 100644 index 0000000000000000000000000000000000000000..207eb4b58c90a08f743da1baff7708c0e87a125f GIT binary patch literal 13 VcmZSJxxgnNapUH#J9qB<2LK~v2QdHu literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/fc_disconnect_close_reconnect_keyed_anchors b/fuzz/test_cases/chanmon_consistency/fc_disconnect_close_reconnect_keyed_anchors new file mode 100644 index 00000000000..519443e6590 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_disconnect_close_reconnect_keyed_anchors @@ -0,0 +1 @@ + ÐØÙÚÜÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_disconnect_close_reconnect_zero_fee_commitments b/fuzz/test_cases/chanmon_consistency/fc_disconnect_close_reconnect_zero_fee_commitments new file mode 100644 index 00000000000..b89f4eb6bb1 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_disconnect_close_reconnect_zero_fee_commitments @@ -0,0 +1 @@ + ÐØÙÚÜÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_disconnect_drain_reconnect b/fuzz/test_cases/chanmon_consistency/fc_disconnect_drain_reconnect new file mode 100644 index 0000000000000000000000000000000000000000..93ca334ca2a70442054b3a006617f9e744d02f78 GIT binary patch literal 13 VcmZR$z;ol~Ej|H>J9qB<2LLN42QdHu literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/fc_disconnect_drain_reconnect_keyed_anchors b/fuzz/test_cases/chanmon_consistency/fc_disconnect_drain_reconnect_keyed_anchors new file mode 100644 index 00000000000..977c994b110 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_disconnect_drain_reconnect_keyed_anchors @@ -0,0 +1 @@ +Ð ØÙÚÜÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_disconnect_drain_reconnect_zero_fee_commitments 
b/fuzz/test_cases/chanmon_consistency/fc_disconnect_drain_reconnect_zero_fee_commitments new file mode 100644 index 00000000000..c11308e079e --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_disconnect_drain_reconnect_zero_fee_commitments @@ -0,0 +1 @@ +Ð ØÙÚÜÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_drain_a_only b/fuzz/test_cases/chanmon_consistency/fc_drain_a_only new file mode 100644 index 0000000000000000000000000000000000000000..0b2e2d9f9ec9c46c34d8aca48afb11a0bac1872a GIT binary patch literal 7 OcmZR$aO2LMJO2R@+66oS literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/fc_drain_a_only_keyed_anchors b/fuzz/test_cases/chanmon_consistency/fc_drain_a_only_keyed_anchors new file mode 100644 index 00000000000..fb230c9d737 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_drain_a_only_keyed_anchors @@ -0,0 +1 @@ +ÐØÜÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_drain_a_only_zero_fee_commitments b/fuzz/test_cases/chanmon_consistency/fc_drain_a_only_zero_fee_commitments new file mode 100644 index 00000000000..745714790dd --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_drain_a_only_zero_fee_commitments @@ -0,0 +1 @@ +ÐØÜÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_duplicate_pending_claim_event_after_force_close b/fuzz/test_cases/chanmon_consistency/fc_duplicate_pending_claim_event_after_force_close new file mode 100644 index 00000000000..90215e173af --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_duplicate_pending_claim_event_after_force_close @@ -0,0 +1 @@ +)4ÿ=ÀѶÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_duplicate_pending_claim_event_after_force_close_zero_fee_commitments b/fuzz/test_cases/chanmon_consistency/fc_duplicate_pending_claim_event_after_force_close_zero_fee_commitments new file mode 100644 index 00000000000..915dda2eb20 --- /dev/null +++ 
b/fuzz/test_cases/chanmon_consistency/fc_duplicate_pending_claim_event_after_force_close_zero_fee_commitments @@ -0,0 +1 @@ +?4ÿ=ÀѶÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_duplicate_pending_claim_request_after_force_close_39b47f b/fuzz/test_cases/chanmon_consistency/fc_duplicate_pending_claim_request_after_force_close_39b47f new file mode 100644 index 00000000000..04b31004a35 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_duplicate_pending_claim_request_after_force_close_39b47f @@ -0,0 +1 @@ +ÓssвÿÓ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_duplicate_pending_claim_request_after_force_close_ed278d b/fuzz/test_cases/chanmon_consistency/fc_duplicate_pending_claim_request_after_force_close_ed278d new file mode 100644 index 00000000000..dad6421e159 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_duplicate_pending_claim_request_after_force_close_ed278d @@ -0,0 +1 @@ +ÓssвÿÓ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_during_reconnect b/fuzz/test_cases/chanmon_consistency/fc_during_reconnect new file mode 100644 index 0000000000000000000000000000000000000000..fa91c56dd57c27a7d09026c131bd07d60af03dc5 GIT binary patch literal 13 VcmZSJ;kzIpapUH#J9qB<2LK|X2QdHu literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/fc_during_reconnect_keyed_anchors b/fuzz/test_cases/chanmon_consistency/fc_during_reconnect_keyed_anchors new file mode 100644 index 00000000000..9d325043b08 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_during_reconnect_keyed_anchors @@ -0,0 +1 @@ + ÐØÙÚÜÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_during_reconnect_zero_fee_commitments b/fuzz/test_cases/chanmon_consistency/fc_during_reconnect_zero_fee_commitments new file mode 100644 index 00000000000..5ed28383592 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_during_reconnect_zero_fee_commitments 
@@ -0,0 +1 @@ + ÐØÙÚÜÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_dust_htlcs b/fuzz/test_cases/chanmon_consistency/fc_dust_htlcs new file mode 100644 index 0000000000000000000000000000000000000000..22e65fb3d4d29503573145a0fe55c4a1feb608ae GIT binary patch literal 19 bcmZRGh>VIBkPwhml)P~9#?4#z?)?Vrr(J9qB<2LL4m2S)$^ literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/fc_one_msg_at_a_time_keyed_anchors b/fuzz/test_cases/chanmon_consistency/fc_one_msg_at_a_time_keyed_anchors new file mode 100644 index 00000000000..44bfd92caef --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_one_msg_at_a_time_keyed_anchors @@ -0,0 +1 @@ +ÐØÙÚÜÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_one_msg_at_a_time_zero_fee_commitments b/fuzz/test_cases/chanmon_consistency/fc_one_msg_at_a_time_zero_fee_commitments new file mode 100644 index 00000000000..69689cecd75 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_one_msg_at_a_time_zero_fee_commitments @@ -0,0 +1 @@ +ÐØÙÚÜÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_pay_claim_close_pay b/fuzz/test_cases/chanmon_consistency/fc_pay_claim_close_pay new file mode 100644 index 0000000000000000000000000000000000000000..604a284a048544ac8c72956c8fd9b74698c31094 GIT binary patch literal 17 ZcmZQD5RecMlT*K7c;n`+J9qB<2LLL}2jc($ literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/fc_pay_claim_close_pay_keyed_anchors b/fuzz/test_cases/chanmon_consistency/fc_pay_claim_close_pay_keyed_anchors new file mode 100644 index 00000000000..813d703da99 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_pay_claim_close_pay_keyed_anchors @@ -0,0 +1 @@ +0'Ð1ØÙÚÜÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_pay_claim_close_pay_zero_fee_commitments b/fuzz/test_cases/chanmon_consistency/fc_pay_claim_close_pay_zero_fee_commitments new file mode 100644 index 
00000000000..91a7822c238 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_pay_claim_close_pay_zero_fee_commitments @@ -0,0 +1 @@ +0'Ð1ØÙÚÜÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_pending_cache_without_observable_lifecycle_zero_fee_commitments b/fuzz/test_cases/chanmon_consistency/fc_pending_cache_without_observable_lifecycle_zero_fee_commitments new file mode 100644 index 00000000000..149317919a3 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_pending_cache_without_observable_lifecycle_zero_fee_commitments @@ -0,0 +1 @@ +ÿÿÿÿ·Ïùÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_pending_monitor b/fuzz/test_cases/chanmon_consistency/fc_pending_monitor new file mode 100644 index 0000000000000000000000000000000000000000..ed197c810f12c5699150264790926c8c73c95070 GIT binary patch literal 15 XcmZQzFc6Tqz`=Rr=B+z-?)(P;B=QGX literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/fc_pending_monitor_keyed_anchors b/fuzz/test_cases/chanmon_consistency/fc_pending_monitor_keyed_anchors new file mode 100644 index 0000000000000000000000000000000000000000..89b4d1eca9f4ba9a9d66aff40dfdbb854c3ad2b5 GIT binary patch literal 15 Xcmd;JFc6Tqz`=Rr=B+z-?)(P;C29v? 
literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/fc_pending_monitor_zero_fee_commitments b/fuzz/test_cases/chanmon_consistency/fc_pending_monitor_zero_fee_commitments new file mode 100644 index 0000000000000000000000000000000000000000..b7dfc3c50127d57805f2f588a8291985552c3165 GIT binary patch literal 15 XcmWe&Fc6Tqz`=Rr=B+z-?)(P;CE^EY literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/fc_post_settle_probe_skips_zero_outbound_limit_channel b/fuzz/test_cases/chanmon_consistency/fc_post_settle_probe_skips_zero_outbound_limit_channel new file mode 100644 index 00000000000..7f191fe194b --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_post_settle_probe_skips_zero_outbound_limit_channel @@ -0,0 +1 @@ +ÿÿºúÿÿÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_post_settle_probe_uses_advertised_sendable_range b/fuzz/test_cases/chanmon_consistency/fc_post_settle_probe_uses_advertised_sendable_range new file mode 100644 index 00000000000..0d5352b88ae --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_post_settle_probe_uses_advertised_sendable_range @@ -0,0 +1,2 @@ + +ÿ´búÿÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_rapid_fire b/fuzz/test_cases/chanmon_consistency/fc_rapid_fire new file mode 100644 index 0000000000000000000000000000000000000000..cf6df985d74d27e4fc3f6b1de87cb6f6a1d54b69 GIT binary patch literal 12 UcmZQDxL|bg#?4!I?%eqg04jk9A^-pY literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/fc_rapid_fire_keyed_anchors b/fuzz/test_cases/chanmon_consistency/fc_rapid_fire_keyed_anchors new file mode 100644 index 00000000000..01035d5c146 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_rapid_fire_keyed_anchors @@ -0,0 +1 @@ +0Ð2ÑØÙÚÜÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_rapid_fire_zero_fee_commitments b/fuzz/test_cases/chanmon_consistency/fc_rapid_fire_zero_fee_commitments new 
file mode 100644 index 00000000000..a5d2a725f74 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_rapid_fire_zero_fee_commitments @@ -0,0 +1 @@ +0Ð2ÑØÙÚÜÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_reconnect b/fuzz/test_cases/chanmon_consistency/fc_reconnect new file mode 100644 index 0000000000000000000000000000000000000000..d0a2f5dc15227c19183b0de7b4b7cfe8b0d099f6 GIT binary patch literal 13 VcmZR$z{4jXapUH#J9qB<2LL1}2QdHu literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/fc_reconnect_broadcast_announcements b/fuzz/test_cases/chanmon_consistency/fc_reconnect_broadcast_announcements new file mode 100644 index 00000000000..9547b711ba8 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_reconnect_broadcast_announcements @@ -0,0 +1 @@ +µ¸ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_reconnect_keyed_anchors b/fuzz/test_cases/chanmon_consistency/fc_reconnect_keyed_anchors new file mode 100644 index 00000000000..77ac690a59a --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_reconnect_keyed_anchors @@ -0,0 +1 @@ +Ð ØÙÚÜÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_reconnect_zero_fee_commitments b/fuzz/test_cases/chanmon_consistency/fc_reconnect_zero_fee_commitments new file mode 100644 index 00000000000..a76076c83c3 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_reconnect_zero_fee_commitments @@ -0,0 +1 @@ +Ð ØÙÚÜÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_repeated_same_channel b/fuzz/test_cases/chanmon_consistency/fc_repeated_same_channel new file mode 100644 index 0000000000000000000000000000000000000000..2cecd41dd26ead926c526969e139f66f8969e77b GIT binary patch literal 11 TcmZR$aN)v@o44-Vx$_?YE%peH literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/fc_repeated_same_channel_keyed_anchors 
b/fuzz/test_cases/chanmon_consistency/fc_repeated_same_channel_keyed_anchors new file mode 100644 index 00000000000..9d1ae851c44 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_repeated_same_channel_keyed_anchors @@ -0,0 +1 @@ +ÐÐÐØÙÚÜÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_repeated_same_channel_zero_fee_commitments b/fuzz/test_cases/chanmon_consistency/fc_repeated_same_channel_zero_fee_commitments new file mode 100644 index 00000000000..6fde55ccdb7 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_repeated_same_channel_zero_fee_commitments @@ -0,0 +1 @@ +ÐÐÐØÙÚÜÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_restart_claimed_payment_stale_monitor_replay b/fuzz/test_cases/chanmon_consistency/fc_restart_claimed_payment_stale_monitor_replay new file mode 100644 index 00000000000..9af74b8d826 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_restart_claimed_payment_stale_monitor_replay @@ -0,0 +1 @@ +4Â4аÜaÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_restart_in_progress_chain_sync_replay b/fuzz/test_cases/chanmon_consistency/fc_restart_in_progress_chain_sync_replay new file mode 100644 index 00000000000..38181bf293f --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/fc_restart_in_progress_chain_sync_replay @@ -0,0 +1 @@ +14Â4аÓÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/fc_restart_mid_resolve b/fuzz/test_cases/chanmon_consistency/fc_restart_mid_resolve new file mode 100644 index 0000000000000000000000000000000000000000..0517320bc3380cd876cedb26e79a836721db66e8 GIT binary patch literal 10 ScmZR$aO38!I~(rY`40dkd literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/force_close_basic_async b/fuzz/test_cases/chanmon_consistency/force_close_basic_async new file mode 100644 index 00000000000..086ce5b53fe --- /dev/null +++ 
b/fuzz/test_cases/chanmon_consistency/force_close_basic_async @@ -0,0 +1 @@ +ÐØÙÚÜÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/force_close_basic_async_keyed_anchors b/fuzz/test_cases/chanmon_consistency/force_close_basic_async_keyed_anchors new file mode 100644 index 00000000000..55d8227b650 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/force_close_basic_async_keyed_anchors @@ -0,0 +1 @@ +ÐØÙÚÜÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/force_close_basic_async_zero_fee_commitments b/fuzz/test_cases/chanmon_consistency/force_close_basic_async_zero_fee_commitments new file mode 100644 index 00000000000..4f375bcbcc8 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/force_close_basic_async_zero_fee_commitments @@ -0,0 +1 @@ +ÐØÙÚÜÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/force_close_basic_keyed_anchors b/fuzz/test_cases/chanmon_consistency/force_close_basic_keyed_anchors new file mode 100644 index 00000000000..87788e516d1 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/force_close_basic_keyed_anchors @@ -0,0 +1 @@ +ÐØÙÚÜÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/force_close_basic_zero_fee_commitments b/fuzz/test_cases/chanmon_consistency/force_close_basic_zero_fee_commitments new file mode 100644 index 00000000000..686c55e6e8d --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/force_close_basic_zero_fee_commitments @@ -0,0 +1 @@ +ÐØÙÚÜÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/force_close_both_directions b/fuzz/test_cases/chanmon_consistency/force_close_both_directions new file mode 100644 index 0000000000000000000000000000000000000000..c55d73896f8652ff99bccefb8c203b6aca911c2a GIT binary patch literal 10 ScmZR$aPh{?TX*i<`40dk-3Pz` literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/force_close_both_directions_async 
b/fuzz/test_cases/chanmon_consistency/force_close_both_directions_async new file mode 100644 index 00000000000..4937e12b5e2 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/force_close_both_directions_async @@ -0,0 +1 @@ +ÐÑØÙÚÜÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/force_close_both_directions_async_keyed_anchors b/fuzz/test_cases/chanmon_consistency/force_close_both_directions_async_keyed_anchors new file mode 100644 index 00000000000..868c75adb90 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/force_close_both_directions_async_keyed_anchors @@ -0,0 +1 @@ +ÐÑØÙÚÜÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/force_close_both_directions_async_zero_fee_commitments b/fuzz/test_cases/chanmon_consistency/force_close_both_directions_async_zero_fee_commitments new file mode 100644 index 00000000000..0f3c204b38e --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/force_close_both_directions_async_zero_fee_commitments @@ -0,0 +1 @@ +ÐÑØÙÚÜÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/force_close_both_directions_keyed_anchors b/fuzz/test_cases/chanmon_consistency/force_close_both_directions_keyed_anchors new file mode 100644 index 00000000000..f5fd80c55fb --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/force_close_both_directions_keyed_anchors @@ -0,0 +1 @@ +ÐÑØÙÚÜÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/force_close_both_directions_zero_fee_commitments b/fuzz/test_cases/chanmon_consistency/force_close_both_directions_zero_fee_commitments new file mode 100644 index 00000000000..d22536577df --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/force_close_both_directions_zero_fee_commitments @@ -0,0 +1 @@ +ÐÑØÙÚÜÜÜÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/force_close_htlc_needs_height b/fuzz/test_cases/chanmon_consistency/force_close_htlc_needs_height new file 
mode 100644 index 0000000000000000000000000000000000000000..9936534a4758c909218399db60fdfb75b12faeaa GIT binary patch literal 11 ScmZQD5RecMle=)^<}CmX83XtL literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/force_close_htlc_needs_height_async b/fuzz/test_cases/chanmon_consistency/force_close_htlc_needs_height_async new file mode 100644 index 00000000000..11a097db35c --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/force_close_htlc_needs_height_async @@ -0,0 +1 @@ +0  ÐØÙÚ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/force_close_htlc_needs_height_async_keyed_anchors b/fuzz/test_cases/chanmon_consistency/force_close_htlc_needs_height_async_keyed_anchors new file mode 100644 index 00000000000..846b036955d --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/force_close_htlc_needs_height_async_keyed_anchors @@ -0,0 +1 @@ +0  ÐØÙÚ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/force_close_htlc_needs_height_async_zero_fee_commitments b/fuzz/test_cases/chanmon_consistency/force_close_htlc_needs_height_async_zero_fee_commitments new file mode 100644 index 00000000000..2ba86884cb3 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/force_close_htlc_needs_height_async_zero_fee_commitments @@ -0,0 +1 @@ +0  ÐØÙÚ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/force_close_htlc_needs_height_keyed_anchors b/fuzz/test_cases/chanmon_consistency/force_close_htlc_needs_height_keyed_anchors new file mode 100644 index 00000000000..7e59be8feaa --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/force_close_htlc_needs_height_keyed_anchors @@ -0,0 +1 @@ +0ÐØÙÚ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/force_close_htlc_needs_height_zero_fee_commitments b/fuzz/test_cases/chanmon_consistency/force_close_htlc_needs_height_zero_fee_commitments new file mode 100644 index 00000000000..d852e18e121 --- /dev/null +++ 
b/fuzz/test_cases/chanmon_consistency/force_close_htlc_needs_height_zero_fee_commitments @@ -0,0 +1 @@ +0ÐØÙÚ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/force_close_htlc_resolved b/fuzz/test_cases/chanmon_consistency/force_close_htlc_resolved new file mode 100644 index 0000000000000000000000000000000000000000..73b498f3c647ce882aa42003e39a1c5e1633509a GIT binary patch literal 11 ScmZQD5RecMle=)^-aP;fCj$t11JCh literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/force_close_three_node_preimage_async b/fuzz/test_cases/chanmon_consistency/force_close_three_node_preimage_async new file mode 100644 index 00000000000..f2961702e79 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/force_close_three_node_preimage_async @@ -0,0 +1,4 @@ +<   + ! + '' ! + Ðÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/force_close_three_node_preimage_async_keyed_anchors b/fuzz/test_cases/chanmon_consistency/force_close_three_node_preimage_async_keyed_anchors new file mode 100644 index 00000000000..babb7953068 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/force_close_three_node_preimage_async_keyed_anchors @@ -0,0 +1,4 @@ +<   + ! + '' ! + Ðÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/force_close_three_node_preimage_async_zero_fee_commitments b/fuzz/test_cases/chanmon_consistency/force_close_three_node_preimage_async_zero_fee_commitments new file mode 100644 index 00000000000..10fdc572a11 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/force_close_three_node_preimage_async_zero_fee_commitments @@ -0,0 +1,4 @@ +<   + ! + '' ! 
+ Ðÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/force_close_three_node_preimage_keyed_anchors b/fuzz/test_cases/chanmon_consistency/force_close_three_node_preimage_keyed_anchors new file mode 100644 index 00000000000..d0d5126f551 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/force_close_three_node_preimage_keyed_anchors @@ -0,0 +1 @@ +<!''!Ðÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/force_close_three_node_preimage_zero_fee_commitments b/fuzz/test_cases/chanmon_consistency/force_close_three_node_preimage_zero_fee_commitments new file mode 100644 index 00000000000..50ba2ea9183 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/force_close_three_node_preimage_zero_fee_commitments @@ -0,0 +1 @@ +<!''!Ðÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/ldk_crash_channelmanager_19484 b/fuzz/test_cases/chanmon_consistency/ldk_crash_channelmanager_19484 new file mode 100644 index 00000000000..577f5f71cd0 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/ldk_crash_channelmanager_19484 @@ -0,0 +1 @@ +„pк \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/ldk_crash_channelmanager_9836 b/fuzz/test_cases/chanmon_consistency/ldk_crash_channelmanager_9836 new file mode 100644 index 0000000000000000000000000000000000000000..14487ba70cd7971f641f8961d0d9e4dd2362284f GIT binary patch literal 13 UcmZRu5tI~^msFHgSHJfk01M#*6#xJL literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/ldk_crash_channelmonitor_2727 b/fuzz/test_cases/chanmon_consistency/ldk_crash_channelmonitor_2727 new file mode 100644 index 00000000000..a1852414d9c --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/ldk_crash_channelmonitor_2727 @@ -0,0 +1 @@ +@ÿÿÿÿÿÿÿÿÿÿÿÿÿÜÞÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/ldk_crash_onchaintx_1025 b/fuzz/test_cases/chanmon_consistency/ldk_crash_onchaintx_1025 new file mode 100644 
index 0000000000000000000000000000000000000000..982ac748a974a353683dce6ccbfd5bc4b1b935f3 GIT binary patch literal 14 VcmZRu5tO`e@Be>EMag?={{bmn25A5Q literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/ldk_crash_onchaintx_913 b/fuzz/test_cases/chanmon_consistency/ldk_crash_onchaintx_913 new file mode 100644 index 00000000000..6c32f6a71aa --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/ldk_crash_onchaintx_913 @@ -0,0 +1 @@ +ÒppqpÞÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/ldk_crash_signer_395 b/fuzz/test_cases/chanmon_consistency/ldk_crash_signer_395 new file mode 100644 index 00000000000..2fb6ec95740 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/ldk_crash_signer_395 @@ -0,0 +1 @@ +"qqqqqqqÿÿÿÿÿÿÞÞµÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/timeout-0103befb3dc5aa050668752668d04e85bd1fc14e b/fuzz/test_cases/chanmon_consistency/timeout-0103befb3dc5aa050668752668d04e85bd1fc14e new file mode 100644 index 0000000000000000000000000000000000000000..f5c015c80af9ac48a7b07e6750ceb9a37f6bf2b2 GIT binary patch literal 24 gcmZRu5R_!F5tI~^msFHYxTnVOf6d0N_x}F}06Yx{od5s; literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/timeout-05fc1bb98f2a3b29e826a4de636474de0b23c895 b/fuzz/test_cases/chanmon_consistency/timeout-05fc1bb98f2a3b29e826a4de636474de0b23c895 new file mode 100644 index 0000000000000000000000000000000000000000..e63b29d1bd588160c7f213a43693cce2daca74dd GIT binary patch literal 25 YcmdXK2aVHS}C$T2~(EtCl|D*q-v9$l8g%2YD literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/timeout-87b55c5b37383fe43420089fd3e8ccecbb034b44 b/fuzz/test_cases/chanmon_consistency/timeout-87b55c5b37383fe43420089fd3e8ccecbb034b44 new file mode 100644 index 00000000000..a9600d0cbf4 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/timeout-87b55c5b37383fe43420089fd3e8ccecbb034b44 @@ -0,0 +1 @@ +@: !<:: !' 
!''Ú Þÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/timeout-885f446335ae279baed408d42af8c398dfdb8c9b b/fuzz/test_cases/chanmon_consistency/timeout-885f446335ae279baed408d42af8c398dfdb8c9b new file mode 100644 index 00000000000..b9ee3e25f13 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/timeout-885f446335ae279baed408d42af8c398dfdb8c9b @@ -0,0 +1 @@ +88888£'Ðÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/timeout-8a81e4c066465a2975ef22625c0b91da6332a2c8 b/fuzz/test_cases/chanmon_consistency/timeout-8a81e4c066465a2975ef22625c0b91da6332a2c8 new file mode 100644 index 0000000000000000000000000000000000000000..735c8da14eb48e8954d925975ba2a408f04c127e GIT binary patch literal 24 fcmdm@7{j^QF#cm literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/timeout-8f2aebf3aeeb70d8edd39a886e30beb770f3b42b b/fuzz/test_cases/chanmon_consistency/timeout-8f2aebf3aeeb70d8edd39a886e30beb770f3b42b new file mode 100644 index 0000000000000000000000000000000000000000..d20f7e163f8421195cbda7574a6d90d16792e144 GIT binary patch literal 23 dcmZRu5tI~^{|^L`it3W;>KAU@ymjy1e*kHe3vmDd literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/timeout-91d97d9eea2bd59f746681ad822488262e832ff1 b/fuzz/test_cases/chanmon_consistency/timeout-91d97d9eea2bd59f746681ad822488262e832ff1 new file mode 100644 index 0000000000000000000000000000000000000000..77a897d89de4a5e40c5c3a1035890a603fc64c3b GIT binary patch literal 26 acmZRu5tI~^{|^U}ijo&^+`O}5#eV?7*$|BY literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/timeout-95a90908391d3398084b77eb11ff5c9d7fdde008 b/fuzz/test_cases/chanmon_consistency/timeout-95a90908391d3398084b77eb11ff5c9d7fdde008 new file mode 100644 index 0000000000000000000000000000000000000000..d85e723aaa1295d624ed77e1a22fcfb8a6042a74 GIT binary patch literal 24 gcmZRu5R_y%At)&*FR3V*a8Hfl|C)_k@BRM|07h{MDF6Tf literal 0 HcmV?d00001 diff --git 
a/fuzz/test_cases/chanmon_consistency/timeout-a31cdfc423211489c841a6ddd067f9e6cf5bed4b b/fuzz/test_cases/chanmon_consistency/timeout-a31cdfc423211489c841a6ddd067f9e6cf5bed4b new file mode 100644 index 0000000000000000000000000000000000000000..d0e80d1f6497081170669bf2b7504fd06ab7f028 GIT binary patch literal 21 dcmZSJ6SR_-RFqU#zxD5)*Z=LCkM8ex2LMDL2;Bex literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/timeout-b1c4840ea1279dd8d6080d79373ae55bbcad3061 b/fuzz/test_cases/chanmon_consistency/timeout-b1c4840ea1279dd8d6080d79373ae55bbcad3061 new file mode 100644 index 0000000000000000000000000000000000000000..50a625cb0ed0e5bea991996d2115a46175e284f4 GIT binary patch literal 25 ccmZRu`TzgFiJ+vQyriO}`Yn-r|Jfh_0CJB9Hvj+t literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/timeout-b6ef84eec94d70bbc385c98c4ab0bac77da00a2f b/fuzz/test_cases/chanmon_consistency/timeout-b6ef84eec94d70bbc385c98c4ab0bac77da00a2f new file mode 100644 index 00000000000..59319574a0e --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/timeout-b6ef84eec94d70bbc385c98c4ab0bac77da00a2f @@ -0,0 +1 @@ +<: !''<:Ý !''ÚÞÞÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/timeout-bae8693182b102dfebab143a0f48992dad76245d b/fuzz/test_cases/chanmon_consistency/timeout-bae8693182b102dfebab143a0f48992dad76245d new file mode 100644 index 0000000000000000000000000000000000000000..db79f209c2272dcb30fa1f220e3371a6dde9c440 GIT binary patch literal 23 dcmZRu5tI~^{|^L`it3W;>KAUP-@142KLBPL3cmmV literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/timeout-bcab049322729e275e3bbdacebc633495da7643f b/fuzz/test_cases/chanmon_consistency/timeout-bcab049322729e275e3bbdacebc633495da7643f new file mode 100644 index 0000000000000000000000000000000000000000..d774ccb3b2d39456f76b9e4ffa3e96ed2bd46038 GIT binary patch literal 26 icmZRu5tI}Zw33%plvG#0bx+Z&;QyBGk{55>`wswBRtg^g literal 0 HcmV?d00001 diff --git 
a/fuzz/test_cases/chanmon_consistency/timeout-d5afdff02a253c9f2fbce95cbaf730eb210128fa b/fuzz/test_cases/chanmon_consistency/timeout-d5afdff02a253c9f2fbce95cbaf730eb210128fa new file mode 100644 index 00000000000..7b8cb7b07ab --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/timeout-d5afdff02a253c9f2fbce95cbaf730eb210128fa @@ -0,0 +1 @@ +888888'Ðÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/timeout-d6494f068fb2b2d31f1ac8627752692b3c8b7d2f b/fuzz/test_cases/chanmon_consistency/timeout-d6494f068fb2b2d31f1ac8627752692b3c8b7d2f new file mode 100644 index 00000000000..ee690802eff --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/timeout-d6494f068fb2b2d31f1ac8627752692b3c8b7d2f @@ -0,0 +1 @@ +<<!!RÑØÙÚÞÞÿ \ No newline at end of file diff --git a/fuzz/test_cases/chanmon_consistency/timeout-edd3f8168217501dd93f3c24d09c2c095cdf7784 b/fuzz/test_cases/chanmon_consistency/timeout-edd3f8168217501dd93f3c24d09c2c095cdf7784 new file mode 100644 index 0000000000000000000000000000000000000000..9d78147299487bab8c60a29b319b80926ca57ae8 GIT binary patch literal 14 VcmZRu5tI~^msFHgza?_-KL8LR1Ka=r literal 0 HcmV?d00001 diff --git a/fuzz/test_cases/chanmon_consistency/timeout-fcbcc131184e33d5b000820b0972f6197b0801d2 b/fuzz/test_cases/chanmon_consistency/timeout-fcbcc131184e33d5b000820b0972f6197b0801d2 new file mode 100644 index 00000000000..df65f08fe80 --- /dev/null +++ b/fuzz/test_cases/chanmon_consistency/timeout-fcbcc131184e33d5b000820b0972f6197b0801d2 @@ -0,0 +1 @@ +<: !''<: !''ÚÞÞÿ \ No newline at end of file From 71dd1534d141b0041dd0ab3db9896043a816a46a Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 21 Apr 2026 11:35:39 +0200 Subject: [PATCH 17/29] lightning: retain matured claim outpoints Remember claim outpoints after their spending transactions have reached the anti-reorg delay, even after normal claim tracking is cleaned up. 
This prevents later claims for newly learned preimages from attempting to spend outputs that were already irrevocably claimed on chain. --- lightning/src/chain/onchaintx.rs | 83 ++++++++++++++----- lightning/src/ln/chanmon_update_fail_tests.rs | 4 +- 2 files changed, 63 insertions(+), 24 deletions(-) diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs index 3eb6d64f3a2..37c6a06694a 100644 --- a/lightning/src/chain/onchaintx.rs +++ b/lightning/src/chain/onchaintx.rs @@ -273,6 +273,11 @@ pub struct OnchainTxHandler { #[cfg(not(any(test, feature = "_test_utils")))] claimable_outpoints: HashMap, + // Tracks outpoints whose claim tx has already reached [`ANTI_REORG_DELAY`] confirmations. + // Later claim requests for these outputs must be ignored, even if they arrive from newly + // learned preimages after the original claim tracking has been cleaned up. + irrevocably_spent_outpoints: HashSet, + #[cfg(any(test, feature = "_test_utils"))] pub(crate) locktimed_packages: BTreeMap>, #[cfg(not(any(test, feature = "_test_utils")))] @@ -297,6 +302,7 @@ impl PartialEq for OnchainTxHandler OnchainTxHandler { entry.write(writer)?; } - write_tlv_fields!(writer, {}); + let irrevocably_spent_outpoints = Some(self.irrevocably_spent_outpoints.clone()); + write_tlv_fields!(writer, { + (0, irrevocably_spent_outpoints, option), + }); Ok(()) } @@ -441,7 +450,10 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP } } - read_tlv_fields!(reader, {}); + let mut irrevocably_spent_outpoints = None; + read_tlv_fields!(reader, { + (0, irrevocably_spent_outpoints, option), + }); // `ChannelMonitor`s already track the `channel_id` and `counterparty_node_id`, however, due // to the deserialization order there we can't make use of `ReadableArgs` to hand them in @@ -465,6 +477,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP signer, channel_transaction_parameters: channel_parameters, claimable_outpoints, 
+ irrevocably_spent_outpoints: irrevocably_spent_outpoints.unwrap_or_else(new_hash_set), locktimed_packages, pending_claim_requests, onchain_events_awaiting_threshold_conf, @@ -493,6 +506,7 @@ impl OnchainTxHandler { channel_transaction_parameters: channel_parameters, pending_claim_requests: new_hash_map(), claimable_outpoints: new_hash_map(), + irrevocably_spent_outpoints: new_hash_set(), locktimed_packages: BTreeMap::new(), onchain_events_awaiting_threshold_conf: Vec::new(), pending_claim_events: Vec::new(), @@ -806,6 +820,14 @@ impl OnchainTxHandler { 1, "Claims passed to `update_claims_view_from_requests` should not be aggregated" ); + if req.outpoints() + .iter() + .any(|outpoint| self.irrevocably_spent_outpoints.contains(*outpoint)) + { + log_info!(logger, "Ignoring claim for outpoint {}:{}, it was already irrevocably spent by a confirmed claim transaction", + req.outpoints()[0].txid, req.outpoints()[0].vout); + false + } else { let mut all_outpoints_claiming = true; for outpoint in req.outpoints() { if self.claimable_outpoints.get(outpoint).is_none() { @@ -817,9 +839,16 @@ impl OnchainTxHandler { req.outpoints()[0].txid, req.outpoints()[0].vout); false } else { - let timelocked_equivalent_package = self.locktimed_packages.iter().map(|v| v.1.iter()).flatten() - .find(|locked_package| locked_package.outpoints() == req.outpoints()); - if let Some(package) = timelocked_equivalent_package { + let timelocked_covering_package = self + .locktimed_packages + .values() + .flat_map(|packages| packages.iter()) + .find(|locked_package| { + req.outpoints().iter().all(|outpoint| { + locked_package.outpoints().contains(outpoint) + }) + }); + if let Some(package) = timelocked_covering_package { log_info!(logger, "Ignoring second claim for outpoint {}:{}, we already have one which we're waiting on a timelock at {} for.", req.outpoints()[0].txid, req.outpoints()[0].vout, package.package_locktime(cur_height)); false @@ -827,6 +856,7 @@ impl OnchainTxHandler { true } } + } }); 
// Then try to maximally aggregate `requests`. @@ -895,26 +925,31 @@ impl OnchainTxHandler { } ClaimId(tx.0.compute_txid().to_byte_array()) }, - OnchainClaim::Event(claim_event) => { - log_info!(logger, "Yielding onchain event to spend inputs {:?}", req.outpoints()); - let claim_id = match claim_event { + OnchainClaim::Event(claim_event) => { + log_info!(logger, "Yielding onchain event to spend inputs {:?}", req.outpoints()); + let claim_id = match claim_event { ClaimEvent::BumpCommitment { ref commitment_tx, .. } => // For commitment claims, we can just use their txid as it should // already be unique. ClaimId(commitment_tx.compute_txid().to_byte_array()), - ClaimEvent::BumpHTLC { ref htlcs, .. } => { - // For HTLC claims, commit to the entire set of HTLC outputs to - // claim, which will always be unique per request. Once a claim ID - // is generated, it is assigned and remains unchanged, even if the - // underlying set of HTLCs changes. - ClaimId::from_htlcs(htlcs) - }, - }; - debug_assert!(self.pending_claim_requests.get(&claim_id).is_none()); - debug_assert_eq!(self.pending_claim_events.iter().filter(|entry| entry.0 == claim_id).count(), 0); - self.pending_claim_events.push((claim_id, claim_event)); - claim_id - }, + ClaimEvent::BumpHTLC { ref htlcs, .. } => { + // For HTLC claims, commit to the entire set of HTLC outputs to + // claim, which will always be unique per request. Once a claim ID + // is generated, it is assigned and remains unchanged, even if the + // underlying set of HTLCs changes. 
+ ClaimId::from_htlcs(htlcs) + }, + }; + debug_assert!(self.pending_claim_requests.get(&claim_id).is_none()); + #[cfg(debug_assertions)] { + let num_existing = self.pending_claim_events.iter() + .filter(|entry| entry.0 == claim_id).count(); + assert!(num_existing == 0 || num_existing == 1); + } + self.pending_claim_events.retain(|entry| entry.0 != claim_id); + self.pending_claim_events.push((claim_id, claim_event)); + claim_id + }, }; // Because fuzzing can cause hash collisions, we can end up with conflicting claim // ids here, so we only assert when not fuzzing. @@ -1064,6 +1099,7 @@ impl OnchainTxHandler { for outpoint in request.outpoints() { log_debug!(logger, "Removing claim tracking for {} due to maturation of claim package {}.", outpoint, log_bytes!(claim_id.0)); + self.irrevocably_spent_outpoints.insert(*outpoint); self.claimable_outpoints.remove(outpoint); } #[cfg(debug_assertions)] { @@ -1077,7 +1113,10 @@ impl OnchainTxHandler { OnchainEvent::ContentiousOutpoint { package } => { log_debug!(logger, "Removing claim tracking due to maturation of claim tx for outpoints:"); log_debug!(logger, " {:?}", package.outpoints()); - self.claimable_outpoints.remove(package.outpoints()[0]); + for outpoint in package.outpoints() { + self.irrevocably_spent_outpoints.insert(*outpoint); + self.claimable_outpoints.remove(outpoint); + } } } } else { diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index 9633800db08..b822020b950 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -58,7 +58,7 @@ fn test_monitor_and_persister_update_fail() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = 
nodes[0].node.get_our_node_id(); let node_b_id = nodes[1].node.get_our_node_id(); @@ -4609,7 +4609,7 @@ fn test_claim_to_closed_channel_blocks_claimed_event() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let legacy_cfg = test_legacy_channel_config(); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg), None]); - let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); let node_b_id = nodes[1].node.get_our_node_id(); From c6eeef227f00095b43b98495a92a8eb909a38cef Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 21 Apr 2026 11:35:48 +0200 Subject: [PATCH 18/29] lightning: relax fuzz signing expectations Allow fuzz builds to use cheaper ECDSA signatures and skip assertions that rely on production signature sizes. The fuzz signer intentionally trades realistic DER lengths for speed, so weight lower-bound checks must not assume low-R signatures there. 
--- lightning/src/crypto/utils.rs | 16 +++++++---- lightning/src/events/bump_transaction/mod.rs | 28 ++++++++++++-------- lightning/src/sign/mod.rs | 2 ++ 3 files changed, 30 insertions(+), 16 deletions(-) diff --git a/lightning/src/crypto/utils.rs b/lightning/src/crypto/utils.rs index 88911b0baf8..8b2737fa8e9 100644 --- a/lightning/src/crypto/utils.rs +++ b/lightning/src/crypto/utils.rs @@ -67,7 +67,7 @@ pub fn hkdf_extract_expand_7x( #[inline] pub fn sign(ctx: &Secp256k1, msg: &Message, sk: &SecretKey) -> Signature { #[cfg(feature = "grind_signatures")] - let sig = ctx.sign_ecdsa_low_r(msg, sk); + let sig = if cfg!(fuzzing) { ctx.sign_ecdsa(msg, sk) } else { ctx.sign_ecdsa_low_r(msg, sk) }; #[cfg(not(feature = "grind_signatures"))] let sig = ctx.sign_ecdsa(msg, sk); sig @@ -79,10 +79,16 @@ pub fn sign_with_aux_rand( ctx: &Secp256k1, msg: &Message, sk: &SecretKey, entropy_source: &ES, ) -> Signature { #[cfg(feature = "grind_signatures")] - let sig = loop { - let sig = ctx.sign_ecdsa_with_noncedata(msg, sk, &entropy_source.get_secure_random_bytes()); - if sig.serialize_compact()[0] < 0x80 { - break sig; + let sig = { + if cfg!(fuzzing) { + return sign(ctx, msg, sk); + } + loop { + let sig = + ctx.sign_ecdsa_with_noncedata(msg, sk, &entropy_source.get_secure_random_bytes()); + if sig.serialize_compact()[0] < 0x80 { + break sig; + } } }; #[cfg(all(not(feature = "grind_signatures"), not(ldk_test_vectors)))] diff --git a/lightning/src/events/bump_transaction/mod.rs b/lightning/src/events/bump_transaction/mod.rs index 6a5e9948653..22c70fd4d61 100644 --- a/lightning/src/events/bump_transaction/mod.rs +++ b/lightning/src/events/bump_transaction/mod.rs @@ -480,11 +480,15 @@ impl= signed_tx_weight); + // When fuzzing, signatures are trivially small so the actual weight can be + // significantly less than estimated. Skip the lower-bound check. 
+ #[cfg(not(fuzzing))] assert!(expected_signed_tx_weight * 99 / 100 <= signed_tx_weight); let expected_package_fee = Amount::from_sat(fee_for_weight( @@ -629,10 +633,10 @@ impl(); - #[cfg(debug_assertions)] + #[cfg(all(debug_assertions, not(fuzzing)))] let must_spend_amount = must_spend.iter().map(|input| input.previous_utxo.value.to_sat()).sum::(); @@ -663,13 +667,13 @@ impl= signed_tx_weight); + // When fuzzing, signatures are trivially small so the actual weight can be + // significantly less than estimated. Skip the lower-bound check. assert!(expected_signed_tx_weight * 98 / 100 <= signed_tx_weight); let expected_signed_tx_fee = diff --git a/lightning/src/sign/mod.rs b/lightning/src/sign/mod.rs index 3237149338b..272a1198d34 100644 --- a/lightning/src/sign/mod.rs +++ b/lightning/src/sign/mod.rs @@ -75,6 +75,8 @@ pub mod tx_builder; pub(crate) const COMPRESSED_PUBLIC_KEY_SIZE: usize = bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE; +// Standard low-S ECDSA signatures fit in the secp256k1 DER bound; the appended sighash byte +// replaces the extra DER padding byte that a high-S signature could require. pub(crate) const MAX_STANDARD_SIGNATURE_SIZE: usize = bitcoin::secp256k1::constants::MAX_SIGNATURE_SIZE; From 404c07930726ab1a523caab949b5afbb8564ea65 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 21 Apr 2026 12:09:07 +0200 Subject: [PATCH 19/29] fuzz: persist chanmon_consistency monitor state Replace the test persister with harness-owned monitor persistence that tracks completed and in-progress updates. This lets restarts select persisted or pending monitor snapshots and exercise reload behavior across asynchronous monitor update states. 
--- fuzz/src/chanmon_consistency.rs | 350 ++++++++++++++++++-------------- 1 file changed, 193 insertions(+), 157 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 8ca544294a7..9c6829da5f3 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -41,8 +41,7 @@ use lightning::chain; use lightning::chain::chaininterface::{ BroadcasterInterface, ConfirmationTarget, FeeEstimator, TransactionType, }; -use lightning::chain::channelmonitor::{ChannelMonitor, MonitorEvent}; -use lightning::chain::transaction::OutPoint; +use lightning::chain::channelmonitor::ChannelMonitor; use lightning::chain::{ chainmonitor, channelmonitor, BlockLocator, ChannelMonitorUpdateStatus, Confirm, Watch, }; @@ -87,7 +86,6 @@ use lightning::util::wallet_utils::{WalletSourceSync, WalletSync}; use lightning_invoice::RawBolt11Invoice; use crate::utils::test_logger::{self, Output}; -use crate::utils::test_persister::TestPersister; use bitcoin::secp256k1::ecdh::SharedSecret; use bitcoin::secp256k1::ecdsa::{RecoverableSignature, Signature}; @@ -104,6 +102,7 @@ use std::sync::atomic; use std::sync::{Arc, Mutex}; const MAX_FEE: u32 = 10_000; + struct FuzzEstimator { ret_val: atomic::AtomicU32, } @@ -293,6 +292,12 @@ impl Writer for VecWriter { } } +fn serialize_monitor(monitor: &ChannelMonitor) -> Vec { + let mut ser = VecWriter(Vec::new()); + monitor.write(&mut ser).unwrap(); + ser.0 +} + /// The LDK API requires that any time we tell it we're done persisting a `ChannelMonitor[Update]` /// we never pass it in as the "latest" `ChannelMonitor` on startup. 
However, we can pass /// out-of-date monitors as long as we never told LDK we finished persisting them, which we do by @@ -314,120 +319,154 @@ struct LatestMonitorState { pending_monitors: Vec<(u64, Vec)>, } +struct HarnessPersister { + pub update_ret: Mutex, + pub latest_monitors: Arc>>, +} +impl HarnessPersister { + fn track_monitor_update( + &self, channel_id: ChannelId, monitor_id: u64, serialized_monitor: Vec, + status: chain::ChannelMonitorUpdateStatus, + ) { + let mut latest_monitors = self.latest_monitors.lock().unwrap(); + if let Some(state) = latest_monitors.get_mut(&channel_id) { + match status { + chain::ChannelMonitorUpdateStatus::Completed => { + // Completing update N makes any older in-flight monitor blobs unusable on + // restart. A newer ChannelManager serialization will no longer advertise those + // earlier updates as blocked, so reloading them would violate the Watch API. + state.pending_monitors.retain(|(id, _)| *id > monitor_id); + state.persisted_monitor_id = monitor_id; + state.persisted_monitor = serialized_monitor; + }, + chain::ChannelMonitorUpdateStatus::InProgress => { + if let Some((_, pending_monitor)) = + state.pending_monitors.iter_mut().find(|(id, _)| *id == monitor_id) + { + *pending_monitor = serialized_monitor; + } else { + state.pending_monitors.push((monitor_id, serialized_monitor)); + state.pending_monitors.sort_by_key(|(id, _)| *id); + } + }, + chain::ChannelMonitorUpdateStatus::UnrecoverableError => {}, + } + } else { + let state = match status { + chain::ChannelMonitorUpdateStatus::Completed => LatestMonitorState { + persisted_monitor_id: monitor_id, + persisted_monitor: serialized_monitor, + pending_monitors: Vec::new(), + }, + chain::ChannelMonitorUpdateStatus::InProgress => LatestMonitorState { + persisted_monitor_id: monitor_id, + persisted_monitor: Vec::new(), + pending_monitors: vec![(monitor_id, serialized_monitor)], + }, + chain::ChannelMonitorUpdateStatus::UnrecoverableError => return, + }; + assert!( + 
latest_monitors.insert(channel_id, state).is_none(), + "Already had monitor state pre-persist" + ); + } + } + + fn mark_update_completed( + &self, channel_id: ChannelId, monitor_id: u64, serialized_monitor: Vec, + ) { + if let Some(state) = self.latest_monitors.lock().unwrap().get_mut(&channel_id) { + // Once LDK acknowledges update N as completed, any older pending monitor blob is fully + // superseded and must not be offered back on restart. + state.pending_monitors.retain(|(id, _)| *id > monitor_id); + if monitor_id >= state.persisted_monitor_id { + state.persisted_monitor_id = monitor_id; + state.persisted_monitor = serialized_monitor; + } + } + } +} +impl chainmonitor::Persist for HarnessPersister { + fn persist_new_channel( + &self, _monitor_name: lightning::util::persist::MonitorName, + data: &channelmonitor::ChannelMonitor, + ) -> chain::ChannelMonitorUpdateStatus { + let status = self.update_ret.lock().unwrap().clone(); + let monitor_id = data.get_latest_update_id(); + let serialized_monitor = serialize_monitor(data); + self.track_monitor_update(data.channel_id(), monitor_id, serialized_monitor, status); + status + } + + fn update_persisted_channel( + &self, _monitor_name: lightning::util::persist::MonitorName, + update: Option<&channelmonitor::ChannelMonitorUpdate>, + data: &channelmonitor::ChannelMonitor, + ) -> chain::ChannelMonitorUpdateStatus { + let status = self.update_ret.lock().unwrap().clone(); + let monitor_id = update.map_or_else(|| data.get_latest_update_id(), |upd| upd.update_id); + let serialized_monitor = serialize_monitor(data); + self.track_monitor_update(data.channel_id(), monitor_id, serialized_monitor, status); + status + } + + fn archive_persisted_channel(&self, _monitor_name: lightning::util::persist::MonitorName) {} +} + +type InnerChainMonitor = chainmonitor::ChainMonitor< + TestChannelSigner, + Arc, + Arc, + Arc, + Arc, + Arc, + Arc, +>; + struct TestChainMonitor { - pub logger: Arc, - pub keys: Arc, - pub persister: Arc, - pub 
chain_monitor: Arc< - chainmonitor::ChainMonitor< - TestChannelSigner, - Arc, - Arc, - Arc, - Arc, - Arc, - Arc, - >, - >, - pub latest_monitors: Mutex>, + pub persister: Arc, + pub chain_monitor: Arc, + pub latest_monitors: Arc>>, } + impl TestChainMonitor { pub fn new( broadcaster: Arc, logger: Arc, feeest: Arc, - persister: Arc, keys: Arc, + initial_update_ret: ChannelMonitorUpdateStatus, keys: Arc, ) -> Self { + let latest_monitors = Arc::new(Mutex::new(new_hash_map())); + let persister = Arc::new(HarnessPersister { + update_ret: Mutex::new(initial_update_ret), + latest_monitors: Arc::clone(&latest_monitors), + }); Self { chain_monitor: Arc::new(chainmonitor::ChainMonitor::new( None, broadcaster, - logger.clone(), + logger, feeest, Arc::clone(&persister), Arc::clone(&keys), keys.get_peer_storage_key(), false, )), - logger, - keys, persister, - latest_monitors: Mutex::new(new_hash_map()), + latest_monitors, } } -} -impl chain::Watch for TestChainMonitor { - fn watch_channel( - &self, channel_id: ChannelId, monitor: channelmonitor::ChannelMonitor, - ) -> Result { - let mut ser = VecWriter(Vec::new()); - monitor.write(&mut ser).unwrap(); - let monitor_id = monitor.get_latest_update_id(); - let res = self.chain_monitor.watch_channel(channel_id, monitor); - let state = match res { - Ok(chain::ChannelMonitorUpdateStatus::Completed) => LatestMonitorState { - persisted_monitor_id: monitor_id, - persisted_monitor: ser.0, - pending_monitors: Vec::new(), - }, - Ok(chain::ChannelMonitorUpdateStatus::InProgress) => LatestMonitorState { - persisted_monitor_id: monitor_id, - persisted_monitor: Vec::new(), - pending_monitors: vec![(monitor_id, ser.0)], - }, - Ok(chain::ChannelMonitorUpdateStatus::UnrecoverableError) => panic!(), - Err(()) => panic!(), - }; - if self.latest_monitors.lock().unwrap().insert(channel_id, state).is_some() { - panic!("Already had monitor pre-watch_channel"); - } - res - } - fn update_channel( - &self, channel_id: ChannelId, update: 
&channelmonitor::ChannelMonitorUpdate, - ) -> chain::ChannelMonitorUpdateStatus { - let mut map_lock = self.latest_monitors.lock().unwrap(); - let map_entry = map_lock.get_mut(&channel_id).expect("Didn't have monitor on update call"); - let latest_monitor_data = map_entry - .pending_monitors - .last() - .as_ref() - .map(|(_, data)| data) - .unwrap_or(&map_entry.persisted_monitor); - let deserialized_monitor = - <(BlockLocator, channelmonitor::ChannelMonitor)>::read( - &mut &latest_monitor_data[..], - (&*self.keys, &*self.keys), - ) - .unwrap() - .1; - deserialized_monitor - .update_monitor( - update, - &&TestBroadcaster { txn_broadcasted: RefCell::new(Vec::new()) }, - &&FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }, - &self.logger, - ) - .unwrap(); - let mut ser = VecWriter(Vec::new()); - deserialized_monitor.write(&mut ser).unwrap(); - let res = self.chain_monitor.update_channel(channel_id, update); - match res { - chain::ChannelMonitorUpdateStatus::Completed => { - map_entry.persisted_monitor_id = update.update_id; - map_entry.persisted_monitor = ser.0; - }, - chain::ChannelMonitorUpdateStatus::InProgress => { - map_entry.pending_monitors.push((update.update_id, ser.0)); - }, - chain::ChannelMonitorUpdateStatus::UnrecoverableError => panic!(), - } - res + fn mark_update_completed( + &self, channel_id: ChannelId, monitor_id: u64, serialized_monitor: Vec, + ) { + self.persister.mark_update_completed(channel_id, monitor_id, serialized_monitor); } +} - fn release_pending_monitor_events( - &self, - ) -> Vec<(OutPoint, ChannelId, Vec, PublicKey)> { - return self.chain_monitor.release_pending_monitor_events(); +impl std::ops::Deref for TestChainMonitor { + type Target = InnerChainMonitor; + + fn deref(&self) -> &Self::Target { + self.chain_monitor.as_ref() } } @@ -777,7 +816,6 @@ fn send_mpp_payment( if num_paths == 0 { return false; } - let amt_per_path = amt / num_paths as u64; let mut paths = Vec::with_capacity(num_paths); @@ -991,7 +1029,7 @@ impl<'a> 
HarnessNode<'a> { Arc::clone(broadcaster), logger_for_monitor, Arc::clone(fee_estimator), - Arc::new(TestPersister { update_ret: Mutex::new(persistence_style) }), + persistence_style, Arc::clone(keys_manager), )) } @@ -1060,64 +1098,73 @@ impl<'a> HarnessNode<'a> { self.persistence_style = style; } - fn complete_all_monitor_updates(&self, chan_id: &ChannelId) { - if let Some(state) = self.monitor.latest_monitors.lock().unwrap().get_mut(chan_id) { - assert!( - state.pending_monitors.windows(2).all(|pair| pair[0].0 < pair[1].0), - "updates should be sorted by id" - ); - for (id, data) in state.pending_monitors.drain(..) { - self.monitor.chain_monitor.channel_monitor_updated(*chan_id, id).unwrap(); - if id > state.persisted_monitor_id { - state.persisted_monitor_id = id; - state.persisted_monitor = data; - } + fn complete_all_monitor_updates(&self, chan_id: &ChannelId) -> bool { + let completed_updates = { + let mut latest_monitors = self.monitor.latest_monitors.lock().unwrap(); + if let Some(state) = latest_monitors.get_mut(chan_id) { + assert!( + state.pending_monitors.windows(2).all(|pair| pair[0].0 < pair[1].0), + "updates should be sorted by id" + ); + state.pending_monitors.drain(..).collect::>() + } else { + Vec::new() } + }; + let mut completed_any = false; + for (monitor_id, data) in completed_updates { + completed_any = true; + self.monitor.channel_monitor_updated(*chan_id, monitor_id).unwrap(); + self.monitor.mark_update_completed(*chan_id, monitor_id, data); } + completed_any } fn complete_all_pending_monitor_updates(&self) { + let mut completed_updates = Vec::new(); for (channel_id, state) in self.monitor.latest_monitors.lock().unwrap().iter_mut() { - for (id, data) in state.pending_monitors.drain(..) { - self.monitor.chain_monitor.channel_monitor_updated(*channel_id, id).unwrap(); - if id >= state.persisted_monitor_id { - state.persisted_monitor_id = id; - state.persisted_monitor = data; - } + for (monitor_id, data) in state.pending_monitors.drain(..) 
{ + completed_updates.push((*channel_id, monitor_id, data)); } } + for (channel_id, monitor_id, data) in completed_updates { + self.monitor.channel_monitor_updated(channel_id, monitor_id).unwrap(); + self.monitor.mark_update_completed(channel_id, monitor_id, data); + } } fn complete_monitor_update(&self, chan_id: &ChannelId, selector: MonitorUpdateSelector) { - if let Some(state) = self.monitor.latest_monitors.lock().unwrap().get_mut(chan_id) { - assert!( - state.pending_monitors.windows(2).all(|pair| pair[0].0 < pair[1].0), - "updates should be sorted by id" - ); - let update = match selector { - MonitorUpdateSelector::First => { - if state.pending_monitors.is_empty() { - None - } else { - Some(state.pending_monitors.remove(0)) - } - }, - MonitorUpdateSelector::Second => { - if state.pending_monitors.len() > 1 { - Some(state.pending_monitors.remove(1)) - } else { - None - } - }, - MonitorUpdateSelector::Last => state.pending_monitors.pop(), - }; - if let Some((id, data)) = update { - self.monitor.chain_monitor.channel_monitor_updated(*chan_id, id).unwrap(); - if id > state.persisted_monitor_id { - state.persisted_monitor_id = id; - state.persisted_monitor = data; + let completed_update = { + let mut latest_monitors = self.monitor.latest_monitors.lock().unwrap(); + if let Some(state) = latest_monitors.get_mut(chan_id) { + assert!( + state.pending_monitors.windows(2).all(|pair| pair[0].0 < pair[1].0), + "updates should be sorted by id" + ); + match selector { + MonitorUpdateSelector::First => { + if state.pending_monitors.is_empty() { + None + } else { + Some(state.pending_monitors.remove(0)) + } + }, + MonitorUpdateSelector::Second => { + if state.pending_monitors.len() > 1 { + Some(state.pending_monitors.remove(1)) + } else { + None + } + }, + MonitorUpdateSelector::Last => state.pending_monitors.pop(), } + } else { + None } + }; + if let Some((monitor_id, data)) = completed_update { + self.monitor.channel_monitor_updated(*chan_id, monitor_id).unwrap(); + 
self.monitor.mark_update_completed(*chan_id, monitor_id, data); } } @@ -1312,13 +1359,11 @@ impl<'a> HarnessNode<'a> { let manager = <(BlockLocator, ChanMan)>::read(&mut &self.serialized_manager[..], read_args) .expect("Failed to read manager"); + let expected_status = self.persistence_style; + *chain_monitor.persister.update_ret.lock().unwrap() = expected_status; for (channel_id, mon) in monitors.drain() { - assert_eq!( - chain_monitor.chain_monitor.watch_channel(channel_id, mon), - Ok(ChannelMonitorUpdateStatus::Completed) - ); + assert_eq!(chain_monitor.watch_channel(channel_id, mon), Ok(expected_status)); } - *chain_monitor.persister.update_ret.lock().unwrap() = self.persistence_style; self.node = manager.1; self.monitor = chain_monitor; self.logger = logger; @@ -1822,7 +1867,6 @@ fn assert_test_invariants(nodes: &[HarnessNode<'_>; 3]) { assert_eq!(nodes[0].node.list_channels().len(), 3); assert_eq!(nodes[1].node.list_channels().len(), 6); assert_eq!(nodes[2].node.list_channels().len(), 3); - // All broadcasters should be empty. Broadcast transactions are handled explicitly. assert!(nodes[0].broadcaster.txn_broadcasted.borrow().is_empty()); assert!(nodes[1].broadcaster.txn_broadcasted.borrow().is_empty()); @@ -2104,6 +2148,7 @@ impl<'a, 'd, Out: Output + MaybeSend + MaybeSync> Harness<'a, 'd, Out> { chan_type, ), ]; + let mut chain_state = ChainState::new(); // Connect peers first, then create channels. 
@@ -2796,7 +2841,6 @@ pub fn do_test(data: &[u8], out: Out) { 0x15 => { harness.process_msg_events(0, false, ProcessMessages::OnePendingMessage); }, - 0x16 => { harness.process_events(0, true); }, @@ -2822,7 +2866,6 @@ pub fn do_test(data: &[u8], out: Out) { 0x1d => { harness.process_msg_events(1, false, ProcessMessages::OnePendingMessage); }, - 0x1e => { harness.process_events(1, true); }, @@ -2848,7 +2891,6 @@ pub fn do_test(data: &[u8], out: Out) { 0x25 => { harness.process_msg_events(2, false, ProcessMessages::OnePendingMessage); }, - 0x26 => { harness.process_events(2, true); }, @@ -3078,7 +3120,6 @@ pub fn do_test(data: &[u8], out: Out) { let cp_node_id = harness.nodes[1].our_node_id(); harness.nodes[2].splice_in(&cp_node_id, &harness.chan_b_id()); }, - 0xa4 => { if !cfg!(splicing) { assert_test_invariants(&harness.nodes); @@ -3111,7 +3152,6 @@ pub fn do_test(data: &[u8], out: Out) { let cp_node_id = harness.nodes[1].our_node_id(); harness.nodes[2].splice_out(&cp_node_id, &harness.chan_b_id()); }, - // Sync node by 1 block to cover confirmation of a transaction. 0xa8 => { harness.chain_state.confirm_pending_txs(); @@ -3258,7 +3298,6 @@ pub fn do_test(data: &[u8], out: Out) { MonitorUpdateSelector::Last, ); }, - 0xf4 => { harness.ab_link.complete_monitor_updates_for_node( 1, @@ -3280,7 +3319,6 @@ pub fn do_test(data: &[u8], out: Out) { MonitorUpdateSelector::Last, ); }, - 0xf8 => { harness.bc_link.complete_monitor_updates_for_node( 1, @@ -3302,7 +3340,6 @@ pub fn do_test(data: &[u8], out: Out) { MonitorUpdateSelector::Last, ); }, - 0xfc => { harness.bc_link.complete_monitor_updates_for_node( 2, @@ -3324,7 +3361,6 @@ pub fn do_test(data: &[u8], out: Out) { MonitorUpdateSelector::Last, ); }, - 0xff => { // Test that no channel is in a stuck state where neither party can send funds even // after we resolve all pending events. 
From 1d04299505a8e72d5681da5613ad5ebc98cf98c3 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Wed, 29 Apr 2026 10:26:03 +0200 Subject: [PATCH 20/29] fuzz: track chanmon_consistency chain state Track confirmed UTXOs and reject transactions that spend missing, already spent, or still-timelocked outputs. The harness now models enough chain state to confirm funding, splice, and claim transactions without accepting impossible spends. --- fuzz/src/chanmon_consistency.rs | 176 ++++++++++++++++++++++++-------- 1 file changed, 131 insertions(+), 45 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 9c6829da5f3..4dba4301dbd 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -27,6 +27,7 @@ use bitcoin::script::{Builder, ScriptBuf}; use bitcoin::transaction::Version; use bitcoin::transaction::{Transaction, TxOut}; use bitcoin::FeeRate; +use bitcoin::OutPoint as BitcoinOutPoint; use bitcoin::block::Header; use bitcoin::hash_types::Txid; @@ -187,6 +188,11 @@ struct ChainState { /// Unconfirmed transactions (e.g., splice txs). Conflicting RBF candidates may coexist; /// `confirm_pending_txs` determines which one confirms. pending_txs: Vec<(Txid, Transaction)>, + /// Tracks unspent outputs created by confirmed transactions. Only + /// transactions that spend existing UTXOs can be confirmed, which + /// prevents fuzz hash collisions from creating phantom spends of + /// outputs that were never actually created. 
+ utxos: HashSet, } impl ChainState { @@ -197,6 +203,7 @@ impl ChainState { blocks: vec![(genesis_header, Vec::new())], confirmed_txids: HashSet::new(), pending_txs: Vec::new(), + utxos: HashSet::new(), } } @@ -204,21 +211,57 @@ impl ChainState { (self.blocks.len() - 1) as u32 } - fn is_outpoint_spent(&self, outpoint: &bitcoin::OutPoint) -> bool { - self.blocks.iter().any(|(_, txs)| { - txs.iter().any(|tx| tx.input.iter().any(|input| input.previous_output == *outpoint)) - }) + fn can_confirm_tx( + &self, tx: &Transaction, txid: Txid, utxos: &HashSet, + ) -> bool { + if self.confirmed_txids.contains(&txid) { + return false; + } + // Reject timelocked transactions before their lock_time, matching + // consensus rules. Commitment txs encode an obscured commitment + // number with bit 29 set, which is not a real timelock. + let lock_time = tx.lock_time.to_consensus_u32(); + if lock_time > 0 + && lock_time < 500_000_000 + && lock_time & (1 << 29) == 0 + && self.tip_height() < lock_time + { + return false; + } + // Validate that all inputs spend existing, unspent outputs. This + // rejects both double-spends and spends of outputs that were never + // created (e.g. due to fuzz txid hash collisions where a different + // transaction was confirmed under the same txid). 
+ let is_coinbase = tx.is_coinbase(); + if !is_coinbase { + for input in &tx.input { + if !utxos.contains(&input.previous_output) { + return false; + } + } + } + true + } + + fn apply_tx_to_utxos(txid: Txid, tx: &Transaction, utxos: &mut HashSet) { + let is_coinbase = tx.is_coinbase(); + if !is_coinbase { + for input in &tx.input { + utxos.remove(&input.previous_output); + } + } + for idx in 0..tx.output.len() { + utxos.insert(BitcoinOutPoint { txid, vout: idx as u32 }); + } } fn confirm_tx(&mut self, tx: Transaction) -> bool { let txid = tx.compute_txid(); - if self.confirmed_txids.contains(&txid) { - return false; - } - if tx.input.iter().any(|input| self.is_outpoint_spent(&input.previous_output)) { + if !self.can_confirm_tx(&tx, txid, &self.utxos) { return false; } self.confirmed_txids.insert(txid); + Self::apply_tx_to_utxos(txid, &tx, &mut self.utxos); let prev_hash = self.blocks.last().unwrap().0.block_hash(); let header = create_dummy_header(prev_hash, 42); @@ -246,21 +289,13 @@ impl ChainState { txs.sort_by_key(|(txid, _)| *txid); let mut confirmed = Vec::new(); - let mut spent_outpoints = Vec::new(); + let mut next_utxos = self.utxos.clone(); for (txid, tx) in txs { - if self.confirmed_txids.contains(&txid) { - continue; - } - if tx.input.iter().any(|input| { - self.is_outpoint_spent(&input.previous_output) - || spent_outpoints.contains(&input.previous_output) - }) { + if !self.can_confirm_tx(&tx, txid, &next_utxos) { continue; } self.confirmed_txids.insert(txid); - for input in &tx.input { - spent_outpoints.push(input.previous_output); - } + Self::apply_tx_to_utxos(txid, &tx, &mut next_utxos); confirmed.push(tx); } @@ -271,6 +306,7 @@ impl ChainState { let prev_hash = self.blocks.last().unwrap().0.block_hash(); let header = create_dummy_header(prev_hash, 42); self.blocks.push((header, confirmed)); + self.utxos = next_utxos; for _ in 0..5 { let prev_hash = self.blocks.last().unwrap().0.block_hash(); @@ -279,6 +315,14 @@ impl ChainState { } } + fn 
advance_height(&mut self, num_blocks: u32) { + for _ in 0..num_blocks { + let prev_hash = self.blocks.last().unwrap().0.block_hash(); + let header = create_dummy_header(prev_hash, 42); + self.blocks.push((header, Vec::new())); + } + } + fn block_at(&self, height: u32) -> &(Header, Vec) { &self.blocks[height as usize] } @@ -1176,12 +1220,31 @@ impl<'a> HarnessNode<'a> { }; while self.height < target_height { - self.height += 1; + let mut next_height = self.height + 1; + while next_height <= target_height && chain_state.block_at(next_height).1.is_empty() { + next_height += 1; + } + if next_height > target_height { + self.height = target_height; + let (header, _) = chain_state.block_at(self.height); + self.monitor.best_block_updated(header, self.height); + self.node.best_block_updated(header, self.height); + break; + } + if next_height > self.height + 1 { + self.height = next_height - 1; + let (header, _) = chain_state.block_at(self.height); + self.monitor.best_block_updated(header, self.height); + self.node.best_block_updated(header, self.height); + } + self.height = next_height; let (header, txn) = chain_state.block_at(self.height); let txdata: Vec<_> = txn.iter().enumerate().map(|(i, tx)| (i + 1, tx)).collect(); if !txdata.is_empty() { + self.monitor.transactions_confirmed(header, &txdata, self.height); self.node.transactions_confirmed(header, &txdata, self.height); } + self.monitor.best_block_updated(header, self.height); self.node.best_block_updated(header, self.height); } } @@ -2091,20 +2154,24 @@ impl<'a, 'd, Out: Output + MaybeSend + MaybeSync> Harness<'a, 'd, Out> { let wallet_b = TestWalletSource::new(SecretKey::from_slice(&[2; 32]).unwrap()); let wallet_c = TestWalletSource::new(SecretKey::from_slice(&[3; 32]).unwrap()); let wallets = [&wallet_a, &wallet_b, &wallet_c]; - let coinbase_tx = bitcoin::Transaction { - version: bitcoin::transaction::Version::TWO, - lock_time: bitcoin::absolute::LockTime::ZERO, - input: vec![bitcoin::TxIn { ..Default::default() 
}], - output: wallets - .iter() - .map(|wallet| TxOut { - value: Amount::from_sat(100_000), - script_pubkey: wallet.get_change_script().unwrap(), - }) - .collect(), - }; + let mut chain_state = ChainState::new(); + let num_wallet_utxos = 50; for (idx, wallet) in wallets.iter().enumerate() { - wallet.add_utxo(coinbase_tx.clone(), idx as u32); + let coinbase_tx = bitcoin::Transaction { + version: bitcoin::transaction::Version(idx as i32 + 100), + lock_time: bitcoin::absolute::LockTime::ZERO, + input: vec![bitcoin::TxIn { ..Default::default() }], + output: (0..num_wallet_utxos) + .map(|_| TxOut { + value: Amount::from_sat(100_000), + script_pubkey: wallet.get_change_script().unwrap(), + }) + .collect(), + }; + for vout in 0..num_wallet_utxos { + wallet.add_utxo(coinbase_tx.clone(), vout); + } + chain_state.confirm_tx(coinbase_tx); } let fee_est_a = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }); @@ -2149,8 +2216,6 @@ impl<'a, 'd, Out: Output + MaybeSend + MaybeSync> Harness<'a, 'd, Out> { ), ]; - let mut chain_state = ChainState::new(); - // Connect peers first, then create channels. connect_peers(&nodes[0].node, &nodes[1].node); connect_peers(&nodes[1].node, &nodes[2].node); @@ -2170,16 +2235,12 @@ impl<'a, 'd, Out: Output + MaybeSend + MaybeSync> Harness<'a, 'd, Out> { make_channel(&nodes[1], &nodes[2], 5, true, false, &mut chain_state); make_channel(&nodes[1], &nodes[2], 6, false, false, &mut chain_state); - // Wipe the transactions-broadcasted set to make sure we don't broadcast - // any transactions during normal operation after setup. - nodes[0].broadcaster.txn_broadcasted.borrow_mut().clear(); - nodes[1].broadcaster.txn_broadcasted.borrow_mut().clear(); - nodes[2].broadcaster.txn_broadcasted.borrow_mut().clear(); - - // Sync all nodes to tip to lock the funding. 
- nodes[0].sync_with_chain_state(&chain_state, None); - nodes[1].sync_with_chain_state(&chain_state, None); - nodes[2].sync_with_chain_state(&chain_state, None); + for node in &nodes { + node.broadcaster.txn_broadcasted.borrow_mut().clear(); + } + for node in &mut nodes { + node.sync_with_chain_state(&chain_state, None); + } lock_fundings(&nodes); @@ -2434,6 +2495,31 @@ impl<'a, 'd, Out: Output + MaybeSend + MaybeSync> Harness<'a, 'd, Out> { } } +fn sync_wallets_with_confirmed_tx(wallets: &[&TestWalletSource], tx: &Transaction) { + for wallet in wallets { + let change_script = wallet.get_change_script().unwrap(); + for input in &tx.input { + wallet.remove_utxo(input.previous_output); + } + for (vout, output) in tx.output.iter().enumerate() { + if output.script_pubkey == change_script { + wallet.add_utxo(tx.clone(), vout as u32); + } + } + } +} + +fn confirm_tx_and_sync_wallets( + chain_state: &mut ChainState, wallets: &[&TestWalletSource], tx: Transaction, +) -> bool { + if chain_state.confirm_tx(tx.clone()) { + sync_wallets_with_confirmed_tx(wallets, &tx); + true + } else { + false + } +} + fn process_msg_events_impl( node_idx: usize, corrupt_forward: bool, limit_events: ProcessMessages, nodes: &[HarnessNode<'_>; 3], out: &Out, queues: &mut EventQueues, From 44dabb3ad32a3d140ef979585a5683fae3d4c22c Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Wed, 29 Apr 2026 10:27:33 +0200 Subject: [PATCH 21/29] fuzz: handle chanmon_consistency control messages Teach the harness to deliver additional control and announcement messages emitted during reconnects and timer-driven state changes. This keeps delayed message handling from panicking on valid events that can be produced by the channel manager. 
--- fuzz/src/chanmon_consistency.rs | 78 +++++++++++++++++++++++---------- 1 file changed, 54 insertions(+), 24 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 4dba4301dbd..f30738c9570 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -1004,17 +1004,16 @@ fn send_mpp_hop_payment( #[inline] fn assert_action_timeout_awaiting_response(action: &msgs::ErrorAction) { - // Since sending/receiving messages may be delayed, `timer_tick_occurred` may cause a node to - // disconnect their counterparty if they're expecting a timely response. - assert!( - matches!( - action, - msgs::ErrorAction::DisconnectPeerWithWarning { msg } - if msg.data.contains("Disconnecting due to timeout awaiting response") - ), - "Expected timeout disconnect, got: {:?}", - action, - ); + // Since sending or receiving messages may be delayed, `timer_tick_occurred` may cause a node + // to disconnect their counterparty if they're expecting a timely response. We may also deliver + // the paired `error` message when one was generated alongside the disconnect. + match action { + msgs::ErrorAction::DisconnectPeerWithWarning { msg } + if msg.data.contains("Disconnecting due to timeout awaiting response") => {}, + msgs::ErrorAction::DisconnectPeer { .. } => {}, + msgs::ErrorAction::SendErrorMessage { .. } => {}, + _ => panic!("Unexpected HandleError action {:?}", action), + } } #[derive(Copy, Clone)] @@ -1529,7 +1528,9 @@ impl EventQueues { }, MessageSendEvent::SendChannelReady { .. } | MessageSendEvent::SendAnnouncementSignatures { .. } - | MessageSendEvent::BroadcastChannelUpdate { .. } => continue, + | MessageSendEvent::BroadcastChannelUpdate { .. } + | MessageSendEvent::BroadcastChannelAnnouncement { .. } + | MessageSendEvent::BroadcastNodeAnnouncement { .. } => continue, _ => panic!("Unhandled message event {:?}", event), }; if push_a { @@ -1566,6 +1567,8 @@ impl EventQueues { MessageSendEvent::SendChannelReady { .. 
} => {}, MessageSendEvent::SendAnnouncementSignatures { .. } => {}, MessageSendEvent::BroadcastChannelUpdate { .. } => {}, + MessageSendEvent::BroadcastChannelAnnouncement { .. } => {}, + MessageSendEvent::BroadcastNodeAnnouncement { .. } => {}, MessageSendEvent::SendChannelUpdate { .. } => {}, MessageSendEvent::HandleError { ref action, .. } => { assert_action_timeout_awaiting_response(action); @@ -1589,6 +1592,8 @@ impl EventQueues { MessageSendEvent::SendChannelReady { .. } => {}, MessageSendEvent::SendAnnouncementSignatures { .. } => {}, MessageSendEvent::BroadcastChannelUpdate { .. } => {}, + MessageSendEvent::BroadcastChannelAnnouncement { .. } => {}, + MessageSendEvent::BroadcastNodeAnnouncement { .. } => {}, MessageSendEvent::SendChannelUpdate { .. } => {}, MessageSendEvent::HandleError { ref action, .. } => { assert_action_timeout_awaiting_response(action); @@ -1908,6 +1913,7 @@ fn build_node_config(chan_type: ChanType) -> UserConfig { let mut config = UserConfig::default(); config.channel_config.forwarding_fee_proportional_millionths = 0; config.channel_handshake_config.announce_for_forwarding = true; + config.channel_handshake_limits.force_announced_channel_preference = false; config.reject_inbound_splices = false; match chan_type { ChanType::Legacy => { @@ -2105,7 +2111,7 @@ fn lock_fundings(nodes: &[HarnessNode<'_>; 3]) { } } } else { - panic!("Wrong event type"); + panic!("Wrong event type in first lock_fundings pass: {:?}", event); } } } @@ -2113,9 +2119,18 @@ fn lock_fundings(nodes: &[HarnessNode<'_>; 3]) { for node in nodes.iter() { let events = node.node.get_and_clear_pending_msg_events(); for event in events { - if let MessageSendEvent::SendAnnouncementSignatures { .. } = event { - } else { - panic!("Wrong event type"); + match event { + MessageSendEvent::SendAnnouncementSignatures { .. 
} => {}, + MessageSendEvent::SendChannelUpdate { ref node_id, ref msg } => { + for dest_node in nodes.iter() { + if dest_node.our_node_id() == *node_id { + dest_node.node.handle_channel_update(node.our_node_id(), msg); + } + } + }, + _ => { + panic!("Wrong event type in second lock_fundings pass: {:?}", event); + }, } } } @@ -2650,6 +2665,9 @@ fn process_msg_events_impl( None }, MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => { + if msg.next_local_commitment_number == 0 && msg.next_remote_commitment_number == 0 { + return None; + } let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "channel_reestablish"); nodes[dest_idx].node.handle_channel_reestablish(source_node_id, msg); @@ -2720,21 +2738,33 @@ fn process_msg_events_impl( nodes[dest_idx].node.handle_splice_locked(source_node_id, msg); None }, - MessageSendEvent::HandleError { ref action, .. } => { + MessageSendEvent::HandleError { ref action, ref node_id } => { assert_action_timeout_awaiting_response(action); + if let msgs::ErrorAction::SendErrorMessage { ref msg } = action { + let dest_idx = find_destination_node(nodes, node_id); + nodes[dest_idx].node.handle_error(source_node_id, msg); + } None }, - MessageSendEvent::SendChannelReady { .. } - | MessageSendEvent::SendAnnouncementSignatures { .. } - | MessageSendEvent::SendChannelUpdate { .. } => { - // Can be generated as a reestablish response. + MessageSendEvent::SendChannelReady { ref node_id, ref msg } => { + let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "channel_ready"); + nodes[dest_idx].node.handle_channel_ready(source_node_id, msg); + None + }, + MessageSendEvent::SendAnnouncementSignatures { ref node_id, ref msg } => { + let dest_idx = + log_peer_message(node_idx, node_id, nodes, out, "announcement_signatures"); + nodes[dest_idx].node.handle_announcement_signatures(source_node_id, msg); None }, - MessageSendEvent::BroadcastChannelUpdate { .. 
} => { - // Can be generated as a result of calling `timer_tick_occurred` enough - // times while peers are disconnected. + MessageSendEvent::SendChannelUpdate { ref node_id, ref msg } => { + let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "channel_update"); + nodes[dest_idx].node.handle_channel_update(source_node_id, msg); None }, + MessageSendEvent::BroadcastChannelUpdate { .. } => None, + MessageSendEvent::BroadcastChannelAnnouncement { .. } => None, + MessageSendEvent::BroadcastNodeAnnouncement { .. } => None, _ => panic!("Unhandled message event {:?}", event), } } From 8195087188d2327823a330107df06a3c4cfd9add Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Wed, 29 Apr 2026 10:28:51 +0200 Subject: [PATCH 22/29] fuzz: relax chanmon_consistency closure assumptions Stop treating every channel close or broadcast transaction as an immediate invariant failure. Later commits add explicit force-close coverage, so the baseline harness must allow channels and broadcaster queues to reflect closure progress. --- fuzz/src/chanmon_consistency.rs | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index f30738c9570..67826271cb7 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -15,8 +15,7 @@ //! actions such as sending payments, handling events, or changing monitor update return values on //! a per-node basis. This should allow it to find any cases where the ordering of actions results //! in us getting out of sync with ourselves, and, assuming at least one of our recieve- or -//! send-side handling is correct, other peers. We consider it a failure if any action results in a -//! channel being force-closed. +//! send-side handling is correct, other peers. 
use bitcoin::amount::Amount; use bitcoin::constants::genesis_block; @@ -645,8 +644,6 @@ impl SignerProvider for KeyProvider { } } -// Since this fuzzer is only concerned with live-channel operations, we don't need to worry about -// any signer operations that come after a force close. const SUPPORTED_SIGNER_OPS: [SignerOp; 3] = [ SignerOp::SignCounterpartyCommitment, SignerOp::GetPerCommitmentPoint, @@ -1933,13 +1930,12 @@ fn build_node_config(chan_type: ChanType) -> UserConfig { } fn assert_test_invariants(nodes: &[HarnessNode<'_>; 3]) { - assert_eq!(nodes[0].node.list_channels().len(), 3); - assert_eq!(nodes[1].node.list_channels().len(), 6); - assert_eq!(nodes[2].node.list_channels().len(), 3); - // All broadcasters should be empty. Broadcast transactions are handled explicitly. - assert!(nodes[0].broadcaster.txn_broadcasted.borrow().is_empty()); - assert!(nodes[1].broadcaster.txn_broadcasted.borrow().is_empty()); - assert!(nodes[2].broadcaster.txn_broadcasted.borrow().is_empty()); + assert!(nodes[0].node.list_channels().len() <= 3); + assert!(nodes[1].node.list_channels().len() <= 6); + assert!(nodes[2].node.list_channels().len() <= 3); + for node in nodes { + node.broadcaster.txn_broadcasted.borrow_mut().clear(); + } } fn connect_peers(source: &ChanMan<'_>, dest: &ChanMan<'_>) { From 017ed6d06cf65aca6c0bd175a40f45dc7d8545f9 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Wed, 29 Apr 2026 10:41:14 +0200 Subject: [PATCH 23/29] fuzz: sync chanmon_consistency pending tx wallets Return the transactions confirmed from the pending pool and apply their effects to the harness wallets. This keeps wallet UTXO state aligned with the fake chain when splice or other pending transactions are mined by fuzz input. 
--- fuzz/src/chanmon_consistency.rs | 29 +++++++++++++++++++++-------- 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 67826271cb7..03fd9945d6b 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -283,7 +283,7 @@ impl ChainState { /// Confirm pending transactions in a single block, selecting deterministically among /// conflicting RBF candidates. Sorting by txid ensures the winner is determined by fuzz input /// content. Transactions that double-spend an already-confirmed outpoint are skipped. - fn confirm_pending_txs(&mut self) { + fn confirm_pending_txs(&mut self) -> Vec { let mut txs = std::mem::take(&mut self.pending_txs); txs.sort_by_key(|(txid, _)| *txid); @@ -299,11 +299,12 @@ impl ChainState { } if confirmed.is_empty() { - return; + return Vec::new(); } let prev_hash = self.blocks.last().unwrap().0.block_hash(); let header = create_dummy_header(prev_hash, 42); + let confirmed_txs = confirmed.clone(); self.blocks.push((header, confirmed)); self.utxos = next_utxos; @@ -312,6 +313,7 @@ impl ChainState { let header = create_dummy_header(prev_hash, 42); self.blocks.push((header, Vec::new())); } + confirmed_txs } fn advance_height(&mut self, num_blocks: u32) { @@ -2504,6 +2506,17 @@ impl<'a, 'd, Out: Output + MaybeSend + MaybeSync> Harness<'a, 'd, Out> { node.refresh_serialized_manager(); } } + + fn confirm_pending_txs_and_sync_wallets(&mut self) -> bool { + let confirmed_txs = self.chain_state.confirm_pending_txs(); + for tx in &confirmed_txs { + sync_wallets_with_confirmed_tx( + [&self.nodes[0].wallet, &self.nodes[1].wallet, &self.nodes[2].wallet].as_slice(), + tx, + ); + } + !confirmed_txs.is_empty() + } } fn sync_wallets_with_confirmed_tx(wallets: &[&TestWalletSource], tx: &Transaction) { @@ -3266,28 +3279,28 @@ pub fn do_test(data: &[u8], out: Out) { }, // Sync node by 1 block to cover confirmation of a transaction. 
0xa8 => { - harness.chain_state.confirm_pending_txs(); + harness.confirm_pending_txs_and_sync_wallets(); harness.nodes[0].sync_with_chain_state(&harness.chain_state, Some(1)); }, 0xa9 => { - harness.chain_state.confirm_pending_txs(); + harness.confirm_pending_txs_and_sync_wallets(); harness.nodes[1].sync_with_chain_state(&harness.chain_state, Some(1)); }, 0xaa => { - harness.chain_state.confirm_pending_txs(); + harness.confirm_pending_txs_and_sync_wallets(); harness.nodes[2].sync_with_chain_state(&harness.chain_state, Some(1)); }, // Sync node to chain tip to cover confirmation of a transaction post-reorg-risk. 0xab => { - harness.chain_state.confirm_pending_txs(); + harness.confirm_pending_txs_and_sync_wallets(); harness.nodes[0].sync_with_chain_state(&harness.chain_state, None); }, 0xac => { - harness.chain_state.confirm_pending_txs(); + harness.confirm_pending_txs_and_sync_wallets(); harness.nodes[1].sync_with_chain_state(&harness.chain_state, None); }, 0xad => { - harness.chain_state.confirm_pending_txs(); + harness.confirm_pending_txs_and_sync_wallets(); harness.nodes[2].sync_with_chain_state(&harness.chain_state, None); }, From 600cd90abee3dff35a24d50af827398e056034d3 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Wed, 29 Apr 2026 10:41:43 +0200 Subject: [PATCH 24/29] fuzz: regularize chanmon_consistency signer controls Map the primary signer-unblock opcodes so nodes 0, 1, and 2 each get distinct controls for counterparty commitment signing, per-commitment points, and commitment secret release. Make node B's primary signer-unblock controls retry all pending channels once an operation is available, while keeping the older channel-specific release-secret controls for now. This avoids duplicated node coverage and makes the byte controls easier to reason about in reduced test cases. 
--- fuzz/src/chanmon_consistency.rs | 40 +++++++++++++++------------------ 1 file changed, 18 insertions(+), 22 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 03fd9945d6b..127ae865889 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -3333,54 +3333,50 @@ pub fn do_test(data: &[u8], out: Out) { harness.nodes[1] .keys_manager .enable_op_for_all_signers(SignerOp::SignCounterpartyCommitment); - let filter = Some((harness.nodes[0].our_node_id(), harness.chan_a_id())); - harness.nodes[1].node.signer_unblocked(filter); + harness.nodes[1].node.signer_unblocked(None); }, 0xc5 => { - harness.nodes[1] - .keys_manager - .enable_op_for_all_signers(SignerOp::SignCounterpartyCommitment); - let filter = Some((harness.nodes[2].our_node_id(), harness.chan_b_id())); - harness.nodes[1].node.signer_unblocked(filter); - }, - 0xc6 => { harness.nodes[2] .keys_manager .enable_op_for_all_signers(SignerOp::SignCounterpartyCommitment); harness.nodes[2].node.signer_unblocked(None); }, - 0xc7 => { + 0xc6 => { harness.nodes[0] .keys_manager .enable_op_for_all_signers(SignerOp::GetPerCommitmentPoint); harness.nodes[0].node.signer_unblocked(None); }, - 0xc8 => { - harness.nodes[1] - .keys_manager - .enable_op_for_all_signers(SignerOp::GetPerCommitmentPoint); - let filter = Some((harness.nodes[0].our_node_id(), harness.chan_a_id())); - harness.nodes[1].node.signer_unblocked(filter); - }, - 0xc9 => { + 0xc7 => { harness.nodes[1] .keys_manager .enable_op_for_all_signers(SignerOp::GetPerCommitmentPoint); - let filter = Some((harness.nodes[2].our_node_id(), harness.chan_b_id())); - harness.nodes[1].node.signer_unblocked(filter); + harness.nodes[1].node.signer_unblocked(None); }, - 0xca => { + 0xc8 => { harness.nodes[2] .keys_manager .enable_op_for_all_signers(SignerOp::GetPerCommitmentPoint); harness.nodes[2].node.signer_unblocked(None); }, - 0xcb => { + 0xc9 => { harness.nodes[0] .keys_manager 
.enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); harness.nodes[0].node.signer_unblocked(None); }, + 0xca => { + harness.nodes[1] + .keys_manager + .enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); + harness.nodes[1].node.signer_unblocked(None); + }, + 0xcb => { + harness.nodes[2] + .keys_manager + .enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); + harness.nodes[2].node.signer_unblocked(None); + }, 0xcc => { harness.nodes[1] .keys_manager From 60799690cfdb9a028d01d2ccac7cf9906b9f4176 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Wed, 29 Apr 2026 10:42:35 +0200 Subject: [PATCH 25/29] fuzz: tolerate chanmon_consistency on-chain events Make event processing robust to splice, close, spendable-output, and bump-transaction events that can arise during on-chain cleanup. Splice pending handling now finds the matching broadcast transaction by txid instead of assuming queue order. --- fuzz/src/chanmon_consistency.rs | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 127ae865889..672d322bc4f 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -2867,18 +2867,21 @@ fn process_events_impl( .unwrap(); }, events::Event::SplicePending { new_funding_txo, .. } => { - let mut txs = nodes[node_idx].broadcaster.txn_broadcasted.borrow_mut(); - assert!(txs.len() >= 1); - let splice_tx = txs.remove(0); - assert_eq!(new_funding_txo.txid, splice_tx.compute_txid()); - chain_state.add_pending_tx(splice_tx); + if !chain_state.confirmed_txids.contains(&new_funding_txo.txid) { + let mut txs = nodes[node_idx].broadcaster.txn_broadcasted.borrow_mut(); + if let Some(pos) = + txs.iter().position(|tx| new_funding_txo.txid == tx.compute_txid()) + { + let splice_tx = txs.remove(pos); + chain_state.add_pending_tx(splice_tx); + } + } }, events::Event::SpliceFailed { .. 
} => {}, - events::Event::DiscardFunding { - funding_info: - events::FundingInfo::Contribution { .. } | events::FundingInfo::Tx { .. }, - .. - } => {}, + events::Event::ChannelClosed { .. } => {}, + events::Event::DiscardFunding { .. } => {}, + events::Event::SpendableOutputs { .. } => {}, + events::Event::BumpTransaction(..) => {}, _ => panic!("Unhandled event"), } } From 4d14751ba69769fbd4001ba73af64ce49cf64e36 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Wed, 29 Apr 2026 10:44:40 +0200 Subject: [PATCH 26/29] fuzz: drive chanmon_consistency broadcast cleanup Add cleanup helpers and fuzz opcodes for monitor bump events, broadcast confirmation, careful chain advancement, and node resyncs. The all-events loop now advances messages, node events, monitor events, pending transactions, and broadcasts until the harness quiesces. --- fuzz/src/chanmon_consistency.rs | 484 +++++++++++++++++++++++++++++--- 1 file changed, 446 insertions(+), 38 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 672d322bc4f..814298a1eeb 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -41,11 +41,11 @@ use lightning::chain; use lightning::chain::chaininterface::{ BroadcasterInterface, ConfirmationTarget, FeeEstimator, TransactionType, }; -use lightning::chain::channelmonitor::ChannelMonitor; +use lightning::chain::channelmonitor::{Balance, ChannelMonitor}; use lightning::chain::{ chainmonitor, channelmonitor, BlockLocator, ChannelMonitorUpdateStatus, Confirm, Watch, }; -use lightning::events; +use lightning::events::{self, EventsProvider}; use lightning::ln::channel::{ FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS, }; @@ -83,6 +83,8 @@ use lightning::util::test_channel_signer::{EnforcementState, SignerOp, TestChann use lightning::util::test_utils::TestWalletSource; use lightning::util::wallet_utils::{WalletSourceSync, WalletSync}; +use 
lightning::events::bump_transaction::sync::BumpTransactionEventHandlerSync; + use lightning_invoice::RawBolt11Invoice; use crate::utils::test_logger::{self, Output}; @@ -932,8 +934,8 @@ fn send_mpp_hop_payment( .iter() .find(|chan| chan.channel_id == *chan_id) .and_then(|chan| chan.short_channel_id) - .unwrap() }) + .map(Option::unwrap) .collect(); let dest_chans = dest.list_channels(); @@ -944,8 +946,8 @@ fn send_mpp_hop_payment( .iter() .find(|chan| chan.channel_id == *chan_id) .and_then(|chan| chan.short_channel_id) - .unwrap() }) + .map(Option::unwrap) .collect(); for i in 0..num_paths { @@ -1740,7 +1742,9 @@ impl PaymentTracker { payment_preimages: new_hash_map(), } } +} +impl PaymentTracker { fn next_payment(&mut self, dest: &ChanMan) -> (PaymentSecret, PaymentHash, PaymentId) { let (secret, hash) = get_payment_secret_hash(dest, &mut self.payment_ctr, &mut self.payment_preimages); @@ -2370,51 +2374,41 @@ impl<'a, 'd, Out: Output + MaybeSend + MaybeSync> Harness<'a, 'd, Out> { } fn process_all_events(&mut self) { + let mut settled = false; let mut last_pass_no_updates = false; - for i in 0..std::usize::MAX { - if i == 100 { - panic!( - "It may take may iterations to settle the state, but it should not take forever" - ); - } - // First, make sure no monitor updates are pending. - self.ab_link.complete_all_monitor_updates(&self.nodes); - self.bc_link.complete_all_monitor_updates(&self.nodes); - // Then, make sure any current forwards make their way to their destination. - if self.process_msg_events(0, false, ProcessMessages::AllMessages) { - last_pass_no_updates = false; - continue; - } - if self.process_msg_events(1, false, ProcessMessages::AllMessages) { - last_pass_no_updates = false; - continue; - } - if self.process_msg_events(2, false, ProcessMessages::AllMessages) { - last_pass_no_updates = false; - continue; - } - // Finally, make sure any payments are claimed. 
- if self.process_events(0, false) { - last_pass_no_updates = false; - continue; + for settle_iter in 0..100 { + let completed_monitor_update = self.complete_pending_monitor_updates(); + let mut had_msg_or_ev = false; + for node_idx in 0..3 { + if self.process_msg_events(node_idx, false, ProcessMessages::AllMessages) { + had_msg_or_ev = true; + } } - if self.process_events(1, false) { - last_pass_no_updates = false; - continue; + for node_idx in 0..3 { + if self.process_events(node_idx, false) { + had_msg_or_ev = true; + } } - if self.process_events(2, false) { + let had_pending_txs = self.confirm_pending_txs_and_sync_wallets(); + self.sync_all_nodes_with_chain_state(); + self.process_monitor_pending_events(); + let had_new_txs = self + .drain_and_confirm_broadcast_transactions("process_all_events", Some(settle_iter)); + if completed_monitor_update || had_new_txs || had_msg_or_ev || had_pending_txs { last_pass_no_updates = false; continue; } if last_pass_no_updates { - // In some cases, `process_msg_events` may generate a message to send, but block - // sending until `complete_all_monitor_updates` gets called on the next iteration. - // Thus, we only exit if we manage two iterations with no messages or events to - // process. + settled = true; break; } last_pass_no_updates = true; } + assert!( + settled, + "process_all_events exceeded settle budget: {}", + self.pending_work_summary(), + ); } fn disconnect_ab(&mut self) { @@ -2507,6 +2501,26 @@ impl<'a, 'd, Out: Output + MaybeSend + MaybeSync> Harness<'a, 'd, Out> { } } + fn confirm_broadcasts_for_node(&mut self, node_idx: usize) { + let txs = self.nodes[node_idx] + .broadcaster + .txn_broadcasted + .borrow_mut() + .drain(..) 
+ .collect::>(); + for tx in txs { + self.confirm_tx_and_sync_wallets(tx); + } + } + + fn confirm_tx_and_sync_wallets(&mut self, tx: Transaction) -> bool { + confirm_tx_and_sync_wallets( + &mut self.chain_state, + [&self.nodes[0].wallet, &self.nodes[1].wallet, &self.nodes[2].wallet].as_slice(), + tx, + ) + } + fn confirm_pending_txs_and_sync_wallets(&mut self) -> bool { let confirmed_txs = self.chain_state.confirm_pending_txs(); for tx in &confirmed_txs { @@ -2517,6 +2531,362 @@ impl<'a, 'd, Out: Output + MaybeSend + MaybeSync> Harness<'a, 'd, Out> { } !confirmed_txs.is_empty() } + + fn open_channels(&self) -> Vec { + self.nodes[0] + .node + .list_channels() + .iter() + .chain(self.nodes[1].node.list_channels().iter()) + .chain(self.nodes[2].node.list_channels().iter()) + .cloned() + .collect::>() + } + + fn has_pending_monitor_updates(&self) -> bool { + self.nodes.iter().any(|node| { + node.monitor + .latest_monitors + .lock() + .unwrap() + .values() + .any(|state| !state.pending_monitors.is_empty()) + }) + } + + fn has_time_dependent_work(&self) -> bool { + let open_channels = self.open_channels(); + let open_refs: Vec<_> = open_channels.iter().collect(); + self.nodes.iter().any(|node| { + node.monitor.get_claimable_balances(&open_refs).iter().any(|balance| { + matches!( + balance, + Balance::ClaimableOnChannelClose { .. } + | Balance::ClaimableAwaitingConfirmations { .. } + | Balance::ContentiousClaimable { .. } + | Balance::MaybeTimeoutClaimableHTLC { .. } + | Balance::MaybePreimageClaimableHTLC { .. } + | Balance::CounterpartyRevokedOutputClaimable { .. 
} + ) + }) + }) + } + + fn has_pending_work(&self) -> bool { + !self.queues.ab.is_empty() + || !self.queues.ba.is_empty() + || !self.queues.bc.is_empty() + || !self.queues.cb.is_empty() + || !self.chain_state.pending_txs.is_empty() + || self.nodes.iter().any(|node| !node.broadcaster.txn_broadcasted.borrow().is_empty()) + || self.has_pending_monitor_updates() + || self.has_time_dependent_work() + } + + fn pending_work_summary(&self) -> String { + let open_channels = self.open_channels(); + let open_refs: Vec<_> = open_channels.iter().collect(); + let balances_a = self.nodes[0].monitor.get_claimable_balances(&open_refs); + let balances_b = self.nodes[1].monitor.get_claimable_balances(&open_refs); + let balances_c = self.nodes[2].monitor.get_claimable_balances(&open_refs); + let pending_payments = &self.payments.pending_payments; + format!( + "queues ab={} ba={} bc={} cb={} pending_txs={} bcast=({},{},{}) pending=({},{},{}) monitor_updates={} timed_work={} heights=({},{},{}) tip={} balances_a=[{}] balances_b=[{}] balances_c=[{}]", + self.queues.ab.len(), + self.queues.ba.len(), + self.queues.bc.len(), + self.queues.cb.len(), + self.chain_state.pending_txs.len(), + self.nodes[0].broadcaster.txn_broadcasted.borrow().len(), + self.nodes[1].broadcaster.txn_broadcasted.borrow().len(), + self.nodes[2].broadcaster.txn_broadcasted.borrow().len(), + pending_payments[0].len(), + pending_payments[1].len(), + pending_payments[2].len(), + self.has_pending_monitor_updates(), + self.has_time_dependent_work(), + self.nodes[0].height, + self.nodes[1].height, + self.nodes[2].height, + self.chain_state.tip_height(), + summarize_balances(&balances_a), + summarize_balances(&balances_b), + summarize_balances(&balances_c), + ) + } + + fn complete_pending_monitor_updates(&self) -> bool { + let mut completed_monitor_update = false; + for id in self.ab_link.channel_ids() { + completed_monitor_update |= self.nodes[0].complete_all_monitor_updates(id); + completed_monitor_update |= 
self.nodes[1].complete_all_monitor_updates(id); + } + for id in self.bc_link.channel_ids() { + completed_monitor_update |= self.nodes[1].complete_all_monitor_updates(id); + completed_monitor_update |= self.nodes[2].complete_all_monitor_updates(id); + } + completed_monitor_update + } + + fn sync_all_nodes_with_chain_state(&mut self) { + let chain_state = &self.chain_state; + for node in &mut self.nodes { + node.sync_with_chain_state(chain_state, None); + } + } + + fn process_monitor_pending_events(&self) { + for node in &self.nodes { + let logger = Arc::clone(&node.logger); + let wallet = WalletSync::new(&node.wallet, Arc::clone(&logger)); + let handler = BumpTransactionEventHandlerSync::new( + node.broadcaster.as_ref(), + &wallet, + node.keys_manager.as_ref(), + Arc::clone(&logger), + ); + let broadcaster = &node.broadcaster; + node.monitor.process_pending_events(&|event: events::Event| { + if let events::Event::BumpTransaction(ref bump) = event { + match bump { + events::bump_transaction::BumpTransactionEvent::ChannelClose { + commitment_tx, + channel_id, + counterparty_node_id, + .. + } => { + broadcaster.broadcast_transactions(&[( + commitment_tx, + lightning::chain::chaininterface::TransactionType::UnilateralClose { + counterparty_node_id: *counterparty_node_id, + channel_id: *channel_id, + }, + )]); + }, + events::bump_transaction::BumpTransactionEvent::HTLCResolution { + .. + } => { + handler.handle_event(bump); + }, + } + } + Ok(()) + }); + } + } + + fn drain_and_confirm_broadcast_transactions( + &mut self, context: &str, settle_iter: Option, + ) -> bool { + let mut had_new_txs = false; + for confirm_iter in 0..32 { + let mut found = false; + let mut pending_txs = Vec::new(); + for node in &self.nodes { + for tx in node.broadcaster.txn_broadcasted.borrow_mut().drain(..) 
{ + pending_txs.push(tx); + } + } + pending_txs.sort_by_key(|tx| tx.lock_time.to_consensus_u32()); + let mut deferred_txs = pending_txs; + loop { + let mut next_deferred_txs = Vec::new(); + let mut progressed = false; + for tx in deferred_txs { + if self.confirm_tx_and_sync_wallets(tx.clone()) { + found = true; + progressed = true; + } else { + next_deferred_txs.push(tx); + } + } + if !progressed { + deferred_txs = next_deferred_txs + .into_iter() + .filter(|tx| should_retry_confirm_later(&self.chain_state, tx)) + .collect(); + break; + } + deferred_txs = next_deferred_txs; + } + if !deferred_txs.is_empty() { + self.nodes[0].broadcaster.txn_broadcasted.borrow_mut().extend(deferred_txs); + } + if !found { + break; + } + let quiesce_context = match settle_iter { + Some(iter) => format!( + "{context} tx confirmation loop failed to quiesce at settle iter {iter}: {}", + self.pending_work_summary(), + ), + None => format!( + "{context} tx confirmation loop failed to quiesce: {}", + self.pending_work_summary(), + ), + }; + assert!(confirm_iter < 31, "{quiesce_context}"); + had_new_txs = true; + self.sync_all_nodes_with_chain_state(); + } + had_new_txs + } + + fn progress_round(&mut self) -> bool { + let completed_monitor_update = self.complete_pending_monitor_updates(); + let mut had_msg_or_ev = false; + for node_idx in 0..3 { + if self.process_msg_events(node_idx, false, ProcessMessages::AllMessages) { + had_msg_or_ev = true; + } + } + for node_idx in 0..3 { + if self.process_events(node_idx, false) { + had_msg_or_ev = true; + } + } + let had_pending_txs = self.confirm_pending_txs_and_sync_wallets(); + self.sync_all_nodes_with_chain_state(); + self.process_monitor_pending_events(); + let had_new_txs = self.drain_and_confirm_broadcast_transactions("flush_progress", None); + completed_monitor_update || had_new_txs || had_msg_or_ev || had_pending_txs + } + + fn flush_progress(&mut self, max_iters: usize) { + let mut last_pass_no_updates = false; + for _ in 0..max_iters { + 
if self.progress_round() { + last_pass_no_updates = false; + continue; + } + if last_pass_no_updates { + break; + } + last_pass_no_updates = true; + } + let pending_work = self.has_pending_work(); + let summary = self.pending_work_summary(); + assert!( + !pending_work || last_pass_no_updates, + "flush_progress exhausted {max_iters} iterations without quiescing: {summary}", + ); + assert!( + !pending_work || !last_pass_no_updates || max_iters > 0, + "flush_progress made no progress: {summary}", + ); + } + + fn advance_chain_carefully(&mut self, num_blocks: u32) { + for _ in 0..num_blocks { + self.flush_progress(32); + if !self.has_pending_work() { + break; + } + self.chain_state.advance_height(1); + self.flush_progress(32); + if !self.has_pending_work() { + break; + } + } + } + + fn catch_up_raw_monitors(&self) { + for node in &self.nodes { + let mut min_monitor_height = node.height; + for chan_id in node.monitor.list_monitors() { + if let Ok(mon) = node.monitor.get_monitor(chan_id) { + min_monitor_height = + std::cmp::min(min_monitor_height, mon.current_best_block().height); + } + } + let mut h = min_monitor_height; + while h < node.height { + let mut next_height = h + 1; + while next_height <= node.height + && self.chain_state.block_at(next_height).1.is_empty() + { + next_height += 1; + } + if next_height > node.height { + h = node.height; + let (header, _) = self.chain_state.block_at(h); + node.monitor.best_block_updated(header, h); + break; + } + if next_height > h + 1 { + h = next_height - 1; + let (header, _) = self.chain_state.block_at(h); + node.monitor.best_block_updated(header, h); + } + h = next_height; + let (header, txn) = self.chain_state.block_at(h); + let txdata: Vec<_> = txn.iter().enumerate().map(|(i, tx)| (i + 1, tx)).collect(); + if !txdata.is_empty() { + node.monitor.transactions_confirmed(header, &txdata, h); + } + node.monitor.best_block_updated(header, h); + } + } + } + + fn process_messages_and_events_only(&mut self) { + let mut settled = 
false; + let mut last_pass_no_updates = false; + for _ in 0..100 { + let completed_monitor_update = self.complete_pending_monitor_updates(); + let mut had_msg_or_ev = false; + for node_idx in 0..3 { + if self.process_msg_events(node_idx, false, ProcessMessages::AllMessages) { + had_msg_or_ev = true; + } + } + for node_idx in 0..3 { + if self.process_events(node_idx, false) { + had_msg_or_ev = true; + } + } + if completed_monitor_update || had_msg_or_ev { + last_pass_no_updates = false; + continue; + } + if last_pass_no_updates { + settled = true; + break; + } + last_pass_no_updates = true; + } + assert!(settled, "message-only settle exceeded budget: {}", self.pending_work_summary(),); + } + + fn probe_amount_for_direction( + &self, source_idx: usize, dest_chan_id: ChannelId, + ) -> Option { + self.nodes[source_idx] + .node + .list_usable_channels() + .iter() + .find(|chan| chan.channel_id == dest_chan_id) + .and_then(|chan| { + let probe_amt = cmp::max( + cmp::min(10_000_000, chan.next_outbound_htlc_limit_msat), + chan.next_outbound_htlc_minimum_msat, + ); + if probe_amt == 0 || probe_amt > chan.next_outbound_htlc_limit_msat { + None + } else { + Some(probe_amt) + } + }) + } + + fn can_send_after_settle( + &mut self, source_idx: usize, dest_idx: usize, dest_chan_id: ChannelId, + ) -> bool { + let Some(amt) = self.probe_amount_for_direction(source_idx, dest_chan_id) else { + return false; + }; + self.send_direct(source_idx, dest_idx, dest_chan_id, amt) + } } fn sync_wallets_with_confirmed_tx(wallets: &[&TestWalletSource], tx: &Transaction) { @@ -2891,6 +3261,36 @@ fn process_events_impl( had_events } +fn summarize_balances(balances: &[Balance]) -> String { + let mut on_close = 0; + let mut awaiting = 0; + let mut contentious = 0; + let mut maybe_timeout = 0; + let mut maybe_preimage = 0; + let mut revoked = 0; + for balance in balances { + match balance { + Balance::ClaimableOnChannelClose { .. } => on_close += 1, + Balance::ClaimableAwaitingConfirmations { .. 
} => awaiting += 1, + Balance::ContentiousClaimable { .. } => contentious += 1, + Balance::MaybeTimeoutClaimableHTLC { .. } => maybe_timeout += 1, + Balance::MaybePreimageClaimableHTLC { .. } => maybe_preimage += 1, + Balance::CounterpartyRevokedOutputClaimable { .. } => revoked += 1, + } + } + format!( + "on_close={on_close} awaiting={awaiting} contentious={contentious} maybe_timeout={maybe_timeout} maybe_preimage={maybe_preimage} revoked={revoked}" + ) +} + +fn should_retry_confirm_later(chain_state: &ChainState, tx: &Transaction) -> bool { + let lock_time = tx.lock_time.to_consensus_u32(); + lock_time > 0 + && lock_time < 500_000_000 + && lock_time & (1 << 29) == 0 + && chain_state.tip_height() < lock_time +} + #[inline] pub fn do_test(data: &[u8], out: Out) { let router = FuzzRouter {}; @@ -3401,6 +3801,14 @@ pub fn do_test(data: &[u8], out: Out) { harness.nodes[2].node.signer_unblocked(None); }, + 0xd8 => harness.confirm_broadcasts_for_node(0), + 0xd9 => harness.confirm_broadcasts_for_node(1), + 0xda => harness.confirm_broadcasts_for_node(2), + + 0xdc => harness.advance_chain_carefully(50), + 0xdd => harness.advance_chain_carefully(100), + 0xde => harness.advance_chain_carefully(200), + 0xf0 => { harness.ab_link.complete_monitor_updates_for_node( 0, From 27f08f1db5b92d3831212389b968665ff9abb9cf Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Wed, 29 Apr 2026 10:45:17 +0200 Subject: [PATCH 27/29] fuzz: settle chanmon_consistency cleanup work Before final assertions, catch raw monitors up to node height and drive timer ticks plus block advancement until pending work clears. The final liveness probe now uses each channel's advertised sendable range instead of a fixed amount that may be outside its limits. 
--- fuzz/src/chanmon_consistency.rs | 60 +++++++++++++++++++++++++-------- 1 file changed, 46 insertions(+), 14 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 814298a1eeb..d53ad3ecd68 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -2458,6 +2458,18 @@ impl<'a, 'd, Out: Output + MaybeSend + MaybeSync> Harness<'a, 'd, Out> { self.nodes[1].node.signer_unblocked(None); self.nodes[2].node.signer_unblocked(None); + let has_stale_raw_monitors = self.nodes.iter().any(|node| { + node.monitor.list_monitors().into_iter().any(|chan_id| { + node.monitor + .get_monitor(chan_id) + .map(|mon| mon.current_best_block().height < node.height) + .unwrap_or(false) + }) + }); + if has_stale_raw_monitors { + self.process_messages_and_events_only(); + self.catch_up_raw_monitors(); + } self.process_all_events(); // Since MPP payments are supported, we wait until we fully settle the state of all @@ -2468,26 +2480,46 @@ impl<'a, 'd, Out: Output + MaybeSend + MaybeSync> Harness<'a, 'd, Out> { } self.process_all_events(); - // Verify no payments are stuck, all should have resolved. + for _ in 0..4096 { + self.flush_progress(32); + if !self.has_pending_work() { + break; + } + for node in self.nodes.iter() { + node.node.timer_tick_occurred(); + } + self.flush_progress(32); + if !self.has_pending_work() { + break; + } + self.chain_state.advance_height(1); + self.flush_progress(32); + if !self.has_pending_work() { + break; + } + } + self.payments.assert_all_resolved(); // Verify that every payment claimed by a receiver resulted in a PaymentSent event at // the sender. self.payments.assert_claims_reported(); - // Finally, make sure that at least one end of each channel can make a substantial payment. 
- let chan_ab_ids = self.ab_link.channel_ids().clone(); - let chan_bc_ids = self.bc_link.channel_ids().clone(); - for chan_id in chan_ab_ids { - assert!( - self.send_direct(0, 1, chan_id, 10_000_000) - || self.send_direct(1, 0, chan_id, 10_000_000) - ); + self.ab_link.complete_all_monitor_updates(&self.nodes); + self.bc_link.complete_all_monitor_updates(&self.nodes); + + for chan_id in *self.ab_link.channel_ids() { + if self.probe_amount_for_direction(0, chan_id).is_some() { + assert!(self.can_send_after_settle(0, 1, chan_id)); + } else if self.probe_amount_for_direction(1, chan_id).is_some() { + assert!(self.can_send_after_settle(1, 0, chan_id)); + } } - for chan_id in chan_bc_ids { - assert!( - self.send_direct(1, 2, chan_id, 10_000_000) - || self.send_direct(2, 1, chan_id, 10_000_000) - ); + for chan_id in *self.bc_link.channel_ids() { + if self.probe_amount_for_direction(1, chan_id).is_some() { + assert!(self.can_send_after_settle(1, 2, chan_id)); + } else if self.probe_amount_for_direction(2, chan_id).is_some() { + assert!(self.can_send_after_settle(2, 1, chan_id)); + } } self.nodes[0].record_last_htlc_clear_fee(); From ec2cdfbfa5d17735bd74487a3f06b70379676f3f Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 21 Apr 2026 12:34:14 +0200 Subject: [PATCH 28/29] fuzz: track chanmon_consistency payment resolution Track payment hashes, paths, claims, sender outcomes, and closed channels so settle-all can distinguish unresolved work from valid force-close outcomes. This lets the harness accept sender failure for claimed dust paths while still asserting that observable payment lifecycles complete. 
--- fuzz/src/chanmon_consistency.rs | 525 ++++++++++++++++++++++++-------- 1 file changed, 402 insertions(+), 123 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index d53ad3ecd68..236d8c1af50 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -716,7 +716,7 @@ type ChanMan<'a> = ChannelManager< #[inline] fn get_payment_secret_hash( dest: &ChanMan, payment_ctr: &mut u64, - payment_preimages: &mut HashMap, + payment_preimages: &RefCell>, ) -> (PaymentSecret, PaymentHash) { *payment_ctr += 1; let mut payment_preimage = PaymentPreimage([0; 32]); @@ -725,7 +725,7 @@ fn get_payment_secret_hash( let payment_secret = dest .create_inbound_payment_for_hash(payment_hash, None, 3600, None) .expect("create_inbound_payment_for_hash failed"); - assert!(payment_preimages.insert(payment_hash, payment_preimage).is_none()); + assert!(payment_preimages.borrow_mut().insert(payment_hash, payment_preimage).is_none()); (payment_secret, payment_hash) } @@ -857,23 +857,25 @@ fn send_mpp_payment( source: &ChanMan, dest: &ChanMan, dest_chan_ids: &[ChannelId], amt: u64, payment_secret: PaymentSecret, payment_hash: PaymentHash, payment_id: PaymentId, ) -> bool { - let num_paths = dest_chan_ids.len(); + let mut paths = Vec::new(); + + let dest_chans = dest.list_channels(); + let dest_scids: Vec<_> = dest_chan_ids + .iter() + .filter_map(|chan_id| { + dest_chans + .iter() + .find(|chan| chan.channel_id == *chan_id) + .and_then(|chan| chan.short_channel_id) + }) + .collect(); + let num_paths = dest_scids.len(); if num_paths == 0 { return false; } let amt_per_path = amt / num_paths as u64; - let mut paths = Vec::with_capacity(num_paths); - - let dest_chans = dest.list_channels(); - let dest_scids = dest_chan_ids.iter().map(|chan_id| { - dest_chans - .iter() - .find(|chan| chan.channel_id == *chan_id) - .and_then(|chan| chan.short_channel_id) - .unwrap() - }); - for (i, dest_scid) in dest_scids.enumerate() { + for (i, 
dest_scid) in dest_scids.into_iter().enumerate() { let path_amt = if i == num_paths - 1 { amt - amt_per_path * (num_paths as u64 - 1) } else { @@ -915,41 +917,38 @@ fn send_mpp_hop_payment( dest_chan_ids: &[ChannelId], amt: u64, payment_secret: PaymentSecret, payment_hash: PaymentHash, payment_id: PaymentId, ) -> bool { - // Create paths by pairing middle_scids with dest_scids - let num_paths = middle_chan_ids.len().max(dest_chan_ids.len()); - if num_paths == 0 { - return false; - } - - let first_hop_fee = 50_000; - let amt_per_path = amt / num_paths as u64; - let fee_per_path = first_hop_fee / num_paths as u64; - let mut paths = Vec::with_capacity(num_paths); - let middle_chans = middle.list_channels(); let middle_scids: Vec<_> = middle_chan_ids .iter() - .map(|chan_id| { + .filter_map(|chan_id| { middle_chans .iter() .find(|chan| chan.channel_id == *chan_id) .and_then(|chan| chan.short_channel_id) }) - .map(Option::unwrap) .collect(); let dest_chans = dest.list_channels(); let dest_scids: Vec<_> = dest_chan_ids .iter() - .map(|chan_id| { + .filter_map(|chan_id| { dest_chans .iter() .find(|chan| chan.channel_id == *chan_id) .and_then(|chan| chan.short_channel_id) }) - .map(Option::unwrap) .collect(); + let num_paths = middle_scids.len().max(dest_scids.len()); + if middle_scids.is_empty() || dest_scids.is_empty() { + return false; + } + + let first_hop_fee = 50_000; + let amt_per_path = amt / num_paths as u64; + let fee_per_path = first_hop_fee / num_paths as u64; + let mut paths = Vec::with_capacity(num_paths); + for i in 0..num_paths { let middle_scid = middle_scids[i % middle_scids.len()]; let dest_scid = dest_scids[i % dest_scids.len()]; @@ -1726,28 +1725,107 @@ enum MonitorUpdateSelector { struct PaymentTracker { payment_ctr: u64, - pending_payments: [Vec; 3], - resolved_payments: [HashMap>; 3], - claimed_payment_hashes: HashSet, - payment_preimages: HashMap, + pending_payments: RefCell<[Vec; 3]>, + resolved_payment_ids: RefCell<[HashSet; 3]>, + 
claimed_payment_hashes: RefCell>, + receiver_claimed_payment_hashes: RefCell>, + sender_sent_payment_hashes: RefCell>, + sender_failed_payment_hashes: RefCell>, + payment_hashes_by_id: RefCell>, + payment_paths_by_hash: RefCell>>>, + blocked_dust_paths_by_hash: RefCell>>, + payment_preimages: RefCell>, + closed_channels: RefCell>, } impl PaymentTracker { fn new() -> Self { Self { payment_ctr: 0, - pending_payments: [Vec::new(), Vec::new(), Vec::new()], - resolved_payments: [new_hash_map(), new_hash_map(), new_hash_map()], - claimed_payment_hashes: HashSet::new(), - payment_preimages: new_hash_map(), + pending_payments: RefCell::new([Vec::new(), Vec::new(), Vec::new()]), + resolved_payment_ids: RefCell::new([HashSet::new(), HashSet::new(), HashSet::new()]), + claimed_payment_hashes: RefCell::new(HashSet::new()), + receiver_claimed_payment_hashes: RefCell::new(HashSet::new()), + sender_sent_payment_hashes: RefCell::new(HashSet::new()), + sender_failed_payment_hashes: RefCell::new(HashSet::new()), + payment_hashes_by_id: RefCell::new(new_hash_map()), + payment_paths_by_hash: RefCell::new(new_hash_map()), + blocked_dust_paths_by_hash: RefCell::new(new_hash_map()), + payment_preimages: RefCell::new(new_hash_map()), + closed_channels: RefCell::new(HashSet::new()), } } + + fn register_payment( + &self, source_idx: usize, payment_id: PaymentId, payment_hash: PaymentHash, + payment_paths: Vec>, + ) { + assert!( + self.payment_hashes_by_id.borrow_mut().insert(payment_id, payment_hash).is_none(), + "duplicate payment_id {:?}", + payment_id + ); + assert!( + self.payment_paths_by_hash.borrow_mut().insert(payment_hash, payment_paths).is_none(), + "duplicate payment_hash {:?}", + payment_hash + ); + self.pending_payments.borrow_mut()[source_idx].push(payment_id); + } + + fn claim_allows_sender_failure(&self, hash: &PaymentHash) -> bool { + self.blocked_dust_paths_by_hash + .borrow() + .get(hash) + .is_some_and(|blocked_paths| !blocked_paths.is_empty()) + } + + fn 
summarize_claim_tracking(&self) -> String { + let claim_requested = self.claimed_payment_hashes.borrow(); + let receiver_claimed = self.receiver_claimed_payment_hashes.borrow(); + let sender_sent = self.sender_sent_payment_hashes.borrow(); + let sender_failed = self.sender_failed_payment_hashes.borrow(); + let failure_allowed_count = + claim_requested.iter().filter(|hash| self.claim_allows_sender_failure(hash)).count(); + let missing_receiver = + claim_requested.iter().filter(|hash| !receiver_claimed.contains(*hash)).count(); + let missing_sender = claim_requested + .iter() + .filter(|hash| !sender_sent.contains(*hash) && !sender_failed.contains(*hash)) + .count(); + format!( + "claims requested={} receiver_claimed={} sender_sent={} sender_failed={} failure_allowed={} missing_receiver={} missing_sender={}", + claim_requested.len(), + receiver_claimed.len(), + sender_sent.len(), + sender_failed.len(), + failure_allowed_count, + missing_receiver, + missing_sender, + ) + } + + fn has_unfinished_claims(&self) -> bool { + let claim_requested = self.claimed_payment_hashes.borrow(); + let receiver_claimed = self.receiver_claimed_payment_hashes.borrow(); + let sender_sent = self.sender_sent_payment_hashes.borrow(); + let sender_failed = self.sender_failed_payment_hashes.borrow(); + claim_requested.iter().any(|hash| { + !receiver_claimed.contains(hash) + || (!sender_sent.contains(hash) && !sender_failed.contains(hash)) + }) + } + + fn has_live_payment_work(&self) -> bool { + self.pending_payments.borrow().iter().any(|payments| !payments.is_empty()) + || self.has_unfinished_claims() + } } impl PaymentTracker { fn next_payment(&mut self, dest: &ChanMan) -> (PaymentSecret, PaymentHash, PaymentId) { let (secret, hash) = - get_payment_secret_hash(dest, &mut self.payment_ctr, &mut self.payment_preimages); + get_payment_secret_hash(dest, &mut self.payment_ctr, &self.payment_preimages); let mut id = PaymentId([0; 32]); id.0[0..8].copy_from_slice(&self.payment_ctr.to_ne_bytes()); 
(secret, hash, id) @@ -1757,12 +1835,15 @@ impl PaymentTracker { &mut self, nodes: &[HarnessNode<'_>; 3], source_idx: usize, dest_idx: usize, dest_chan_id: ChannelId, amt: u64, ) -> bool { + if self.closed_channels.borrow().contains(&dest_chan_id) { + return false; + } let source = &nodes[source_idx].node; let dest = &nodes[dest_idx].node; let (secret, hash, id) = self.next_payment(dest); let succeeded = send_payment(source, dest, dest_chan_id, amt, secret, hash, id); if succeeded { - self.pending_payments[source_idx].push(id); + self.register_payment(source_idx, id, hash, vec![vec![(dest_chan_id, amt)]]); } succeeded } @@ -1771,10 +1852,16 @@ impl PaymentTracker { &mut self, nodes: &[HarnessNode<'_>; 3], source_idx: usize, middle_idx: usize, middle_chan_id: ChannelId, dest_idx: usize, dest_chan_id: ChannelId, amt: u64, ) { + let closed_channels = self.closed_channels.borrow(); + if closed_channels.contains(&middle_chan_id) || closed_channels.contains(&dest_chan_id) { + return; + } + drop(closed_channels); let source = &nodes[source_idx].node; let middle = &nodes[middle_idx].node; let dest = &nodes[dest_idx].node; let (secret, hash, id) = self.next_payment(dest); + let first_hop_fee = 50_000; let succeeded = send_hop_payment( source, middle, @@ -1787,7 +1874,12 @@ impl PaymentTracker { id, ); if succeeded { - self.pending_payments[source_idx].push(id); + self.register_payment( + source_idx, + id, + hash, + vec![vec![(middle_chan_id, amt + first_hop_fee), (dest_chan_id, amt)]], + ); } } @@ -1795,12 +1887,38 @@ impl PaymentTracker { &mut self, nodes: &[HarnessNode<'_>; 3], source_idx: usize, dest_idx: usize, dest_chan_ids: &[ChannelId], amt: u64, ) { + let live_dest_chan_ids = { + let closed_channels = self.closed_channels.borrow(); + dest_chan_ids + .iter() + .copied() + .filter(|chan_id| !closed_channels.contains(chan_id)) + .collect::>() + }; + if live_dest_chan_ids.is_empty() { + return; + } let source = &nodes[source_idx].node; let dest = &nodes[dest_idx].node; 
let (secret, hash, id) = self.next_payment(dest); - let succeeded = send_mpp_payment(source, dest, dest_chan_ids, amt, secret, hash, id); + let succeeded = send_mpp_payment(source, dest, &live_dest_chan_ids, amt, secret, hash, id); if succeeded { - self.pending_payments[source_idx].push(id); + let num_paths = live_dest_chan_ids.len(); + let amt_per_path = amt / num_paths as u64; + let payment_paths = live_dest_chan_ids + .iter() + .copied() + .enumerate() + .map(|(i, chan_id)| { + let path_amt = if i == num_paths - 1 { + amt - amt_per_path * (num_paths as u64 - 1) + } else { + amt_per_path + }; + vec![(chan_id, path_amt)] + }) + .collect(); + self.register_payment(source_idx, id, hash, payment_paths); } } @@ -1808,23 +1926,62 @@ impl PaymentTracker { &mut self, nodes: &[HarnessNode<'_>; 3], source_idx: usize, middle_idx: usize, middle_chan_ids: &[ChannelId], dest_idx: usize, dest_chan_ids: &[ChannelId], amt: u64, ) { + let (live_middle_chan_ids, live_dest_chan_ids) = { + let closed_channels = self.closed_channels.borrow(); + ( + middle_chan_ids + .iter() + .copied() + .filter(|chan_id| !closed_channels.contains(chan_id)) + .collect::>(), + dest_chan_ids + .iter() + .copied() + .filter(|chan_id| !closed_channels.contains(chan_id)) + .collect::>(), + ) + }; + if live_middle_chan_ids.is_empty() || live_dest_chan_ids.is_empty() { + return; + } let source = &nodes[source_idx].node; let middle = &nodes[middle_idx].node; let dest = &nodes[dest_idx].node; let (secret, hash, id) = self.next_payment(dest); + let num_paths = live_middle_chan_ids.len().max(live_dest_chan_ids.len()); + let first_hop_fee = 50_000; + let amt_per_path = amt / num_paths as u64; + let fee_per_path = first_hop_fee / num_paths as u64; let succeeded = send_mpp_hop_payment( source, middle, - middle_chan_ids, + &live_middle_chan_ids, dest, - dest_chan_ids, + &live_dest_chan_ids, amt, secret, hash, id, ); if succeeded { - self.pending_payments[source_idx].push(id); + let payment_paths = (0..num_paths) + 
.map(|i| { + let middle_chan_id = live_middle_chan_ids[i % live_middle_chan_ids.len()]; + let dest_chan_id = live_dest_chan_ids[i % live_dest_chan_ids.len()]; + let path_amt = if i == num_paths - 1 { + amt - amt_per_path * (num_paths as u64 - 1) + } else { + amt_per_path + }; + let path_fee = if i == num_paths - 1 { + first_hop_fee - fee_per_path * (num_paths as u64 - 1) + } else { + fee_per_path + }; + vec![(middle_chan_id, path_amt + path_fee), (dest_chan_id, path_amt)] + }) + .collect(); + self.register_payment(source_idx, id, hash, payment_paths); } } @@ -1834,67 +1991,55 @@ impl PaymentTracker { } else { let payment_preimage = *self .payment_preimages + .borrow() .get(&payment_hash) .expect("PaymentClaimable for unknown payment hash"); node.node.claim_funds(payment_preimage); - self.claimed_payment_hashes.insert(payment_hash); + self.claimed_payment_hashes.borrow_mut().insert(payment_hash); } } fn mark_sent(&mut self, node_idx: usize, sent_id: PaymentId, payment_hash: PaymentHash) { - let idx_opt = self.pending_payments[node_idx].iter().position(|id| *id == sent_id); - if let Some(idx) = idx_opt { - self.pending_payments[node_idx].remove(idx); - self.resolved_payments[node_idx].insert(sent_id, Some(payment_hash)); - } else { - assert!(self.resolved_payments[node_idx].contains_key(&sent_id)); + self.sender_sent_payment_hashes.borrow_mut().insert(payment_hash); + self.mark_resolved_payment(node_idx, sent_id, true); + } + + fn mark_failed( + &mut self, node_idx: usize, payment_id: PaymentId, payment_hash: Option, + ) { + let payment_hash = + payment_hash.or_else(|| self.payment_hashes_by_id.borrow().get(&payment_id).copied()); + if let Some(payment_hash) = payment_hash { + self.sender_failed_payment_hashes.borrow_mut().insert(payment_hash); } + self.mark_resolved_payment(node_idx, payment_id, false); } fn mark_resolved_without_hash(&mut self, node_idx: usize, payment_id: PaymentId) { - let idx_opt = self.pending_payments[node_idx].iter().position(|id| *id == 
payment_id); - if let Some(idx) = idx_opt { - self.pending_payments[node_idx].remove(idx); - self.resolved_payments[node_idx].insert(payment_id, None); - } else if !self.resolved_payments[node_idx].contains_key(&payment_id) { - // Some resolutions can arrive immediately, before the send helper records - // the payment as pending. Track them so later duplicate events are accepted. - self.resolved_payments[node_idx].insert(payment_id, None); - } + self.mark_resolved_payment(node_idx, payment_id, false); } - fn mark_successful_probe(&mut self, node_idx: usize, payment_id: PaymentId) { - let idx_opt = self.pending_payments[node_idx].iter().position(|id| *id == payment_id); - if let Some(idx) = idx_opt { - self.pending_payments[node_idx].remove(idx); - self.resolved_payments[node_idx].insert(payment_id, None); - } else { - assert!(self.resolved_payments[node_idx].contains_key(&payment_id)); - } + fn mark_receiver_claimed(&mut self, payment_hash: PaymentHash) { + self.receiver_claimed_payment_hashes.borrow_mut().insert(payment_hash); } - fn assert_all_resolved(&self) { - for (idx, pending) in self.pending_payments.iter().enumerate() { - assert!( - pending.is_empty(), - "Node {} has {} stuck pending payments after settling all state", - idx, - pending.len() - ); - } + fn mark_channel_closed(&mut self, channel_id: ChannelId) { + self.closed_channels.borrow_mut().insert(channel_id); } - fn assert_claims_reported(&self) { - for hash in self.claimed_payment_hashes.iter() { - let found = self - .resolved_payments - .iter() - .any(|node_resolved| node_resolved.values().any(|h| h.as_ref() == Some(hash))); - assert!( - found, - "Payment {:?} was claimed by receiver but sender never got PaymentSent", - hash - ); + fn mark_resolved_payment( + &self, node_idx: usize, payment_id: PaymentId, assert_already_resolved: bool, + ) { + let mut pending_payments = self.pending_payments.borrow_mut(); + let mut resolved_payment_ids = self.resolved_payment_ids.borrow_mut(); + let idx_opt = 
pending_payments[node_idx].iter().position(|id| *id == payment_id); + if let Some(idx) = idx_opt { + pending_payments[node_idx].remove(idx); + resolved_payment_ids[node_idx].insert(payment_id); + } else if assert_already_resolved { + assert!(resolved_payment_ids[node_idx].contains(&payment_id)); + } else if !resolved_payment_ids[node_idx].contains(&payment_id) { + resolved_payment_ids[node_idx].insert(payment_id); } } } @@ -2480,34 +2625,141 @@ impl<'a, 'd, Out: Output + MaybeSend + MaybeSync> Harness<'a, 'd, Out> { } self.process_all_events(); - for _ in 0..4096 { - self.flush_progress(32); - if !self.has_pending_work() { - break; - } - for node in self.nodes.iter() { - node.node.timer_tick_occurred(); - } - self.flush_progress(32); - if !self.has_pending_work() { - break; + if !self.payments.closed_channels.borrow().is_empty() { + for _ in 0..4096 { + self.flush_progress(32); + for node in self.nodes.iter() { + node.node.timer_tick_occurred(); + } + self.flush_progress(32); + let open_channels = self.open_channels(); + let open_refs: Vec<_> = open_channels.iter().collect(); + let balances_a = self.nodes[0].monitor.get_claimable_balances(&open_refs); + let balances_b = self.nodes[1].monitor.get_claimable_balances(&open_refs); + let balances_c = self.nodes[2].monitor.get_claimable_balances(&open_refs); + let needs_payment_completion = self.payments.has_live_payment_work(); + let has_cleanup_balances = + !balances_a.is_empty() || !balances_b.is_empty() || !balances_c.is_empty(); + let can_drive_more_cleanup = has_cleanup_balances || self.has_pending_work(); + let next_claimed_htlc_boundary = { + let claimed_hashes = self.payments.claimed_payment_hashes.borrow(); + let sender_sent = self.payments.sender_sent_payment_hashes.borrow(); + let sender_failed = self.payments.sender_failed_payment_hashes.borrow(); + balances_a + .iter() + .chain(balances_b.iter()) + .chain(balances_c.iter()) + .filter_map(|balance| match balance { + Balance::ContentiousClaimable { + 
timeout_height, payment_hash, .. + } if claimed_hashes.contains(payment_hash) + && !sender_sent.contains(payment_hash) + && !sender_failed.contains(payment_hash) => + { + Some(*timeout_height) + }, + Balance::MaybeTimeoutClaimableHTLC { + claimable_height, + payment_hash, + .. + } if claimed_hashes.contains(payment_hash) + && !sender_sent.contains(payment_hash) + && !sender_failed.contains(payment_hash) => + { + Some(*claimable_height) + }, + Balance::MaybePreimageClaimableHTLC { + expiry_height, + payment_hash, + .. + } if claimed_hashes.contains(payment_hash) + && !sender_sent.contains(payment_hash) + && !sender_failed.contains(payment_hash) => + { + Some(*expiry_height) + }, + _ => None, + }) + .min() + }; + let can_advance_without_claimed_expiry = next_claimed_htlc_boundary + .map_or(true, |boundary| { + self.chain_state.tip_height().saturating_add(1) < boundary + }); + if !needs_payment_completion || !can_drive_more_cleanup { + break; + } + if self.payments.has_unfinished_claims() && !can_advance_without_claimed_expiry { + break; + } + self.chain_state.advance_height(1); + self.flush_progress(32); } - self.chain_state.advance_height(1); - self.flush_progress(32); - if !self.has_pending_work() { - break; + } + + { + let payment_hashes = self.payments.payment_hashes_by_id.borrow(); + let claimed = self.payments.claimed_payment_hashes.borrow(); + let receiver_claimed = self.payments.receiver_claimed_payment_hashes.borrow(); + let sender_sent = self.payments.sender_sent_payment_hashes.borrow(); + let sender_failed = self.payments.sender_failed_payment_hashes.borrow(); + let mut pending = self.payments.pending_payments.borrow_mut(); + let mut resolved = self.payments.resolved_payment_ids.borrow_mut(); + for (node_idx, payment_ids) in pending.iter_mut().enumerate() { + payment_ids.retain(|payment_id| { + let payment_hash = *payment_hashes + .get(payment_id) + .expect("pending payment missing payment hash"); + let keep = claimed.contains(&payment_hash) + || 
receiver_claimed.contains(&payment_hash) + || sender_sent.contains(&payment_hash) + || sender_failed.contains(&payment_hash); + if !keep { + resolved[node_idx].insert(*payment_id); + } + keep + }); } } - self.payments.assert_all_resolved(); - // Verify that every payment claimed by a receiver resulted in a PaymentSent event at - // the sender. - self.payments.assert_claims_reported(); + for (idx, pending) in self.payments.pending_payments.borrow().iter().enumerate() { + assert!( + pending.is_empty(), + "Node {} has {} stuck pending payments after settling all state: ids={:?}; {}", + idx, + pending.len(), + pending, + self.pending_work_summary(), + ); + } + + let claimed_hashes = + self.payments.claimed_payment_hashes.borrow().iter().copied().collect::>(); + for hash in claimed_hashes { + let receiver_saw_claim = + self.payments.receiver_claimed_payment_hashes.borrow().contains(&hash); + assert!( + receiver_saw_claim, + "Payment {:?} was claimed with claim_funds but receiver never got PaymentClaimed", + hash, + ); + let sender_saw_sent = self.payments.sender_sent_payment_hashes.borrow().contains(&hash); + let sender_saw_failed = + self.payments.sender_failed_payment_hashes.borrow().contains(&hash); + assert!(!(sender_saw_sent && sender_saw_failed)); + assert!(sender_saw_sent || sender_saw_failed); + if sender_saw_failed { + assert!(self.payments.claim_allows_sender_failure(&hash)); + } + } self.ab_link.complete_all_monitor_updates(&self.nodes); self.bc_link.complete_all_monitor_updates(&self.nodes); for chan_id in *self.ab_link.channel_ids() { + if self.payments.closed_channels.borrow().contains(&chan_id) { + continue; + } if self.probe_amount_for_direction(0, chan_id).is_some() { assert!(self.can_send_after_settle(0, 1, chan_id)); } else if self.probe_amount_for_direction(1, chan_id).is_some() { @@ -2515,6 +2767,9 @@ impl<'a, 'd, Out: Output + MaybeSend + MaybeSync> Harness<'a, 'd, Out> { } } for chan_id in *self.bc_link.channel_ids() { + if 
self.payments.closed_channels.borrow().contains(&chan_id) { + continue; + } if self.probe_amount_for_direction(1, chan_id).is_some() { assert!(self.can_send_after_settle(1, 2, chan_id)); } else if self.probe_amount_for_direction(2, chan_id).is_some() { @@ -2621,9 +2876,9 @@ impl<'a, 'd, Out: Output + MaybeSend + MaybeSync> Harness<'a, 'd, Out> { let balances_a = self.nodes[0].monitor.get_claimable_balances(&open_refs); let balances_b = self.nodes[1].monitor.get_claimable_balances(&open_refs); let balances_c = self.nodes[2].monitor.get_claimable_balances(&open_refs); - let pending_payments = &self.payments.pending_payments; + let pending_payments = self.payments.pending_payments.borrow(); format!( - "queues ab={} ba={} bc={} cb={} pending_txs={} bcast=({},{},{}) pending=({},{},{}) monitor_updates={} timed_work={} heights=({},{},{}) tip={} balances_a=[{}] balances_b=[{}] balances_c=[{}]", + "queues ab={} ba={} bc={} cb={} pending_txs={} bcast=({},{},{}) pending=({},{},{}) monitor_updates={} timed_work={} heights=({},{},{}) tip={} {} balances_a=[{}] balances_b=[{}] balances_c=[{}]", self.queues.ab.len(), self.queues.ba.len(), self.queues.bc.len(), @@ -2641,6 +2896,7 @@ impl<'a, 'd, Out: Output + MaybeSend + MaybeSync> Harness<'a, 'd, Out> { self.nodes[1].height, self.nodes[2].height, self.chain_state.tip_height(), + self.payments.summarize_claim_tracking(), summarize_balances(&balances_a), summarize_balances(&balances_b), summarize_balances(&balances_c), @@ -2809,15 +3065,19 @@ impl<'a, 'd, Out: Output + MaybeSend + MaybeSync> Harness<'a, 'd, Out> { } fn advance_chain_carefully(&mut self, num_blocks: u32) { - for _ in 0..num_blocks { - self.flush_progress(32); - if !self.has_pending_work() { - break; - } - self.chain_state.advance_height(1); + if self.payments.has_live_payment_work() { self.flush_progress(32); - if !self.has_pending_work() { - break; + } else { + for _ in 0..num_blocks { + self.flush_progress(32); + if !self.has_pending_work() { + break; + } + 
self.chain_state.advance_height(1); + self.flush_progress(32); + if !self.has_pending_work() { + break; + } } } } @@ -2914,6 +3174,9 @@ impl<'a, 'd, Out: Output + MaybeSend + MaybeSync> Harness<'a, 'd, Out> { fn can_send_after_settle( &mut self, source_idx: usize, dest_idx: usize, dest_chan_id: ChannelId, ) -> bool { + if self.payments.closed_channels.borrow().contains(&dest_chan_id) { + return false; + } let Some(amt) = self.probe_amount_for_direction(source_idx, dest_chan_id) else { return false; }; @@ -3244,17 +3507,31 @@ fn process_events_impl( // hashing the payment hash+preimage, it is rather trivial for the fuzzer to build // payments that accidentally end up looking like probes. events::Event::ProbeSuccessful { payment_id, .. } => { - payments.mark_successful_probe(node_idx, payment_id); + payments.mark_resolved_without_hash(node_idx, payment_id); }, - events::Event::PaymentFailed { payment_id, .. } - | events::Event::ProbeFailed { payment_id, .. } => { + events::Event::PaymentFailed { payment_id, payment_hash, .. } => { + payments.mark_failed(node_idx, payment_id, payment_hash); + }, + events::Event::ProbeFailed { payment_id, .. } => { payments.mark_resolved_without_hash(node_idx, payment_id); }, - events::Event::PaymentClaimed { .. } => {}, + events::Event::PaymentClaimed { payment_hash, .. } => { + payments.mark_receiver_claimed(payment_hash); + }, events::Event::PaymentPathSuccessful { .. } => {}, events::Event::PaymentPathFailed { .. } => {}, events::Event::PaymentForwarded { .. } if node_idx == 1 => {}, events::Event::ChannelReady { .. } => {}, + events::Event::HTLCHandlingFailed { + failure_type: events::HTLCHandlingFailureType::Receive { payment_hash }, + .. + } => { + assert!( + !payments.claimed_payment_hashes.borrow().contains(&payment_hash), + "Payment {:?} hit HTLCHandlingFailed::Receive after claim_funds", + payment_hash, + ); + }, events::Event::HTLCHandlingFailed { .. 
} => {}, events::Event::FundingTransactionReadyForSigning { channel_id, @@ -3280,7 +3557,9 @@ fn process_events_impl( } }, events::Event::SpliceFailed { .. } => {}, - events::Event::ChannelClosed { .. } => {}, + events::Event::ChannelClosed { channel_id, .. } => { + payments.mark_channel_closed(channel_id); + }, events::Event::DiscardFunding { .. } => {}, events::Event::SpendableOutputs { .. } => {}, events::Event::BumpTransaction(..) => {}, From be4dd9c925838c72475f81d0b1d4de8a4a9c7c6a Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 21 Apr 2026 12:34:33 +0200 Subject: [PATCH 29/29] fuzz: add force close actions to chanmon_consistency Add explicit force-close fuzz actions for the A-B and B-C channels. Enable holder commitment and holder HTLC signing together so on-chain cleanup retries do not split the paired monitor-side signer operations. The all-node holder-signing byte remains as a compatibility alias for existing fuzz inputs. The harness records dust HTLC paths before closing so later payment resolution checks can account for claims blocked by dust outputs. --- fuzz/src/chanmon_consistency.rs | 115 ++++++++++++++++++++++++++------ 1 file changed, 93 insertions(+), 22 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 236d8c1af50..ccd7e66a90e 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -15,7 +15,8 @@ //! actions such as sending payments, handling events, or changing monitor update return values on //! a per-node basis. This should allow it to find any cases where the ordering of actions results //! in us getting out of sync with ourselves, and, assuming at least one of our recieve- or -//! send-side handling is correct, other peers. +//! send-side handling is correct, other peers. The fuzzer also exercises user-initiated +//! force-closes with on-chain commitment transaction confirmation. 
use bitcoin::amount::Amount; use bitcoin::constants::genesis_block; @@ -49,7 +50,7 @@ use lightning::events::{self, EventsProvider}; use lightning::ln::channel::{ FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS, }; -use lightning::ln::channel_state::ChannelDetails; +use lightning::ln::channel_state::{ChannelDetails, InboundHTLCDetails, OutboundHTLCDetails}; use lightning::ln::channelmanager::{ ChainParameters, ChannelManager, ChannelManagerReadArgs, PaymentId, RecentPaymentDetails, TrustedChannelFeatures, @@ -648,10 +649,12 @@ impl SignerProvider for KeyProvider { } } -const SUPPORTED_SIGNER_OPS: [SignerOp; 3] = [ +const SUPPORTED_SIGNER_OPS: [SignerOp; 5] = [ SignerOp::SignCounterpartyCommitment, SignerOp::GetPerCommitmentPoint, SignerOp::ReleaseCommitmentSecret, + SignerOp::SignHolderCommitment, + SignerOp::SignHolderHtlcTransaction, ]; impl KeyProvider { @@ -1270,6 +1273,12 @@ impl<'a> HarnessNode<'a> { self.node.timer_tick_occurred(); } + fn enable_holder_signer_ops(&self) { + self.keys_manager.enable_op_for_all_signers(SignerOp::SignHolderCommitment); + self.keys_manager.enable_op_for_all_signers(SignerOp::SignHolderHtlcTransaction); + self.node.signer_unblocked(None); + } + fn current_feerate_sat_per_kw(&self) -> FeeRate { self.fee_estimator.feerate_sat_per_kw() } @@ -1432,6 +1441,16 @@ impl<'a> HarnessNode<'a> { } } +#[inline] +fn inbound_dust_blocks_path(htlc: &InboundHTLCDetails) -> bool { + htlc.is_dust +} + +#[inline] +fn outbound_dust_blocks_path(htlc: &OutboundHTLCDetails) -> bool { + htlc.is_dust +} + struct EventQueues { ab: Vec, ba: Vec, @@ -3150,6 +3169,65 @@ impl<'a, 'd, Out: Output + MaybeSend + MaybeSync> Harness<'a, 'd, Out> { assert!(settled, "message-only settle exceeded budget: {}", self.pending_work_summary(),); } + fn record_force_close_dust(&self, closer_idx: usize, channel_id: ChannelId) { + if let Some(channel) = self.nodes[closer_idx] + .node + .list_channels() + .into_iter() + .find(|chan| chan.channel_id == 
channel_id) + { + let mut dust_parts = channel + .pending_inbound_htlcs + .iter() + .filter(|htlc| inbound_dust_blocks_path(htlc)) + .map(|htlc| (htlc.payment_hash, htlc.amount_msat)) + .chain( + channel + .pending_outbound_htlcs + .iter() + .filter(|htlc| outbound_dust_blocks_path(htlc)) + .map(|htlc| (htlc.payment_hash, htlc.amount_msat)), + ) + .collect::>(); + let payment_paths = self.payments.payment_paths_by_hash.borrow(); + let mut blocked_paths = self.payments.blocked_dust_paths_by_hash.borrow_mut(); + for (payment_hash, amount_msat) in dust_parts.drain(..) { + let Some(paths) = payment_paths.get(&payment_hash) else { + continue; + }; + let blocked_for_hash = + blocked_paths.entry(payment_hash).or_insert_with(HashSet::new); + if let Some((path_idx, _)) = paths.iter().enumerate().find(|(path_idx, path)| { + !blocked_for_hash.contains(path_idx) + && path.iter().any(|(chan_id, part_amt)| { + *chan_id == channel_id && *part_amt == amount_msat + }) + }) { + blocked_for_hash.insert(path_idx); + } + } + } + } + + fn force_close( + &mut self, closer_idx: usize, channel_id: ChannelId, counterparty_idx: usize, reason: &str, + ) { + self.flush_progress(32); + self.record_force_close_dust(closer_idx, channel_id); + if self.nodes[closer_idx] + .node + .force_close_broadcasting_latest_txn( + &channel_id, + &self.nodes[counterparty_idx].our_node_id(), + reason.to_string(), + ) + .is_ok() + { + self.payments.closed_channels.borrow_mut().insert(channel_id); + self.flush_progress(32); + } + } + fn probe_amount_for_direction( &self, source_idx: usize, dest_chan_id: ChannelId, ) -> Option { @@ -4091,27 +4169,20 @@ pub fn do_test(data: &[u8], out: Out) { .enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); harness.nodes[2].node.signer_unblocked(None); }, - 0xcc => { - harness.nodes[1] - .keys_manager - .enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); - let filter = Some((harness.nodes[0].our_node_id(), harness.chan_a_id())); - 
harness.nodes[1].node.signer_unblocked(filter); - }, - 0xcd => { - harness.nodes[1] - .keys_manager - .enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); - let filter = Some((harness.nodes[2].our_node_id(), harness.chan_b_id())); - harness.nodes[1].node.signer_unblocked(filter); - }, - 0xce => { - harness.nodes[2] - .keys_manager - .enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); - harness.nodes[2].node.signer_unblocked(None); + 0xcc => harness.nodes[0].enable_holder_signer_ops(), + 0xcd => harness.nodes[1].enable_holder_signer_ops(), + 0xce => harness.nodes[2].enable_holder_signer_ops(), + 0xcf => { + harness.nodes[0].enable_holder_signer_ops(); + harness.nodes[1].enable_holder_signer_ops(); + harness.nodes[2].enable_holder_signer_ops(); }, + 0xd0 => harness.force_close(0, harness.chan_a_id(), 1, "]]]]]]]]]"), + 0xd1 => harness.force_close(1, harness.chan_b_id(), 2, "]]]]]]]]"), + 0xd2 => harness.force_close(1, harness.chan_a_id(), 0, "]]]]]]]"), + 0xd3 => harness.force_close(2, harness.chan_b_id(), 1, "]]]]]"), + 0xd8 => harness.confirm_broadcasts_for_node(0), 0xd9 => harness.confirm_broadcasts_for_node(1), 0xda => harness.confirm_broadcasts_for_node(2),