From 5b3302d29984a0512ac03eafd34c352a4b118904 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 21 Apr 2026 16:45:06 +0200 Subject: [PATCH 01/15] Extract chanmon bootstrap helpers Extract the repeated peer-connection and channel-funding setup into small helpers. This leaves the fuzz scenario setup behavior unchanged while making later harness refactors easier to review. --- fuzz/src/chanmon_consistency.rs | 579 +++++++++++++++++--------------- 1 file changed, 306 insertions(+), 273 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 655fb76200b..3d4c4bbd865 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -208,9 +208,7 @@ impl ChainState { fn is_outpoint_spent(&self, outpoint: &bitcoin::OutPoint) -> bool { self.blocks.iter().any(|(_, txs)| { - txs.iter().any(|tx| { - tx.input.iter().any(|input| input.previous_output == *outpoint) - }) + txs.iter().any(|tx| tx.input.iter().any(|input| input.previous_output == *outpoint)) }) } @@ -938,12 +936,240 @@ fn assert_action_timeout_awaiting_response(action: &msgs::ErrorAction) { ); } +#[derive(Copy, Clone)] enum ChanType { Legacy, KeyedAnchors, ZeroFeeCommitments, } +fn build_node_config(chan_type: ChanType) -> UserConfig { + let mut config = UserConfig::default(); + config.channel_config.forwarding_fee_proportional_millionths = 0; + config.channel_handshake_config.announce_for_forwarding = true; + config.reject_inbound_splices = false; + match chan_type { + ChanType::Legacy => { + config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = false; + config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = false; + }, + ChanType::KeyedAnchors => { + config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; + config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = false; + }, + ChanType::ZeroFeeCommitments => { + config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = 
false; + config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = true; + }, + } + config +} + +fn complete_all_pending_monitor_updates(monitor: &Arc) { + for (channel_id, state) in monitor.latest_monitors.lock().unwrap().iter_mut() { + for (id, data) in state.pending_monitors.drain(..) { + monitor.chain_monitor.channel_monitor_updated(*channel_id, id).unwrap(); + if id >= state.persisted_monitor_id { + state.persisted_monitor_id = id; + state.persisted_monitor = data; + } + } + } +} + +fn connect_peers(source: &ChanMan<'_>, dest: &ChanMan<'_>) { + let init_dest = + Init { features: dest.init_features(), networks: None, remote_network_address: None }; + source.peer_connected(dest.get_our_node_id(), &init_dest, true).unwrap(); + let init_src = + Init { features: source.init_features(), networks: None, remote_network_address: None }; + dest.peer_connected(source.get_our_node_id(), &init_src, false).unwrap(); +} + +fn make_channel( + source: &ChanMan<'_>, dest: &ChanMan<'_>, source_monitor: &Arc, + dest_monitor: &Arc, dest_keys_manager: &Arc, chan_id: i32, + trusted_open: bool, trusted_accept: bool, chain_state: &mut ChainState, +) { + if trusted_open { + source + .create_channel_to_trusted_peer_0reserve( + dest.get_our_node_id(), + 100_000, + 42, + 0, + None, + None, + ) + .unwrap(); + } else { + source.create_channel(dest.get_our_node_id(), 100_000, 42, 0, None, None).unwrap(); + } + let open_channel = { + let events = source.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + if let MessageSendEvent::SendOpenChannel { ref msg, .. } = events[0] { + msg.clone() + } else { + panic!("Wrong event type"); + } + }; + + dest.handle_open_channel(source.get_our_node_id(), &open_channel); + let accept_channel = { + let events = dest.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + if let events::Event::OpenChannelRequest { + ref temporary_channel_id, + ref counterparty_node_id, + .. 
+ } = events[0] + { + let mut random_bytes = [0u8; 16]; + random_bytes.copy_from_slice(&dest_keys_manager.get_secure_random_bytes()[..16]); + let user_channel_id = u128::from_be_bytes(random_bytes); + if trusted_accept { + dest.accept_inbound_channel_from_trusted_peer( + temporary_channel_id, + counterparty_node_id, + user_channel_id, + TrustedChannelFeatures::ZeroReserve, + None, + ) + .unwrap(); + } else { + dest.accept_inbound_channel( + temporary_channel_id, + counterparty_node_id, + user_channel_id, + None, + ) + .unwrap(); + } + } else { + panic!("Wrong event type"); + } + let events = dest.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + if let MessageSendEvent::SendAcceptChannel { ref msg, .. } = events[0] { + msg.clone() + } else { + panic!("Wrong event type"); + } + }; + + source.handle_accept_channel(dest.get_our_node_id(), &accept_channel); + { + let mut events = source.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + if let events::Event::FundingGenerationReady { + temporary_channel_id, + channel_value_satoshis, + output_script, + .. + } = events.pop().unwrap() + { + let tx = Transaction { + version: Version(chan_id), + lock_time: LockTime::ZERO, + input: Vec::new(), + output: vec![TxOut { + value: Amount::from_sat(channel_value_satoshis), + script_pubkey: output_script, + }], + }; + source + .funding_transaction_generated( + temporary_channel_id, + dest.get_our_node_id(), + tx.clone(), + ) + .unwrap(); + chain_state.confirm_tx(tx); + } else { + panic!("Wrong event type"); + } + } + + let funding_created = { + let events = source.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + if let MessageSendEvent::SendFundingCreated { ref msg, .. } = events[0] { + msg.clone() + } else { + panic!("Wrong event type"); + } + }; + dest.handle_funding_created(source.get_our_node_id(), &funding_created); + // Complete any pending monitor updates for dest after watch_channel. 
+ complete_all_pending_monitor_updates(dest_monitor); + + let (funding_signed, channel_id) = { + let events = dest.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + if let MessageSendEvent::SendFundingSigned { ref msg, .. } = events[0] { + (msg.clone(), msg.channel_id) + } else { + panic!("Wrong event type"); + } + }; + let events = dest.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + if let events::Event::ChannelPending { ref counterparty_node_id, .. } = events[0] { + assert_eq!(counterparty_node_id, &source.get_our_node_id()); + } else { + panic!("Wrong event type"); + } + + source.handle_funding_signed(dest.get_our_node_id(), &funding_signed); + // Complete any pending monitor updates for source after watch_channel. + complete_all_pending_monitor_updates(source_monitor); + + let events = source.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + if let events::Event::ChannelPending { + ref counterparty_node_id, + channel_id: ref event_channel_id, + .. + } = events[0] + { + assert_eq!(counterparty_node_id, &dest.get_our_node_id()); + assert_eq!(*event_channel_id, channel_id); + } else { + panic!("Wrong event type"); + } +} + +fn lock_fundings(nodes: &[ChanMan<'_>; 3]) { + let mut node_events = Vec::new(); + for node in nodes.iter() { + node_events.push(node.get_and_clear_pending_msg_events()); + } + for (idx, node_event) in node_events.iter().enumerate() { + for event in node_event { + if let MessageSendEvent::SendChannelReady { ref node_id, ref msg } = event { + for node in nodes.iter() { + if node.get_our_node_id() == *node_id { + node.handle_channel_ready(nodes[idx].get_our_node_id(), msg); + } + } + } else { + panic!("Wrong event type"); + } + } + } + + for node in nodes.iter() { + let events = node.get_and_clear_pending_msg_events(); + for event in events { + if let MessageSendEvent::SendAnnouncementSignatures { .. 
} = event { + } else { + panic!("Wrong event type"); + } + } + } +} + #[inline] pub fn do_test(data: &[u8], out: Out) { let broadcast_a = Arc::new(TestBroadcaster { txn_broadcasted: RefCell::new(Vec::new()) }); @@ -1007,27 +1233,10 @@ pub fn do_test(data: &[u8], out: Out) { Arc::clone(&keys_manager), )); - let mut config = UserConfig::default(); - config.channel_config.forwarding_fee_proportional_millionths = 0; - config.channel_handshake_config.announce_for_forwarding = true; - config.reject_inbound_splices = false; - match chan_type { - ChanType::Legacy => { - config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = false; - config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = false; - }, - ChanType::KeyedAnchors => { - config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; - config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = false; - }, - ChanType::ZeroFeeCommitments => { - config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = false; - config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = true; - }, - } let network = Network::Bitcoin; let best_block_timestamp = genesis_block(network).header.time; - let params = ChainParameters { network, best_block: BlockLocator::from_network(network) }; + let params = + ChainParameters { network, best_block: BlockLocator::from_network(network) }; ( ChannelManager::new( $fee_estimator.clone(), @@ -1039,7 +1248,7 @@ pub fn do_test(data: &[u8], out: Out) { keys_manager.clone(), keys_manager.clone(), keys_manager.clone(), - config, + build_node_config(chan_type), params, best_block_timestamp, ), @@ -1070,25 +1279,6 @@ pub fn do_test(data: &[u8], out: Out) { Arc::clone(keys), )); - let mut config = UserConfig::default(); - config.channel_config.forwarding_fee_proportional_millionths = 0; - config.channel_handshake_config.announce_for_forwarding = true; - config.reject_inbound_splices = false; - match chan_type { - 
ChanType::Legacy => { - config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = false; - config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = false; - }, - ChanType::KeyedAnchors => { - config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; - config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = false; - }, - ChanType::ZeroFeeCommitments => { - config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = false; - config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = true; - }, - } - let mut monitors = new_hash_map(); let mut old_monitors = old_monitors.latest_monitors.lock().unwrap(); for (channel_id, mut prev_state) in old_monitors.drain() { @@ -1138,12 +1328,12 @@ pub fn do_test(data: &[u8], out: Out) { router: &router, message_router: &router, logger, - config, + config: build_node_config(chan_type), channel_monitors: monitor_refs, }; - let manager = - <(BlockLocator, ChanMan)>::read(&mut &ser[..], read_args).expect("Failed to read manager"); + let manager = <(BlockLocator, ChanMan)>::read(&mut &ser[..], read_args) + .expect("Failed to read manager"); let res = (manager.1, chain_monitor.clone()); for (channel_id, mon) in monitors.drain() { assert_eq!( @@ -1155,224 +1345,6 @@ pub fn do_test(data: &[u8], out: Out) { res }; - macro_rules! complete_all_pending_monitor_updates { - ($monitor: expr) => {{ - for (channel_id, state) in $monitor.latest_monitors.lock().unwrap().iter_mut() { - for (id, data) in state.pending_monitors.drain(..) { - $monitor.chain_monitor.channel_monitor_updated(*channel_id, id).unwrap(); - if id >= state.persisted_monitor_id { - state.persisted_monitor_id = id; - state.persisted_monitor = data; - } - } - } - }}; - } - macro_rules! 
connect_peers { - ($source: expr, $dest: expr) => {{ - let init_dest = Init { - features: $dest.init_features(), - networks: None, - remote_network_address: None, - }; - $source.peer_connected($dest.get_our_node_id(), &init_dest, true).unwrap(); - let init_src = Init { - features: $source.init_features(), - networks: None, - remote_network_address: None, - }; - $dest.peer_connected($source.get_our_node_id(), &init_src, false).unwrap(); - }}; - } - macro_rules! make_channel { - ($source: expr, $dest: expr, $source_monitor: expr, $dest_monitor: expr, $dest_keys_manager: expr, $chan_id: expr, $trusted_open: expr, $trusted_accept: expr) => {{ - if $trusted_open { - $source - .create_channel_to_trusted_peer_0reserve( - $dest.get_our_node_id(), - 100_000, - 42, - 0, - None, - None, - ) - .unwrap(); - } else { - $source - .create_channel($dest.get_our_node_id(), 100_000, 42, 0, None, None) - .unwrap(); - } - let open_channel = { - let events = $source.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - if let MessageSendEvent::SendOpenChannel { ref msg, .. } = events[0] { - msg.clone() - } else { - panic!("Wrong event type"); - } - }; - - $dest.handle_open_channel($source.get_our_node_id(), &open_channel); - let accept_channel = { - let events = $dest.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - if let events::Event::OpenChannelRequest { - ref temporary_channel_id, - ref counterparty_node_id, - .. 
- } = events[0] - { - let mut random_bytes = [0u8; 16]; - random_bytes - .copy_from_slice(&$dest_keys_manager.get_secure_random_bytes()[..16]); - let user_channel_id = u128::from_be_bytes(random_bytes); - if $trusted_accept { - $dest - .accept_inbound_channel_from_trusted_peer( - temporary_channel_id, - counterparty_node_id, - user_channel_id, - TrustedChannelFeatures::ZeroReserve, - None, - ) - .unwrap(); - } else { - $dest - .accept_inbound_channel( - temporary_channel_id, - counterparty_node_id, - user_channel_id, - None, - ) - .unwrap(); - } - } else { - panic!("Wrong event type"); - } - let events = $dest.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - if let MessageSendEvent::SendAcceptChannel { ref msg, .. } = events[0] { - msg.clone() - } else { - panic!("Wrong event type"); - } - }; - - $source.handle_accept_channel($dest.get_our_node_id(), &accept_channel); - { - let mut events = $source.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - if let events::Event::FundingGenerationReady { - temporary_channel_id, - channel_value_satoshis, - output_script, - .. - } = events.pop().unwrap() - { - let tx = Transaction { - version: Version($chan_id), - lock_time: LockTime::ZERO, - input: Vec::new(), - output: vec![TxOut { - value: Amount::from_sat(channel_value_satoshis), - script_pubkey: output_script, - }], - }; - $source - .funding_transaction_generated( - temporary_channel_id, - $dest.get_our_node_id(), - tx.clone(), - ) - .unwrap(); - chain_state.confirm_tx(tx); - } else { - panic!("Wrong event type"); - } - } - - let funding_created = { - let events = $source.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - if let MessageSendEvent::SendFundingCreated { ref msg, .. 
} = events[0] { - msg.clone() - } else { - panic!("Wrong event type"); - } - }; - $dest.handle_funding_created($source.get_our_node_id(), &funding_created); - // Complete any pending monitor updates for dest after watch_channel - complete_all_pending_monitor_updates!($dest_monitor); - - let (funding_signed, channel_id) = { - let events = $dest.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - if let MessageSendEvent::SendFundingSigned { ref msg, .. } = events[0] { - (msg.clone(), msg.channel_id.clone()) - } else { - panic!("Wrong event type"); - } - }; - let events = $dest.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - if let events::Event::ChannelPending { ref counterparty_node_id, .. } = events[0] { - assert_eq!(counterparty_node_id, &$source.get_our_node_id()); - } else { - panic!("Wrong event type"); - } - - $source.handle_funding_signed($dest.get_our_node_id(), &funding_signed); - // Complete any pending monitor updates for source after watch_channel - complete_all_pending_monitor_updates!($source_monitor); - - let events = $source.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - if let events::Event::ChannelPending { - ref counterparty_node_id, - channel_id: ref event_channel_id, - .. - } = events[0] - { - assert_eq!(counterparty_node_id, &$dest.get_our_node_id()); - assert_eq!(*event_channel_id, channel_id); - } else { - panic!("Wrong event type"); - } - }}; - } - - macro_rules! 
lock_fundings { - ($nodes: expr) => {{ - let mut node_events = Vec::new(); - for node in $nodes.iter() { - node_events.push(node.get_and_clear_pending_msg_events()); - } - for (idx, node_event) in node_events.iter().enumerate() { - for event in node_event { - if let MessageSendEvent::SendChannelReady { ref node_id, ref msg } = event { - for node in $nodes.iter() { - if node.get_our_node_id() == *node_id { - node.handle_channel_ready($nodes[idx].get_our_node_id(), msg); - } - } - } else { - panic!("Wrong event type"); - } - } - } - - for node in $nodes.iter() { - let events = node.get_and_clear_pending_msg_events(); - for event in events { - if let MessageSendEvent::SendAnnouncementSignatures { .. } = event { - } else { - panic!("Wrong event type"); - } - } - } - }}; - } - let wallet_a = TestWalletSource::new(SecretKey::from_slice(&[1; 32]).unwrap()); let wallet_b = TestWalletSource::new(SecretKey::from_slice(&[2; 32]).unwrap()); let wallet_c = TestWalletSource::new(SecretKey::from_slice(&[3; 32]).unwrap()); @@ -1414,8 +1386,8 @@ pub fn do_test(data: &[u8], out: Out) { let fee_estimators = [Arc::clone(&fee_est_a), Arc::clone(&fee_est_b), Arc::clone(&fee_est_c)]; // Connect peers first, then create channels - connect_peers!(nodes[0], nodes[1]); - connect_peers!(nodes[1], nodes[2]); + connect_peers(&nodes[0], &nodes[1]); + connect_peers(&nodes[1], &nodes[2]); // Create 3 channels between A-B and 3 channels between B-C (6 total). // @@ -1423,14 +1395,74 @@ pub fn do_test(data: &[u8], out: Out) { // txid and funding outpoint. 
// A-B: channel 2 A and B have 0-reserve (trusted open + trusted accept), // channel 3 A has 0-reserve (trusted accept) - make_channel!(nodes[0], nodes[1], monitor_a, monitor_b, keys_manager_b, 1, false, false); - make_channel!(nodes[0], nodes[1], monitor_a, monitor_b, keys_manager_b, 2, true, true); - make_channel!(nodes[0], nodes[1], monitor_a, monitor_b, keys_manager_b, 3, false, true); + make_channel( + &nodes[0], + &nodes[1], + &monitor_a, + &monitor_b, + &keys_manager_b, + 1, + false, + false, + &mut chain_state, + ); + make_channel( + &nodes[0], + &nodes[1], + &monitor_a, + &monitor_b, + &keys_manager_b, + 2, + true, + true, + &mut chain_state, + ); + make_channel( + &nodes[0], + &nodes[1], + &monitor_a, + &monitor_b, + &keys_manager_b, + 3, + false, + true, + &mut chain_state, + ); // B-C: channel 4 B has 0-reserve (via trusted accept), // channel 5 C has 0-reserve (via trusted open) - make_channel!(nodes[1], nodes[2], monitor_b, monitor_c, keys_manager_c, 4, false, true); - make_channel!(nodes[1], nodes[2], monitor_b, monitor_c, keys_manager_c, 5, true, false); - make_channel!(nodes[1], nodes[2], monitor_b, monitor_c, keys_manager_c, 6, false, false); + make_channel( + &nodes[1], + &nodes[2], + &monitor_b, + &monitor_c, + &keys_manager_c, + 4, + false, + true, + &mut chain_state, + ); + make_channel( + &nodes[1], + &nodes[2], + &monitor_b, + &monitor_c, + &keys_manager_c, + 5, + true, + false, + &mut chain_state, + ); + make_channel( + &nodes[1], + &nodes[2], + &monitor_b, + &monitor_c, + &keys_manager_c, + 6, + false, + false, + &mut chain_state, + ); // Wipe the transactions-broadcasted set to make sure we don't broadcast any transactions // during normal operation in `test_return`. 
@@ -1464,7 +1496,7 @@ pub fn do_test(data: &[u8], out: Out) { sync_with_chain_state(&mut chain_state, &nodes[1], &mut node_height_b, None); sync_with_chain_state(&mut chain_state, &nodes[2], &mut node_height_c, None); - lock_fundings!(nodes); + lock_fundings(&nodes); // Get channel IDs for all A-B channels (from node A's perspective) let chan_ab_ids = { @@ -2106,7 +2138,8 @@ pub fn do_test(data: &[u8], out: Out) { }, events::Event::SpliceFailed { .. } => {}, events::Event::DiscardFunding { - funding_info: events::FundingInfo::Contribution { .. } + funding_info: + events::FundingInfo::Contribution { .. } | events::FundingInfo::Tx { .. }, .. } => {}, From fe9811e355bd3e747a34c6bbb9991c0c868edfd6 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 21 Apr 2026 17:31:38 +0200 Subject: [PATCH 02/15] Wrap chanmon nodes in HarnessNode Introduce a small wrapper around each channel manager and its test resources. This keeps node-local state together before moving more operations onto the harness. --- fuzz/src/chanmon_consistency.rs | 249 ++++++++++++++------------------ 1 file changed, 110 insertions(+), 139 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 3d4c4bbd865..5fdddaad6c4 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -943,6 +943,38 @@ enum ChanType { ZeroFeeCommitments, } +struct HarnessNode<'a> { + node: ChanMan<'a>, + monitor: Arc, + keys_manager: Arc, +} + +impl<'a> std::ops::Deref for HarnessNode<'a> { + type Target = ChanMan<'a>; + + fn deref(&self) -> &Self::Target { + &self.node + } +} + +impl<'a> HarnessNode<'a> { + fn our_node_id(&self) -> PublicKey { + self.node.get_our_node_id() + } + + fn complete_all_pending_monitor_updates(&self) { + for (channel_id, state) in self.monitor.latest_monitors.lock().unwrap().iter_mut() { + for (id, data) in state.pending_monitors.drain(..) 
{ + self.monitor.chain_monitor.channel_monitor_updated(*channel_id, id).unwrap(); + if id >= state.persisted_monitor_id { + state.persisted_monitor_id = id; + state.persisted_monitor = data; + } + } + } + } +} + fn build_node_config(chan_type: ChanType) -> UserConfig { let mut config = UserConfig::default(); config.channel_config.forwarding_fee_proportional_millionths = 0; @@ -965,18 +997,6 @@ fn build_node_config(chan_type: ChanType) -> UserConfig { config } -fn complete_all_pending_monitor_updates(monitor: &Arc) { - for (channel_id, state) in monitor.latest_monitors.lock().unwrap().iter_mut() { - for (id, data) in state.pending_monitors.drain(..) { - monitor.chain_monitor.channel_monitor_updated(*channel_id, id).unwrap(); - if id >= state.persisted_monitor_id { - state.persisted_monitor_id = id; - state.persisted_monitor = data; - } - } - } -} - fn connect_peers(source: &ChanMan<'_>, dest: &ChanMan<'_>) { let init_dest = Init { features: dest.init_features(), networks: None, remote_network_address: None }; @@ -987,26 +1007,19 @@ fn connect_peers(source: &ChanMan<'_>, dest: &ChanMan<'_>) { } fn make_channel( - source: &ChanMan<'_>, dest: &ChanMan<'_>, source_monitor: &Arc, - dest_monitor: &Arc, dest_keys_manager: &Arc, chan_id: i32, - trusted_open: bool, trusted_accept: bool, chain_state: &mut ChainState, + source: &HarnessNode<'_>, dest: &HarnessNode<'_>, chan_id: i32, trusted_open: bool, + trusted_accept: bool, chain_state: &mut ChainState, ) { if trusted_open { source - .create_channel_to_trusted_peer_0reserve( - dest.get_our_node_id(), - 100_000, - 42, - 0, - None, - None, - ) + .node + .create_channel_to_trusted_peer_0reserve(dest.our_node_id(), 100_000, 42, 0, None, None) .unwrap(); } else { - source.create_channel(dest.get_our_node_id(), 100_000, 42, 0, None, None).unwrap(); + source.node.create_channel(dest.our_node_id(), 100_000, 42, 0, None, None).unwrap(); } let open_channel = { - let events = source.get_and_clear_pending_msg_events(); + let events = 
source.node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); if let MessageSendEvent::SendOpenChannel { ref msg, .. } = events[0] { msg.clone() @@ -1015,9 +1028,9 @@ fn make_channel( } }; - dest.handle_open_channel(source.get_our_node_id(), &open_channel); + dest.node.handle_open_channel(source.our_node_id(), &open_channel); let accept_channel = { - let events = dest.get_and_clear_pending_events(); + let events = dest.node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); if let events::Event::OpenChannelRequest { ref temporary_channel_id, @@ -1026,30 +1039,32 @@ fn make_channel( } = events[0] { let mut random_bytes = [0u8; 16]; - random_bytes.copy_from_slice(&dest_keys_manager.get_secure_random_bytes()[..16]); + random_bytes.copy_from_slice(&dest.keys_manager.get_secure_random_bytes()[..16]); let user_channel_id = u128::from_be_bytes(random_bytes); if trusted_accept { - dest.accept_inbound_channel_from_trusted_peer( - temporary_channel_id, - counterparty_node_id, - user_channel_id, - TrustedChannelFeatures::ZeroReserve, - None, - ) - .unwrap(); + dest.node + .accept_inbound_channel_from_trusted_peer( + temporary_channel_id, + counterparty_node_id, + user_channel_id, + TrustedChannelFeatures::ZeroReserve, + None, + ) + .unwrap(); } else { - dest.accept_inbound_channel( - temporary_channel_id, - counterparty_node_id, - user_channel_id, - None, - ) - .unwrap(); + dest.node + .accept_inbound_channel( + temporary_channel_id, + counterparty_node_id, + user_channel_id, + None, + ) + .unwrap(); } } else { panic!("Wrong event type"); } - let events = dest.get_and_clear_pending_msg_events(); + let events = dest.node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); if let MessageSendEvent::SendAcceptChannel { ref msg, .. 
} = events[0] { msg.clone() @@ -1058,9 +1073,9 @@ fn make_channel( } }; - source.handle_accept_channel(dest.get_our_node_id(), &accept_channel); + source.node.handle_accept_channel(dest.our_node_id(), &accept_channel); { - let mut events = source.get_and_clear_pending_events(); + let mut events = source.node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); if let events::Event::FundingGenerationReady { temporary_channel_id, @@ -1079,11 +1094,8 @@ fn make_channel( }], }; source - .funding_transaction_generated( - temporary_channel_id, - dest.get_our_node_id(), - tx.clone(), - ) + .node + .funding_transaction_generated(temporary_channel_id, dest.our_node_id(), tx.clone()) .unwrap(); chain_state.confirm_tx(tx); } else { @@ -1092,7 +1104,7 @@ fn make_channel( } let funding_created = { - let events = source.get_and_clear_pending_msg_events(); + let events = source.node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); if let MessageSendEvent::SendFundingCreated { ref msg, .. } = events[0] { msg.clone() @@ -1100,12 +1112,12 @@ fn make_channel( panic!("Wrong event type"); } }; - dest.handle_funding_created(source.get_our_node_id(), &funding_created); + dest.node.handle_funding_created(source.our_node_id(), &funding_created); // Complete any pending monitor updates for dest after watch_channel. - complete_all_pending_monitor_updates(dest_monitor); + dest.complete_all_pending_monitor_updates(); let (funding_signed, channel_id) = { - let events = dest.get_and_clear_pending_msg_events(); + let events = dest.node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); if let MessageSendEvent::SendFundingSigned { ref msg, .. 
} = events[0] { (msg.clone(), msg.channel_id) @@ -1113,19 +1125,19 @@ fn make_channel( panic!("Wrong event type"); } }; - let events = dest.get_and_clear_pending_events(); + let events = dest.node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); if let events::Event::ChannelPending { ref counterparty_node_id, .. } = events[0] { - assert_eq!(counterparty_node_id, &source.get_our_node_id()); + assert_eq!(counterparty_node_id, &source.our_node_id()); } else { panic!("Wrong event type"); } - source.handle_funding_signed(dest.get_our_node_id(), &funding_signed); + source.node.handle_funding_signed(dest.our_node_id(), &funding_signed); // Complete any pending monitor updates for source after watch_channel. - complete_all_pending_monitor_updates(source_monitor); + source.complete_all_pending_monitor_updates(); - let events = source.get_and_clear_pending_events(); + let events = source.node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); if let events::Event::ChannelPending { ref counterparty_node_id, @@ -1133,24 +1145,24 @@ fn make_channel( .. 
} = events[0] { - assert_eq!(counterparty_node_id, &dest.get_our_node_id()); + assert_eq!(counterparty_node_id, &dest.our_node_id()); assert_eq!(*event_channel_id, channel_id); } else { panic!("Wrong event type"); } } -fn lock_fundings(nodes: &[ChanMan<'_>; 3]) { +fn lock_fundings(nodes: &[HarnessNode<'_>; 3]) { let mut node_events = Vec::new(); for node in nodes.iter() { - node_events.push(node.get_and_clear_pending_msg_events()); + node_events.push(node.node.get_and_clear_pending_msg_events()); } for (idx, node_event) in node_events.iter().enumerate() { for event in node_event { if let MessageSendEvent::SendChannelReady { ref node_id, ref msg } = event { for node in nodes.iter() { - if node.get_our_node_id() == *node_id { - node.handle_channel_ready(nodes[idx].get_our_node_id(), msg); + if node.our_node_id() == *node_id { + node.node.handle_channel_ready(nodes[idx].our_node_id(), msg); } } } else { @@ -1160,7 +1172,7 @@ fn lock_fundings(nodes: &[ChanMan<'_>; 3]) { } for node in nodes.iter() { - let events = node.get_and_clear_pending_msg_events(); + let events = node.node.get_and_clear_pending_msg_events(); for event in events { if let MessageSendEvent::SendAnnouncementSignatures { .. 
} = event { } else { @@ -1379,7 +1391,23 @@ pub fn do_test(data: &[u8], out: Out) { let (node_b, mut monitor_b, keys_manager_b, logger_b) = make_node!(1, fee_est_b, broadcast_b); let (node_c, mut monitor_c, keys_manager_c, logger_c) = make_node!(2, fee_est_c, broadcast_c); - let mut nodes = [node_a, node_b, node_c]; + let mut nodes = [ + HarnessNode { + node: node_a, + monitor: Arc::clone(&monitor_a), + keys_manager: Arc::clone(&keys_manager_a), + }, + HarnessNode { + node: node_b, + monitor: Arc::clone(&monitor_b), + keys_manager: Arc::clone(&keys_manager_b), + }, + HarnessNode { + node: node_c, + monitor: Arc::clone(&monitor_c), + keys_manager: Arc::clone(&keys_manager_c), + }, + ]; #[allow(unused_variables)] let loggers = [logger_a, logger_b, logger_c]; #[allow(unused_variables)] @@ -1395,74 +1423,14 @@ pub fn do_test(data: &[u8], out: Out) { // txid and funding outpoint. // A-B: channel 2 A and B have 0-reserve (trusted open + trusted accept), // channel 3 A has 0-reserve (trusted accept) - make_channel( - &nodes[0], - &nodes[1], - &monitor_a, - &monitor_b, - &keys_manager_b, - 1, - false, - false, - &mut chain_state, - ); - make_channel( - &nodes[0], - &nodes[1], - &monitor_a, - &monitor_b, - &keys_manager_b, - 2, - true, - true, - &mut chain_state, - ); - make_channel( - &nodes[0], - &nodes[1], - &monitor_a, - &monitor_b, - &keys_manager_b, - 3, - false, - true, - &mut chain_state, - ); + make_channel(&nodes[0], &nodes[1], 1, false, false, &mut chain_state); + make_channel(&nodes[0], &nodes[1], 2, true, true, &mut chain_state); + make_channel(&nodes[0], &nodes[1], 3, false, true, &mut chain_state); // B-C: channel 4 B has 0-reserve (via trusted accept), // channel 5 C has 0-reserve (via trusted open) - make_channel( - &nodes[1], - &nodes[2], - &monitor_b, - &monitor_c, - &keys_manager_c, - 4, - false, - true, - &mut chain_state, - ); - make_channel( - &nodes[1], - &nodes[2], - &monitor_b, - &monitor_c, - &keys_manager_c, - 5, - true, - false, - &mut 
chain_state, - ); - make_channel( - &nodes[1], - &nodes[2], - &monitor_b, - &monitor_c, - &keys_manager_c, - 6, - false, - false, - &mut chain_state, - ); + make_channel(&nodes[1], &nodes[2], 4, false, true, &mut chain_state); + make_channel(&nodes[1], &nodes[2], 5, true, false, &mut chain_state); + make_channel(&nodes[1], &nodes[2], 6, false, false, &mut chain_state); // Wipe the transactions-broadcasted set to make sure we don't broadcast any transactions // during normal operation in `test_return`. @@ -2656,8 +2624,9 @@ pub fn do_test(data: &[u8], out: Out) { &fee_est_a, broadcast_a.clone(), ); - nodes[0] = new_node_a; - monitor_a = new_monitor_a; + nodes[0].node = new_node_a; + monitor_a = Arc::clone(&new_monitor_a); + nodes[0].monitor = new_monitor_a; }, 0xb3..=0xbb => { // Restart node B, picking among the in-flight `ChannelMonitor`s to use based on @@ -2685,8 +2654,9 @@ pub fn do_test(data: &[u8], out: Out) { &fee_est_b, broadcast_b.clone(), ); - nodes[1] = new_node_b; - monitor_b = new_monitor_b; + nodes[1].node = new_node_b; + monitor_b = Arc::clone(&new_monitor_b); + nodes[1].monitor = new_monitor_b; }, 0xbc | 0xbd | 0xbe => { // Restart node C, picking among the in-flight `ChannelMonitor`s to use based on @@ -2710,8 +2680,9 @@ pub fn do_test(data: &[u8], out: Out) { &fee_est_c, broadcast_c.clone(), ); - nodes[2] = new_node_c; - monitor_c = new_monitor_c; + nodes[2].node = new_node_c; + monitor_c = Arc::clone(&new_monitor_c); + nodes[2].monitor = new_monitor_c; }, 0xc0 => keys_manager_a.disable_supported_ops_for_all_signers(), From c0de5836bb837162fde1cb3e54026847e143b1f6 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 21 Apr 2026 18:00:00 +0200 Subject: [PATCH 03/15] Build chanmon node resources Move construction of loggers, keys, monitors, broadcasters, wallets, and fee estimators into node resource setup. This removes ad hoc local closures while preserving the deterministic test inputs used by the fuzzer. 
--- fuzz/src/chanmon_consistency.rs | 669 ++++++++++++++++---------------- 1 file changed, 345 insertions(+), 324 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 5fdddaad6c4..4abbb9665fd 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -947,6 +947,10 @@ struct HarnessNode<'a> { node: ChanMan<'a>, monitor: Arc, keys_manager: Arc, + logger: Arc, + broadcaster: Arc, + fee_estimator: Arc, + wallet: TestWalletSource, } impl<'a> std::ops::Deref for HarnessNode<'a> { @@ -958,6 +962,72 @@ impl<'a> std::ops::Deref for HarnessNode<'a> { } impl<'a> HarnessNode<'a> { + fn build_loggers( + node_id: u8, out: &Out, + ) -> (Arc, Arc) { + let raw_logger = Arc::new(test_logger::TestLogger::new(node_id.to_string(), out.clone())); + let logger_for_monitor: Arc = raw_logger.clone(); + let logger: Arc = raw_logger; + (logger_for_monitor, logger) + } + + fn build_chain_monitor( + broadcaster: &Arc, fee_estimator: &Arc, + keys_manager: &Arc, logger_for_monitor: Arc, + persistence_style: ChannelMonitorUpdateStatus, + ) -> Arc { + Arc::new(TestChainMonitor::new( + Arc::clone(broadcaster), + logger_for_monitor, + Arc::clone(fee_estimator), + Arc::new(TestPersister { update_ret: Mutex::new(persistence_style) }), + Arc::clone(keys_manager), + )) + } + + fn new( + node_id: u8, wallet: TestWalletSource, fee_estimator: Arc, + broadcaster: Arc, persistence_style: ChannelMonitorUpdateStatus, + out: &Out, router: &'a FuzzRouter, chan_type: ChanType, + ) -> Self { + let (logger_for_monitor, logger) = Self::build_loggers(node_id, out); + let node_secret = SecretKey::from_slice(&[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1, node_id, + ]) + .unwrap(); + let keys_manager = Arc::new(KeyProvider { + node_secret, + rand_bytes_id: atomic::AtomicU32::new(0), + enforcement_states: Mutex::new(new_hash_map()), + }); + let monitor = Self::build_chain_monitor( + &broadcaster, 
+ &fee_estimator, + &keys_manager, + logger_for_monitor, + persistence_style, + ); + let network = Network::Bitcoin; + let best_block_timestamp = genesis_block(network).header.time; + let params = ChainParameters { network, best_block: BlockLocator::from_network(network) }; + let node = ChannelManager::new( + Arc::clone(&fee_estimator), + Arc::clone(&monitor), + Arc::clone(&broadcaster), + router, + router, + Arc::clone(&logger), + Arc::clone(&keys_manager), + Arc::clone(&keys_manager), + Arc::clone(&keys_manager), + build_node_config(chan_type), + params, + best_block_timestamp, + ); + Self { node, monitor, keys_manager, logger, broadcaster, fee_estimator, wallet } + } + fn our_node_id(&self) -> PublicKey { self.node.get_our_node_id() } @@ -997,6 +1067,17 @@ fn build_node_config(chan_type: ChanType) -> UserConfig { config } +fn assert_test_invariants(nodes: &[HarnessNode<'_>; 3]) { + assert_eq!(nodes[0].list_channels().len(), 3); + assert_eq!(nodes[1].list_channels().len(), 6); + assert_eq!(nodes[2].list_channels().len(), 3); + + // All broadcasters should be empty. Broadcast transactions are handled explicitly. 
+ assert!(nodes[0].broadcaster.txn_broadcasted.borrow().is_empty()); + assert!(nodes[1].broadcaster.txn_broadcasted.borrow().is_empty()); + assert!(nodes[2].broadcaster.txn_broadcasted.borrow().is_empty()); +} + fn connect_peers(source: &ChanMan<'_>, dest: &ChanMan<'_>) { let init_dest = Init { features: dest.init_features(), networks: None, remote_network_address: None }; @@ -1184,9 +1265,6 @@ fn lock_fundings(nodes: &[HarnessNode<'_>; 3]) { #[inline] pub fn do_test(data: &[u8], out: Out) { - let broadcast_a = Arc::new(TestBroadcaster { txn_broadcasted: RefCell::new(Vec::new()) }); - let broadcast_b = Arc::new(TestBroadcaster { txn_broadcasted: RefCell::new(Vec::new()) }); - let broadcast_c = Arc::new(TestBroadcaster { txn_broadcasted: RefCell::new(Vec::new()) }); let router = FuzzRouter {}; // Read initial monitor styles and channel type from fuzz input byte 0: @@ -1220,163 +1298,26 @@ pub fn do_test(data: &[u8], out: Out) { let mut node_height_a: u32 = 0; let mut node_height_b: u32 = 0; let mut node_height_c: u32 = 0; - - macro_rules! 
make_node { - ($node_id: expr, $fee_estimator: expr, $broadcaster: expr) => {{ - let logger: Arc = - Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone())); - let node_secret = SecretKey::from_slice(&[ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1, $node_id, - ]) - .unwrap(); - let keys_manager = Arc::new(KeyProvider { - node_secret, - rand_bytes_id: atomic::AtomicU32::new(0), - enforcement_states: Mutex::new(new_hash_map()), - }); - let monitor = Arc::new(TestChainMonitor::new( - $broadcaster.clone(), - logger.clone(), - $fee_estimator.clone(), - Arc::new(TestPersister { - update_ret: Mutex::new(mon_style[$node_id as usize].borrow().clone()), - }), - Arc::clone(&keys_manager), - )); - - let network = Network::Bitcoin; - let best_block_timestamp = genesis_block(network).header.time; - let params = - ChainParameters { network, best_block: BlockLocator::from_network(network) }; - ( - ChannelManager::new( - $fee_estimator.clone(), - monitor.clone(), - $broadcaster.clone(), - &router, - &router, - Arc::clone(&logger), - keys_manager.clone(), - keys_manager.clone(), - keys_manager.clone(), - build_node_config(chan_type), - params, - best_block_timestamp, - ), - monitor, - keys_manager, - logger, - ) - }}; - } - - let reload_node = |ser: &Vec, - node_id: u8, - old_monitors: &TestChainMonitor, - mut use_old_mons, - keys, - fee_estimator, - broadcaster: Arc| { - let keys_manager = Arc::clone(keys); - let logger: Arc = - Arc::new(test_logger::TestLogger::new(node_id.to_string(), out.clone())); - let chain_monitor = Arc::new(TestChainMonitor::new( - broadcaster.clone(), - logger.clone(), - Arc::clone(fee_estimator), - Arc::new(TestPersister { - update_ret: Mutex::new(ChannelMonitorUpdateStatus::Completed), - }), - Arc::clone(keys), - )); - - let mut monitors = new_hash_map(); - let mut old_monitors = old_monitors.latest_monitors.lock().unwrap(); - for (channel_id, mut prev_state) in old_monitors.drain() { - 
let (mon_id, serialized_mon) = if use_old_mons % 3 == 0 { - // Reload with the oldest `ChannelMonitor` (the one that we already told - // `ChannelManager` we finished persisting). - (prev_state.persisted_monitor_id, prev_state.persisted_monitor) - } else if use_old_mons % 3 == 1 { - // Reload with the second-oldest `ChannelMonitor` - let old_mon = (prev_state.persisted_monitor_id, prev_state.persisted_monitor); - prev_state.pending_monitors.drain(..).next().unwrap_or(old_mon) - } else { - // Reload with the newest `ChannelMonitor` - let old_mon = (prev_state.persisted_monitor_id, prev_state.persisted_monitor); - prev_state.pending_monitors.pop().unwrap_or(old_mon) - }; - // Use a different value of `use_old_mons` if we have another monitor (only for node B) - // by shifting `use_old_mons` one in base-3. - use_old_mons /= 3; - let mon = <(BlockLocator, ChannelMonitor)>::read( - &mut &serialized_mon[..], - (&**keys, &**keys), - ) - .expect("Failed to read monitor"); - monitors.insert(channel_id, mon.1); - // Update the latest `ChannelMonitor` state to match what we just told LDK. - prev_state.persisted_monitor = serialized_mon; - prev_state.persisted_monitor_id = mon_id; - // Wipe any `ChannelMonitor`s which we never told LDK we finished persisting, - // considering them discarded. LDK should replay these for us as they're stored in - // the `ChannelManager`. 
- prev_state.pending_monitors.clear(); - chain_monitor.latest_monitors.lock().unwrap().insert(channel_id, prev_state); - } - let mut monitor_refs = new_hash_map(); - for (channel_id, monitor) in monitors.iter() { - monitor_refs.insert(*channel_id, monitor); - } - - let read_args = ChannelManagerReadArgs { - entropy_source: Arc::clone(&keys_manager), - node_signer: Arc::clone(&keys_manager), - signer_provider: keys_manager, - fee_estimator: Arc::clone(fee_estimator), - chain_monitor: chain_monitor.clone(), - tx_broadcaster: broadcaster, - router: &router, - message_router: &router, - logger, - config: build_node_config(chan_type), - channel_monitors: monitor_refs, - }; - - let manager = <(BlockLocator, ChanMan)>::read(&mut &ser[..], read_args) - .expect("Failed to read manager"); - let res = (manager.1, chain_monitor.clone()); - for (channel_id, mon) in monitors.drain() { - assert_eq!( - chain_monitor.chain_monitor.watch_channel(channel_id, mon), - Ok(ChannelMonitorUpdateStatus::Completed) - ); - } - *chain_monitor.persister.update_ret.lock().unwrap() = *mon_style[node_id as usize].borrow(); - res - }; - let wallet_a = TestWalletSource::new(SecretKey::from_slice(&[1; 32]).unwrap()); let wallet_b = TestWalletSource::new(SecretKey::from_slice(&[2; 32]).unwrap()); let wallet_c = TestWalletSource::new(SecretKey::from_slice(&[3; 32]).unwrap()); - let wallets = vec![wallet_a, wallet_b, wallet_c]; + let wallets = [&wallet_a, &wallet_b, &wallet_c]; let coinbase_tx = bitcoin::Transaction { version: bitcoin::transaction::Version::TWO, lock_time: bitcoin::absolute::LockTime::ZERO, input: vec![bitcoin::TxIn { ..Default::default() }], output: wallets .iter() - .map(|w| TxOut { + .map(|wallet| TxOut { value: Amount::from_sat(100_000), - script_pubkey: w.get_change_script().unwrap(), + script_pubkey: wallet.get_change_script().unwrap(), }) .collect(), }; - wallets.iter().enumerate().for_each(|(i, w)| { - w.add_utxo(coinbase_tx.clone(), i as u32); - }); + for (idx, wallet) in 
wallets.iter().enumerate() { + wallet.add_utxo(coinbase_tx.clone(), idx as u32); + } let fee_est_a = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }); let mut last_htlc_clear_fee_a = 253; @@ -1384,34 +1325,50 @@ pub fn do_test(data: &[u8], out: Out) { let mut last_htlc_clear_fee_b = 253; let fee_est_c = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }); let mut last_htlc_clear_fee_c = 253; + let broadcast_a = Arc::new(TestBroadcaster { txn_broadcasted: RefCell::new(Vec::new()) }); + let broadcast_b = Arc::new(TestBroadcaster { txn_broadcasted: RefCell::new(Vec::new()) }); + let broadcast_c = Arc::new(TestBroadcaster { txn_broadcasted: RefCell::new(Vec::new()) }); // 3 nodes is enough to hit all the possible cases, notably unknown-source-unknown-dest // forwarding. - let (node_a, mut monitor_a, keys_manager_a, logger_a) = make_node!(0, fee_est_a, broadcast_a); - let (node_b, mut monitor_b, keys_manager_b, logger_b) = make_node!(1, fee_est_b, broadcast_b); - let (node_c, mut monitor_c, keys_manager_c, logger_c) = make_node!(2, fee_est_c, broadcast_c); - let mut nodes = [ - HarnessNode { - node: node_a, - monitor: Arc::clone(&monitor_a), - keys_manager: Arc::clone(&keys_manager_a), - }, - HarnessNode { - node: node_b, - monitor: Arc::clone(&monitor_b), - keys_manager: Arc::clone(&keys_manager_b), - }, - HarnessNode { - node: node_c, - monitor: Arc::clone(&monitor_c), - keys_manager: Arc::clone(&keys_manager_c), - }, + HarnessNode::new( + 0, + wallet_a, + Arc::clone(&fee_est_a), + Arc::clone(&broadcast_a), + mon_style[0].borrow().clone(), + &out, + &router, + chan_type, + ), + HarnessNode::new( + 1, + wallet_b, + Arc::clone(&fee_est_b), + Arc::clone(&broadcast_b), + mon_style[1].borrow().clone(), + &out, + &router, + chan_type, + ), + HarnessNode::new( + 2, + wallet_c, + Arc::clone(&fee_est_c), + Arc::clone(&broadcast_c), + mon_style[2].borrow().clone(), + &out, + &router, + chan_type, + ), ]; - #[allow(unused_variables)] - let loggers = 
[logger_a, logger_b, logger_c]; - #[allow(unused_variables)] - let fee_estimators = [Arc::clone(&fee_est_a), Arc::clone(&fee_est_b), Arc::clone(&fee_est_c)]; + let mut monitor_a = Arc::clone(&nodes[0].monitor); + let mut monitor_b = Arc::clone(&nodes[1].monitor); + let mut monitor_c = Arc::clone(&nodes[2].monitor); + let keys_manager_a = Arc::clone(&nodes[0].keys_manager); + let keys_manager_b = Arc::clone(&nodes[1].keys_manager); + let keys_manager_c = Arc::clone(&nodes[2].keys_manager); // Connect peers first, then create channels connect_peers(&nodes[0], &nodes[1]); @@ -1434,12 +1391,12 @@ pub fn do_test(data: &[u8], out: Out) { // Wipe the transactions-broadcasted set to make sure we don't broadcast any transactions // during normal operation in `test_return`. - broadcast_a.txn_broadcasted.borrow_mut().clear(); - broadcast_b.txn_broadcasted.borrow_mut().clear(); - broadcast_c.txn_broadcasted.borrow_mut().clear(); + nodes[0].broadcaster.txn_broadcasted.borrow_mut().clear(); + nodes[1].broadcaster.txn_broadcasted.borrow_mut().clear(); + nodes[2].broadcaster.txn_broadcasted.borrow_mut().clear(); let sync_with_chain_state = |chain_state: &ChainState, - node: &ChannelManager<_, _, _, _, _, _, _, _, _>, + node: &HarnessNode<'_>, node_height: &mut u32, num_blocks: Option| { let target_height = if let Some(num_blocks) = num_blocks { @@ -1447,7 +1404,6 @@ pub fn do_test(data: &[u8], out: Out) { } else { chain_state.tip_height() }; - while *node_height < target_height { *node_height += 1; let (header, txn) = chain_state.block_at(*node_height); @@ -1460,9 +1416,9 @@ pub fn do_test(data: &[u8], out: Out) { }; // Sync all nodes to tip to lock the funding. 
- sync_with_chain_state(&mut chain_state, &nodes[0], &mut node_height_a, None); - sync_with_chain_state(&mut chain_state, &nodes[1], &mut node_height_b, None); - sync_with_chain_state(&mut chain_state, &nodes[2], &mut node_height_c, None); + sync_with_chain_state(&chain_state, &nodes[0], &mut node_height_a, None); + sync_with_chain_state(&chain_state, &nodes[1], &mut node_height_b, None); + sync_with_chain_state(&chain_state, &nodes[2], &mut node_height_c, None); lock_fundings(&nodes); @@ -1502,20 +1458,93 @@ pub fn do_test(data: &[u8], out: Out) { macro_rules! test_return { () => {{ - assert_eq!(nodes[0].list_channels().len(), 3); - assert_eq!(nodes[1].list_channels().len(), 6); - assert_eq!(nodes[2].list_channels().len(), 3); - - // All broadcasters should be empty (all broadcast transactions should be handled - // explicitly). - assert!(broadcast_a.txn_broadcasted.borrow().is_empty()); - assert!(broadcast_b.txn_broadcasted.borrow().is_empty()); - assert!(broadcast_c.txn_broadcasted.borrow().is_empty()); - + assert_test_invariants(&nodes); return; }}; } + let reload_node = |ser: &Vec, + node_id: u8, + old_monitors: &TestChainMonitor, + mut use_old_mons, + keys: &Arc, + fee_estimator: &Arc, + broadcaster: Arc| { + let keys_manager = Arc::clone(keys); + let (logger_for_monitor, logger) = HarnessNode::build_loggers(node_id, &out); + let chain_monitor = HarnessNode::build_chain_monitor( + &broadcaster, + fee_estimator, + &keys_manager, + logger_for_monitor, + ChannelMonitorUpdateStatus::Completed, + ); + + let mut monitors = new_hash_map(); + let mut old_monitors = old_monitors.latest_monitors.lock().unwrap(); + for (channel_id, mut prev_state) in old_monitors.drain() { + let (mon_id, serialized_mon) = if use_old_mons % 3 == 0 { + // Reload with the oldest `ChannelMonitor` (the one that we already told + // `ChannelManager` we finished persisting). 
+ (prev_state.persisted_monitor_id, prev_state.persisted_monitor) + } else if use_old_mons % 3 == 1 { + // Reload with the second-oldest `ChannelMonitor` + let old_mon = (prev_state.persisted_monitor_id, prev_state.persisted_monitor); + prev_state.pending_monitors.drain(..).next().unwrap_or(old_mon) + } else { + // Reload with the newest `ChannelMonitor` + let old_mon = (prev_state.persisted_monitor_id, prev_state.persisted_monitor); + prev_state.pending_monitors.pop().unwrap_or(old_mon) + }; + // Use a different value of `use_old_mons` if we have another monitor (only for node B) + // by shifting `use_old_mons` one in base-3. + use_old_mons /= 3; + let mon = <(BlockLocator, ChannelMonitor)>::read( + &mut &serialized_mon[..], + (&*keys_manager, &*keys_manager), + ) + .expect("Failed to read monitor"); + monitors.insert(channel_id, mon.1); + // Update the latest `ChannelMonitor` state to match what we just told LDK. + prev_state.persisted_monitor = serialized_mon; + prev_state.persisted_monitor_id = mon_id; + // Wipe any `ChannelMonitor`s which we never told LDK we finished persisting, + // considering them discarded. LDK should replay these for us as they're stored in + // the `ChannelManager`. 
+ prev_state.pending_monitors.clear(); + chain_monitor.latest_monitors.lock().unwrap().insert(channel_id, prev_state); + } + let mut monitor_refs = new_hash_map(); + for (channel_id, monitor) in monitors.iter() { + monitor_refs.insert(*channel_id, monitor); + } + + let read_args = ChannelManagerReadArgs { + entropy_source: Arc::clone(&keys_manager), + node_signer: Arc::clone(&keys_manager), + signer_provider: Arc::clone(&keys_manager), + fee_estimator: Arc::clone(fee_estimator), + chain_monitor: chain_monitor.clone(), + tx_broadcaster: broadcaster, + router: &router, + message_router: &router, + logger: Arc::clone(&logger), + config: build_node_config(chan_type), + channel_monitors: monitor_refs, + }; + + let manager = <(BlockLocator, ChanMan)>::read(&mut &ser[..], read_args) + .expect("Failed to read manager"); + for (channel_id, mon) in monitors.drain() { + assert_eq!( + chain_monitor.chain_monitor.watch_channel(channel_id, mon), + Ok(ChannelMonitorUpdateStatus::Completed) + ); + } + *chain_monitor.persister.update_ret.lock().unwrap() = *mon_style[node_id as usize].borrow(); + (manager.1, chain_monitor, logger) + }; + let mut read_pos = 1; // First byte was consumed for initial config (mon_style + chan_type) macro_rules! 
get_slice { ($len: expr) => {{ @@ -1528,82 +1557,6 @@ pub fn do_test(data: &[u8], out: Out) { }}; } - let splice_channel = - |node: &ChanMan, - counterparty_node_id: &PublicKey, - channel_id: &ChannelId, - f: &dyn Fn(FundingTemplate) -> Result| { - match node.splice_channel(channel_id, counterparty_node_id) { - Ok(funding_template) => { - if let Ok(contribution) = f(funding_template) { - let _ = node.funding_contributed( - channel_id, - counterparty_node_id, - contribution, - None, - ); - } - }, - Err(e) => { - assert!( - matches!(e, APIError::APIMisuseError { ref err } if err.contains("splice")), - "{:?}", - e - ); - }, - } - }; - - let splice_in = - |node: &ChanMan, - counterparty_node_id: &PublicKey, - channel_id: &ChannelId, - wallet: &WalletSync<&TestWalletSource, Arc>, - funding_feerate_sat_per_kw: FeeRate| { - splice_channel( - node, - counterparty_node_id, - channel_id, - &move |funding_template: FundingTemplate| { - let feerate = - funding_template.min_rbf_feerate().unwrap_or(funding_feerate_sat_per_kw); - funding_template.splice_in_sync( - Amount::from_sat(10_000), - feerate, - FeeRate::MAX, - wallet, - ) - }, - ); - }; - - let splice_out = |node: &ChanMan, - counterparty_node_id: &PublicKey, - channel_id: &ChannelId, - wallet: &TestWalletSource, - funding_feerate_sat_per_kw: FeeRate| { - // We conditionally splice out `MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS` only when the node - // has double the balance required to send a payment upon a `0xff` byte. We do this to - // ensure there's always liquidity available for a payment to succeed then. 
- let outbound_capacity_msat = node - .list_channels() - .iter() - .find(|chan| chan.channel_id == *channel_id) - .map(|chan| chan.outbound_capacity_msat) - .unwrap(); - if outbound_capacity_msat < 20_000_000 { - return; - } - splice_channel(node, counterparty_node_id, channel_id, &move |funding_template| { - let feerate = funding_template.min_rbf_feerate().unwrap_or(funding_feerate_sat_per_kw); - let outputs = vec![TxOut { - value: Amount::from_sat(MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS), - script_pubkey: wallet.get_change_script().unwrap(), - }]; - funding_template.splice_out(outputs, feerate, FeeRate::MAX) - }); - }; - loop { // Push any events from Node B onto ba_events and bc_events macro_rules! push_excess_b_events { @@ -2083,7 +2036,8 @@ pub fn do_test(data: &[u8], out: Out) { unsigned_transaction, .. } => { - let signed_tx = wallets[$node].sign_tx(unsigned_transaction).unwrap(); + let signed_tx = + nodes[$node].wallet.sign_tx(unsigned_transaction).unwrap(); nodes[$node] .funding_transaction_signed( &channel_id, @@ -2093,12 +2047,7 @@ pub fn do_test(data: &[u8], out: Out) { .unwrap(); }, events::Event::SplicePending { new_funding_txo, .. 
} => { - let broadcaster = match $node { - 0 => &broadcast_a, - 1 => &broadcast_b, - _ => &broadcast_c, - }; - let mut txs = broadcaster.txn_broadcasted.borrow_mut(); + let mut txs = nodes[$node].broadcaster.txn_broadcasted.borrow_mut(); assert!(txs.len() >= 1); let splice_tx = txs.remove(0); assert_eq!(new_funding_txo.txid, splice_tx.compute_txid()); @@ -2148,7 +2097,6 @@ pub fn do_test(data: &[u8], out: Out) { } } }; - let complete_all_monitor_updates = |monitor: &Arc, chan_id| { if let Some(state) = monitor.latest_monitors.lock().unwrap().get_mut(chan_id) { assert!( @@ -2165,6 +2113,85 @@ pub fn do_test(data: &[u8], out: Out) { } }; + let splice_channel = + |node: &HarnessNode<'_>, + counterparty_node_id: &PublicKey, + channel_id: &ChannelId, + f: &dyn Fn( + FundingTemplate, + ) -> Result| { + match node.splice_channel(channel_id, counterparty_node_id) { + Ok(funding_template) => { + if let Ok(contribution) = f(funding_template) { + let _ = node.funding_contributed( + channel_id, + counterparty_node_id, + contribution, + None, + ); + } + }, + Err(e) => { + assert!( + matches!(e, APIError::APIMisuseError { ref err } if err.contains("splice")), + "{:?}", + e + ); + }, + } + }; + + let splice_in = |node: &HarnessNode<'_>, + counterparty_node_id: &PublicKey, + channel_id: &ChannelId| { + let wallet = WalletSync::new(&node.wallet, Arc::clone(&node.logger)); + let funding_feerate_sat_per_kw = node.fee_estimator.feerate_sat_per_kw(); + splice_channel( + node, + counterparty_node_id, + channel_id, + &move |funding_template: FundingTemplate| { + let feerate = + funding_template.min_rbf_feerate().unwrap_or(funding_feerate_sat_per_kw); + funding_template.splice_in_sync( + Amount::from_sat(10_000), + feerate, + FeeRate::MAX, + &wallet, + ) + }, + ); + }; + + let splice_out = |node: &HarnessNode<'_>, + counterparty_node_id: &PublicKey, + channel_id: &ChannelId| { + let outbound_capacity_msat = node + .list_channels() + .iter() + .find(|chan| chan.channel_id == *channel_id) + 
.map(|chan| chan.outbound_capacity_msat) + .unwrap(); + if outbound_capacity_msat < 20_000_000 { + return; + } + let funding_feerate_sat_per_kw = node.fee_estimator.feerate_sat_per_kw(); + splice_channel( + node, + counterparty_node_id, + channel_id, + &move |funding_template: FundingTemplate| { + let feerate = + funding_template.min_rbf_feerate().unwrap_or(funding_feerate_sat_per_kw); + let outputs = vec![TxOut { + value: Amount::from_sat(MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS), + script_pubkey: node.wallet.get_change_script().unwrap(), + }]; + funding_template.splice_out(outputs, feerate, FeeRate::MAX) + }, + ); + }; + let send = |source_idx: usize, dest_idx: usize, dest_chan_id, amt, payment_ctr: &mut u64| { let source = &nodes[source_idx]; @@ -2461,43 +2488,47 @@ pub fn do_test(data: &[u8], out: Out) { if matches!(chan_type, ChanType::Legacy) { max_feerate *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE as u32; } - if fee_est_a.ret_val.fetch_add(250, atomic::Ordering::AcqRel) + 250 > max_feerate { - fee_est_a.ret_val.store(max_feerate, atomic::Ordering::Release); + if nodes[0].fee_estimator.ret_val.fetch_add(250, atomic::Ordering::AcqRel) + 250 + > max_feerate + { + nodes[0].fee_estimator.ret_val.store(max_feerate, atomic::Ordering::Release); } nodes[0].timer_tick_occurred(); }, 0x81 => { - fee_est_a.ret_val.store(253, atomic::Ordering::Release); + nodes[0].fee_estimator.ret_val.store(253, atomic::Ordering::Release); nodes[0].timer_tick_occurred(); }, - 0x84 => { let mut max_feerate = last_htlc_clear_fee_b; if matches!(chan_type, ChanType::Legacy) { max_feerate *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE as u32; } - if fee_est_b.ret_val.fetch_add(250, atomic::Ordering::AcqRel) + 250 > max_feerate { - fee_est_b.ret_val.store(max_feerate, atomic::Ordering::Release); + if nodes[1].fee_estimator.ret_val.fetch_add(250, atomic::Ordering::AcqRel) + 250 + > max_feerate + { + nodes[1].fee_estimator.ret_val.store(max_feerate, atomic::Ordering::Release); } 
nodes[1].timer_tick_occurred(); }, 0x85 => { - fee_est_b.ret_val.store(253, atomic::Ordering::Release); + nodes[1].fee_estimator.ret_val.store(253, atomic::Ordering::Release); nodes[1].timer_tick_occurred(); }, - 0x88 => { let mut max_feerate = last_htlc_clear_fee_c; if matches!(chan_type, ChanType::Legacy) { max_feerate *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE as u32; } - if fee_est_c.ret_val.fetch_add(250, atomic::Ordering::AcqRel) + 250 > max_feerate { - fee_est_c.ret_val.store(max_feerate, atomic::Ordering::Release); + if nodes[2].fee_estimator.ret_val.fetch_add(250, atomic::Ordering::AcqRel) + 250 + > max_feerate + { + nodes[2].fee_estimator.ret_val.store(max_feerate, atomic::Ordering::Release); } nodes[2].timer_tick_occurred(); }, 0x89 => { - fee_est_c.ret_val.store(253, atomic::Ordering::Release); + nodes[2].fee_estimator.ret_val.store(253, atomic::Ordering::Release); nodes[2].timer_tick_occurred(); }, @@ -2506,36 +2537,28 @@ pub fn do_test(data: &[u8], out: Out) { test_return!(); } let cp_node_id = nodes[1].get_our_node_id(); - let wallet = WalletSync::new(&wallets[0], Arc::clone(&loggers[0])); - let feerate_sat_per_kw = fee_estimators[0].feerate_sat_per_kw(); - splice_in(&nodes[0], &cp_node_id, &chan_a_id, &wallet, feerate_sat_per_kw); + splice_in(&nodes[0], &cp_node_id, &chan_a_id); }, 0xa1 => { if !cfg!(splicing) { test_return!(); } let cp_node_id = nodes[0].get_our_node_id(); - let wallet = WalletSync::new(&wallets[1], Arc::clone(&loggers[1])); - let feerate_sat_per_kw = fee_estimators[1].feerate_sat_per_kw(); - splice_in(&nodes[1], &cp_node_id, &chan_a_id, &wallet, feerate_sat_per_kw); + splice_in(&nodes[1], &cp_node_id, &chan_a_id); }, 0xa2 => { if !cfg!(splicing) { test_return!(); } let cp_node_id = nodes[2].get_our_node_id(); - let wallet = WalletSync::new(&wallets[1], Arc::clone(&loggers[1])); - let feerate_sat_per_kw = fee_estimators[1].feerate_sat_per_kw(); - splice_in(&nodes[1], &cp_node_id, &chan_b_id, &wallet, feerate_sat_per_kw); + 
splice_in(&nodes[1], &cp_node_id, &chan_b_id); }, 0xa3 => { if !cfg!(splicing) { test_return!(); } let cp_node_id = nodes[1].get_our_node_id(); - let wallet = WalletSync::new(&wallets[2], Arc::clone(&loggers[2])); - let feerate_sat_per_kw = fee_estimators[2].feerate_sat_per_kw(); - splice_in(&nodes[2], &cp_node_id, &chan_b_id, &wallet, feerate_sat_per_kw); + splice_in(&nodes[2], &cp_node_id, &chan_b_id); }, 0xa4 => { @@ -2543,63 +2566,55 @@ pub fn do_test(data: &[u8], out: Out) { test_return!(); } let cp_node_id = nodes[1].get_our_node_id(); - let wallet = &wallets[0]; - let feerate_sat_per_kw = fee_estimators[0].feerate_sat_per_kw(); - splice_out(&nodes[0], &cp_node_id, &chan_a_id, wallet, feerate_sat_per_kw); + splice_out(&nodes[0], &cp_node_id, &chan_a_id); }, 0xa5 => { if !cfg!(splicing) { test_return!(); } let cp_node_id = nodes[0].get_our_node_id(); - let wallet = &wallets[1]; - let feerate_sat_per_kw = fee_estimators[1].feerate_sat_per_kw(); - splice_out(&nodes[1], &cp_node_id, &chan_a_id, wallet, feerate_sat_per_kw); + splice_out(&nodes[1], &cp_node_id, &chan_a_id); }, 0xa6 => { if !cfg!(splicing) { test_return!(); } let cp_node_id = nodes[2].get_our_node_id(); - let wallet = &wallets[1]; - let feerate_sat_per_kw = fee_estimators[1].feerate_sat_per_kw(); - splice_out(&nodes[1], &cp_node_id, &chan_b_id, wallet, feerate_sat_per_kw); + splice_out(&nodes[1], &cp_node_id, &chan_b_id); }, 0xa7 => { if !cfg!(splicing) { test_return!(); } let cp_node_id = nodes[1].get_our_node_id(); - let wallet = &wallets[2]; - let feerate_sat_per_kw = fee_estimators[2].feerate_sat_per_kw(); - splice_out(&nodes[2], &cp_node_id, &chan_b_id, wallet, feerate_sat_per_kw); + splice_out(&nodes[2], &cp_node_id, &chan_b_id); }, // Sync node by 1 block to cover confirmation of a transaction. 
0xa8 => { chain_state.confirm_pending_txs(); - sync_with_chain_state(&mut chain_state, &nodes[0], &mut node_height_a, Some(1)); + sync_with_chain_state(&chain_state, &nodes[0], &mut node_height_a, Some(1)); }, 0xa9 => { chain_state.confirm_pending_txs(); - sync_with_chain_state(&mut chain_state, &nodes[1], &mut node_height_b, Some(1)); + sync_with_chain_state(&chain_state, &nodes[1], &mut node_height_b, Some(1)); }, 0xaa => { chain_state.confirm_pending_txs(); - sync_with_chain_state(&mut chain_state, &nodes[2], &mut node_height_c, Some(1)); + sync_with_chain_state(&chain_state, &nodes[2], &mut node_height_c, Some(1)); }, // Sync node to chain tip to cover confirmation of a transaction post-reorg-risk. 0xab => { chain_state.confirm_pending_txs(); - sync_with_chain_state(&mut chain_state, &nodes[0], &mut node_height_a, None); + sync_with_chain_state(&chain_state, &nodes[0], &mut node_height_a, None); }, 0xac => { chain_state.confirm_pending_txs(); - sync_with_chain_state(&mut chain_state, &nodes[1], &mut node_height_b, None); + sync_with_chain_state(&chain_state, &nodes[1], &mut node_height_b, None); }, 0xad => { chain_state.confirm_pending_txs(); - sync_with_chain_state(&mut chain_state, &nodes[2], &mut node_height_c, None); + sync_with_chain_state(&chain_state, &nodes[2], &mut node_height_c, None); }, 0xb0 | 0xb1 | 0xb2 => { @@ -2615,18 +2630,19 @@ pub fn do_test(data: &[u8], out: Out) { ab_events.clear(); ba_events.clear(); } - let (new_node_a, new_monitor_a) = reload_node( + let (new_node_a, new_monitor_a, new_logger_a) = reload_node( &node_a_ser, 0, &monitor_a, v, &keys_manager_a, &fee_est_a, - broadcast_a.clone(), + Arc::clone(&broadcast_a), ); nodes[0].node = new_node_a; monitor_a = Arc::clone(&new_monitor_a); nodes[0].monitor = new_monitor_a; + nodes[0].logger = new_logger_a; }, 0xb3..=0xbb => { // Restart node B, picking among the in-flight `ChannelMonitor`s to use based on @@ -2645,18 +2661,19 @@ pub fn do_test(data: &[u8], out: Out) { bc_events.clear(); 
cb_events.clear(); } - let (new_node_b, new_monitor_b) = reload_node( + let (new_node_b, new_monitor_b, new_logger_b) = reload_node( &node_b_ser, 1, &monitor_b, v, &keys_manager_b, &fee_est_b, - broadcast_b.clone(), + Arc::clone(&broadcast_b), ); nodes[1].node = new_node_b; monitor_b = Arc::clone(&new_monitor_b); nodes[1].monitor = new_monitor_b; + nodes[1].logger = new_logger_b; }, 0xbc | 0xbd | 0xbe => { // Restart node C, picking among the in-flight `ChannelMonitor`s to use based on @@ -2671,18 +2688,19 @@ pub fn do_test(data: &[u8], out: Out) { bc_events.clear(); cb_events.clear(); } - let (new_node_c, new_monitor_c) = reload_node( + let (new_node_c, new_monitor_c, new_logger_c) = reload_node( &node_c_ser, 2, &monitor_c, v, &keys_manager_c, &fee_est_c, - broadcast_c.clone(), + Arc::clone(&broadcast_c), ); nodes[2].node = new_node_c; monitor_c = Arc::clone(&new_monitor_c); nodes[2].monitor = new_monitor_c; + nodes[2].logger = new_logger_c; }, 0xc0 => keys_manager_a.disable_supported_ops_for_all_signers(), @@ -2957,20 +2975,23 @@ pub fn do_test(data: &[u8], out: Out) { ); } - last_htlc_clear_fee_a = fee_est_a.ret_val.load(atomic::Ordering::Acquire); - last_htlc_clear_fee_b = fee_est_b.ret_val.load(atomic::Ordering::Acquire); - last_htlc_clear_fee_c = fee_est_c.ret_val.load(atomic::Ordering::Acquire); + last_htlc_clear_fee_a = + nodes[0].fee_estimator.ret_val.load(atomic::Ordering::Acquire); + last_htlc_clear_fee_b = + nodes[1].fee_estimator.ret_val.load(atomic::Ordering::Acquire); + last_htlc_clear_fee_c = + nodes[2].fee_estimator.ret_val.load(atomic::Ordering::Acquire); }, _ => test_return!(), } - if nodes[0].get_and_clear_needs_persistence() == true { + if nodes[0].get_and_clear_needs_persistence() { node_a_ser = nodes[0].encode(); } - if nodes[1].get_and_clear_needs_persistence() == true { + if nodes[1].get_and_clear_needs_persistence() { node_b_ser = nodes[1].encode(); } - if nodes[2].get_and_clear_needs_persistence() == true { + if 
nodes[2].get_and_clear_needs_persistence() { node_c_ser = nodes[2].encode(); } } From db0ea51f5e746e306a97a234602a6ff1b91d53a5 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 21 Apr 2026 18:00:24 +0200 Subject: [PATCH 04/15] Extract chanmon harness nodes Centralize creation of the three chanmon harness nodes. The fuzzer now initializes the node array through one path, which reduces duplicated setup before the event and payment helpers are split out. --- fuzz/src/chanmon_consistency.rs | 155 +++++++++++++------------------- 1 file changed, 63 insertions(+), 92 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 4abbb9665fd..905d989bfdb 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -1363,12 +1363,6 @@ pub fn do_test(data: &[u8], out: Out) { chan_type, ), ]; - let mut monitor_a = Arc::clone(&nodes[0].monitor); - let mut monitor_b = Arc::clone(&nodes[1].monitor); - let mut monitor_c = Arc::clone(&nodes[2].monitor); - let keys_manager_a = Arc::clone(&nodes[0].keys_manager); - let keys_manager_b = Arc::clone(&nodes[1].keys_manager); - let keys_manager_c = Arc::clone(&nodes[2].keys_manager); // Connect peers first, then create channels connect_peers(&nodes[0], &nodes[1]); @@ -1463,25 +1457,18 @@ pub fn do_test(data: &[u8], out: Out) { }}; } - let reload_node = |ser: &Vec, - node_id: u8, - old_monitors: &TestChainMonitor, - mut use_old_mons, - keys: &Arc, - fee_estimator: &Arc, - broadcaster: Arc| { - let keys_manager = Arc::clone(keys); + let reload_node = |ser: &Vec, node_id: u8, old_node: &HarnessNode<'_>, mut use_old_mons| { let (logger_for_monitor, logger) = HarnessNode::build_loggers(node_id, &out); let chain_monitor = HarnessNode::build_chain_monitor( - &broadcaster, - fee_estimator, - &keys_manager, + &old_node.broadcaster, + &old_node.fee_estimator, + &old_node.keys_manager, logger_for_monitor, ChannelMonitorUpdateStatus::Completed, ); let mut monitors = new_hash_map(); - 
let mut old_monitors = old_monitors.latest_monitors.lock().unwrap(); + let mut old_monitors = old_node.monitor.latest_monitors.lock().unwrap(); for (channel_id, mut prev_state) in old_monitors.drain() { let (mon_id, serialized_mon) = if use_old_mons % 3 == 0 { // Reload with the oldest `ChannelMonitor` (the one that we already told @@ -1501,7 +1488,7 @@ pub fn do_test(data: &[u8], out: Out) { use_old_mons /= 3; let mon = <(BlockLocator, ChannelMonitor)>::read( &mut &serialized_mon[..], - (&*keys_manager, &*keys_manager), + (&*old_node.keys_manager, &*old_node.keys_manager), ) .expect("Failed to read monitor"); monitors.insert(channel_id, mon.1); @@ -1520,12 +1507,12 @@ pub fn do_test(data: &[u8], out: Out) { } let read_args = ChannelManagerReadArgs { - entropy_source: Arc::clone(&keys_manager), - node_signer: Arc::clone(&keys_manager), - signer_provider: Arc::clone(&keys_manager), - fee_estimator: Arc::clone(fee_estimator), + entropy_source: Arc::clone(&old_node.keys_manager), + node_signer: Arc::clone(&old_node.keys_manager), + signer_provider: Arc::clone(&old_node.keys_manager), + fee_estimator: Arc::clone(&old_node.fee_estimator), chain_monitor: chain_monitor.clone(), - tx_broadcaster: broadcaster, + tx_broadcaster: Arc::clone(&old_node.broadcaster), router: &router, message_router: &router, logger: Arc::clone(&logger), @@ -2312,22 +2299,22 @@ pub fn do_test(data: &[u8], out: Out) { 0x08 => { for id in &chan_ab_ids { - complete_all_monitor_updates(&monitor_a, id); + complete_all_monitor_updates(&nodes[0].monitor, id); } }, 0x09 => { for id in &chan_ab_ids { - complete_all_monitor_updates(&monitor_b, id); + complete_all_monitor_updates(&nodes[1].monitor, id); } }, 0x0a => { for id in &chan_bc_ids { - complete_all_monitor_updates(&monitor_b, id); + complete_all_monitor_updates(&nodes[1].monitor, id); } }, 0x0b => { for id in &chan_bc_ids { - complete_all_monitor_updates(&monitor_c, id); + complete_all_monitor_updates(&nodes[2].monitor, id); } }, @@ -2630,17 
+2617,9 @@ pub fn do_test(data: &[u8], out: Out) { ab_events.clear(); ba_events.clear(); } - let (new_node_a, new_monitor_a, new_logger_a) = reload_node( - &node_a_ser, - 0, - &monitor_a, - v, - &keys_manager_a, - &fee_est_a, - Arc::clone(&broadcast_a), - ); + let (new_node_a, new_monitor_a, new_logger_a) = + reload_node(&node_a_ser, 0, &nodes[0], v); nodes[0].node = new_node_a; - monitor_a = Arc::clone(&new_monitor_a); nodes[0].monitor = new_monitor_a; nodes[0].logger = new_logger_a; }, @@ -2661,17 +2640,9 @@ pub fn do_test(data: &[u8], out: Out) { bc_events.clear(); cb_events.clear(); } - let (new_node_b, new_monitor_b, new_logger_b) = reload_node( - &node_b_ser, - 1, - &monitor_b, - v, - &keys_manager_b, - &fee_est_b, - Arc::clone(&broadcast_b), - ); + let (new_node_b, new_monitor_b, new_logger_b) = + reload_node(&node_b_ser, 1, &nodes[1], v); nodes[1].node = new_node_b; - monitor_b = Arc::clone(&new_monitor_b); nodes[1].monitor = new_monitor_b; nodes[1].logger = new_logger_b; }, @@ -2688,140 +2659,140 @@ pub fn do_test(data: &[u8], out: Out) { bc_events.clear(); cb_events.clear(); } - let (new_node_c, new_monitor_c, new_logger_c) = reload_node( - &node_c_ser, - 2, - &monitor_c, - v, - &keys_manager_c, - &fee_est_c, - Arc::clone(&broadcast_c), - ); + let (new_node_c, new_monitor_c, new_logger_c) = + reload_node(&node_c_ser, 2, &nodes[2], v); nodes[2].node = new_node_c; - monitor_c = Arc::clone(&new_monitor_c); nodes[2].monitor = new_monitor_c; nodes[2].logger = new_logger_c; }, - 0xc0 => keys_manager_a.disable_supported_ops_for_all_signers(), - 0xc1 => keys_manager_b.disable_supported_ops_for_all_signers(), - 0xc2 => keys_manager_c.disable_supported_ops_for_all_signers(), + 0xc0 => nodes[0].keys_manager.disable_supported_ops_for_all_signers(), + 0xc1 => nodes[1].keys_manager.disable_supported_ops_for_all_signers(), + 0xc2 => nodes[2].keys_manager.disable_supported_ops_for_all_signers(), 0xc3 => { - 
keys_manager_a.enable_op_for_all_signers(SignerOp::SignCounterpartyCommitment); + nodes[0] + .keys_manager + .enable_op_for_all_signers(SignerOp::SignCounterpartyCommitment); nodes[0].signer_unblocked(None); }, 0xc4 => { - keys_manager_b.enable_op_for_all_signers(SignerOp::SignCounterpartyCommitment); + nodes[1] + .keys_manager + .enable_op_for_all_signers(SignerOp::SignCounterpartyCommitment); let filter = Some((nodes[0].get_our_node_id(), chan_a_id)); nodes[1].signer_unblocked(filter); }, 0xc5 => { - keys_manager_b.enable_op_for_all_signers(SignerOp::SignCounterpartyCommitment); + nodes[1] + .keys_manager + .enable_op_for_all_signers(SignerOp::SignCounterpartyCommitment); let filter = Some((nodes[2].get_our_node_id(), chan_b_id)); nodes[1].signer_unblocked(filter); }, 0xc6 => { - keys_manager_c.enable_op_for_all_signers(SignerOp::SignCounterpartyCommitment); + nodes[2] + .keys_manager + .enable_op_for_all_signers(SignerOp::SignCounterpartyCommitment); nodes[2].signer_unblocked(None); }, 0xc7 => { - keys_manager_a.enable_op_for_all_signers(SignerOp::GetPerCommitmentPoint); + nodes[0].keys_manager.enable_op_for_all_signers(SignerOp::GetPerCommitmentPoint); nodes[0].signer_unblocked(None); }, 0xc8 => { - keys_manager_b.enable_op_for_all_signers(SignerOp::GetPerCommitmentPoint); + nodes[1].keys_manager.enable_op_for_all_signers(SignerOp::GetPerCommitmentPoint); let filter = Some((nodes[0].get_our_node_id(), chan_a_id)); nodes[1].signer_unblocked(filter); }, 0xc9 => { - keys_manager_b.enable_op_for_all_signers(SignerOp::GetPerCommitmentPoint); + nodes[1].keys_manager.enable_op_for_all_signers(SignerOp::GetPerCommitmentPoint); let filter = Some((nodes[2].get_our_node_id(), chan_b_id)); nodes[1].signer_unblocked(filter); }, 0xca => { - keys_manager_c.enable_op_for_all_signers(SignerOp::GetPerCommitmentPoint); + nodes[2].keys_manager.enable_op_for_all_signers(SignerOp::GetPerCommitmentPoint); nodes[2].signer_unblocked(None); }, 0xcb => { - 
keys_manager_a.enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); + nodes[0].keys_manager.enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); nodes[0].signer_unblocked(None); }, 0xcc => { - keys_manager_b.enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); + nodes[1].keys_manager.enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); let filter = Some((nodes[0].get_our_node_id(), chan_a_id)); nodes[1].signer_unblocked(filter); }, 0xcd => { - keys_manager_b.enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); + nodes[1].keys_manager.enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); let filter = Some((nodes[2].get_our_node_id(), chan_b_id)); nodes[1].signer_unblocked(filter); }, 0xce => { - keys_manager_c.enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); + nodes[2].keys_manager.enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); nodes[2].signer_unblocked(None); }, 0xf0 => { for id in &chan_ab_ids { - complete_monitor_update(&monitor_a, id, &complete_first); + complete_monitor_update(&nodes[0].monitor, id, &complete_first); } }, 0xf1 => { for id in &chan_ab_ids { - complete_monitor_update(&monitor_a, id, &complete_second); + complete_monitor_update(&nodes[0].monitor, id, &complete_second); } }, 0xf2 => { for id in &chan_ab_ids { - complete_monitor_update(&monitor_a, id, &Vec::pop); + complete_monitor_update(&nodes[0].monitor, id, &Vec::pop); } }, 0xf4 => { for id in &chan_ab_ids { - complete_monitor_update(&monitor_b, id, &complete_first); + complete_monitor_update(&nodes[1].monitor, id, &complete_first); } }, 0xf5 => { for id in &chan_ab_ids { - complete_monitor_update(&monitor_b, id, &complete_second); + complete_monitor_update(&nodes[1].monitor, id, &complete_second); } }, 0xf6 => { for id in &chan_ab_ids { - complete_monitor_update(&monitor_b, id, &Vec::pop); + complete_monitor_update(&nodes[1].monitor, id, &Vec::pop); } }, 0xf8 => { for id in &chan_bc_ids { - 
complete_monitor_update(&monitor_b, id, &complete_first); + complete_monitor_update(&nodes[1].monitor, id, &complete_first); } }, 0xf9 => { for id in &chan_bc_ids { - complete_monitor_update(&monitor_b, id, &complete_second); + complete_monitor_update(&nodes[1].monitor, id, &complete_second); } }, 0xfa => { for id in &chan_bc_ids { - complete_monitor_update(&monitor_b, id, &Vec::pop); + complete_monitor_update(&nodes[1].monitor, id, &Vec::pop); } }, 0xfc => { for id in &chan_bc_ids { - complete_monitor_update(&monitor_c, id, &complete_first); + complete_monitor_update(&nodes[2].monitor, id, &complete_first); } }, 0xfd => { for id in &chan_bc_ids { - complete_monitor_update(&monitor_c, id, &complete_second); + complete_monitor_update(&nodes[2].monitor, id, &complete_second); } }, 0xfe => { for id in &chan_bc_ids { - complete_monitor_update(&monitor_c, id, &Vec::pop); + complete_monitor_update(&nodes[2].monitor, id, &Vec::pop); } }, @@ -2862,9 +2833,9 @@ pub fn do_test(data: &[u8], out: Out) { } for op in SUPPORTED_SIGNER_OPS { - keys_manager_a.enable_op_for_all_signers(op); - keys_manager_b.enable_op_for_all_signers(op); - keys_manager_c.enable_op_for_all_signers(op); + nodes[0].keys_manager.enable_op_for_all_signers(op); + nodes[1].keys_manager.enable_op_for_all_signers(op); + nodes[2].keys_manager.enable_op_for_all_signers(op); } nodes[0].signer_unblocked(None); nodes[1].signer_unblocked(None); @@ -2879,12 +2850,12 @@ pub fn do_test(data: &[u8], out: Out) { } // Next, make sure no monitor updates are pending for id in &chan_ab_ids { - complete_all_monitor_updates(&monitor_a, id); - complete_all_monitor_updates(&monitor_b, id); + complete_all_monitor_updates(&nodes[0].monitor, id); + complete_all_monitor_updates(&nodes[1].monitor, id); } for id in &chan_bc_ids { - complete_all_monitor_updates(&monitor_b, id); - complete_all_monitor_updates(&monitor_c, id); + complete_all_monitor_updates(&nodes[1].monitor, id); + complete_all_monitor_updates(&nodes[2].monitor, id); 
} // Then, make sure any current forwards make their way to their destination if process_msg_events!(0, false, ProcessMessages::AllMessages) { From bd4ee1da8c9c0db9b4a09d6f751519b9de58325a Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 21 Apr 2026 17:24:01 +0200 Subject: [PATCH 05/15] Extract chanmon harness node lifecycle Move persistence, reload, and chain sync state onto each harness node. Keeping serialized managers and heights with the node makes restarts and block updates easier to reason about. --- fuzz/src/chanmon_consistency.rs | 337 ++++++++++++++++---------------- 1 file changed, 167 insertions(+), 170 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 905d989bfdb..de6a824eace 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -944,6 +944,7 @@ enum ChanType { } struct HarnessNode<'a> { + node_id: u8, node: ChanMan<'a>, monitor: Arc, keys_manager: Arc, @@ -951,6 +952,10 @@ struct HarnessNode<'a> { broadcaster: Arc, fee_estimator: Arc, wallet: TestWalletSource, + persistence_style: ChannelMonitorUpdateStatus, + serialized_manager: Vec, + height: u32, + last_htlc_clear_fee: u32, } impl<'a> std::ops::Deref for HarnessNode<'a> { @@ -1025,13 +1030,30 @@ impl<'a> HarnessNode<'a> { params, best_block_timestamp, ); - Self { node, monitor, keys_manager, logger, broadcaster, fee_estimator, wallet } + Self { + node_id, + node, + monitor, + keys_manager, + logger, + broadcaster, + fee_estimator, + wallet, + persistence_style, + serialized_manager: Vec::new(), + height: 0, + last_htlc_clear_fee: 253, + } } fn our_node_id(&self) -> PublicKey { self.node.get_our_node_id() } + fn set_persistence_style(&mut self, style: ChannelMonitorUpdateStatus) { + self.persistence_style = style; + } + fn complete_all_pending_monitor_updates(&self) { for (channel_id, state) in self.monitor.latest_monitors.lock().unwrap().iter_mut() { for (id, data) in state.pending_monitors.drain(..) 
{ @@ -1043,6 +1065,94 @@ impl<'a> HarnessNode<'a> { } } } + + fn refresh_serialized_manager(&mut self) { + if self.node.get_and_clear_needs_persistence() { + self.serialized_manager = self.node.encode(); + } + } + + fn reload( + &mut self, use_old_mons: u8, out: &Out, router: &'a FuzzRouter, chan_type: ChanType, + ) { + let (logger_for_monitor, logger) = Self::build_loggers(self.node_id, out); + let chain_monitor = Self::build_chain_monitor( + &self.broadcaster, + &self.fee_estimator, + &self.keys_manager, + logger_for_monitor, + ChannelMonitorUpdateStatus::Completed, + ); + + let mut monitors = new_hash_map(); + let mut use_old_mons = use_old_mons; + { + let mut old_monitors = self.monitor.latest_monitors.lock().unwrap(); + for (channel_id, mut prev_state) in old_monitors.drain() { + let (mon_id, serialized_mon) = if use_old_mons % 3 == 0 { + // Reload with the oldest `ChannelMonitor` (the one that we already told + // `ChannelManager` we finished persisting). + (prev_state.persisted_monitor_id, prev_state.persisted_monitor) + } else if use_old_mons % 3 == 1 { + // Reload with the second-oldest `ChannelMonitor`. + let old_mon = (prev_state.persisted_monitor_id, prev_state.persisted_monitor); + prev_state.pending_monitors.drain(..).next().unwrap_or(old_mon) + } else { + // Reload with the newest `ChannelMonitor`. + let old_mon = (prev_state.persisted_monitor_id, prev_state.persisted_monitor); + prev_state.pending_monitors.pop().unwrap_or(old_mon) + }; + // Use a different value of `use_old_mons` if we have another monitor + // (only for node B) by shifting `use_old_mons` one in base-3. + use_old_mons /= 3; + let mon = <(BlockLocator, ChannelMonitor)>::read( + &mut &serialized_mon[..], + (&*self.keys_manager, &*self.keys_manager), + ) + .expect("Failed to read monitor"); + monitors.insert(channel_id, mon.1); + // Update the latest `ChannelMonitor` state to match what we just told LDK. 
+ prev_state.persisted_monitor = serialized_mon; + prev_state.persisted_monitor_id = mon_id; + // Wipe any `ChannelMonitor`s which we never told LDK we finished persisting, + // considering them discarded. LDK should replay these for us as they're stored in + // the `ChannelManager`. + prev_state.pending_monitors.clear(); + chain_monitor.latest_monitors.lock().unwrap().insert(channel_id, prev_state); + } + } + let mut monitor_refs = new_hash_map(); + for (channel_id, monitor) in monitors.iter() { + monitor_refs.insert(*channel_id, monitor); + } + + let read_args = ChannelManagerReadArgs { + entropy_source: Arc::clone(&self.keys_manager), + node_signer: Arc::clone(&self.keys_manager), + signer_provider: Arc::clone(&self.keys_manager), + fee_estimator: Arc::clone(&self.fee_estimator), + chain_monitor: Arc::clone(&chain_monitor), + tx_broadcaster: Arc::clone(&self.broadcaster), + router, + message_router: router, + logger: Arc::clone(&logger), + config: build_node_config(chan_type), + channel_monitors: monitor_refs, + }; + + let manager = <(BlockLocator, ChanMan)>::read(&mut &self.serialized_manager[..], read_args) + .expect("Failed to read manager"); + for (channel_id, mon) in monitors.drain() { + assert_eq!( + chain_monitor.chain_monitor.watch_channel(channel_id, mon), + Ok(ChannelMonitorUpdateStatus::Completed) + ); + } + *chain_monitor.persister.update_ret.lock().unwrap() = self.persistence_style; + self.node = manager.1; + self.monitor = chain_monitor; + self.logger = logger; + } } fn build_node_config(chan_type: ChanType) -> UserConfig { @@ -1276,28 +1386,25 @@ pub fn do_test(data: &[u8], out: Out) { 1 => ChanType::KeyedAnchors, _ => ChanType::ZeroFeeCommitments, }; - let mon_style = [ - RefCell::new(if config_byte & 0b01 != 0 { + let persistence_styles = [ + if config_byte & 0b01 != 0 { ChannelMonitorUpdateStatus::InProgress } else { ChannelMonitorUpdateStatus::Completed - }), - RefCell::new(if config_byte & 0b10 != 0 { + }, + if config_byte & 0b10 != 0 { 
ChannelMonitorUpdateStatus::InProgress } else { ChannelMonitorUpdateStatus::Completed - }), - RefCell::new(if config_byte & 0b100 != 0 { + }, + if config_byte & 0b100 != 0 { ChannelMonitorUpdateStatus::InProgress } else { ChannelMonitorUpdateStatus::Completed - }), + }, ]; let mut chain_state = ChainState::new(); - let mut node_height_a: u32 = 0; - let mut node_height_b: u32 = 0; - let mut node_height_c: u32 = 0; let wallet_a = TestWalletSource::new(SecretKey::from_slice(&[1; 32]).unwrap()); let wallet_b = TestWalletSource::new(SecretKey::from_slice(&[2; 32]).unwrap()); let wallet_c = TestWalletSource::new(SecretKey::from_slice(&[3; 32]).unwrap()); @@ -1320,11 +1427,8 @@ pub fn do_test(data: &[u8], out: Out) { } let fee_est_a = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }); - let mut last_htlc_clear_fee_a = 253; let fee_est_b = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }); - let mut last_htlc_clear_fee_b = 253; let fee_est_c = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }); - let mut last_htlc_clear_fee_c = 253; let broadcast_a = Arc::new(TestBroadcaster { txn_broadcasted: RefCell::new(Vec::new()) }); let broadcast_b = Arc::new(TestBroadcaster { txn_broadcasted: RefCell::new(Vec::new()) }); let broadcast_c = Arc::new(TestBroadcaster { txn_broadcasted: RefCell::new(Vec::new()) }); @@ -1337,7 +1441,7 @@ pub fn do_test(data: &[u8], out: Out) { wallet_a, Arc::clone(&fee_est_a), Arc::clone(&broadcast_a), - mon_style[0].borrow().clone(), + persistence_styles[0], &out, &router, chan_type, @@ -1347,7 +1451,7 @@ pub fn do_test(data: &[u8], out: Out) { wallet_b, Arc::clone(&fee_est_b), Arc::clone(&broadcast_b), - mon_style[1].borrow().clone(), + persistence_styles[1], &out, &router, chan_type, @@ -1357,7 +1461,7 @@ pub fn do_test(data: &[u8], out: Out) { wallet_c, Arc::clone(&fee_est_c), Arc::clone(&broadcast_c), - mon_style[2].borrow().clone(), + persistence_styles[2], &out, &router, chan_type, @@ -1389,30 +1493,28 @@ 
pub fn do_test(data: &[u8], out: Out) { nodes[1].broadcaster.txn_broadcasted.borrow_mut().clear(); nodes[2].broadcaster.txn_broadcasted.borrow_mut().clear(); - let sync_with_chain_state = |chain_state: &ChainState, - node: &HarnessNode<'_>, - node_height: &mut u32, - num_blocks: Option| { - let target_height = if let Some(num_blocks) = num_blocks { - std::cmp::min(*node_height + num_blocks, chain_state.tip_height()) - } else { - chain_state.tip_height() - }; - while *node_height < target_height { - *node_height += 1; - let (header, txn) = chain_state.block_at(*node_height); - let txdata: Vec<_> = txn.iter().enumerate().map(|(i, tx)| (i + 1, tx)).collect(); - if !txdata.is_empty() { - node.transactions_confirmed(header, &txdata, *node_height); + let sync_with_chain_state = + |node: &mut HarnessNode<'_>, chain_state: &ChainState, num_blocks: Option| { + let target_height = if let Some(num_blocks) = num_blocks { + std::cmp::min(node.height + num_blocks, chain_state.tip_height()) + } else { + chain_state.tip_height() + }; + while node.height < target_height { + node.height += 1; + let (header, txn) = chain_state.block_at(node.height); + let txdata: Vec<_> = txn.iter().enumerate().map(|(i, tx)| (i + 1, tx)).collect(); + if !txdata.is_empty() { + node.transactions_confirmed(header, &txdata, node.height); + } + node.best_block_updated(header, node.height); } - node.best_block_updated(header, *node_height); - } - }; + }; // Sync all nodes to tip to lock the funding. 
- sync_with_chain_state(&chain_state, &nodes[0], &mut node_height_a, None); - sync_with_chain_state(&chain_state, &nodes[1], &mut node_height_b, None); - sync_with_chain_state(&chain_state, &nodes[2], &mut node_height_c, None); + sync_with_chain_state(&mut nodes[0], &chain_state, None); + sync_with_chain_state(&mut nodes[1], &chain_state, None); + sync_with_chain_state(&mut nodes[2], &chain_state, None); lock_fundings(&nodes); @@ -1439,9 +1541,9 @@ pub fn do_test(data: &[u8], out: Out) { let mut bc_events = Vec::new(); let mut cb_events = Vec::new(); - let mut node_a_ser = nodes[0].encode(); - let mut node_b_ser = nodes[1].encode(); - let mut node_c_ser = nodes[2].encode(); + for node in &mut nodes { + node.serialized_manager = node.encode(); + } let pending_payments = RefCell::new([Vec::new(), Vec::new(), Vec::new()]); let resolved_payments: RefCell<[HashMap>; 3]> = @@ -1457,82 +1559,7 @@ pub fn do_test(data: &[u8], out: Out) { }}; } - let reload_node = |ser: &Vec, node_id: u8, old_node: &HarnessNode<'_>, mut use_old_mons| { - let (logger_for_monitor, logger) = HarnessNode::build_loggers(node_id, &out); - let chain_monitor = HarnessNode::build_chain_monitor( - &old_node.broadcaster, - &old_node.fee_estimator, - &old_node.keys_manager, - logger_for_monitor, - ChannelMonitorUpdateStatus::Completed, - ); - - let mut monitors = new_hash_map(); - let mut old_monitors = old_node.monitor.latest_monitors.lock().unwrap(); - for (channel_id, mut prev_state) in old_monitors.drain() { - let (mon_id, serialized_mon) = if use_old_mons % 3 == 0 { - // Reload with the oldest `ChannelMonitor` (the one that we already told - // `ChannelManager` we finished persisting). 
- (prev_state.persisted_monitor_id, prev_state.persisted_monitor) - } else if use_old_mons % 3 == 1 { - // Reload with the second-oldest `ChannelMonitor` - let old_mon = (prev_state.persisted_monitor_id, prev_state.persisted_monitor); - prev_state.pending_monitors.drain(..).next().unwrap_or(old_mon) - } else { - // Reload with the newest `ChannelMonitor` - let old_mon = (prev_state.persisted_monitor_id, prev_state.persisted_monitor); - prev_state.pending_monitors.pop().unwrap_or(old_mon) - }; - // Use a different value of `use_old_mons` if we have another monitor (only for node B) - // by shifting `use_old_mons` one in base-3. - use_old_mons /= 3; - let mon = <(BlockLocator, ChannelMonitor)>::read( - &mut &serialized_mon[..], - (&*old_node.keys_manager, &*old_node.keys_manager), - ) - .expect("Failed to read monitor"); - monitors.insert(channel_id, mon.1); - // Update the latest `ChannelMonitor` state to match what we just told LDK. - prev_state.persisted_monitor = serialized_mon; - prev_state.persisted_monitor_id = mon_id; - // Wipe any `ChannelMonitor`s which we never told LDK we finished persisting, - // considering them discarded. LDK should replay these for us as they're stored in - // the `ChannelManager`. 
- prev_state.pending_monitors.clear(); - chain_monitor.latest_monitors.lock().unwrap().insert(channel_id, prev_state); - } - let mut monitor_refs = new_hash_map(); - for (channel_id, monitor) in monitors.iter() { - monitor_refs.insert(*channel_id, monitor); - } - - let read_args = ChannelManagerReadArgs { - entropy_source: Arc::clone(&old_node.keys_manager), - node_signer: Arc::clone(&old_node.keys_manager), - signer_provider: Arc::clone(&old_node.keys_manager), - fee_estimator: Arc::clone(&old_node.fee_estimator), - chain_monitor: chain_monitor.clone(), - tx_broadcaster: Arc::clone(&old_node.broadcaster), - router: &router, - message_router: &router, - logger: Arc::clone(&logger), - config: build_node_config(chan_type), - channel_monitors: monitor_refs, - }; - - let manager = <(BlockLocator, ChanMan)>::read(&mut &ser[..], read_args) - .expect("Failed to read manager"); - for (channel_id, mon) in monitors.drain() { - assert_eq!( - chain_monitor.chain_monitor.watch_channel(channel_id, mon), - Ok(ChannelMonitorUpdateStatus::Completed) - ); - } - *chain_monitor.persister.update_ret.lock().unwrap() = *mon_style[node_id as usize].borrow(); - (manager.1, chain_monitor, logger) - }; - - let mut read_pos = 1; // First byte was consumed for initial config (mon_style + chan_type) + let mut read_pos = 1; // First byte was consumed for initial config (persistence styles + chan_type) macro_rules! get_slice { ($len: expr) => {{ let slice_len = $len as usize; @@ -2278,24 +2305,12 @@ pub fn do_test(data: &[u8], out: Out) { // In general, we keep related message groups close together in binary form, allowing // bit-twiddling mutations to have similar effects. This is probably overkill, but no // harm in doing so. 
- 0x00 => { - *mon_style[0].borrow_mut() = ChannelMonitorUpdateStatus::InProgress; - }, - 0x01 => { - *mon_style[1].borrow_mut() = ChannelMonitorUpdateStatus::InProgress; - }, - 0x02 => { - *mon_style[2].borrow_mut() = ChannelMonitorUpdateStatus::InProgress; - }, - 0x04 => { - *mon_style[0].borrow_mut() = ChannelMonitorUpdateStatus::Completed; - }, - 0x05 => { - *mon_style[1].borrow_mut() = ChannelMonitorUpdateStatus::Completed; - }, - 0x06 => { - *mon_style[2].borrow_mut() = ChannelMonitorUpdateStatus::Completed; - }, + 0x00 => nodes[0].set_persistence_style(ChannelMonitorUpdateStatus::InProgress), + 0x01 => nodes[1].set_persistence_style(ChannelMonitorUpdateStatus::InProgress), + 0x02 => nodes[2].set_persistence_style(ChannelMonitorUpdateStatus::InProgress), + 0x04 => nodes[0].set_persistence_style(ChannelMonitorUpdateStatus::Completed), + 0x05 => nodes[1].set_persistence_style(ChannelMonitorUpdateStatus::Completed), + 0x06 => nodes[2].set_persistence_style(ChannelMonitorUpdateStatus::Completed), 0x08 => { for id in &chan_ab_ids { @@ -2471,7 +2486,7 @@ pub fn do_test(data: &[u8], out: Out) { }, 0x80 => { - let mut max_feerate = last_htlc_clear_fee_a; + let mut max_feerate = nodes[0].last_htlc_clear_fee; if matches!(chan_type, ChanType::Legacy) { max_feerate *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE as u32; } @@ -2487,7 +2502,7 @@ pub fn do_test(data: &[u8], out: Out) { nodes[0].timer_tick_occurred(); }, 0x84 => { - let mut max_feerate = last_htlc_clear_fee_b; + let mut max_feerate = nodes[1].last_htlc_clear_fee; if matches!(chan_type, ChanType::Legacy) { max_feerate *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE as u32; } @@ -2503,7 +2518,7 @@ pub fn do_test(data: &[u8], out: Out) { nodes[1].timer_tick_occurred(); }, 0x88 => { - let mut max_feerate = last_htlc_clear_fee_c; + let mut max_feerate = nodes[2].last_htlc_clear_fee; if matches!(chan_type, ChanType::Legacy) { max_feerate *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE as u32; } @@ -2580,28 +2595,28 @@ pub fn 
do_test(data: &[u8], out: Out) { // Sync node by 1 block to cover confirmation of a transaction. 0xa8 => { chain_state.confirm_pending_txs(); - sync_with_chain_state(&chain_state, &nodes[0], &mut node_height_a, Some(1)); + sync_with_chain_state(&mut nodes[0], &chain_state, Some(1)); }, 0xa9 => { chain_state.confirm_pending_txs(); - sync_with_chain_state(&chain_state, &nodes[1], &mut node_height_b, Some(1)); + sync_with_chain_state(&mut nodes[1], &chain_state, Some(1)); }, 0xaa => { chain_state.confirm_pending_txs(); - sync_with_chain_state(&chain_state, &nodes[2], &mut node_height_c, Some(1)); + sync_with_chain_state(&mut nodes[2], &chain_state, Some(1)); }, // Sync node to chain tip to cover confirmation of a transaction post-reorg-risk. 0xab => { chain_state.confirm_pending_txs(); - sync_with_chain_state(&chain_state, &nodes[0], &mut node_height_a, None); + sync_with_chain_state(&mut nodes[0], &chain_state, None); }, 0xac => { chain_state.confirm_pending_txs(); - sync_with_chain_state(&chain_state, &nodes[1], &mut node_height_b, None); + sync_with_chain_state(&mut nodes[1], &chain_state, None); }, 0xad => { chain_state.confirm_pending_txs(); - sync_with_chain_state(&chain_state, &nodes[2], &mut node_height_c, None); + sync_with_chain_state(&mut nodes[2], &chain_state, None); }, 0xb0 | 0xb1 | 0xb2 => { @@ -2617,11 +2632,7 @@ pub fn do_test(data: &[u8], out: Out) { ab_events.clear(); ba_events.clear(); } - let (new_node_a, new_monitor_a, new_logger_a) = - reload_node(&node_a_ser, 0, &nodes[0], v); - nodes[0].node = new_node_a; - nodes[0].monitor = new_monitor_a; - nodes[0].logger = new_logger_a; + nodes[0].reload(v, &out, &router, chan_type); }, 0xb3..=0xbb => { // Restart node B, picking among the in-flight `ChannelMonitor`s to use based on @@ -2640,11 +2651,7 @@ pub fn do_test(data: &[u8], out: Out) { bc_events.clear(); cb_events.clear(); } - let (new_node_b, new_monitor_b, new_logger_b) = - reload_node(&node_b_ser, 1, &nodes[1], v); - nodes[1].node = new_node_b; 
- nodes[1].monitor = new_monitor_b; - nodes[1].logger = new_logger_b; + nodes[1].reload(v, &out, &router, chan_type); }, 0xbc | 0xbd | 0xbe => { // Restart node C, picking among the in-flight `ChannelMonitor`s to use based on @@ -2659,11 +2666,7 @@ pub fn do_test(data: &[u8], out: Out) { bc_events.clear(); cb_events.clear(); } - let (new_node_c, new_monitor_c, new_logger_c) = - reload_node(&node_c_ser, 2, &nodes[2], v); - nodes[2].node = new_node_c; - nodes[2].monitor = new_monitor_c; - nodes[2].logger = new_logger_c; + nodes[2].reload(v, &out, &router, chan_type); }, 0xc0 => nodes[0].keys_manager.disable_supported_ops_for_all_signers(), @@ -2946,24 +2949,18 @@ pub fn do_test(data: &[u8], out: Out) { ); } - last_htlc_clear_fee_a = + nodes[0].last_htlc_clear_fee = nodes[0].fee_estimator.ret_val.load(atomic::Ordering::Acquire); - last_htlc_clear_fee_b = + nodes[1].last_htlc_clear_fee = nodes[1].fee_estimator.ret_val.load(atomic::Ordering::Acquire); - last_htlc_clear_fee_c = + nodes[2].last_htlc_clear_fee = nodes[2].fee_estimator.ret_val.load(atomic::Ordering::Acquire); }, _ => test_return!(), } - if nodes[0].get_and_clear_needs_persistence() { - node_a_ser = nodes[0].encode(); - } - if nodes[1].get_and_clear_needs_persistence() { - node_b_ser = nodes[1].encode(); - } - if nodes[2].get_and_clear_needs_persistence() { - node_c_ser = nodes[2].encode(); + for node in &mut nodes { + node.refresh_serialized_manager(); } } } From a49834ec8e75a6a346d9ab395192d4c3c2aeaeb4 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 21 Apr 2026 16:32:31 +0200 Subject: [PATCH 06/15] Extract chanmon harness node action helpers Lift monitor update, splice, and chain sync actions into named helper functions. This keeps the byte-dispatch loop focused on choosing actions rather than spelling out each operation. 
--- fuzz/src/chanmon_consistency.rs | 299 ++++++++++++++++---------------- 1 file changed, 153 insertions(+), 146 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index de6a824eace..86b89a8f95f 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -56,7 +56,6 @@ use lightning::ln::channelmanager::{ TrustedChannelFeatures, }; use lightning::ln::functional_test_utils::*; -use lightning::ln::funding::{FundingContribution, FundingContributionError, FundingTemplate}; use lightning::ln::inbound_payment::ExpandedKey; use lightning::ln::msgs::{ self, BaseMessageHandler, ChannelMessageHandler, CommitmentUpdate, Init, MessageSendEvent, @@ -1155,6 +1154,147 @@ impl<'a> HarnessNode<'a> { } } +#[derive(Copy, Clone)] +enum MonitorUpdateSelector { + First, + Second, + Last, +} + +fn complete_monitor_update( + monitor: &Arc, chan_id: &ChannelId, selector: MonitorUpdateSelector, +) { + if let Some(state) = monitor.latest_monitors.lock().unwrap().get_mut(chan_id) { + assert!( + state.pending_monitors.windows(2).all(|pair| pair[0].0 < pair[1].0), + "updates should be sorted by id" + ); + let update = match selector { + MonitorUpdateSelector::First => { + if state.pending_monitors.is_empty() { + None + } else { + Some(state.pending_monitors.remove(0)) + } + }, + MonitorUpdateSelector::Second => { + if state.pending_monitors.len() > 1 { + Some(state.pending_monitors.remove(1)) + } else { + None + } + }, + MonitorUpdateSelector::Last => state.pending_monitors.pop(), + }; + if let Some((id, data)) = update { + monitor.chain_monitor.channel_monitor_updated(*chan_id, id).unwrap(); + if id > state.persisted_monitor_id { + state.persisted_monitor_id = id; + state.persisted_monitor = data; + } + } + } +} + +fn complete_all_monitor_updates(monitor: &Arc, chan_id: &ChannelId) { + if let Some(state) = monitor.latest_monitors.lock().unwrap().get_mut(chan_id) { + assert!( + state.pending_monitors.windows(2).all(|pair| 
pair[0].0 < pair[1].0), + "updates should be sorted by id" + ); + for (id, data) in state.pending_monitors.drain(..) { + monitor.chain_monitor.channel_monitor_updated(*chan_id, id).unwrap(); + if id > state.persisted_monitor_id { + state.persisted_monitor_id = id; + state.persisted_monitor = data; + } + } + } +} + +fn sync_with_chain_state( + node: &mut HarnessNode<'_>, chain_state: &ChainState, num_blocks: Option, +) { + let target_height = if let Some(num_blocks) = num_blocks { + std::cmp::min(node.height + num_blocks, chain_state.tip_height()) + } else { + chain_state.tip_height() + }; + while node.height < target_height { + node.height += 1; + let (header, txn) = chain_state.block_at(node.height); + let txdata: Vec<_> = txn.iter().enumerate().map(|(i, tx)| (i + 1, tx)).collect(); + if !txdata.is_empty() { + node.transactions_confirmed(header, &txdata, node.height); + } + node.best_block_updated(header, node.height); + } +} + +fn splice_in(node: &HarnessNode<'_>, counterparty_node_id: &PublicKey, channel_id: &ChannelId) { + let wallet = WalletSync::new(&node.wallet, Arc::clone(&node.logger)); + match node.splice_channel(channel_id, counterparty_node_id) { + Ok(funding_template) => { + let feerate = funding_template + .min_rbf_feerate() + .unwrap_or(node.fee_estimator.feerate_sat_per_kw()); + if let Ok(contribution) = funding_template.splice_in_sync( + Amount::from_sat(10_000), + feerate, + FeeRate::MAX, + &wallet, + ) { + let _ = + node.funding_contributed(channel_id, counterparty_node_id, contribution, None); + } + }, + Err(e) => { + assert!( + matches!(e, APIError::APIMisuseError { ref err } if err.contains("splice")), + "{:?}", + e + ); + }, + } +} + +fn splice_out(node: &HarnessNode<'_>, counterparty_node_id: &PublicKey, channel_id: &ChannelId) { + // We conditionally splice out `MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS` only when the node + // has double the balance required to send a payment upon a `0xff` byte. 
We do this to + // ensure there's always liquidity available for a payment to succeed then. + let outbound_capacity_msat = node + .list_channels() + .iter() + .find(|chan| chan.channel_id == *channel_id) + .map(|chan| chan.outbound_capacity_msat) + .unwrap(); + if outbound_capacity_msat < 20_000_000 { + return; + } + match node.splice_channel(channel_id, counterparty_node_id) { + Ok(funding_template) => { + let feerate = funding_template + .min_rbf_feerate() + .unwrap_or(node.fee_estimator.feerate_sat_per_kw()); + let outputs = vec![TxOut { + value: Amount::from_sat(MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS), + script_pubkey: node.wallet.get_change_script().unwrap(), + }]; + if let Ok(contribution) = funding_template.splice_out(outputs, feerate, FeeRate::MAX) { + let _ = + node.funding_contributed(channel_id, counterparty_node_id, contribution, None); + } + }, + Err(e) => { + assert!( + matches!(e, APIError::APIMisuseError { ref err } if err.contains("splice")), + "{:?}", + e + ); + }, + } +} + fn build_node_config(chan_type: ChanType) -> UserConfig { let mut config = UserConfig::default(); config.channel_config.forwarding_fee_proportional_millionths = 0; @@ -1493,24 +1633,6 @@ pub fn do_test(data: &[u8], out: Out) { nodes[1].broadcaster.txn_broadcasted.borrow_mut().clear(); nodes[2].broadcaster.txn_broadcasted.borrow_mut().clear(); - let sync_with_chain_state = - |node: &mut HarnessNode<'_>, chain_state: &ChainState, num_blocks: Option| { - let target_height = if let Some(num_blocks) = num_blocks { - std::cmp::min(node.height + num_blocks, chain_state.tip_height()) - } else { - chain_state.tip_height() - }; - while node.height < target_height { - node.height += 1; - let (header, txn) = chain_state.block_at(node.height); - let txdata: Vec<_> = txn.iter().enumerate().map(|(i, tx)| (i + 1, tx)).collect(); - if !txdata.is_empty() { - node.transactions_confirmed(header, &txdata, node.height); - } - node.best_block_updated(header, node.height); - } - }; - // Sync all nodes to 
tip to lock the funding. sync_with_chain_state(&mut nodes[0], &chain_state, None); sync_with_chain_state(&mut nodes[1], &chain_state, None); @@ -2091,121 +2213,6 @@ pub fn do_test(data: &[u8], out: Out) { }}; } - let complete_first = |v: &mut Vec<_>| if !v.is_empty() { Some(v.remove(0)) } else { None }; - let complete_second = |v: &mut Vec<_>| if v.len() > 1 { Some(v.remove(1)) } else { None }; - let complete_monitor_update = - |monitor: &Arc, - chan_funding, - compl_selector: &dyn Fn(&mut Vec<(u64, Vec)>) -> Option<(u64, Vec)>| { - if let Some(state) = monitor.latest_monitors.lock().unwrap().get_mut(chan_funding) { - assert!( - state.pending_monitors.windows(2).all(|pair| pair[0].0 < pair[1].0), - "updates should be sorted by id" - ); - if let Some((id, data)) = compl_selector(&mut state.pending_monitors) { - monitor.chain_monitor.channel_monitor_updated(*chan_funding, id).unwrap(); - if id > state.persisted_monitor_id { - state.persisted_monitor_id = id; - state.persisted_monitor = data; - } - } - } - }; - let complete_all_monitor_updates = |monitor: &Arc, chan_id| { - if let Some(state) = monitor.latest_monitors.lock().unwrap().get_mut(chan_id) { - assert!( - state.pending_monitors.windows(2).all(|pair| pair[0].0 < pair[1].0), - "updates should be sorted by id" - ); - for (id, data) in state.pending_monitors.drain(..) 
{ - monitor.chain_monitor.channel_monitor_updated(*chan_id, id).unwrap(); - if id > state.persisted_monitor_id { - state.persisted_monitor_id = id; - state.persisted_monitor = data; - } - } - } - }; - - let splice_channel = - |node: &HarnessNode<'_>, - counterparty_node_id: &PublicKey, - channel_id: &ChannelId, - f: &dyn Fn( - FundingTemplate, - ) -> Result| { - match node.splice_channel(channel_id, counterparty_node_id) { - Ok(funding_template) => { - if let Ok(contribution) = f(funding_template) { - let _ = node.funding_contributed( - channel_id, - counterparty_node_id, - contribution, - None, - ); - } - }, - Err(e) => { - assert!( - matches!(e, APIError::APIMisuseError { ref err } if err.contains("splice")), - "{:?}", - e - ); - }, - } - }; - - let splice_in = |node: &HarnessNode<'_>, - counterparty_node_id: &PublicKey, - channel_id: &ChannelId| { - let wallet = WalletSync::new(&node.wallet, Arc::clone(&node.logger)); - let funding_feerate_sat_per_kw = node.fee_estimator.feerate_sat_per_kw(); - splice_channel( - node, - counterparty_node_id, - channel_id, - &move |funding_template: FundingTemplate| { - let feerate = - funding_template.min_rbf_feerate().unwrap_or(funding_feerate_sat_per_kw); - funding_template.splice_in_sync( - Amount::from_sat(10_000), - feerate, - FeeRate::MAX, - &wallet, - ) - }, - ); - }; - - let splice_out = |node: &HarnessNode<'_>, - counterparty_node_id: &PublicKey, - channel_id: &ChannelId| { - let outbound_capacity_msat = node - .list_channels() - .iter() - .find(|chan| chan.channel_id == *channel_id) - .map(|chan| chan.outbound_capacity_msat) - .unwrap(); - if outbound_capacity_msat < 20_000_000 { - return; - } - let funding_feerate_sat_per_kw = node.fee_estimator.feerate_sat_per_kw(); - splice_channel( - node, - counterparty_node_id, - channel_id, - &move |funding_template: FundingTemplate| { - let feerate = - funding_template.min_rbf_feerate().unwrap_or(funding_feerate_sat_per_kw); - let outputs = vec![TxOut { - value: 
Amount::from_sat(MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS), - script_pubkey: node.wallet.get_change_script().unwrap(), - }]; - funding_template.splice_out(outputs, feerate, FeeRate::MAX) - }, - ); - }; - let send = |source_idx: usize, dest_idx: usize, dest_chan_id, amt, payment_ctr: &mut u64| { let source = &nodes[source_idx]; @@ -2737,65 +2744,65 @@ pub fn do_test(data: &[u8], out: Out) { 0xf0 => { for id in &chan_ab_ids { - complete_monitor_update(&nodes[0].monitor, id, &complete_first); + complete_monitor_update(&nodes[0].monitor, id, MonitorUpdateSelector::First); } }, 0xf1 => { for id in &chan_ab_ids { - complete_monitor_update(&nodes[0].monitor, id, &complete_second); + complete_monitor_update(&nodes[0].monitor, id, MonitorUpdateSelector::Second); } }, 0xf2 => { for id in &chan_ab_ids { - complete_monitor_update(&nodes[0].monitor, id, &Vec::pop); + complete_monitor_update(&nodes[0].monitor, id, MonitorUpdateSelector::Last); } }, 0xf4 => { for id in &chan_ab_ids { - complete_monitor_update(&nodes[1].monitor, id, &complete_first); + complete_monitor_update(&nodes[1].monitor, id, MonitorUpdateSelector::First); } }, 0xf5 => { for id in &chan_ab_ids { - complete_monitor_update(&nodes[1].monitor, id, &complete_second); + complete_monitor_update(&nodes[1].monitor, id, MonitorUpdateSelector::Second); } }, 0xf6 => { for id in &chan_ab_ids { - complete_monitor_update(&nodes[1].monitor, id, &Vec::pop); + complete_monitor_update(&nodes[1].monitor, id, MonitorUpdateSelector::Last); } }, 0xf8 => { for id in &chan_bc_ids { - complete_monitor_update(&nodes[1].monitor, id, &complete_first); + complete_monitor_update(&nodes[1].monitor, id, MonitorUpdateSelector::First); } }, 0xf9 => { for id in &chan_bc_ids { - complete_monitor_update(&nodes[1].monitor, id, &complete_second); + complete_monitor_update(&nodes[1].monitor, id, MonitorUpdateSelector::Second); } }, 0xfa => { for id in &chan_bc_ids { - complete_monitor_update(&nodes[1].monitor, id, &Vec::pop); + 
complete_monitor_update(&nodes[1].monitor, id, MonitorUpdateSelector::Last); } }, 0xfc => { for id in &chan_bc_ids { - complete_monitor_update(&nodes[2].monitor, id, &complete_first); + complete_monitor_update(&nodes[2].monitor, id, MonitorUpdateSelector::First); } }, 0xfd => { for id in &chan_bc_ids { - complete_monitor_update(&nodes[2].monitor, id, &complete_second); + complete_monitor_update(&nodes[2].monitor, id, MonitorUpdateSelector::Second); } }, 0xfe => { for id in &chan_bc_ids { - complete_monitor_update(&nodes[2].monitor, id, &Vec::pop); + complete_monitor_update(&nodes[2].monitor, id, MonitorUpdateSelector::Last); } }, From 4bf18e92409d9beb0a60fd719b160a258bd74763 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 21 Apr 2026 16:20:57 +0200 Subject: [PATCH 07/15] Extract chanmon harness node operations Move the action helpers onto `HarnessNode` methods. Node-local operations now live with the state they mutate, which reduces argument threading through the fuzz loop. --- fuzz/src/chanmon_consistency.rs | 435 +++++++++++++++----------------- 1 file changed, 210 insertions(+), 225 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 86b89a8f95f..4872e23fca6 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -1053,6 +1053,22 @@ impl<'a> HarnessNode<'a> { self.persistence_style = style; } + fn complete_all_monitor_updates(&self, chan_id: &ChannelId) { + if let Some(state) = self.monitor.latest_monitors.lock().unwrap().get_mut(chan_id) { + assert!( + state.pending_monitors.windows(2).all(|pair| pair[0].0 < pair[1].0), + "updates should be sorted by id" + ); + for (id, data) in state.pending_monitors.drain(..) 
{ + self.monitor.chain_monitor.channel_monitor_updated(*chan_id, id).unwrap(); + if id > state.persisted_monitor_id { + state.persisted_monitor_id = id; + state.persisted_monitor = data; + } + } + } + } + fn complete_all_pending_monitor_updates(&self) { for (channel_id, state) in self.monitor.latest_monitors.lock().unwrap().iter_mut() { for (id, data) in state.pending_monitors.drain(..) { @@ -1065,12 +1081,160 @@ impl<'a> HarnessNode<'a> { } } + fn complete_monitor_update(&self, chan_id: &ChannelId, selector: MonitorUpdateSelector) { + if let Some(state) = self.monitor.latest_monitors.lock().unwrap().get_mut(chan_id) { + assert!( + state.pending_monitors.windows(2).all(|pair| pair[0].0 < pair[1].0), + "updates should be sorted by id" + ); + let update = match selector { + MonitorUpdateSelector::First => { + if state.pending_monitors.is_empty() { + None + } else { + Some(state.pending_monitors.remove(0)) + } + }, + MonitorUpdateSelector::Second => { + if state.pending_monitors.len() > 1 { + Some(state.pending_monitors.remove(1)) + } else { + None + } + }, + MonitorUpdateSelector::Last => state.pending_monitors.pop(), + }; + if let Some((id, data)) = update { + self.monitor.chain_monitor.channel_monitor_updated(*chan_id, id).unwrap(); + if id > state.persisted_monitor_id { + state.persisted_monitor_id = id; + state.persisted_monitor = data; + } + } + } + } + + fn sync_with_chain_state(&mut self, chain_state: &ChainState, num_blocks: Option) { + let target_height = if let Some(num_blocks) = num_blocks { + std::cmp::min(self.height + num_blocks, chain_state.tip_height()) + } else { + chain_state.tip_height() + }; + + while self.height < target_height { + self.height += 1; + let (header, txn) = chain_state.block_at(self.height); + let txdata: Vec<_> = txn.iter().enumerate().map(|(i, tx)| (i + 1, tx)).collect(); + if !txdata.is_empty() { + self.node.transactions_confirmed(header, &txdata, self.height); + } + self.node.best_block_updated(header, self.height); + } + } + fn 
refresh_serialized_manager(&mut self) { if self.node.get_and_clear_needs_persistence() { self.serialized_manager = self.node.encode(); } } + fn bump_fee_estimate(&mut self, chan_type: ChanType) { + let mut max_feerate = self.last_htlc_clear_fee; + if matches!(chan_type, ChanType::Legacy) { + max_feerate *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE as u32; + } + if self.fee_estimator.ret_val.fetch_add(250, atomic::Ordering::AcqRel) + 250 > max_feerate { + self.fee_estimator.ret_val.store(max_feerate, atomic::Ordering::Release); + } + self.node.timer_tick_occurred(); + } + + fn reset_fee_estimate(&self) { + self.fee_estimator.ret_val.store(253, atomic::Ordering::Release); + self.node.timer_tick_occurred(); + } + + fn current_feerate_sat_per_kw(&self) -> FeeRate { + self.fee_estimator.feerate_sat_per_kw() + } + + fn record_last_htlc_clear_fee(&mut self) { + self.last_htlc_clear_fee = self.fee_estimator.ret_val.load(atomic::Ordering::Acquire); + } + + fn splice_in(&self, counterparty_node_id: &PublicKey, channel_id: &ChannelId) { + let wallet = WalletSync::new(&self.wallet, Arc::clone(&self.logger)); + match self.node.splice_channel(channel_id, counterparty_node_id) { + Ok(funding_template) => { + let feerate = + funding_template.min_rbf_feerate().unwrap_or(self.current_feerate_sat_per_kw()); + if let Ok(contribution) = funding_template.splice_in_sync( + Amount::from_sat(10_000), + feerate, + FeeRate::MAX, + &wallet, + ) { + let _ = self.node.funding_contributed( + channel_id, + counterparty_node_id, + contribution, + None, + ); + } + }, + Err(e) => { + assert!( + matches!(e, APIError::APIMisuseError { ref err } if err.contains("splice")), + "{:?}", + e + ); + }, + } + } + + fn splice_out(&self, counterparty_node_id: &PublicKey, channel_id: &ChannelId) { + // We conditionally splice out `MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS` only when the node + // has double the balance required to send a payment upon a `0xff` byte. 
We do this to + // ensure there's always liquidity available for a payment to succeed then. + let outbound_capacity_msat = self + .node + .list_channels() + .iter() + .find(|chan| chan.channel_id == *channel_id) + .map(|chan| chan.outbound_capacity_msat) + .unwrap(); + if outbound_capacity_msat < 20_000_000 { + return; + } + match self.node.splice_channel(channel_id, counterparty_node_id) { + Ok(funding_template) => { + let feerate = + funding_template.min_rbf_feerate().unwrap_or(self.current_feerate_sat_per_kw()); + let outputs = vec![TxOut { + value: Amount::from_sat(MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS), + script_pubkey: self.wallet.get_change_script().unwrap(), + }]; + if let Ok(contribution) = + funding_template.splice_out(outputs, feerate, FeeRate::MAX) + { + let _ = self.node.funding_contributed( + channel_id, + counterparty_node_id, + contribution, + None, + ); + } + }, + Err(e) => { + assert!( + matches!(e, APIError::APIMisuseError { ref err } if err.contains("splice")), + "{:?}", + e + ); + }, + } + } + fn reload( &mut self, use_old_mons: u8, out: &Out, router: &'a FuzzRouter, chan_type: ChanType, ) { @@ -1161,140 +1325,6 @@ enum MonitorUpdateSelector { Last, } -fn complete_monitor_update( - monitor: &Arc, chan_id: &ChannelId, selector: MonitorUpdateSelector, -) { - if let Some(state) = monitor.latest_monitors.lock().unwrap().get_mut(chan_id) { - assert!( - state.pending_monitors.windows(2).all(|pair| pair[0].0 < pair[1].0), - "updates should be sorted by id" - ); - let update = match selector { - MonitorUpdateSelector::First => { - if state.pending_monitors.is_empty() { - None - } else { - Some(state.pending_monitors.remove(0)) - } - }, - MonitorUpdateSelector::Second => { - if state.pending_monitors.len() > 1 { - Some(state.pending_monitors.remove(1)) - } else { - None - } - }, - MonitorUpdateSelector::Last => state.pending_monitors.pop(), - }; - if let Some((id, data)) = update { - monitor.chain_monitor.channel_monitor_updated(*chan_id, id).unwrap(); - 
if id > state.persisted_monitor_id { - state.persisted_monitor_id = id; - state.persisted_monitor = data; - } - } - } -} - -fn complete_all_monitor_updates(monitor: &Arc, chan_id: &ChannelId) { - if let Some(state) = monitor.latest_monitors.lock().unwrap().get_mut(chan_id) { - assert!( - state.pending_monitors.windows(2).all(|pair| pair[0].0 < pair[1].0), - "updates should be sorted by id" - ); - for (id, data) in state.pending_monitors.drain(..) { - monitor.chain_monitor.channel_monitor_updated(*chan_id, id).unwrap(); - if id > state.persisted_monitor_id { - state.persisted_monitor_id = id; - state.persisted_monitor = data; - } - } - } -} - -fn sync_with_chain_state( - node: &mut HarnessNode<'_>, chain_state: &ChainState, num_blocks: Option, -) { - let target_height = if let Some(num_blocks) = num_blocks { - std::cmp::min(node.height + num_blocks, chain_state.tip_height()) - } else { - chain_state.tip_height() - }; - while node.height < target_height { - node.height += 1; - let (header, txn) = chain_state.block_at(node.height); - let txdata: Vec<_> = txn.iter().enumerate().map(|(i, tx)| (i + 1, tx)).collect(); - if !txdata.is_empty() { - node.transactions_confirmed(header, &txdata, node.height); - } - node.best_block_updated(header, node.height); - } -} - -fn splice_in(node: &HarnessNode<'_>, counterparty_node_id: &PublicKey, channel_id: &ChannelId) { - let wallet = WalletSync::new(&node.wallet, Arc::clone(&node.logger)); - match node.splice_channel(channel_id, counterparty_node_id) { - Ok(funding_template) => { - let feerate = funding_template - .min_rbf_feerate() - .unwrap_or(node.fee_estimator.feerate_sat_per_kw()); - if let Ok(contribution) = funding_template.splice_in_sync( - Amount::from_sat(10_000), - feerate, - FeeRate::MAX, - &wallet, - ) { - let _ = - node.funding_contributed(channel_id, counterparty_node_id, contribution, None); - } - }, - Err(e) => { - assert!( - matches!(e, APIError::APIMisuseError { ref err } if err.contains("splice")), - "{:?}", - e 
- ); - }, - } -} - -fn splice_out(node: &HarnessNode<'_>, counterparty_node_id: &PublicKey, channel_id: &ChannelId) { - // We conditionally splice out `MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS` only when the node - // has double the balance required to send a payment upon a `0xff` byte. We do this to - // ensure there's always liquidity available for a payment to succeed then. - let outbound_capacity_msat = node - .list_channels() - .iter() - .find(|chan| chan.channel_id == *channel_id) - .map(|chan| chan.outbound_capacity_msat) - .unwrap(); - if outbound_capacity_msat < 20_000_000 { - return; - } - match node.splice_channel(channel_id, counterparty_node_id) { - Ok(funding_template) => { - let feerate = funding_template - .min_rbf_feerate() - .unwrap_or(node.fee_estimator.feerate_sat_per_kw()); - let outputs = vec![TxOut { - value: Amount::from_sat(MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS), - script_pubkey: node.wallet.get_change_script().unwrap(), - }]; - if let Ok(contribution) = funding_template.splice_out(outputs, feerate, FeeRate::MAX) { - let _ = - node.funding_contributed(channel_id, counterparty_node_id, contribution, None); - } - }, - Err(e) => { - assert!( - matches!(e, APIError::APIMisuseError { ref err } if err.contains("splice")), - "{:?}", - e - ); - }, - } -} - fn build_node_config(chan_type: ChanType) -> UserConfig { let mut config = UserConfig::default(); config.channel_config.forwarding_fee_proportional_millionths = 0; @@ -1634,9 +1664,9 @@ pub fn do_test(data: &[u8], out: Out) { nodes[2].broadcaster.txn_broadcasted.borrow_mut().clear(); // Sync all nodes to tip to lock the funding. 
- sync_with_chain_state(&mut nodes[0], &chain_state, None); - sync_with_chain_state(&mut nodes[1], &chain_state, None); - sync_with_chain_state(&mut nodes[2], &chain_state, None); + nodes[0].sync_with_chain_state(&chain_state, None); + nodes[1].sync_with_chain_state(&chain_state, None); + nodes[2].sync_with_chain_state(&chain_state, None); lock_fundings(&nodes); @@ -2321,22 +2351,22 @@ pub fn do_test(data: &[u8], out: Out) { 0x08 => { for id in &chan_ab_ids { - complete_all_monitor_updates(&nodes[0].monitor, id); + nodes[0].complete_all_monitor_updates(id); } }, 0x09 => { for id in &chan_ab_ids { - complete_all_monitor_updates(&nodes[1].monitor, id); + nodes[1].complete_all_monitor_updates(id); } }, 0x0a => { for id in &chan_bc_ids { - complete_all_monitor_updates(&nodes[1].monitor, id); + nodes[1].complete_all_monitor_updates(id); } }, 0x0b => { for id in &chan_bc_ids { - complete_all_monitor_updates(&nodes[2].monitor, id); + nodes[2].complete_all_monitor_updates(id); } }, @@ -2492,82 +2522,40 @@ pub fn do_test(data: &[u8], out: Out) { send_mpp_direct(0, 1, &[chan_a_id, chan_a_id, chan_a_id], 1_000_000, &mut p_ctr) }, - 0x80 => { - let mut max_feerate = nodes[0].last_htlc_clear_fee; - if matches!(chan_type, ChanType::Legacy) { - max_feerate *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE as u32; - } - if nodes[0].fee_estimator.ret_val.fetch_add(250, atomic::Ordering::AcqRel) + 250 - > max_feerate - { - nodes[0].fee_estimator.ret_val.store(max_feerate, atomic::Ordering::Release); - } - nodes[0].timer_tick_occurred(); - }, - 0x81 => { - nodes[0].fee_estimator.ret_val.store(253, atomic::Ordering::Release); - nodes[0].timer_tick_occurred(); - }, - 0x84 => { - let mut max_feerate = nodes[1].last_htlc_clear_fee; - if matches!(chan_type, ChanType::Legacy) { - max_feerate *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE as u32; - } - if nodes[1].fee_estimator.ret_val.fetch_add(250, atomic::Ordering::AcqRel) + 250 - > max_feerate - { - nodes[1].fee_estimator.ret_val.store(max_feerate, 
atomic::Ordering::Release); - } - nodes[1].timer_tick_occurred(); - }, - 0x85 => { - nodes[1].fee_estimator.ret_val.store(253, atomic::Ordering::Release); - nodes[1].timer_tick_occurred(); - }, - 0x88 => { - let mut max_feerate = nodes[2].last_htlc_clear_fee; - if matches!(chan_type, ChanType::Legacy) { - max_feerate *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE as u32; - } - if nodes[2].fee_estimator.ret_val.fetch_add(250, atomic::Ordering::AcqRel) + 250 - > max_feerate - { - nodes[2].fee_estimator.ret_val.store(max_feerate, atomic::Ordering::Release); - } - nodes[2].timer_tick_occurred(); - }, - 0x89 => { - nodes[2].fee_estimator.ret_val.store(253, atomic::Ordering::Release); - nodes[2].timer_tick_occurred(); - }, + 0x80 => nodes[0].bump_fee_estimate(chan_type), + 0x81 => nodes[0].reset_fee_estimate(), + 0x84 => nodes[1].bump_fee_estimate(chan_type), + 0x85 => nodes[1].reset_fee_estimate(), + 0x88 => nodes[2].bump_fee_estimate(chan_type), + 0x89 => nodes[2].reset_fee_estimate(), 0xa0 => { if !cfg!(splicing) { test_return!(); } let cp_node_id = nodes[1].get_our_node_id(); - splice_in(&nodes[0], &cp_node_id, &chan_a_id); + nodes[0].splice_in(&cp_node_id, &chan_a_id); }, 0xa1 => { if !cfg!(splicing) { test_return!(); } let cp_node_id = nodes[0].get_our_node_id(); - splice_in(&nodes[1], &cp_node_id, &chan_a_id); + nodes[1].splice_in(&cp_node_id, &chan_a_id); }, 0xa2 => { if !cfg!(splicing) { test_return!(); } let cp_node_id = nodes[2].get_our_node_id(); - splice_in(&nodes[1], &cp_node_id, &chan_b_id); + nodes[1].splice_in(&cp_node_id, &chan_b_id); }, 0xa3 => { if !cfg!(splicing) { test_return!(); } let cp_node_id = nodes[1].get_our_node_id(); - splice_in(&nodes[2], &cp_node_id, &chan_b_id); + nodes[2].splice_in(&cp_node_id, &chan_b_id); }, 0xa4 => { @@ -2575,55 +2563,55 @@ pub fn do_test(data: &[u8], out: Out) { test_return!(); } let cp_node_id = nodes[1].get_our_node_id(); - splice_out(&nodes[0], &cp_node_id, &chan_a_id); + nodes[0].splice_out(&cp_node_id, &chan_a_id); 
}, 0xa5 => { if !cfg!(splicing) { test_return!(); } let cp_node_id = nodes[0].get_our_node_id(); - splice_out(&nodes[1], &cp_node_id, &chan_a_id); + nodes[1].splice_out(&cp_node_id, &chan_a_id); }, 0xa6 => { if !cfg!(splicing) { test_return!(); } let cp_node_id = nodes[2].get_our_node_id(); - splice_out(&nodes[1], &cp_node_id, &chan_b_id); + nodes[1].splice_out(&cp_node_id, &chan_b_id); }, 0xa7 => { if !cfg!(splicing) { test_return!(); } let cp_node_id = nodes[1].get_our_node_id(); - splice_out(&nodes[2], &cp_node_id, &chan_b_id); + nodes[2].splice_out(&cp_node_id, &chan_b_id); }, // Sync node by 1 block to cover confirmation of a transaction. 0xa8 => { chain_state.confirm_pending_txs(); - sync_with_chain_state(&mut nodes[0], &chain_state, Some(1)); + nodes[0].sync_with_chain_state(&chain_state, Some(1)); }, 0xa9 => { chain_state.confirm_pending_txs(); - sync_with_chain_state(&mut nodes[1], &chain_state, Some(1)); + nodes[1].sync_with_chain_state(&chain_state, Some(1)); }, 0xaa => { chain_state.confirm_pending_txs(); - sync_with_chain_state(&mut nodes[2], &chain_state, Some(1)); + nodes[2].sync_with_chain_state(&chain_state, Some(1)); }, // Sync node to chain tip to cover confirmation of a transaction post-reorg-risk. 
0xab => { chain_state.confirm_pending_txs(); - sync_with_chain_state(&mut nodes[0], &chain_state, None); + nodes[0].sync_with_chain_state(&chain_state, None); }, 0xac => { chain_state.confirm_pending_txs(); - sync_with_chain_state(&mut nodes[1], &chain_state, None); + nodes[1].sync_with_chain_state(&chain_state, None); }, 0xad => { chain_state.confirm_pending_txs(); - sync_with_chain_state(&mut nodes[2], &chain_state, None); + nodes[2].sync_with_chain_state(&chain_state, None); }, 0xb0 | 0xb1 | 0xb2 => { @@ -2744,65 +2732,65 @@ pub fn do_test(data: &[u8], out: Out) { 0xf0 => { for id in &chan_ab_ids { - complete_monitor_update(&nodes[0].monitor, id, MonitorUpdateSelector::First); + nodes[0].complete_monitor_update(id, MonitorUpdateSelector::First); } }, 0xf1 => { for id in &chan_ab_ids { - complete_monitor_update(&nodes[0].monitor, id, MonitorUpdateSelector::Second); + nodes[0].complete_monitor_update(id, MonitorUpdateSelector::Second); } }, 0xf2 => { for id in &chan_ab_ids { - complete_monitor_update(&nodes[0].monitor, id, MonitorUpdateSelector::Last); + nodes[0].complete_monitor_update(id, MonitorUpdateSelector::Last); } }, 0xf4 => { for id in &chan_ab_ids { - complete_monitor_update(&nodes[1].monitor, id, MonitorUpdateSelector::First); + nodes[1].complete_monitor_update(id, MonitorUpdateSelector::First); } }, 0xf5 => { for id in &chan_ab_ids { - complete_monitor_update(&nodes[1].monitor, id, MonitorUpdateSelector::Second); + nodes[1].complete_monitor_update(id, MonitorUpdateSelector::Second); } }, 0xf6 => { for id in &chan_ab_ids { - complete_monitor_update(&nodes[1].monitor, id, MonitorUpdateSelector::Last); + nodes[1].complete_monitor_update(id, MonitorUpdateSelector::Last); } }, 0xf8 => { for id in &chan_bc_ids { - complete_monitor_update(&nodes[1].monitor, id, MonitorUpdateSelector::First); + nodes[1].complete_monitor_update(id, MonitorUpdateSelector::First); } }, 0xf9 => { for id in &chan_bc_ids { - complete_monitor_update(&nodes[1].monitor, id, 
MonitorUpdateSelector::Second); + nodes[1].complete_monitor_update(id, MonitorUpdateSelector::Second); } }, 0xfa => { for id in &chan_bc_ids { - complete_monitor_update(&nodes[1].monitor, id, MonitorUpdateSelector::Last); + nodes[1].complete_monitor_update(id, MonitorUpdateSelector::Last); } }, 0xfc => { for id in &chan_bc_ids { - complete_monitor_update(&nodes[2].monitor, id, MonitorUpdateSelector::First); + nodes[2].complete_monitor_update(id, MonitorUpdateSelector::First); } }, 0xfd => { for id in &chan_bc_ids { - complete_monitor_update(&nodes[2].monitor, id, MonitorUpdateSelector::Second); + nodes[2].complete_monitor_update(id, MonitorUpdateSelector::Second); } }, 0xfe => { for id in &chan_bc_ids { - complete_monitor_update(&nodes[2].monitor, id, MonitorUpdateSelector::Last); + nodes[2].complete_monitor_update(id, MonitorUpdateSelector::Last); } }, @@ -2860,12 +2848,12 @@ pub fn do_test(data: &[u8], out: Out) { } // Next, make sure no monitor updates are pending for id in &chan_ab_ids { - complete_all_monitor_updates(&nodes[0].monitor, id); - complete_all_monitor_updates(&nodes[1].monitor, id); + nodes[0].complete_all_monitor_updates(id); + nodes[1].complete_all_monitor_updates(id); } for id in &chan_bc_ids { - complete_all_monitor_updates(&nodes[1].monitor, id); - complete_all_monitor_updates(&nodes[2].monitor, id); + nodes[1].complete_all_monitor_updates(id); + nodes[2].complete_all_monitor_updates(id); } // Then, make sure any current forwards make their way to their destination if process_msg_events!(0, false, ProcessMessages::AllMessages) { @@ -2956,12 +2944,9 @@ pub fn do_test(data: &[u8], out: Out) { ); } - nodes[0].last_htlc_clear_fee = - nodes[0].fee_estimator.ret_val.load(atomic::Ordering::Acquire); - nodes[1].last_htlc_clear_fee = - nodes[1].fee_estimator.ret_val.load(atomic::Ordering::Acquire); - nodes[2].last_htlc_clear_fee = - nodes[2].fee_estimator.ret_val.load(atomic::Ordering::Acquire); + nodes[0].record_last_htlc_clear_fee(); + 
nodes[1].record_last_htlc_clear_fee(); + nodes[2].record_last_htlc_clear_fee(); }, _ => test_return!(), } From 60bff763e91252f8ce2a80ff9b777f2f3eaaf8a4 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 28 Apr 2026 11:40:02 +0200 Subject: [PATCH 08/15] Route chanmon messages through EventQueues Replace the four directional message vectors with one queue owner. The fuzz loop now uses that owner at send, receive, drain, and reload sites while preserving the existing routing behavior. --- fuzz/src/chanmon_consistency.rs | 506 ++++++++++++++++++++++++-------- 1 file changed, 385 insertions(+), 121 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 4872e23fca6..67f5bf9db2e 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -1325,6 +1325,19 @@ enum MonitorUpdateSelector { Last, } +struct EventQueues { + ab: Vec, + ba: Vec, + bc: Vec, + cb: Vec, +} + +impl EventQueues { + fn new() -> Self { + Self { ab: Vec::new(), ba: Vec::new(), bc: Vec::new(), cb: Vec::new() } + } +} + fn build_node_config(chan_type: ChanType) -> UserConfig { let mut config = UserConfig::default(); config.channel_config.forwarding_fee_proportional_millionths = 0; @@ -1680,18 +1693,12 @@ pub fn do_test(data: &[u8], out: Out) { let node_c_chans = nodes[2].list_usable_channels(); [node_c_chans[0].channel_id, node_c_chans[1].channel_id, node_c_chans[2].channel_id] }; - // Keep old names for backward compatibility in existing code let chan_a_id = chan_ab_ids[0]; let chan_b_id = chan_bc_ids[0]; - - let mut p_ctr: u64 = 0; - let mut peers_ab_disconnected = false; let mut peers_bc_disconnected = false; - let mut ab_events = Vec::new(); - let mut ba_events = Vec::new(); - let mut bc_events = Vec::new(); - let mut cb_events = Vec::new(); + let mut queues = EventQueues::new(); + let mut p_ctr: u64 = 0; for node in &mut nodes { node.serialized_manager = node.encode(); @@ -1724,95 +1731,175 @@ pub fn do_test(data: &[u8], out: Out) 
{ } loop { - // Push any events from Node B onto ba_events and bc_events + // Push any events from Node B onto queues.ba and queues.bc macro_rules! push_excess_b_events { - ($excess_events: expr, $expect_drop_node: expr) => { { + ($excess_events: expr, $expect_drop_node: expr) => {{ let a_id = nodes[0].get_our_node_id(); let expect_drop_node: Option = $expect_drop_node; - let expect_drop_id = if let Some(id) = expect_drop_node { Some(nodes[id].get_our_node_id()) } else { None }; + let expect_drop_id = if let Some(id) = expect_drop_node { + Some(nodes[id].get_our_node_id()) + } else { + None + }; for event in $excess_events { let push_a = match event { MessageSendEvent::UpdateHTLCs { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } + if Some(*node_id) == expect_drop_id { + panic!( + "peer_disconnected should drop msgs bound for the disconnected peer" + ); + } *node_id == a_id }, MessageSendEvent::SendRevokeAndACK { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } + if Some(*node_id) == expect_drop_id { + panic!( + "peer_disconnected should drop msgs bound for the disconnected peer" + ); + } *node_id == a_id }, MessageSendEvent::SendChannelReestablish { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } + if Some(*node_id) == expect_drop_id { + panic!( + "peer_disconnected should drop msgs bound for the disconnected peer" + ); + } *node_id == a_id }, MessageSendEvent::SendStfu { ref node_id, .. 
} => { - if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } + if Some(*node_id) == expect_drop_id { + panic!( + "peer_disconnected should drop msgs bound for the disconnected peer" + ); + } *node_id == a_id }, MessageSendEvent::SendSpliceInit { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } + if Some(*node_id) == expect_drop_id { + panic!( + "peer_disconnected should drop msgs bound for the disconnected peer" + ); + } *node_id == a_id }, MessageSendEvent::SendSpliceAck { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } + if Some(*node_id) == expect_drop_id { + panic!( + "peer_disconnected should drop msgs bound for the disconnected peer" + ); + } *node_id == a_id }, MessageSendEvent::SendSpliceLocked { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } + if Some(*node_id) == expect_drop_id { + panic!( + "peer_disconnected should drop msgs bound for the disconnected peer" + ); + } *node_id == a_id }, MessageSendEvent::SendTxAddInput { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } + if Some(*node_id) == expect_drop_id { + panic!( + "peer_disconnected should drop msgs bound for the disconnected peer" + ); + } *node_id == a_id }, MessageSendEvent::SendTxAddOutput { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } + if Some(*node_id) == expect_drop_id { + panic!( + "peer_disconnected should drop msgs bound for the disconnected peer" + ); + } *node_id == a_id }, MessageSendEvent::SendTxRemoveInput { ref node_id, .. 
} => { - if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } + if Some(*node_id) == expect_drop_id { + panic!( + "peer_disconnected should drop msgs bound for the disconnected peer" + ); + } *node_id == a_id }, MessageSendEvent::SendTxRemoveOutput { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } + if Some(*node_id) == expect_drop_id { + panic!( + "peer_disconnected should drop msgs bound for the disconnected peer" + ); + } *node_id == a_id }, MessageSendEvent::SendTxComplete { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } + if Some(*node_id) == expect_drop_id { + panic!( + "peer_disconnected should drop msgs bound for the disconnected peer" + ); + } *node_id == a_id }, MessageSendEvent::SendTxAbort { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } + if Some(*node_id) == expect_drop_id { + panic!( + "peer_disconnected should drop msgs bound for the disconnected peer" + ); + } *node_id == a_id }, MessageSendEvent::SendTxInitRbf { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } + if Some(*node_id) == expect_drop_id { + panic!( + "peer_disconnected should drop msgs bound for the disconnected peer" + ); + } *node_id == a_id }, MessageSendEvent::SendTxAckRbf { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } + if Some(*node_id) == expect_drop_id { + panic!( + "peer_disconnected should drop msgs bound for the disconnected peer" + ); + } *node_id == a_id }, MessageSendEvent::SendTxSignatures { ref node_id, .. 
} => { - if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } + if Some(*node_id) == expect_drop_id { + panic!( + "peer_disconnected should drop msgs bound for the disconnected peer" + ); + } *node_id == a_id }, MessageSendEvent::SendChannelReady { .. } => continue, MessageSendEvent::SendAnnouncementSignatures { .. } => continue, MessageSendEvent::BroadcastChannelUpdate { .. } => continue, MessageSendEvent::SendChannelUpdate { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } + if Some(*node_id) == expect_drop_id { + panic!( + "peer_disconnected should drop msgs bound for the disconnected peer" + ); + } *node_id == a_id }, MessageSendEvent::HandleError { ref action, ref node_id } => { assert_action_timeout_awaiting_response(action); - if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } + if Some(*node_id) == expect_drop_id { + panic!( + "peer_disconnected should drop msgs bound for the disconnected peer" + ); + } *node_id == a_id }, _ => panic!("Unhandled message event {:?}", event), }; - if push_a { ba_events.push(event); } else { bc_events.push(event); } + if push_a { + queues.ba.push(event); + } else { + queues.bc.push(event); + } } - } } + }}; } // While delivering messages, we select across three possible message selection processes @@ -1833,20 +1920,20 @@ pub fn do_test(data: &[u8], out: Out) { } macro_rules! 
process_msg_events { - ($node: expr, $corrupt_forward: expr, $limit_events: expr) => { { + ($node: expr, $corrupt_forward: expr, $limit_events: expr) => {{ let mut events = if $node == 1 { let mut new_events = Vec::new(); - mem::swap(&mut new_events, &mut ba_events); - new_events.extend_from_slice(&bc_events[..]); - bc_events.clear(); + mem::swap(&mut new_events, &mut queues.ba); + new_events.extend_from_slice(&queues.bc[..]); + queues.bc.clear(); new_events } else if $node == 0 { let mut new_events = Vec::new(); - mem::swap(&mut new_events, &mut ab_events); + mem::swap(&mut new_events, &mut queues.ab); new_events } else { let mut new_events = Vec::new(); - mem::swap(&mut new_events, &mut cb_events); + mem::swap(&mut new_events, &mut queues.cb); new_events }; let mut new_events = Vec::new(); @@ -1859,13 +1946,35 @@ pub fn do_test(data: &[u8], out: Out) { for event in &mut events_iter { had_events = true; match event { - MessageSendEvent::UpdateHTLCs { node_id, channel_id, updates: CommitmentUpdate { update_add_htlcs, update_fail_htlcs, update_fulfill_htlcs, update_fail_malformed_htlcs, update_fee, commitment_signed } } => { + MessageSendEvent::UpdateHTLCs { + node_id, + channel_id, + updates: + CommitmentUpdate { + update_add_htlcs, + update_fail_htlcs, + update_fulfill_htlcs, + update_fail_malformed_htlcs, + update_fee, + commitment_signed, + }, + } => { for (idx, dest) in nodes.iter().enumerate() { if dest.get_our_node_id() == node_id { for update_add in update_add_htlcs.iter() { - out.locked_write(format!("Delivering update_add_htlc from node {} to node {}.\n", $node, idx).as_bytes()); + out.locked_write( + format!( + "Delivering update_add_htlc from node {} to node {}.\n", + $node, + idx + ) + .as_bytes(), + ); if !$corrupt_forward { - dest.handle_update_add_htlc(nodes[$node].get_our_node_id(), update_add); + dest.handle_update_add_htlc( + nodes[$node].get_our_node_id(), + update_add, + ); } else { // Corrupt the update_add_htlc message so that its HMAC // 
check will fail and we generate a @@ -1873,42 +1982,105 @@ pub fn do_test(data: &[u8], out: Out) { // update_fail_htlc as we do when we reject a payment. let mut msg_ser = update_add.encode(); msg_ser[1000] ^= 0xff; - let new_msg = UpdateAddHTLC::read_from_fixed_length_buffer(&mut &msg_ser[..]).unwrap(); - dest.handle_update_add_htlc(nodes[$node].get_our_node_id(), &new_msg); + let new_msg = + UpdateAddHTLC::read_from_fixed_length_buffer( + &mut &msg_ser[..], + ) + .unwrap(); + dest.handle_update_add_htlc( + nodes[$node].get_our_node_id(), + &new_msg, + ); } } - let processed_change = !update_add_htlcs.is_empty() || !update_fulfill_htlcs.is_empty() || - !update_fail_htlcs.is_empty() || !update_fail_malformed_htlcs.is_empty(); + let processed_change = !update_add_htlcs.is_empty() + || !update_fulfill_htlcs.is_empty() + || !update_fail_htlcs.is_empty() + || !update_fail_malformed_htlcs.is_empty(); for update_fulfill in update_fulfill_htlcs { - out.locked_write(format!("Delivering update_fulfill_htlc from node {} to node {}.\n", $node, idx).as_bytes()); - dest.handle_update_fulfill_htlc(nodes[$node].get_our_node_id(), update_fulfill); + out.locked_write( + format!( + "Delivering update_fulfill_htlc from node {} to node {}.\n", + $node, + idx + ) + .as_bytes(), + ); + dest.handle_update_fulfill_htlc( + nodes[$node].get_our_node_id(), + update_fulfill, + ); } for update_fail in update_fail_htlcs.iter() { - out.locked_write(format!("Delivering update_fail_htlc from node {} to node {}.\n", $node, idx).as_bytes()); - dest.handle_update_fail_htlc(nodes[$node].get_our_node_id(), update_fail); + out.locked_write( + format!( + "Delivering update_fail_htlc from node {} to node {}.\n", + $node, + idx + ) + .as_bytes(), + ); + dest.handle_update_fail_htlc( + nodes[$node].get_our_node_id(), + update_fail, + ); } for update_fail_malformed in update_fail_malformed_htlcs.iter() { - out.locked_write(format!("Delivering update_fail_malformed_htlc from node {} to node {}.\n", $node, 
idx).as_bytes()); - dest.handle_update_fail_malformed_htlc(nodes[$node].get_our_node_id(), update_fail_malformed); + out.locked_write( + format!( + "Delivering update_fail_malformed_htlc from node {} to node {}.\n", + $node, + idx + ) + .as_bytes(), + ); + dest.handle_update_fail_malformed_htlc( + nodes[$node].get_our_node_id(), + update_fail_malformed, + ); } if let Some(msg) = update_fee { - out.locked_write(format!("Delivering update_fee from node {} to node {}.\n", $node, idx).as_bytes()); + out.locked_write( + format!( + "Delivering update_fee from node {} to node {}.\n", + $node, + idx + ) + .as_bytes(), + ); dest.handle_update_fee(nodes[$node].get_our_node_id(), &msg); } - if $limit_events != ProcessMessages::AllMessages && processed_change { - // If we only want to process some messages, don't deliver the CS until later. - extra_ev = Some(MessageSendEvent::UpdateHTLCs { node_id, channel_id, updates: CommitmentUpdate { - update_add_htlcs: Vec::new(), - update_fail_htlcs: Vec::new(), - update_fulfill_htlcs: Vec::new(), - update_fail_malformed_htlcs: Vec::new(), - update_fee: None, - commitment_signed - } }); + if $limit_events != ProcessMessages::AllMessages + && processed_change + { + // If we only want to process some messages, don't deliver the + // CS until later. 
+ extra_ev = Some(MessageSendEvent::UpdateHTLCs { + node_id, + channel_id, + updates: CommitmentUpdate { + update_add_htlcs: Vec::new(), + update_fail_htlcs: Vec::new(), + update_fulfill_htlcs: Vec::new(), + update_fail_malformed_htlcs: Vec::new(), + update_fee: None, + commitment_signed, + }, + }); break; } - out.locked_write(format!("Delivering commitment_signed from node {} to node {}.\n", $node, idx).as_bytes()); - dest.handle_commitment_signed_batch_test(nodes[$node].get_our_node_id(), &commitment_signed); + out.locked_write( + format!( + "Delivering commitment_signed from node {} to node {}.\n", + $node, + idx + ) + .as_bytes(), + ); + dest.handle_commitment_signed_batch_test( + nodes[$node].get_our_node_id(), + &commitment_signed, + ); break; } } @@ -1916,7 +2088,14 @@ pub fn do_test(data: &[u8], out: Out) { MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => { for (idx, dest) in nodes.iter().enumerate() { if dest.get_our_node_id() == *node_id { - out.locked_write(format!("Delivering revoke_and_ack from node {} to node {}.\n", $node, idx).as_bytes()); + out.locked_write( + format!( + "Delivering revoke_and_ack from node {} to node {}.\n", + $node, + idx + ) + .as_bytes(), + ); dest.handle_revoke_and_ack(nodes[$node].get_our_node_id(), msg); } } @@ -1924,15 +2103,28 @@ pub fn do_test(data: &[u8], out: Out) { MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => { for (idx, dest) in nodes.iter().enumerate() { if dest.get_our_node_id() == *node_id { - out.locked_write(format!("Delivering channel_reestablish from node {} to node {}.\n", $node, idx).as_bytes()); - dest.handle_channel_reestablish(nodes[$node].get_our_node_id(), msg); + out.locked_write( + format!( + "Delivering channel_reestablish from node {} to node {}.\n", + $node, + idx + ) + .as_bytes(), + ); + dest.handle_channel_reestablish( + nodes[$node].get_our_node_id(), + msg, + ); } } }, MessageSendEvent::SendStfu { ref node_id, ref msg } => { for (idx, dest) in 
nodes.iter().enumerate() { if dest.get_our_node_id() == *node_id { - out.locked_write(format!("Delivering stfu from node {} to node {}.\n", $node, idx).as_bytes()); + out.locked_write( + format!("Delivering stfu from node {} to node {}.\n", $node, idx) + .as_bytes(), + ); dest.handle_stfu(nodes[$node].get_our_node_id(), msg); } } @@ -1940,7 +2132,14 @@ pub fn do_test(data: &[u8], out: Out) { MessageSendEvent::SendTxAddInput { ref node_id, ref msg } => { for (idx, dest) in nodes.iter().enumerate() { if dest.get_our_node_id() == *node_id { - out.locked_write(format!("Delivering tx_add_input from node {} to node {}.\n", $node, idx).as_bytes()); + out.locked_write( + format!( + "Delivering tx_add_input from node {} to node {}.\n", + $node, + idx + ) + .as_bytes(), + ); dest.handle_tx_add_input(nodes[$node].get_our_node_id(), msg); } } @@ -1948,7 +2147,14 @@ pub fn do_test(data: &[u8], out: Out) { MessageSendEvent::SendTxAddOutput { ref node_id, ref msg } => { for (idx, dest) in nodes.iter().enumerate() { if dest.get_our_node_id() == *node_id { - out.locked_write(format!("Delivering tx_add_output from node {} to node {}.\n", $node, idx).as_bytes()); + out.locked_write( + format!( + "Delivering tx_add_output from node {} to node {}.\n", + $node, + idx + ) + .as_bytes(), + ); dest.handle_tx_add_output(nodes[$node].get_our_node_id(), msg); } } @@ -1956,7 +2162,14 @@ pub fn do_test(data: &[u8], out: Out) { MessageSendEvent::SendTxRemoveInput { ref node_id, ref msg } => { for (idx, dest) in nodes.iter().enumerate() { if dest.get_our_node_id() == *node_id { - out.locked_write(format!("Delivering tx_remove_input from node {} to node {}.\n", $node, idx).as_bytes()); + out.locked_write( + format!( + "Delivering tx_remove_input from node {} to node {}.\n", + $node, + idx + ) + .as_bytes(), + ); dest.handle_tx_remove_input(nodes[$node].get_our_node_id(), msg); } } @@ -1964,7 +2177,14 @@ pub fn do_test(data: &[u8], out: Out) { MessageSendEvent::SendTxRemoveOutput { ref node_id, ref 
msg } => { for (idx, dest) in nodes.iter().enumerate() { if dest.get_our_node_id() == *node_id { - out.locked_write(format!("Delivering tx_remove_output from node {} to node {}.\n", $node, idx).as_bytes()); + out.locked_write( + format!( + "Delivering tx_remove_output from node {} to node {}.\n", + $node, + idx + ) + .as_bytes(), + ); dest.handle_tx_remove_output(nodes[$node].get_our_node_id(), msg); } } @@ -1972,7 +2192,14 @@ pub fn do_test(data: &[u8], out: Out) { MessageSendEvent::SendTxComplete { ref node_id, ref msg } => { for (idx, dest) in nodes.iter().enumerate() { if dest.get_our_node_id() == *node_id { - out.locked_write(format!("Delivering tx_complete from node {} to node {}.\n", $node, idx).as_bytes()); + out.locked_write( + format!( + "Delivering tx_complete from node {} to node {}.\n", + $node, + idx + ) + .as_bytes(), + ); dest.handle_tx_complete(nodes[$node].get_our_node_id(), msg); } } @@ -1980,7 +2207,14 @@ pub fn do_test(data: &[u8], out: Out) { MessageSendEvent::SendTxAbort { ref node_id, ref msg } => { for (idx, dest) in nodes.iter().enumerate() { if dest.get_our_node_id() == *node_id { - out.locked_write(format!("Delivering tx_abort from node {} to node {}.\n", $node, idx).as_bytes()); + out.locked_write( + format!( + "Delivering tx_abort from node {} to node {}.\n", + $node, + idx + ) + .as_bytes(), + ); dest.handle_tx_abort(nodes[$node].get_our_node_id(), msg); } } @@ -1988,7 +2222,14 @@ pub fn do_test(data: &[u8], out: Out) { MessageSendEvent::SendTxInitRbf { ref node_id, ref msg } => { for (idx, dest) in nodes.iter().enumerate() { if dest.get_our_node_id() == *node_id { - out.locked_write(format!("Delivering tx_init_rbf from node {} to node {}.\n", $node, idx).as_bytes()); + out.locked_write( + format!( + "Delivering tx_init_rbf from node {} to node {}.\n", + $node, + idx + ) + .as_bytes(), + ); dest.handle_tx_init_rbf(nodes[$node].get_our_node_id(), msg); } } @@ -1996,7 +2237,14 @@ pub fn do_test(data: &[u8], out: Out) { 
MessageSendEvent::SendTxAckRbf { ref node_id, ref msg } => { for (idx, dest) in nodes.iter().enumerate() { if dest.get_our_node_id() == *node_id { - out.locked_write(format!("Delivering tx_ack_rbf from node {} to node {}.\n", $node, idx).as_bytes()); + out.locked_write( + format!( + "Delivering tx_ack_rbf from node {} to node {}.\n", + $node, + idx + ) + .as_bytes(), + ); dest.handle_tx_ack_rbf(nodes[$node].get_our_node_id(), msg); } } @@ -2004,7 +2252,14 @@ pub fn do_test(data: &[u8], out: Out) { MessageSendEvent::SendTxSignatures { ref node_id, ref msg } => { for (idx, dest) in nodes.iter().enumerate() { if dest.get_our_node_id() == *node_id { - out.locked_write(format!("Delivering tx_signatures from node {} to node {}.\n", $node, idx).as_bytes()); + out.locked_write( + format!( + "Delivering tx_signatures from node {} to node {}.\n", + $node, + idx + ) + .as_bytes(), + ); dest.handle_tx_signatures(nodes[$node].get_our_node_id(), msg); } } @@ -2012,7 +2267,14 @@ pub fn do_test(data: &[u8], out: Out) { MessageSendEvent::SendSpliceInit { ref node_id, ref msg } => { for (idx, dest) in nodes.iter().enumerate() { if dest.get_our_node_id() == *node_id { - out.locked_write(format!("Delivering splice_init from node {} to node {}.\n", $node, idx).as_bytes()); + out.locked_write( + format!( + "Delivering splice_init from node {} to node {}.\n", + $node, + idx + ) + .as_bytes(), + ); dest.handle_splice_init(nodes[$node].get_our_node_id(), msg); } } @@ -2020,7 +2282,14 @@ pub fn do_test(data: &[u8], out: Out) { MessageSendEvent::SendSpliceAck { ref node_id, ref msg } => { for (idx, dest) in nodes.iter().enumerate() { if dest.get_our_node_id() == *node_id { - out.locked_write(format!("Delivering splice_ack from node {} to node {}.\n", $node, idx).as_bytes()); + out.locked_write( + format!( + "Delivering splice_ack from node {} to node {}.\n", + $node, + idx + ) + .as_bytes(), + ); dest.handle_splice_ack(nodes[$node].get_our_node_id(), msg); } } @@ -2028,7 +2297,14 @@ pub fn 
do_test(data: &[u8], out: Out) { MessageSendEvent::SendSpliceLocked { ref node_id, ref msg } => { for (idx, dest) in nodes.iter().enumerate() { if dest.get_our_node_id() == *node_id { - out.locked_write(format!("Delivering splice_locked from node {} to node {}.\n", $node, idx).as_bytes()); + out.locked_write( + format!( + "Delivering splice_locked from node {} to node {}.\n", + $node, + idx + ) + .as_bytes(), + ); dest.handle_splice_locked(nodes[$node].get_our_node_id(), msg); } } @@ -2058,14 +2334,22 @@ pub fn do_test(data: &[u8], out: Out) { if $node == 1 { push_excess_b_events!(extra_ev.into_iter().chain(events_iter), None); } else if $node == 0 { - if let Some(ev) = extra_ev { ab_events.push(ev); } - for event in events_iter { ab_events.push(event); } + if let Some(ev) = extra_ev { + queues.ab.push(ev); + } + for event in events_iter { + queues.ab.push(event); + } } else { - if let Some(ev) = extra_ev { cb_events.push(ev); } - for event in events_iter { cb_events.push(event); } + if let Some(ev) = extra_ev { + queues.cb.push(ev); + } + for event in events_iter { + queues.cb.push(event); + } } had_events - } } + }}; } macro_rules! 
process_msg_noret { @@ -2097,8 +2381,8 @@ pub fn do_test(data: &[u8], out: Out) { nodes[1].get_and_clear_pending_msg_events().drain(..), Some(0) ); - ab_events.clear(); - ba_events.clear(); + queues.ab.clear(); + queues.ba.clear(); } else { for event in nodes[2].get_and_clear_pending_msg_events() { match event { @@ -2120,8 +2404,8 @@ pub fn do_test(data: &[u8], out: Out) { nodes[1].get_and_clear_pending_msg_events().drain(..), Some(2) ); - bc_events.clear(); - cb_events.clear(); + queues.bc.clear(); + queues.cb.clear(); } }}; } @@ -2289,7 +2573,6 @@ pub fn do_test(data: &[u8], out: Out) { } }; - // Direct MPP payment (no hop) let send_mpp_direct = |source_idx: usize, dest_idx: usize, dest_chan_ids: &[ChannelId], @@ -2306,7 +2589,6 @@ pub fn do_test(data: &[u8], out: Out) { } }; - // MPP payment via hop - splits payment across multiple channels on either or both hops let send_mpp_hop = |source_idx: usize, middle_idx: usize, middle_chan_ids: &[ChannelId], @@ -2615,8 +2897,6 @@ pub fn do_test(data: &[u8], out: Out) { }, 0xb0 | 0xb1 | 0xb2 => { - // Restart node A, picking among the in-flight `ChannelMonitor`s to use based on - // the value of `v` we're matching. if !peers_ab_disconnected { nodes[1].peer_disconnected(nodes[0].get_our_node_id()); peers_ab_disconnected = true; @@ -2624,33 +2904,29 @@ pub fn do_test(data: &[u8], out: Out) { nodes[1].get_and_clear_pending_msg_events().drain(..), Some(0) ); - ab_events.clear(); - ba_events.clear(); + queues.ab.clear(); + queues.ba.clear(); } nodes[0].reload(v, &out, &router, chan_type); }, 0xb3..=0xbb => { - // Restart node B, picking among the in-flight `ChannelMonitor`s to use based on - // the value of `v` we're matching. 
if !peers_ab_disconnected { nodes[0].peer_disconnected(nodes[1].get_our_node_id()); peers_ab_disconnected = true; nodes[0].get_and_clear_pending_msg_events(); - ab_events.clear(); - ba_events.clear(); + queues.ab.clear(); + queues.ba.clear(); } if !peers_bc_disconnected { nodes[2].peer_disconnected(nodes[1].get_our_node_id()); peers_bc_disconnected = true; nodes[2].get_and_clear_pending_msg_events(); - bc_events.clear(); - cb_events.clear(); + queues.bc.clear(); + queues.cb.clear(); } nodes[1].reload(v, &out, &router, chan_type); }, 0xbc | 0xbd | 0xbe => { - // Restart node C, picking among the in-flight `ChannelMonitor`s to use based on - // the value of `v` we're matching. if !peers_bc_disconnected { nodes[1].peer_disconnected(nodes[2].get_our_node_id()); peers_bc_disconnected = true; @@ -2658,8 +2934,8 @@ pub fn do_test(data: &[u8], out: Out) { nodes[1].get_and_clear_pending_msg_events().drain(..), Some(2) ); - bc_events.clear(); - cb_events.clear(); + queues.bc.clear(); + queues.cb.clear(); } nodes[2].reload(v, &out, &router, chan_type); }, @@ -2798,7 +3074,6 @@ pub fn do_test(data: &[u8], out: Out) { // Test that no channel is in a stuck state where neither party can send funds even // after we resolve all pending events. - // First, make sure peers are all connected to each other if peers_ab_disconnected { let init_1 = Init { features: nodes[1].init_features(), @@ -2840,13 +3115,14 @@ pub fn do_test(data: &[u8], out: Out) { nodes[2].signer_unblocked(None); macro_rules! 
process_all_events { - () => { { + () => {{ let mut last_pass_no_updates = false; for i in 0..std::usize::MAX { if i == 100 { - panic!("It may take may iterations to settle the state, but it should not take forever"); + panic!( + "It may take may iterations to settle the state, but it should not take forever" + ); } - // Next, make sure no monitor updates are pending for id in &chan_ab_ids { nodes[0].complete_all_monitor_updates(id); nodes[1].complete_all_monitor_updates(id); @@ -2855,7 +3131,6 @@ pub fn do_test(data: &[u8], out: Out) { nodes[1].complete_all_monitor_updates(id); nodes[2].complete_all_monitor_updates(id); } - // Then, make sure any current forwards make their way to their destination if process_msg_events!(0, false, ProcessMessages::AllMessages) { last_pass_no_updates = false; continue; @@ -2868,7 +3143,6 @@ pub fn do_test(data: &[u8], out: Out) { last_pass_no_updates = false; continue; } - // ...making sure any payments are claimed. if process_events!(0, false) { last_pass_no_updates = false; continue; @@ -2882,18 +3156,11 @@ pub fn do_test(data: &[u8], out: Out) { continue; } if last_pass_no_updates { - // In some cases, we may generate a message to send in - // `process_msg_events`, but block sending until - // `complete_all_monitor_updates` gets called on the next - // iteration. - // - // Thus, we only exit if we manage two iterations with no messages - // or events to process. break; } last_pass_no_updates = true; } - } }; + }}; } process_all_events!(); @@ -2906,7 +3173,6 @@ pub fn do_test(data: &[u8], out: Out) { } process_all_events!(); - // Verify no payments are stuck - all should have resolved for (idx, pending) in pending_payments.borrow().iter().enumerate() { assert!( pending.is_empty(), @@ -2916,8 +3182,6 @@ pub fn do_test(data: &[u8], out: Out) { ); } - // Verify that every payment claimed by a receiver resulted in a - // PaymentSent event at the sender. 
let resolved = resolved_payments.borrow(); for hash in claimed_payment_hashes.borrow().iter() { let found = resolved.iter().any(|node_resolved| { From 4f0471c17c197012c1f3f1bff2180d7b4e989591 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 28 Apr 2026 11:40:36 +0200 Subject: [PATCH 09/15] Move chanmon queue routing into EventQueues Move per-node queue draining, middle-node routing, and disconnect cleanup into EventQueues. The fuzz loop now asks the queue owner to route remaining messages instead of mutating each directional vector directly. --- fuzz/src/chanmon_consistency.rs | 415 ++++++++++++-------------------- 1 file changed, 160 insertions(+), 255 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 67f5bf9db2e..8a5756321be 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -1336,6 +1336,145 @@ impl EventQueues { fn new() -> Self { Self { ab: Vec::new(), ba: Vec::new(), bc: Vec::new(), cb: Vec::new() } } + + fn take_for_node(&mut self, node_idx: usize) -> Vec { + match node_idx { + 0 => { + let mut events = Vec::new(); + mem::swap(&mut events, &mut self.ab); + events + }, + 1 => { + let mut events = Vec::new(); + mem::swap(&mut events, &mut self.ba); + events.extend_from_slice(&self.bc[..]); + self.bc.clear(); + events + }, + 2 => { + let mut events = Vec::new(); + mem::swap(&mut events, &mut self.cb); + events + }, + _ => panic!("invalid node index"), + } + } + + fn push_for_node(&mut self, node_idx: usize, event: MessageSendEvent) { + match node_idx { + 0 => self.ab.push(event), + 2 => self.cb.push(event), + _ => panic!("cannot directly queue messages for node {}", node_idx), + } + } + + fn extend_for_node>( + &mut self, node_idx: usize, events: I, + ) { + match node_idx { + 0 => self.ab.extend(events), + 2 => self.cb.extend(events), + _ => panic!("cannot directly queue messages for node {}", node_idx), + } + } + + fn route_from_middle<'a, I: IntoIterator>( + &mut self, 
excess_events: I, expect_drop_node: Option, nodes: &[HarnessNode<'a>; 3], + ) { + // Push any events from Node B onto queues.ba and queues.bc. + let a_id = nodes[0].our_node_id(); + let expect_drop_id = expect_drop_node.map(|id| nodes[id].our_node_id()); + for event in excess_events { + let push_a = match event { + MessageSendEvent::UpdateHTLCs { ref node_id, .. } + | MessageSendEvent::SendRevokeAndACK { ref node_id, .. } + | MessageSendEvent::SendChannelReestablish { ref node_id, .. } + | MessageSendEvent::SendStfu { ref node_id, .. } + | MessageSendEvent::SendSpliceInit { ref node_id, .. } + | MessageSendEvent::SendSpliceAck { ref node_id, .. } + | MessageSendEvent::SendSpliceLocked { ref node_id, .. } + | MessageSendEvent::SendTxAddInput { ref node_id, .. } + | MessageSendEvent::SendTxAddOutput { ref node_id, .. } + | MessageSendEvent::SendTxRemoveInput { ref node_id, .. } + | MessageSendEvent::SendTxRemoveOutput { ref node_id, .. } + | MessageSendEvent::SendTxComplete { ref node_id, .. } + | MessageSendEvent::SendTxAbort { ref node_id, .. } + | MessageSendEvent::SendTxInitRbf { ref node_id, .. } + | MessageSendEvent::SendTxAckRbf { ref node_id, .. } + | MessageSendEvent::SendTxSignatures { ref node_id, .. } + | MessageSendEvent::SendChannelUpdate { ref node_id, .. } => { + if Some(*node_id) == expect_drop_id { + panic!( + "peer_disconnected should drop msgs bound for the disconnected peer" + ); + } + *node_id == a_id + }, + MessageSendEvent::HandleError { ref action, ref node_id } => { + assert_action_timeout_awaiting_response(action); + if Some(*node_id) == expect_drop_id { + panic!( + "peer_disconnected should drop msgs bound for the disconnected peer" + ); + } + *node_id == a_id + }, + MessageSendEvent::SendChannelReady { .. } + | MessageSendEvent::SendAnnouncementSignatures { .. } + | MessageSendEvent::BroadcastChannelUpdate { .. 
} => continue, + _ => panic!("Unhandled message event {:?}", event), + }; + if push_a { + self.ba.push(event); + } else { + self.bc.push(event); + } + } + } + + fn drain_on_disconnect(&mut self, edge_node: usize, nodes: &[HarnessNode<'_>; 3]) { + match edge_node { + 0 => { + for event in nodes[0].get_and_clear_pending_msg_events() { + match event { + MessageSendEvent::UpdateHTLCs { .. } => {}, + MessageSendEvent::SendRevokeAndACK { .. } => {}, + MessageSendEvent::SendChannelReestablish { .. } => {}, + MessageSendEvent::SendStfu { .. } => {}, + MessageSendEvent::SendChannelReady { .. } => {}, + MessageSendEvent::SendAnnouncementSignatures { .. } => {}, + MessageSendEvent::BroadcastChannelUpdate { .. } => {}, + MessageSendEvent::SendChannelUpdate { .. } => {}, + MessageSendEvent::HandleError { ref action, .. } => { + assert_action_timeout_awaiting_response(action); + }, + _ => panic!("Unhandled message event"), + } + } + self.route_from_middle(nodes[1].get_and_clear_pending_msg_events(), Some(0), nodes); + }, + 2 => { + for event in nodes[2].get_and_clear_pending_msg_events() { + match event { + MessageSendEvent::UpdateHTLCs { .. } => {}, + MessageSendEvent::SendRevokeAndACK { .. } => {}, + MessageSendEvent::SendChannelReestablish { .. } => {}, + MessageSendEvent::SendStfu { .. } => {}, + MessageSendEvent::SendChannelReady { .. } => {}, + MessageSendEvent::SendAnnouncementSignatures { .. } => {}, + MessageSendEvent::BroadcastChannelUpdate { .. } => {}, + MessageSendEvent::SendChannelUpdate { .. } => {}, + MessageSendEvent::HandleError { ref action, .. 
} => { + assert_action_timeout_awaiting_response(action); + }, + _ => panic!("Unhandled message event"), + } + } + self.route_from_middle(nodes[1].get_and_clear_pending_msg_events(), Some(2), nodes); + }, + _ => panic!("unsupported disconnected edge"), + } + } } fn build_node_config(chan_type: ChanType) -> UserConfig { @@ -1731,177 +1870,6 @@ pub fn do_test(data: &[u8], out: Out) { } loop { - // Push any events from Node B onto queues.ba and queues.bc - macro_rules! push_excess_b_events { - ($excess_events: expr, $expect_drop_node: expr) => {{ - let a_id = nodes[0].get_our_node_id(); - let expect_drop_node: Option = $expect_drop_node; - let expect_drop_id = if let Some(id) = expect_drop_node { - Some(nodes[id].get_our_node_id()) - } else { - None - }; - for event in $excess_events { - let push_a = match event { - MessageSendEvent::UpdateHTLCs { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { - panic!( - "peer_disconnected should drop msgs bound for the disconnected peer" - ); - } - *node_id == a_id - }, - MessageSendEvent::SendRevokeAndACK { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { - panic!( - "peer_disconnected should drop msgs bound for the disconnected peer" - ); - } - *node_id == a_id - }, - MessageSendEvent::SendChannelReestablish { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { - panic!( - "peer_disconnected should drop msgs bound for the disconnected peer" - ); - } - *node_id == a_id - }, - MessageSendEvent::SendStfu { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { - panic!( - "peer_disconnected should drop msgs bound for the disconnected peer" - ); - } - *node_id == a_id - }, - MessageSendEvent::SendSpliceInit { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { - panic!( - "peer_disconnected should drop msgs bound for the disconnected peer" - ); - } - *node_id == a_id - }, - MessageSendEvent::SendSpliceAck { ref node_id, .. 
} => { - if Some(*node_id) == expect_drop_id { - panic!( - "peer_disconnected should drop msgs bound for the disconnected peer" - ); - } - *node_id == a_id - }, - MessageSendEvent::SendSpliceLocked { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { - panic!( - "peer_disconnected should drop msgs bound for the disconnected peer" - ); - } - *node_id == a_id - }, - MessageSendEvent::SendTxAddInput { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { - panic!( - "peer_disconnected should drop msgs bound for the disconnected peer" - ); - } - *node_id == a_id - }, - MessageSendEvent::SendTxAddOutput { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { - panic!( - "peer_disconnected should drop msgs bound for the disconnected peer" - ); - } - *node_id == a_id - }, - MessageSendEvent::SendTxRemoveInput { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { - panic!( - "peer_disconnected should drop msgs bound for the disconnected peer" - ); - } - *node_id == a_id - }, - MessageSendEvent::SendTxRemoveOutput { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { - panic!( - "peer_disconnected should drop msgs bound for the disconnected peer" - ); - } - *node_id == a_id - }, - MessageSendEvent::SendTxComplete { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { - panic!( - "peer_disconnected should drop msgs bound for the disconnected peer" - ); - } - *node_id == a_id - }, - MessageSendEvent::SendTxAbort { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { - panic!( - "peer_disconnected should drop msgs bound for the disconnected peer" - ); - } - *node_id == a_id - }, - MessageSendEvent::SendTxInitRbf { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { - panic!( - "peer_disconnected should drop msgs bound for the disconnected peer" - ); - } - *node_id == a_id - }, - MessageSendEvent::SendTxAckRbf { ref node_id, .. 
} => { - if Some(*node_id) == expect_drop_id { - panic!( - "peer_disconnected should drop msgs bound for the disconnected peer" - ); - } - *node_id == a_id - }, - MessageSendEvent::SendTxSignatures { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { - panic!( - "peer_disconnected should drop msgs bound for the disconnected peer" - ); - } - *node_id == a_id - }, - MessageSendEvent::SendChannelReady { .. } => continue, - MessageSendEvent::SendAnnouncementSignatures { .. } => continue, - MessageSendEvent::BroadcastChannelUpdate { .. } => continue, - MessageSendEvent::SendChannelUpdate { ref node_id, .. } => { - if Some(*node_id) == expect_drop_id { - panic!( - "peer_disconnected should drop msgs bound for the disconnected peer" - ); - } - *node_id == a_id - }, - MessageSendEvent::HandleError { ref action, ref node_id } => { - assert_action_timeout_awaiting_response(action); - if Some(*node_id) == expect_drop_id { - panic!( - "peer_disconnected should drop msgs bound for the disconnected peer" - ); - } - *node_id == a_id - }, - _ => panic!("Unhandled message event {:?}", event), - }; - if push_a { - queues.ba.push(event); - } else { - queues.bc.push(event); - } - } - }}; - } - // While delivering messages, we select across three possible message selection processes // to ensure we get as much coverage as possible. See the individual enum variants for more // details. @@ -1921,21 +1889,7 @@ pub fn do_test(data: &[u8], out: Out) { macro_rules! 
process_msg_events { ($node: expr, $corrupt_forward: expr, $limit_events: expr) => {{ - let mut events = if $node == 1 { - let mut new_events = Vec::new(); - mem::swap(&mut new_events, &mut queues.ba); - new_events.extend_from_slice(&queues.bc[..]); - queues.bc.clear(); - new_events - } else if $node == 0 { - let mut new_events = Vec::new(); - mem::swap(&mut new_events, &mut queues.ab); - new_events - } else { - let mut new_events = Vec::new(); - mem::swap(&mut new_events, &mut queues.cb); - new_events - }; + let mut events = queues.take_for_node($node); let mut new_events = Vec::new(); if $limit_events != ProcessMessages::OnePendingMessage { new_events = nodes[$node].get_and_clear_pending_msg_events(); @@ -2332,21 +2286,18 @@ pub fn do_test(data: &[u8], out: Out) { } } if $node == 1 { - push_excess_b_events!(extra_ev.into_iter().chain(events_iter), None); + let remaining = extra_ev.into_iter().chain(events_iter).collect::>(); + queues.route_from_middle(remaining, None, &nodes); } else if $node == 0 { if let Some(ev) = extra_ev { - queues.ab.push(ev); - } - for event in events_iter { - queues.ab.push(event); + queues.push_for_node(0, ev); } + queues.extend_for_node(0, events_iter); } else { if let Some(ev) = extra_ev { - queues.cb.push(ev); - } - for event in events_iter { - queues.cb.push(event); + queues.push_for_node(2, ev); } + queues.extend_for_node(2, events_iter); } had_events }}; @@ -2358,58 +2309,6 @@ pub fn do_test(data: &[u8], out: Out) { }}; } - macro_rules! drain_msg_events_on_disconnect { - ($counterparty_id: expr) => {{ - if $counterparty_id == 0 { - for event in nodes[0].get_and_clear_pending_msg_events() { - match event { - MessageSendEvent::UpdateHTLCs { .. } => {}, - MessageSendEvent::SendRevokeAndACK { .. } => {}, - MessageSendEvent::SendChannelReestablish { .. } => {}, - MessageSendEvent::SendStfu { .. } => {}, - MessageSendEvent::SendChannelReady { .. } => {}, - MessageSendEvent::SendAnnouncementSignatures { .. 
} => {}, - MessageSendEvent::BroadcastChannelUpdate { .. } => {}, - MessageSendEvent::SendChannelUpdate { .. } => {}, - MessageSendEvent::HandleError { ref action, .. } => { - assert_action_timeout_awaiting_response(action); - }, - _ => panic!("Unhandled message event"), - } - } - push_excess_b_events!( - nodes[1].get_and_clear_pending_msg_events().drain(..), - Some(0) - ); - queues.ab.clear(); - queues.ba.clear(); - } else { - for event in nodes[2].get_and_clear_pending_msg_events() { - match event { - MessageSendEvent::UpdateHTLCs { .. } => {}, - MessageSendEvent::SendRevokeAndACK { .. } => {}, - MessageSendEvent::SendChannelReestablish { .. } => {}, - MessageSendEvent::SendStfu { .. } => {}, - MessageSendEvent::SendChannelReady { .. } => {}, - MessageSendEvent::SendAnnouncementSignatures { .. } => {}, - MessageSendEvent::BroadcastChannelUpdate { .. } => {}, - MessageSendEvent::SendChannelUpdate { .. } => {}, - MessageSendEvent::HandleError { ref action, .. } => { - assert_action_timeout_awaiting_response(action); - }, - _ => panic!("Unhandled message event"), - } - } - push_excess_b_events!( - nodes[1].get_and_clear_pending_msg_events().drain(..), - Some(2) - ); - queues.bc.clear(); - queues.cb.clear(); - } - }}; - } - macro_rules! 
process_events { ($node: expr, $fail: expr) => {{ // Multiple HTLCs can resolve for the same payment hash, so deduplicate @@ -2657,7 +2556,9 @@ pub fn do_test(data: &[u8], out: Out) { nodes[0].peer_disconnected(nodes[1].get_our_node_id()); nodes[1].peer_disconnected(nodes[0].get_our_node_id()); peers_ab_disconnected = true; - drain_msg_events_on_disconnect!(0); + queues.drain_on_disconnect(0, &nodes); + queues.ab.clear(); + queues.ba.clear(); } }, 0x0d => { @@ -2665,7 +2566,9 @@ pub fn do_test(data: &[u8], out: Out) { nodes[1].peer_disconnected(nodes[2].get_our_node_id()); nodes[2].peer_disconnected(nodes[1].get_our_node_id()); peers_bc_disconnected = true; - drain_msg_events_on_disconnect!(2); + queues.drain_on_disconnect(2, &nodes); + queues.bc.clear(); + queues.cb.clear(); } }, 0x0e => { @@ -2900,9 +2803,10 @@ pub fn do_test(data: &[u8], out: Out) { if !peers_ab_disconnected { nodes[1].peer_disconnected(nodes[0].get_our_node_id()); peers_ab_disconnected = true; - push_excess_b_events!( - nodes[1].get_and_clear_pending_msg_events().drain(..), - Some(0) + queues.route_from_middle( + nodes[1].get_and_clear_pending_msg_events(), + Some(0), + &nodes, ); queues.ab.clear(); queues.ba.clear(); @@ -2930,9 +2834,10 @@ pub fn do_test(data: &[u8], out: Out) { if !peers_bc_disconnected { nodes[1].peer_disconnected(nodes[2].get_our_node_id()); peers_bc_disconnected = true; - push_excess_b_events!( - nodes[1].get_and_clear_pending_msg_events().drain(..), - Some(2) + queues.route_from_middle( + nodes[1].get_and_clear_pending_msg_events(), + Some(2), + &nodes, ); queues.bc.clear(); queues.cb.clear(); From f1631a3723802dfdfaa289bebdf67de3e518246a Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 21 Apr 2026 17:09:02 +0200 Subject: [PATCH 10/15] Extract chanmon harness message processing Pull message-event delivery into standalone helpers. This keeps the fuzz dispatch loop smaller while preserving the same corruption and one-message processing modes. 
--- fuzz/src/chanmon_consistency.rs | 711 +++++++++++++------------------- 1 file changed, 283 insertions(+), 428 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 8a5756321be..47b027c597f 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -942,6 +942,21 @@ enum ChanType { ZeroFeeCommitments, } +// While delivering messages, select across three possible message selection +// processes to maximize coverage. See the individual enum variants for details. +#[derive(Copy, Clone, PartialEq, Eq)] +enum ProcessMessages { + /// Deliver all available messages, including fetching any new messages from + /// `get_and_clear_pending_msg_events()` which may have side effects. + AllMessages, + /// Call `get_and_clear_pending_msg_events()` first, then deliver up to one + /// message, which may already be queued. + OneMessage, + /// Deliver up to one already-queued message. This avoids the side effects of + /// `get_and_clear_pending_msg_events()`, such as freeing the HTLC holding cell. 
+ OnePendingMessage, +} + struct HarnessNode<'a> { node_id: u8, node: ChanMan<'a>, @@ -1695,6 +1710,266 @@ fn lock_fundings(nodes: &[HarnessNode<'_>; 3]) { } } +fn process_msg_events_impl( + node_idx: usize, corrupt_forward: bool, limit_events: ProcessMessages, + nodes: &[HarnessNode<'_>; 3], out: &Out, queues: &mut EventQueues, +) -> bool { + fn find_destination_node(nodes: &[HarnessNode<'_>; 3], node_id: &PublicKey) -> usize { + nodes + .iter() + .position(|node| node.our_node_id() == *node_id) + .expect("message destination should be a known harness node") + } + + fn log_msg_delivery( + node_idx: usize, dest_idx: usize, msg_name: &str, out: &Out, + ) { + out.locked_write( + format!("Delivering {} from node {} to node {}.\n", msg_name, node_idx, dest_idx) + .as_bytes(), + ); + } + + fn log_peer_message( + node_idx: usize, node_id: &PublicKey, nodes: &[HarnessNode<'_>; 3], out: &Out, + msg_name: &str, + ) -> usize { + let dest_idx = find_destination_node(nodes, node_id); + log_msg_delivery(node_idx, dest_idx, msg_name, out); + dest_idx + } + + fn handle_update_add_htlc( + source_node_id: PublicKey, dest: &HarnessNode<'_>, update_add: &UpdateAddHTLC, + corrupt_forward: bool, + ) { + if !corrupt_forward { + dest.handle_update_add_htlc(source_node_id, update_add); + } else { + // Corrupt the update_add_htlc message so that its HMAC check will fail and we + // generate an update_fail_malformed_htlc instead of an update_fail_htlc as we do + // when we reject a payment. 
+ let mut msg_ser = update_add.encode(); + msg_ser[1000] ^= 0xff; + let new_msg = UpdateAddHTLC::read_from_fixed_length_buffer(&mut &msg_ser[..]).unwrap(); + dest.handle_update_add_htlc(source_node_id, &new_msg); + } + } + + fn handle_update_htlcs_event( + node_idx: usize, source_node_id: PublicKey, node_id: PublicKey, channel_id: ChannelId, + updates: CommitmentUpdate, corrupt_forward: bool, limit_events: ProcessMessages, + nodes: &[HarnessNode<'_>; 3], out: &Out, + ) -> Option { + let dest_idx = find_destination_node(nodes, &node_id); + let dest = &nodes[dest_idx]; + let CommitmentUpdate { + update_add_htlcs, + update_fail_htlcs, + update_fulfill_htlcs, + update_fail_malformed_htlcs, + update_fee, + commitment_signed, + } = updates; + + for update_add in update_add_htlcs.iter() { + log_msg_delivery(node_idx, dest_idx, "update_add_htlc", out); + handle_update_add_htlc(source_node_id, dest, update_add, corrupt_forward); + } + let processed_change = !update_add_htlcs.is_empty() + || !update_fulfill_htlcs.is_empty() + || !update_fail_htlcs.is_empty() + || !update_fail_malformed_htlcs.is_empty(); + for update_fulfill in update_fulfill_htlcs { + log_msg_delivery(node_idx, dest_idx, "update_fulfill_htlc", out); + dest.handle_update_fulfill_htlc(source_node_id, update_fulfill); + } + for update_fail in update_fail_htlcs.iter() { + log_msg_delivery(node_idx, dest_idx, "update_fail_htlc", out); + dest.handle_update_fail_htlc(source_node_id, update_fail); + } + for update_fail_malformed in update_fail_malformed_htlcs.iter() { + log_msg_delivery(node_idx, dest_idx, "update_fail_malformed_htlc", out); + dest.handle_update_fail_malformed_htlc(source_node_id, update_fail_malformed); + } + if let Some(msg) = update_fee { + log_msg_delivery(node_idx, dest_idx, "update_fee", out); + dest.handle_update_fee(source_node_id, &msg); + } + if limit_events != ProcessMessages::AllMessages && processed_change { + // If we only want to process some messages, don't deliver the CS until 
later. + return Some(MessageSendEvent::UpdateHTLCs { + node_id, + channel_id, + updates: CommitmentUpdate { + update_add_htlcs: Vec::new(), + update_fail_htlcs: Vec::new(), + update_fulfill_htlcs: Vec::new(), + update_fail_malformed_htlcs: Vec::new(), + update_fee: None, + commitment_signed, + }, + }); + } + log_msg_delivery(node_idx, dest_idx, "commitment_signed", out); + dest.handle_commitment_signed_batch_test(source_node_id, &commitment_signed); + None + } + + fn process_msg_event( + node_idx: usize, source_node_id: PublicKey, event: MessageSendEvent, corrupt_forward: bool, + limit_events: ProcessMessages, nodes: &[HarnessNode<'_>; 3], out: &Out, + ) -> Option { + match event { + MessageSendEvent::UpdateHTLCs { node_id, channel_id, updates } => { + handle_update_htlcs_event( + node_idx, + source_node_id, + node_id, + channel_id, + updates, + corrupt_forward, + limit_events, + nodes, + out, + ) + }, + MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => { + let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "revoke_and_ack"); + nodes[dest_idx].handle_revoke_and_ack(source_node_id, msg); + None + }, + MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => { + let dest_idx = + log_peer_message(node_idx, node_id, nodes, out, "channel_reestablish"); + nodes[dest_idx].handle_channel_reestablish(source_node_id, msg); + None + }, + MessageSendEvent::SendStfu { ref node_id, ref msg } => { + let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "stfu"); + nodes[dest_idx].handle_stfu(source_node_id, msg); + None + }, + MessageSendEvent::SendTxAddInput { ref node_id, ref msg } => { + let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "tx_add_input"); + nodes[dest_idx].handle_tx_add_input(source_node_id, msg); + None + }, + MessageSendEvent::SendTxAddOutput { ref node_id, ref msg } => { + let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "tx_add_output"); + 
nodes[dest_idx].handle_tx_add_output(source_node_id, msg); + None + }, + MessageSendEvent::SendTxRemoveInput { ref node_id, ref msg } => { + let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "tx_remove_input"); + nodes[dest_idx].handle_tx_remove_input(source_node_id, msg); + None + }, + MessageSendEvent::SendTxRemoveOutput { ref node_id, ref msg } => { + let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "tx_remove_output"); + nodes[dest_idx].handle_tx_remove_output(source_node_id, msg); + None + }, + MessageSendEvent::SendTxComplete { ref node_id, ref msg } => { + let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "tx_complete"); + nodes[dest_idx].handle_tx_complete(source_node_id, msg); + None + }, + MessageSendEvent::SendTxAbort { ref node_id, ref msg } => { + let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "tx_abort"); + nodes[dest_idx].handle_tx_abort(source_node_id, msg); + None + }, + MessageSendEvent::SendTxInitRbf { ref node_id, ref msg } => { + let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "tx_init_rbf"); + nodes[dest_idx].handle_tx_init_rbf(source_node_id, msg); + None + }, + MessageSendEvent::SendTxAckRbf { ref node_id, ref msg } => { + let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "tx_ack_rbf"); + nodes[dest_idx].handle_tx_ack_rbf(source_node_id, msg); + None + }, + MessageSendEvent::SendTxSignatures { ref node_id, ref msg } => { + let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "tx_signatures"); + nodes[dest_idx].handle_tx_signatures(source_node_id, msg); + None + }, + MessageSendEvent::SendSpliceInit { ref node_id, ref msg } => { + let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "splice_init"); + nodes[dest_idx].handle_splice_init(source_node_id, msg); + None + }, + MessageSendEvent::SendSpliceAck { ref node_id, ref msg } => { + let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "splice_ack"); + 
nodes[dest_idx].handle_splice_ack(source_node_id, msg); + None + }, + MessageSendEvent::SendSpliceLocked { ref node_id, ref msg } => { + let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "splice_locked"); + nodes[dest_idx].handle_splice_locked(source_node_id, msg); + None + }, + MessageSendEvent::HandleError { ref action, .. } => { + assert_action_timeout_awaiting_response(action); + None + }, + MessageSendEvent::SendChannelReady { .. } + | MessageSendEvent::SendAnnouncementSignatures { .. } + | MessageSendEvent::SendChannelUpdate { .. } => { + // Can be generated as a reestablish response. + None + }, + MessageSendEvent::BroadcastChannelUpdate { .. } => { + // Can be generated as a result of calling `timer_tick_occurred` enough + // times while peers are disconnected. + None + }, + _ => panic!("Unhandled message event {:?}", event), + } + } + + let mut events = queues.take_for_node(node_idx); + let mut new_events = Vec::new(); + if limit_events != ProcessMessages::OnePendingMessage { + new_events = nodes[node_idx].get_and_clear_pending_msg_events(); + } + let mut had_events = false; + let source_node_id = nodes[node_idx].our_node_id(); + let mut events_iter = events.drain(..).chain(new_events.drain(..)); + let mut extra_ev = None; + for event in &mut events_iter { + had_events = true; + extra_ev = process_msg_event( + node_idx, + source_node_id, + event, + corrupt_forward, + limit_events, + nodes, + out, + ); + if limit_events != ProcessMessages::AllMessages { + break; + } + } + if node_idx == 1 { + let remaining = extra_ev.into_iter().chain(events_iter).collect::>(); + queues.route_from_middle(remaining, None, nodes); + } else if node_idx == 0 { + if let Some(ev) = extra_ev { + queues.push_for_node(0, ev); + } + queues.extend_for_node(0, events_iter); + } else { + if let Some(ev) = extra_ev { + queues.push_for_node(2, ev); + } + queues.extend_for_node(2, events_iter); + } + had_events +} + #[inline] pub fn do_test(data: &[u8], out: Out) { let router 
= FuzzRouter {}; @@ -1870,436 +2145,16 @@ pub fn do_test(data: &[u8], out: Out) { } loop { - // While delivering messages, we select across three possible message selection processes - // to ensure we get as much coverage as possible. See the individual enum variants for more - // details. - #[derive(PartialEq)] - enum ProcessMessages { - /// Deliver all available messages, including fetching any new messages from - /// `get_and_clear_pending_msg_events()` (which may have side effects). - AllMessages, - /// Call `get_and_clear_pending_msg_events()` first, and then deliver up to one - /// message (which may already be queued). - OneMessage, - /// Deliver up to one already-queued message. This avoids any potential side-effects - /// of `get_and_clear_pending_msg_events()` (eg freeing the HTLC holding cell), which - /// provides potentially more coverage. - OnePendingMessage, - } - macro_rules! process_msg_events { ($node: expr, $corrupt_forward: expr, $limit_events: expr) => {{ - let mut events = queues.take_for_node($node); - let mut new_events = Vec::new(); - if $limit_events != ProcessMessages::OnePendingMessage { - new_events = nodes[$node].get_and_clear_pending_msg_events(); - } - let mut had_events = false; - let mut events_iter = events.drain(..).chain(new_events.drain(..)); - let mut extra_ev = None; - for event in &mut events_iter { - had_events = true; - match event { - MessageSendEvent::UpdateHTLCs { - node_id, - channel_id, - updates: - CommitmentUpdate { - update_add_htlcs, - update_fail_htlcs, - update_fulfill_htlcs, - update_fail_malformed_htlcs, - update_fee, - commitment_signed, - }, - } => { - for (idx, dest) in nodes.iter().enumerate() { - if dest.get_our_node_id() == node_id { - for update_add in update_add_htlcs.iter() { - out.locked_write( - format!( - "Delivering update_add_htlc from node {} to node {}.\n", - $node, - idx - ) - .as_bytes(), - ); - if !$corrupt_forward { - dest.handle_update_add_htlc( - nodes[$node].get_our_node_id(), - 
update_add, - ); - } else { - // Corrupt the update_add_htlc message so that its HMAC - // check will fail and we generate a - // update_fail_malformed_htlc instead of an - // update_fail_htlc as we do when we reject a payment. - let mut msg_ser = update_add.encode(); - msg_ser[1000] ^= 0xff; - let new_msg = - UpdateAddHTLC::read_from_fixed_length_buffer( - &mut &msg_ser[..], - ) - .unwrap(); - dest.handle_update_add_htlc( - nodes[$node].get_our_node_id(), - &new_msg, - ); - } - } - let processed_change = !update_add_htlcs.is_empty() - || !update_fulfill_htlcs.is_empty() - || !update_fail_htlcs.is_empty() - || !update_fail_malformed_htlcs.is_empty(); - for update_fulfill in update_fulfill_htlcs { - out.locked_write( - format!( - "Delivering update_fulfill_htlc from node {} to node {}.\n", - $node, - idx - ) - .as_bytes(), - ); - dest.handle_update_fulfill_htlc( - nodes[$node].get_our_node_id(), - update_fulfill, - ); - } - for update_fail in update_fail_htlcs.iter() { - out.locked_write( - format!( - "Delivering update_fail_htlc from node {} to node {}.\n", - $node, - idx - ) - .as_bytes(), - ); - dest.handle_update_fail_htlc( - nodes[$node].get_our_node_id(), - update_fail, - ); - } - for update_fail_malformed in update_fail_malformed_htlcs.iter() { - out.locked_write( - format!( - "Delivering update_fail_malformed_htlc from node {} to node {}.\n", - $node, - idx - ) - .as_bytes(), - ); - dest.handle_update_fail_malformed_htlc( - nodes[$node].get_our_node_id(), - update_fail_malformed, - ); - } - if let Some(msg) = update_fee { - out.locked_write( - format!( - "Delivering update_fee from node {} to node {}.\n", - $node, - idx - ) - .as_bytes(), - ); - dest.handle_update_fee(nodes[$node].get_our_node_id(), &msg); - } - if $limit_events != ProcessMessages::AllMessages - && processed_change - { - // If we only want to process some messages, don't deliver the - // CS until later. 
- extra_ev = Some(MessageSendEvent::UpdateHTLCs { - node_id, - channel_id, - updates: CommitmentUpdate { - update_add_htlcs: Vec::new(), - update_fail_htlcs: Vec::new(), - update_fulfill_htlcs: Vec::new(), - update_fail_malformed_htlcs: Vec::new(), - update_fee: None, - commitment_signed, - }, - }); - break; - } - out.locked_write( - format!( - "Delivering commitment_signed from node {} to node {}.\n", - $node, - idx - ) - .as_bytes(), - ); - dest.handle_commitment_signed_batch_test( - nodes[$node].get_our_node_id(), - &commitment_signed, - ); - break; - } - } - }, - MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => { - for (idx, dest) in nodes.iter().enumerate() { - if dest.get_our_node_id() == *node_id { - out.locked_write( - format!( - "Delivering revoke_and_ack from node {} to node {}.\n", - $node, - idx - ) - .as_bytes(), - ); - dest.handle_revoke_and_ack(nodes[$node].get_our_node_id(), msg); - } - } - }, - MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => { - for (idx, dest) in nodes.iter().enumerate() { - if dest.get_our_node_id() == *node_id { - out.locked_write( - format!( - "Delivering channel_reestablish from node {} to node {}.\n", - $node, - idx - ) - .as_bytes(), - ); - dest.handle_channel_reestablish( - nodes[$node].get_our_node_id(), - msg, - ); - } - } - }, - MessageSendEvent::SendStfu { ref node_id, ref msg } => { - for (idx, dest) in nodes.iter().enumerate() { - if dest.get_our_node_id() == *node_id { - out.locked_write( - format!("Delivering stfu from node {} to node {}.\n", $node, idx) - .as_bytes(), - ); - dest.handle_stfu(nodes[$node].get_our_node_id(), msg); - } - } - }, - MessageSendEvent::SendTxAddInput { ref node_id, ref msg } => { - for (idx, dest) in nodes.iter().enumerate() { - if dest.get_our_node_id() == *node_id { - out.locked_write( - format!( - "Delivering tx_add_input from node {} to node {}.\n", - $node, - idx - ) - .as_bytes(), - ); - dest.handle_tx_add_input(nodes[$node].get_our_node_id(), msg); 
- } - } - }, - MessageSendEvent::SendTxAddOutput { ref node_id, ref msg } => { - for (idx, dest) in nodes.iter().enumerate() { - if dest.get_our_node_id() == *node_id { - out.locked_write( - format!( - "Delivering tx_add_output from node {} to node {}.\n", - $node, - idx - ) - .as_bytes(), - ); - dest.handle_tx_add_output(nodes[$node].get_our_node_id(), msg); - } - } - }, - MessageSendEvent::SendTxRemoveInput { ref node_id, ref msg } => { - for (idx, dest) in nodes.iter().enumerate() { - if dest.get_our_node_id() == *node_id { - out.locked_write( - format!( - "Delivering tx_remove_input from node {} to node {}.\n", - $node, - idx - ) - .as_bytes(), - ); - dest.handle_tx_remove_input(nodes[$node].get_our_node_id(), msg); - } - } - }, - MessageSendEvent::SendTxRemoveOutput { ref node_id, ref msg } => { - for (idx, dest) in nodes.iter().enumerate() { - if dest.get_our_node_id() == *node_id { - out.locked_write( - format!( - "Delivering tx_remove_output from node {} to node {}.\n", - $node, - idx - ) - .as_bytes(), - ); - dest.handle_tx_remove_output(nodes[$node].get_our_node_id(), msg); - } - } - }, - MessageSendEvent::SendTxComplete { ref node_id, ref msg } => { - for (idx, dest) in nodes.iter().enumerate() { - if dest.get_our_node_id() == *node_id { - out.locked_write( - format!( - "Delivering tx_complete from node {} to node {}.\n", - $node, - idx - ) - .as_bytes(), - ); - dest.handle_tx_complete(nodes[$node].get_our_node_id(), msg); - } - } - }, - MessageSendEvent::SendTxAbort { ref node_id, ref msg } => { - for (idx, dest) in nodes.iter().enumerate() { - if dest.get_our_node_id() == *node_id { - out.locked_write( - format!( - "Delivering tx_abort from node {} to node {}.\n", - $node, - idx - ) - .as_bytes(), - ); - dest.handle_tx_abort(nodes[$node].get_our_node_id(), msg); - } - } - }, - MessageSendEvent::SendTxInitRbf { ref node_id, ref msg } => { - for (idx, dest) in nodes.iter().enumerate() { - if dest.get_our_node_id() == *node_id { - out.locked_write( - 
format!( - "Delivering tx_init_rbf from node {} to node {}.\n", - $node, - idx - ) - .as_bytes(), - ); - dest.handle_tx_init_rbf(nodes[$node].get_our_node_id(), msg); - } - } - }, - MessageSendEvent::SendTxAckRbf { ref node_id, ref msg } => { - for (idx, dest) in nodes.iter().enumerate() { - if dest.get_our_node_id() == *node_id { - out.locked_write( - format!( - "Delivering tx_ack_rbf from node {} to node {}.\n", - $node, - idx - ) - .as_bytes(), - ); - dest.handle_tx_ack_rbf(nodes[$node].get_our_node_id(), msg); - } - } - }, - MessageSendEvent::SendTxSignatures { ref node_id, ref msg } => { - for (idx, dest) in nodes.iter().enumerate() { - if dest.get_our_node_id() == *node_id { - out.locked_write( - format!( - "Delivering tx_signatures from node {} to node {}.\n", - $node, - idx - ) - .as_bytes(), - ); - dest.handle_tx_signatures(nodes[$node].get_our_node_id(), msg); - } - } - }, - MessageSendEvent::SendSpliceInit { ref node_id, ref msg } => { - for (idx, dest) in nodes.iter().enumerate() { - if dest.get_our_node_id() == *node_id { - out.locked_write( - format!( - "Delivering splice_init from node {} to node {}.\n", - $node, - idx - ) - .as_bytes(), - ); - dest.handle_splice_init(nodes[$node].get_our_node_id(), msg); - } - } - }, - MessageSendEvent::SendSpliceAck { ref node_id, ref msg } => { - for (idx, dest) in nodes.iter().enumerate() { - if dest.get_our_node_id() == *node_id { - out.locked_write( - format!( - "Delivering splice_ack from node {} to node {}.\n", - $node, - idx - ) - .as_bytes(), - ); - dest.handle_splice_ack(nodes[$node].get_our_node_id(), msg); - } - } - }, - MessageSendEvent::SendSpliceLocked { ref node_id, ref msg } => { - for (idx, dest) in nodes.iter().enumerate() { - if dest.get_our_node_id() == *node_id { - out.locked_write( - format!( - "Delivering splice_locked from node {} to node {}.\n", - $node, - idx - ) - .as_bytes(), - ); - dest.handle_splice_locked(nodes[$node].get_our_node_id(), msg); - } - } - }, - 
MessageSendEvent::HandleError { ref action, .. } => { - assert_action_timeout_awaiting_response(action); - }, - MessageSendEvent::SendChannelReady { .. } => { - // Can be generated as a reestablish response - }, - MessageSendEvent::SendAnnouncementSignatures { .. } => { - // Can be generated as a reestablish response - }, - MessageSendEvent::SendChannelUpdate { .. } => { - // Can be generated as a reestablish response - }, - MessageSendEvent::BroadcastChannelUpdate { .. } => { - // Can be generated as a result of calling `timer_tick_occurred` enough - // times while peers are disconnected - }, - _ => panic!("Unhandled message event {:?}", event), - } - if $limit_events != ProcessMessages::AllMessages { - break; - } - } - if $node == 1 { - let remaining = extra_ev.into_iter().chain(events_iter).collect::>(); - queues.route_from_middle(remaining, None, &nodes); - } else if $node == 0 { - if let Some(ev) = extra_ev { - queues.push_for_node(0, ev); - } - queues.extend_for_node(0, events_iter); - } else { - if let Some(ev) = extra_ev { - queues.push_for_node(2, ev); - } - queues.extend_for_node(2, events_iter); - } - had_events + process_msg_events_impl( + $node, + $corrupt_forward, + $limit_events, + &nodes, + &out, + &mut queues, + ) }}; } From 259ec4dff7eb9fc4d489dae6af5b04dc6904a126 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 21 Apr 2026 16:07:32 +0200 Subject: [PATCH 11/15] Extract chanmon harness peer links Represent each channel pair as a peer link with its channel ids and disconnect state. Link methods now own peer reconnect, disconnect, and monitor-update operations for that channel group. 
--- fuzz/src/chanmon_consistency.rs | 351 ++++++++++++++++---------------- 1 file changed, 173 insertions(+), 178 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 47b027c597f..4e498e131e5 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -1447,6 +1447,20 @@ impl EventQueues { } } + fn clear_link(&mut self, link: &PeerLink) { + match (link.node_a, link.node_b) { + (0, 1) | (1, 0) => { + self.ab.clear(); + self.ba.clear(); + }, + (1, 2) | (2, 1) => { + self.bc.clear(); + self.cb.clear(); + }, + _ => panic!("unsupported link"), + } + } + fn drain_on_disconnect(&mut self, edge_node: usize, nodes: &[HarnessNode<'_>; 3]) { match edge_node { 0 => { @@ -1492,6 +1506,110 @@ impl EventQueues { } } +struct PeerLink { + node_a: usize, + node_b: usize, + channel_ids: [ChannelId; 3], + disconnected: bool, +} + +impl PeerLink { + fn new(node_a: usize, node_b: usize, channel_ids: [ChannelId; 3]) -> Self { + Self { node_a, node_b, channel_ids, disconnected: false } + } + + fn first_channel_id(&self) -> ChannelId { + self.channel_ids[0] + } + + fn channel_ids(&self) -> &[ChannelId; 3] { + &self.channel_ids + } + + fn complete_all_monitor_updates(&self, nodes: &[HarnessNode<'_>; 3]) { + for id in &self.channel_ids { + nodes[self.node_a].complete_all_monitor_updates(id); + nodes[self.node_b].complete_all_monitor_updates(id); + } + } + + fn complete_monitor_updates_for_node( + &self, node_idx: usize, nodes: &[HarnessNode<'_>; 3], selector: MonitorUpdateSelector, + ) { + assert!(node_idx == self.node_a || node_idx == self.node_b); + for id in &self.channel_ids { + nodes[node_idx].complete_monitor_update(id, selector); + } + } + + fn disconnect(&mut self, nodes: &mut [HarnessNode<'_>; 3], queues: &mut EventQueues) { + if self.disconnected { + return; + } + let node_a_id = nodes[self.node_a].our_node_id(); + let node_b_id = nodes[self.node_b].our_node_id(); + 
nodes[self.node_a].peer_disconnected(node_b_id); + nodes[self.node_b].peer_disconnected(node_a_id); + self.disconnected = true; + let edge_node = if self.node_a == 1 { + self.node_b + } else if self.node_b == 1 { + self.node_a + } else { + panic!("unsupported link topology") + }; + queues.drain_on_disconnect(edge_node, nodes); + queues.clear_link(self); + } + + fn reconnect(&mut self, nodes: &mut [HarnessNode<'_>; 3]) { + if !self.disconnected { + return; + } + let node_a_id = nodes[self.node_a].our_node_id(); + let node_b_id = nodes[self.node_b].our_node_id(); + let init_b = Init { + features: nodes[self.node_b].init_features(), + networks: None, + remote_network_address: None, + }; + nodes[self.node_a].peer_connected(node_b_id, &init_b, true).unwrap(); + let init_a = Init { + features: nodes[self.node_a].init_features(), + networks: None, + remote_network_address: None, + }; + nodes[self.node_b].peer_connected(node_a_id, &init_a, false).unwrap(); + self.disconnected = false; + } + + fn disconnect_for_reload( + &mut self, restarted_node: usize, nodes: &mut [HarnessNode<'_>; 3], + queues: &mut EventQueues, + ) { + if self.disconnected { + return; + } + assert!(restarted_node == self.node_a || restarted_node == self.node_b); + + let remaining_node = if restarted_node == self.node_a { self.node_b } else { self.node_a }; + let restarted_node_id = nodes[restarted_node].our_node_id(); + nodes[remaining_node].peer_disconnected(restarted_node_id); + self.disconnected = true; + + if remaining_node == 1 { + queues.route_from_middle( + nodes[1].get_and_clear_pending_msg_events(), + Some(restarted_node), + nodes, + ); + } else { + nodes[remaining_node].get_and_clear_pending_msg_events(); + } + queues.clear_link(self); + } +} + fn build_node_config(chan_type: ChanType) -> UserConfig { let mut config = UserConfig::default(); config.channel_config.forwarding_fee_proportional_millionths = 0; @@ -2107,10 +2225,10 @@ pub fn do_test(data: &[u8], out: Out) { let node_c_chans = 
nodes[2].list_usable_channels(); [node_c_chans[0].channel_id, node_c_chans[1].channel_id, node_c_chans[2].channel_id] }; - let chan_a_id = chan_ab_ids[0]; - let chan_b_id = chan_bc_ids[0]; - let mut peers_ab_disconnected = false; - let mut peers_bc_disconnected = false; + let mut ab_link = PeerLink::new(0, 1, chan_ab_ids); + let mut bc_link = PeerLink::new(1, 2, chan_bc_ids); + let chan_a_id = ab_link.first_channel_id(); + let chan_b_id = bc_link.first_channel_id(); let mut queues = EventQueues::new(); let mut p_ctr: u64 = 0; @@ -2386,80 +2504,30 @@ pub fn do_test(data: &[u8], out: Out) { 0x06 => nodes[2].set_persistence_style(ChannelMonitorUpdateStatus::Completed), 0x08 => { - for id in &chan_ab_ids { + for id in ab_link.channel_ids() { nodes[0].complete_all_monitor_updates(id); } }, 0x09 => { - for id in &chan_ab_ids { + for id in ab_link.channel_ids() { nodes[1].complete_all_monitor_updates(id); } }, 0x0a => { - for id in &chan_bc_ids { + for id in bc_link.channel_ids() { nodes[1].complete_all_monitor_updates(id); } }, 0x0b => { - for id in &chan_bc_ids { + for id in bc_link.channel_ids() { nodes[2].complete_all_monitor_updates(id); } }, - 0x0c => { - if !peers_ab_disconnected { - nodes[0].peer_disconnected(nodes[1].get_our_node_id()); - nodes[1].peer_disconnected(nodes[0].get_our_node_id()); - peers_ab_disconnected = true; - queues.drain_on_disconnect(0, &nodes); - queues.ab.clear(); - queues.ba.clear(); - } - }, - 0x0d => { - if !peers_bc_disconnected { - nodes[1].peer_disconnected(nodes[2].get_our_node_id()); - nodes[2].peer_disconnected(nodes[1].get_our_node_id()); - peers_bc_disconnected = true; - queues.drain_on_disconnect(2, &nodes); - queues.bc.clear(); - queues.cb.clear(); - } - }, - 0x0e => { - if peers_ab_disconnected { - let init_1 = Init { - features: nodes[1].init_features(), - networks: None, - remote_network_address: None, - }; - nodes[0].peer_connected(nodes[1].get_our_node_id(), &init_1, true).unwrap(); - let init_0 = Init { - features: 
nodes[0].init_features(), - networks: None, - remote_network_address: None, - }; - nodes[1].peer_connected(nodes[0].get_our_node_id(), &init_0, false).unwrap(); - peers_ab_disconnected = false; - } - }, - 0x0f => { - if peers_bc_disconnected { - let init_2 = Init { - features: nodes[2].init_features(), - networks: None, - remote_network_address: None, - }; - nodes[1].peer_connected(nodes[2].get_our_node_id(), &init_2, true).unwrap(); - let init_1 = Init { - features: nodes[1].init_features(), - networks: None, - remote_network_address: None, - }; - nodes[2].peer_connected(nodes[1].get_our_node_id(), &init_1, false).unwrap(); - peers_bc_disconnected = false; - } - }, + 0x0c => ab_link.disconnect(&mut nodes, &mut queues), + 0x0d => bc_link.disconnect(&mut nodes, &mut queues), + 0x0e => ab_link.reconnect(&mut nodes), + 0x0f => bc_link.reconnect(&mut nodes), 0x10 => process_msg_noret!(0, true, ProcessMessages::AllMessages), 0x11 => process_msg_noret!(0, false, ProcessMessages::AllMessages), @@ -2655,48 +2723,16 @@ pub fn do_test(data: &[u8], out: Out) { }, 0xb0 | 0xb1 | 0xb2 => { - if !peers_ab_disconnected { - nodes[1].peer_disconnected(nodes[0].get_our_node_id()); - peers_ab_disconnected = true; - queues.route_from_middle( - nodes[1].get_and_clear_pending_msg_events(), - Some(0), - &nodes, - ); - queues.ab.clear(); - queues.ba.clear(); - } + ab_link.disconnect_for_reload(0, &mut nodes, &mut queues); nodes[0].reload(v, &out, &router, chan_type); }, 0xb3..=0xbb => { - if !peers_ab_disconnected { - nodes[0].peer_disconnected(nodes[1].get_our_node_id()); - peers_ab_disconnected = true; - nodes[0].get_and_clear_pending_msg_events(); - queues.ab.clear(); - queues.ba.clear(); - } - if !peers_bc_disconnected { - nodes[2].peer_disconnected(nodes[1].get_our_node_id()); - peers_bc_disconnected = true; - nodes[2].get_and_clear_pending_msg_events(); - queues.bc.clear(); - queues.cb.clear(); - } + ab_link.disconnect_for_reload(1, &mut nodes, &mut queues); + 
bc_link.disconnect_for_reload(1, &mut nodes, &mut queues); nodes[1].reload(v, &out, &router, chan_type); }, 0xbc | 0xbd | 0xbe => { - if !peers_bc_disconnected { - nodes[1].peer_disconnected(nodes[2].get_our_node_id()); - peers_bc_disconnected = true; - queues.route_from_middle( - nodes[1].get_and_clear_pending_msg_events(), - Some(2), - &nodes, - ); - queues.bc.clear(); - queues.cb.clear(); - } + bc_link.disconnect_for_reload(2, &mut nodes, &mut queues); nodes[2].reload(v, &out, &router, chan_type); }, @@ -2767,103 +2803,51 @@ pub fn do_test(data: &[u8], out: Out) { }, 0xf0 => { - for id in &chan_ab_ids { - nodes[0].complete_monitor_update(id, MonitorUpdateSelector::First); - } + ab_link.complete_monitor_updates_for_node(0, &nodes, MonitorUpdateSelector::First) }, 0xf1 => { - for id in &chan_ab_ids { - nodes[0].complete_monitor_update(id, MonitorUpdateSelector::Second); - } + ab_link.complete_monitor_updates_for_node(0, &nodes, MonitorUpdateSelector::Second) }, 0xf2 => { - for id in &chan_ab_ids { - nodes[0].complete_monitor_update(id, MonitorUpdateSelector::Last); - } + ab_link.complete_monitor_updates_for_node(0, &nodes, MonitorUpdateSelector::Last) }, 0xf4 => { - for id in &chan_ab_ids { - nodes[1].complete_monitor_update(id, MonitorUpdateSelector::First); - } + ab_link.complete_monitor_updates_for_node(1, &nodes, MonitorUpdateSelector::First) }, 0xf5 => { - for id in &chan_ab_ids { - nodes[1].complete_monitor_update(id, MonitorUpdateSelector::Second); - } + ab_link.complete_monitor_updates_for_node(1, &nodes, MonitorUpdateSelector::Second) }, 0xf6 => { - for id in &chan_ab_ids { - nodes[1].complete_monitor_update(id, MonitorUpdateSelector::Last); - } + ab_link.complete_monitor_updates_for_node(1, &nodes, MonitorUpdateSelector::Last) }, 0xf8 => { - for id in &chan_bc_ids { - nodes[1].complete_monitor_update(id, MonitorUpdateSelector::First); - } + bc_link.complete_monitor_updates_for_node(1, &nodes, MonitorUpdateSelector::First) }, 0xf9 => { - for id in 
&chan_bc_ids { - nodes[1].complete_monitor_update(id, MonitorUpdateSelector::Second); - } + bc_link.complete_monitor_updates_for_node(1, &nodes, MonitorUpdateSelector::Second) }, 0xfa => { - for id in &chan_bc_ids { - nodes[1].complete_monitor_update(id, MonitorUpdateSelector::Last); - } + bc_link.complete_monitor_updates_for_node(1, &nodes, MonitorUpdateSelector::Last) }, 0xfc => { - for id in &chan_bc_ids { - nodes[2].complete_monitor_update(id, MonitorUpdateSelector::First); - } + bc_link.complete_monitor_updates_for_node(2, &nodes, MonitorUpdateSelector::First) }, 0xfd => { - for id in &chan_bc_ids { - nodes[2].complete_monitor_update(id, MonitorUpdateSelector::Second); - } + bc_link.complete_monitor_updates_for_node(2, &nodes, MonitorUpdateSelector::Second) }, 0xfe => { - for id in &chan_bc_ids { - nodes[2].complete_monitor_update(id, MonitorUpdateSelector::Last); - } + bc_link.complete_monitor_updates_for_node(2, &nodes, MonitorUpdateSelector::Last) }, 0xff => { // Test that no channel is in a stuck state where neither party can send funds even // after we resolve all pending events. 
- if peers_ab_disconnected { - let init_1 = Init { - features: nodes[1].init_features(), - networks: None, - remote_network_address: None, - }; - nodes[0].peer_connected(nodes[1].get_our_node_id(), &init_1, true).unwrap(); - let init_0 = Init { - features: nodes[0].init_features(), - networks: None, - remote_network_address: None, - }; - nodes[1].peer_connected(nodes[0].get_our_node_id(), &init_0, false).unwrap(); - peers_ab_disconnected = false; - } - if peers_bc_disconnected { - let init_2 = Init { - features: nodes[2].init_features(), - networks: None, - remote_network_address: None, - }; - nodes[1].peer_connected(nodes[2].get_our_node_id(), &init_2, true).unwrap(); - let init_1 = Init { - features: nodes[1].init_features(), - networks: None, - remote_network_address: None, - }; - nodes[2].peer_connected(nodes[1].get_our_node_id(), &init_1, false).unwrap(); - peers_bc_disconnected = false; - } + ab_link.reconnect(&mut nodes); + bc_link.reconnect(&mut nodes); for op in SUPPORTED_SIGNER_OPS { nodes[0].keys_manager.enable_op_for_all_signers(op); @@ -2883,14 +2867,8 @@ pub fn do_test(data: &[u8], out: Out) { "It may take may iterations to settle the state, but it should not take forever" ); } - for id in &chan_ab_ids { - nodes[0].complete_all_monitor_updates(id); - nodes[1].complete_all_monitor_updates(id); - } - for id in &chan_bc_ids { - nodes[1].complete_all_monitor_updates(id); - nodes[2].complete_all_monitor_updates(id); - } + ab_link.complete_all_monitor_updates(&nodes); + bc_link.complete_all_monitor_updates(&nodes); if process_msg_events!(0, false, ProcessMessages::AllMessages) { last_pass_no_updates = false; continue; @@ -2955,16 +2933,33 @@ pub fn do_test(data: &[u8], out: Out) { } // Finally, make sure that at least one end of each channel can make a substantial payment - for &chan_id in &chan_ab_ids { + let send_after_settle = |source_idx: usize, + dest_idx: usize, + dest_chan_id, + amt, + payment_ctr: &mut u64| { + let source = &nodes[source_idx]; + let 
dest = &nodes[dest_idx]; + let (secret, hash) = + get_payment_secret_hash(dest, payment_ctr, &payment_preimages); + let mut id = PaymentId([0; 32]); + id.0[0..8].copy_from_slice(&payment_ctr.to_ne_bytes()); + let succeeded = send_payment(source, dest, dest_chan_id, amt, secret, hash, id); + if succeeded { + pending_payments.borrow_mut()[source_idx].push(id); + } + succeeded + }; + for &chan_id in ab_link.channel_ids() { assert!( - send(0, 1, chan_id, 10_000_000, &mut p_ctr) - || send(1, 0, chan_id, 10_000_000, &mut p_ctr) + send_after_settle(0, 1, chan_id, 10_000_000, &mut p_ctr) + || send_after_settle(1, 0, chan_id, 10_000_000, &mut p_ctr) ); } - for &chan_id in &chan_bc_ids { + for &chan_id in bc_link.channel_ids() { assert!( - send(1, 2, chan_id, 10_000_000, &mut p_ctr) - || send(2, 1, chan_id, 10_000_000, &mut p_ctr) + send_after_settle(1, 2, chan_id, 10_000_000, &mut p_ctr) + || send_after_settle(2, 1, chan_id, 10_000_000, &mut p_ctr) ); } From 8d5bd2798e364aaa07509f74002eca94f0c55334 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 21 Apr 2026 15:51:28 +0200 Subject: [PATCH 12/15] Extract chanmon harness payment helpers Move payment bookkeeping into a payment tracker. Sending, resolving, claiming, and stuck-payment assertions now share one state owner instead of borrowing several local maps. 
--- fuzz/src/chanmon_consistency.rs | 867 +++++++++++++++++++------------- 1 file changed, 514 insertions(+), 353 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 4e498e131e5..1554d670c4f 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -632,7 +632,7 @@ type ChanMan<'a> = ChannelManager< #[inline] fn get_payment_secret_hash( dest: &ChanMan, payment_ctr: &mut u64, - payment_preimages: &RefCell>, + payment_preimages: &mut HashMap, ) -> (PaymentSecret, PaymentHash) { *payment_ctr += 1; let mut payment_preimage = PaymentPreimage([0; 32]); @@ -641,7 +641,7 @@ fn get_payment_secret_hash( let payment_secret = dest .create_inbound_payment_for_hash(payment_hash, None, 3600, None) .expect("create_inbound_payment_for_hash failed"); - assert!(payment_preimages.borrow_mut().insert(payment_hash, payment_preimage).is_none()); + assert!(payment_preimages.insert(payment_hash, payment_preimage).is_none()); (payment_secret, payment_hash) } @@ -1610,6 +1610,179 @@ impl PeerLink { } } +struct PaymentTracker { + pending_payments: [Vec; 3], + resolved_payments: [HashMap>; 3], + claimed_payment_hashes: HashSet, + payment_preimages: HashMap, + payment_ctr: u64, +} + +impl PaymentTracker { + fn new() -> Self { + Self { + pending_payments: [Vec::new(), Vec::new(), Vec::new()], + resolved_payments: [new_hash_map(), new_hash_map(), new_hash_map()], + claimed_payment_hashes: HashSet::new(), + payment_preimages: new_hash_map(), + payment_ctr: 0, + } + } + + fn next_payment(&mut self, dest: &ChanMan) -> (PaymentSecret, PaymentHash, PaymentId) { + let (secret, hash) = + get_payment_secret_hash(dest, &mut self.payment_ctr, &mut self.payment_preimages); + let mut id = PaymentId([0; 32]); + id.0[0..8].copy_from_slice(&self.payment_ctr.to_ne_bytes()); + (secret, hash, id) + } + + fn send_direct( + &mut self, nodes: &[HarnessNode<'_>; 3], source_idx: usize, dest_idx: usize, + dest_chan_id: ChannelId, amt: u64, + ) -> 
bool { + let source = &nodes[source_idx]; + let dest = &nodes[dest_idx]; + let (secret, hash, id) = self.next_payment(dest); + let succeeded = send_payment(source, dest, dest_chan_id, amt, secret, hash, id); + if succeeded { + self.pending_payments[source_idx].push(id); + } + succeeded + } + + fn send_hop( + &mut self, nodes: &[HarnessNode<'_>; 3], source_idx: usize, middle_idx: usize, + middle_chan_id: ChannelId, dest_idx: usize, dest_chan_id: ChannelId, amt: u64, + ) { + let source = &nodes[source_idx]; + let middle = &nodes[middle_idx]; + let dest = &nodes[dest_idx]; + let (secret, hash, id) = self.next_payment(dest); + let succeeded = send_hop_payment( + source, + middle, + middle_chan_id, + dest, + dest_chan_id, + amt, + secret, + hash, + id, + ); + if succeeded { + self.pending_payments[source_idx].push(id); + } + } + + fn send_mpp_direct( + &mut self, nodes: &[HarnessNode<'_>; 3], source_idx: usize, dest_idx: usize, + dest_chan_ids: &[ChannelId], amt: u64, + ) { + let source = &nodes[source_idx]; + let dest = &nodes[dest_idx]; + let (secret, hash, id) = self.next_payment(dest); + let succeeded = send_mpp_payment(source, dest, dest_chan_ids, amt, secret, hash, id); + if succeeded { + self.pending_payments[source_idx].push(id); + } + } + + fn send_mpp_hop( + &mut self, nodes: &[HarnessNode<'_>; 3], source_idx: usize, middle_idx: usize, + middle_chan_ids: &[ChannelId], dest_idx: usize, dest_chan_ids: &[ChannelId], amt: u64, + ) { + let source = &nodes[source_idx]; + let middle = &nodes[middle_idx]; + let dest = &nodes[dest_idx]; + let (secret, hash, id) = self.next_payment(dest); + let succeeded = send_mpp_hop_payment( + source, + middle, + middle_chan_ids, + dest, + dest_chan_ids, + amt, + secret, + hash, + id, + ); + if succeeded { + self.pending_payments[source_idx].push(id); + } + } + + fn claim_payment(&mut self, node: &HarnessNode<'_>, payment_hash: PaymentHash, fail: bool) { + if fail { + node.fail_htlc_backwards(&payment_hash); + } else { + let 
payment_preimage = *self + .payment_preimages + .get(&payment_hash) + .expect("PaymentClaimable for unknown payment hash"); + node.claim_funds(payment_preimage); + self.claimed_payment_hashes.insert(payment_hash); + } + } + + fn mark_sent(&mut self, node_idx: usize, sent_id: PaymentId, payment_hash: PaymentHash) { + let idx_opt = self.pending_payments[node_idx].iter().position(|id| *id == sent_id); + if let Some(idx) = idx_opt { + self.pending_payments[node_idx].remove(idx); + self.resolved_payments[node_idx].insert(sent_id, Some(payment_hash)); + } else { + assert!(self.resolved_payments[node_idx].contains_key(&sent_id)); + } + } + + fn mark_resolved_without_hash(&mut self, node_idx: usize, payment_id: PaymentId) { + let idx_opt = self.pending_payments[node_idx].iter().position(|id| *id == payment_id); + if let Some(idx) = idx_opt { + self.pending_payments[node_idx].remove(idx); + self.resolved_payments[node_idx].insert(payment_id, None); + } else if !self.resolved_payments[node_idx].contains_key(&payment_id) { + // Some resolutions can arrive immediately, before the send helper records + // the payment as pending. Track them so later duplicate events are accepted. 
+ self.resolved_payments[node_idx].insert(payment_id, None); + } + } + + fn mark_successful_probe(&mut self, node_idx: usize, payment_id: PaymentId) { + let idx_opt = self.pending_payments[node_idx].iter().position(|id| *id == payment_id); + if let Some(idx) = idx_opt { + self.pending_payments[node_idx].remove(idx); + self.resolved_payments[node_idx].insert(payment_id, None); + } else { + assert!(self.resolved_payments[node_idx].contains_key(&payment_id)); + } + } + + fn assert_all_resolved(&self) { + for (idx, pending) in self.pending_payments.iter().enumerate() { + assert!( + pending.is_empty(), + "Node {} has {} stuck pending payments after settling all state", + idx, + pending.len() + ); + } + } + + fn assert_claims_reported(&self) { + for hash in self.claimed_payment_hashes.iter() { + let found = self + .resolved_payments + .iter() + .any(|node_resolved| node_resolved.values().any(|h| h.as_ref() == Some(hash))); + assert!( + found, + "Payment {:?} was claimed by receiver but sender never got PaymentSent", + hash + ); + } + } +} + fn build_node_config(chan_type: ChanType) -> UserConfig { let mut config = UserConfig::default(); config.channel_config.forwarding_fee_proportional_millionths = 0; @@ -2088,6 +2261,125 @@ fn process_msg_events_impl( had_events } +fn process_events_impl( + node_idx: usize, fail: bool, nodes: &[HarnessNode<'_>; 3], chain_state: &mut ChainState, + payments: &mut PaymentTracker, +) -> bool { + // Multiple HTLCs can resolve for the same payment hash, so deduplicate + // claim/fail handling per event batch. + let mut claim_set = new_hash_map(); + let mut events = nodes[node_idx].get_and_clear_pending_events(); + let had_events = !events.is_empty(); + for event in events.drain(..) { + match event { + events::Event::PaymentClaimable { payment_hash, .. } => { + if claim_set.insert(payment_hash.0, ()).is_none() { + payments.claim_payment(&nodes[node_idx], payment_hash, fail); + } + }, + events::Event::PaymentSent { payment_id, payment_hash, .. 
} => { + payments.mark_sent(node_idx, payment_id.unwrap(), payment_hash); + }, + // Even though we don't explicitly send probes, because probes are detected based on + // hashing the payment hash+preimage, it is rather trivial for the fuzzer to build + // payments that accidentally end up looking like probes. + events::Event::ProbeSuccessful { payment_id, .. } => { + payments.mark_successful_probe(node_idx, payment_id); + }, + events::Event::PaymentFailed { payment_id, .. } + | events::Event::ProbeFailed { payment_id, .. } => { + payments.mark_resolved_without_hash(node_idx, payment_id); + }, + events::Event::PaymentClaimed { .. } => {}, + events::Event::PaymentPathSuccessful { .. } => {}, + events::Event::PaymentPathFailed { .. } => {}, + events::Event::PaymentForwarded { .. } if node_idx == 1 => {}, + events::Event::ChannelReady { .. } => {}, + events::Event::HTLCHandlingFailed { .. } => {}, + events::Event::FundingTransactionReadyForSigning { + channel_id, + counterparty_node_id, + unsigned_transaction, + .. + } => { + let signed_tx = nodes[node_idx].wallet.sign_tx(unsigned_transaction).unwrap(); + nodes[node_idx] + .funding_transaction_signed(&channel_id, &counterparty_node_id, signed_tx) + .unwrap(); + }, + events::Event::SplicePending { new_funding_txo, .. } => { + let mut txs = nodes[node_idx].broadcaster.txn_broadcasted.borrow_mut(); + assert!(txs.len() >= 1); + let splice_tx = txs.remove(0); + assert_eq!(new_funding_txo.txid, splice_tx.compute_txid()); + chain_state.add_pending_tx(splice_tx); + }, + events::Event::SpliceFailed { .. } => {}, + events::Event::DiscardFunding { + funding_info: + events::FundingInfo::Contribution { .. } | events::FundingInfo::Tx { .. }, + .. 
+ } => {}, + _ => panic!("Unhandled event"), + } + } + while nodes[node_idx].needs_pending_htlc_processing() { + nodes[node_idx].process_pending_htlc_forwards(); + } + had_events +} + +fn process_all_events_impl( + nodes: &[HarnessNode<'_>; 3], out: &Out, ab_link: &PeerLink, bc_link: &PeerLink, + chain_state: &mut ChainState, payments: &mut PaymentTracker, queues: &mut EventQueues, +) { + let mut last_pass_no_updates = false; + for i in 0..std::usize::MAX { + if i == 100 { + panic!( + "It may take many iterations to settle the state, but it should not take forever" + ); + } + // First, make sure no monitor updates are pending. + ab_link.complete_all_monitor_updates(nodes); + bc_link.complete_all_monitor_updates(nodes); + // Then, make sure any current forwards make their way to their destination. + if process_msg_events_impl(0, false, ProcessMessages::AllMessages, nodes, out, queues) { + last_pass_no_updates = false; + continue; + } + if process_msg_events_impl(1, false, ProcessMessages::AllMessages, nodes, out, queues) { + last_pass_no_updates = false; + continue; + } + if process_msg_events_impl(2, false, ProcessMessages::AllMessages, nodes, out, queues) { + last_pass_no_updates = false; + continue; + } + // Finally, make sure any payments are claimed. + if process_events_impl(0, false, nodes, chain_state, payments) { + last_pass_no_updates = false; + continue; + } + if process_events_impl(1, false, nodes, chain_state, payments) { + last_pass_no_updates = false; + continue; + } + if process_events_impl(2, false, nodes, chain_state, payments) { + last_pass_no_updates = false; + continue; + } + if last_pass_no_updates { + // In some cases, `process_msg_events_impl` may generate a message to send, but + // block sending until `complete_all_monitor_updates` gets called on the next + // iteration. Thus, we only exit if we manage two iterations with no messages or + // events to process. 
+ break; + } + last_pass_no_updates = true; + } +} + #[inline] pub fn do_test(data: &[u8], out: Out) { let router = FuzzRouter {}; @@ -2230,19 +2522,12 @@ pub fn do_test(data: &[u8], out: Out) { let chan_a_id = ab_link.first_channel_id(); let chan_b_id = bc_link.first_channel_id(); let mut queues = EventQueues::new(); - let mut p_ctr: u64 = 0; + let mut payments = PaymentTracker::new(); for node in &mut nodes { node.serialized_manager = node.encode(); } - let pending_payments = RefCell::new([Vec::new(), Vec::new(), Vec::new()]); - let resolved_payments: RefCell<[HashMap>; 3]> = - RefCell::new([new_hash_map(), new_hash_map(), new_hash_map()]); - let claimed_payment_hashes: RefCell> = RefCell::new(HashSet::new()); - let payment_preimages: RefCell> = - RefCell::new(new_hash_map()); - macro_rules! test_return { () => {{ assert_test_invariants(&nodes); @@ -2284,112 +2569,7 @@ pub fn do_test(data: &[u8], out: Out) { macro_rules! process_events { ($node: expr, $fail: expr) => {{ - // Multiple HTLCs can resolve for the same payment hash, so deduplicate - // claim/fail handling per event batch. - let mut claim_set = new_hash_map(); - let mut events = nodes[$node].get_and_clear_pending_events(); - let had_events = !events.is_empty(); - let mut pending_payments = pending_payments.borrow_mut(); - let mut resolved_payments = resolved_payments.borrow_mut(); - for event in events.drain(..) { - match event { - events::Event::PaymentClaimable { payment_hash, .. } => { - if claim_set.insert(payment_hash.0, ()).is_none() { - if $fail { - nodes[$node].fail_htlc_backwards(&payment_hash); - } else { - let payment_preimage = *payment_preimages - .borrow() - .get(&payment_hash) - .expect("PaymentClaimable for unknown payment hash"); - nodes[$node].claim_funds(payment_preimage); - claimed_payment_hashes.borrow_mut().insert(payment_hash); - } - } - }, - events::Event::PaymentSent { payment_id, payment_hash, .. 
} => { - let sent_id = payment_id.unwrap(); - let idx_opt = - pending_payments[$node].iter().position(|id| *id == sent_id); - if let Some(idx) = idx_opt { - pending_payments[$node].remove(idx); - resolved_payments[$node].insert(sent_id, Some(payment_hash)); - } else { - assert!(resolved_payments[$node].contains_key(&sent_id)); - } - }, - // Even though we don't explicitly send probes, because probes are - // detected based on hashing the payment hash+preimage, its rather - // trivial for the fuzzer to build payments that accidentally end up - // looking like probes. - events::Event::ProbeSuccessful { payment_id, .. } => { - let idx_opt = - pending_payments[$node].iter().position(|id| *id == payment_id); - if let Some(idx) = idx_opt { - pending_payments[$node].remove(idx); - resolved_payments[$node].insert(payment_id, None); - } else { - assert!(resolved_payments[$node].contains_key(&payment_id)); - } - }, - events::Event::PaymentFailed { payment_id, .. } - | events::Event::ProbeFailed { payment_id, .. } => { - let idx_opt = - pending_payments[$node].iter().position(|id| *id == payment_id); - if let Some(idx) = idx_opt { - pending_payments[$node].remove(idx); - resolved_payments[$node].insert(payment_id, None); - } else if !resolved_payments[$node].contains_key(&payment_id) { - // Payment failed immediately on send, so it was never added to - // pending_payments. Add it to resolved_payments to track it. - resolved_payments[$node].insert(payment_id, None); - } - }, - events::Event::PaymentClaimed { .. } => {}, - events::Event::PaymentPathSuccessful { .. } => {}, - events::Event::PaymentPathFailed { .. } => {}, - events::Event::PaymentForwarded { .. } if $node == 1 => {}, - events::Event::ChannelReady { .. } => {}, - events::Event::HTLCHandlingFailed { .. } => {}, - - events::Event::FundingTransactionReadyForSigning { - channel_id, - counterparty_node_id, - unsigned_transaction, - .. 
- } => { - let signed_tx = - nodes[$node].wallet.sign_tx(unsigned_transaction).unwrap(); - nodes[$node] - .funding_transaction_signed( - &channel_id, - &counterparty_node_id, - signed_tx, - ) - .unwrap(); - }, - events::Event::SplicePending { new_funding_txo, .. } => { - let mut txs = nodes[$node].broadcaster.txn_broadcasted.borrow_mut(); - assert!(txs.len() >= 1); - let splice_tx = txs.remove(0); - assert_eq!(new_funding_txo.txid, splice_tx.compute_txid()); - chain_state.add_pending_tx(splice_tx); - }, - events::Event::SpliceFailed { .. } => {}, - events::Event::DiscardFunding { - funding_info: - events::FundingInfo::Contribution { .. } - | events::FundingInfo::Tx { .. }, - .. - } => {}, - - _ => panic!("Unhandled event: {:?}", event), - } - } - while nodes[$node].needs_pending_htlc_processing() { - nodes[$node].process_pending_htlc_forwards(); - } - had_events + process_events_impl($node, $fail, &nodes, &mut chain_state, &mut payments) }}; } @@ -2399,97 +2579,6 @@ pub fn do_test(data: &[u8], out: Out) { }}; } - let send = - |source_idx: usize, dest_idx: usize, dest_chan_id, amt, payment_ctr: &mut u64| { - let source = &nodes[source_idx]; - let dest = &nodes[dest_idx]; - let (secret, hash) = get_payment_secret_hash(dest, payment_ctr, &payment_preimages); - let mut id = PaymentId([0; 32]); - id.0[0..8].copy_from_slice(&payment_ctr.to_ne_bytes()); - let succeeded = send_payment(source, dest, dest_chan_id, amt, secret, hash, id); - if succeeded { - pending_payments.borrow_mut()[source_idx].push(id); - } - succeeded - }; - let send_noret = |source_idx, dest_idx, dest_chan_id, amt, payment_ctr: &mut u64| { - send(source_idx, dest_idx, dest_chan_id, amt, payment_ctr); - }; - - let send_hop_noret = |source_idx: usize, - middle_idx: usize, - middle_chan_id: ChannelId, - dest_idx: usize, - dest_chan_id: ChannelId, - amt: u64, - payment_ctr: &mut u64| { - let source = &nodes[source_idx]; - let middle = &nodes[middle_idx]; - let dest = &nodes[dest_idx]; - let (secret, hash) 
= get_payment_secret_hash(dest, payment_ctr, &payment_preimages); - let mut id = PaymentId([0; 32]); - id.0[0..8].copy_from_slice(&payment_ctr.to_ne_bytes()); - let succeeded = send_hop_payment( - source, - middle, - middle_chan_id, - dest, - dest_chan_id, - amt, - secret, - hash, - id, - ); - if succeeded { - pending_payments.borrow_mut()[source_idx].push(id); - } - }; - - let send_mpp_direct = |source_idx: usize, - dest_idx: usize, - dest_chan_ids: &[ChannelId], - amt: u64, - payment_ctr: &mut u64| { - let source = &nodes[source_idx]; - let dest = &nodes[dest_idx]; - let (secret, hash) = get_payment_secret_hash(dest, payment_ctr, &payment_preimages); - let mut id = PaymentId([0; 32]); - id.0[0..8].copy_from_slice(&payment_ctr.to_ne_bytes()); - let succeeded = send_mpp_payment(source, dest, dest_chan_ids, amt, secret, hash, id); - if succeeded { - pending_payments.borrow_mut()[source_idx].push(id); - } - }; - - let send_mpp_hop = |source_idx: usize, - middle_idx: usize, - middle_chan_ids: &[ChannelId], - dest_idx: usize, - dest_chan_ids: &[ChannelId], - amt: u64, - payment_ctr: &mut u64| { - let source = &nodes[source_idx]; - let middle = &nodes[middle_idx]; - let dest = &nodes[dest_idx]; - let (secret, hash) = get_payment_secret_hash(dest, payment_ctr, &payment_preimages); - let mut id = PaymentId([0; 32]); - id.0[0..8].copy_from_slice(&payment_ctr.to_ne_bytes()); - let succeeded = send_mpp_hop_payment( - source, - middle, - middle_chan_ids, - dest, - dest_chan_ids, - amt, - secret, - hash, - id, - ); - if succeeded { - pending_payments.borrow_mut()[source_idx].push(id); - } - }; - let v = get_slice!(1)[0]; out.locked_write(format!("READ A BYTE! 
HANDLING INPUT {:x}...........\n", v).as_bytes()); match v { @@ -2560,74 +2649,208 @@ pub fn do_test(data: &[u8], out: Out) { 0x27 => process_ev_noret!(2, false), // 1/10th the channel size: - 0x30 => send_noret(0, 1, chan_a_id, 10_000_000, &mut p_ctr), - 0x31 => send_noret(1, 0, chan_a_id, 10_000_000, &mut p_ctr), - 0x32 => send_noret(1, 2, chan_b_id, 10_000_000, &mut p_ctr), - 0x33 => send_noret(2, 1, chan_b_id, 10_000_000, &mut p_ctr), - 0x34 => send_hop_noret(0, 1, chan_a_id, 2, chan_b_id, 10_000_000, &mut p_ctr), - 0x35 => send_hop_noret(2, 1, chan_b_id, 0, chan_a_id, 10_000_000, &mut p_ctr), - - 0x38 => send_noret(0, 1, chan_a_id, 1_000_000, &mut p_ctr), - 0x39 => send_noret(1, 0, chan_a_id, 1_000_000, &mut p_ctr), - 0x3a => send_noret(1, 2, chan_b_id, 1_000_000, &mut p_ctr), - 0x3b => send_noret(2, 1, chan_b_id, 1_000_000, &mut p_ctr), - 0x3c => send_hop_noret(0, 1, chan_a_id, 2, chan_b_id, 1_000_000, &mut p_ctr), - 0x3d => send_hop_noret(2, 1, chan_b_id, 0, chan_a_id, 1_000_000, &mut p_ctr), - - 0x40 => send_noret(0, 1, chan_a_id, 100_000, &mut p_ctr), - 0x41 => send_noret(1, 0, chan_a_id, 100_000, &mut p_ctr), - 0x42 => send_noret(1, 2, chan_b_id, 100_000, &mut p_ctr), - 0x43 => send_noret(2, 1, chan_b_id, 100_000, &mut p_ctr), - 0x44 => send_hop_noret(0, 1, chan_a_id, 2, chan_b_id, 100_000, &mut p_ctr), - 0x45 => send_hop_noret(2, 1, chan_b_id, 0, chan_a_id, 100_000, &mut p_ctr), - - 0x48 => send_noret(0, 1, chan_a_id, 10_000, &mut p_ctr), - 0x49 => send_noret(1, 0, chan_a_id, 10_000, &mut p_ctr), - 0x4a => send_noret(1, 2, chan_b_id, 10_000, &mut p_ctr), - 0x4b => send_noret(2, 1, chan_b_id, 10_000, &mut p_ctr), - 0x4c => send_hop_noret(0, 1, chan_a_id, 2, chan_b_id, 10_000, &mut p_ctr), - 0x4d => send_hop_noret(2, 1, chan_b_id, 0, chan_a_id, 10_000, &mut p_ctr), - - 0x50 => send_noret(0, 1, chan_a_id, 1_000, &mut p_ctr), - 0x51 => send_noret(1, 0, chan_a_id, 1_000, &mut p_ctr), - 0x52 => send_noret(1, 2, chan_b_id, 1_000, &mut p_ctr), - 0x53 => 
send_noret(2, 1, chan_b_id, 1_000, &mut p_ctr), - 0x54 => send_hop_noret(0, 1, chan_a_id, 2, chan_b_id, 1_000, &mut p_ctr), - 0x55 => send_hop_noret(2, 1, chan_b_id, 0, chan_a_id, 1_000, &mut p_ctr), - - 0x58 => send_noret(0, 1, chan_a_id, 100, &mut p_ctr), - 0x59 => send_noret(1, 0, chan_a_id, 100, &mut p_ctr), - 0x5a => send_noret(1, 2, chan_b_id, 100, &mut p_ctr), - 0x5b => send_noret(2, 1, chan_b_id, 100, &mut p_ctr), - 0x5c => send_hop_noret(0, 1, chan_a_id, 2, chan_b_id, 100, &mut p_ctr), - 0x5d => send_hop_noret(2, 1, chan_b_id, 0, chan_a_id, 100, &mut p_ctr), - - 0x60 => send_noret(0, 1, chan_a_id, 10, &mut p_ctr), - 0x61 => send_noret(1, 0, chan_a_id, 10, &mut p_ctr), - 0x62 => send_noret(1, 2, chan_b_id, 10, &mut p_ctr), - 0x63 => send_noret(2, 1, chan_b_id, 10, &mut p_ctr), - 0x64 => send_hop_noret(0, 1, chan_a_id, 2, chan_b_id, 10, &mut p_ctr), - 0x65 => send_hop_noret(2, 1, chan_b_id, 0, chan_a_id, 10, &mut p_ctr), - - 0x68 => send_noret(0, 1, chan_a_id, 1, &mut p_ctr), - 0x69 => send_noret(1, 0, chan_a_id, 1, &mut p_ctr), - 0x6a => send_noret(1, 2, chan_b_id, 1, &mut p_ctr), - 0x6b => send_noret(2, 1, chan_b_id, 1, &mut p_ctr), - 0x6c => send_hop_noret(0, 1, chan_a_id, 2, chan_b_id, 1, &mut p_ctr), - 0x6d => send_hop_noret(2, 1, chan_b_id, 0, chan_a_id, 1, &mut p_ctr), + 0x30 => { + payments.send_direct(&nodes, 0, 1, chan_a_id, 10_000_000); + }, + 0x31 => { + payments.send_direct(&nodes, 1, 0, chan_a_id, 10_000_000); + }, + 0x32 => { + payments.send_direct(&nodes, 1, 2, chan_b_id, 10_000_000); + }, + 0x33 => { + payments.send_direct(&nodes, 2, 1, chan_b_id, 10_000_000); + }, + 0x34 => { + payments.send_hop(&nodes, 0, 1, chan_a_id, 2, chan_b_id, 10_000_000); + }, + 0x35 => { + payments.send_hop(&nodes, 2, 1, chan_b_id, 0, chan_a_id, 10_000_000); + }, + + 0x38 => { + payments.send_direct(&nodes, 0, 1, chan_a_id, 1_000_000); + }, + 0x39 => { + payments.send_direct(&nodes, 1, 0, chan_a_id, 1_000_000); + }, + 0x3a => { + payments.send_direct(&nodes, 1, 2, 
chan_b_id, 1_000_000); + }, + 0x3b => { + payments.send_direct(&nodes, 2, 1, chan_b_id, 1_000_000); + }, + 0x3c => { + payments.send_hop(&nodes, 0, 1, chan_a_id, 2, chan_b_id, 1_000_000); + }, + 0x3d => { + payments.send_hop(&nodes, 2, 1, chan_b_id, 0, chan_a_id, 1_000_000); + }, + + 0x40 => { + payments.send_direct(&nodes, 0, 1, chan_a_id, 100_000); + }, + 0x41 => { + payments.send_direct(&nodes, 1, 0, chan_a_id, 100_000); + }, + 0x42 => { + payments.send_direct(&nodes, 1, 2, chan_b_id, 100_000); + }, + 0x43 => { + payments.send_direct(&nodes, 2, 1, chan_b_id, 100_000); + }, + 0x44 => { + payments.send_hop(&nodes, 0, 1, chan_a_id, 2, chan_b_id, 100_000); + }, + 0x45 => { + payments.send_hop(&nodes, 2, 1, chan_b_id, 0, chan_a_id, 100_000); + }, + + 0x48 => { + payments.send_direct(&nodes, 0, 1, chan_a_id, 10_000); + }, + 0x49 => { + payments.send_direct(&nodes, 1, 0, chan_a_id, 10_000); + }, + 0x4a => { + payments.send_direct(&nodes, 1, 2, chan_b_id, 10_000); + }, + 0x4b => { + payments.send_direct(&nodes, 2, 1, chan_b_id, 10_000); + }, + 0x4c => { + payments.send_hop(&nodes, 0, 1, chan_a_id, 2, chan_b_id, 10_000); + }, + 0x4d => { + payments.send_hop(&nodes, 2, 1, chan_b_id, 0, chan_a_id, 10_000); + }, + + 0x50 => { + payments.send_direct(&nodes, 0, 1, chan_a_id, 1_000); + }, + 0x51 => { + payments.send_direct(&nodes, 1, 0, chan_a_id, 1_000); + }, + 0x52 => { + payments.send_direct(&nodes, 1, 2, chan_b_id, 1_000); + }, + 0x53 => { + payments.send_direct(&nodes, 2, 1, chan_b_id, 1_000); + }, + 0x54 => { + payments.send_hop(&nodes, 0, 1, chan_a_id, 2, chan_b_id, 1_000); + }, + 0x55 => { + payments.send_hop(&nodes, 2, 1, chan_b_id, 0, chan_a_id, 1_000); + }, + + 0x58 => { + payments.send_direct(&nodes, 0, 1, chan_a_id, 100); + }, + 0x59 => { + payments.send_direct(&nodes, 1, 0, chan_a_id, 100); + }, + 0x5a => { + payments.send_direct(&nodes, 1, 2, chan_b_id, 100); + }, + 0x5b => { + payments.send_direct(&nodes, 2, 1, chan_b_id, 100); + }, + 0x5c => { + 
payments.send_hop(&nodes, 0, 1, chan_a_id, 2, chan_b_id, 100); + }, + 0x5d => { + payments.send_hop(&nodes, 2, 1, chan_b_id, 0, chan_a_id, 100); + }, + + 0x60 => { + payments.send_direct(&nodes, 0, 1, chan_a_id, 10); + }, + 0x61 => { + payments.send_direct(&nodes, 1, 0, chan_a_id, 10); + }, + 0x62 => { + payments.send_direct(&nodes, 1, 2, chan_b_id, 10); + }, + 0x63 => { + payments.send_direct(&nodes, 2, 1, chan_b_id, 10); + }, + 0x64 => { + payments.send_hop(&nodes, 0, 1, chan_a_id, 2, chan_b_id, 10); + }, + 0x65 => { + payments.send_hop(&nodes, 2, 1, chan_b_id, 0, chan_a_id, 10); + }, + + 0x68 => { + payments.send_direct(&nodes, 0, 1, chan_a_id, 1); + }, + 0x69 => { + payments.send_direct(&nodes, 1, 0, chan_a_id, 1); + }, + 0x6a => { + payments.send_direct(&nodes, 1, 2, chan_b_id, 1); + }, + 0x6b => { + payments.send_direct(&nodes, 2, 1, chan_b_id, 1); + }, + 0x6c => { + payments.send_hop(&nodes, 0, 1, chan_a_id, 2, chan_b_id, 1); + }, + 0x6d => { + payments.send_hop(&nodes, 2, 1, chan_b_id, 0, chan_a_id, 1); + }, // MPP payments // 0x70: direct MPP from 0 to 1 (multi A-B channels) - 0x70 => send_mpp_direct(0, 1, &chan_ab_ids, 1_000_000, &mut p_ctr), + 0x70 => { + payments.send_mpp_direct(&nodes, 0, 1, ab_link.channel_ids(), 1_000_000); + }, // 0x71: MPP 0->1->2, multi channels on first hop (A-B) - 0x71 => send_mpp_hop(0, 1, &chan_ab_ids, 2, &[chan_b_id], 1_000_000, &mut p_ctr), + 0x71 => { + payments.send_mpp_hop( + &nodes, + 0, + 1, + ab_link.channel_ids(), + 2, + &[chan_b_id], + 1_000_000, + ); + }, // 0x72: MPP 0->1->2, multi channels on both hops (A-B and B-C) - 0x72 => send_mpp_hop(0, 1, &chan_ab_ids, 2, &chan_bc_ids, 1_000_000, &mut p_ctr), + 0x72 => { + payments.send_mpp_hop( + &nodes, + 0, + 1, + ab_link.channel_ids(), + 2, + bc_link.channel_ids(), + 1_000_000, + ); + }, // 0x73: MPP 0->1->2, multi channels on second hop (B-C) - 0x73 => send_mpp_hop(0, 1, &[chan_a_id], 2, &chan_bc_ids, 1_000_000, &mut p_ctr), + 0x73 => { + payments.send_mpp_hop( + 
&nodes, + 0, + 1, + &[chan_a_id], + 2, + bc_link.channel_ids(), + 1_000_000, + ); + }, // 0x74: direct MPP from 0 to 1, multi parts over single channel 0x74 => { - send_mpp_direct(0, 1, &[chan_a_id, chan_a_id, chan_a_id], 1_000_000, &mut p_ctr) + payments.send_mpp_direct( + &nodes, + 0, + 1, + &[chan_a_id, chan_a_id, chan_a_id], + 1_000_000, + ); }, 0x80 => nodes[0].bump_fee_estimate(chan_type), @@ -2858,50 +3081,15 @@ pub fn do_test(data: &[u8], out: Out) { nodes[1].signer_unblocked(None); nodes[2].signer_unblocked(None); - macro_rules! process_all_events { - () => {{ - let mut last_pass_no_updates = false; - for i in 0..std::usize::MAX { - if i == 100 { - panic!( - "It may take may iterations to settle the state, but it should not take forever" - ); - } - ab_link.complete_all_monitor_updates(&nodes); - bc_link.complete_all_monitor_updates(&nodes); - if process_msg_events!(0, false, ProcessMessages::AllMessages) { - last_pass_no_updates = false; - continue; - } - if process_msg_events!(1, false, ProcessMessages::AllMessages) { - last_pass_no_updates = false; - continue; - } - if process_msg_events!(2, false, ProcessMessages::AllMessages) { - last_pass_no_updates = false; - continue; - } - if process_events!(0, false) { - last_pass_no_updates = false; - continue; - } - if process_events!(1, false) { - last_pass_no_updates = false; - continue; - } - if process_events!(2, false) { - last_pass_no_updates = false; - continue; - } - if last_pass_no_updates { - break; - } - last_pass_no_updates = true; - } - }}; - } - - process_all_events!(); + process_all_events_impl( + &nodes, + &out, + &ab_link, + &bc_link, + &mut chain_state, + &mut payments, + &mut queues, + ); // Since MPP payments are supported, we wait until we fully settle the state of all // channels to see if we have any committed HTLC parts of an MPP payment that need @@ -2909,57 +3097,30 @@ pub fn do_test(data: &[u8], out: Out) { for node in &nodes { node.timer_tick_occurred(); } - process_all_events!(); - - 
for (idx, pending) in pending_payments.borrow().iter().enumerate() { - assert!( - pending.is_empty(), - "Node {} has {} stuck pending payments after settling all state", - idx, - pending.len() - ); - } + process_all_events_impl( + &nodes, + &out, + &ab_link, + &bc_link, + &mut chain_state, + &mut payments, + &mut queues, + ); - let resolved = resolved_payments.borrow(); - for hash in claimed_payment_hashes.borrow().iter() { - let found = resolved.iter().any(|node_resolved| { - node_resolved.values().any(|h| h.as_ref() == Some(hash)) - }); - assert!( - found, - "Payment {:?} was claimed by receiver but sender never got PaymentSent", - hash - ); - } + payments.assert_all_resolved(); + payments.assert_claims_reported(); // Finally, make sure that at least one end of each channel can make a substantial payment - let send_after_settle = |source_idx: usize, - dest_idx: usize, - dest_chan_id, - amt, - payment_ctr: &mut u64| { - let source = &nodes[source_idx]; - let dest = &nodes[dest_idx]; - let (secret, hash) = - get_payment_secret_hash(dest, payment_ctr, &payment_preimages); - let mut id = PaymentId([0; 32]); - id.0[0..8].copy_from_slice(&payment_ctr.to_ne_bytes()); - let succeeded = send_payment(source, dest, dest_chan_id, amt, secret, hash, id); - if succeeded { - pending_payments.borrow_mut()[source_idx].push(id); - } - succeeded - }; for &chan_id in ab_link.channel_ids() { assert!( - send_after_settle(0, 1, chan_id, 10_000_000, &mut p_ctr) - || send_after_settle(1, 0, chan_id, 10_000_000, &mut p_ctr) + payments.send_direct(&nodes, 0, 1, chan_id, 10_000_000) + || payments.send_direct(&nodes, 1, 0, chan_id, 10_000_000) ); } for &chan_id in bc_link.channel_ids() { assert!( - send_after_settle(1, 2, chan_id, 10_000_000, &mut p_ctr) - || send_after_settle(2, 1, chan_id, 10_000_000, &mut p_ctr) + payments.send_direct(&nodes, 1, 2, chan_id, 10_000_000) + || payments.send_direct(&nodes, 2, 1, chan_id, 10_000_000) ); } From 23734df9ec28ec6f981e17e4467e7041a5ec01fa Mon Sep 
17 00:00:00 2001 From: Joost Jager Date: Tue, 21 Apr 2026 17:46:44 +0200 Subject: [PATCH 13/15] Build chanmon harness setup Collect the node, link, queue, chain, and payment setup into a harness builder. This keeps the initial fuzz scenario construction together and leaves the action loop with a smaller state surface. --- fuzz/src/chanmon_consistency.rs | 593 +++++++++++++++++--------------- 1 file changed, 317 insertions(+), 276 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 1554d670c4f..d5e547ae185 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -972,14 +972,6 @@ struct HarnessNode<'a> { last_htlc_clear_fee: u32, } -impl<'a> std::ops::Deref for HarnessNode<'a> { - type Target = ChanMan<'a>; - - fn deref(&self) -> &Self::Target { - &self.node - } -} - impl<'a> HarnessNode<'a> { fn build_loggers( node_id: u8, out: &Out, @@ -1333,13 +1325,6 @@ impl<'a> HarnessNode<'a> { } } -#[derive(Copy, Clone)] -enum MonitorUpdateSelector { - First, - Second, - Last, -} - struct EventQueues { ab: Vec, ba: Vec, @@ -1464,7 +1449,7 @@ impl EventQueues { fn drain_on_disconnect(&mut self, edge_node: usize, nodes: &[HarnessNode<'_>; 3]) { match edge_node { 0 => { - for event in nodes[0].get_and_clear_pending_msg_events() { + for event in nodes[0].node.get_and_clear_pending_msg_events() { match event { MessageSendEvent::UpdateHTLCs { .. } => {}, MessageSendEvent::SendRevokeAndACK { .. } => {}, @@ -1480,10 +1465,14 @@ impl EventQueues { _ => panic!("Unhandled message event"), } } - self.route_from_middle(nodes[1].get_and_clear_pending_msg_events(), Some(0), nodes); + self.route_from_middle( + nodes[1].node.get_and_clear_pending_msg_events(), + Some(0), + nodes, + ); }, 2 => { - for event in nodes[2].get_and_clear_pending_msg_events() { + for event in nodes[2].node.get_and_clear_pending_msg_events() { match event { MessageSendEvent::UpdateHTLCs { .. } => {}, MessageSendEvent::SendRevokeAndACK { .. 
} => {}, @@ -1499,7 +1488,11 @@ impl EventQueues { _ => panic!("Unhandled message event"), } } - self.route_from_middle(nodes[1].get_and_clear_pending_msg_events(), Some(2), nodes); + self.route_from_middle( + nodes[1].node.get_and_clear_pending_msg_events(), + Some(2), + nodes, + ); }, _ => panic!("unsupported disconnected edge"), } @@ -1548,8 +1541,8 @@ impl PeerLink { } let node_a_id = nodes[self.node_a].our_node_id(); let node_b_id = nodes[self.node_b].our_node_id(); - nodes[self.node_a].peer_disconnected(node_b_id); - nodes[self.node_b].peer_disconnected(node_a_id); + nodes[self.node_a].node.peer_disconnected(node_b_id); + nodes[self.node_b].node.peer_disconnected(node_a_id); self.disconnected = true; let edge_node = if self.node_a == 1 { self.node_b @@ -1569,17 +1562,17 @@ impl PeerLink { let node_a_id = nodes[self.node_a].our_node_id(); let node_b_id = nodes[self.node_b].our_node_id(); let init_b = Init { - features: nodes[self.node_b].init_features(), + features: nodes[self.node_b].node.init_features(), networks: None, remote_network_address: None, }; - nodes[self.node_a].peer_connected(node_b_id, &init_b, true).unwrap(); + nodes[self.node_a].node.peer_connected(node_b_id, &init_b, true).unwrap(); let init_a = Init { - features: nodes[self.node_a].init_features(), + features: nodes[self.node_a].node.init_features(), networks: None, remote_network_address: None, }; - nodes[self.node_b].peer_connected(node_a_id, &init_a, false).unwrap(); + nodes[self.node_b].node.peer_connected(node_a_id, &init_a, false).unwrap(); self.disconnected = false; } @@ -1594,38 +1587,45 @@ impl PeerLink { let remaining_node = if restarted_node == self.node_a { self.node_b } else { self.node_a }; let restarted_node_id = nodes[restarted_node].our_node_id(); - nodes[remaining_node].peer_disconnected(restarted_node_id); + nodes[remaining_node].node.peer_disconnected(restarted_node_id); self.disconnected = true; if remaining_node == 1 { queues.route_from_middle( - 
nodes[1].get_and_clear_pending_msg_events(), + nodes[1].node.get_and_clear_pending_msg_events(), Some(restarted_node), nodes, ); } else { - nodes[remaining_node].get_and_clear_pending_msg_events(); + nodes[remaining_node].node.get_and_clear_pending_msg_events(); } queues.clear_link(self); } } +#[derive(Copy, Clone)] +enum MonitorUpdateSelector { + First, + Second, + Last, +} + struct PaymentTracker { + payment_ctr: u64, pending_payments: [Vec; 3], resolved_payments: [HashMap>; 3], claimed_payment_hashes: HashSet, payment_preimages: HashMap, - payment_ctr: u64, } impl PaymentTracker { fn new() -> Self { Self { + payment_ctr: 0, pending_payments: [Vec::new(), Vec::new(), Vec::new()], resolved_payments: [new_hash_map(), new_hash_map(), new_hash_map()], claimed_payment_hashes: HashSet::new(), payment_preimages: new_hash_map(), - payment_ctr: 0, } } @@ -1641,8 +1641,8 @@ impl PaymentTracker { &mut self, nodes: &[HarnessNode<'_>; 3], source_idx: usize, dest_idx: usize, dest_chan_id: ChannelId, amt: u64, ) -> bool { - let source = &nodes[source_idx]; - let dest = &nodes[dest_idx]; + let source = &nodes[source_idx].node; + let dest = &nodes[dest_idx].node; let (secret, hash, id) = self.next_payment(dest); let succeeded = send_payment(source, dest, dest_chan_id, amt, secret, hash, id); if succeeded { @@ -1655,9 +1655,9 @@ impl PaymentTracker { &mut self, nodes: &[HarnessNode<'_>; 3], source_idx: usize, middle_idx: usize, middle_chan_id: ChannelId, dest_idx: usize, dest_chan_id: ChannelId, amt: u64, ) { - let source = &nodes[source_idx]; - let middle = &nodes[middle_idx]; - let dest = &nodes[dest_idx]; + let source = &nodes[source_idx].node; + let middle = &nodes[middle_idx].node; + let dest = &nodes[dest_idx].node; let (secret, hash, id) = self.next_payment(dest); let succeeded = send_hop_payment( source, @@ -1679,8 +1679,8 @@ impl PaymentTracker { &mut self, nodes: &[HarnessNode<'_>; 3], source_idx: usize, dest_idx: usize, dest_chan_ids: &[ChannelId], amt: u64, ) { - let 
source = &nodes[source_idx]; - let dest = &nodes[dest_idx]; + let source = &nodes[source_idx].node; + let dest = &nodes[dest_idx].node; let (secret, hash, id) = self.next_payment(dest); let succeeded = send_mpp_payment(source, dest, dest_chan_ids, amt, secret, hash, id); if succeeded { @@ -1692,9 +1692,9 @@ impl PaymentTracker { &mut self, nodes: &[HarnessNode<'_>; 3], source_idx: usize, middle_idx: usize, middle_chan_ids: &[ChannelId], dest_idx: usize, dest_chan_ids: &[ChannelId], amt: u64, ) { - let source = &nodes[source_idx]; - let middle = &nodes[middle_idx]; - let dest = &nodes[dest_idx]; + let source = &nodes[source_idx].node; + let middle = &nodes[middle_idx].node; + let dest = &nodes[dest_idx].node; let (secret, hash, id) = self.next_payment(dest); let succeeded = send_mpp_hop_payment( source, @@ -1714,13 +1714,13 @@ impl PaymentTracker { fn claim_payment(&mut self, node: &HarnessNode<'_>, payment_hash: PaymentHash, fail: bool) { if fail { - node.fail_htlc_backwards(&payment_hash); + node.node.fail_htlc_backwards(&payment_hash); } else { let payment_preimage = *self .payment_preimages .get(&payment_hash) .expect("PaymentClaimable for unknown payment hash"); - node.claim_funds(payment_preimage); + node.node.claim_funds(payment_preimage); self.claimed_payment_hashes.insert(payment_hash); } } @@ -1783,6 +1783,18 @@ impl PaymentTracker { } } +struct Harness<'a, Out: Output + MaybeSend + MaybeSync> { + out: Out, + chan_type: ChanType, + chain_state: ChainState, + nodes: [HarnessNode<'a>; 3], + ab_link: PeerLink, + bc_link: PeerLink, + queues: EventQueues, + payments: PaymentTracker, + read_pos: usize, +} + fn build_node_config(chan_type: ChanType) -> UserConfig { let mut config = UserConfig::default(); config.channel_config.forwarding_fee_proportional_millionths = 0; @@ -1806,9 +1818,9 @@ fn build_node_config(chan_type: ChanType) -> UserConfig { } fn assert_test_invariants(nodes: &[HarnessNode<'_>; 3]) { - assert_eq!(nodes[0].list_channels().len(), 3); - 
assert_eq!(nodes[1].list_channels().len(), 6); - assert_eq!(nodes[2].list_channels().len(), 3); + assert_eq!(nodes[0].node.list_channels().len(), 3); + assert_eq!(nodes[1].node.list_channels().len(), 6); + assert_eq!(nodes[2].node.list_channels().len(), 3); // All broadcasters should be empty. Broadcast transactions are handled explicitly. assert!(nodes[0].broadcaster.txn_broadcasted.borrow().is_empty()); @@ -2001,6 +2013,159 @@ fn lock_fundings(nodes: &[HarnessNode<'_>; 3]) { } } +impl<'a, Out: Output + MaybeSend + MaybeSync> Harness<'a, Out> { + fn new(data: &[u8], out: Out, router: &'a FuzzRouter) -> Self { + // Read initial monitor styles and channel type from fuzz input byte 0: + // bits 0-2: monitor styles (1 bit per node) + // bits 3-4: channel type (0=Legacy, 1=KeyedAnchors, 2=ZeroFeeCommitments) + let config_byte = if !data.is_empty() { data[0] } else { 0 }; + let chan_type = match (config_byte >> 3) & 0b11 { + 0 => ChanType::Legacy, + 1 => ChanType::KeyedAnchors, + _ => ChanType::ZeroFeeCommitments, + }; + let persistence_styles = [ + if config_byte & 0b01 != 0 { + ChannelMonitorUpdateStatus::InProgress + } else { + ChannelMonitorUpdateStatus::Completed + }, + if config_byte & 0b10 != 0 { + ChannelMonitorUpdateStatus::InProgress + } else { + ChannelMonitorUpdateStatus::Completed + }, + if config_byte & 0b100 != 0 { + ChannelMonitorUpdateStatus::InProgress + } else { + ChannelMonitorUpdateStatus::Completed + }, + ]; + + let wallet_a = TestWalletSource::new(SecretKey::from_slice(&[1; 32]).unwrap()); + let wallet_b = TestWalletSource::new(SecretKey::from_slice(&[2; 32]).unwrap()); + let wallet_c = TestWalletSource::new(SecretKey::from_slice(&[3; 32]).unwrap()); + let wallets = [&wallet_a, &wallet_b, &wallet_c]; + let coinbase_tx = bitcoin::Transaction { + version: bitcoin::transaction::Version::TWO, + lock_time: bitcoin::absolute::LockTime::ZERO, + input: vec![bitcoin::TxIn { ..Default::default() }], + output: wallets + .iter() + .map(|wallet| TxOut { + 
value: Amount::from_sat(100_000), + script_pubkey: wallet.get_change_script().unwrap(), + }) + .collect(), + }; + for (idx, wallet) in wallets.iter().enumerate() { + wallet.add_utxo(coinbase_tx.clone(), idx as u32); + } + + let fee_est_a = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }); + let fee_est_b = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }); + let fee_est_c = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }); + let broadcast_a = Arc::new(TestBroadcaster { txn_broadcasted: RefCell::new(Vec::new()) }); + let broadcast_b = Arc::new(TestBroadcaster { txn_broadcasted: RefCell::new(Vec::new()) }); + let broadcast_c = Arc::new(TestBroadcaster { txn_broadcasted: RefCell::new(Vec::new()) }); + + // 3 nodes is enough to hit all the possible cases, notably + // unknown-source-unknown-dest forwarding. + let mut nodes = [ + HarnessNode::new( + 0, + wallet_a, + Arc::clone(&fee_est_a), + Arc::clone(&broadcast_a), + persistence_styles[0], + &out, + router, + chan_type, + ), + HarnessNode::new( + 1, + wallet_b, + Arc::clone(&fee_est_b), + Arc::clone(&broadcast_b), + persistence_styles[1], + &out, + router, + chan_type, + ), + HarnessNode::new( + 2, + wallet_c, + Arc::clone(&fee_est_c), + Arc::clone(&broadcast_c), + persistence_styles[2], + &out, + router, + chan_type, + ), + ]; + let mut chain_state = ChainState::new(); + + // Connect peers first, then create channels. + connect_peers(&nodes[0].node, &nodes[1].node); + connect_peers(&nodes[1].node, &nodes[2].node); + + // Create 3 channels between A-B and 3 channels between B-C (6 total). + // + // Use distinct version numbers for each funding transaction so each test + // channel gets its own txid and funding outpoint. + // A-B: channel 2 A and B have 0-reserve (trusted open + trusted accept), + // channel 3 A has 0-reserve (trusted accept). 
+ make_channel(&nodes[0], &nodes[1], 1, false, false, &mut chain_state); + make_channel(&nodes[0], &nodes[1], 2, true, true, &mut chain_state); + make_channel(&nodes[0], &nodes[1], 3, false, true, &mut chain_state); + // B-C: channel 4 B has 0-reserve (via trusted accept), + // channel 5 C has 0-reserve (via trusted open). + make_channel(&nodes[1], &nodes[2], 4, false, true, &mut chain_state); + make_channel(&nodes[1], &nodes[2], 5, true, false, &mut chain_state); + make_channel(&nodes[1], &nodes[2], 6, false, false, &mut chain_state); + + // Wipe the transactions-broadcasted set to make sure we don't broadcast + // any transactions during normal operation after setup. + nodes[0].broadcaster.txn_broadcasted.borrow_mut().clear(); + nodes[1].broadcaster.txn_broadcasted.borrow_mut().clear(); + nodes[2].broadcaster.txn_broadcasted.borrow_mut().clear(); + + // Sync all nodes to tip to lock the funding. + nodes[0].sync_with_chain_state(&chain_state, None); + nodes[1].sync_with_chain_state(&chain_state, None); + nodes[2].sync_with_chain_state(&chain_state, None); + + lock_fundings(&nodes); + + let chan_ab_ids = { + // Get channel IDs for all A-B channels (from node A's perspective). + let node_a_chans = nodes[0].node.list_usable_channels(); + [node_a_chans[0].channel_id, node_a_chans[1].channel_id, node_a_chans[2].channel_id] + }; + let chan_bc_ids = { + // Get channel IDs for all B-C channels (from node C's perspective). 
+ let node_c_chans = nodes[2].node.list_usable_channels(); + [node_c_chans[0].channel_id, node_c_chans[1].channel_id, node_c_chans[2].channel_id] + }; + + for node in &mut nodes { + node.serialized_manager = node.node.encode(); + } + + Self { + out, + chan_type, + chain_state, + nodes, + ab_link: PeerLink::new(0, 1, chan_ab_ids), + bc_link: PeerLink::new(1, 2, chan_bc_ids), + queues: EventQueues::new(), + payments: PaymentTracker::new(), + read_pos: 1, + } + } +} + fn process_msg_events_impl( node_idx: usize, corrupt_forward: bool, limit_events: ProcessMessages, nodes: &[HarnessNode<'_>; 3], out: &Out, queues: &mut EventQueues, @@ -2035,7 +2200,7 @@ fn process_msg_events_impl( corrupt_forward: bool, ) { if !corrupt_forward { - dest.handle_update_add_htlc(source_node_id, update_add); + dest.node.handle_update_add_htlc(source_node_id, update_add); } else { // Corrupt the update_add_htlc message so that its HMAC check will fail and we // generate an update_fail_malformed_htlc instead of an update_fail_htlc as we do @@ -2043,7 +2208,7 @@ fn process_msg_events_impl( let mut msg_ser = update_add.encode(); msg_ser[1000] ^= 0xff; let new_msg = UpdateAddHTLC::read_from_fixed_length_buffer(&mut &msg_ser[..]).unwrap(); - dest.handle_update_add_htlc(source_node_id, &new_msg); + dest.node.handle_update_add_htlc(source_node_id, &new_msg); } } @@ -2073,19 +2238,19 @@ fn process_msg_events_impl( || !update_fail_malformed_htlcs.is_empty(); for update_fulfill in update_fulfill_htlcs { log_msg_delivery(node_idx, dest_idx, "update_fulfill_htlc", out); - dest.handle_update_fulfill_htlc(source_node_id, update_fulfill); + dest.node.handle_update_fulfill_htlc(source_node_id, update_fulfill); } for update_fail in update_fail_htlcs.iter() { log_msg_delivery(node_idx, dest_idx, "update_fail_htlc", out); - dest.handle_update_fail_htlc(source_node_id, update_fail); + dest.node.handle_update_fail_htlc(source_node_id, update_fail); } for update_fail_malformed in 
update_fail_malformed_htlcs.iter() { log_msg_delivery(node_idx, dest_idx, "update_fail_malformed_htlc", out); - dest.handle_update_fail_malformed_htlc(source_node_id, update_fail_malformed); + dest.node.handle_update_fail_malformed_htlc(source_node_id, update_fail_malformed); } if let Some(msg) = update_fee { log_msg_delivery(node_idx, dest_idx, "update_fee", out); - dest.handle_update_fee(source_node_id, &msg); + dest.node.handle_update_fee(source_node_id, &msg); } if limit_events != ProcessMessages::AllMessages && processed_change { // If we only want to process some messages, don't deliver the CS until later. @@ -2103,7 +2268,7 @@ fn process_msg_events_impl( }); } log_msg_delivery(node_idx, dest_idx, "commitment_signed", out); - dest.handle_commitment_signed_batch_test(source_node_id, &commitment_signed); + dest.node.handle_commitment_signed_batch_test(source_node_id, &commitment_signed); None } @@ -2127,78 +2292,78 @@ fn process_msg_events_impl( }, MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => { let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "revoke_and_ack"); - nodes[dest_idx].handle_revoke_and_ack(source_node_id, msg); + nodes[dest_idx].node.handle_revoke_and_ack(source_node_id, msg); None }, MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => { let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "channel_reestablish"); - nodes[dest_idx].handle_channel_reestablish(source_node_id, msg); + nodes[dest_idx].node.handle_channel_reestablish(source_node_id, msg); None }, MessageSendEvent::SendStfu { ref node_id, ref msg } => { let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "stfu"); - nodes[dest_idx].handle_stfu(source_node_id, msg); + nodes[dest_idx].node.handle_stfu(source_node_id, msg); None }, MessageSendEvent::SendTxAddInput { ref node_id, ref msg } => { let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "tx_add_input"); - nodes[dest_idx].handle_tx_add_input(source_node_id, 
msg); + nodes[dest_idx].node.handle_tx_add_input(source_node_id, msg); None }, MessageSendEvent::SendTxAddOutput { ref node_id, ref msg } => { let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "tx_add_output"); - nodes[dest_idx].handle_tx_add_output(source_node_id, msg); + nodes[dest_idx].node.handle_tx_add_output(source_node_id, msg); None }, MessageSendEvent::SendTxRemoveInput { ref node_id, ref msg } => { let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "tx_remove_input"); - nodes[dest_idx].handle_tx_remove_input(source_node_id, msg); + nodes[dest_idx].node.handle_tx_remove_input(source_node_id, msg); None }, MessageSendEvent::SendTxRemoveOutput { ref node_id, ref msg } => { let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "tx_remove_output"); - nodes[dest_idx].handle_tx_remove_output(source_node_id, msg); + nodes[dest_idx].node.handle_tx_remove_output(source_node_id, msg); None }, MessageSendEvent::SendTxComplete { ref node_id, ref msg } => { let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "tx_complete"); - nodes[dest_idx].handle_tx_complete(source_node_id, msg); + nodes[dest_idx].node.handle_tx_complete(source_node_id, msg); None }, MessageSendEvent::SendTxAbort { ref node_id, ref msg } => { let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "tx_abort"); - nodes[dest_idx].handle_tx_abort(source_node_id, msg); + nodes[dest_idx].node.handle_tx_abort(source_node_id, msg); None }, MessageSendEvent::SendTxInitRbf { ref node_id, ref msg } => { let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "tx_init_rbf"); - nodes[dest_idx].handle_tx_init_rbf(source_node_id, msg); + nodes[dest_idx].node.handle_tx_init_rbf(source_node_id, msg); None }, MessageSendEvent::SendTxAckRbf { ref node_id, ref msg } => { let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "tx_ack_rbf"); - nodes[dest_idx].handle_tx_ack_rbf(source_node_id, msg); + nodes[dest_idx].node.handle_tx_ack_rbf(source_node_id, 
msg); None }, MessageSendEvent::SendTxSignatures { ref node_id, ref msg } => { let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "tx_signatures"); - nodes[dest_idx].handle_tx_signatures(source_node_id, msg); + nodes[dest_idx].node.handle_tx_signatures(source_node_id, msg); None }, MessageSendEvent::SendSpliceInit { ref node_id, ref msg } => { let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "splice_init"); - nodes[dest_idx].handle_splice_init(source_node_id, msg); + nodes[dest_idx].node.handle_splice_init(source_node_id, msg); None }, MessageSendEvent::SendSpliceAck { ref node_id, ref msg } => { let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "splice_ack"); - nodes[dest_idx].handle_splice_ack(source_node_id, msg); + nodes[dest_idx].node.handle_splice_ack(source_node_id, msg); None }, MessageSendEvent::SendSpliceLocked { ref node_id, ref msg } => { let dest_idx = log_peer_message(node_idx, node_id, nodes, out, "splice_locked"); - nodes[dest_idx].handle_splice_locked(source_node_id, msg); + nodes[dest_idx].node.handle_splice_locked(source_node_id, msg); None }, MessageSendEvent::HandleError { ref action, .. } => { @@ -2223,7 +2388,7 @@ fn process_msg_events_impl( let mut events = queues.take_for_node(node_idx); let mut new_events = Vec::new(); if limit_events != ProcessMessages::OnePendingMessage { - new_events = nodes[node_idx].get_and_clear_pending_msg_events(); + new_events = nodes[node_idx].node.get_and_clear_pending_msg_events(); } let mut had_events = false; let source_node_id = nodes[node_idx].our_node_id(); @@ -2268,7 +2433,7 @@ fn process_events_impl( // Multiple HTLCs can resolve for the same payment hash, so deduplicate // claim/fail handling per event batch. let mut claim_set = new_hash_map(); - let mut events = nodes[node_idx].get_and_clear_pending_events(); + let mut events = nodes[node_idx].node.get_and_clear_pending_events(); let had_events = !events.is_empty(); for event in events.drain(..) 
{ match event { @@ -2304,6 +2469,7 @@ fn process_events_impl( } => { let signed_tx = nodes[node_idx].wallet.sign_tx(unsigned_transaction).unwrap(); nodes[node_idx] + .node .funding_transaction_signed(&channel_id, &counterparty_node_id, signed_tx) .unwrap(); }, @@ -2323,8 +2489,8 @@ fn process_events_impl( _ => panic!("Unhandled event"), } } - while nodes[node_idx].needs_pending_htlc_processing() { - nodes[node_idx].process_pending_htlc_forwards(); + while nodes[node_idx].node.needs_pending_htlc_processing() { + nodes[node_idx].node.process_pending_htlc_forwards(); } had_events } @@ -2383,150 +2549,19 @@ fn process_all_events_impl( #[inline] pub fn do_test(data: &[u8], out: Out) { let router = FuzzRouter {}; - - // Read initial monitor styles and channel type from fuzz input byte 0: - // bits 0-2: monitor styles (1 bit per node) - // bits 3-4: channel type (0=Legacy, 1=KeyedAnchors, 2=ZeroFeeCommitments) - let config_byte = if !data.is_empty() { data[0] } else { 0 }; - let chan_type = match (config_byte >> 3) & 0b11 { - 0 => ChanType::Legacy, - 1 => ChanType::KeyedAnchors, - _ => ChanType::ZeroFeeCommitments, - }; - let persistence_styles = [ - if config_byte & 0b01 != 0 { - ChannelMonitorUpdateStatus::InProgress - } else { - ChannelMonitorUpdateStatus::Completed - }, - if config_byte & 0b10 != 0 { - ChannelMonitorUpdateStatus::InProgress - } else { - ChannelMonitorUpdateStatus::Completed - }, - if config_byte & 0b100 != 0 { - ChannelMonitorUpdateStatus::InProgress - } else { - ChannelMonitorUpdateStatus::Completed - }, - ]; - - let mut chain_state = ChainState::new(); - let wallet_a = TestWalletSource::new(SecretKey::from_slice(&[1; 32]).unwrap()); - let wallet_b = TestWalletSource::new(SecretKey::from_slice(&[2; 32]).unwrap()); - let wallet_c = TestWalletSource::new(SecretKey::from_slice(&[3; 32]).unwrap()); - - let wallets = [&wallet_a, &wallet_b, &wallet_c]; - let coinbase_tx = bitcoin::Transaction { - version: bitcoin::transaction::Version::TWO, - lock_time: 
bitcoin::absolute::LockTime::ZERO, - input: vec![bitcoin::TxIn { ..Default::default() }], - output: wallets - .iter() - .map(|wallet| TxOut { - value: Amount::from_sat(100_000), - script_pubkey: wallet.get_change_script().unwrap(), - }) - .collect(), - }; - for (idx, wallet) in wallets.iter().enumerate() { - wallet.add_utxo(coinbase_tx.clone(), idx as u32); - } - - let fee_est_a = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }); - let fee_est_b = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }); - let fee_est_c = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }); - let broadcast_a = Arc::new(TestBroadcaster { txn_broadcasted: RefCell::new(Vec::new()) }); - let broadcast_b = Arc::new(TestBroadcaster { txn_broadcasted: RefCell::new(Vec::new()) }); - let broadcast_c = Arc::new(TestBroadcaster { txn_broadcasted: RefCell::new(Vec::new()) }); - - // 3 nodes is enough to hit all the possible cases, notably unknown-source-unknown-dest - // forwarding. - let mut nodes = [ - HarnessNode::new( - 0, - wallet_a, - Arc::clone(&fee_est_a), - Arc::clone(&broadcast_a), - persistence_styles[0], - &out, - &router, - chan_type, - ), - HarnessNode::new( - 1, - wallet_b, - Arc::clone(&fee_est_b), - Arc::clone(&broadcast_b), - persistence_styles[1], - &out, - &router, - chan_type, - ), - HarnessNode::new( - 2, - wallet_c, - Arc::clone(&fee_est_c), - Arc::clone(&broadcast_c), - persistence_styles[2], - &out, - &router, - chan_type, - ), - ]; - - // Connect peers first, then create channels - connect_peers(&nodes[0], &nodes[1]); - connect_peers(&nodes[1], &nodes[2]); - - // Create 3 channels between A-B and 3 channels between B-C (6 total). - // - // Use distinct version numbers for each funding transaction so each test channel gets its own - // txid and funding outpoint. 
- // A-B: channel 2 A and B have 0-reserve (trusted open + trusted accept), - // channel 3 A has 0-reserve (trusted accept) - make_channel(&nodes[0], &nodes[1], 1, false, false, &mut chain_state); - make_channel(&nodes[0], &nodes[1], 2, true, true, &mut chain_state); - make_channel(&nodes[0], &nodes[1], 3, false, true, &mut chain_state); - // B-C: channel 4 B has 0-reserve (via trusted accept), - // channel 5 C has 0-reserve (via trusted open) - make_channel(&nodes[1], &nodes[2], 4, false, true, &mut chain_state); - make_channel(&nodes[1], &nodes[2], 5, true, false, &mut chain_state); - make_channel(&nodes[1], &nodes[2], 6, false, false, &mut chain_state); - - // Wipe the transactions-broadcasted set to make sure we don't broadcast any transactions - // during normal operation in `test_return`. - nodes[0].broadcaster.txn_broadcasted.borrow_mut().clear(); - nodes[1].broadcaster.txn_broadcasted.borrow_mut().clear(); - nodes[2].broadcaster.txn_broadcasted.borrow_mut().clear(); - - // Sync all nodes to tip to lock the funding. 
- nodes[0].sync_with_chain_state(&chain_state, None); - nodes[1].sync_with_chain_state(&chain_state, None); - nodes[2].sync_with_chain_state(&chain_state, None); - - lock_fundings(&nodes); - - // Get channel IDs for all A-B channels (from node A's perspective) - let chan_ab_ids = { - let node_a_chans = nodes[0].list_usable_channels(); - [node_a_chans[0].channel_id, node_a_chans[1].channel_id, node_a_chans[2].channel_id] - }; - // Get channel IDs for all B-C channels (from node C's perspective) - let chan_bc_ids = { - let node_c_chans = nodes[2].list_usable_channels(); - [node_c_chans[0].channel_id, node_c_chans[1].channel_id, node_c_chans[2].channel_id] - }; - let mut ab_link = PeerLink::new(0, 1, chan_ab_ids); - let mut bc_link = PeerLink::new(1, 2, chan_bc_ids); + let Harness { + out, + chan_type, + mut chain_state, + mut nodes, + mut ab_link, + mut bc_link, + mut queues, + mut payments, + mut read_pos, + } = Harness::new(data, out, &router); let chan_a_id = ab_link.first_channel_id(); let chan_b_id = bc_link.first_channel_id(); - let mut queues = EventQueues::new(); - let mut payments = PaymentTracker::new(); - - for node in &mut nodes { - node.serialized_manager = node.encode(); - } macro_rules! test_return { () => {{ @@ -2535,18 +2570,6 @@ pub fn do_test(data: &[u8], out: Out) { }}; } - let mut read_pos = 1; // First byte was consumed for initial config (persistence styles + chan_type) - macro_rules! get_slice { - ($len: expr) => {{ - let slice_len = $len as usize; - if data.len() < read_pos + slice_len { - test_return!(); - } - read_pos += slice_len; - &data[read_pos - slice_len..read_pos] - }}; - } - loop { macro_rules! process_msg_events { ($node: expr, $corrupt_forward: expr, $limit_events: expr) => {{ @@ -2579,7 +2602,11 @@ pub fn do_test(data: &[u8], out: Out) { }}; } - let v = get_slice!(1)[0]; + if data.len() < read_pos + 1 { + test_return!(); + } + let v = data[read_pos]; + read_pos += 1; out.locked_write(format!("READ A BYTE! 
HANDLING INPUT {:x}...........\n", v).as_bytes()); match v { // In general, we keep related message groups close together in binary form, allowing @@ -2862,59 +2889,67 @@ pub fn do_test(data: &[u8], out: Out) { 0xa0 => { if !cfg!(splicing) { - test_return!(); + assert_test_invariants(&nodes); + return; } - let cp_node_id = nodes[1].get_our_node_id(); + let cp_node_id = nodes[1].our_node_id(); nodes[0].splice_in(&cp_node_id, &chan_a_id); }, 0xa1 => { if !cfg!(splicing) { - test_return!(); + assert_test_invariants(&nodes); + return; } - let cp_node_id = nodes[0].get_our_node_id(); + let cp_node_id = nodes[0].our_node_id(); nodes[1].splice_in(&cp_node_id, &chan_a_id); }, 0xa2 => { if !cfg!(splicing) { - test_return!(); + assert_test_invariants(&nodes); + return; } - let cp_node_id = nodes[2].get_our_node_id(); + let cp_node_id = nodes[2].our_node_id(); nodes[1].splice_in(&cp_node_id, &chan_b_id); }, 0xa3 => { if !cfg!(splicing) { - test_return!(); + assert_test_invariants(&nodes); + return; } - let cp_node_id = nodes[1].get_our_node_id(); + let cp_node_id = nodes[1].our_node_id(); nodes[2].splice_in(&cp_node_id, &chan_b_id); }, 0xa4 => { if !cfg!(splicing) { - test_return!(); + assert_test_invariants(&nodes); + return; } - let cp_node_id = nodes[1].get_our_node_id(); + let cp_node_id = nodes[1].our_node_id(); nodes[0].splice_out(&cp_node_id, &chan_a_id); }, 0xa5 => { if !cfg!(splicing) { - test_return!(); + assert_test_invariants(&nodes); + return; } - let cp_node_id = nodes[0].get_our_node_id(); + let cp_node_id = nodes[0].our_node_id(); nodes[1].splice_out(&cp_node_id, &chan_a_id); }, 0xa6 => { if !cfg!(splicing) { - test_return!(); + assert_test_invariants(&nodes); + return; } - let cp_node_id = nodes[2].get_our_node_id(); + let cp_node_id = nodes[2].our_node_id(); nodes[1].splice_out(&cp_node_id, &chan_b_id); }, 0xa7 => { if !cfg!(splicing) { - test_return!(); + assert_test_invariants(&nodes); + return; } - let cp_node_id = nodes[1].get_our_node_id(); + let 
cp_node_id = nodes[1].our_node_id(); nodes[2].splice_out(&cp_node_id, &chan_b_id); }, @@ -2946,15 +2981,21 @@ pub fn do_test(data: &[u8], out: Out) { }, 0xb0 | 0xb1 | 0xb2 => { + // Restart node A, picking among the in-flight `ChannelMonitor`s to use based on + // the value of `v` we're matching. ab_link.disconnect_for_reload(0, &mut nodes, &mut queues); nodes[0].reload(v, &out, &router, chan_type); }, 0xb3..=0xbb => { + // Restart node B, picking among the in-flight `ChannelMonitor`s to use based on + // the value of `v` we're matching. ab_link.disconnect_for_reload(1, &mut nodes, &mut queues); bc_link.disconnect_for_reload(1, &mut nodes, &mut queues); nodes[1].reload(v, &out, &router, chan_type); }, 0xbc | 0xbd | 0xbe => { + // Restart node C, picking among the in-flight `ChannelMonitor`s to use based on + // the value of `v` we're matching. bc_link.disconnect_for_reload(2, &mut nodes, &mut queues); nodes[2].reload(v, &out, &router, chan_type); }, @@ -2966,103 +3007,103 @@ pub fn do_test(data: &[u8], out: Out) { nodes[0] .keys_manager .enable_op_for_all_signers(SignerOp::SignCounterpartyCommitment); - nodes[0].signer_unblocked(None); + nodes[0].node.signer_unblocked(None); }, 0xc4 => { nodes[1] .keys_manager .enable_op_for_all_signers(SignerOp::SignCounterpartyCommitment); - let filter = Some((nodes[0].get_our_node_id(), chan_a_id)); - nodes[1].signer_unblocked(filter); + let filter = Some((nodes[0].our_node_id(), chan_a_id)); + nodes[1].node.signer_unblocked(filter); }, 0xc5 => { nodes[1] .keys_manager .enable_op_for_all_signers(SignerOp::SignCounterpartyCommitment); - let filter = Some((nodes[2].get_our_node_id(), chan_b_id)); - nodes[1].signer_unblocked(filter); + let filter = Some((nodes[2].our_node_id(), chan_b_id)); + nodes[1].node.signer_unblocked(filter); }, 0xc6 => { nodes[2] .keys_manager .enable_op_for_all_signers(SignerOp::SignCounterpartyCommitment); - nodes[2].signer_unblocked(None); + nodes[2].node.signer_unblocked(None); }, 0xc7 => { 
nodes[0].keys_manager.enable_op_for_all_signers(SignerOp::GetPerCommitmentPoint); - nodes[0].signer_unblocked(None); + nodes[0].node.signer_unblocked(None); }, 0xc8 => { nodes[1].keys_manager.enable_op_for_all_signers(SignerOp::GetPerCommitmentPoint); - let filter = Some((nodes[0].get_our_node_id(), chan_a_id)); - nodes[1].signer_unblocked(filter); + let filter = Some((nodes[0].our_node_id(), chan_a_id)); + nodes[1].node.signer_unblocked(filter); }, 0xc9 => { nodes[1].keys_manager.enable_op_for_all_signers(SignerOp::GetPerCommitmentPoint); - let filter = Some((nodes[2].get_our_node_id(), chan_b_id)); - nodes[1].signer_unblocked(filter); + let filter = Some((nodes[2].our_node_id(), chan_b_id)); + nodes[1].node.signer_unblocked(filter); }, 0xca => { nodes[2].keys_manager.enable_op_for_all_signers(SignerOp::GetPerCommitmentPoint); - nodes[2].signer_unblocked(None); + nodes[2].node.signer_unblocked(None); }, 0xcb => { nodes[0].keys_manager.enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); - nodes[0].signer_unblocked(None); + nodes[0].node.signer_unblocked(None); }, 0xcc => { nodes[1].keys_manager.enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); - let filter = Some((nodes[0].get_our_node_id(), chan_a_id)); - nodes[1].signer_unblocked(filter); + let filter = Some((nodes[0].our_node_id(), chan_a_id)); + nodes[1].node.signer_unblocked(filter); }, 0xcd => { nodes[1].keys_manager.enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); - let filter = Some((nodes[2].get_our_node_id(), chan_b_id)); - nodes[1].signer_unblocked(filter); + let filter = Some((nodes[2].our_node_id(), chan_b_id)); + nodes[1].node.signer_unblocked(filter); }, 0xce => { nodes[2].keys_manager.enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); - nodes[2].signer_unblocked(None); + nodes[2].node.signer_unblocked(None); }, 0xf0 => { - ab_link.complete_monitor_updates_for_node(0, &nodes, MonitorUpdateSelector::First) + ab_link.complete_monitor_updates_for_node(0, 
&nodes, MonitorUpdateSelector::First); }, 0xf1 => { - ab_link.complete_monitor_updates_for_node(0, &nodes, MonitorUpdateSelector::Second) + ab_link.complete_monitor_updates_for_node(0, &nodes, MonitorUpdateSelector::Second); }, 0xf2 => { - ab_link.complete_monitor_updates_for_node(0, &nodes, MonitorUpdateSelector::Last) + ab_link.complete_monitor_updates_for_node(0, &nodes, MonitorUpdateSelector::Last); }, 0xf4 => { - ab_link.complete_monitor_updates_for_node(1, &nodes, MonitorUpdateSelector::First) + ab_link.complete_monitor_updates_for_node(1, &nodes, MonitorUpdateSelector::First); }, 0xf5 => { - ab_link.complete_monitor_updates_for_node(1, &nodes, MonitorUpdateSelector::Second) + ab_link.complete_monitor_updates_for_node(1, &nodes, MonitorUpdateSelector::Second); }, 0xf6 => { - ab_link.complete_monitor_updates_for_node(1, &nodes, MonitorUpdateSelector::Last) + ab_link.complete_monitor_updates_for_node(1, &nodes, MonitorUpdateSelector::Last); }, 0xf8 => { - bc_link.complete_monitor_updates_for_node(1, &nodes, MonitorUpdateSelector::First) + bc_link.complete_monitor_updates_for_node(1, &nodes, MonitorUpdateSelector::First); }, 0xf9 => { - bc_link.complete_monitor_updates_for_node(1, &nodes, MonitorUpdateSelector::Second) + bc_link.complete_monitor_updates_for_node(1, &nodes, MonitorUpdateSelector::Second); }, 0xfa => { - bc_link.complete_monitor_updates_for_node(1, &nodes, MonitorUpdateSelector::Last) + bc_link.complete_monitor_updates_for_node(1, &nodes, MonitorUpdateSelector::Last); }, 0xfc => { - bc_link.complete_monitor_updates_for_node(2, &nodes, MonitorUpdateSelector::First) + bc_link.complete_monitor_updates_for_node(2, &nodes, MonitorUpdateSelector::First); }, 0xfd => { - bc_link.complete_monitor_updates_for_node(2, &nodes, MonitorUpdateSelector::Second) + bc_link.complete_monitor_updates_for_node(2, &nodes, MonitorUpdateSelector::Second); }, 0xfe => { - bc_link.complete_monitor_updates_for_node(2, &nodes, MonitorUpdateSelector::Last) + 
bc_link.complete_monitor_updates_for_node(2, &nodes, MonitorUpdateSelector::Last); }, 0xff => { @@ -3077,9 +3118,9 @@ pub fn do_test(data: &[u8], out: Out) { nodes[1].keys_manager.enable_op_for_all_signers(op); nodes[2].keys_manager.enable_op_for_all_signers(op); } - nodes[0].signer_unblocked(None); - nodes[1].signer_unblocked(None); - nodes[2].signer_unblocked(None); + nodes[0].node.signer_unblocked(None); + nodes[1].node.signer_unblocked(None); + nodes[2].node.signer_unblocked(None); process_all_events_impl( &nodes, @@ -3095,7 +3136,7 @@ pub fn do_test(data: &[u8], out: Out) { // channels to see if we have any committed HTLC parts of an MPP payment that need // to be failed back. for node in &nodes { - node.timer_tick_occurred(); + node.node.timer_tick_occurred(); } process_all_events_impl( &nodes, From b3752745c88242f563bb879face8320b99550d17 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 21 Apr 2026 17:03:43 +0200 Subject: [PATCH 14/15] Wrap chanmon consistency state in Harness Wrap the chanmon consistency state in a `Harness` struct. The fuzz loop now accesses nodes, links, queues, payments, and chain state through one owner while keeping the existing byte actions intact. 
--- fuzz/src/chanmon_consistency.rs | 594 +++++++++++++++++++------------- 1 file changed, 357 insertions(+), 237 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index d5e547ae185..553c3ec3aaa 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -1783,7 +1783,8 @@ impl PaymentTracker { } } -struct Harness<'a, Out: Output + MaybeSend + MaybeSync> { +struct Harness<'a, 'd, Out: Output + MaybeSend + MaybeSync> { + data: &'d [u8], out: Out, chan_type: ChanType, chain_state: ChainState, @@ -2013,8 +2014,8 @@ fn lock_fundings(nodes: &[HarnessNode<'_>; 3]) { } } -impl<'a, Out: Output + MaybeSend + MaybeSync> Harness<'a, Out> { - fn new(data: &[u8], out: Out, router: &'a FuzzRouter) -> Self { +impl<'a, 'd, Out: Output + MaybeSend + MaybeSync> Harness<'a, 'd, Out> { + fn new(data: &'d [u8], out: Out, router: &'a FuzzRouter) -> Self { // Read initial monitor styles and channel type from fuzz input byte 0: // bits 0-2: monitor styles (1 bit per node) // bits 3-4: channel type (0=Legacy, 1=KeyedAnchors, 2=ZeroFeeCommitments) @@ -2153,6 +2154,7 @@ impl<'a, Out: Output + MaybeSend + MaybeSync> Harness<'a, Out> { } Self { + data, out, chan_type, chain_state, @@ -2164,6 +2166,34 @@ impl<'a, Out: Output + MaybeSend + MaybeSync> Harness<'a, Out> { read_pos: 1, } } + + fn chan_a_id(&self) -> ChannelId { + self.ab_link.first_channel_id() + } + + fn chan_b_id(&self) -> ChannelId { + self.bc_link.first_channel_id() + } + + fn next_input_byte(&mut self) -> Option { + if self.data.len() < self.read_pos + 1 { + None + } else { + let value = self.data[self.read_pos]; + self.read_pos += 1; + Some(value) + } + } + + fn finish(&self) { + assert_test_invariants(&self.nodes); + } + + fn refresh_serialized_managers(&mut self) { + for node in &mut self.nodes { + node.refresh_serialized_manager(); + } + } } fn process_msg_events_impl( @@ -2549,23 +2579,13 @@ fn process_all_events_impl( #[inline] pub fn do_test(data: &[u8], 
out: Out) { let router = FuzzRouter {}; - let Harness { - out, - chan_type, - mut chain_state, - mut nodes, - mut ab_link, - mut bc_link, - mut queues, - mut payments, - mut read_pos, - } = Harness::new(data, out, &router); - let chan_a_id = ab_link.first_channel_id(); - let chan_b_id = bc_link.first_channel_id(); + let mut harness = Harness::new(data, out, &router); + let chan_a_id = harness.chan_a_id(); + let chan_b_id = harness.chan_b_id(); macro_rules! test_return { () => {{ - assert_test_invariants(&nodes); + harness.finish(); return; }}; } @@ -2577,9 +2597,9 @@ pub fn do_test(data: &[u8], out: Out) { $node, $corrupt_forward, $limit_events, - &nodes, - &out, - &mut queues, + &harness.nodes, + &harness.out, + &mut harness.queues, ) }}; } @@ -2592,7 +2612,13 @@ pub fn do_test(data: &[u8], out: Out) { macro_rules! process_events { ($node: expr, $fail: expr) => {{ - process_events_impl($node, $fail, &nodes, &mut chain_state, &mut payments) + process_events_impl( + $node, + $fail, + &harness.nodes, + &mut harness.chain_state, + &mut harness.payments, + ) }}; } @@ -2602,48 +2628,46 @@ pub fn do_test(data: &[u8], out: Out) { }}; } - if data.len() < read_pos + 1 { - test_return!(); - } - let v = data[read_pos]; - read_pos += 1; - out.locked_write(format!("READ A BYTE! HANDLING INPUT {:x}...........\n", v).as_bytes()); + let v = if let Some(value) = harness.next_input_byte() { value } else { test_return!() }; + harness + .out + .locked_write(format!("READ A BYTE! HANDLING INPUT {:x}...........\n", v).as_bytes()); match v { // In general, we keep related message groups close together in binary form, allowing // bit-twiddling mutations to have similar effects. This is probably overkill, but no // harm in doing so. 
- 0x00 => nodes[0].set_persistence_style(ChannelMonitorUpdateStatus::InProgress), - 0x01 => nodes[1].set_persistence_style(ChannelMonitorUpdateStatus::InProgress), - 0x02 => nodes[2].set_persistence_style(ChannelMonitorUpdateStatus::InProgress), - 0x04 => nodes[0].set_persistence_style(ChannelMonitorUpdateStatus::Completed), - 0x05 => nodes[1].set_persistence_style(ChannelMonitorUpdateStatus::Completed), - 0x06 => nodes[2].set_persistence_style(ChannelMonitorUpdateStatus::Completed), + 0x00 => harness.nodes[0].set_persistence_style(ChannelMonitorUpdateStatus::InProgress), + 0x01 => harness.nodes[1].set_persistence_style(ChannelMonitorUpdateStatus::InProgress), + 0x02 => harness.nodes[2].set_persistence_style(ChannelMonitorUpdateStatus::InProgress), + 0x04 => harness.nodes[0].set_persistence_style(ChannelMonitorUpdateStatus::Completed), + 0x05 => harness.nodes[1].set_persistence_style(ChannelMonitorUpdateStatus::Completed), + 0x06 => harness.nodes[2].set_persistence_style(ChannelMonitorUpdateStatus::Completed), 0x08 => { - for id in ab_link.channel_ids() { - nodes[0].complete_all_monitor_updates(id); + for id in harness.ab_link.channel_ids() { + harness.nodes[0].complete_all_monitor_updates(id); } }, 0x09 => { - for id in ab_link.channel_ids() { - nodes[1].complete_all_monitor_updates(id); + for id in harness.ab_link.channel_ids() { + harness.nodes[1].complete_all_monitor_updates(id); } }, 0x0a => { - for id in bc_link.channel_ids() { - nodes[1].complete_all_monitor_updates(id); + for id in harness.bc_link.channel_ids() { + harness.nodes[1].complete_all_monitor_updates(id); } }, 0x0b => { - for id in bc_link.channel_ids() { - nodes[2].complete_all_monitor_updates(id); + for id in harness.bc_link.channel_ids() { + harness.nodes[2].complete_all_monitor_updates(id); } }, - 0x0c => ab_link.disconnect(&mut nodes, &mut queues), - 0x0d => bc_link.disconnect(&mut nodes, &mut queues), - 0x0e => ab_link.reconnect(&mut nodes), - 0x0f => bc_link.reconnect(&mut nodes), + 0x0c => 
harness.ab_link.disconnect(&mut harness.nodes, &mut harness.queues), + 0x0d => harness.bc_link.disconnect(&mut harness.nodes, &mut harness.queues), + 0x0e => harness.ab_link.reconnect(&mut harness.nodes), + 0x0f => harness.bc_link.reconnect(&mut harness.nodes), 0x10 => process_msg_noret!(0, true, ProcessMessages::AllMessages), 0x11 => process_msg_noret!(0, false, ProcessMessages::AllMessages), @@ -2677,169 +2701,191 @@ pub fn do_test(data: &[u8], out: Out) { // 1/10th the channel size: 0x30 => { - payments.send_direct(&nodes, 0, 1, chan_a_id, 10_000_000); + harness.payments.send_direct(&harness.nodes, 0, 1, chan_a_id, 10_000_000); }, 0x31 => { - payments.send_direct(&nodes, 1, 0, chan_a_id, 10_000_000); + harness.payments.send_direct(&harness.nodes, 1, 0, chan_a_id, 10_000_000); }, 0x32 => { - payments.send_direct(&nodes, 1, 2, chan_b_id, 10_000_000); + harness.payments.send_direct(&harness.nodes, 1, 2, chan_b_id, 10_000_000); }, 0x33 => { - payments.send_direct(&nodes, 2, 1, chan_b_id, 10_000_000); + harness.payments.send_direct(&harness.nodes, 2, 1, chan_b_id, 10_000_000); }, 0x34 => { - payments.send_hop(&nodes, 0, 1, chan_a_id, 2, chan_b_id, 10_000_000); + harness.payments.send_hop( + &harness.nodes, + 0, + 1, + chan_a_id, + 2, + chan_b_id, + 10_000_000, + ); }, 0x35 => { - payments.send_hop(&nodes, 2, 1, chan_b_id, 0, chan_a_id, 10_000_000); + harness.payments.send_hop( + &harness.nodes, + 2, + 1, + chan_b_id, + 0, + chan_a_id, + 10_000_000, + ); }, 0x38 => { - payments.send_direct(&nodes, 0, 1, chan_a_id, 1_000_000); + harness.payments.send_direct(&harness.nodes, 0, 1, chan_a_id, 1_000_000); }, 0x39 => { - payments.send_direct(&nodes, 1, 0, chan_a_id, 1_000_000); + harness.payments.send_direct(&harness.nodes, 1, 0, chan_a_id, 1_000_000); }, 0x3a => { - payments.send_direct(&nodes, 1, 2, chan_b_id, 1_000_000); + harness.payments.send_direct(&harness.nodes, 1, 2, chan_b_id, 1_000_000); }, 0x3b => { - payments.send_direct(&nodes, 2, 1, chan_b_id, 1_000_000); + 
harness.payments.send_direct(&harness.nodes, 2, 1, chan_b_id, 1_000_000); }, 0x3c => { - payments.send_hop(&nodes, 0, 1, chan_a_id, 2, chan_b_id, 1_000_000); + harness.payments.send_hop(&harness.nodes, 0, 1, chan_a_id, 2, chan_b_id, 1_000_000); }, 0x3d => { - payments.send_hop(&nodes, 2, 1, chan_b_id, 0, chan_a_id, 1_000_000); + harness.payments.send_hop(&harness.nodes, 2, 1, chan_b_id, 0, chan_a_id, 1_000_000); }, 0x40 => { - payments.send_direct(&nodes, 0, 1, chan_a_id, 100_000); + harness.payments.send_direct(&harness.nodes, 0, 1, chan_a_id, 100_000); }, 0x41 => { - payments.send_direct(&nodes, 1, 0, chan_a_id, 100_000); + harness.payments.send_direct(&harness.nodes, 1, 0, chan_a_id, 100_000); }, 0x42 => { - payments.send_direct(&nodes, 1, 2, chan_b_id, 100_000); + harness.payments.send_direct(&harness.nodes, 1, 2, chan_b_id, 100_000); }, 0x43 => { - payments.send_direct(&nodes, 2, 1, chan_b_id, 100_000); + harness.payments.send_direct(&harness.nodes, 2, 1, chan_b_id, 100_000); }, 0x44 => { - payments.send_hop(&nodes, 0, 1, chan_a_id, 2, chan_b_id, 100_000); + harness.payments.send_hop(&harness.nodes, 0, 1, chan_a_id, 2, chan_b_id, 100_000); }, 0x45 => { - payments.send_hop(&nodes, 2, 1, chan_b_id, 0, chan_a_id, 100_000); + harness.payments.send_hop(&harness.nodes, 2, 1, chan_b_id, 0, chan_a_id, 100_000); }, 0x48 => { - payments.send_direct(&nodes, 0, 1, chan_a_id, 10_000); + harness.payments.send_direct(&harness.nodes, 0, 1, chan_a_id, 10_000); }, 0x49 => { - payments.send_direct(&nodes, 1, 0, chan_a_id, 10_000); + harness.payments.send_direct(&harness.nodes, 1, 0, chan_a_id, 10_000); }, 0x4a => { - payments.send_direct(&nodes, 1, 2, chan_b_id, 10_000); + harness.payments.send_direct(&harness.nodes, 1, 2, chan_b_id, 10_000); }, 0x4b => { - payments.send_direct(&nodes, 2, 1, chan_b_id, 10_000); + harness.payments.send_direct(&harness.nodes, 2, 1, chan_b_id, 10_000); }, 0x4c => { - payments.send_hop(&nodes, 0, 1, chan_a_id, 2, chan_b_id, 10_000); + 
harness.payments.send_hop(&harness.nodes, 0, 1, chan_a_id, 2, chan_b_id, 10_000); }, 0x4d => { - payments.send_hop(&nodes, 2, 1, chan_b_id, 0, chan_a_id, 10_000); + harness.payments.send_hop(&harness.nodes, 2, 1, chan_b_id, 0, chan_a_id, 10_000); }, 0x50 => { - payments.send_direct(&nodes, 0, 1, chan_a_id, 1_000); + harness.payments.send_direct(&harness.nodes, 0, 1, chan_a_id, 1_000); }, 0x51 => { - payments.send_direct(&nodes, 1, 0, chan_a_id, 1_000); + harness.payments.send_direct(&harness.nodes, 1, 0, chan_a_id, 1_000); }, 0x52 => { - payments.send_direct(&nodes, 1, 2, chan_b_id, 1_000); + harness.payments.send_direct(&harness.nodes, 1, 2, chan_b_id, 1_000); }, 0x53 => { - payments.send_direct(&nodes, 2, 1, chan_b_id, 1_000); + harness.payments.send_direct(&harness.nodes, 2, 1, chan_b_id, 1_000); }, 0x54 => { - payments.send_hop(&nodes, 0, 1, chan_a_id, 2, chan_b_id, 1_000); + harness.payments.send_hop(&harness.nodes, 0, 1, chan_a_id, 2, chan_b_id, 1_000); }, 0x55 => { - payments.send_hop(&nodes, 2, 1, chan_b_id, 0, chan_a_id, 1_000); + harness.payments.send_hop(&harness.nodes, 2, 1, chan_b_id, 0, chan_a_id, 1_000); }, 0x58 => { - payments.send_direct(&nodes, 0, 1, chan_a_id, 100); + harness.payments.send_direct(&harness.nodes, 0, 1, chan_a_id, 100); }, 0x59 => { - payments.send_direct(&nodes, 1, 0, chan_a_id, 100); + harness.payments.send_direct(&harness.nodes, 1, 0, chan_a_id, 100); }, 0x5a => { - payments.send_direct(&nodes, 1, 2, chan_b_id, 100); + harness.payments.send_direct(&harness.nodes, 1, 2, chan_b_id, 100); }, 0x5b => { - payments.send_direct(&nodes, 2, 1, chan_b_id, 100); + harness.payments.send_direct(&harness.nodes, 2, 1, chan_b_id, 100); }, 0x5c => { - payments.send_hop(&nodes, 0, 1, chan_a_id, 2, chan_b_id, 100); + harness.payments.send_hop(&harness.nodes, 0, 1, chan_a_id, 2, chan_b_id, 100); }, 0x5d => { - payments.send_hop(&nodes, 2, 1, chan_b_id, 0, chan_a_id, 100); + harness.payments.send_hop(&harness.nodes, 2, 1, chan_b_id, 0, chan_a_id, 
100); }, 0x60 => { - payments.send_direct(&nodes, 0, 1, chan_a_id, 10); + harness.payments.send_direct(&harness.nodes, 0, 1, chan_a_id, 10); }, 0x61 => { - payments.send_direct(&nodes, 1, 0, chan_a_id, 10); + harness.payments.send_direct(&harness.nodes, 1, 0, chan_a_id, 10); }, 0x62 => { - payments.send_direct(&nodes, 1, 2, chan_b_id, 10); + harness.payments.send_direct(&harness.nodes, 1, 2, chan_b_id, 10); }, 0x63 => { - payments.send_direct(&nodes, 2, 1, chan_b_id, 10); + harness.payments.send_direct(&harness.nodes, 2, 1, chan_b_id, 10); }, 0x64 => { - payments.send_hop(&nodes, 0, 1, chan_a_id, 2, chan_b_id, 10); + harness.payments.send_hop(&harness.nodes, 0, 1, chan_a_id, 2, chan_b_id, 10); }, 0x65 => { - payments.send_hop(&nodes, 2, 1, chan_b_id, 0, chan_a_id, 10); + harness.payments.send_hop(&harness.nodes, 2, 1, chan_b_id, 0, chan_a_id, 10); }, 0x68 => { - payments.send_direct(&nodes, 0, 1, chan_a_id, 1); + harness.payments.send_direct(&harness.nodes, 0, 1, chan_a_id, 1); }, 0x69 => { - payments.send_direct(&nodes, 1, 0, chan_a_id, 1); + harness.payments.send_direct(&harness.nodes, 1, 0, chan_a_id, 1); }, 0x6a => { - payments.send_direct(&nodes, 1, 2, chan_b_id, 1); + harness.payments.send_direct(&harness.nodes, 1, 2, chan_b_id, 1); }, 0x6b => { - payments.send_direct(&nodes, 2, 1, chan_b_id, 1); + harness.payments.send_direct(&harness.nodes, 2, 1, chan_b_id, 1); }, 0x6c => { - payments.send_hop(&nodes, 0, 1, chan_a_id, 2, chan_b_id, 1); + harness.payments.send_hop(&harness.nodes, 0, 1, chan_a_id, 2, chan_b_id, 1); }, 0x6d => { - payments.send_hop(&nodes, 2, 1, chan_b_id, 0, chan_a_id, 1); + harness.payments.send_hop(&harness.nodes, 2, 1, chan_b_id, 0, chan_a_id, 1); }, // MPP payments // 0x70: direct MPP from 0 to 1 (multi A-B channels) 0x70 => { - payments.send_mpp_direct(&nodes, 0, 1, ab_link.channel_ids(), 1_000_000); + harness.payments.send_mpp_direct( + &harness.nodes, + 0, + 1, + harness.ab_link.channel_ids(), + 1_000_000, + ); }, // 0x71: MPP 0->1->2, 
multi channels on first hop (A-B) 0x71 => { - payments.send_mpp_hop( - &nodes, + harness.payments.send_mpp_hop( + &harness.nodes, 0, 1, - ab_link.channel_ids(), + harness.ab_link.channel_ids(), 2, &[chan_b_id], 1_000_000, @@ -2847,32 +2893,32 @@ pub fn do_test(data: &[u8], out: Out) { }, // 0x72: MPP 0->1->2, multi channels on both hops (A-B and B-C) 0x72 => { - payments.send_mpp_hop( - &nodes, + harness.payments.send_mpp_hop( + &harness.nodes, 0, 1, - ab_link.channel_ids(), + harness.ab_link.channel_ids(), 2, - bc_link.channel_ids(), + harness.bc_link.channel_ids(), 1_000_000, ); }, // 0x73: MPP 0->1->2, multi channels on second hop (B-C) 0x73 => { - payments.send_mpp_hop( - &nodes, + harness.payments.send_mpp_hop( + &harness.nodes, 0, 1, &[chan_a_id], 2, - bc_link.channel_ids(), + harness.bc_link.channel_ids(), 1_000_000, ); }, // 0x74: direct MPP from 0 to 1, multi parts over single channel 0x74 => { - payments.send_mpp_direct( - &nodes, + harness.payments.send_mpp_direct( + &harness.nodes, 0, 1, &[chan_a_id, chan_a_id, chan_a_id], @@ -2880,301 +2926,375 @@ pub fn do_test(data: &[u8], out: Out) { ); }, - 0x80 => nodes[0].bump_fee_estimate(chan_type), - 0x81 => nodes[0].reset_fee_estimate(), - 0x84 => nodes[1].bump_fee_estimate(chan_type), - 0x85 => nodes[1].reset_fee_estimate(), - 0x88 => nodes[2].bump_fee_estimate(chan_type), - 0x89 => nodes[2].reset_fee_estimate(), + 0x80 => harness.nodes[0].bump_fee_estimate(harness.chan_type), + 0x81 => harness.nodes[0].reset_fee_estimate(), + 0x84 => harness.nodes[1].bump_fee_estimate(harness.chan_type), + 0x85 => harness.nodes[1].reset_fee_estimate(), + 0x88 => harness.nodes[2].bump_fee_estimate(harness.chan_type), + 0x89 => harness.nodes[2].reset_fee_estimate(), 0xa0 => { if !cfg!(splicing) { - assert_test_invariants(&nodes); + assert_test_invariants(&harness.nodes); return; } - let cp_node_id = nodes[1].our_node_id(); - nodes[0].splice_in(&cp_node_id, &chan_a_id); + let cp_node_id = harness.nodes[1].our_node_id(); + 
harness.nodes[0].splice_in(&cp_node_id, &harness.chan_a_id()); }, 0xa1 => { if !cfg!(splicing) { - assert_test_invariants(&nodes); + assert_test_invariants(&harness.nodes); return; } - let cp_node_id = nodes[0].our_node_id(); - nodes[1].splice_in(&cp_node_id, &chan_a_id); + let cp_node_id = harness.nodes[0].our_node_id(); + harness.nodes[1].splice_in(&cp_node_id, &harness.chan_a_id()); }, 0xa2 => { if !cfg!(splicing) { - assert_test_invariants(&nodes); + assert_test_invariants(&harness.nodes); return; } - let cp_node_id = nodes[2].our_node_id(); - nodes[1].splice_in(&cp_node_id, &chan_b_id); + let cp_node_id = harness.nodes[2].our_node_id(); + harness.nodes[1].splice_in(&cp_node_id, &harness.chan_b_id()); }, 0xa3 => { if !cfg!(splicing) { - assert_test_invariants(&nodes); + assert_test_invariants(&harness.nodes); return; } - let cp_node_id = nodes[1].our_node_id(); - nodes[2].splice_in(&cp_node_id, &chan_b_id); + let cp_node_id = harness.nodes[1].our_node_id(); + harness.nodes[2].splice_in(&cp_node_id, &harness.chan_b_id()); }, 0xa4 => { if !cfg!(splicing) { - assert_test_invariants(&nodes); + assert_test_invariants(&harness.nodes); return; } - let cp_node_id = nodes[1].our_node_id(); - nodes[0].splice_out(&cp_node_id, &chan_a_id); + let cp_node_id = harness.nodes[1].our_node_id(); + harness.nodes[0].splice_out(&cp_node_id, &harness.chan_a_id()); }, 0xa5 => { if !cfg!(splicing) { - assert_test_invariants(&nodes); + assert_test_invariants(&harness.nodes); return; } - let cp_node_id = nodes[0].our_node_id(); - nodes[1].splice_out(&cp_node_id, &chan_a_id); + let cp_node_id = harness.nodes[0].our_node_id(); + harness.nodes[1].splice_out(&cp_node_id, &harness.chan_a_id()); }, 0xa6 => { if !cfg!(splicing) { - assert_test_invariants(&nodes); + assert_test_invariants(&harness.nodes); return; } - let cp_node_id = nodes[2].our_node_id(); - nodes[1].splice_out(&cp_node_id, &chan_b_id); + let cp_node_id = harness.nodes[2].our_node_id(); + 
harness.nodes[1].splice_out(&cp_node_id, &harness.chan_b_id()); }, 0xa7 => { if !cfg!(splicing) { - assert_test_invariants(&nodes); + assert_test_invariants(&harness.nodes); return; } - let cp_node_id = nodes[1].our_node_id(); - nodes[2].splice_out(&cp_node_id, &chan_b_id); + let cp_node_id = harness.nodes[1].our_node_id(); + harness.nodes[2].splice_out(&cp_node_id, &harness.chan_b_id()); }, // Sync node by 1 block to cover confirmation of a transaction. 0xa8 => { - chain_state.confirm_pending_txs(); - nodes[0].sync_with_chain_state(&chain_state, Some(1)); + harness.chain_state.confirm_pending_txs(); + harness.nodes[0].sync_with_chain_state(&harness.chain_state, Some(1)); }, 0xa9 => { - chain_state.confirm_pending_txs(); - nodes[1].sync_with_chain_state(&chain_state, Some(1)); + harness.chain_state.confirm_pending_txs(); + harness.nodes[1].sync_with_chain_state(&harness.chain_state, Some(1)); }, 0xaa => { - chain_state.confirm_pending_txs(); - nodes[2].sync_with_chain_state(&chain_state, Some(1)); + harness.chain_state.confirm_pending_txs(); + harness.nodes[2].sync_with_chain_state(&harness.chain_state, Some(1)); }, // Sync node to chain tip to cover confirmation of a transaction post-reorg-risk. 
0xab => { - chain_state.confirm_pending_txs(); - nodes[0].sync_with_chain_state(&chain_state, None); + harness.chain_state.confirm_pending_txs(); + harness.nodes[0].sync_with_chain_state(&harness.chain_state, None); }, 0xac => { - chain_state.confirm_pending_txs(); - nodes[1].sync_with_chain_state(&chain_state, None); + harness.chain_state.confirm_pending_txs(); + harness.nodes[1].sync_with_chain_state(&harness.chain_state, None); }, 0xad => { - chain_state.confirm_pending_txs(); - nodes[2].sync_with_chain_state(&chain_state, None); + harness.chain_state.confirm_pending_txs(); + harness.nodes[2].sync_with_chain_state(&harness.chain_state, None); }, 0xb0 | 0xb1 | 0xb2 => { // Restart node A, picking among the in-flight `ChannelMonitor`s to use based on // the value of `v` we're matching. - ab_link.disconnect_for_reload(0, &mut nodes, &mut queues); - nodes[0].reload(v, &out, &router, chan_type); + harness.ab_link.disconnect_for_reload(0, &mut harness.nodes, &mut harness.queues); + harness.nodes[0].reload(v, &harness.out, &router, harness.chan_type); }, 0xb3..=0xbb => { // Restart node B, picking among the in-flight `ChannelMonitor`s to use based on // the value of `v` we're matching. - ab_link.disconnect_for_reload(1, &mut nodes, &mut queues); - bc_link.disconnect_for_reload(1, &mut nodes, &mut queues); - nodes[1].reload(v, &out, &router, chan_type); + harness.ab_link.disconnect_for_reload(1, &mut harness.nodes, &mut harness.queues); + harness.bc_link.disconnect_for_reload(1, &mut harness.nodes, &mut harness.queues); + harness.nodes[1].reload(v, &harness.out, &router, harness.chan_type); }, 0xbc | 0xbd | 0xbe => { // Restart node C, picking among the in-flight `ChannelMonitor`s to use based on // the value of `v` we're matching. 
- bc_link.disconnect_for_reload(2, &mut nodes, &mut queues); - nodes[2].reload(v, &out, &router, chan_type); + harness.bc_link.disconnect_for_reload(2, &mut harness.nodes, &mut harness.queues); + harness.nodes[2].reload(v, &harness.out, &router, harness.chan_type); }, - 0xc0 => nodes[0].keys_manager.disable_supported_ops_for_all_signers(), - 0xc1 => nodes[1].keys_manager.disable_supported_ops_for_all_signers(), - 0xc2 => nodes[2].keys_manager.disable_supported_ops_for_all_signers(), + 0xc0 => harness.nodes[0].keys_manager.disable_supported_ops_for_all_signers(), + 0xc1 => harness.nodes[1].keys_manager.disable_supported_ops_for_all_signers(), + 0xc2 => harness.nodes[2].keys_manager.disable_supported_ops_for_all_signers(), 0xc3 => { - nodes[0] + harness.nodes[0] .keys_manager .enable_op_for_all_signers(SignerOp::SignCounterpartyCommitment); - nodes[0].node.signer_unblocked(None); + harness.nodes[0].node.signer_unblocked(None); }, 0xc4 => { - nodes[1] + harness.nodes[1] .keys_manager .enable_op_for_all_signers(SignerOp::SignCounterpartyCommitment); - let filter = Some((nodes[0].our_node_id(), chan_a_id)); - nodes[1].node.signer_unblocked(filter); + let filter = Some((harness.nodes[0].our_node_id(), harness.chan_a_id())); + harness.nodes[1].node.signer_unblocked(filter); }, 0xc5 => { - nodes[1] + harness.nodes[1] .keys_manager .enable_op_for_all_signers(SignerOp::SignCounterpartyCommitment); - let filter = Some((nodes[2].our_node_id(), chan_b_id)); - nodes[1].node.signer_unblocked(filter); + let filter = Some((harness.nodes[2].our_node_id(), harness.chan_b_id())); + harness.nodes[1].node.signer_unblocked(filter); }, 0xc6 => { - nodes[2] + harness.nodes[2] .keys_manager .enable_op_for_all_signers(SignerOp::SignCounterpartyCommitment); - nodes[2].node.signer_unblocked(None); + harness.nodes[2].node.signer_unblocked(None); }, 0xc7 => { - nodes[0].keys_manager.enable_op_for_all_signers(SignerOp::GetPerCommitmentPoint); - nodes[0].node.signer_unblocked(None); + 
harness.nodes[0] + .keys_manager + .enable_op_for_all_signers(SignerOp::GetPerCommitmentPoint); + harness.nodes[0].node.signer_unblocked(None); }, 0xc8 => { - nodes[1].keys_manager.enable_op_for_all_signers(SignerOp::GetPerCommitmentPoint); - let filter = Some((nodes[0].our_node_id(), chan_a_id)); - nodes[1].node.signer_unblocked(filter); + harness.nodes[1] + .keys_manager + .enable_op_for_all_signers(SignerOp::GetPerCommitmentPoint); + let filter = Some((harness.nodes[0].our_node_id(), harness.chan_a_id())); + harness.nodes[1].node.signer_unblocked(filter); }, 0xc9 => { - nodes[1].keys_manager.enable_op_for_all_signers(SignerOp::GetPerCommitmentPoint); - let filter = Some((nodes[2].our_node_id(), chan_b_id)); - nodes[1].node.signer_unblocked(filter); + harness.nodes[1] + .keys_manager + .enable_op_for_all_signers(SignerOp::GetPerCommitmentPoint); + let filter = Some((harness.nodes[2].our_node_id(), harness.chan_b_id())); + harness.nodes[1].node.signer_unblocked(filter); }, 0xca => { - nodes[2].keys_manager.enable_op_for_all_signers(SignerOp::GetPerCommitmentPoint); - nodes[2].node.signer_unblocked(None); + harness.nodes[2] + .keys_manager + .enable_op_for_all_signers(SignerOp::GetPerCommitmentPoint); + harness.nodes[2].node.signer_unblocked(None); }, 0xcb => { - nodes[0].keys_manager.enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); - nodes[0].node.signer_unblocked(None); + harness.nodes[0] + .keys_manager + .enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); + harness.nodes[0].node.signer_unblocked(None); }, 0xcc => { - nodes[1].keys_manager.enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); - let filter = Some((nodes[0].our_node_id(), chan_a_id)); - nodes[1].node.signer_unblocked(filter); + harness.nodes[1] + .keys_manager + .enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); + let filter = Some((harness.nodes[0].our_node_id(), harness.chan_a_id())); + harness.nodes[1].node.signer_unblocked(filter); }, 0xcd => { - 
nodes[1].keys_manager.enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); - let filter = Some((nodes[2].our_node_id(), chan_b_id)); - nodes[1].node.signer_unblocked(filter); + harness.nodes[1] + .keys_manager + .enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); + let filter = Some((harness.nodes[2].our_node_id(), harness.chan_b_id())); + harness.nodes[1].node.signer_unblocked(filter); }, 0xce => { - nodes[2].keys_manager.enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); - nodes[2].node.signer_unblocked(None); + harness.nodes[2] + .keys_manager + .enable_op_for_all_signers(SignerOp::ReleaseCommitmentSecret); + harness.nodes[2].node.signer_unblocked(None); }, 0xf0 => { - ab_link.complete_monitor_updates_for_node(0, &nodes, MonitorUpdateSelector::First); + harness.ab_link.complete_monitor_updates_for_node( + 0, + &harness.nodes, + MonitorUpdateSelector::First, + ); }, 0xf1 => { - ab_link.complete_monitor_updates_for_node(0, &nodes, MonitorUpdateSelector::Second); + harness.ab_link.complete_monitor_updates_for_node( + 0, + &harness.nodes, + MonitorUpdateSelector::Second, + ); }, 0xf2 => { - ab_link.complete_monitor_updates_for_node(0, &nodes, MonitorUpdateSelector::Last); + harness.ab_link.complete_monitor_updates_for_node( + 0, + &harness.nodes, + MonitorUpdateSelector::Last, + ); }, 0xf4 => { - ab_link.complete_monitor_updates_for_node(1, &nodes, MonitorUpdateSelector::First); + harness.ab_link.complete_monitor_updates_for_node( + 1, + &harness.nodes, + MonitorUpdateSelector::First, + ); }, 0xf5 => { - ab_link.complete_monitor_updates_for_node(1, &nodes, MonitorUpdateSelector::Second); + harness.ab_link.complete_monitor_updates_for_node( + 1, + &harness.nodes, + MonitorUpdateSelector::Second, + ); }, 0xf6 => { - ab_link.complete_monitor_updates_for_node(1, &nodes, MonitorUpdateSelector::Last); + harness.ab_link.complete_monitor_updates_for_node( + 1, + &harness.nodes, + MonitorUpdateSelector::Last, + ); }, 0xf8 => { - 
bc_link.complete_monitor_updates_for_node(1, &nodes, MonitorUpdateSelector::First); + harness.bc_link.complete_monitor_updates_for_node( + 1, + &harness.nodes, + MonitorUpdateSelector::First, + ); }, 0xf9 => { - bc_link.complete_monitor_updates_for_node(1, &nodes, MonitorUpdateSelector::Second); + harness.bc_link.complete_monitor_updates_for_node( + 1, + &harness.nodes, + MonitorUpdateSelector::Second, + ); }, 0xfa => { - bc_link.complete_monitor_updates_for_node(1, &nodes, MonitorUpdateSelector::Last); + harness.bc_link.complete_monitor_updates_for_node( + 1, + &harness.nodes, + MonitorUpdateSelector::Last, + ); }, 0xfc => { - bc_link.complete_monitor_updates_for_node(2, &nodes, MonitorUpdateSelector::First); + harness.bc_link.complete_monitor_updates_for_node( + 2, + &harness.nodes, + MonitorUpdateSelector::First, + ); }, 0xfd => { - bc_link.complete_monitor_updates_for_node(2, &nodes, MonitorUpdateSelector::Second); + harness.bc_link.complete_monitor_updates_for_node( + 2, + &harness.nodes, + MonitorUpdateSelector::Second, + ); }, 0xfe => { - bc_link.complete_monitor_updates_for_node(2, &nodes, MonitorUpdateSelector::Last); + harness.bc_link.complete_monitor_updates_for_node( + 2, + &harness.nodes, + MonitorUpdateSelector::Last, + ); }, 0xff => { // Test that no channel is in a stuck state where neither party can send funds even // after we resolve all pending events. 
- ab_link.reconnect(&mut nodes); - bc_link.reconnect(&mut nodes); + harness.ab_link.reconnect(&mut harness.nodes); + harness.bc_link.reconnect(&mut harness.nodes); for op in SUPPORTED_SIGNER_OPS { - nodes[0].keys_manager.enable_op_for_all_signers(op); - nodes[1].keys_manager.enable_op_for_all_signers(op); - nodes[2].keys_manager.enable_op_for_all_signers(op); + harness.nodes[0].keys_manager.enable_op_for_all_signers(op); + harness.nodes[1].keys_manager.enable_op_for_all_signers(op); + harness.nodes[2].keys_manager.enable_op_for_all_signers(op); } - nodes[0].node.signer_unblocked(None); - nodes[1].node.signer_unblocked(None); - nodes[2].node.signer_unblocked(None); + harness.nodes[0].node.signer_unblocked(None); + harness.nodes[1].node.signer_unblocked(None); + harness.nodes[2].node.signer_unblocked(None); process_all_events_impl( - &nodes, - &out, - &ab_link, - &bc_link, - &mut chain_state, - &mut payments, - &mut queues, + &harness.nodes, + &harness.out, + &harness.ab_link, + &harness.bc_link, + &mut harness.chain_state, + &mut harness.payments, + &mut harness.queues, ); // Since MPP payments are supported, we wait until we fully settle the state of all // channels to see if we have any committed HTLC parts of an MPP payment that need // to be failed back. 
- for node in &nodes { + for node in &harness.nodes { node.node.timer_tick_occurred(); } process_all_events_impl( - &nodes, - &out, - &ab_link, - &bc_link, - &mut chain_state, - &mut payments, - &mut queues, + &harness.nodes, + &harness.out, + &harness.ab_link, + &harness.bc_link, + &mut harness.chain_state, + &mut harness.payments, + &mut harness.queues, ); - payments.assert_all_resolved(); - payments.assert_claims_reported(); + harness.payments.assert_all_resolved(); + harness.payments.assert_claims_reported(); // Finally, make sure that at least one end of each channel can make a substantial payment - for &chan_id in ab_link.channel_ids() { + for &chan_id in harness.ab_link.channel_ids() { assert!( - payments.send_direct(&nodes, 0, 1, chan_id, 10_000_000) - || payments.send_direct(&nodes, 1, 0, chan_id, 10_000_000) + harness.payments.send_direct(&harness.nodes, 0, 1, chan_id, 10_000_000) + || harness.payments.send_direct( + &harness.nodes, + 1, + 0, + chan_id, + 10_000_000 + ) ); } - for &chan_id in bc_link.channel_ids() { + for &chan_id in harness.bc_link.channel_ids() { assert!( - payments.send_direct(&nodes, 1, 2, chan_id, 10_000_000) - || payments.send_direct(&nodes, 2, 1, chan_id, 10_000_000) + harness.payments.send_direct(&harness.nodes, 1, 2, chan_id, 10_000_000) + || harness.payments.send_direct( + &harness.nodes, + 2, + 1, + chan_id, + 10_000_000 + ) ); } - nodes[0].record_last_htlc_clear_fee(); - nodes[1].record_last_htlc_clear_fee(); - nodes[2].record_last_htlc_clear_fee(); + harness.nodes[0].record_last_htlc_clear_fee(); + harness.nodes[1].record_last_htlc_clear_fee(); + harness.nodes[2].record_last_htlc_clear_fee(); }, _ => test_return!(), } - for node in &mut nodes { - node.refresh_serialized_manager(); - } + harness.refresh_serialized_managers(); } } From 6feccc10937ac374d65f6f54bb532187f6ad2627 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 21 Apr 2026 16:00:30 +0200 Subject: [PATCH 15/15] Wrap chanmon consistency flow in Harness Move the 
main fuzz flow onto the harness. This completes the structural refactor so `do_test` is responsible for constructing and running the harness rather than managing the full scenario directly. Keep the final event-settling loop on the harness as well, avoiding an extra free function once the state it needs is already owned by `Harness`. --- fuzz/src/chanmon_consistency.rs | 660 +++++++++++++++++--------------- 1 file changed, 351 insertions(+), 309 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 553c3ec3aaa..8ca544294a7 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -2189,6 +2189,199 @@ impl<'a, 'd, Out: Output + MaybeSend + MaybeSync> Harness<'a, 'd, Out> { assert_test_invariants(&self.nodes); } + fn send_direct( + &mut self, source_idx: usize, dest_idx: usize, dest_chan_id: ChannelId, amt: u64, + ) -> bool { + self.payments.send_direct(&self.nodes, source_idx, dest_idx, dest_chan_id, amt) + } + + fn send_hop( + &mut self, source_idx: usize, middle_idx: usize, middle_chan_id: ChannelId, + dest_idx: usize, dest_chan_id: ChannelId, amt: u64, + ) { + self.payments.send_hop( + &self.nodes, + source_idx, + middle_idx, + middle_chan_id, + dest_idx, + dest_chan_id, + amt, + ); + } + + fn send_mpp_direct( + &mut self, source_idx: usize, dest_idx: usize, dest_chan_ids: &[ChannelId], amt: u64, + ) { + // Direct MPP payment with no hop. + self.payments.send_mpp_direct(&self.nodes, source_idx, dest_idx, dest_chan_ids, amt); + } + + fn send_mpp_hop( + &mut self, source_idx: usize, middle_idx: usize, middle_chan_ids: &[ChannelId], + dest_idx: usize, dest_chan_ids: &[ChannelId], amt: u64, + ) { + // MPP payment via hop, split across multiple channels on either or both hops. 
+ self.payments.send_mpp_hop( + &self.nodes, + source_idx, + middle_idx, + middle_chan_ids, + dest_idx, + dest_chan_ids, + amt, + ); + } + + fn process_msg_events( + &mut self, node_idx: usize, corrupt_forward: bool, limit_events: ProcessMessages, + ) -> bool { + process_msg_events_impl( + node_idx, + corrupt_forward, + limit_events, + &self.nodes, + &self.out, + &mut self.queues, + ) + } + + fn process_events(&mut self, node_idx: usize, fail: bool) -> bool { + process_events_impl(node_idx, fail, &self.nodes, &mut self.chain_state, &mut self.payments) + } + + fn process_all_events(&mut self) { + let mut last_pass_no_updates = false; + for i in 0..std::usize::MAX { + if i == 100 { + panic!( + "It may take many iterations to settle the state, but it should not take forever" + ); + } + // First, make sure no monitor updates are pending. + self.ab_link.complete_all_monitor_updates(&self.nodes); + self.bc_link.complete_all_monitor_updates(&self.nodes); + // Then, make sure any current forwards make their way to their destination. + if self.process_msg_events(0, false, ProcessMessages::AllMessages) { + last_pass_no_updates = false; + continue; + } + if self.process_msg_events(1, false, ProcessMessages::AllMessages) { + last_pass_no_updates = false; + continue; + } + if self.process_msg_events(2, false, ProcessMessages::AllMessages) { + last_pass_no_updates = false; + continue; + } + // Finally, make sure any payments are claimed. + if self.process_events(0, false) { + last_pass_no_updates = false; + continue; + } + if self.process_events(1, false) { + last_pass_no_updates = false; + continue; + } + if self.process_events(2, false) { + last_pass_no_updates = false; + continue; + } + if last_pass_no_updates { + // In some cases, `process_msg_events` may generate a message to send, but block + // sending until `complete_all_monitor_updates` gets called on the next iteration. + // Thus, we only exit if we manage two iterations with no messages or events to + // process. 
+ break; + } + last_pass_no_updates = true; + } + } + + fn disconnect_ab(&mut self) { + self.ab_link.disconnect(&mut self.nodes, &mut self.queues); + } + + fn disconnect_bc(&mut self) { + self.bc_link.disconnect(&mut self.nodes, &mut self.queues); + } + + fn reconnect_ab(&mut self) { + self.ab_link.reconnect(&mut self.nodes); + } + + fn reconnect_bc(&mut self) { + self.bc_link.reconnect(&mut self.nodes); + } + + fn restart_node(&mut self, node_idx: usize, v: u8, router: &'a FuzzRouter) { + match node_idx { + 0 => { + self.ab_link.disconnect_for_reload(0, &mut self.nodes, &mut self.queues); + }, + 1 => { + self.ab_link.disconnect_for_reload(1, &mut self.nodes, &mut self.queues); + self.bc_link.disconnect_for_reload(1, &mut self.nodes, &mut self.queues); + }, + 2 => { + self.bc_link.disconnect_for_reload(2, &mut self.nodes, &mut self.queues); + }, + _ => panic!("invalid node index"), + } + self.nodes[node_idx].reload(v, &self.out, router, self.chan_type); + } + + fn settle_all(&mut self) { + // First, make sure peers are all connected to each other. + self.reconnect_ab(); + self.reconnect_bc(); + + for op in SUPPORTED_SIGNER_OPS { + self.nodes[0].keys_manager.enable_op_for_all_signers(op); + self.nodes[1].keys_manager.enable_op_for_all_signers(op); + self.nodes[2].keys_manager.enable_op_for_all_signers(op); + } + self.nodes[0].node.signer_unblocked(None); + self.nodes[1].node.signer_unblocked(None); + self.nodes[2].node.signer_unblocked(None); + + self.process_all_events(); + + // Since MPP payments are supported, we wait until we fully settle the state of all + // channels to see if we have any committed HTLC parts of an MPP payment that need + // to be failed back. + for node in self.nodes.iter() { + node.node.timer_tick_occurred(); + } + self.process_all_events(); + + // Verify no payments are stuck, all should have resolved. 
+ self.payments.assert_all_resolved(); + // Verify that every payment claimed by a receiver resulted in a PaymentSent event at + // the sender. + self.payments.assert_claims_reported(); + + // Finally, make sure that at least one end of each channel can make a substantial payment. + let chan_ab_ids = self.ab_link.channel_ids().clone(); + let chan_bc_ids = self.bc_link.channel_ids().clone(); + for chan_id in chan_ab_ids { + assert!( + self.send_direct(0, 1, chan_id, 10_000_000) + || self.send_direct(1, 0, chan_id, 10_000_000) + ); + } + for chan_id in chan_bc_ids { + assert!( + self.send_direct(1, 2, chan_id, 10_000_000) + || self.send_direct(2, 1, chan_id, 10_000_000) + ); + } + + self.nodes[0].record_last_htlc_clear_fee(); + self.nodes[1].record_last_htlc_clear_fee(); + self.nodes[2].record_last_htlc_clear_fee(); + } + fn refresh_serialized_managers(&mut self) { for node in &mut self.nodes { node.refresh_serialized_manager(); @@ -2525,110 +2718,18 @@ fn process_events_impl( had_events } -fn process_all_events_impl( - nodes: &[HarnessNode<'_>; 3], out: &Out, ab_link: &PeerLink, bc_link: &PeerLink, - chain_state: &mut ChainState, payments: &mut PaymentTracker, queues: &mut EventQueues, -) { - let mut last_pass_no_updates = false; - for i in 0..std::usize::MAX { - if i == 100 { - panic!( - "It may take may iterations to settle the state, but it should not take forever" - ); - } - // First, make sure no monitor updates are pending. - ab_link.complete_all_monitor_updates(nodes); - bc_link.complete_all_monitor_updates(nodes); - // Then, make sure any current forwards make their way to their destination. 
- if process_msg_events_impl(0, false, ProcessMessages::AllMessages, nodes, out, queues) { - last_pass_no_updates = false; - continue; - } - if process_msg_events_impl(1, false, ProcessMessages::AllMessages, nodes, out, queues) { - last_pass_no_updates = false; - continue; - } - if process_msg_events_impl(2, false, ProcessMessages::AllMessages, nodes, out, queues) { - last_pass_no_updates = false; - continue; - } - // Finally, make sure any payments are claimed. - if process_events_impl(0, false, nodes, chain_state, payments) { - last_pass_no_updates = false; - continue; - } - if process_events_impl(1, false, nodes, chain_state, payments) { - last_pass_no_updates = false; - continue; - } - if process_events_impl(2, false, nodes, chain_state, payments) { - last_pass_no_updates = false; - continue; - } - if last_pass_no_updates { - // In some cases, `process_msg_events_impl` may generate a message to send, but - // block sending until `complete_all_monitor_updates` gets called on the next - // iteration. Thus, we only exit if we manage two iterations with no messages or - // events to process. - break; - } - last_pass_no_updates = true; - } -} - #[inline] pub fn do_test(data: &[u8], out: Out) { let router = FuzzRouter {}; let mut harness = Harness::new(data, out, &router); - let chan_a_id = harness.chan_a_id(); - let chan_b_id = harness.chan_b_id(); - macro_rules! test_return { - () => {{ + loop { + let v = if let Some(value) = harness.next_input_byte() { + value + } else { harness.finish(); return; - }}; - } - - loop { - macro_rules! process_msg_events { - ($node: expr, $corrupt_forward: expr, $limit_events: expr) => {{ - process_msg_events_impl( - $node, - $corrupt_forward, - $limit_events, - &harness.nodes, - &harness.out, - &mut harness.queues, - ) - }}; - } - - macro_rules! process_msg_noret { - ($node: expr, $corrupt_forward: expr, $limit_events: expr) => {{ - process_msg_events!($node, $corrupt_forward, $limit_events); - }}; - } - - macro_rules! 
process_events { - ($node: expr, $fail: expr) => {{ - process_events_impl( - $node, - $fail, - &harness.nodes, - &mut harness.chain_state, - &mut harness.payments, - ) - }}; - } - - macro_rules! process_ev_noret { - ($node: expr, $fail: expr) => {{ - process_events!($node, $fail); - }}; - } - - let v = if let Some(value) = harness.next_input_byte() { value } else { test_return!() }; + }; harness .out .locked_write(format!("READ A BYTE! HANDLING INPUT {:x}...........\n", v).as_bytes()); @@ -2664,266 +2765,278 @@ pub fn do_test(data: &[u8], out: Out) { } }, - 0x0c => harness.ab_link.disconnect(&mut harness.nodes, &mut harness.queues), - 0x0d => harness.bc_link.disconnect(&mut harness.nodes, &mut harness.queues), - 0x0e => harness.ab_link.reconnect(&mut harness.nodes), - 0x0f => harness.bc_link.reconnect(&mut harness.nodes), + 0x0c => { + harness.disconnect_ab(); + }, + 0x0d => { + harness.disconnect_bc(); + }, + 0x0e => { + harness.reconnect_ab(); + }, + 0x0f => { + harness.reconnect_bc(); + }, - 0x10 => process_msg_noret!(0, true, ProcessMessages::AllMessages), - 0x11 => process_msg_noret!(0, false, ProcessMessages::AllMessages), - 0x12 => process_msg_noret!(0, true, ProcessMessages::OneMessage), - 0x13 => process_msg_noret!(0, false, ProcessMessages::OneMessage), - 0x14 => process_msg_noret!(0, true, ProcessMessages::OnePendingMessage), - 0x15 => process_msg_noret!(0, false, ProcessMessages::OnePendingMessage), + 0x10 => { + harness.process_msg_events(0, true, ProcessMessages::AllMessages); + }, + 0x11 => { + harness.process_msg_events(0, false, ProcessMessages::AllMessages); + }, + 0x12 => { + harness.process_msg_events(0, true, ProcessMessages::OneMessage); + }, + 0x13 => { + harness.process_msg_events(0, false, ProcessMessages::OneMessage); + }, + 0x14 => { + harness.process_msg_events(0, true, ProcessMessages::OnePendingMessage); + }, + 0x15 => { + harness.process_msg_events(0, false, ProcessMessages::OnePendingMessage); + }, - 0x16 => process_ev_noret!(0, 
true), - 0x17 => process_ev_noret!(0, false), + 0x16 => { + harness.process_events(0, true); + }, + 0x17 => { + harness.process_events(0, false); + }, - 0x18 => process_msg_noret!(1, true, ProcessMessages::AllMessages), - 0x19 => process_msg_noret!(1, false, ProcessMessages::AllMessages), - 0x1a => process_msg_noret!(1, true, ProcessMessages::OneMessage), - 0x1b => process_msg_noret!(1, false, ProcessMessages::OneMessage), - 0x1c => process_msg_noret!(1, true, ProcessMessages::OnePendingMessage), - 0x1d => process_msg_noret!(1, false, ProcessMessages::OnePendingMessage), + 0x18 => { + harness.process_msg_events(1, true, ProcessMessages::AllMessages); + }, + 0x19 => { + harness.process_msg_events(1, false, ProcessMessages::AllMessages); + }, + 0x1a => { + harness.process_msg_events(1, true, ProcessMessages::OneMessage); + }, + 0x1b => { + harness.process_msg_events(1, false, ProcessMessages::OneMessage); + }, + 0x1c => { + harness.process_msg_events(1, true, ProcessMessages::OnePendingMessage); + }, + 0x1d => { + harness.process_msg_events(1, false, ProcessMessages::OnePendingMessage); + }, - 0x1e => process_ev_noret!(1, true), - 0x1f => process_ev_noret!(1, false), + 0x1e => { + harness.process_events(1, true); + }, + 0x1f => { + harness.process_events(1, false); + }, - 0x20 => process_msg_noret!(2, true, ProcessMessages::AllMessages), - 0x21 => process_msg_noret!(2, false, ProcessMessages::AllMessages), - 0x22 => process_msg_noret!(2, true, ProcessMessages::OneMessage), - 0x23 => process_msg_noret!(2, false, ProcessMessages::OneMessage), - 0x24 => process_msg_noret!(2, true, ProcessMessages::OnePendingMessage), - 0x25 => process_msg_noret!(2, false, ProcessMessages::OnePendingMessage), + 0x20 => { + harness.process_msg_events(2, true, ProcessMessages::AllMessages); + }, + 0x21 => { + harness.process_msg_events(2, false, ProcessMessages::AllMessages); + }, + 0x22 => { + harness.process_msg_events(2, true, ProcessMessages::OneMessage); + }, + 0x23 => { + 
harness.process_msg_events(2, false, ProcessMessages::OneMessage); + }, + 0x24 => { + harness.process_msg_events(2, true, ProcessMessages::OnePendingMessage); + }, + 0x25 => { + harness.process_msg_events(2, false, ProcessMessages::OnePendingMessage); + }, - 0x26 => process_ev_noret!(2, true), - 0x27 => process_ev_noret!(2, false), + 0x26 => { + harness.process_events(2, true); + }, + 0x27 => { + harness.process_events(2, false); + }, // 1/10th the channel size: 0x30 => { - harness.payments.send_direct(&harness.nodes, 0, 1, chan_a_id, 10_000_000); + harness.send_direct(0, 1, harness.chan_a_id(), 10_000_000); }, 0x31 => { - harness.payments.send_direct(&harness.nodes, 1, 0, chan_a_id, 10_000_000); + harness.send_direct(1, 0, harness.chan_a_id(), 10_000_000); }, 0x32 => { - harness.payments.send_direct(&harness.nodes, 1, 2, chan_b_id, 10_000_000); + harness.send_direct(1, 2, harness.chan_b_id(), 10_000_000); }, 0x33 => { - harness.payments.send_direct(&harness.nodes, 2, 1, chan_b_id, 10_000_000); + harness.send_direct(2, 1, harness.chan_b_id(), 10_000_000); }, 0x34 => { - harness.payments.send_hop( - &harness.nodes, - 0, - 1, - chan_a_id, - 2, - chan_b_id, - 10_000_000, - ); + harness.send_hop(0, 1, harness.chan_a_id(), 2, harness.chan_b_id(), 10_000_000); }, 0x35 => { - harness.payments.send_hop( - &harness.nodes, - 2, - 1, - chan_b_id, - 0, - chan_a_id, - 10_000_000, - ); + harness.send_hop(2, 1, harness.chan_b_id(), 0, harness.chan_a_id(), 10_000_000); }, 0x38 => { - harness.payments.send_direct(&harness.nodes, 0, 1, chan_a_id, 1_000_000); + harness.send_direct(0, 1, harness.chan_a_id(), 1_000_000); }, 0x39 => { - harness.payments.send_direct(&harness.nodes, 1, 0, chan_a_id, 1_000_000); + harness.send_direct(1, 0, harness.chan_a_id(), 1_000_000); }, 0x3a => { - harness.payments.send_direct(&harness.nodes, 1, 2, chan_b_id, 1_000_000); + harness.send_direct(1, 2, harness.chan_b_id(), 1_000_000); }, 0x3b => { - harness.payments.send_direct(&harness.nodes, 2, 1, 
chan_b_id, 1_000_000); + harness.send_direct(2, 1, harness.chan_b_id(), 1_000_000); }, 0x3c => { - harness.payments.send_hop(&harness.nodes, 0, 1, chan_a_id, 2, chan_b_id, 1_000_000); + harness.send_hop(0, 1, harness.chan_a_id(), 2, harness.chan_b_id(), 1_000_000); }, 0x3d => { - harness.payments.send_hop(&harness.nodes, 2, 1, chan_b_id, 0, chan_a_id, 1_000_000); + harness.send_hop(2, 1, harness.chan_b_id(), 0, harness.chan_a_id(), 1_000_000); }, 0x40 => { - harness.payments.send_direct(&harness.nodes, 0, 1, chan_a_id, 100_000); + harness.send_direct(0, 1, harness.chan_a_id(), 100_000); }, 0x41 => { - harness.payments.send_direct(&harness.nodes, 1, 0, chan_a_id, 100_000); + harness.send_direct(1, 0, harness.chan_a_id(), 100_000); }, 0x42 => { - harness.payments.send_direct(&harness.nodes, 1, 2, chan_b_id, 100_000); + harness.send_direct(1, 2, harness.chan_b_id(), 100_000); }, 0x43 => { - harness.payments.send_direct(&harness.nodes, 2, 1, chan_b_id, 100_000); + harness.send_direct(2, 1, harness.chan_b_id(), 100_000); }, 0x44 => { - harness.payments.send_hop(&harness.nodes, 0, 1, chan_a_id, 2, chan_b_id, 100_000); + harness.send_hop(0, 1, harness.chan_a_id(), 2, harness.chan_b_id(), 100_000); }, 0x45 => { - harness.payments.send_hop(&harness.nodes, 2, 1, chan_b_id, 0, chan_a_id, 100_000); + harness.send_hop(2, 1, harness.chan_b_id(), 0, harness.chan_a_id(), 100_000); }, 0x48 => { - harness.payments.send_direct(&harness.nodes, 0, 1, chan_a_id, 10_000); + harness.send_direct(0, 1, harness.chan_a_id(), 10_000); }, 0x49 => { - harness.payments.send_direct(&harness.nodes, 1, 0, chan_a_id, 10_000); + harness.send_direct(1, 0, harness.chan_a_id(), 10_000); }, 0x4a => { - harness.payments.send_direct(&harness.nodes, 1, 2, chan_b_id, 10_000); + harness.send_direct(1, 2, harness.chan_b_id(), 10_000); }, 0x4b => { - harness.payments.send_direct(&harness.nodes, 2, 1, chan_b_id, 10_000); + harness.send_direct(2, 1, harness.chan_b_id(), 10_000); }, 0x4c => { - 
harness.payments.send_hop(&harness.nodes, 0, 1, chan_a_id, 2, chan_b_id, 10_000); + harness.send_hop(0, 1, harness.chan_a_id(), 2, harness.chan_b_id(), 10_000); }, 0x4d => { - harness.payments.send_hop(&harness.nodes, 2, 1, chan_b_id, 0, chan_a_id, 10_000); + harness.send_hop(2, 1, harness.chan_b_id(), 0, harness.chan_a_id(), 10_000); }, 0x50 => { - harness.payments.send_direct(&harness.nodes, 0, 1, chan_a_id, 1_000); + harness.send_direct(0, 1, harness.chan_a_id(), 1_000); }, 0x51 => { - harness.payments.send_direct(&harness.nodes, 1, 0, chan_a_id, 1_000); + harness.send_direct(1, 0, harness.chan_a_id(), 1_000); }, 0x52 => { - harness.payments.send_direct(&harness.nodes, 1, 2, chan_b_id, 1_000); + harness.send_direct(1, 2, harness.chan_b_id(), 1_000); }, 0x53 => { - harness.payments.send_direct(&harness.nodes, 2, 1, chan_b_id, 1_000); + harness.send_direct(2, 1, harness.chan_b_id(), 1_000); }, 0x54 => { - harness.payments.send_hop(&harness.nodes, 0, 1, chan_a_id, 2, chan_b_id, 1_000); + harness.send_hop(0, 1, harness.chan_a_id(), 2, harness.chan_b_id(), 1_000); }, 0x55 => { - harness.payments.send_hop(&harness.nodes, 2, 1, chan_b_id, 0, chan_a_id, 1_000); + harness.send_hop(2, 1, harness.chan_b_id(), 0, harness.chan_a_id(), 1_000); }, 0x58 => { - harness.payments.send_direct(&harness.nodes, 0, 1, chan_a_id, 100); + harness.send_direct(0, 1, harness.chan_a_id(), 100); }, 0x59 => { - harness.payments.send_direct(&harness.nodes, 1, 0, chan_a_id, 100); + harness.send_direct(1, 0, harness.chan_a_id(), 100); }, 0x5a => { - harness.payments.send_direct(&harness.nodes, 1, 2, chan_b_id, 100); + harness.send_direct(1, 2, harness.chan_b_id(), 100); }, 0x5b => { - harness.payments.send_direct(&harness.nodes, 2, 1, chan_b_id, 100); + harness.send_direct(2, 1, harness.chan_b_id(), 100); }, 0x5c => { - harness.payments.send_hop(&harness.nodes, 0, 1, chan_a_id, 2, chan_b_id, 100); + harness.send_hop(0, 1, harness.chan_a_id(), 2, harness.chan_b_id(), 100); }, 0x5d => { - 
harness.payments.send_hop(&harness.nodes, 2, 1, chan_b_id, 0, chan_a_id, 100); + harness.send_hop(2, 1, harness.chan_b_id(), 0, harness.chan_a_id(), 100); }, 0x60 => { - harness.payments.send_direct(&harness.nodes, 0, 1, chan_a_id, 10); + harness.send_direct(0, 1, harness.chan_a_id(), 10); }, 0x61 => { - harness.payments.send_direct(&harness.nodes, 1, 0, chan_a_id, 10); + harness.send_direct(1, 0, harness.chan_a_id(), 10); }, 0x62 => { - harness.payments.send_direct(&harness.nodes, 1, 2, chan_b_id, 10); + harness.send_direct(1, 2, harness.chan_b_id(), 10); }, 0x63 => { - harness.payments.send_direct(&harness.nodes, 2, 1, chan_b_id, 10); + harness.send_direct(2, 1, harness.chan_b_id(), 10); }, 0x64 => { - harness.payments.send_hop(&harness.nodes, 0, 1, chan_a_id, 2, chan_b_id, 10); + harness.send_hop(0, 1, harness.chan_a_id(), 2, harness.chan_b_id(), 10); }, 0x65 => { - harness.payments.send_hop(&harness.nodes, 2, 1, chan_b_id, 0, chan_a_id, 10); + harness.send_hop(2, 1, harness.chan_b_id(), 0, harness.chan_a_id(), 10); }, 0x68 => { - harness.payments.send_direct(&harness.nodes, 0, 1, chan_a_id, 1); + harness.send_direct(0, 1, harness.chan_a_id(), 1); }, 0x69 => { - harness.payments.send_direct(&harness.nodes, 1, 0, chan_a_id, 1); + harness.send_direct(1, 0, harness.chan_a_id(), 1); }, 0x6a => { - harness.payments.send_direct(&harness.nodes, 1, 2, chan_b_id, 1); + harness.send_direct(1, 2, harness.chan_b_id(), 1); }, 0x6b => { - harness.payments.send_direct(&harness.nodes, 2, 1, chan_b_id, 1); + harness.send_direct(2, 1, harness.chan_b_id(), 1); }, 0x6c => { - harness.payments.send_hop(&harness.nodes, 0, 1, chan_a_id, 2, chan_b_id, 1); + harness.send_hop(0, 1, harness.chan_a_id(), 2, harness.chan_b_id(), 1); }, 0x6d => { - harness.payments.send_hop(&harness.nodes, 2, 1, chan_b_id, 0, chan_a_id, 1); + harness.send_hop(2, 1, harness.chan_b_id(), 0, harness.chan_a_id(), 1); }, // MPP payments // 0x70: direct MPP from 0 to 1 (multi A-B channels) 0x70 => { - 
harness.payments.send_mpp_direct( - &harness.nodes, - 0, - 1, - harness.ab_link.channel_ids(), - 1_000_000, - ); + let chan_ab_ids = harness.ab_link.channel_ids().clone(); + harness.send_mpp_direct(0, 1, &chan_ab_ids, 1_000_000); }, // 0x71: MPP 0->1->2, multi channels on first hop (A-B) 0x71 => { - harness.payments.send_mpp_hop( - &harness.nodes, - 0, - 1, - harness.ab_link.channel_ids(), - 2, - &[chan_b_id], - 1_000_000, - ); + let chan_ab_ids = harness.ab_link.channel_ids().clone(); + let chan_b_id = harness.chan_b_id(); + harness.send_mpp_hop(0, 1, &chan_ab_ids, 2, &[chan_b_id], 1_000_000); }, // 0x72: MPP 0->1->2, multi channels on both hops (A-B and B-C) 0x72 => { - harness.payments.send_mpp_hop( - &harness.nodes, - 0, - 1, - harness.ab_link.channel_ids(), - 2, - harness.bc_link.channel_ids(), - 1_000_000, - ); + let chan_ab_ids = harness.ab_link.channel_ids().clone(); + let chan_bc_ids = harness.bc_link.channel_ids().clone(); + harness.send_mpp_hop(0, 1, &chan_ab_ids, 2, &chan_bc_ids, 1_000_000); }, // 0x73: MPP 0->1->2, multi channels on second hop (B-C) 0x73 => { - harness.payments.send_mpp_hop( - &harness.nodes, - 0, - 1, - &[chan_a_id], - 2, - harness.bc_link.channel_ids(), - 1_000_000, - ); + let chan_a_id = harness.chan_a_id(); + let chan_bc_ids = harness.bc_link.channel_ids().clone(); + harness.send_mpp_hop(0, 1, &[chan_a_id], 2, &chan_bc_ids, 1_000_000); }, // 0x74: direct MPP from 0 to 1, multi parts over single channel 0x74 => { - harness.payments.send_mpp_direct( - &harness.nodes, - 0, - 1, - &[chan_a_id, chan_a_id, chan_a_id], - 1_000_000, - ); + let chan_a_id = harness.chan_a_id(); + harness.send_mpp_direct(0, 1, &[chan_a_id, chan_a_id, chan_a_id], 1_000_000); }, 0x80 => harness.nodes[0].bump_fee_estimate(harness.chan_type), @@ -3029,21 +3142,17 @@ pub fn do_test(data: &[u8], out: Out) { 0xb0 | 0xb1 | 0xb2 => { // Restart node A, picking among the in-flight `ChannelMonitor`s to use based on // the value of `v` we're matching. 
- harness.ab_link.disconnect_for_reload(0, &mut harness.nodes, &mut harness.queues); - harness.nodes[0].reload(v, &harness.out, &router, harness.chan_type); + harness.restart_node(0, v, &router); }, 0xb3..=0xbb => { // Restart node B, picking among the in-flight `ChannelMonitor`s to use based on // the value of `v` we're matching. - harness.ab_link.disconnect_for_reload(1, &mut harness.nodes, &mut harness.queues); - harness.bc_link.disconnect_for_reload(1, &mut harness.nodes, &mut harness.queues); - harness.nodes[1].reload(v, &harness.out, &router, harness.chan_type); + harness.restart_node(1, v, &router); }, 0xbc | 0xbd | 0xbe => { // Restart node C, picking among the in-flight `ChannelMonitor`s to use based on // the value of `v` we're matching. - harness.bc_link.disconnect_for_reload(2, &mut harness.nodes, &mut harness.queues); - harness.nodes[2].reload(v, &harness.out, &router, harness.chan_type); + harness.restart_node(2, v, &router); }, 0xc0 => harness.nodes[0].keys_manager.disable_supported_ops_for_all_signers(), @@ -3219,79 +3328,12 @@ pub fn do_test(data: &[u8], out: Out) { 0xff => { // Test that no channel is in a stuck state where neither party can send funds even // after we resolve all pending events. 
- - harness.ab_link.reconnect(&mut harness.nodes); - harness.bc_link.reconnect(&mut harness.nodes); - - for op in SUPPORTED_SIGNER_OPS { - harness.nodes[0].keys_manager.enable_op_for_all_signers(op); - harness.nodes[1].keys_manager.enable_op_for_all_signers(op); - harness.nodes[2].keys_manager.enable_op_for_all_signers(op); - } - harness.nodes[0].node.signer_unblocked(None); - harness.nodes[1].node.signer_unblocked(None); - harness.nodes[2].node.signer_unblocked(None); - - process_all_events_impl( - &harness.nodes, - &harness.out, - &harness.ab_link, - &harness.bc_link, - &mut harness.chain_state, - &mut harness.payments, - &mut harness.queues, - ); - - // Since MPP payments are supported, we wait until we fully settle the state of all - // channels to see if we have any committed HTLC parts of an MPP payment that need - // to be failed back. - for node in &harness.nodes { - node.node.timer_tick_occurred(); - } - process_all_events_impl( - &harness.nodes, - &harness.out, - &harness.ab_link, - &harness.bc_link, - &mut harness.chain_state, - &mut harness.payments, - &mut harness.queues, - ); - - harness.payments.assert_all_resolved(); - harness.payments.assert_claims_reported(); - - // Finally, make sure that at least one end of each channel can make a substantial payment - for &chan_id in harness.ab_link.channel_ids() { - assert!( - harness.payments.send_direct(&harness.nodes, 0, 1, chan_id, 10_000_000) - || harness.payments.send_direct( - &harness.nodes, - 1, - 0, - chan_id, - 10_000_000 - ) - ); - } - for &chan_id in harness.bc_link.channel_ids() { - assert!( - harness.payments.send_direct(&harness.nodes, 1, 2, chan_id, 10_000_000) - || harness.payments.send_direct( - &harness.nodes, - 2, - 1, - chan_id, - 10_000_000 - ) - ); - } - - harness.nodes[0].record_last_htlc_clear_fee(); - harness.nodes[1].record_last_htlc_clear_fee(); - harness.nodes[2].record_last_htlc_clear_fee(); + harness.settle_all(); + }, + _ => { + assert_test_invariants(&harness.nodes); + return; 
}, - _ => test_return!(), } harness.refresh_serialized_managers();