From f5e8a832b62a40f0a7e6eeb98766cc56f1a81de3 Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Sun, 10 Oct 2021 23:36:57 +0000
Subject: [PATCH] Add some basic test coverage of monitor payment data reloading

---
 lightning/src/ln/functional_tests.rs |  46 +++++++--
 lightning/src/ln/payment_tests.rs    | 149 ++++++++++++++++++++++++++-
 2 files changed, 183 insertions(+), 12 deletions(-)

diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index eb7d868a95d..3020cb77d4c 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -4100,7 +4100,7 @@ fn test_no_txn_manager_serialize_deserialize() {
 	send_payment(&nodes[0], &[&nodes[1]], 1000000);
 }
 
-fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool) {
+fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, confirm_commitment_tx: bool, payment_timeout: bool) {
 	// When a Channel is closed, any outbound HTLCs which were relayed through it are simply
 	// dropped when the Channel is. From there, the ChannelManager relies on the ChannelMonitor
 	// having a copy of the relevant fail-/claim-back data and processes the HTLC fail/claim when
@@ -4121,7 +4121,7 @@ fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool) {
 
 	// Route a payment, but force-close the channel before the HTLC fulfill message arrives at
 	// nodes[0].
-	let (payment_preimage, _, _) = route_payment(&nodes[0], &[&nodes[1]], 10000000);
+	let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 10000000);
 	nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id).unwrap();
 	check_closed_broadcast!(nodes[0], true);
 	check_added_monitors!(nodes[0], 1);
@@ -4137,6 +4137,7 @@ fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool) {
 	assert_eq!(node_txn[0], node_txn[1]);
 	check_spends!(node_txn[1], funding_tx);
 	check_spends!(node_txn[2], node_txn[1]);
+	let timeout_txn = vec![node_txn[2].clone()];
 
 	assert!(nodes[1].node.claim_funds(payment_preimage));
 	check_added_monitors!(nodes[1], 1);
@@ -4151,15 +4152,30 @@ fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool) {
 	header.prev_blockhash = nodes[0].best_block_hash();
 	connect_block(&nodes[0], &Block { header, txdata: vec![node_txn[1].clone()]});
 
+	if confirm_commitment_tx {
+		connect_blocks(&nodes[0], BREAKDOWN_TIMEOUT as u32 - 1);
+	}
+
+	header.prev_blockhash = nodes[0].best_block_hash();
+	let claim_block = Block { header, txdata: if payment_timeout { timeout_txn } else { claim_txn } };
+
+	if payment_timeout {
+		assert!(confirm_commitment_tx); // Otherwise we're spending below our CSV!
+		connect_block(&nodes[0], &claim_block);
+		connect_blocks(&nodes[0], ANTI_REORG_DELAY - 2);
+	}
+
 	// Now connect the HTLC claim transaction with the ChainMonitor-generated ChannelMonitor update
 	// returning TemporaryFailure. This should cause the claim event to never make its way to the
 	// ChannelManager.
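+	// (The test persister's `chain_sync_monitor_persistences` map, cleared just below, records
+	// the monitor updates generated as blocks are connected; we complete the deferred
+	// persistence by hand later by feeding one of them back via `channel_monitor_updated`.)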
 	chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
 	chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
-	header.prev_blockhash = nodes[0].best_block_hash();
-	let claim_block = Block { header, txdata: claim_txn };
-	connect_block(&nodes[0], &claim_block);
+	if payment_timeout {
+		connect_blocks(&nodes[0], 1);
+	} else {
+		connect_block(&nodes[0], &claim_block);
+	}
 
 	let funding_txo = OutPoint { txid: funding_tx.txid(), index: 0 };
 	let mon_updates: Vec<_> = chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap()
@@ -4181,7 +4197,11 @@ fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool) {
 	let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
 	get_monitor!(nodes[0], chan_id).write(&mut chan_0_monitor_serialized).unwrap();
 	nodes[0].chain_monitor.chain_monitor.channel_monitor_updated(funding_txo, mon_updates[0]).unwrap();
-	expect_payment_sent!(nodes[0], payment_preimage);
+	if payment_timeout {
+		expect_payment_failed!(nodes[0], payment_hash, true);
+	} else {
+		expect_payment_sent!(nodes[0], payment_preimage);
+	}
 
 	// If we persist the ChannelManager after we get the PaymentSent event, we shouldn't get it
 	// twice.
@@ -4222,7 +4242,11 @@ fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool) {
 	if persist_manager_post_event {
 		assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
 	} else {
-		expect_payment_sent!(nodes[0], payment_preimage);
+		if payment_timeout {
+			expect_payment_failed!(nodes[0], payment_hash, true);
+		} else {
+			expect_payment_sent!(nodes[0], payment_preimage);
+		}
 	}
 
 	// Note that if we re-connect the block which exposed nodes[0] to the payment preimage (but
@@ -4235,8 +4259,12 @@ fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool) {
 
 #[test]
 fn test_dup_htlc_onchain_fails_on_reload() {
-	do_test_dup_htlc_onchain_fails_on_reload(true);
-	do_test_dup_htlc_onchain_fails_on_reload(false);
+	do_test_dup_htlc_onchain_fails_on_reload(true, true, true);
+	do_test_dup_htlc_onchain_fails_on_reload(true, true, false);
+	do_test_dup_htlc_onchain_fails_on_reload(true, false, false);
+	do_test_dup_htlc_onchain_fails_on_reload(false, true, true);
+	do_test_dup_htlc_onchain_fails_on_reload(false, true, false);
+	do_test_dup_htlc_onchain_fails_on_reload(false, false, false);
 }
 
 #[test]
diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs
index c622309a4b5..3d86ae11902 100644
--- a/lightning/src/ln/payment_tests.rs
+++ b/lightning/src/ln/payment_tests.rs
@@ -11,16 +11,22 @@
 //! serialization ordering between ChannelManager/ChannelMonitors and ensuring we can still retry
 //! payments thereafter.
+use chain::Watch;
+use chain::channelmonitor::ChannelMonitor;
 use ln::{PaymentPreimage, PaymentHash};
-use ln::channelmanager::{PaymentId, PaymentSendFailure};
+use ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, PaymentId, PaymentSendFailure};
 use ln::features::InitFeatures;
 use ln::msgs;
-use ln::msgs::ChannelMessageHandler;
-use util::events::{Event, MessageSendEvent, MessageSendEventsProvider};
+use ln::msgs::{ChannelMessageHandler, ErrorAction};
+use util::events::{ClosureReason, Event, MessageSendEvent, MessageSendEventsProvider};
+use util::test_utils;
 use util::errors::APIError;
+use util::enforcing_trait_impls::EnforcingSigner;
+use util::ser::{ReadableArgs, Writeable};
 
 use bitcoin::hashes::sha256::Hash as Sha256;
 use bitcoin::hashes::Hash;
+use bitcoin::BlockHash;
 
 use prelude::*;
 
@@ -266,3 +272,140 @@ fn no_pending_leak_on_initial_send_failure() {
 
 	assert!(!nodes[0].node.has_pending_payments());
 }
+
+#[test]
+fn retry_with_no_persist() {
+	// If we send a pending payment and `send_payment` returns success, we should always either
+	// return a payment failure event or a payment success event, and on failure the payment should
+	// be retryable.
+	// In order to do so when the ChannelManager isn't immediately persisted (which is normal, as
+	// it's always persisted asynchronously), the ChannelManager has to reload some payment data
+	// from ChannelMonitor(s) in some cases. This test exercises that reloading.
+	let chanmon_cfgs = create_chanmon_cfgs(3);
+	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+	let persister: test_utils::TestPersister;
+	let new_chain_monitor: test_utils::TestChainMonitor;
+	let nodes_0_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
+	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+	let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
+	create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
+
+	// Serialize the ChannelManager prior to sending the payment
+	let nodes_0_serialized = nodes[0].node.encode();
+
+	let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 100_000);
+	let payment_id = nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
+	check_added_monitors!(nodes[0], 1);
+
+	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+	assert_eq!(events.len(), 1);
+	let payment_event = SendEvent::from_event(events.pop().unwrap());
+	assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
+
+	// We relay the payment to nodes[1] while it's disconnected from nodes[2], causing the payment
+	// to be returned immediately to nodes[0], without having nodes[2] fail the inbound payment
+	// (which would prevent retry).
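+	// (Note that we fetch, but never deliver, the resulting HTLC-fail messages from nodes[1]
+	// below, so from nodes[0]'s perspective the payment is still pending when its stale
+	// serialized ChannelManager is reloaded.)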
+	nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), false);
+	nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+
+	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+	commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true);
+	// nodes[1] now immediately fails the HTLC as the next-hop channel is disconnected
+	let _ = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+
+	reconnect_nodes(&nodes[1], &nodes[2], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+
+	// The ChannelMonitor should always be the latest version, as we're required to persist it
+	// during the `commitment_signed_dance!()`.
+	let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
+	get_monitor!(nodes[0], chan_id).write(&mut chan_0_monitor_serialized).unwrap();
+
+	persister = test_utils::TestPersister::new();
+	let keys_manager = &chanmon_cfgs[0].keys_manager;
+	new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), nodes[0].logger, node_cfgs[0].fee_estimator, &persister, keys_manager);
+	nodes[0].chain_monitor = &new_chain_monitor;
+	let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
+	let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
+		&mut chan_0_monitor_read, keys_manager).unwrap();
+	assert!(chan_0_monitor_read.is_empty());
+
+	let mut nodes_0_read = &nodes_0_serialized[..];
+	let (_, nodes_0_deserialized_tmp) = {
+		let mut channel_monitors = HashMap::new();
+		channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor);
+		<(BlockHash, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
+			default_config: test_default_channel_config(),
+			keys_manager,
+			fee_estimator: node_cfgs[0].fee_estimator,
+			chain_monitor: nodes[0].chain_monitor,
+			tx_broadcaster: nodes[0].tx_broadcaster.clone(),
+			logger: nodes[0].logger,
+			channel_monitors,
+		}).unwrap()
+	};
+	nodes_0_deserialized = nodes_0_deserialized_tmp;
+	assert!(nodes_0_read.is_empty());
+
+	assert!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
+	nodes[0].node = &nodes_0_deserialized;
+	check_added_monitors!(nodes[0], 1);
+
+	// On reload, the ChannelManager should realize it is stale compared to the ChannelMonitor and
+	// force-close the channel.
+	check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager);
+	assert!(nodes[0].node.list_channels().is_empty());
+	assert!(nodes[0].node.has_pending_payments());
+	let as_commitment_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+	assert_eq!(as_commitment_tx.len(), 1);
+
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known()});
+	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+
+	// Now nodes[1] should send a channel reestablish, which nodes[0] will respond to with an
+	// error, as the channel has hit the chain.
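+	// (In response to that error message, nodes[1] will broadcast its latest commitment
+	// transaction, which we pick out of its broadcaster below.)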
+	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known()});
+	let bs_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
+	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
+	let as_err = nodes[0].node.get_and_clear_pending_msg_events();
+	assert_eq!(as_err.len(), 1);
+	match as_err[0] {
+		MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::SendErrorMessage { ref msg } } => {
+			assert_eq!(node_id, nodes[1].node.get_our_node_id());
+			nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), msg);
+			check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: "Failed to find corresponding channel".to_string() });
+			check_added_monitors!(nodes[1], 1);
+			assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
+		},
+		_ => panic!("Unexpected event"),
+	}
+	check_closed_broadcast!(nodes[1], false);
+
+	// Create a new channel on which to retry the payment before we fail the payment via the
+	// HTLC-Timeout transaction. This avoids the ChannelManager timing out the payment due to us
+	// connecting several blocks while creating the channel (implying time has passed).
+	create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+	assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
+
+	mine_transaction(&nodes[0], &as_commitment_tx[0]);
+	connect_blocks(&nodes[0], TEST_FINAL_CLTV*4 + 20);
+	let as_htlc_timeout_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+	assert_eq!(as_htlc_timeout_tx.len(), 1);
+	confirm_transaction(&nodes[0], &as_htlc_timeout_tx[0]);
+	nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
+	expect_payment_failed!(nodes[0], payment_hash, false);
+
+	// Finally, retry the payment (which was reloaded from the ChannelMonitor when nodes[0] was
+	// restarted) via a route over the new channel, which will work without issue and eventually
+	// be received and claimed at the recipient just like any other payment.
+	let (new_route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[2], 100_000);
+
+	nodes[0].node.retry_payment(&new_route, payment_id).unwrap();
+	check_added_monitors!(nodes[0], 1);
+	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+	assert_eq!(events.len(), 1);
+	pass_along_path(&nodes[0], &[&nodes[1], &nodes[2]], 100_000, payment_hash, Some(payment_secret), events.pop().unwrap(), true, None);
+	claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], false, payment_preimage);
+}