
Merge pull request #8892 from lightningnetwork/yy-itest-miner
Beat [0/4]: improve itest miner
guggero committed Jul 23, 2024
2 parents 578e1d4 + 9180762 commit f27f9f2
Showing 43 changed files with 1,255 additions and 999 deletions.
4 changes: 2 additions & 2 deletions discovery/gossiper.go
@@ -750,8 +750,8 @@ func (d *AuthenticatedGossiper) Stop() error {
 }
 
 func (d *AuthenticatedGossiper) stop() {
-	log.Info("Authenticated Gossiper is stopping")
-	defer log.Info("Authenticated Gossiper stopped")
+	log.Debug("Authenticated Gossiper is stopping")
+	defer log.Debug("Authenticated Gossiper stopped")
 
 	d.blockEpochs.Cancel()
5 changes: 5 additions & 0 deletions docs/release-notes/release-notes-0.18.3.md
@@ -116,6 +116,11 @@
 
 ## Tooling and Documentation
 
+* [`lntest.HarnessTest` no longer exposes a `Miner`
+  instance](https://github.com/lightningnetwork/lnd/pull/8892). It has been
+  changed into a private `miner` instance, and all mining-related assertions
+  are now accessible only via the harness (see the sketch after this diff).
+
 # Contributors (Alphabetical Order)
 
 * Andras Banki-Horvath
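Since that bullet is the user-facing summary of the whole commit, here is a minimal sketch of the migration it describes, assembled only from harness calls that appear in this diff; the function name and test flow are illustrative, not part of the PR:

```go
// Illustrative sketch (not a file in this PR): helpers that previously
// hung off the exported ht.Miner are now methods on the harness itself.
func testMinerAPIMigration(ht *lntest.HarnessTest) {
	// Before this PR (no longer compiles):
	//   _, height := ht.Miner.GetBestBlock()
	//   txid := ht.Miner.AssertNumTxsInMempool(1)[0]

	// After this PR:
	height := ht.CurrentHeight()
	ht.Logf("current height: %v", height)

	// Wait for exactly one transaction to enter the mempool.
	txid := ht.AssertNumTxsInMempool(1)[0]

	// Mine one block, assert it carries exactly one non-coinbase tx,
	// and verify that tx is the one we waited for.
	block := ht.MineBlocksAndAssertNumTxes(1, 1)[0]
	ht.AssertTxInBlock(block, txid)
}
```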
3 changes: 3 additions & 0 deletions htlcswitch/decayedlog.go
@@ -149,6 +149,9 @@ func (d *DecayedLog) initBuckets() error {
 
 // Stop halts the garbage collector and closes boltdb.
 func (d *DecayedLog) Stop() error {
+	log.Debugf("DecayedLog shutting down...")
+	defer log.Debugf("DecayedLog shutdown complete")
+
 	if !atomic.CompareAndSwapInt32(&d.stopped, 0, 1) {
 		return nil
 	}
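The hunk above adds shutdown logging around lnd's usual idempotent-stop guard: an atomic compare-and-swap ensures teardown runs at most once, while the deferred log line fires when Stop returns. A self-contained sketch of that pattern, with illustrative names (the `worker` type is not from lnd):

```go
package main

import (
	"log"
	"sync/atomic"
)

// worker is an illustrative stand-in for a component like DecayedLog.
type worker struct {
	stopped int32
	quit    chan struct{}
}

// Stop shuts the worker down exactly once; later calls are no-ops. Note
// that, as in the hunk above, the log lines sit before the guard, so
// repeated calls still log even though teardown runs only once.
func (w *worker) Stop() error {
	log.Printf("worker shutting down...")
	defer log.Printf("worker shutdown complete")

	// CompareAndSwap fails on every call after the first.
	if !atomic.CompareAndSwapInt32(&w.stopped, 0, 1) {
		return nil
	}

	close(w.quit)
	return nil
}

func main() {
	w := &worker{quit: make(chan struct{})}
	_ = w.Stop()
	_ = w.Stop() // safe: returns immediately
}
```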
33 changes: 16 additions & 17 deletions itest/lnd_channel_backup_test.go
@@ -94,7 +94,7 @@ func newChanRestoreScenario(ht *lntest.HarnessTest, ct lnrpc.CommitmentType,
 	ht.FundCoinsUnconfirmed(btcutil.SatoshiPerBitcoin, dave)
 
 	// Mine a block to confirm the funds.
-	ht.MineBlocks(1)
+	ht.MineBlocksAndAssertNumTxes(1, 2)
 
 	// For the anchor output case we need two UTXOs for Carol so she can
 	// sweep both the local and remote anchor.
@@ -267,7 +267,7 @@ func testChannelBackupRestoreBasic(ht *lntest.HarnessTest) {
 				// the node from seed, then manually recover
 				// the channel backup.
 				return chanRestoreViaRPC(
-					st, password, mnemonic, multi, oldNode,
+					st, password, mnemonic, multi,
 				)
 			},
 		},
@@ -291,7 +291,7 @@ func testChannelBackupRestoreBasic(ht *lntest.HarnessTest) {
 				// create a new nodeRestorer that will restore
 				// using the on-disk channel.backup.
 				return chanRestoreViaRPC(
-					st, password, mnemonic, multi, oldNode,
+					st, password, mnemonic, multi,
 				)
 			},
 		},
@@ -523,7 +523,7 @@ func runChanRestoreScenarioUnConfirmed(ht *lntest.HarnessTest, useFile bool) {
 	// In our nodeRestorer function, we'll restore the node from seed, then
 	// manually recover the channel backup.
 	restoredNodeFunc := chanRestoreViaRPC(
-		ht, crs.password, crs.mnemonic, multi, dave,
+		ht, crs.password, crs.mnemonic, multi,
 	)
 
 	// Test the scenario.
@@ -624,8 +624,8 @@ func runChanRestoreScenarioCommitTypes(ht *lntest.HarnessTest,
 
 	var fundingShim *lnrpc.FundingShim
 	if ct == lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE {
-		_, minerHeight := ht.Miner.GetBestBlock()
-		thawHeight := uint32(minerHeight + thawHeightDelta)
+		minerHeight := ht.CurrentHeight()
+		thawHeight := minerHeight + thawHeightDelta
 
 		fundingShim, _ = deriveFundingShim(
 			ht, dave, carol, crs.params.Amt, thawHeight, true, ct,
@@ -658,7 +658,7 @@
 	// Now that we have Dave's backup file, we'll create a new nodeRestorer
 	// that we'll restore using the on-disk channels.backup.
 	restoredNodeFunc := chanRestoreViaRPC(
-		ht, crs.password, crs.mnemonic, multi, dave,
+		ht, crs.password, crs.mnemonic, multi,
 	)
 
 	// Test the scenario.
@@ -687,7 +687,7 @@ func testChannelBackupRestoreLegacy(ht *lntest.HarnessTest) {
 	// In our nodeRestorer function, we'll restore the node from seed, then
 	// manually recover the channel backup.
 	restoredNodeFunc := chanRestoreViaRPC(
-		ht, crs.password, crs.mnemonic, multi, dave,
+		ht, crs.password, crs.mnemonic, multi,
 	)
 
 	// Test the scenario.
@@ -779,11 +779,11 @@ func runChanRestoreScenarioForceClose(ht *lntest.HarnessTest, zeroConf bool) {
 	// Now that we have Dave's backup file, we'll create a new nodeRestorer
 	// that will restore using the on-disk channel.backup.
 	restoredNodeFunc := chanRestoreViaRPC(
-		ht, crs.password, crs.mnemonic, multi, dave,
+		ht, crs.password, crs.mnemonic, multi,
 	)
 
 	// We now wait until Dave's closing tx has entered the mempool.
-	ht.Miner.AssertNumTxsInMempool(1)
+	ht.AssertNumTxsInMempool(1)
 
 	// Now that we're able to make our restored node, we'll shut down the
 	// old Dave node, as we'll be restoring it shortly below.
@@ -1272,7 +1272,7 @@ func testDataLossProtection(ht *lntest.HarnessTest) {
 	ht.MineBlocks(1)
 
 	// Dave should sweep his funds.
-	ht.Miner.AssertNumTxsInMempool(1)
+	ht.AssertNumTxsInMempool(1)
 
 	// Mine a block to confirm the sweep, and make sure Dave got his
 	// balance back.
@@ -1388,8 +1388,7 @@ func createLegacyRevocationChannel(ht *lntest.HarnessTest,
 // instance which will restore the target node from a password+seed, then
 // trigger a SCB restore using the RPC interface.
 func chanRestoreViaRPC(ht *lntest.HarnessTest, password []byte,
-	mnemonic []string, multi []byte,
-	oldNode *node.HarnessNode) nodeRestorer {
+	mnemonic []string, multi []byte) nodeRestorer {
 
 	backup := &lnrpc.RestoreChanBackupRequest_MultiChanBackup{
 		MultiChanBackup: multi,
@@ -1428,7 +1427,7 @@ func assertTimeLockSwept(ht *lntest.HarnessTest, carol, dave *node.HarnessNode,
 
 	// Mine a block to trigger the sweeps.
 	ht.MineBlocks(1)
-	ht.Miner.AssertNumTxsInMempool(expectedTxes)
+	ht.AssertNumTxsInMempool(expectedTxes)
 
 	// Carol should consider the channel pending force close (since she is
 	// waiting for her sweep to confirm).
@@ -1462,9 +1461,9 @@ func assertTimeLockSwept(ht *lntest.HarnessTest, carol, dave *node.HarnessNode,
 
 	// Mine a block to trigger the sweeps.
 	ht.MineEmptyBlocks(1)
-	daveSweep := ht.Miner.AssertNumTxsInMempool(1)[0]
+	daveSweep := ht.AssertNumTxsInMempool(1)[0]
 	block := ht.MineBlocksAndAssertNumTxes(1, 1)[0]
-	ht.Miner.AssertTxInBlock(block, daveSweep)
+	ht.AssertTxInBlock(block, daveSweep)
 
 	// Now the channel should be fully closed also from Dave's POV.
 	ht.AssertNumPendingForceClose(dave, 0)
@@ -1510,7 +1509,7 @@ func assertDLPExecuted(ht *lntest.HarnessTest,
 
 	// Upon reconnection, the nodes should detect that Dave is out of sync.
 	// Carol should force close the channel using her latest commitment.
-	ht.Miner.AssertNumTxsInMempool(1)
+	ht.AssertNumTxsInMempool(1)
 
 	// Channel should be in the state "waiting close" for Carol since she
 	// broadcasted the force close tx.
30 changes: 15 additions & 15 deletions itest/lnd_channel_force_close_test.go
@@ -160,7 +160,7 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
 
 	// Fetch the starting height of this test so we can compute the block
 	// heights at which we expect certain events to take place.
-	_, curHeight := ht.Miner.GetBestBlock()
+	curHeight := int32(ht.CurrentHeight())
 
 	// Using the current height of the chain, derive the relevant heights
 	// for incubating two-stage htlcs.
@@ -214,7 +214,7 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
 	ht.AssertNumUTXOs(alice, expectedUtxos)
 
 	// We expect to see Alice's force close tx in the mempool.
-	ht.Miner.GetNumTxsFromMempool(1)
+	ht.GetNumTxsFromMempool(1)
 
 	// Mine a block which should confirm the commitment transaction
 	// broadcast as a result of the force closure. Once mined, we also
@@ -278,7 +278,7 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
 
 	// Carol's sweep tx should be in the mempool already, as her output is
 	// not timelocked.
-	carolTx := ht.Miner.GetNumTxsFromMempool(1)[0]
+	carolTx := ht.GetNumTxsFromMempool(1)[0]
 
 	// Carol's sweeping tx should have a 2-input, 1-output shape.
 	require.Len(ht, carolTx.TxIn, 2)
@@ -389,11 +389,11 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
 	// So we fetch the node's mempool to ensure it has been properly
 	// broadcast.
 	ht.MineEmptyBlocks(1)
-	sweepingTXID := ht.Miner.AssertNumTxsInMempool(1)[0]
+	sweepingTXID := ht.AssertNumTxsInMempool(1)[0]
 
 	// Fetch the sweep transaction; every input it spends should come from
 	// the commitment transaction which was broadcast on-chain.
-	sweepTx := ht.Miner.GetRawTransaction(sweepingTXID)
+	sweepTx := ht.GetRawTransaction(sweepingTXID)
 	for _, txIn := range sweepTx.MsgTx().TxIn {
 		require.Equal(ht, &txIn.PreviousOutPoint.Hash, closingTxID,
 			"sweep transaction not spending from commit")
@@ -431,7 +431,7 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
 	ht.MineBlocksAndAssertNumTxes(1, 1)
 
 	// Update current height
-	_, curHeight = ht.Miner.GetBestBlock()
+	curHeight = int32(ht.CurrentHeight())
 
 	// checkForceClosedChannelNumHtlcs verifies that a force closed channel
 	// has the proper number of htlcs.
@@ -485,7 +485,7 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
 	// number of blocks we have generated since adding it to the nursery,
 	// and take an additional block off so that we end up one block shy of
 	// the expiry height, and add the block padding.
-	_, currentHeight := ht.Miner.GetBestBlock()
+	currentHeight := int32(ht.CurrentHeight())
 	cltvHeightDelta := int(htlcExpiryHeight - uint32(currentHeight) - 1)
 
 	// Advance the blockchain until just before the CLTV expires, nothing
@@ -547,7 +547,7 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
 	// NOTE: after restart, all the htlc timeout txns will be offered to
 	// the sweeper with `Immediate` set to true, so they won't be
 	// aggregated.
-	htlcTxIDs := ht.Miner.AssertNumTxsInMempool(numInvoices)
+	htlcTxIDs := ht.AssertNumTxsInMempool(numInvoices)
 
 	// Retrieve each htlc timeout txn from the mempool, and ensure it is
 	// well-formed. This entails verifying that each only spends from
@@ -567,7 +567,7 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
 		// on-chain. In case of an anchor type channel, we expect one
 		// extra input that is not spending from the commitment, which
 		// is added for fees.
-		htlcTx := ht.Miner.GetRawTransaction(htlcTxID)
+		htlcTx := ht.GetRawTransaction(htlcTxID)
 
 		// Ensure the htlc transaction has the expected number of
 		// inputs.
@@ -662,7 +662,7 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
 
 	// Advance the chain until just before the 2nd-layer CSV delays expire.
 	// For anchor channels this is one block earlier.
-	_, currentHeight = ht.Miner.GetBestBlock()
+	currentHeight = int32(ht.CurrentHeight())
 	ht.Logf("current height: %v, htlcCsvMaturityHeight=%v", currentHeight,
 		htlcCsvMaturityHeight)
 	numBlocks := int(htlcCsvMaturityHeight - uint32(currentHeight) - 2)
@@ -709,7 +709,7 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
 		// NOTE: we don't check `len(mempool) == 1` because it will
 		// give us a false positive.
 		err := wait.NoError(func() error {
-			mempool := ht.Miner.GetRawMempool()
+			mempool := ht.Miner().GetRawMempool()
 			if len(mempool) == 2 {
 				return nil
 			}
@@ -733,10 +733,10 @@
 	}
 
 	// Wait for the single sweep txn to appear in the mempool.
-	htlcSweepTxID := ht.Miner.AssertNumTxsInMempool(1)[0]
+	htlcSweepTxID := ht.AssertNumTxsInMempool(1)[0]
 
 	// Fetch the htlc sweep transaction from the mempool.
-	htlcSweepTx := ht.Miner.GetRawTransaction(htlcSweepTxID)
+	htlcSweepTx := ht.GetRawTransaction(htlcSweepTxID)
 
 	// Ensure the htlc sweep transaction only has one input for each htlc
 	// Alice extended before force closing.
@@ -818,7 +818,7 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
 	// Generate the final block that sweeps all htlc funds into the user's
 	// wallet, and make sure the sweep is in this block.
 	block := ht.MineBlocksAndAssertNumTxes(1, 1)[0]
-	ht.Miner.AssertTxInBlock(block, htlcSweepTxID)
+	ht.AssertTxInBlock(block, htlcSweepTxID)
 
 	// Now that the channel has been fully swept, it should no longer show
 	// up within the pending channels RPC.
@@ -935,7 +935,7 @@ func testFailingChannel(ht *lntest.HarnessTest) {
 	ht.MineEmptyBlocks(1)
 
 	// Carol should have broadcast her sweeping tx.
-	ht.Miner.AssertNumTxsInMempool(1)
+	ht.AssertNumTxsInMempool(1)
 
 	// Mine two blocks to confirm Carol's sweeping tx, by which time
 	// Alice's commit output should be offered to her sweeper.
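The `@@ -709` hunk above is the one spot where the raw miner is still reached, now through the `Miner()` accessor, and it wraps the mempool check in lnd's `wait.NoError` poller. A self-contained sketch of that poll-until-timeout pattern, with illustrative names (`noErrorUntil` stands in for `wait.NoError`; the retry interval and error text are assumptions):

```go
package main

import (
	"fmt"
	"time"
)

// noErrorUntil re-runs check until it returns nil or timeout elapses;
// it stands in for lnd's wait.NoError helper.
func noErrorUntil(check func() error, timeout time.Duration) error {
	deadline := time.After(timeout)
	for {
		err := check()
		if err == nil {
			return nil
		}
		select {
		case <-deadline:
			return fmt.Errorf("timed out after %v: %w", timeout, err)
		case <-time.After(100 * time.Millisecond):
			// Retry after a short pause.
		}
	}
}

func main() {
	start := time.Now()
	err := noErrorUntil(func() error {
		// In the itest proper this would be:
		//   mempool := ht.Miner().GetRawMempool()
		//   if len(mempool) == 2 { return nil }
		if time.Since(start) > time.Second {
			return nil
		}
		return fmt.Errorf("still waiting for 2 txns in mempool")
	}, 5*time.Second)
	fmt.Println("poll result:", err)
}
```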
4 changes: 1 addition & 3 deletions itest/lnd_channel_funding_fund_max_test.go
@@ -15,7 +15,6 @@ import (
 	"github.com/lightningnetwork/lnd/lnwallet"
 	"github.com/lightningnetwork/lnd/lnwallet/chainfee"
 	"github.com/lightningnetwork/lnd/lnwire"
-	"github.com/stretchr/testify/require"
 )
 
 type chanFundMaxTestCase struct {
@@ -317,8 +316,7 @@ func fundingFee(numInput int, change bool) btcutil.Amount {
 // sweepNodeWalletAndAssert sweeps funds from a node wallet.
 func sweepNodeWalletAndAssert(ht *lntest.HarnessTest, node *node.HarnessNode) {
 	// New miner address we will sweep all funds to.
-	minerAddr, err := ht.Miner.NewAddress()
-	require.NoError(ht, err)
+	minerAddr := ht.NewMinerAddress()
 
 	// Send all funds back to the miner node.
 	node.RPC.SendCoins(&lnrpc.SendCoinsRequest{
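The diff above truncates the `SendCoins` request, so as a hedged sketch, a complete version of such a sweep helper might look like the following; the request fields and the confirmation step are assumptions, not copied from the PR:

```go
// sweepAllToMiner is an illustrative helper (not from this PR): it sends
// a node's entire wallet balance to a fresh miner address and mines one
// block to confirm the sweep.
func sweepAllToMiner(ht *lntest.HarnessTest, n *node.HarnessNode) {
	// New miner address we will sweep all funds to, using the post-PR
	// harness helper.
	minerAddr := ht.NewMinerAddress()

	// SendAll sweeps the whole balance rather than a fixed amount; the
	// exact fields used by the real test are cut off in the diff above.
	n.RPC.SendCoins(&lnrpc.SendCoinsRequest{
		Addr:    minerAddr.String(),
		SendAll: true,
	})

	// One block should confirm exactly the sweep transaction.
	ht.MineBlocksAndAssertNumTxes(1, 1)
}
```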
4 changes: 2 additions & 2 deletions itest/lnd_channel_graph_test.go
@@ -316,7 +316,7 @@ func testGraphTopologyNtfns(ht *lntest.HarnessTest, pinned bool) {
 	ht.AssertNumNodeAnns(alice, alice.PubKeyStr, 1)
 	ht.AssertNumNodeAnns(alice, bob.PubKeyStr, 1)
 
-	_, blockHeight := ht.Miner.GetBestBlock()
+	blockHeight := ht.CurrentHeight()
 
 	// Now we'll test that updates are properly sent after channels are
 	// closed within the network.
@@ -326,7 +326,7 @@
 	// notification indicating so.
 	closedChan := ht.AssertTopologyChannelClosed(alice, chanPoint)
 
-	require.Equal(ht, uint32(blockHeight+1), closedChan.ClosedHeight,
+	require.Equal(ht, blockHeight+1, closedChan.ClosedHeight,
 		"close heights of channel mismatch")
 
 	fundingTxid := ht.OutPointFromChannelPoint(chanPoint)
2 changes: 1 addition & 1 deletion itest/lnd_coop_close_with_htlcs_test.go
@@ -117,7 +117,7 @@ func coopCloseWithHTLCs(ht *lntest.HarnessTest) {
 	)
 
 	// Wait for the close tx to be in the mempool.
-	ht.Miner.AssertTxInMempool(&closeTxid)
+	ht.AssertTxInMempool(&closeTxid)
 
 	// Wait for it to get mined and finish tearing down.
 	ht.AssertStreamChannelCoopClosed(alice, chanPoint, false, closeClient)
2 changes: 2 additions & 0 deletions itest/lnd_estimate_route_fee_test.go
@@ -359,6 +359,8 @@ func testEstimateRouteFee(ht *lntest.HarnessTest) {
 
 	mts.ht.CloseChannelAssertPending(mts.bob, channelPointBobPaula, false)
 	mts.ht.CloseChannelAssertPending(mts.eve, channelPointEvePaula, false)
+	ht.MineBlocksAndAssertNumTxes(1, 2)
+
 	mts.closeChannels()
 }