
lightningnetwork / lnd / 14811380784

03 May 2025 01:35PM UTC coverage: 69.055% (+0.02%) from 69.037%

Pull Request #9780: chore: remove dead code
Merge 580e54983 into 334a7d112

133887 of 193884 relevant lines covered (69.06%)

22140.16 hits per line

Source File: /lntest/harness.go (coverage: 0.0%)

package lntest

import (
        "context"
        "fmt"
        "strings"
        "testing"
        "time"

        "github.com/btcsuite/btcd/blockchain"
        "github.com/btcsuite/btcd/btcec/v2"
        "github.com/btcsuite/btcd/btcutil"
        "github.com/btcsuite/btcd/chaincfg/chainhash"
        "github.com/btcsuite/btcd/txscript"
        "github.com/btcsuite/btcd/wire"
        "github.com/go-errors/errors"
        "github.com/lightningnetwork/lnd/fn/v2"
        "github.com/lightningnetwork/lnd/input"
        "github.com/lightningnetwork/lnd/kvdb/etcd"
        "github.com/lightningnetwork/lnd/lnrpc"
        "github.com/lightningnetwork/lnd/lnrpc/invoicesrpc"
        "github.com/lightningnetwork/lnd/lnrpc/routerrpc"
        "github.com/lightningnetwork/lnd/lnrpc/signrpc"
        "github.com/lightningnetwork/lnd/lnrpc/walletrpc"
        "github.com/lightningnetwork/lnd/lntest/miner"
        "github.com/lightningnetwork/lnd/lntest/node"
        "github.com/lightningnetwork/lnd/lntest/rpc"
        "github.com/lightningnetwork/lnd/lntest/wait"
        "github.com/lightningnetwork/lnd/lntypes"
        "github.com/lightningnetwork/lnd/lnwallet/chainfee"
        "github.com/lightningnetwork/lnd/lnwire"
        "github.com/lightningnetwork/lnd/routing"
        "github.com/stretchr/testify/require"
)

const (
        // defaultMinerFeeRate specifies the fee rate in sats when sending
        // outputs from the miner.
        defaultMinerFeeRate = 7500

        // numBlocksOpenChannel specifies the number of blocks mined when
        // opening a channel.
        numBlocksOpenChannel = 6

        // lndErrorChanSize specifies the buffer size used to receive errors
        // from the lnd process.
        lndErrorChanSize = 10

        // maxBlocksAllowed specifies the max allowed value to be used when
        // mining blocks.
        maxBlocksAllowed = 100

        finalCltvDelta  = routing.MinCLTVDelta // 18.
        thawHeightDelta = finalCltvDelta * 2   // 36.
)

var (
        // MaxBlocksMinedPerTest is the maximum number of blocks that we allow
        // a test to mine. This is an exported global variable so it can be
        // overwritten by other projects that don't have the same constraints.
        MaxBlocksMinedPerTest = 50
)

// TestCase defines a test case used in the integration tests.
type TestCase struct {
        // Name specifies the test name.
        Name string

        // TestFunc is the test case wrapped in a function.
        TestFunc func(t *HarnessTest)
}
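
// A TestCase is typically declared as a struct literal when registering an
// integration test. Minimal, illustrative sketch only (not part of the
// original file):
//
//    var exampleCase = &TestCase{
//        Name: "example test",
//        TestFunc: func(ht *HarnessTest) {
//            // Test assertions go here.
//        },
//    }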

// HarnessTest builds on top of a testing.T with enhanced error detection. It
// is responsible for managing the interactions among different nodes, and
// providing easy-to-use assertions.
type HarnessTest struct {
        *testing.T

        // miner is a reference to a running full node that can be used to
        // create new blocks on the network.
        miner *miner.HarnessMiner

        // manager handles the start and stop of a given node.
        manager *nodeManager

        // feeService is a web service that provides external fee estimates to
        // lnd.
        feeService WebFeeService

        // Channel for transmitting stderr output from a failed lightning node
        // to the main process.
        lndErrorChan chan error

        // runCtx is a context with a cancel method. It's used to signal when
        // the node needs to quit, and used as the parent context when spawning
        // children contexts for RPC requests.
        runCtx context.Context //nolint:containedctx
        cancel context.CancelFunc

        // stopChainBackend points to the cleanup function returned by the
        // chainBackend.
        stopChainBackend func()

        // cleaned specifies whether the cleanup has been applied for the
        // current HarnessTest.
        cleaned bool

        // currentHeight is the current height of the chain backend.
        currentHeight uint32
}

// harnessOpts contains functional options to modify the behavior of the
// various harness calls.
type harnessOpts struct {
        useAMP bool
}

// defaultHarnessOpts returns a new instance of the harnessOpts with default
// values specified.
func defaultHarnessOpts() harnessOpts {
        return harnessOpts{
                useAMP: false,
        }
}

// HarnessOpt is a functional option that can be used to modify the behavior of
// harness functionality.
type HarnessOpt func(*harnessOpts)

// WithAMP is a functional option that can be used to enable the AMP feature
// for sending payments.
func WithAMP() HarnessOpt {
        return func(h *harnessOpts) {
                h.useAMP = true
        }
}
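
// Illustrative sketch of the assumed caller pattern (not part of the original
// file): harness methods that accept HarnessOpt values apply them on top of
// the defaults with the usual functional-option loop:
//
//    opts := defaultHarnessOpts()
//    for _, o := range hOpts {
//        o(&opts) // e.g. WithAMP() sets opts.useAMP = true.
//    }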

// NewHarnessTest creates a new instance of a harnessTest from a regular
// testing.T instance.
func NewHarnessTest(t *testing.T, lndBinary string, feeService WebFeeService,
        dbBackend node.DatabaseBackend, nativeSQL bool) *HarnessTest {

        t.Helper()

        // Create the run context.
        ctxt, cancel := context.WithCancel(context.Background())

        manager := newNodeManager(lndBinary, dbBackend, nativeSQL)

        return &HarnessTest{
                T:          t,
                manager:    manager,
                feeService: feeService,
                runCtx:     ctxt,
                cancel:     cancel,
                // We need to use a buffered channel here as we don't want to
                // block sending errors.
                lndErrorChan: make(chan error, lndErrorChanSize),
        }
}

// Start will assemble the chain backend and the miner for the HarnessTest. It
// also starts the fee service and watches for lnd process errors.
func (h *HarnessTest) Start(chain node.BackendConfig,
        miner *miner.HarnessMiner) {

        // Spawn a new goroutine to watch for any fatal errors that any of the
        // running lnd processes encounter. If an error occurs, then the test
        // case should naturally fail as a result, and we log the server error
        // here to help debug.
        go func() {
                select {
                case err, more := <-h.lndErrorChan:
                        if !more {
                                return
                        }
                        h.Logf("lnd finished with error (stderr):\n%v", err)

                case <-h.runCtx.Done():
                        return
                }
        }()

        // Start the fee service.
        err := h.feeService.Start()
        require.NoError(h, err, "failed to start fee service")

        // Assemble the node manager with chainBackend and feeServiceURL.
        h.manager.chainBackend = chain
        h.manager.feeServiceURL = h.feeService.URL()

        // Assemble the miner.
        h.miner = miner

        // Update block height.
        h.updateCurrentHeight()
}

// ChainBackendName returns the chain backend name used in the test.
func (h *HarnessTest) ChainBackendName() string {
        return h.manager.chainBackend.Name()
}

// Context returns the run context used in this test. Usually it should be
// managed by the test itself; otherwise undefined behaviors will occur. It can
// be used, however, when a test needs to have its own context being managed
// differently. In that case, instead of using a background context, the run
// context should be used such that the test context scope can be fully
// controlled.
func (h *HarnessTest) Context() context.Context {
        return h.runCtx
}

// createAndSendOutput sends amt satoshis from the internal mining node to the
// targeted lightning node using a P2WKH address. No blocks are mined, so the
// transactions will sit unconfirmed in the mempool.
func (h *HarnessTest) createAndSendOutput(target *node.HarnessNode,
        amt btcutil.Amount, addrType lnrpc.AddressType) {

        req := &lnrpc.NewAddressRequest{Type: addrType}
        resp := target.RPC.NewAddress(req)
        addr := h.DecodeAddress(resp.Address)
        addrScript := h.PayToAddrScript(addr)

        output := &wire.TxOut{
                PkScript: addrScript,
                Value:    int64(amt),
        }
        h.miner.SendOutput(output, defaultMinerFeeRate)
}

// Stop stops the test harness.
func (h *HarnessTest) Stop() {
        // Do nothing if it's not started.
        if h.runCtx == nil {
                h.Log("HarnessTest is not started")
                return
        }

        h.shutdownAllNodes()

        close(h.lndErrorChan)

        // Stop the fee service.
        err := h.feeService.Stop()
        require.NoError(h, err, "failed to stop fee service")

        // Stop the chainBackend.
        h.stopChainBackend()

        // Stop the miner.
        h.miner.Stop()
}

// RunTestCase executes a harness test case. Any errors or panics will be
// represented as fatal.
func (h *HarnessTest) RunTestCase(testCase *TestCase) {
        defer func() {
                if err := recover(); err != nil {
                        description := errors.Wrap(err, 2).ErrorStack()
                        h.Fatalf("Failed: (%v) panic with: \n%v",
                                testCase.Name, description)
                }
        }()

        testCase.TestFunc(h)
}

// Subtest creates a child HarnessTest, which inherits the harness net and
// standby nodes created by the parent test. It will return a cleanup function
// which resets all the standby nodes' configs back to their original state and
// creates snapshots of each node's internal state.
func (h *HarnessTest) Subtest(t *testing.T) *HarnessTest {
        t.Helper()

        st := &HarnessTest{
                T:            t,
                manager:      h.manager,
                miner:        h.miner,
                feeService:   h.feeService,
                lndErrorChan: make(chan error, lndErrorChanSize),
        }

        // Inherit context from the main test.
        st.runCtx, st.cancel = context.WithCancel(h.runCtx)

        // Inherit the subtest for the miner.
        st.miner.T = st.T

        // Reset fee estimator.
        st.feeService.Reset()

        // Record block height.
        h.updateCurrentHeight()
        startHeight := int32(h.CurrentHeight())

        st.Cleanup(func() {
                // Make sure the test is not consuming too many blocks.
                st.checkAndLimitBlocksMined(startHeight)

                // Don't bother running the cleanups if the test failed.
                if st.Failed() {
                        st.Log("test failed, skipped cleanup")
                        st.shutdownNodesNoAssert()
                        return
                }

                // Don't run cleanup if it's already done. This can happen if
                // we have multiple levels of inheritance from the parent
                // harness test. For instance, a `Subtest(st)`.
                if st.cleaned {
                        st.Log("test already cleaned, skipped cleanup")
                        return
                }

                // If there are running nodes, shut them down.
                st.shutdownAllNodes()

                // We require the mempool to be cleaned from the test.
                require.Empty(st, st.miner.GetRawMempool(), "mempool not "+
                        "cleaned, please mine blocks to clean them all.")

                // Finally, cancel the run context. We have to do it here
                // because we need to keep the context alive for the above
                // assertions used in cleanup.
                st.cancel()

                // We now want to mark the parent harness as cleaned to avoid
                // running cleanup again since its internal state has been
                // cleaned up by its child harness tests.
                h.cleaned = true
        })

        return st
}
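
// Illustrative sketch of the assumed caller pattern (not part of the original
// file): a parent test usually creates one child harness per test case inside
// t.Run:
//
//    t.Run(tc.Name, func(t *testing.T) {
//        st := h.Subtest(t)
//        st.SetTestName(tc.Name)
//        st.RunTestCase(tc)
//    })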

// checkAndLimitBlocksMined asserts that the blocks mined in a single test
// don't exceed 50, which implicitly discourages table-driven tests, which are
// hard to maintain and take a long time to run.
func (h *HarnessTest) checkAndLimitBlocksMined(startHeight int32) {
        _, endHeight := h.GetBestBlock()
        blocksMined := endHeight - startHeight

        h.Logf("finished test: %s, start height=%d, end height=%d, mined "+
                "blocks=%d", h.manager.currentTestCase, startHeight, endHeight,
                blocksMined)

        // If the number of blocks is less than 40, we consider the test
        // healthy.
        if blocksMined < 40 {
                return
        }

        // Otherwise, log a warning since more than 40 blocks were mined.
        desc := "!============================================!\n"

        desc += fmt.Sprintf("Too many blocks (%v) mined in one test! Tips:\n",
                blocksMined)

        desc += "1. break test into smaller individual tests, especially if " +
                "this is a table-driven test.\n" +
                "2. use smaller CSV via `--bitcoin.defaultremotedelay=1.`\n" +
                "3. use smaller CLTV via `--bitcoin.timelockdelta=18.`\n" +
                "4. remove unnecessary CloseChannel when test ends.\n" +
                "5. use `CreateSimpleNetwork` for efficient channel creation.\n"
        h.Log(desc)

        // We enforce that the test should not mine more than
        // MaxBlocksMinedPerTest (50 by default) blocks, which is more than
        // enough to test a multi hop force close scenario.
        require.LessOrEqualf(
                h, int(blocksMined), MaxBlocksMinedPerTest,
                "cannot mine more than %d blocks in one test",
                MaxBlocksMinedPerTest,
        )
}

// shutdownNodesNoAssert will shut down all running nodes without assertions.
// This is used when the test has already failed; we don't want to log more
// errors but instead focus on the original error.
func (h *HarnessTest) shutdownNodesNoAssert() {
        for _, node := range h.manager.activeNodes {
                _ = h.manager.shutdownNode(node)
        }
}

// shutdownAllNodes will shut down all running nodes.
func (h *HarnessTest) shutdownAllNodes() {
        var err error
        for _, node := range h.manager.activeNodes {
                err = h.manager.shutdownNode(node)
                if err == nil {
                        continue
                }

                // Instead of returning the error, we log it. This is needed so
                // other nodes can continue their shutdown processes.
                h.Logf("unable to shutdown %s, got err: %v", node.Name(), err)
        }

        require.NoError(h, err, "failed to shutdown all nodes")
}

// SetTestName sets the test case name.
func (h *HarnessTest) SetTestName(name string) {
        cleanTestCaseName := strings.ReplaceAll(name, " ", "_")
        h.manager.currentTestCase = cleanTestCaseName
}

// NewNode creates a new node and asserts its creation. The node is guaranteed
// to have finished its initialization and all its subservers are started.
func (h *HarnessTest) NewNode(name string,
        extraArgs []string) *node.HarnessNode {

        node, err := h.manager.newNode(h.T, name, extraArgs, nil, false)
        require.NoErrorf(h, err, "unable to create new node for %s", name)

        // Start the node.
        err = node.Start(h.runCtx)
        require.NoError(h, err, "failed to start node %s", node.Name())

        // Get the miner's best block hash.
        bestBlock, err := h.miner.Client.GetBestBlockHash()
        require.NoError(h, err, "unable to get best block hash")

        // Wait until the node's chain backend is synced to the miner's best
        // block.
        h.WaitForBlockchainSyncTo(node, *bestBlock)

        return node
}

// NewNodeWithCoins creates a new node and asserts its creation. The node is
// guaranteed to have finished its initialization and all its subservers are
// started. In addition, 5 UTXOs of 1 BTC each are sent to the node.
func (h *HarnessTest) NewNodeWithCoins(name string,
        extraArgs []string) *node.HarnessNode {

        node := h.NewNode(name, extraArgs)

        // Load up the wallets of the node with 5 outputs of 1 BTC each.
        const (
                numOutputs  = 5
                fundAmount  = 1 * btcutil.SatoshiPerBitcoin
                totalAmount = fundAmount * numOutputs
        )

        for i := 0; i < numOutputs; i++ {
                h.createAndSendOutput(
                        node, fundAmount,
                        lnrpc.AddressType_WITNESS_PUBKEY_HASH,
                )
        }

        // Mine a block to confirm the transactions.
        h.MineBlocksAndAssertNumTxes(1, numOutputs)

        // Now block until the wallet has fully synced up.
        h.WaitForBalanceConfirmed(node, totalAmount)

        return node
}

// Shutdown shuts down the given node and asserts that no errors occur.
func (h *HarnessTest) Shutdown(node *node.HarnessNode) {
        err := h.manager.shutdownNode(node)
        require.NoErrorf(h, err, "unable to shutdown %v in %v", node.Name(),
                h.manager.currentTestCase)
}

// SuspendNode stops the given node and returns a callback that can be used to
// start it again.
func (h *HarnessTest) SuspendNode(node *node.HarnessNode) func() error {
        err := node.Stop()
        require.NoErrorf(h, err, "failed to stop %s", node.Name())

        // Remove the node from active nodes.
        delete(h.manager.activeNodes, node.Cfg.NodeID)

        return func() error {
                h.manager.registerNode(node)

                if err := node.Start(h.runCtx); err != nil {
                        return err
                }
                h.WaitForBlockchainSync(node)

                return nil
        }
}

// RestartNode restarts a given node, unlocks it and asserts it's successfully
// started.
func (h *HarnessTest) RestartNode(hn *node.HarnessNode) {
        err := h.manager.restartNode(h.runCtx, hn, nil)
        require.NoErrorf(h, err, "failed to restart node %s", hn.Name())

        err = h.manager.unlockNode(hn)
        require.NoErrorf(h, err, "failed to unlock node %s", hn.Name())

        if !hn.Cfg.SkipUnlock {
                // Give the node some time to catch up with the chain before we
                // continue with the tests.
                h.WaitForBlockchainSync(hn)
        }
}

// RestartNodeNoUnlock restarts a given node without unlocking its wallet.
func (h *HarnessTest) RestartNodeNoUnlock(hn *node.HarnessNode) {
        err := h.manager.restartNode(h.runCtx, hn, nil)
        require.NoErrorf(h, err, "failed to restart node %s", hn.Name())
}

// RestartNodeWithChanBackups restarts a given node with the specified channel
// backups.
func (h *HarnessTest) RestartNodeWithChanBackups(hn *node.HarnessNode,
        chanBackups ...*lnrpc.ChanBackupSnapshot) {

        err := h.manager.restartNode(h.runCtx, hn, nil)
        require.NoErrorf(h, err, "failed to restart node %s", hn.Name())

        err = h.manager.unlockNode(hn, chanBackups...)
        require.NoErrorf(h, err, "failed to unlock node %s", hn.Name())

        // Give the node some time to catch up with the chain before we
        // continue with the tests.
        h.WaitForBlockchainSync(hn)
}

// RestartNodeWithExtraArgs updates the node's config and restarts it.
func (h *HarnessTest) RestartNodeWithExtraArgs(hn *node.HarnessNode,
        extraArgs []string) {

        hn.SetExtraArgs(extraArgs)
        h.RestartNode(hn)
}

// NewNodeWithSeed fully initializes a new HarnessNode after creating a fresh
// aezeed. The provided password is used as both the aezeed password and the
// wallet password. The generated mnemonic is returned along with the
// initialized harness node.
func (h *HarnessTest) NewNodeWithSeed(name string,
        extraArgs []string, password []byte,
        statelessInit bool) (*node.HarnessNode, []string, []byte) {

        // Create a request to generate a new aezeed. The new seed will have
        // the same password as the internal wallet.
        req := &lnrpc.GenSeedRequest{
                AezeedPassphrase: password,
                SeedEntropy:      nil,
        }

        return h.newNodeWithSeed(name, extraArgs, req, statelessInit)
}

// newNodeWithSeed creates and initializes a new HarnessNode such that it'll be
// ready to accept RPC calls. A `GenSeedRequest` is needed to generate the
// seed.
func (h *HarnessTest) newNodeWithSeed(name string,
        extraArgs []string, req *lnrpc.GenSeedRequest,
        statelessInit bool) (*node.HarnessNode, []string, []byte) {

        node, err := h.manager.newNode(
                h.T, name, extraArgs, req.AezeedPassphrase, true,
        )
        require.NoErrorf(h, err, "unable to create new node for %s", name)

        // Start the node with seed only, which will only create the `State`
        // and `WalletUnlocker` clients.
        err = node.StartWithNoAuth(h.runCtx)
        require.NoErrorf(h, err, "failed to start node %s", node.Name())

        // Generate a new seed.
        genSeedResp := node.RPC.GenSeed(req)

        // With the seed created, construct the init request to the node,
        // including the newly generated seed.
        initReq := &lnrpc.InitWalletRequest{
                WalletPassword:     req.AezeedPassphrase,
                CipherSeedMnemonic: genSeedResp.CipherSeedMnemonic,
                AezeedPassphrase:   req.AezeedPassphrase,
                StatelessInit:      statelessInit,
        }

        // Pass the init request via rpc to finish unlocking the node. This
        // will also initialize the macaroon-authenticated LightningClient.
        adminMac, err := h.manager.initWalletAndNode(node, initReq)
        require.NoErrorf(h, err, "failed to unlock and init node %s",
                node.Name())

        // In stateless initialization mode we get a macaroon back that we have
        // to return to the test, otherwise gRPC calls won't be possible since
        // there are no macaroon files created in that mode.
        // In stateful init the admin macaroon will just be nil.
        return node, genSeedResp.CipherSeedMnemonic, adminMac
}

// RestoreNodeWithSeed fully initializes a HarnessNode using a chosen mnemonic,
// password, recovery window, and optionally a set of static channel backups.
// After providing the initialization request to unlock the node, this method
// will finish initializing the LightningClient such that the HarnessNode can
// be used for regular rpc operations.
func (h *HarnessTest) RestoreNodeWithSeed(name string, extraArgs []string,
        password []byte, mnemonic []string, rootKey string,
        recoveryWindow int32,
        chanBackups *lnrpc.ChanBackupSnapshot) *node.HarnessNode {

        n, err := h.manager.newNode(h.T, name, extraArgs, password, true)
        require.NoErrorf(h, err, "unable to create new node for %s", name)

        // Start the node with seed only, which will only create the `State`
        // and `WalletUnlocker` clients.
        err = n.StartWithNoAuth(h.runCtx)
        require.NoErrorf(h, err, "failed to start node %s", n.Name())

        // Create the wallet.
        initReq := &lnrpc.InitWalletRequest{
                WalletPassword:     password,
                CipherSeedMnemonic: mnemonic,
                AezeedPassphrase:   password,
                ExtendedMasterKey:  rootKey,
                RecoveryWindow:     recoveryWindow,
                ChannelBackups:     chanBackups,
        }
        _, err = h.manager.initWalletAndNode(n, initReq)
        require.NoErrorf(h, err, "failed to unlock and init node %s",
                n.Name())

        return n
}

// NewNodeEtcd starts a new node with seed that'll use an external etcd
// database as its storage. The passed cluster flag indicates that we'd like
// the node to join the cluster leader election. We won't wait until RPC is
// available (this is useful when the node is not expected to become the leader
// right away).
func (h *HarnessTest) NewNodeEtcd(name string, etcdCfg *etcd.Config,
        password []byte, cluster bool,
        leaderSessionTTL int) *node.HarnessNode {

        // We don't want to use the embedded etcd instance.
        h.manager.dbBackend = node.BackendBbolt

        extraArgs := node.ExtraArgsEtcd(
                etcdCfg, name, cluster, leaderSessionTTL,
        )
        node, err := h.manager.newNode(h.T, name, extraArgs, password, true)
        require.NoError(h, err, "failed to create new node with etcd")

        // Start the node daemon only.
        err = node.StartLndCmd(h.runCtx)
        require.NoError(h, err, "failed to start node %s", node.Name())

        return node
}

// NewNodeWithSeedEtcd starts a new node with seed that'll use an external etcd
// database as its storage. The passed cluster flag indicates that we'd like
// the node to join the cluster leader election.
func (h *HarnessTest) NewNodeWithSeedEtcd(name string, etcdCfg *etcd.Config,
        password []byte, statelessInit, cluster bool,
        leaderSessionTTL int) (*node.HarnessNode, []string, []byte) {

        // We don't want to use the embedded etcd instance.
        h.manager.dbBackend = node.BackendBbolt

        // Create a request to generate a new aezeed. The new seed will have
        // the same password as the internal wallet.
        req := &lnrpc.GenSeedRequest{
                AezeedPassphrase: password,
                SeedEntropy:      nil,
        }

        extraArgs := node.ExtraArgsEtcd(
                etcdCfg, name, cluster, leaderSessionTTL,
        )

        return h.newNodeWithSeed(name, extraArgs, req, statelessInit)
}

// NewNodeRemoteSigner creates a new remote signer node and asserts its
// creation.
func (h *HarnessTest) NewNodeRemoteSigner(name string, extraArgs []string,
        password []byte, watchOnly *lnrpc.WatchOnly) *node.HarnessNode {

        hn, err := h.manager.newNode(h.T, name, extraArgs, password, true)
        require.NoErrorf(h, err, "unable to create new node for %s", name)

        err = hn.StartWithNoAuth(h.runCtx)
        require.NoError(h, err, "failed to start node %s", name)

        // With the seed created, construct the init request to the node,
        // including the newly generated seed.
        initReq := &lnrpc.InitWalletRequest{
                WalletPassword: password,
                WatchOnly:      watchOnly,
        }

        // Pass the init request via rpc to finish unlocking the node. This
        // will also initialize the macaroon-authenticated LightningClient.
        _, err = h.manager.initWalletAndNode(hn, initReq)
        require.NoErrorf(h, err, "failed to init node %s", name)

        return hn
}

// KillNode kills the node and waits for the node process to stop.
func (h *HarnessTest) KillNode(hn *node.HarnessNode) {
        delete(h.manager.activeNodes, hn.Cfg.NodeID)

        h.Logf("Manually killing the node %s", hn.Name())
        require.NoErrorf(h, hn.KillAndWait(), "%s: kill got error", hn.Name())
}

// SetFeeEstimate sets a fee rate to be returned from the fee estimator.
//
// NOTE: this method will set the fee rate for a conf target of 1, which is the
// fallback fee rate for a `WebAPIEstimator` if a higher conf target's fee rate
// is not set. This means if the fee rate for conf target 6 is set, the fee
// estimator will use that value instead.
func (h *HarnessTest) SetFeeEstimate(fee chainfee.SatPerKWeight) {
        h.feeService.SetFeeRate(fee, 1)
}

// SetFeeEstimateWithConf sets a fee rate of a specified conf target to be
// returned from the fee estimator.
func (h *HarnessTest) SetFeeEstimateWithConf(
        fee chainfee.SatPerKWeight, conf uint32) {

        h.feeService.SetFeeRate(fee, conf)
}
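
// Illustrative sketch (not part of the original file, values are arbitrary):
// per the NOTE on SetFeeEstimate, a conf-target-specific rate takes precedence
// over the conf-target-1 fallback:
//
//    h.SetFeeEstimate(chainfee.SatPerKWeight(12500))
//    h.SetFeeEstimateWithConf(chainfee.SatPerKWeight(25000), 6)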

// SetMinRelayFeerate sets a min relay fee rate to be returned from the fee
// estimator.
func (h *HarnessTest) SetMinRelayFeerate(fee chainfee.SatPerKVByte) {
        h.feeService.SetMinRelayFeerate(fee)
}

// GetChanPointFundingTxid takes a channel point and converts it into a chain
// hash.
func (h *HarnessTest) GetChanPointFundingTxid(
        cp *lnrpc.ChannelPoint) chainhash.Hash {

        txid, err := lnrpc.GetChanPointFundingTxid(cp)
        require.NoError(h, err, "unable to get txid")

        return *txid
}

// OutPointFromChannelPoint creates an outpoint from a given channel point.
func (h *HarnessTest) OutPointFromChannelPoint(
        cp *lnrpc.ChannelPoint) wire.OutPoint {

        txid := h.GetChanPointFundingTxid(cp)
        return wire.OutPoint{
                Hash:  txid,
                Index: cp.OutputIndex,
        }
}

// OpenChannelParams houses the params to specify when opening a new channel.
type OpenChannelParams struct {
        // Amt is the local amount being put into the channel.
        Amt btcutil.Amount

        // PushAmt is the amount that should be pushed to the remote when the
        // channel is opened.
        PushAmt btcutil.Amount

        // Private is a boolean indicating whether the opened channel should be
        // private.
        Private bool

        // SpendUnconfirmed is a boolean indicating whether we can utilize
        // unconfirmed outputs to fund the channel.
        SpendUnconfirmed bool

        // MinHtlc is the htlc_minimum_msat value set when opening the channel.
        MinHtlc lnwire.MilliSatoshi

        // RemoteMaxHtlcs is the remote_max_htlcs value set when opening the
        // channel, restricting the number of concurrent HTLCs the remote party
        // can add to a commitment.
        RemoteMaxHtlcs uint16

        // FundingShim is an optional funding shim that the caller can specify
        // in order to modify the channel funding workflow.
        FundingShim *lnrpc.FundingShim

        // SatPerVByte is the amount of satoshis to spend in chain fees per
        // virtual byte of the transaction.
        SatPerVByte btcutil.Amount

        // ConfTarget is the number of blocks that the funding transaction
        // should be confirmed in.
        ConfTarget fn.Option[int32]

        // CommitmentType is the commitment type that should be used for the
        // channel to be opened.
        CommitmentType lnrpc.CommitmentType

        // ZeroConf is used to determine if the channel will be a zero-conf
        // channel. This only works if the explicit negotiation is used with
        // anchors or script enforced leases.
        ZeroConf bool

        // ScidAlias denotes whether the channel will be an option-scid-alias
        // channel type negotiation.
        ScidAlias bool

        // BaseFee is the channel base fee applied during the channel
        // announcement phase.
        BaseFee uint64

        // FeeRate is the channel fee rate in ppm applied during the channel
        // announcement phase.
        FeeRate uint64

        // UseBaseFee, if set, instructs the downstream logic to apply the
        // user-specified channel base fee to the channel update announcement.
        // If set to false it avoids applying a base fee of 0 and instead
        // activates the default configured base fee.
        UseBaseFee bool

        // UseFeeRate, if set, instructs the downstream logic to apply the
        // user-specified channel fee rate to the channel update announcement.
        // If set to false it avoids applying a fee rate of 0 and instead
        // activates the default configured fee rate.
        UseFeeRate bool

        // FundMax is a boolean indicating whether the channel should be funded
        // with the maximum possible amount from the wallet.
        FundMax bool

        // Memo is an optional note-to-self containing some useful information
        // about the channel. This is stored locally only, and is purely for
        // reference. It has no bearing on the channel's operation. Max allowed
        // length is 500 characters.
        Memo string

        // Outpoints is a list of client-selected outpoints that should be used
        // for funding a channel. If Amt is specified then this amount is
        // allocated from the sum of outpoints towards funding. If the
        // FundMax flag is specified the entirety of selected funds is
        // allocated towards channel funding.
        Outpoints []*lnrpc.OutPoint

        // CloseAddress sets the upfront_shutdown_script parameter during
        // channel open. It is expected to be encoded as a bitcoin address.
        CloseAddress string
}
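
// An OpenChannelParams value is normally built as a struct literal that sets
// only the fields a test cares about, leaving the rest at their zero values.
// Illustrative sketch (not part of the original file, amounts are arbitrary):
//
//    p := OpenChannelParams{
//        Amt:     btcutil.Amount(1_000_000),
//        PushAmt: btcutil.Amount(250_000),
//        Private: true,
//    }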
853

854
// prepareOpenChannel waits for both nodes to be synced to chain and returns an
855
// OpenChannelRequest.
856
func (h *HarnessTest) prepareOpenChannel(srcNode, destNode *node.HarnessNode,
857
        p OpenChannelParams) *lnrpc.OpenChannelRequest {
×
858

×
859
        // Wait until srcNode and destNode have the latest chain synced.
×
860
        // Otherwise, we may run into a check within the funding manager that
×
861
        // prevents any funding workflows from being kicked off if the chain
×
862
        // isn't yet synced.
×
863
        h.WaitForBlockchainSync(srcNode)
×
864
        h.WaitForBlockchainSync(destNode)
×
865

×
866
        // Specify the minimal confirmations of the UTXOs used for channel
×
867
        // funding.
×
868
        minConfs := int32(1)
×
869
        if p.SpendUnconfirmed {
×
870
                minConfs = 0
×
871
        }
×
872

873
        // Get the requested conf target. If not set, default to 6.
874
        confTarget := p.ConfTarget.UnwrapOr(6)
×
875

×
876
        // If there's fee rate set, unset the conf target.
×
877
        if p.SatPerVByte != 0 {
×
878
                confTarget = 0
×
879
        }
×
880

881
        // Prepare the request.
882
        return &lnrpc.OpenChannelRequest{
×
883
                NodePubkey:         destNode.PubKey[:],
×
884
                LocalFundingAmount: int64(p.Amt),
×
885
                PushSat:            int64(p.PushAmt),
×
886
                Private:            p.Private,
×
887
                TargetConf:         confTarget,
×
888
                MinConfs:           minConfs,
×
889
                SpendUnconfirmed:   p.SpendUnconfirmed,
×
890
                MinHtlcMsat:        int64(p.MinHtlc),
×
891
                RemoteMaxHtlcs:     uint32(p.RemoteMaxHtlcs),
×
892
                FundingShim:        p.FundingShim,
×
893
                SatPerVbyte:        uint64(p.SatPerVByte),
×
894
                CommitmentType:     p.CommitmentType,
×
895
                ZeroConf:           p.ZeroConf,
×
896
                ScidAlias:          p.ScidAlias,
×
897
                BaseFee:            p.BaseFee,
×
898
                FeeRate:            p.FeeRate,
×
899
                UseBaseFee:         p.UseBaseFee,
×
900
                UseFeeRate:         p.UseFeeRate,
×
901
                FundMax:            p.FundMax,
×
902
                Memo:               p.Memo,
×
903
                Outpoints:          p.Outpoints,
×
904
                CloseAddress:       p.CloseAddress,
×
905
        }
×
906
}
907

908
// OpenChannelAssertPending attempts to open a channel between srcNode and
909
// destNode with the passed channel funding parameters. Once the `OpenChannel`
910
// is called, it will consume the first event it receives from the open channel
911
// client and asserts it's a channel pending event.
912
func (h *HarnessTest) openChannelAssertPending(srcNode,
913
        destNode *node.HarnessNode,
914
        p OpenChannelParams) (*lnrpc.PendingUpdate, rpc.OpenChanClient) {
×
915

×
916
        // Prepare the request and open the channel.
×
917
        openReq := h.prepareOpenChannel(srcNode, destNode, p)
×
918
        respStream := srcNode.RPC.OpenChannel(openReq)
×
919

×
920
        // Consume the "channel pending" update. This waits until the node
×
921
        // notifies us that the final message in the channel funding workflow
×
922
        // has been sent to the remote node.
×
923
        resp := h.ReceiveOpenChannelUpdate(respStream)
×
924

×
925
        // Check that the update is channel pending.
×
926
        update, ok := resp.Update.(*lnrpc.OpenStatusUpdate_ChanPending)
×
927
        require.Truef(h, ok, "expected channel pending: update, instead got %v",
×
928
                resp)
×
929

×
930
        return update.ChanPending, respStream
×
931
}
×
932

933
// OpenChannelAssertPending attempts to open a channel between srcNode and
934
// destNode with the passed channel funding parameters. Once the `OpenChannel`
935
// is called, it will consume the first event it receives from the open channel
936
// client and asserts it's a channel pending event. It returns the
937
// `PendingUpdate`.
938
func (h *HarnessTest) OpenChannelAssertPending(srcNode,
939
        destNode *node.HarnessNode, p OpenChannelParams) *lnrpc.PendingUpdate {
×
940

×
941
        resp, _ := h.openChannelAssertPending(srcNode, destNode, p)
×
942
        return resp
×
943
}
×
944

945
// OpenChannelAssertStream attempts to open a channel between srcNode and
946
// destNode with the passed channel funding parameters. Once the `OpenChannel`
947
// is called, it will consume the first event it receives from the open channel
948
// client and asserts it's a channel pending event. It returns the open channel
949
// stream.
950
func (h *HarnessTest) OpenChannelAssertStream(srcNode,
951
        destNode *node.HarnessNode, p OpenChannelParams) rpc.OpenChanClient {
×
952

×
953
        _, stream := h.openChannelAssertPending(srcNode, destNode, p)
×
954
        return stream
×
955
}
×
956

957
// OpenChannel attempts to open a channel with the specified parameters
958
// extended from Alice to Bob. Additionally, for public channels, it will mine
959
// extra blocks so they are announced to the network. In specific, the
960
// following items are asserted,
961
//   - for non-zero conf channel, 1 blocks will be mined to confirm the funding
962
//     tx.
963
//   - both nodes should see the channel edge update in their network graph.
964
//   - both nodes can report the status of the new channel from ListChannels.
965
//   - extra blocks are mined if it's a public channel.
966
func (h *HarnessTest) OpenChannel(alice, bob *node.HarnessNode,
967
        p OpenChannelParams) *lnrpc.ChannelPoint {
×
968

×
969
        // First, open the channel without announcing it.
×
970
        cp := h.OpenChannelNoAnnounce(alice, bob, p)
×
971

×
972
        // If this is a private channel, there's no need to mine extra blocks
×
973
        // since it will never be announced to the network.
×
974
        if p.Private {
×
975
                return cp
×
976
        }
×
977

978
        // Mine extra blocks to announce the channel.
979
        if p.ZeroConf {
×
980
                // For a zero-conf channel, no blocks have been mined so we
×
981
                // need to mine 6 blocks.
×
982
                //
×
983
                // Mine 1 block to confirm the funding transaction.
×
984
                h.MineBlocksAndAssertNumTxes(numBlocksOpenChannel, 1)
×
985
        } else {
×
986
                // For a regular channel, 1 block has already been mined to
×
987
                // confirm the funding transaction, so we mine 5 blocks.
×
988
                h.MineBlocks(numBlocksOpenChannel - 1)
×
989
        }
×
990

991
        return cp
×
992
}
993

994
// OpenChannelNoAnnounce attempts to open a channel with the specified
995
// parameters extended from Alice to Bob without mining the necessary blocks to
996
// announce the channel. Additionally, the following items are asserted,
997
//   - for non-zero conf channel, 1 blocks will be mined to confirm the funding
998
//     tx.
999
//   - both nodes should see the channel edge update in their network graph.
1000
//   - both nodes can report the status of the new channel from ListChannels.
1001
func (h *HarnessTest) OpenChannelNoAnnounce(alice, bob *node.HarnessNode,
1002
        p OpenChannelParams) *lnrpc.ChannelPoint {
×
1003

×
1004
        chanOpenUpdate := h.OpenChannelAssertStream(alice, bob, p)
×
1005

×
1006
        // Open a zero conf channel.
×
1007
        if p.ZeroConf {
×
1008
                return h.openChannelZeroConf(alice, bob, chanOpenUpdate)
×
1009
        }
×
1010

1011
        // Open a non-zero conf channel.
1012
        return h.openChannel(alice, bob, chanOpenUpdate)
×
1013
}
1014

1015
// openChannel attempts to open a channel with the specified parameters
1016
// extended from Alice to Bob. Additionally, the following items are asserted,
1017
//   - 1 block is mined and the funding transaction should be found in it.
1018
//   - both nodes should see the channel edge update in their network graph.
1019
//   - both nodes can report the status of the new channel from ListChannels.
1020
func (h *HarnessTest) openChannel(alice, bob *node.HarnessNode,
1021
        stream rpc.OpenChanClient) *lnrpc.ChannelPoint {
×
1022

×
1023
        // Mine 1 block to confirm the funding transaction.
×
1024
        block := h.MineBlocksAndAssertNumTxes(1, 1)[0]
×
1025

×
1026
        // Wait for the channel open event.
×
1027
        fundingChanPoint := h.WaitForChannelOpenEvent(stream)
×
1028

×
1029
        // Check that the funding tx is found in the first block.
×
1030
        fundingTxID := h.GetChanPointFundingTxid(fundingChanPoint)
×
1031
        h.AssertTxInBlock(block, fundingTxID)
×
1032

×
1033
        // Check that both alice and bob have seen the channel from their
×
1034
        // network topology.
×
1035
        h.AssertChannelInGraph(alice, fundingChanPoint)
×
1036
        h.AssertChannelInGraph(bob, fundingChanPoint)
×
1037

×
1038
        // Check that the channel can be seen in their ListChannels.
×
1039
        h.AssertChannelExists(alice, fundingChanPoint)
×
1040
        h.AssertChannelExists(bob, fundingChanPoint)
×
1041

×
1042
        return fundingChanPoint
×
1043
}
×
1044

1045
// openChannelZeroConf attempts to open a channel with the specified parameters
1046
// extended from Alice to Bob. Additionally, the following items are asserted,
1047
//   - both nodes should see the channel edge update in their network graph.
1048
//   - both nodes can report the status of the new channel from ListChannels.
1049
func (h *HarnessTest) openChannelZeroConf(alice, bob *node.HarnessNode,
1050
        stream rpc.OpenChanClient) *lnrpc.ChannelPoint {
×
1051

×
1052
        // Wait for the channel open event.
×
1053
        fundingChanPoint := h.WaitForChannelOpenEvent(stream)
×
1054

×
1055
        // Check that both alice and bob have seen the channel from their
×
1056
        // network topology.
×
1057
        h.AssertChannelInGraph(alice, fundingChanPoint)
×
1058
        h.AssertChannelInGraph(bob, fundingChanPoint)
×
1059

×
1060
        // Finally, check that the channel can be seen in their ListChannels.
×
1061
        h.AssertChannelExists(alice, fundingChanPoint)
×
1062
        h.AssertChannelExists(bob, fundingChanPoint)
×
1063

×
1064
        return fundingChanPoint
×
1065
}
×
1066

1067
// OpenChannelAssertErr opens a channel between node srcNode and destNode,
1068
// asserts that the expected error is returned from the channel opening.
1069
func (h *HarnessTest) OpenChannelAssertErr(srcNode, destNode *node.HarnessNode,
1070
        p OpenChannelParams, expectedErr error) {
×
1071

×
1072
        // Prepare the request and open the channel.
×
1073
        openReq := h.prepareOpenChannel(srcNode, destNode, p)
×
1074
        respStream := srcNode.RPC.OpenChannel(openReq)
×
1075

×
1076
        // Receive an error to be sent from the stream.
×
1077
        _, err := h.receiveOpenChannelUpdate(respStream)
×
1078
        require.NotNil(h, err, "expected channel opening to fail")
×
1079

×
1080
        // Use string comparison here as we haven't codified all the RPC errors
×
1081
        // yet.
×
1082
        require.Containsf(h, err.Error(), expectedErr.Error(), "unexpected "+
×
1083
                "error returned, want %v, got %v", expectedErr, err)
×
1084
}
×
1085

1086
// closeChannelOpts holds the options for closing a channel.
1087
type closeChannelOpts struct {
1088
        feeRate fn.Option[chainfee.SatPerVByte]
1089

1090
        // localTxOnly is a boolean indicating if we should only attempt to
1091
        // consume close pending notifications for the local transaction.
1092
        localTxOnly bool
1093

1094
        // skipMempoolCheck is a boolean indicating if we should skip the normal
1095
        // mempool check after a coop close.
1096
        skipMempoolCheck bool
1097

1098
        // errString is an expected error. If this is non-blank, then we'll
1099
        // assert that the coop close wasn't possible, and returns an error that
1100
        // contains this err string.
1101
        errString string
1102
}
1103

1104
// CloseChanOpt is a functional option to modify the way we close a channel.
1105
type CloseChanOpt func(*closeChannelOpts)
1106

1107
// WithCoopCloseFeeRate is a functional option to set the fee rate for a coop
1108
// close attempt.
1109
func WithCoopCloseFeeRate(rate chainfee.SatPerVByte) CloseChanOpt {
×
1110
        return func(o *closeChannelOpts) {
×
1111
                o.feeRate = fn.Some(rate)
×
1112
        }
×
1113
}
1114

1115
// WithLocalTxNotify is a functional option to indicate that we should only
1116
// notify for the local txn. This is useful for the RBF coop close type, as
1117
// it'll notify for both local and remote txns.
1118
func WithLocalTxNotify() CloseChanOpt {
×
1119
        return func(o *closeChannelOpts) {
×
1120
                o.localTxOnly = true
×
1121
        }
×
1122
}
1123

1124
// WithSkipMempoolCheck is a functional option to indicate that we should skip
1125
// the mempool check. This can be used when a coop close iteration may not
1126
// result in a newly broadcast transaction.
1127
func WithSkipMempoolCheck() CloseChanOpt {
×
1128
        return func(o *closeChannelOpts) {
×
1129
                o.skipMempoolCheck = true
×
1130
        }
×
1131
}
1132

1133
// WithExpectedErrString is a functional option that can be used to assert that
1134
// an error occurs during the coop close process.
1135
func WithExpectedErrString(errString string) CloseChanOpt {
×
1136
        return func(o *closeChannelOpts) {
×
1137
                o.errString = errString
×
1138
        }
×
1139
}
1140

1141
// defaultCloseOpts returns the set of default close options.
1142
func defaultCloseOpts() *closeChannelOpts {
×
1143
        return &closeChannelOpts{}
×
1144
}
×
1145

1146
// CloseChannelAssertPending attempts to close the channel indicated by the
1147
// passed channel point, initiated by the passed node. Once the CloseChannel
1148
// rpc is called, it will consume one event and assert it's a close pending
1149
// event. In addition, it will check that the closing tx can be found in the
1150
// mempool.
1151
func (h *HarnessTest) CloseChannelAssertPending(hn *node.HarnessNode,
1152
        cp *lnrpc.ChannelPoint, force bool,
1153
        opts ...CloseChanOpt) (rpc.CloseChanClient, *lnrpc.CloseStatusUpdate) {
×
1154

×
1155
        closeOpts := defaultCloseOpts()
×
1156
        for _, optFunc := range opts {
×
1157
                optFunc(closeOpts)
×
1158
        }
×
1159

1160
        // Calls the rpc to close the channel.
1161
        closeReq := &lnrpc.CloseChannelRequest{
×
1162
                ChannelPoint: cp,
×
1163
                Force:        force,
×
1164
                NoWait:       true,
×
1165
        }
×
1166

×
1167
        closeOpts.feeRate.WhenSome(func(feeRate chainfee.SatPerVByte) {
×
1168
                closeReq.SatPerVbyte = uint64(feeRate)
×
1169
        })
×
1170

1171
        var (
×
1172
                stream rpc.CloseChanClient
×
1173
                event  *lnrpc.CloseStatusUpdate
×
1174
                err    error
×
1175
        )
×
1176

×
1177
        // Consume the "channel close" update in order to wait for the closing
×
1178
        // transaction to be broadcast, then wait for the closing tx to be seen
×
1179
        // within the network.
×
1180
        stream = hn.RPC.CloseChannel(closeReq)
×
1181
        _, err = h.ReceiveCloseChannelUpdate(stream)
×
1182
        require.NoError(h, err, "close channel update got error: %v", err)
×
1183

×
1184
        var closeTxid *chainhash.Hash
×
1185
        for {
×
1186
                event, err = h.ReceiveCloseChannelUpdate(stream)
×
1187
                if err != nil {
×
1188
                        h.Logf("Test: %s, close channel got error: %v",
×
1189
                                h.manager.currentTestCase, err)
×
1190
                }
×
1191
                if err != nil && closeOpts.errString == "" {
×
1192
                        require.NoError(h, err, "retry closing channel failed")
×
1193
                } else if err != nil && closeOpts.errString != "" {
×
1194
                        require.ErrorContains(h, err, closeOpts.errString)
×
1195
                        return nil, nil
×
1196
                }
×
1197

1198
                pendingClose, ok := event.Update.(*lnrpc.CloseStatusUpdate_ClosePending) //nolint:ll
×
1199
                require.Truef(h, ok, "expected channel close "+
×
1200
                        "update, instead got %v", pendingClose)
×
1201

×
1202
                if !pendingClose.ClosePending.LocalCloseTx &&
×
1203
                        closeOpts.localTxOnly {
×
1204

×
1205
                        continue
×
1206
                }
1207

1208
                notifyRate := pendingClose.ClosePending.FeePerVbyte
×
1209
                if closeOpts.localTxOnly &&
×
1210
                        notifyRate != int64(closeReq.SatPerVbyte) {
×
1211

×
1212
                        continue
×
1213
                }
1214

1215
                closeTxid, err = chainhash.NewHash(
×
1216
                        pendingClose.ClosePending.Txid,
×
1217
                )
×
1218
                require.NoErrorf(h, err, "unable to decode closeTxid: %v",
×
1219
                        pendingClose.ClosePending.Txid)
×
1220

×
1221
                break
×
1222
        }
1223

1224
        if !closeOpts.skipMempoolCheck {
×
1225
                // Assert the closing tx is in the mempool.
×
1226
                h.miner.AssertTxInMempool(*closeTxid)
×
1227
        }
×
1228

1229
        return stream, event
×
1230
}
1231

1232
// CloseChannel attempts to coop close a non-anchored channel identified by the
1233
// passed channel point owned by the passed harness node. The following items
1234
// are asserted,
1235
//  1. a close pending event is sent from the close channel client.
1236
//  2. the closing tx is found in the mempool.
1237
//  3. the node reports the channel being waiting to close.
1238
//  4. a block is mined and the closing tx should be found in it.
1239
//  5. the node reports zero waiting close channels.
1240
//  6. the node receives a topology update regarding the channel close.
1241
func (h *HarnessTest) CloseChannel(hn *node.HarnessNode,
1242
        cp *lnrpc.ChannelPoint) chainhash.Hash {
×
1243

×
1244
        stream, _ := h.CloseChannelAssertPending(hn, cp, false)
×
1245

×
1246
        return h.AssertStreamChannelCoopClosed(hn, cp, false, stream)
×
1247
}
×
1248

1249
// ForceCloseChannel attempts to force close a non-anchored channel identified
1250
// by the passed channel point owned by the passed harness node. The following
1251
// items are asserted,
1252
//  1. a close pending event is sent from the close channel client.
1253
//  2. the closing tx is found in the mempool.
1254
//  3. the node reports the channel being waiting to close.
1255
//  4. a block is mined and the closing tx should be found in it.
1256
//  5. the node reports zero waiting close channels.
1257
//  6. the node receives a topology update regarding the channel close.
1258
//  7. mine DefaultCSV-1 blocks.
1259
//  8. the node reports zero pending force close channels.
1260
func (h *HarnessTest) ForceCloseChannel(hn *node.HarnessNode,
1261
        cp *lnrpc.ChannelPoint) chainhash.Hash {
×
1262

×
1263
        stream, _ := h.CloseChannelAssertPending(hn, cp, true)
×
1264

×
1265
        closingTxid := h.AssertStreamChannelForceClosed(hn, cp, false, stream)
×
1266

×
1267
        // Cleanup the force close.
×
1268
        h.CleanupForceClose(hn)
×
1269

×
1270
        return closingTxid
×
1271
}

// CloseChannelAssertErr closes the given channel and asserts that an error is
// returned.
func (h *HarnessTest) CloseChannelAssertErr(hn *node.HarnessNode,
        req *lnrpc.CloseChannelRequest) error {

        // Calls the rpc to close the channel.
        stream := hn.RPC.CloseChannel(req)

        // Consume the "channel close" update in order to wait for the closing
        // transaction to be broadcast, then wait for the closing tx to be seen
        // within the network.
        _, err := h.ReceiveCloseChannelUpdate(stream)
        require.Errorf(h, err, "%s: expect close channel to return an error",
                hn.Name())

        return err
}

// IsNeutrinoBackend returns a bool indicating whether the node is using
// neutrino as its backend. This is useful when we want to skip certain tests
// which cannot be done with a neutrino backend.
func (h *HarnessTest) IsNeutrinoBackend() bool {
        return h.manager.chainBackend.Name() == NeutrinoBackendName
}

// fundCoins attempts to send amt satoshis from the internal mining node to the
// targeted lightning node. The confirmed boolean indicates whether the
// transaction that pays to the target should confirm. For neutrino backend,
// the `confirmed` param is ignored.
func (h *HarnessTest) fundCoins(amt btcutil.Amount, target *node.HarnessNode,
        addrType lnrpc.AddressType, confirmed bool) *wire.MsgTx {

        initialBalance := target.RPC.WalletBalance()

        // First, obtain an address from the target lightning node, preferring
        // to receive a p2wkh address s.t the output can immediately be used as
        // an input to a funding transaction.
        req := &lnrpc.NewAddressRequest{Type: addrType}
        resp := target.RPC.NewAddress(req)
        addr := h.DecodeAddress(resp.Address)
        addrScript := h.PayToAddrScript(addr)

        // Generate a transaction which creates an output to the target
        // pkScript of the desired amount.
        output := &wire.TxOut{
                PkScript: addrScript,
                Value:    int64(amt),
        }
        txid := h.miner.SendOutput(output, defaultMinerFeeRate)

        // Get the funding tx.
        tx := h.GetRawTransaction(*txid)
        msgTx := tx.MsgTx()

        // Since neutrino doesn't support unconfirmed outputs, skip this check.
        if !h.IsNeutrinoBackend() {
                expectedBalance := btcutil.Amount(
                        initialBalance.UnconfirmedBalance,
                ) + amt
                h.WaitForBalanceUnconfirmed(target, expectedBalance)
        }

        // If the transaction should remain unconfirmed, then we'll wait until
        // the target node's unconfirmed balance reflects the expected balance
        // and exit.
        if !confirmed {
                return msgTx
        }

        // Otherwise, we'll generate 1 new block to ensure the output gains a
        // sufficient number of confirmations and wait for the balance to
        // reflect what's expected.
        h.MineBlockWithTx(msgTx)

        expectedBalance := btcutil.Amount(initialBalance.ConfirmedBalance) + amt
        h.WaitForBalanceConfirmed(target, expectedBalance)

        return msgTx
}

// FundCoins attempts to send amt satoshis from the internal mining node to the
// targeted lightning node using a P2WKH address. One block is mined afterwards
// to confirm the transaction.
func (h *HarnessTest) FundCoins(amt btcutil.Amount,
        hn *node.HarnessNode) *wire.MsgTx {

        return h.fundCoins(amt, hn, lnrpc.AddressType_WITNESS_PUBKEY_HASH, true)
}

// FundCoinsUnconfirmed attempts to send amt satoshis from the internal mining
// node to the targeted lightning node using a P2WKH address. No blocks are
// mined after and the UTXOs are unconfirmed.
func (h *HarnessTest) FundCoinsUnconfirmed(amt btcutil.Amount,
        hn *node.HarnessNode) *wire.MsgTx {

        return h.fundCoins(
                amt, hn, lnrpc.AddressType_WITNESS_PUBKEY_HASH, false,
        )
}

// FundCoinsNP2WKH attempts to send amt satoshis from the internal mining node
// to the targeted lightning node using an NP2WKH address.
func (h *HarnessTest) FundCoinsNP2WKH(amt btcutil.Amount,
        target *node.HarnessNode) *wire.MsgTx {

        return h.fundCoins(
                amt, target, lnrpc.AddressType_NESTED_PUBKEY_HASH, true,
        )
}

// FundCoinsP2TR attempts to send amt satoshis from the internal mining node to
// the targeted lightning node using a P2TR address.
func (h *HarnessTest) FundCoinsP2TR(amt btcutil.Amount,
        target *node.HarnessNode) *wire.MsgTx {

        return h.fundCoins(amt, target, lnrpc.AddressType_TAPROOT_PUBKEY, true)
}
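
// exampleFundingSketch is a hypothetical usage sketch, not part of the
// original harness: it shows the typical pattern of giving a node on-chain
// funds before opening channels. The amounts are arbitrary.
func exampleFundingSketch(h *HarnessTest, alice *node.HarnessNode) {
        // A confirmed P2WKH UTXO of 1 BTC; one block is mined internally.
        h.FundCoins(btcutil.SatoshiPerBitcoin, alice)

        // An unconfirmed UTXO, useful when testing zero-conf spends. The
        // caller is responsible for mining a block later if it needs the
        // output confirmed.
        h.FundCoinsUnconfirmed(btcutil.SatoshiPerBitcoin, alice)
        h.MineBlocksAndAssertNumTxes(1, 1)
}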

// FundNumCoins attempts to send the given number of UTXOs from the internal
// mining node to the targeted lightning node using a P2WKH address. Each UTXO
// has an amount of 1 BTC. One block is mined to confirm the tx.
func (h *HarnessTest) FundNumCoins(hn *node.HarnessNode, num int) {
        // Get the initial balance first.
        resp := hn.RPC.WalletBalance()
        initialBalance := btcutil.Amount(resp.ConfirmedBalance)

        const fundAmount = 1 * btcutil.SatoshiPerBitcoin

        // Send out the outputs from the miner.
        for i := 0; i < num; i++ {
                h.createAndSendOutput(
                        hn, fundAmount, lnrpc.AddressType_WITNESS_PUBKEY_HASH,
                )
        }

        // Wait for ListUnspent to show the correct number of unconfirmed
        // UTXOs.
        //
        // Since neutrino doesn't support unconfirmed outputs, skip this check.
        if !h.IsNeutrinoBackend() {
                h.AssertNumUTXOsUnconfirmed(hn, num)
        }

        // Mine a block to confirm the transactions.
        h.MineBlocksAndAssertNumTxes(1, num)

        // Now block until the wallet has fully synced up.
        totalAmount := btcutil.Amount(fundAmount * num)
        expectedBalance := initialBalance + totalAmount
        h.WaitForBalanceConfirmed(hn, expectedBalance)
}

// completePaymentRequestsAssertStatus sends payments from a node to complete
// all payment requests. This function does not return until all payments
// have reached the specified status.
func (h *HarnessTest) completePaymentRequestsAssertStatus(hn *node.HarnessNode,
        paymentRequests []string, status lnrpc.Payment_PaymentStatus,
        opts ...HarnessOpt) {

        payOpts := defaultHarnessOpts()
        for _, opt := range opts {
                opt(&payOpts)
        }

        // Create a buffered chan to signal the results.
        results := make(chan rpc.PaymentClient, len(paymentRequests))

        // send sends a payment and asserts if it doesn't succeed.
        send := func(payReq string) {
                req := &routerrpc.SendPaymentRequest{
                        PaymentRequest: payReq,
                        TimeoutSeconds: int32(wait.PaymentTimeout.Seconds()),
                        FeeLimitMsat:   noFeeLimitMsat,
                        Amp:            payOpts.useAMP,
                }
                stream := hn.RPC.SendPayment(req)

                // Signal sent succeeded.
                results <- stream
        }

        // Launch all payments simultaneously.
        for _, payReq := range paymentRequests {
                payReqCopy := payReq
                go send(payReqCopy)
        }

        // Wait for all payments to report the expected status.
        timer := time.After(wait.PaymentTimeout)
        select {
        case stream := <-results:
                h.AssertPaymentStatusFromStream(stream, status)

        case <-timer:
                require.Fail(h, "timeout", "waiting payment results timeout")
        }
}

// CompletePaymentRequests sends payments from a node to complete all payment
// requests. This function does not return until all payments successfully
// complete without errors.
func (h *HarnessTest) CompletePaymentRequests(hn *node.HarnessNode,
        paymentRequests []string, opts ...HarnessOpt) {

        h.completePaymentRequestsAssertStatus(
                hn, paymentRequests, lnrpc.Payment_SUCCEEDED, opts...,
        )
}

// CompletePaymentRequestsNoWait sends payments from a node to complete all
// payment requests without waiting for the results. Instead, it checks that
// the number of updates in the specified channel has increased.
func (h *HarnessTest) CompletePaymentRequestsNoWait(hn *node.HarnessNode,
        paymentRequests []string, chanPoint *lnrpc.ChannelPoint) {

        // We start by getting the current state of the client's channels. This
        // is needed to ensure the payments actually have been committed before
        // we return.
        oldResp := h.GetChannelByChanPoint(hn, chanPoint)

        // Send payments and assert they are in-flight.
        h.completePaymentRequestsAssertStatus(
                hn, paymentRequests, lnrpc.Payment_IN_FLIGHT,
        )

        // We are not waiting for feedback in the form of a response, but we
        // should still wait long enough for the server to receive and handle
        // the send before cancelling the request. We wait until the number of
        // updates to one of our channels has increased before we return.
        err := wait.NoError(func() error {
                newResp := h.GetChannelByChanPoint(hn, chanPoint)

                // If this channel has an increased number of updates, we
                // assume the payments are committed, and we can return.
                if newResp.NumUpdates > oldResp.NumUpdates {
                        return nil
                }

                // Otherwise return an error as the NumUpdates is not
                // increased.
                return fmt.Errorf("%s: channel:%v not updated after sending "+
                        "payments, old updates: %v, new updates: %v", hn.Name(),
                        chanPoint, oldResp.NumUpdates, newResp.NumUpdates)
        }, DefaultTimeout)
        require.NoError(h, err, "timeout while checking for channel updates")
}

// OpenChannelPsbt attempts to open a channel between srcNode and destNode with
// the passed channel funding parameters. It will assert if the expected step
// of funding the PSBT is not received from the source node.
func (h *HarnessTest) OpenChannelPsbt(srcNode, destNode *node.HarnessNode,
        p OpenChannelParams) (rpc.OpenChanClient, []byte) {

        // Wait until srcNode and destNode have the latest chain synced.
        // Otherwise, we may run into a check within the funding manager that
        // prevents any funding workflows from being kicked off if the chain
        // isn't yet synced.
        h.WaitForBlockchainSync(srcNode)
        h.WaitForBlockchainSync(destNode)

        // Send the request to open a channel to the source node now. This will
        // open a long-lived stream where we'll receive status updates about
        // the progress of the channel.
        // respStream := h.OpenChannelStreamAndAssert(srcNode, destNode, p)
        req := &lnrpc.OpenChannelRequest{
                NodePubkey:         destNode.PubKey[:],
                LocalFundingAmount: int64(p.Amt),
                PushSat:            int64(p.PushAmt),
                Private:            p.Private,
                SpendUnconfirmed:   p.SpendUnconfirmed,
                MinHtlcMsat:        int64(p.MinHtlc),
                FundingShim:        p.FundingShim,
                CommitmentType:     p.CommitmentType,
        }
        respStream := srcNode.RPC.OpenChannel(req)

        // Consume the "PSBT funding ready" update. This waits until the node
        // notifies us that the PSBT can now be funded.
        resp := h.ReceiveOpenChannelUpdate(respStream)
        upd, ok := resp.Update.(*lnrpc.OpenStatusUpdate_PsbtFund)
        require.Truef(h, ok, "expected PSBT funding update, got %v", resp)

        // Make sure the channel funding address has the correct type for the
        // given commitment type.
        fundingAddr, err := btcutil.DecodeAddress(
                upd.PsbtFund.FundingAddress, miner.HarnessNetParams,
        )
        require.NoError(h, err)

        switch p.CommitmentType {
        case lnrpc.CommitmentType_SIMPLE_TAPROOT:
                require.IsType(h, &btcutil.AddressTaproot{}, fundingAddr)

        default:
                require.IsType(
                        h, &btcutil.AddressWitnessScriptHash{}, fundingAddr,
                )
        }

        return respStream, upd.PsbtFund.Psbt
}
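
// examplePsbtFundingSketch is a hypothetical sketch, not part of the original
// harness, of how a test might continue after OpenChannelPsbt. It assumes the
// OpenChannelParams carry a PSBT funding shim for pendingChanID, and that
// signedPsbt is the returned template PSBT after it has been funded and signed
// by an external wallet; the verify/finalize triggers follow lnd's PSBT
// funding workflow and are named here from memory rather than from this file.
func examplePsbtFundingSketch(h *HarnessTest, alice, bob *node.HarnessNode,
        p OpenChannelParams, pendingChanID, signedPsbt []byte) {

        // Kick off the open and wait for the "PSBT funding ready" update.
        respStream, _ := h.OpenChannelPsbt(alice, bob, p)

        // Hand the externally funded PSBT back to the initiator for
        // verification, then finalize it so lnd can publish the funding tx.
        alice.RPC.FundingStateStep(&lnrpc.FundingTransitionMsg{
                Trigger: &lnrpc.FundingTransitionMsg_PsbtVerify{
                        PsbtVerify: &lnrpc.FundingPsbtVerify{
                                PendingChanId: pendingChanID,
                                FundedPsbt:    signedPsbt,
                        },
                },
        })
        alice.RPC.FundingStateStep(&lnrpc.FundingTransitionMsg{
                Trigger: &lnrpc.FundingTransitionMsg_PsbtFinalize{
                        PsbtFinalize: &lnrpc.FundingPsbtFinalize{
                                PendingChanId: pendingChanID,
                                SignedPsbt:    signedPsbt,
                        },
                },
        })

        // Confirm the published funding tx, then the channel open update
        // should arrive on the stream.
        h.MineBlocksAndAssertNumTxes(numBlocksOpenChannel, 1)
        h.WaitForChannelOpenEvent(respStream)
}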

// CleanupForceClose mines blocks to clean up the force close process. This is
// used for tests that are not asserting the expected behavior is found during
// the force close process, e.g., num of sweeps, etc. Instead, it provides a
// shortcut to move the test forward with a clean mempool.
func (h *HarnessTest) CleanupForceClose(hn *node.HarnessNode) {
        // Wait for the channel to be marked pending force close.
        h.AssertNumPendingForceClose(hn, 1)

        // Mine enough blocks for the node to sweep its funds from the force
        // closed channel. The commit sweep resolver offers the input to the
        // sweeper when the channel is force closed, and the sweep tx is
        // broadcast at defaultCSV-1.
        //
        // NOTE: we might mine empty blocks here as we don't know the exact
        // number of blocks to mine. This may end up mining more blocks than
        // needed.
        h.MineEmptyBlocks(node.DefaultCSV - 1)

        // Assert there is one pending sweep.
        h.AssertNumPendingSweeps(hn, 1)

        // The node should now sweep the funds, clean up by mining the sweeping
        // tx.
        h.MineBlocksAndAssertNumTxes(1, 1)

        // Mine blocks to get any second level HTLC resolved. If there are no
        // HTLCs, this will behave like h.AssertNumPendingCloseChannels.
        h.mineTillForceCloseResolved(hn)
}

// CreatePayReqs is a helper method that will create a slice of payment
// requests for the given node.
func (h *HarnessTest) CreatePayReqs(hn *node.HarnessNode,
        paymentAmt btcutil.Amount, numInvoices int,
        routeHints ...*lnrpc.RouteHint) ([]string, [][]byte, []*lnrpc.Invoice) {

        payReqs := make([]string, numInvoices)
        rHashes := make([][]byte, numInvoices)
        invoices := make([]*lnrpc.Invoice, numInvoices)
        for i := 0; i < numInvoices; i++ {
                preimage := h.Random32Bytes()

                invoice := &lnrpc.Invoice{
                        Memo:       "testing",
                        RPreimage:  preimage,
                        Value:      int64(paymentAmt),
                        RouteHints: routeHints,
                }
                resp := hn.RPC.AddInvoice(invoice)

                // Set the payment address in the invoice so the caller can
                // properly use it.
                invoice.PaymentAddr = resp.PaymentAddr

                payReqs[i] = resp.PaymentRequest
                rHashes[i] = resp.RHash
                invoices[i] = invoice
        }

        return payReqs, rHashes, invoices
}
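
// exampleInvoicePaymentSketch is a hypothetical usage sketch, not part of the
// original harness: it creates a few invoices on bob and lets alice pay them,
// combining CreatePayReqs with CompletePaymentRequests. It assumes alice and
// bob already share an open channel with sufficient outbound capacity.
func exampleInvoicePaymentSketch(h *HarnessTest, alice, bob *node.HarnessNode) {
        const (
                paymentAmt  = btcutil.Amount(10_000)
                numInvoices = 3
        )

        // Create the invoices on the receiving node.
        payReqs, _, _ := h.CreatePayReqs(bob, paymentAmt, numInvoices)

        // Pay them all from alice and block until every payment succeeds.
        h.CompletePaymentRequests(alice, payReqs)
}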

// BackupDB creates a backup of the current database. It will stop the node
// first, copy the database files, and restart the node.
func (h *HarnessTest) BackupDB(hn *node.HarnessNode) {
        restart := h.SuspendNode(hn)

        err := hn.BackupDB()
        require.NoErrorf(h, err, "%s: failed to backup db", hn.Name())

        err = restart()
        require.NoErrorf(h, err, "%s: failed to restart", hn.Name())
}

// RestartNodeAndRestoreDB restarts a given node with a callback to restore the
// db.
func (h *HarnessTest) RestartNodeAndRestoreDB(hn *node.HarnessNode) {
        cb := func() error { return hn.RestoreDB() }
        err := h.manager.restartNode(h.runCtx, hn, cb)
        require.NoErrorf(h, err, "failed to restart node %s", hn.Name())

        err = h.manager.unlockNode(hn)
        require.NoErrorf(h, err, "failed to unlock node %s", hn.Name())

        // Give the node some time to catch up with the chain before we
        // continue with the tests.
        h.WaitForBlockchainSync(hn)
}

// CleanShutDown is used to quickly end a test by shutting down all non-standby
// nodes and mining blocks to empty the mempool.
//
// NOTE: this method provides a faster exit for a test that involves force
// closures as the caller doesn't need to mine all the blocks to make sure the
// mempool is empty.
func (h *HarnessTest) CleanShutDown() {
        // First, shutdown all nodes to prevent new transactions being created
        // and fed into the mempool.
        h.shutdownAllNodes()

        // Now mine blocks till the mempool is empty.
        h.cleanMempool()
}

// QueryChannelByChanPoint tries to find a channel matching the channel point
// and asserts. It returns the channel found.
func (h *HarnessTest) QueryChannelByChanPoint(hn *node.HarnessNode,
        chanPoint *lnrpc.ChannelPoint,
        opts ...ListChannelOption) *lnrpc.Channel {

        channel, err := h.findChannel(hn, chanPoint, opts...)
        require.NoError(h, err, "failed to query channel")

        return channel
}

// SendPaymentAndAssertStatus sends a payment from the passed node and asserts
// the desired status is reached.
func (h *HarnessTest) SendPaymentAndAssertStatus(hn *node.HarnessNode,
        req *routerrpc.SendPaymentRequest,
        status lnrpc.Payment_PaymentStatus) *lnrpc.Payment {

        stream := hn.RPC.SendPayment(req)
        return h.AssertPaymentStatusFromStream(stream, status)
}

// SendPaymentAssertFail sends a payment from the passed node and asserts the
// payment fails with the specified failure reason.
func (h *HarnessTest) SendPaymentAssertFail(hn *node.HarnessNode,
        req *routerrpc.SendPaymentRequest,
        reason lnrpc.PaymentFailureReason) *lnrpc.Payment {

        payment := h.SendPaymentAndAssertStatus(hn, req, lnrpc.Payment_FAILED)
        require.Equal(h, reason, payment.FailureReason,
                "payment failureReason not matched")

        return payment
}

// SendPaymentAssertSettled sends a payment from the passed node and asserts the
// payment is settled.
func (h *HarnessTest) SendPaymentAssertSettled(hn *node.HarnessNode,
        req *routerrpc.SendPaymentRequest) *lnrpc.Payment {

        return h.SendPaymentAndAssertStatus(hn, req, lnrpc.Payment_SUCCEEDED)
}

// SendPaymentAssertInflight sends a payment from the passed node and asserts
// the payment is inflight.
func (h *HarnessTest) SendPaymentAssertInflight(hn *node.HarnessNode,
        req *routerrpc.SendPaymentRequest) *lnrpc.Payment {

        return h.SendPaymentAndAssertStatus(hn, req, lnrpc.Payment_IN_FLIGHT)
}

// OpenChannelRequest is used to open a channel using the method
// OpenMultiChannelsAsync.
type OpenChannelRequest struct {
        // Local is the funding node.
        Local *node.HarnessNode

        // Remote is the receiving node.
        Remote *node.HarnessNode

        // Param is the open channel params.
        Param OpenChannelParams

        // stream is the client created after calling OpenChannel RPC.
        stream rpc.OpenChanClient

        // result is a channel used to send the channel point once the funding
        // has succeeded.
        result chan *lnrpc.ChannelPoint
}

// OpenMultiChannelsAsync takes a list of OpenChannelRequest and opens them in
// batch. The channel points are returned in the same order as the requests
// once all of the channel opens have succeeded.
//
// NOTE: compared to opening multiple channels sequentially, this method will
// be faster as it doesn't need to mine 6 blocks for each channel open.
// However, it does make debugging the logs more difficult as messages are
// intertwined.
func (h *HarnessTest) OpenMultiChannelsAsync(
        reqs []*OpenChannelRequest) []*lnrpc.ChannelPoint {

        // openChannel opens a channel based on the request.
        openChannel := func(req *OpenChannelRequest) {
                stream := h.OpenChannelAssertStream(
                        req.Local, req.Remote, req.Param,
                )
                req.stream = stream
        }

        // assertChannelOpen is a helper closure that asserts a channel is
        // open.
        assertChannelOpen := func(req *OpenChannelRequest) {
                // Wait for the channel open event from the stream.
                cp := h.WaitForChannelOpenEvent(req.stream)

                if !req.Param.Private {
                        // Check that both nodes have seen the channel from
                        // their channel watch request.
                        h.AssertChannelInGraph(req.Local, cp)
                        h.AssertChannelInGraph(req.Remote, cp)
                }

                // Finally, check that the channel can be seen in their
                // ListChannels.
                h.AssertChannelExists(req.Local, cp)
                h.AssertChannelExists(req.Remote, cp)

                req.result <- cp
        }

        // Go through the requests and make the OpenChannel RPC call.
        for _, r := range reqs {
                openChannel(r)
        }

        // Mine one block to confirm all the funding transactions.
        h.MineBlocksAndAssertNumTxes(1, len(reqs))

        // Mine 5 more blocks so all the public channels are announced to the
        // network.
        h.MineBlocks(numBlocksOpenChannel - 1)

        // Once the blocks are mined, we fire goroutines for each of the
        // requests to watch for the channel opening.
        for _, r := range reqs {
                r.result = make(chan *lnrpc.ChannelPoint, 1)
                go assertChannelOpen(r)
        }

        // Finally, collect the results.
        channelPoints := make([]*lnrpc.ChannelPoint, 0)
        for _, r := range reqs {
                select {
                case cp := <-r.result:
                        channelPoints = append(channelPoints, cp)

                case <-time.After(wait.ChannelOpenTimeout):
                        require.Failf(h, "timeout", "wait channel point "+
                                "timeout for channel %s=>%s", r.Local.Name(),
                                r.Remote.Name())
                }
        }

        // Assert that we have the expected num of channel points.
        require.Len(h, channelPoints, len(reqs),
                "returned channel points not match")

        return channelPoints
}
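
// exampleBatchOpenSketch is a hypothetical usage sketch, not part of the
// original harness: it opens two channels alice->bob and bob->carol in one
// batch so only a single set of confirmation blocks is mined. It assumes the
// three nodes are already connected and funded.
func exampleBatchOpenSketch(h *HarnessTest, alice, bob,
        carol *node.HarnessNode) []*lnrpc.ChannelPoint {

        p := OpenChannelParams{Amt: btcutil.Amount(1_000_000)}
        reqs := []*OpenChannelRequest{
                {Local: alice, Remote: bob, Param: p},
                {Local: bob, Remote: carol, Param: p},
        }

        // The channel points come back in the same order as the requests.
        return h.OpenMultiChannelsAsync(reqs)
}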

// ReceiveInvoiceUpdate waits until a message is received on the subscribe
// invoice stream or the timeout is reached.
func (h *HarnessTest) ReceiveInvoiceUpdate(
        stream rpc.InvoiceUpdateClient) *lnrpc.Invoice {

        chanMsg := make(chan *lnrpc.Invoice)
        errChan := make(chan error)
        go func() {
                // Consume one message. This will block until the message is
                // received.
                resp, err := stream.Recv()
                if err != nil {
                        errChan <- err
                        return
                }
                chanMsg <- resp
        }()

        select {
        case <-time.After(DefaultTimeout):
                require.Fail(h, "timeout", "timeout receiving invoice update")

        case err := <-errChan:
                require.Failf(h, "err from stream",
                        "received err from stream: %v", err)

        case updateMsg := <-chanMsg:
                return updateMsg
        }

        return nil
}

// CalculateTxFee retrieves parent transactions and reconstructs the fee paid.
func (h *HarnessTest) CalculateTxFee(tx *wire.MsgTx) btcutil.Amount {
        var balance btcutil.Amount
        for _, in := range tx.TxIn {
                parentHash := in.PreviousOutPoint.Hash
                rawTx := h.miner.GetRawTransaction(parentHash)
                parent := rawTx.MsgTx()
                value := parent.TxOut[in.PreviousOutPoint.Index].Value

                balance += btcutil.Amount(value)
        }

        for _, out := range tx.TxOut {
                balance -= btcutil.Amount(out.Value)
        }

        return balance
}

// CalculateTxWeight calculates the weight for a given tx.
//
// TODO(yy): use weight estimator to get more accurate result.
func (h *HarnessTest) CalculateTxWeight(tx *wire.MsgTx) lntypes.WeightUnit {
        utx := btcutil.NewTx(tx)
        return lntypes.WeightUnit(blockchain.GetTransactionWeight(utx))
}

// CalculateTxFeeRate calculates the fee rate for a given tx.
func (h *HarnessTest) CalculateTxFeeRate(
        tx *wire.MsgTx) chainfee.SatPerKWeight {

        w := h.CalculateTxWeight(tx)
        fee := h.CalculateTxFee(tx)

        return chainfee.NewSatPerKWeight(fee, w)
}
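
// exampleFeeRateSketch is a hypothetical sketch, not part of the original
// harness: it shows how the three calculators above compose. The fee is the
// sum of the spent inputs minus the sum of the outputs, and the fee rate is
// that fee divided by the tx weight, expressed in sat/kw via
// chainfee.NewSatPerKWeight.
func exampleFeeRateSketch(h *HarnessTest, tx *wire.MsgTx) {
        fee := h.CalculateTxFee(tx)       // inputs - outputs, in satoshis.
        weight := h.CalculateTxWeight(tx) // weight units of the tx.
        feeRate := h.CalculateTxFeeRate(tx)

        // For example, a 300 sat fee on a 600 WU transaction works out to
        // 300 * 1000 / 600 = 500 sat/kw.
        h.Logf("fee=%v weight=%v feeRate=%v", fee, weight, feeRate)
}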

// CalculateTxesFeeRate takes a list of transactions and estimates the fee rate
// used to sweep them.
//
// NOTE: only used in current test file.
func (h *HarnessTest) CalculateTxesFeeRate(txns []*wire.MsgTx) int64 {
        const scale = 1000

        var totalWeight, totalFee int64
        for _, tx := range txns {
                utx := btcutil.NewTx(tx)
                totalWeight += blockchain.GetTransactionWeight(utx)

                fee := h.CalculateTxFee(tx)
                totalFee += int64(fee)
        }
        feeRate := totalFee * scale / totalWeight

        return feeRate
}

// AssertSweepFound looks up a sweep in a node's list of broadcast sweeps and
// asserts it's found.
//
// NOTE: Does not account for node's internal state.
func (h *HarnessTest) AssertSweepFound(hn *node.HarnessNode,
        sweep string, verbose bool, startHeight int32) {

        err := wait.NoError(func() error {
                // List all sweeps that the node has broadcast.
                sweepResp := hn.RPC.ListSweeps(verbose, startHeight)

                var found bool
                if verbose {
                        found = findSweepInDetails(h, sweep, sweepResp)
                } else {
                        found = findSweepInTxids(h, sweep, sweepResp)
                }

                if found {
                        return nil
                }

                return fmt.Errorf("sweep tx %v not found in resp %v", sweep,
                        sweepResp)
        }, wait.DefaultTimeout)
        require.NoError(h, err, "%s: timeout checking sweep tx", hn.Name())
}

func findSweepInTxids(ht *HarnessTest, sweepTxid string,
        sweepResp *walletrpc.ListSweepsResponse) bool {

        sweepTxIDs := sweepResp.GetTransactionIds()
        require.NotNil(ht, sweepTxIDs, "expected transaction ids")
        require.Nil(ht, sweepResp.GetTransactionDetails())

        // Check that the sweep tx we have just produced is present.
        for _, tx := range sweepTxIDs.TransactionIds {
                if tx == sweepTxid {
                        return true
                }
        }

        return false
}

func findSweepInDetails(ht *HarnessTest, sweepTxid string,
        sweepResp *walletrpc.ListSweepsResponse) bool {

        sweepDetails := sweepResp.GetTransactionDetails()
        require.NotNil(ht, sweepDetails, "expected transaction details")
        require.Nil(ht, sweepResp.GetTransactionIds())

        for _, tx := range sweepDetails.Transactions {
                if tx.TxHash == sweepTxid {
                        return true
                }
        }

        return false
}

// QueryRoutesAndRetry attempts to keep querying a route until timeout is
// reached.
//
// NOTE: when a channel is opened, we may need to query multiple times to get
// it in our QueryRoutes RPC. This happens even after we check the channel is
// heard by the node using ht.AssertChannelOpen. Deep down, this is because our
// GraphTopologySubscription and QueryRoutes give different results regarding a
// specific channel, with the former reporting it as open while the latter does
// not, resulting in GraphTopologySubscription acting "faster" than
// QueryRoutes.
// TODO(yy): make sure related subsystems share the same view on a given
// channel.
func (h *HarnessTest) QueryRoutesAndRetry(hn *node.HarnessNode,
        req *lnrpc.QueryRoutesRequest) *lnrpc.QueryRoutesResponse {

        var routes *lnrpc.QueryRoutesResponse
        err := wait.NoError(func() error {
                ctxt, cancel := context.WithCancel(h.runCtx)
                defer cancel()

                resp, err := hn.RPC.LN.QueryRoutes(ctxt, req)
                if err != nil {
                        return fmt.Errorf("%s: failed to query route: %w",
                                hn.Name(), err)
                }

                routes = resp

                return nil
        }, DefaultTimeout)

        require.NoError(h, err, "timeout querying routes")

        return routes
}
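
// exampleQueryRoutesSketch is a hypothetical usage sketch, not part of the
// original harness: right after a channel to the destination has been opened,
// the retrying helper above is preferred over a bare QueryRoutes call since
// the router may briefly lag behind the graph subscription. PubKeyStr is
// assumed to be the harness node's hex-encoded identity key, and the amount
// is arbitrary.
func exampleQueryRoutesSketch(h *HarnessTest, alice,
        bob *node.HarnessNode) *lnrpc.QueryRoutesResponse {

        req := &lnrpc.QueryRoutesRequest{
                PubKey: bob.PubKeyStr,
                Amt:    10_000,
        }

        return h.QueryRoutesAndRetry(alice, req)
}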

// ReceiveHtlcInterceptor waits until a message is received on the htlc
// interceptor stream or the timeout is reached.
func (h *HarnessTest) ReceiveHtlcInterceptor(
        stream rpc.InterceptorClient) *routerrpc.ForwardHtlcInterceptRequest {

        chanMsg := make(chan *routerrpc.ForwardHtlcInterceptRequest)
        errChan := make(chan error)
        go func() {
                // Consume one message. This will block until the message is
                // received.
                resp, err := stream.Recv()
                if err != nil {
                        errChan <- err
                        return
                }
                chanMsg <- resp
        }()

        select {
        case <-time.After(DefaultTimeout):
                require.Fail(h, "timeout", "timeout intercepting htlc")

        case err := <-errChan:
                require.Failf(h, "err from HTLC interceptor stream",
                        "received err from HTLC interceptor stream: %v", err)

        case updateMsg := <-chanMsg:
                return updateMsg
        }

        return nil
}

// ReceiveInvoiceHtlcModification waits until a message is received on the
// invoice HTLC modifier stream or the timeout is reached.
func (h *HarnessTest) ReceiveInvoiceHtlcModification(
        stream rpc.InvoiceHtlcModifierClient) *invoicesrpc.HtlcModifyRequest {

        chanMsg := make(chan *invoicesrpc.HtlcModifyRequest)
        errChan := make(chan error)
        go func() {
                // Consume one message. This will block until the message is
                // received.
                resp, err := stream.Recv()
                if err != nil {
                        errChan <- err
                        return
                }
                chanMsg <- resp
        }()

        select {
        case <-time.After(DefaultTimeout):
                require.Fail(h, "timeout", "timeout invoice HTLC modifier")

        case err := <-errChan:
                require.Failf(h, "err from invoice HTLC modifier stream",
                        "received err from invoice HTLC modifier stream: %v",
                        err)

        case updateMsg := <-chanMsg:
                return updateMsg
        }

        return nil
}

// ReceiveChannelEvent waits until a message is received from the
// ChannelEventsClient stream or the timeout is reached.
func (h *HarnessTest) ReceiveChannelEvent(
        stream rpc.ChannelEventsClient) *lnrpc.ChannelEventUpdate {

        chanMsg := make(chan *lnrpc.ChannelEventUpdate)
        errChan := make(chan error)
        go func() {
                // Consume one message. This will block until the message is
                // received.
                resp, err := stream.Recv()
                if err != nil {
                        errChan <- err
                        return
                }
                chanMsg <- resp
        }()

        select {
        case <-time.After(DefaultTimeout):
                require.Fail(h, "timeout", "timeout receiving channel event")

        case err := <-errChan:
                require.Failf(h, "err from stream",
                        "received err from stream: %v", err)

        case updateMsg := <-chanMsg:
                return updateMsg
        }

        return nil
}

// GetOutputIndex returns the output index of the given address in the given
// transaction.
func (h *HarnessTest) GetOutputIndex(txid chainhash.Hash, addr string) int {
        // We'll then extract the raw transaction from the mempool in order to
        // determine the index of the p2tr output.
        tx := h.miner.GetRawTransaction(txid)

        p2trOutputIndex := -1
        for i, txOut := range tx.MsgTx().TxOut {
                _, addrs, _, err := txscript.ExtractPkScriptAddrs(
                        txOut.PkScript, h.miner.ActiveNet,
                )
                require.NoError(h, err)

                if addrs[0].String() == addr {
                        p2trOutputIndex = i
                }
        }
        require.Greater(h, p2trOutputIndex, -1)

        return p2trOutputIndex
}

// SendCoins sends coins of the given amount from node A to node B and returns
// the sending tx.
func (h *HarnessTest) SendCoins(a, b *node.HarnessNode,
        amt btcutil.Amount) *wire.MsgTx {

        // Create an address for node B to receive the coins.
        req := &lnrpc.NewAddressRequest{
                Type: lnrpc.AddressType_TAPROOT_PUBKEY,
        }
        resp := b.RPC.NewAddress(req)

        // Send the coins from node A to node B. We should expect a tx to be
        // broadcast and seen in the mempool.
        sendReq := &lnrpc.SendCoinsRequest{
                Addr:       resp.Address,
                Amount:     int64(amt),
                TargetConf: 6,
        }
        a.RPC.SendCoins(sendReq)
        tx := h.GetNumTxsFromMempool(1)[0]

        return tx
}

// SendAllCoins sends all coins from node A to node B and returns the sending
// tx.
func (h *HarnessTest) SendAllCoins(a, b *node.HarnessNode) *wire.MsgTx {
        // Create an address for node B to receive the coins.
        req := &lnrpc.NewAddressRequest{
                Type: lnrpc.AddressType_TAPROOT_PUBKEY,
        }
        resp := b.RPC.NewAddress(req)

        // Send the coins from node A to node B. We should expect a tx to be
        // broadcast and seen in the mempool.
        sendReq := &lnrpc.SendCoinsRequest{
                Addr:             resp.Address,
                TargetConf:       6,
                SendAll:          true,
                SpendUnconfirmed: true,
        }
        a.RPC.SendCoins(sendReq)
        tx := h.GetNumTxsFromMempool(1)[0]

        return tx
}
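
// exampleSendCoinsSketch is a hypothetical usage sketch, not part of the
// original harness: it moves funds between two nodes' wallets and confirms
// the transfers, which is handy when a test needs to drain or reshuffle
// on-chain balances between nodes.
func exampleSendCoinsSketch(h *HarnessTest, alice, bob *node.HarnessNode) {
        // Send a fixed amount, then sweep whatever is left.
        h.SendCoins(alice, bob, btcutil.Amount(100_000))
        h.MineBlocksAndAssertNumTxes(1, 1)

        h.SendAllCoins(alice, bob)
        h.MineBlocksAndAssertNumTxes(1, 1)
}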

// CreateSimpleNetwork creates the number of nodes specified by the number of
// configs and makes a topology of `node1 -> node2 -> node3...`. Each node is
// created using the specified config, the neighbors are connected, and the
// channels are opened. Each node will be funded with a single UTXO of 1 BTC
// except the last one.
//
// For instance, to create a network with 2 nodes that share the same node
// config,
//
//        cfg := []string{"--protocol.anchors"}
//        cfgs := [][]string{cfg, cfg}
//        params := OpenChannelParams{...}
//        chanPoints, nodes := ht.CreateSimpleNetwork(cfgs, params)
//
// This will create two nodes and open an anchor channel between them.
func (h *HarnessTest) CreateSimpleNetwork(nodeCfgs [][]string,
        p OpenChannelParams) ([]*lnrpc.ChannelPoint, []*node.HarnessNode) {

        // Create new nodes.
        nodes := h.createNodes(nodeCfgs)

        var resp []*lnrpc.ChannelPoint

        // Open zero-conf channels if specified.
        if p.ZeroConf {
                resp = h.openZeroConfChannelsForNodes(nodes, p)
        } else {
                // Open channels between the nodes.
                resp = h.openChannelsForNodes(nodes, p)
        }

        return resp, nodes
}

// acceptChannel is used to accept a single channel that comes across. This
// should be run in a goroutine and is used to test nodes with the zero-conf
// feature bit.
func acceptChannel(t *testing.T, zeroConf bool, stream rpc.AcceptorClient) {
        req, err := stream.Recv()
        require.NoError(t, err)

        resp := &lnrpc.ChannelAcceptResponse{
                Accept:        true,
                PendingChanId: req.PendingChanId,
                ZeroConf:      zeroConf,
        }
        err = stream.Send(resp)
        require.NoError(t, err)
}

// nodeNames defines a slice of human-readable names for the nodes created in
// the `createNodes` method. 8 nodes are defined here as by default we can only
// create this many nodes in one test.
var nodeNames = []string{
        "Alice", "Bob", "Carol", "Dave", "Eve", "Frank", "Grace", "Heidi",
}

// createNodes creates the number of nodes specified by the number of configs.
// Each node is created using the specified config, the neighbors are
// connected.
func (h *HarnessTest) createNodes(nodeCfgs [][]string) []*node.HarnessNode {
        // Get the number of nodes.
        numNodes := len(nodeCfgs)

        // Make sure we are creating a reasonable number of nodes.
        require.LessOrEqual(h, numNodes, len(nodeNames), "too many nodes")

        // Make a slice of nodes.
        nodes := make([]*node.HarnessNode, numNodes)

        // Create new nodes.
        for i, nodeCfg := range nodeCfgs {
                nodeName := nodeNames[i]
                n := h.NewNode(nodeName, nodeCfg)
                nodes[i] = n
        }

        // Connect the nodes in a chain.
        for i := 1; i < len(nodes); i++ {
                nodeA := nodes[i-1]
                nodeB := nodes[i]
                h.EnsureConnected(nodeA, nodeB)
        }

        // Fund all the nodes except the last one.
        for i := 0; i < len(nodes)-1; i++ {
                node := nodes[i]
                h.FundCoinsUnconfirmed(btcutil.SatoshiPerBitcoin, node)
        }

        // Mine 1 block to get the above coins confirmed.
        h.MineBlocksAndAssertNumTxes(1, numNodes-1)

        return nodes
}

// openChannelsForNodes takes a list of nodes and makes a topology of `node1 ->
// node2 -> node3...`.
func (h *HarnessTest) openChannelsForNodes(nodes []*node.HarnessNode,
        p OpenChannelParams) []*lnrpc.ChannelPoint {

        // Sanity check the params.
        require.Greater(h, len(nodes), 1, "need at least 2 nodes")

        // attachFundingShim is a helper closure that optionally attaches a
        // funding shim to the open channel params and returns it.
        attachFundingShim := func(
                nodeA, nodeB *node.HarnessNode) OpenChannelParams {

                // If this channel is not a script enforced lease channel,
                // we'll do nothing and return the params.
                leasedType := lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE
                if p.CommitmentType != leasedType {
                        return p
                }

                // Otherwise derive the funding shim, attach it to the original
                // open channel params and return it.
                minerHeight := h.CurrentHeight()
                thawHeight := minerHeight + thawHeightDelta
                fundingShim, _ := h.DeriveFundingShim(
                        nodeA, nodeB, p.Amt, thawHeight, true, leasedType,
                )

                p.FundingShim = fundingShim

                return p
        }

        // Open channels in batch to save blocks mined.
        reqs := make([]*OpenChannelRequest, 0, len(nodes)-1)
        for i := 0; i < len(nodes)-1; i++ {
                nodeA := nodes[i]
                nodeB := nodes[i+1]

                // Optionally attach a funding shim to the open channel params.
                p = attachFundingShim(nodeA, nodeB)

                req := &OpenChannelRequest{
                        Local:  nodeA,
                        Remote: nodeB,
                        Param:  p,
                }
                reqs = append(reqs, req)
        }
        resp := h.OpenMultiChannelsAsync(reqs)

        // If the channels are private, make sure the channel participants know
        // the relevant channels.
        if p.Private {
                for i, chanPoint := range resp {
                        // Get the channel participants - for n channels we
                        // would have n+1 nodes.
                        nodeA, nodeB := nodes[i], nodes[i+1]
                        h.AssertChannelInGraph(nodeA, chanPoint)
                        h.AssertChannelInGraph(nodeB, chanPoint)
                }
        } else {
                // Make sure all the nodes know all the channels if they are
                // public.
                for _, node := range nodes {
                        for _, chanPoint := range resp {
                                h.AssertChannelInGraph(node, chanPoint)
                        }

                        // Make sure every node has updated its cached graph
                        // about the edges as indicated in `DescribeGraph`.
                        h.AssertNumEdges(node, len(resp), false)
                }
        }

        return resp
}

// openZeroConfChannelsForNodes takes a list of nodes and makes a topology of
// `node1 -> node2 -> node3...` with zero-conf channels.
func (h *HarnessTest) openZeroConfChannelsForNodes(nodes []*node.HarnessNode,
        p OpenChannelParams) []*lnrpc.ChannelPoint {

        // Sanity check the params.
        require.True(h, p.ZeroConf, "zero-conf channels must be enabled")
        require.Greater(h, len(nodes), 1, "need at least 2 nodes")

        // We are opening numNodes-1 channels.
        cancels := make([]context.CancelFunc, 0, len(nodes)-1)

        // Create the channel acceptors.
        for _, node := range nodes[1:] {
                acceptor, cancel := node.RPC.ChannelAcceptor()
                go acceptChannel(h.T, true, acceptor)

                cancels = append(cancels, cancel)
        }

        // Open channels between the nodes.
        resp := h.openChannelsForNodes(nodes, p)

        for _, cancel := range cancels {
                cancel()
        }

        return resp
}

// DeriveFundingShim creates a channel funding shim by deriving the necessary
// keys on both sides.
func (h *HarnessTest) DeriveFundingShim(alice, bob *node.HarnessNode,
        chanSize btcutil.Amount, thawHeight uint32, publish bool,
        commitType lnrpc.CommitmentType) (*lnrpc.FundingShim,
        *lnrpc.ChannelPoint) {

        keyLoc := &signrpc.KeyLocator{KeyFamily: 9999}
        carolFundingKey := alice.RPC.DeriveKey(keyLoc)
        daveFundingKey := bob.RPC.DeriveKey(keyLoc)

        // Now that we have the multi-sig keys for each party, we can manually
        // construct the funding transaction. We'll instruct the backend to
        // immediately create and broadcast a transaction paying out an exact
        // amount. Normally this would reside in the mempool, but we just
        // confirm it now for simplicity.
        var (
                fundingOutput *wire.TxOut
                musig2        bool
                err           error
        )

        if commitType == lnrpc.CommitmentType_SIMPLE_TAPROOT ||
                commitType == lnrpc.CommitmentType_SIMPLE_TAPROOT_OVERLAY {

                var carolKey, daveKey *btcec.PublicKey
                carolKey, err = btcec.ParsePubKey(carolFundingKey.RawKeyBytes)
                require.NoError(h, err)
                daveKey, err = btcec.ParsePubKey(daveFundingKey.RawKeyBytes)
                require.NoError(h, err)

                _, fundingOutput, err = input.GenTaprootFundingScript(
                        carolKey, daveKey, int64(chanSize),
                        fn.None[chainhash.Hash](),
                )
                require.NoError(h, err)

                musig2 = true
        } else {
                _, fundingOutput, err = input.GenFundingPkScript(
                        carolFundingKey.RawKeyBytes, daveFundingKey.RawKeyBytes,
                        int64(chanSize),
                )
                require.NoError(h, err)
        }

        var txid *chainhash.Hash
        targetOutputs := []*wire.TxOut{fundingOutput}
        if publish {
                txid = h.SendOutputsWithoutChange(targetOutputs, 5)
        } else {
                tx := h.CreateTransaction(targetOutputs, 5)

                txHash := tx.TxHash()
                txid = &txHash
        }

        // At this point, we can begin our external channel funding workflow.
        // We'll start by generating a pending channel ID externally that will
        // be used to track this new funding type.
        pendingChanID := h.Random32Bytes()

        // Now that we have the pending channel ID, Dave (our responder) will
        // register the intent to receive a new channel funding workflow using
        // the pending channel ID.
        chanPoint := &lnrpc.ChannelPoint{
                FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
                        FundingTxidBytes: txid[:],
                },
        }
        chanPointShim := &lnrpc.ChanPointShim{
                Amt:       int64(chanSize),
                ChanPoint: chanPoint,
                LocalKey: &lnrpc.KeyDescriptor{
                        RawKeyBytes: daveFundingKey.RawKeyBytes,
                        KeyLoc: &lnrpc.KeyLocator{
                                KeyFamily: daveFundingKey.KeyLoc.KeyFamily,
                                KeyIndex:  daveFundingKey.KeyLoc.KeyIndex,
                        },
                },
                RemoteKey:     carolFundingKey.RawKeyBytes,
                PendingChanId: pendingChanID,
                ThawHeight:    thawHeight,
                Musig2:        musig2,
        }
        fundingShim := &lnrpc.FundingShim{
                Shim: &lnrpc.FundingShim_ChanPointShim{
                        ChanPointShim: chanPointShim,
                },
        }
        bob.RPC.FundingStateStep(&lnrpc.FundingTransitionMsg{
                Trigger: &lnrpc.FundingTransitionMsg_ShimRegister{
                        ShimRegister: fundingShim,
                },
        })

        // If we attempt to register the same shim (it has the same pending
        // chan ID), then we should get an error.
        bob.RPC.FundingStateStepAssertErr(&lnrpc.FundingTransitionMsg{
                Trigger: &lnrpc.FundingTransitionMsg_ShimRegister{
                        ShimRegister: fundingShim,
                },
        })

        // We'll take the chan point shim we just registered for Dave (the
        // responder), and swap the local/remote keys before we feed it in as
        // Carol's funding shim as the initiator.
        fundingShim.GetChanPointShim().LocalKey = &lnrpc.KeyDescriptor{
                RawKeyBytes: carolFundingKey.RawKeyBytes,
                KeyLoc: &lnrpc.KeyLocator{
                        KeyFamily: carolFundingKey.KeyLoc.KeyFamily,
                        KeyIndex:  carolFundingKey.KeyLoc.KeyIndex,
                },
        }
        fundingShim.GetChanPointShim().RemoteKey = daveFundingKey.RawKeyBytes

        return fundingShim, chanPoint
}
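
// exampleFundingShimSketch is a hypothetical usage sketch, not part of the
// original harness: it mirrors what openChannelsForNodes does above for
// script-enforced lease channels, deriving a shim and attaching it to the
// open channel params. The OpenChannel helper used at the end is assumed to
// be the harness's standard synchronous open helper defined elsewhere in this
// file.
func exampleFundingShimSketch(h *HarnessTest, alice, bob *node.HarnessNode,
        amt btcutil.Amount) *lnrpc.ChannelPoint {

        leasedType := lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE

        // Derive the shim and publish the external funding tx right away.
        thawHeight := h.CurrentHeight() + thawHeightDelta
        fundingShim, _ := h.DeriveFundingShim(
                alice, bob, amt, thawHeight, true, leasedType,
        )

        // Attach the shim so both peers fund from the pre-created output.
        p := OpenChannelParams{
                Amt:            amt,
                CommitmentType: leasedType,
                FundingShim:    fundingShim,
        }

        return h.OpenChannel(alice, bob, p)
}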