lightningnetwork / lnd — build 15636357780

13 Jun 2025 01:58PM UTC coverage: 68.494% (+10.2%) from 58.333%

Pull Request #9948: lntest: add funding and seed entropy options to HarnessTest.NewNode
Merge fe6d0e9a5 into 35102e7c3 (github / web-flow)

0 of 75 new or added lines in 1 file covered. (0.0%)
35 existing lines in 6 files now uncovered.
134462 of 196311 relevant lines covered (68.49%)
22308.92 hits per line

Source File: /lntest/harness.go (0.0% of new or added lines covered)

package lntest

import (
        "context"
        "fmt"
        "strings"
        "testing"
        "time"

        "github.com/btcsuite/btcd/blockchain"
        "github.com/btcsuite/btcd/btcec/v2"
        "github.com/btcsuite/btcd/btcutil"
        "github.com/btcsuite/btcd/chaincfg/chainhash"
        "github.com/btcsuite/btcd/txscript"
        "github.com/btcsuite/btcd/wire"
        "github.com/go-errors/errors"
        "github.com/lightningnetwork/lnd/fn/v2"
        "github.com/lightningnetwork/lnd/input"
        "github.com/lightningnetwork/lnd/kvdb/etcd"
        "github.com/lightningnetwork/lnd/lnrpc"
        "github.com/lightningnetwork/lnd/lnrpc/invoicesrpc"
        "github.com/lightningnetwork/lnd/lnrpc/routerrpc"
        "github.com/lightningnetwork/lnd/lnrpc/signrpc"
        "github.com/lightningnetwork/lnd/lnrpc/walletrpc"
        "github.com/lightningnetwork/lnd/lntest/miner"
        "github.com/lightningnetwork/lnd/lntest/node"
        "github.com/lightningnetwork/lnd/lntest/rpc"
        "github.com/lightningnetwork/lnd/lntest/wait"
        "github.com/lightningnetwork/lnd/lntypes"
        "github.com/lightningnetwork/lnd/lnwallet/chainfee"
        "github.com/lightningnetwork/lnd/lnwire"
        "github.com/lightningnetwork/lnd/routing"
        "github.com/stretchr/testify/require"
)

const (
        // defaultMinerFeeRate specifies the fee rate in sats when sending
        // outputs from the miner.
        defaultMinerFeeRate = 7500

        // numBlocksSendOutput specifies the number of blocks to mine after
        // sending outputs from the miner.
        numBlocksSendOutput = 2

        // numBlocksOpenChannel specifies the number of blocks mined when
        // opening a channel.
        numBlocksOpenChannel = 6

        // lndErrorChanSize specifies the buffer size used to receive errors
        // from the lnd process.
        lndErrorChanSize = 10

        // maxBlocksAllowed specifies the max allowed value to be used when
        // mining blocks.
        maxBlocksAllowed = 100

        finalCltvDelta  = routing.MinCLTVDelta // 18.
        thawHeightDelta = finalCltvDelta * 2   // 36.
)

var (
        // MaxBlocksMinedPerTest is the maximum number of blocks that we allow
        // a test to mine. This is an exported global variable so it can be
        // overwritten by other projects that don't have the same constraints.
        MaxBlocksMinedPerTest = 50
)

// TestCase defines a test case used in the integration tests.
type TestCase struct {
        // Name specifies the test name.
        Name string

        // TestFunc is the test case wrapped in a function.
        TestFunc func(t *HarnessTest)
}

// HarnessTest builds on top of a testing.T with enhanced error detection. It
// is responsible for managing the interactions among different nodes, and
// providing easy-to-use assertions.
type HarnessTest struct {
        *testing.T

        // miner is a reference to a running full node that can be used to
        // create new blocks on the network.
        miner *miner.HarnessMiner

        // manager handles the start and stop of a given node.
        manager *nodeManager

        // feeService is a web service that provides external fee estimates to
        // lnd.
        feeService WebFeeService

        // lndErrorChan is a channel for transmitting stderr output from a
        // failed lightning node to the main process.
        lndErrorChan chan error

        // runCtx is a context with cancel method. It's used to signal when the
        // node needs to quit, and used as the parent context when spawning
        // children contexts for RPC requests.
        runCtx context.Context //nolint:containedctx
        cancel context.CancelFunc

        // stopChainBackend points to the cleanup function returned by the
        // chainBackend.
        stopChainBackend func()

        // cleaned specifies whether the cleanup has been applied for the
        // current HarnessTest.
        cleaned bool

        // currentHeight is the current height of the chain backend.
        currentHeight uint32
}

// harnessOpts contains functional options to modify the behavior of the
// various harness calls.
type harnessOpts struct {
        useAMP bool
}

// defaultHarnessOpts returns a new instance of the harnessOpts with default
// values specified.
func defaultHarnessOpts() harnessOpts {
        return harnessOpts{
                useAMP: false,
        }
}

// HarnessOpt is a functional option that can be used to modify the behavior of
// harness functionality.
type HarnessOpt func(*harnessOpts)

// WithAMP is a functional option that can be used to enable the AMP feature
// for sending payments.
func WithAMP() HarnessOpt {
        return func(h *harnessOpts) {
                h.useAMP = true
        }
}
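
// exampleApplyHarnessOpts is an illustrative sketch (not part of the
// original file) showing how the functional options above are consumed:
// start from the defaults, then apply each supplied option in order.
func exampleApplyHarnessOpts(options ...HarnessOpt) harnessOpts {
        // Begin with the default values.
        opts := defaultHarnessOpts()

        // Each HarnessOpt mutates the shared value, e.g. WithAMP() flips
        // useAMP to true.
        for _, opt := range options {
                opt(&opts)
        }

        return opts
}
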
// NewHarnessTest creates a new instance of a HarnessTest from a regular
// testing.T instance.
func NewHarnessTest(t *testing.T, lndBinary string, feeService WebFeeService,
        dbBackend node.DatabaseBackend, nativeSQL bool) *HarnessTest {

        t.Helper()

        // Create the run context.
        ctxt, cancel := context.WithCancel(context.Background())

        manager := newNodeManager(lndBinary, dbBackend, nativeSQL)

        return &HarnessTest{
                T:          t,
                manager:    manager,
                feeService: feeService,
                runCtx:     ctxt,
                cancel:     cancel,
                // We need to use buffered channel here as we don't want to
                // block sending errors.
                lndErrorChan: make(chan error, lndErrorChanSize),
        }
}

// Start will assemble the chain backend and the miner for the HarnessTest. It
// also starts the fee service and watches for lnd process errors.
func (h *HarnessTest) Start(chain node.BackendConfig,
        miner *miner.HarnessMiner) {

        // Spawn a new goroutine to watch for any fatal errors that any of the
        // running lnd processes encounter. If an error occurs, then the test
        // case should naturally fail as a result, and we log the server error
        // here to help debug.
        go func() {
                select {
                case err, more := <-h.lndErrorChan:
                        if !more {
                                return
                        }
                        h.Logf("lnd finished with error (stderr):\n%v", err)

                case <-h.runCtx.Done():
                        return
                }
        }()

        // Start the fee service.
        err := h.feeService.Start()
        require.NoError(h, err, "failed to start fee service")

        // Assemble the node manager with chainBackend and feeServiceURL.
        h.manager.chainBackend = chain
        h.manager.feeServiceURL = h.feeService.URL()

        // Assemble the miner.
        h.miner = miner

        // Update block height.
        h.updateCurrentHeight()
}

// ChainBackendName returns the chain backend name used in the test.
func (h *HarnessTest) ChainBackendName() string {
        return h.manager.chainBackend.Name()
}

// Context returns the run context used in this test. Usually it should be
// managed by the test itself otherwise undefined behaviors will occur. It can
// be used, however, when a test needs to have its own context being managed
// differently. In that case, instead of using a background context, the run
// context should be used such that the test context scope can be fully
// controlled.
func (h *HarnessTest) Context() context.Context {
        return h.runCtx
}

// setupWatchOnlyNode initializes a node with the watch-only accounts of an
// associated remote signing instance.
func (h *HarnessTest) setupWatchOnlyNode(name string,
        signerNode *node.HarnessNode, password []byte) *node.HarnessNode {

        // Prepare arguments for the watch-only node connected to the remote
        // signer.
        remoteSignerArgs := []string{
                "--remotesigner.enable",
                fmt.Sprintf("--remotesigner.rpchost=localhost:%d",
                        signerNode.Cfg.RPCPort),
                fmt.Sprintf("--remotesigner.tlscertpath=%s",
                        signerNode.Cfg.TLSCertPath),
                fmt.Sprintf("--remotesigner.macaroonpath=%s",
                        signerNode.Cfg.AdminMacPath),
        }

        // Fetch watch-only accounts from the signer node.
        resp := signerNode.RPC.ListAccounts(&walletrpc.ListAccountsRequest{})
        watchOnlyAccounts, err := walletrpc.AccountsToWatchOnly(resp.Accounts)
        require.NoErrorf(h, err, "unable to find watch only accounts for %s",
                name)

        // Create a new watch-only node with remote signer configuration.
        return h.NewNodeRemoteSigner(
                name, remoteSignerArgs, password,
                &lnrpc.WatchOnly{
                        MasterKeyBirthdayTimestamp: 0,
                        MasterKeyFingerprint:       nil,
                        Accounts:                   watchOnlyAccounts,
                },
        )
}
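
// exampleRemoteSignerPair is an illustrative sketch (not part of the
// original file): create a signer node with a fresh seed, then a
// watch-only node that forwards all signing requests to it via
// setupWatchOnlyNode. The node names and password are hypothetical.
func exampleRemoteSignerPair(h *HarnessTest) *node.HarnessNode {
        password := []byte("remote-signer-pass")

        // The signer node holds the actual keys.
        signer, _, _ := h.NewNodeWithSeed("signer", nil, password, false)

        // The watch-only node imports the signer's account xpubs and
        // delegates signing over RPC.
        return h.setupWatchOnlyNode("watch-only", signer, password)
}
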
// createAndSendOutput sends amt satoshis from the internal mining node to the
// targeted lightning node using a P2WKH address. No blocks are mined, so the
// transaction will sit unconfirmed in the mempool.
func (h *HarnessTest) createAndSendOutput(target *node.HarnessNode,
        amt btcutil.Amount, addrType lnrpc.AddressType) {

        req := &lnrpc.NewAddressRequest{Type: addrType}
        resp := target.RPC.NewAddress(req)
        addr := h.DecodeAddress(resp.Address)
        addrScript := h.PayToAddrScript(addr)

        output := &wire.TxOut{
                PkScript: addrScript,
                Value:    int64(amt),
        }
        h.miner.SendOutput(output, defaultMinerFeeRate)
}

// Stop stops the test harness.
func (h *HarnessTest) Stop() {
        // Do nothing if it's not started.
        if h.runCtx == nil {
                h.Log("HarnessTest is not started")
                return
        }

        h.shutdownAllNodes()

        close(h.lndErrorChan)

        // Stop the fee service.
        err := h.feeService.Stop()
        require.NoError(h, err, "failed to stop fee service")

        // Stop the chainBackend.
        h.stopChainBackend()

        // Stop the miner.
        h.miner.Stop()
}

// RunTestCase executes a harness test case. Any errors or panics will be
// represented as fatal.
func (h *HarnessTest) RunTestCase(testCase *TestCase) {
        defer func() {
                if err := recover(); err != nil {
                        description := errors.Wrap(err, 2).ErrorStack()
                        h.Fatalf("Failed: (%v) panic with: \n%v",
                                testCase.Name, description)
                }
        }()

        testCase.TestFunc(h)
}
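
// exampleTestCase is an illustrative sketch (not part of the original
// file) showing how a TestCase is typically declared and then executed
// via RunTestCase; the test body here is a placeholder.
var exampleTestCase = &TestCase{
        Name: "example test",
        TestFunc: func(ht *HarnessTest) {
                // A real test would create nodes and assert behavior here,
                // e.g. ht.NewNode("alice", nil).
        },
}
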
// Subtest creates a child HarnessTest, which inherits the harness net and
// standby nodes created by the parent test. It will return a cleanup function
// which resets all the standby nodes' configs back to their original state
// and creates snapshots of each node's internal state.
func (h *HarnessTest) Subtest(t *testing.T) *HarnessTest {
        t.Helper()

        st := &HarnessTest{
                T:            t,
                manager:      h.manager,
                miner:        h.miner,
                feeService:   h.feeService,
                lndErrorChan: make(chan error, lndErrorChanSize),
        }

        // Inherit context from the main test.
        st.runCtx, st.cancel = context.WithCancel(h.runCtx)

        // Inherit the subtest for the miner.
        st.miner.T = st.T

        // Reset fee estimator.
        st.feeService.Reset()

        // Record block height.
        h.updateCurrentHeight()
        startHeight := int32(h.CurrentHeight())

        st.Cleanup(func() {
                // Make sure the test is not consuming too many blocks.
                st.checkAndLimitBlocksMined(startHeight)

                // Don't bother running the cleanups if the test failed.
                if st.Failed() {
                        st.Log("test failed, skipped cleanup")
                        st.shutdownNodesNoAssert()
                        return
                }

                // Don't run cleanup if it's already done. This can happen if
                // we have multiple levels of inheritance of the parent
                // harness test. For instance, a `Subtest(st)`.
                if st.cleaned {
                        st.Log("test already cleaned, skipped cleanup")
                        return
                }

                // If there are running nodes, shut them down.
                st.shutdownAllNodes()

                // We require the mempool to be cleaned by the test.
                require.Empty(st, st.miner.GetRawMempool(), "mempool not "+
                        "cleaned, please mine blocks to clean them all.")

                // Finally, cancel the run context. We have to do it here
                // because we need to keep the context alive for the above
                // assertions used in cleanup.
                st.cancel()

                // We now want to mark the parent harness as cleaned to avoid
                // running cleanup again since its internal state has been
                // cleaned up by its child harness tests.
                h.cleaned = true
        })

        return st
}
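
// exampleRunSubtests is an illustrative sketch (hypothetical helper, not
// part of the original file) of the intended Subtest pattern: each t.Run
// body derives a child harness so cleanups, mempool checks and the
// block-mining limit are scoped to that subtest.
func exampleRunSubtests(h *HarnessTest, cases []*TestCase) {
        for _, tc := range cases {
                tc := tc

                // HarnessTest embeds *testing.T, so Run is available here.
                h.Run(tc.Name, func(t *testing.T) {
                        st := h.Subtest(t)
                        st.SetTestName(tc.Name)
                        st.RunTestCase(tc)
                })
        }
}
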
// checkAndLimitBlocksMined asserts that the number of blocks mined in a
// single test doesn't exceed MaxBlocksMinedPerTest, which implicitly
// discourages table-driven tests, as they are hard to maintain and take a
// long time to run.
func (h *HarnessTest) checkAndLimitBlocksMined(startHeight int32) {
        _, endHeight := h.GetBestBlock()
        blocksMined := endHeight - startHeight

        h.Logf("finished test: %s, start height=%d, end height=%d, mined "+
                "blocks=%d", h.manager.currentTestCase, startHeight, endHeight,
                blocksMined)

        // If the number of blocks is less than 40, we consider the test
        // healthy.
        if blocksMined < 40 {
                return
        }

        // Otherwise log a warning if it's mining more than 40 blocks.
        desc := "!============================================!\n"

        desc += fmt.Sprintf("Too many blocks (%v) mined in one test! Tips:\n",
                blocksMined)

        desc += "1. break test into smaller individual tests, especially if " +
                "this is a table-driven test.\n" +
                "2. use smaller CSV via `--bitcoin.defaultremotedelay=1.`\n" +
                "3. use smaller CLTV via `--bitcoin.timelockdelta=18.`\n" +
                "4. remove unnecessary CloseChannel when test ends.\n" +
                "5. use `CreateSimpleNetwork` for efficient channel creation.\n"
        h.Log(desc)

        // We enforce that the test should not mine more than
        // MaxBlocksMinedPerTest (50 by default) blocks, which is more than
        // enough to test a multi hop force close scenario.
        require.LessOrEqualf(
                h, int(blocksMined), MaxBlocksMinedPerTest,
                "cannot mine more than %d blocks in one test",
                MaxBlocksMinedPerTest,
        )
}

// shutdownNodesNoAssert will shutdown all running nodes without assertions.
// This is used when the test has already failed: we don't want to log more
// errors, but instead focus on the original error.
func (h *HarnessTest) shutdownNodesNoAssert() {
        for _, node := range h.manager.activeNodes {
                _ = h.manager.shutdownNode(node)
        }
}

// shutdownAllNodes will shutdown all running nodes.
func (h *HarnessTest) shutdownAllNodes() {
        var err error
        for _, node := range h.manager.activeNodes {
                err = h.manager.shutdownNode(node)
                if err == nil {
                        continue
                }

                // Instead of returning the error, we will log it instead. This
                // is needed so other nodes can continue their shutdown
                // processes.
                h.Logf("unable to shutdown %s, got err: %v", node.Name(), err)
        }

        require.NoError(h, err, "failed to shutdown all nodes")
}

// cleanupStandbyNode is a function that should be called with defer whenever
// a subtest is created. It will reset the standby node's configs, snapshot
// the states, and validate that the node has a clean state.
func (h *HarnessTest) cleanupStandbyNode(hn *node.HarnessNode) {
        // Remove connections made from this test.
        h.removeConnectionns(hn)

        // Delete all payments made from this test.
        hn.RPC.DeleteAllPayments()

        // Check the node's current state with timeout.
        //
        // NOTE: we need to do this in a `wait` because it takes some time for
        // the node to update its internal state. Once the RPCs are synced we
        // can then remove this wait.
        err := wait.NoError(func() error {
                // Update the node's internal state.
                hn.UpdateState()

                // Check the node is in a clean state for the following tests.
                return h.validateNodeState(hn)
        }, wait.DefaultTimeout)
        require.NoError(h, err, "timeout checking node's state")
}

// removeConnectionns will remove all connections made on the standby nodes
// except the connections between Alice and Bob.
func (h *HarnessTest) removeConnectionns(hn *node.HarnessNode) {
        resp := hn.RPC.ListPeers()
        for _, peer := range resp.Peers {
                hn.RPC.DisconnectPeer(peer.PubKey)
        }
}

// SetTestName sets the test case name.
func (h *HarnessTest) SetTestName(name string) {
        cleanTestCaseName := strings.ReplaceAll(name, " ", "_")
        h.manager.currentTestCase = cleanTestCaseName
}

// newNodeOpts contains options for creating a new node.
type newNodeOpts struct {
        // withCoins indicates whether the node should be funded with
        // coins after creation.
        withCoins bool

        // seedEntropy contains the entropy bytes from which the node's seed is
        // deterministically derived. When non-nil, the node is initialized
        // with a seed produced from this entropy instead of a random seed.
        seedEntropy []byte

        // walletPassword is an optional user provided passphrase that will be
        // used to encrypt the generated aezeed cipher seed. When using REST,
        // this field must be encoded as base64.
        walletPassword []byte
}

// NewNodeOpt is a functional option for NewNode.
type NewNodeOpt func(*newNodeOpts)

// WithCoins is a functional option that funds the node with coins after
// creation.
func WithCoins() NewNodeOpt {
        return func(opts *newNodeOpts) {
                opts.withCoins = true
        }
}

// WithSeedEntropy returns a functional option that injects explicit entropy
// into the node, enabling deterministic control over mnemonic seed derivation.
func WithSeedEntropy(seedEntropy []byte, walletPassword []byte) NewNodeOpt {
        return func(opts *newNodeOpts) {
                opts.seedEntropy = seedEntropy
                opts.walletPassword = walletPassword
        }
}

// NewNode creates a new node and asserts its creation. The node is guaranteed
// to have finished its initialization and all its subservers are started.
func (h *HarnessTest) NewNode(name string, extraArgs []string,
        nodeOpts ...NewNodeOpt) *node.HarnessNode {

        // Apply functional options.
        opts := &newNodeOpts{}
        for _, opt := range nodeOpts {
                opt(opts)
        }

        var hNode *node.HarnessNode

        // If a seed is provided, use the seed-based creation flow.
        if len(opts.seedEntropy) > 0 {
                hNode = h.createNodeWithSeed(name, extraArgs, opts)
        } else {
                // Otherwise, use the regular node creation flow.
                hNode = h.createRegularNode(name, extraArgs)
        }

        // If the withCoins option is set, fund the node with coins.
        if opts.withCoins {
                h.fundNodeWithCoins(hNode)
        }

        return hNode
}
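
// exampleNewNodeOptions is an illustrative sketch (not part of the
// original file) showing how the NewNode options compose. The 16 bytes
// of entropy match what lnrpc.GenSeedRequest.SeedEntropy expects; the
// name and password values are hypothetical.
func exampleNewNodeOptions(h *HarnessTest) *node.HarnessNode {
        // Deterministic (all-zero) entropy makes the derived seed, and
        // hence the node's keys, reproducible across runs.
        var entropy [16]byte

        return h.NewNode(
                "alice", nil, WithCoins(),
                WithSeedEntropy(entropy[:], []byte("wallet-password")),
        )
}
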
// createRegularNode creates a node using the regular flow (no seed).
func (h *HarnessTest) createRegularNode(name string,
        extraArgs []string) *node.HarnessNode {

        hNode, err := h.manager.newNode(h.T, name, extraArgs, nil, false)
        require.NoErrorf(h, err, "unable to create new node for %s", name)

        // Start the node.
        err = hNode.Start(h.runCtx)
        require.NoError(h, err, "failed to start node %s", hNode.Name())

        // Get the miner's best block hash.
        bestBlock, err := h.miner.Client.GetBestBlockHash()
        require.NoError(h, err, "unable to get best block hash")

        // Wait until the node's chain backend is synced to the miner's best
        // block.
        h.WaitForBlockchainSyncTo(hNode, *bestBlock)

        return hNode
}

// createNodeWithSeed creates a node using the seed-based flow.
func (h *HarnessTest) createNodeWithSeed(name string, extraArgs []string,
        opts *newNodeOpts) *node.HarnessNode {

        hNode, err := h.manager.newNode(
                h.T, name, extraArgs, opts.seedEntropy, true,
        )
        require.NoErrorf(h, err, "unable to create new node for %s", name)

        // Start the node with seed only, which will only create the `State`
        // and `WalletUnlocker` clients.
        err = hNode.StartWithNoAuth(h.runCtx)
        require.NoErrorf(h, err, "failed to start node %s", hNode.Name())

        // Generate a new seed using the provided seed entropy.
        genSeedResp := hNode.RPC.GenSeed(&lnrpc.GenSeedRequest{
                SeedEntropy: opts.seedEntropy,
        })

        // Create the wallet using the provided seed.
        initReq := &lnrpc.InitWalletRequest{
                CipherSeedMnemonic: genSeedResp.CipherSeedMnemonic,
                WalletPassword:     opts.walletPassword,
        }

        // Pass the init request via rpc to finish unlocking the node.
        _, err = h.manager.initWalletAndNode(hNode, initReq)
        require.NoErrorf(h, err, "failed to unlock and init node %s",
                hNode.Name())

        // Get the miner's best block hash.
        bestBlock, err := h.miner.Client.GetBestBlockHash()
        require.NoError(h, err, "unable to get best block hash")

        // Wait until the node's chain backend is synced to the miner's best
        // block.
        h.WaitForBlockchainSyncTo(hNode, *bestBlock)

        return hNode
}

// fundNodeWithCoins funds a node with coins, similar to NewNodeWithCoins.
func (h *HarnessTest) fundNodeWithCoins(node *node.HarnessNode) {
        // Load up the wallet of the node with 5 outputs of 1 BTC each.
        const (
                numOutputs  = 5
                fundAmount  = 1 * btcutil.SatoshiPerBitcoin
                totalAmount = fundAmount * numOutputs
        )

        for i := 0; i < numOutputs; i++ {
                h.createAndSendOutput(
                        node, fundAmount,
                        lnrpc.AddressType_WITNESS_PUBKEY_HASH,
                )
        }

        // Mine a block to confirm the transactions.
        h.MineBlocksAndAssertNumTxes(1, numOutputs)

        // Now block until the wallet has fully synced up.
        h.WaitForBalanceConfirmed(node, totalAmount)
}

// NewNodeWithCoins creates a new node and asserts its creation. The node is
// guaranteed to have finished its initialization and all its subservers are
// started. In addition, 5 UTXOs of 1 BTC each are sent to the node.
func (h *HarnessTest) NewNodeWithCoins(name string,
        extraArgs []string) *node.HarnessNode {

        return h.NewNode(name, extraArgs, WithCoins())
}

// Shutdown shuts down the given node and asserts that no errors occur.
func (h *HarnessTest) Shutdown(node *node.HarnessNode) {
        err := h.manager.shutdownNode(node)
        require.NoErrorf(h, err, "unable to shutdown %v in %v", node.Name(),
                h.manager.currentTestCase)
}

// SuspendNode stops the given node and returns a callback that can be used to
// start it again.
func (h *HarnessTest) SuspendNode(node *node.HarnessNode) func() error {
        err := node.Stop()
        require.NoErrorf(h, err, "failed to stop %s", node.Name())

        // Remove the node from active nodes.
        delete(h.manager.activeNodes, node.Cfg.NodeID)

        return func() error {
                h.manager.registerNode(node)

                if err := node.Start(h.runCtx); err != nil {
                        return err
                }
                h.WaitForBlockchainSync(node)

                return nil
        }
}
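
// exampleSuspendResume is an illustrative sketch (not part of the
// original file) of the suspend/resume round trip: SuspendNode stops the
// node and hands back a callback that restarts it and waits for chain
// sync.
func exampleSuspendResume(h *HarnessTest, hn *node.HarnessNode) {
        restart := h.SuspendNode(hn)

        // ...exercise behavior that requires the node to be offline...

        require.NoError(h, restart(), "failed to resume node")
}
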
// RestartNode restarts a given node, unlocks it and asserts it's successfully
// started.
func (h *HarnessTest) RestartNode(hn *node.HarnessNode) {
        err := h.manager.restartNode(h.runCtx, hn, nil)
        require.NoErrorf(h, err, "failed to restart node %s", hn.Name())

        err = h.manager.unlockNode(hn)
        require.NoErrorf(h, err, "failed to unlock node %s", hn.Name())

        if !hn.Cfg.SkipUnlock {
                // Give the node some time to catch up with the chain before we
                // continue with the tests.
                h.WaitForBlockchainSync(hn)
        }
}

// RestartNodeNoUnlock restarts a given node without unlocking its wallet.
func (h *HarnessTest) RestartNodeNoUnlock(hn *node.HarnessNode) {
        err := h.manager.restartNode(h.runCtx, hn, nil)
        require.NoErrorf(h, err, "failed to restart node %s", hn.Name())
}

// RestartNodeWithChanBackups restarts a given node with the specified channel
// backups.
func (h *HarnessTest) RestartNodeWithChanBackups(hn *node.HarnessNode,
        chanBackups ...*lnrpc.ChanBackupSnapshot) {

        err := h.manager.restartNode(h.runCtx, hn, nil)
        require.NoErrorf(h, err, "failed to restart node %s", hn.Name())

        err = h.manager.unlockNode(hn, chanBackups...)
        require.NoErrorf(h, err, "failed to unlock node %s", hn.Name())

        // Give the node some time to catch up with the chain before we
        // continue with the tests.
        h.WaitForBlockchainSync(hn)
}

// RestartNodeWithExtraArgs updates the node's config and restarts it.
func (h *HarnessTest) RestartNodeWithExtraArgs(hn *node.HarnessNode,
        extraArgs []string) {

        hn.SetExtraArgs(extraArgs)
        h.RestartNode(hn)
}

// NewNodeWithSeed fully initializes a new HarnessNode after creating a fresh
// aezeed. The provided password is used as both the aezeed password and the
// wallet password. The generated mnemonic is returned along with the
// initialized harness node.
func (h *HarnessTest) NewNodeWithSeed(name string,
        extraArgs []string, password []byte,
        statelessInit bool) (*node.HarnessNode, []string, []byte) {

        // Create a request to generate a new aezeed. The new seed will have
        // the same password as the internal wallet.
        req := &lnrpc.GenSeedRequest{
                AezeedPassphrase: password,
                SeedEntropy:      nil,
        }

        return h.newNodeWithSeed(name, extraArgs, req, statelessInit)
}

// newNodeWithSeed creates and initializes a new HarnessNode such that it'll be
// ready to accept RPC calls. A `GenSeedRequest` is needed to generate the
// seed.
func (h *HarnessTest) newNodeWithSeed(name string,
        extraArgs []string, req *lnrpc.GenSeedRequest,
        statelessInit bool) (*node.HarnessNode, []string, []byte) {

        node, err := h.manager.newNode(
                h.T, name, extraArgs, req.AezeedPassphrase, true,
        )
        require.NoErrorf(h, err, "unable to create new node for %s", name)

        // Start the node with seed only, which will only create the `State`
        // and `WalletUnlocker` clients.
        err = node.StartWithNoAuth(h.runCtx)
        require.NoErrorf(h, err, "failed to start node %s", node.Name())

        // Generate a new seed.
        genSeedResp := node.RPC.GenSeed(req)

        // With the seed created, construct the init request to the node,
        // including the newly generated seed.
        initReq := &lnrpc.InitWalletRequest{
                WalletPassword:     req.AezeedPassphrase,
                CipherSeedMnemonic: genSeedResp.CipherSeedMnemonic,
                AezeedPassphrase:   req.AezeedPassphrase,
                StatelessInit:      statelessInit,
        }

        // Pass the init request via rpc to finish unlocking the node. This
        // will also initialize the macaroon-authenticated LightningClient.
        adminMac, err := h.manager.initWalletAndNode(node, initReq)
        require.NoErrorf(h, err, "failed to unlock and init node %s",
                node.Name())

        // In stateless initialization mode we get a macaroon back that we have
        // to return to the test, otherwise gRPC calls won't be possible since
        // there are no macaroon files created in that mode.
        // In stateful init the admin macaroon will just be nil.
        return node, genSeedResp.CipherSeedMnemonic, adminMac
}

// RestoreNodeWithSeed fully initializes a HarnessNode using a chosen mnemonic,
// password, recovery window, and optionally a set of static channel backups.
// After providing the initialization request to unlock the node, this method
// will finish initializing the LightningClient such that the HarnessNode can
// be used for regular rpc operations.
func (h *HarnessTest) RestoreNodeWithSeed(name string, extraArgs []string,
        password []byte, mnemonic []string, rootKey string,
        recoveryWindow int32,
        chanBackups *lnrpc.ChanBackupSnapshot) *node.HarnessNode {

        n, err := h.manager.newNode(h.T, name, extraArgs, password, true)
        require.NoErrorf(h, err, "unable to create new node for %s", name)

        // Start the node with seed only, which will only create the `State`
        // and `WalletUnlocker` clients.
        err = n.StartWithNoAuth(h.runCtx)
        require.NoErrorf(h, err, "failed to start node %s", n.Name())

        // Create the wallet.
        initReq := &lnrpc.InitWalletRequest{
                WalletPassword:     password,
                CipherSeedMnemonic: mnemonic,
                AezeedPassphrase:   password,
                ExtendedMasterKey:  rootKey,
                RecoveryWindow:     recoveryWindow,
                ChannelBackups:     chanBackups,
        }
        _, err = h.manager.initWalletAndNode(n, initReq)
        require.NoErrorf(h, err, "failed to unlock and init node %s",
                n.Name())

        return n
}
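
// exampleSeedRestore is an illustrative sketch (not part of the original
// file) of the backup/restore round trip: create a node from a fresh
// aezeed, then restore a second node from the returned mnemonic. The
// password and recovery window values are hypothetical.
func exampleSeedRestore(h *HarnessTest) *node.HarnessNode {
        password := []byte("seed-password")
        _, mnemonic, _ := h.NewNodeWithSeed("carol", nil, password, false)

        // An empty rootKey means the wallet is derived from the mnemonic;
        // the recovery window controls how many addresses are rescanned.
        return h.RestoreNodeWithSeed(
                "carol-restored", nil, password, mnemonic, "", 1000, nil,
        )
}
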
// NewNodeEtcd starts a new node that'll use an external etcd database as its
// storage. The passed cluster flag indicates that we'd like the node to join
// the cluster leader election. We won't wait until RPC is available (this is
// useful when the node is not expected to become the leader right away).
func (h *HarnessTest) NewNodeEtcd(name string, etcdCfg *etcd.Config,
        password []byte, cluster bool,
        leaderSessionTTL int) *node.HarnessNode {

        // We don't want to use the embedded etcd instance.
        h.manager.dbBackend = node.BackendBbolt

        extraArgs := node.ExtraArgsEtcd(
                etcdCfg, name, cluster, leaderSessionTTL,
        )
        node, err := h.manager.newNode(h.T, name, extraArgs, password, true)
        require.NoError(h, err, "failed to create new node with etcd")

        // Start the node daemon only.
        err = node.StartLndCmd(h.runCtx)
        require.NoError(h, err, "failed to start node %s", node.Name())

        return node
}

// NewNodeWithSeedEtcd starts a new node with seed that'll use an external etcd
// database as its storage. The passed cluster flag indicates that we'd like
// the node to join the cluster leader election.
func (h *HarnessTest) NewNodeWithSeedEtcd(name string, etcdCfg *etcd.Config,
        password []byte, statelessInit, cluster bool,
        leaderSessionTTL int) (*node.HarnessNode, []string, []byte) {

        // We don't want to use the embedded etcd instance.
        h.manager.dbBackend = node.BackendBbolt

        // Create a request to generate a new aezeed. The new seed will have
        // the same password as the internal wallet.
        req := &lnrpc.GenSeedRequest{
                AezeedPassphrase: password,
                SeedEntropy:      nil,
        }

        extraArgs := node.ExtraArgsEtcd(
                etcdCfg, name, cluster, leaderSessionTTL,
        )

        return h.newNodeWithSeed(name, extraArgs, req, statelessInit)
}
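
// exampleEtcdCluster is an illustrative sketch (not part of the original
// file): spin up two cluster-enabled nodes sharing one external etcd
// config so they participate in leader election. The names, password and
// TTL are hypothetical; etcdCfg is assumed to come from the test's etcd
// fixture.
func exampleEtcdCluster(h *HarnessTest,
        etcdCfg *etcd.Config) (*node.HarnessNode, *node.HarnessNode) {

        password := []byte("etcd-pass")

        // The first node initializes a wallet and will typically win the
        // initial leader election.
        leader, _, _ := h.NewNodeWithSeedEtcd(
                "leader", etcdCfg, password, false, true, 60,
        )

        // The second node joins the same cluster; it may block waiting to
        // become leader, which is why NewNodeEtcd doesn't wait for RPC.
        follower := h.NewNodeEtcd("follower", etcdCfg, password, true, 60)

        return leader, follower
}
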
// NewNodeRemoteSigner creates a new remote signer node and asserts its
// creation.
func (h *HarnessTest) NewNodeRemoteSigner(name string, extraArgs []string,
        password []byte, watchOnly *lnrpc.WatchOnly) *node.HarnessNode {

        hn, err := h.manager.newNode(h.T, name, extraArgs, password, true)
        require.NoErrorf(h, err, "unable to create new node for %s", name)

        err = hn.StartWithNoAuth(h.runCtx)
        require.NoError(h, err, "failed to start node %s", name)

        // Construct the init request to the node, including the watch-only
        // account information.
        initReq := &lnrpc.InitWalletRequest{
                WalletPassword: password,
                WatchOnly:      watchOnly,
        }

        // Pass the init request via rpc to finish unlocking the node. This
        // will also initialize the macaroon-authenticated LightningClient.
        _, err = h.manager.initWalletAndNode(hn, initReq)
        require.NoErrorf(h, err, "failed to init node %s", name)

        return hn
}

// KillNode kills the node and waits for the node process to stop.
func (h *HarnessTest) KillNode(hn *node.HarnessNode) {
        delete(h.manager.activeNodes, hn.Cfg.NodeID)

        h.Logf("Manually killing the node %s", hn.Name())
        require.NoErrorf(h, hn.KillAndWait(), "%s: kill got error", hn.Name())
}

// SetFeeEstimate sets a fee rate to be returned from the fee estimator.
//
// NOTE: this method will set the fee rate for a conf target of 1, which is the
// fallback fee rate for a `WebAPIEstimator` if a higher conf target's fee rate
// is not set. This means if the fee rate for conf target 6 is set, the fee
// estimator will use that value instead.
func (h *HarnessTest) SetFeeEstimate(fee chainfee.SatPerKWeight) {
        h.feeService.SetFeeRate(fee, 1)
}

// SetFeeEstimateWithConf sets a fee rate of a specified conf target to be
// returned from the fee estimator.
func (h *HarnessTest) SetFeeEstimateWithConf(
        fee chainfee.SatPerKWeight, conf uint32) {

        h.feeService.SetFeeRate(fee, conf)
}

// SetMinRelayFeerate sets a min relay fee rate to be returned from the fee
// estimator.
func (h *HarnessTest) SetMinRelayFeerate(fee chainfee.SatPerKVByte) {
        h.feeService.SetMinRelayFeerate(fee)
}
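
// exampleSetFees is an illustrative sketch (not part of the original
// file): configure the fallback rate (conf target 1), a rate for conf
// target 6, and the min relay fee. All values are arbitrary.
func exampleSetFees(h *HarnessTest) {
        // Fallback used when no more specific conf target matches.
        h.SetFeeEstimate(chainfee.SatPerKWeight(12500))

        // Per the note on SetFeeEstimate, this value takes precedence for
        // requests targeting 6 confirmations.
        h.SetFeeEstimateWithConf(chainfee.SatPerKWeight(2500), 6)

        h.SetMinRelayFeerate(chainfee.SatPerKVByte(1000))
}
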
// validateNodeState checks that the node doesn't have any uncleaned states
// which will affect its following tests.
func (h *HarnessTest) validateNodeState(hn *node.HarnessNode) error {
        errStr := func(subject string) error {
                return fmt.Errorf("%s: found %s channels, please close "+
                        "them properly", hn.Name(), subject)
        }

        // If the node still has open channels, it's most likely that the
        // current test didn't close them properly.
        if hn.State.OpenChannel.Active != 0 {
                return errStr("active")
        }
        if hn.State.OpenChannel.Public != 0 {
                return errStr("public")
        }
        if hn.State.OpenChannel.Private != 0 {
                return errStr("private")
        }
        if hn.State.OpenChannel.Pending != 0 {
                return errStr("pending open")
        }

        // The number of pending force close channels should be zero.
        if hn.State.CloseChannel.PendingForceClose != 0 {
                return errStr("pending force")
        }

        // The number of waiting close channels should be zero.
        if hn.State.CloseChannel.WaitingClose != 0 {
                return errStr("waiting close")
        }

        // The number of payments should be zero.
        if hn.State.Payment.Total != 0 {
                return fmt.Errorf("%s: found uncleaned payments, please "+
                        "delete all of them properly", hn.Name())
        }

        // The number of public edges should be zero.
        if hn.State.Edge.Public != 0 {
                return fmt.Errorf("%s: found active public edges, please "+
                        "clean them properly", hn.Name())
        }

        // The number of edges should be zero.
        if hn.State.Edge.Total != 0 {
                return fmt.Errorf("%s: found active edges, please "+
                        "clean them properly", hn.Name())
        }

        return nil
}

// GetChanPointFundingTxid takes a channel point and converts it into a chain
// hash.
func (h *HarnessTest) GetChanPointFundingTxid(
        cp *lnrpc.ChannelPoint) chainhash.Hash {

        txid, err := lnrpc.GetChanPointFundingTxid(cp)
        require.NoError(h, err, "unable to get txid")

        return *txid
}

// OutPointFromChannelPoint creates an outpoint from a given channel point.
func (h *HarnessTest) OutPointFromChannelPoint(
        cp *lnrpc.ChannelPoint) wire.OutPoint {

        txid := h.GetChanPointFundingTxid(cp)
        return wire.OutPoint{
                Hash:  txid,
                Index: cp.OutputIndex,
        }
}

// OpenChannelParams houses the params to specify when opening a new channel.
type OpenChannelParams struct {
        // Amt is the local amount being put into the channel.
        Amt btcutil.Amount

        // PushAmt is the amount that should be pushed to the remote when the
        // channel is opened.
        PushAmt btcutil.Amount

        // Private is a boolean indicating whether the opened channel should be
        // private.
        Private bool

        // SpendUnconfirmed is a boolean indicating whether we can utilize
        // unconfirmed outputs to fund the channel.
        SpendUnconfirmed bool

        // MinHtlc is the htlc_minimum_msat value set when opening the channel.
        MinHtlc lnwire.MilliSatoshi

        // RemoteMaxHtlcs is the remote_max_htlcs value set when opening the
        // channel, restricting the number of concurrent HTLCs the remote party
        // can add to a commitment.
        RemoteMaxHtlcs uint16

        // FundingShim is an optional funding shim that the caller can specify
        // in order to modify the channel funding workflow.
        FundingShim *lnrpc.FundingShim

        // SatPerVByte is the amount of satoshis to spend in chain fees per
        // virtual byte of the transaction.
        SatPerVByte btcutil.Amount

        // ConfTarget is the number of blocks that the funding transaction
        // should be confirmed in.
        ConfTarget fn.Option[int32]

        // CommitmentType is the commitment type that should be used for the
        // channel to be opened.
        CommitmentType lnrpc.CommitmentType

        // ZeroConf is used to determine if the channel will be a zero-conf
        // channel. This only works if the explicit negotiation is used with
        // anchors or script enforced leases.
        ZeroConf bool

        // ScidAlias denotes whether the channel will be an option-scid-alias
        // channel type negotiation.
        ScidAlias bool

        // BaseFee is the channel base fee applied during the channel
        // announcement phase.
        BaseFee uint64

        // FeeRate is the channel fee rate in ppm applied during the channel
        // announcement phase.
        FeeRate uint64

        // UseBaseFee, if set, instructs the downstream logic to apply the
        // user-specified channel base fee to the channel update announcement.
        // If set to false it avoids applying a base fee of 0 and instead
        // activates the default configured base fee.
        UseBaseFee bool

        // UseFeeRate, if set, instructs the downstream logic to apply the
        // user-specified channel fee rate to the channel update announcement.
        // If set to false it avoids applying a fee rate of 0 and instead
        // activates the default configured fee rate.
        UseFeeRate bool

        // FundMax is a boolean indicating whether the channel should be funded
        // with the maximum possible amount from the wallet.
        FundMax bool

        // Memo is an optional note-to-self containing some useful information
        // about the channel. This is stored locally only, and is purely for
        // reference. It has no bearing on the channel's operation. Max allowed
        // length is 500 characters.
        Memo string

        // Outpoints is a list of client-selected outpoints that should be used
        // for funding a channel. If Amt is specified then this amount is
        // allocated from the sum of outpoints towards funding. If the
        // FundMax flag is specified the entirety of selected funds is
        // allocated towards channel funding.
        Outpoints []*lnrpc.OutPoint

        // CloseAddress sets the upfront_shutdown_script parameter during
        // channel open. It is expected to be encoded as a bitcoin address.
        CloseAddress string
}
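
// exampleOpenChannelParams is an illustrative sketch (not part of the
// original file) of a typical params literal: a 1 BTC private channel
// that pushes 100k satoshis to the remote peer at a 5 sat/vB fee rate.
// The values are arbitrary.
var exampleOpenChannelParams = OpenChannelParams{
        Amt:         1 * btcutil.SatoshiPerBitcoin,
        PushAmt:     100_000,
        Private:     true,
        SatPerVByte: 5,
}
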
// prepareOpenChannel waits for both nodes to be synced to chain and returns an
// OpenChannelRequest.
func (h *HarnessTest) prepareOpenChannel(srcNode, destNode *node.HarnessNode,
        p OpenChannelParams) *lnrpc.OpenChannelRequest {

        // Wait until srcNode and destNode have the latest chain synced.
        // Otherwise, we may run into a check within the funding manager that
        // prevents any funding workflows from being kicked off if the chain
        // isn't yet synced.
        h.WaitForBlockchainSync(srcNode)
        h.WaitForBlockchainSync(destNode)

        // Specify the minimal confirmations of the UTXOs used for channel
        // funding.
        minConfs := int32(1)
        if p.SpendUnconfirmed {
                minConfs = 0
        }

        // Get the requested conf target. If not set, default to 6.
        confTarget := p.ConfTarget.UnwrapOr(6)

        // If there's a fee rate set, unset the conf target.
        if p.SatPerVByte != 0 {
                confTarget = 0
        }

        // Prepare the request.
        return &lnrpc.OpenChannelRequest{
                NodePubkey:         destNode.PubKey[:],
                LocalFundingAmount: int64(p.Amt),
                PushSat:            int64(p.PushAmt),
                Private:            p.Private,
                TargetConf:         confTarget,
                MinConfs:           minConfs,
                SpendUnconfirmed:   p.SpendUnconfirmed,
                MinHtlcMsat:        int64(p.MinHtlc),
                RemoteMaxHtlcs:     uint32(p.RemoteMaxHtlcs),
                FundingShim:        p.FundingShim,
                SatPerVbyte:        uint64(p.SatPerVByte),
                CommitmentType:     p.CommitmentType,
                ZeroConf:           p.ZeroConf,
                ScidAlias:          p.ScidAlias,
                BaseFee:            p.BaseFee,
                FeeRate:            p.FeeRate,
                UseBaseFee:         p.UseBaseFee,
                UseFeeRate:         p.UseFeeRate,
                FundMax:            p.FundMax,
                Memo:               p.Memo,
                Outpoints:          p.Outpoints,
                CloseAddress:       p.CloseAddress,
        }
}

// openChannelAssertPending attempts to open a channel between srcNode and
// destNode with the passed channel funding parameters. Once `OpenChannel`
// is called, it will consume the first event it receives from the open channel
// client and asserts it's a channel pending event.
func (h *HarnessTest) openChannelAssertPending(srcNode,
        destNode *node.HarnessNode,
        p OpenChannelParams) (*lnrpc.PendingUpdate, rpc.OpenChanClient) {

        // Prepare the request and open the channel.
        openReq := h.prepareOpenChannel(srcNode, destNode, p)
        respStream := srcNode.RPC.OpenChannel(openReq)

        // Consume the "channel pending" update. This waits until the node
        // notifies us that the final message in the channel funding workflow
        // has been sent to the remote node.
        resp := h.ReceiveOpenChannelUpdate(respStream)

        // Check that the update is channel pending.
        update, ok := resp.Update.(*lnrpc.OpenStatusUpdate_ChanPending)
        require.Truef(h, ok, "expected channel pending update, instead got %v",
                resp)

        return update.ChanPending, respStream
}

// OpenChannelAssertPending attempts to open a channel between srcNode and
// destNode with the passed channel funding parameters. Once the `OpenChannel`
// is called, it will consume the first event it receives from the open channel
// client and asserts it's a channel pending event. It returns the
// `PendingUpdate`.
func (h *HarnessTest) OpenChannelAssertPending(srcNode,
        destNode *node.HarnessNode, p OpenChannelParams) *lnrpc.PendingUpdate {

        resp, _ := h.openChannelAssertPending(srcNode, destNode, p)
        return resp
}

// OpenChannelAssertStream attempts to open a channel between srcNode and
// destNode with the passed channel funding parameters. Once the `OpenChannel`
// is called, it will consume the first event it receives from the open channel
// client and asserts it's a channel pending event. It returns the open channel
// stream.
func (h *HarnessTest) OpenChannelAssertStream(srcNode,
        destNode *node.HarnessNode, p OpenChannelParams) rpc.OpenChanClient {

        _, stream := h.openChannelAssertPending(srcNode, destNode, p)
        return stream
}

// OpenChannel attempts to open a channel with the specified parameters
// extended from Alice to Bob. Additionally, for public channels, it will mine
// extra blocks so they are announced to the network. Specifically, the
// following items are asserted:
//   - for a non-zero-conf channel, 1 block will be mined to confirm the
//     funding tx.
//   - both nodes should see the channel edge update in their network graph.
//   - both nodes can report the status of the new channel from ListChannels.
//   - extra blocks are mined if it's a public channel.
func (h *HarnessTest) OpenChannel(alice, bob *node.HarnessNode,
        p OpenChannelParams) *lnrpc.ChannelPoint {

        // First, open the channel without announcing it.
        cp := h.OpenChannelNoAnnounce(alice, bob, p)

        // If this is a private channel, there's no need to mine extra blocks
        // since it will never be announced to the network.
        if p.Private {
                return cp
        }

        // Mine extra blocks to announce the channel.
        if p.ZeroConf {
                // For a zero-conf channel, no blocks have been mined yet, so
                // we need to mine all 6 blocks here, asserting that the
                // funding tx is confirmed in the first of them.
                h.MineBlocksAndAssertNumTxes(numBlocksOpenChannel, 1)
        } else {
                // For a regular channel, 1 block has already been mined to
                // confirm the funding transaction, so we mine 5 blocks.
                h.MineBlocks(numBlocksOpenChannel - 1)
        }

        return cp
}
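
// exampleOpenChannelFlow is an illustrative sketch (not part of the
// original file) tying the pieces together: fund two nodes, connect
// them, and open a public channel. ConnectNodes is assumed to be the
// peer-connection helper defined elsewhere in this package.
func exampleOpenChannelFlow(h *HarnessTest) *lnrpc.ChannelPoint {
        alice := h.NewNodeWithCoins("alice", nil)
        bob := h.NewNodeWithCoins("bob", nil)
        h.ConnectNodes(alice, bob)

        // OpenChannel mines the confirmation and announcement blocks and
        // asserts both nodes see the channel.
        return h.OpenChannel(alice, bob, OpenChannelParams{Amt: 1_000_000})
}
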
1224

1225
// OpenChannelNoAnnounce attempts to open a channel with the specified
// parameters extended from Alice to Bob without mining the necessary blocks to
// announce the channel. Additionally, the following items are asserted,
//   - for a non-zero-conf channel, 1 block will be mined to confirm the
//     funding tx.
//   - both nodes should see the channel edge update in their network graph.
//   - both nodes can report the status of the new channel from ListChannels.
func (h *HarnessTest) OpenChannelNoAnnounce(alice, bob *node.HarnessNode,
        p OpenChannelParams) *lnrpc.ChannelPoint {

        chanOpenUpdate := h.OpenChannelAssertStream(alice, bob, p)

        // Open a zero-conf channel.
        if p.ZeroConf {
                return h.openChannelZeroConf(alice, bob, chanOpenUpdate)
        }

        // Open a non-zero-conf channel.
        return h.openChannel(alice, bob, chanOpenUpdate)
}

// openChannel attempts to open a channel with the specified parameters
// extended from Alice to Bob. Additionally, the following items are asserted,
//   - 1 block is mined and the funding transaction should be found in it.
//   - both nodes should see the channel edge update in their network graph.
//   - both nodes can report the status of the new channel from ListChannels.
func (h *HarnessTest) openChannel(alice, bob *node.HarnessNode,
        stream rpc.OpenChanClient) *lnrpc.ChannelPoint {

        // Mine 1 block to confirm the funding transaction.
        block := h.MineBlocksAndAssertNumTxes(1, 1)[0]

        // Wait for the channel open event.
        fundingChanPoint := h.WaitForChannelOpenEvent(stream)

        // Check that the funding tx is found in the mined block.
        fundingTxID := h.GetChanPointFundingTxid(fundingChanPoint)
        h.AssertTxInBlock(block, fundingTxID)

        // Check that both alice and bob have seen the channel from their
        // network topology.
        h.AssertChannelInGraph(alice, fundingChanPoint)
        h.AssertChannelInGraph(bob, fundingChanPoint)

        // Check that the channel can be seen in their ListChannels.
        h.AssertChannelExists(alice, fundingChanPoint)
        h.AssertChannelExists(bob, fundingChanPoint)

        return fundingChanPoint
}

// openChannelZeroConf attempts to open a channel with the specified parameters
// extended from Alice to Bob. Additionally, the following items are asserted,
//   - both nodes should see the channel edge update in their network graph.
//   - both nodes can report the status of the new channel from ListChannels.
func (h *HarnessTest) openChannelZeroConf(alice, bob *node.HarnessNode,
        stream rpc.OpenChanClient) *lnrpc.ChannelPoint {

        // Wait for the channel open event.
        fundingChanPoint := h.WaitForChannelOpenEvent(stream)

        // Check that both alice and bob have seen the channel from their
        // network topology.
        h.AssertChannelInGraph(alice, fundingChanPoint)
        h.AssertChannelInGraph(bob, fundingChanPoint)

        // Finally, check that the channel can be seen in their ListChannels.
        h.AssertChannelExists(alice, fundingChanPoint)
        h.AssertChannelExists(bob, fundingChanPoint)

        return fundingChanPoint
}

// OpenChannelAssertErr opens a channel between srcNode and destNode, and
// asserts that the expected error is returned from the channel opening.
func (h *HarnessTest) OpenChannelAssertErr(srcNode, destNode *node.HarnessNode,
        p OpenChannelParams, expectedErr error) {

        // Prepare the request and open the channel.
        openReq := h.prepareOpenChannel(srcNode, destNode, p)
        respStream := srcNode.RPC.OpenChannel(openReq)

        // Receive an error to be sent from the stream.
        _, err := h.receiveOpenChannelUpdate(respStream)
        require.NotNil(h, err, "expected channel opening to fail")

        // Use string comparison here as we haven't codified all the RPC errors
        // yet.
        require.Containsf(h, err.Error(), expectedErr.Error(), "unexpected "+
                "error returned, want %v, got %v", expectedErr, err)
}

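// Example usage (editor's illustrative sketch, not part of the original file;
// the oversized amount and error text are hypothetical):
//
//        // Expect the funding flow to fail, e.g. when the amount exceeds
//        // the remote node's policy.
//        h.OpenChannelAssertErr(
//                alice, bob, OpenChannelParams{Amt: tooBigAmt},
//                fmt.Errorf("channel too large"),
//        )
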
// closeChannelOpts holds the options for closing a channel.
type closeChannelOpts struct {
        feeRate fn.Option[chainfee.SatPerVByte]

        // localTxOnly is a boolean indicating if we should only attempt to
        // consume close pending notifications for the local transaction.
        localTxOnly bool

        // skipMempoolCheck is a boolean indicating if we should skip the
        // normal mempool check after a coop close.
        skipMempoolCheck bool

        // errString is an expected error. If this is non-blank, then we'll
        // assert that the coop close wasn't possible and that the returned
        // error contains this string.
        errString string
}

// CloseChanOpt is a functional option to modify the way we close a channel.
type CloseChanOpt func(*closeChannelOpts)

// WithCoopCloseFeeRate is a functional option to set the fee rate for a coop
// close attempt.
func WithCoopCloseFeeRate(rate chainfee.SatPerVByte) CloseChanOpt {
        return func(o *closeChannelOpts) {
                o.feeRate = fn.Some(rate)
        }
}

// WithLocalTxNotify is a functional option to indicate that we should only
// notify for the local txn. This is useful for the RBF coop close type, as
// it'll notify for both local and remote txns.
func WithLocalTxNotify() CloseChanOpt {
        return func(o *closeChannelOpts) {
                o.localTxOnly = true
        }
}

// WithSkipMempoolCheck is a functional option to indicate that we should skip
// the mempool check. This can be used when a coop close iteration may not
// result in a newly broadcast transaction.
func WithSkipMempoolCheck() CloseChanOpt {
        return func(o *closeChannelOpts) {
                o.skipMempoolCheck = true
        }
}

// WithExpectedErrString is a functional option that can be used to assert that
// an error occurs during the coop close process.
func WithExpectedErrString(errString string) CloseChanOpt {
        return func(o *closeChannelOpts) {
                o.errString = errString
        }
}

// defaultCloseOpts returns the set of default close options.
func defaultCloseOpts() *closeChannelOpts {
        return &closeChannelOpts{}
}

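// Example usage (editor's illustrative sketch, not part of the original
// file): the functional options above compose when passed to
// CloseChannelAssertPending defined below,
//
//        stream, event := h.CloseChannelAssertPending(
//                alice, chanPoint, false,
//                WithCoopCloseFeeRate(chainfee.SatPerVByte(10)),
//                WithLocalTxNotify(),
//        )
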
// CloseChannelAssertPending attempts to close the channel indicated by the
// passed channel point, initiated by the passed node. Once the CloseChannel
// rpc is called, it will consume one event and assert it's a close pending
// event. In addition, it will check that the closing tx can be found in the
// mempool.
func (h *HarnessTest) CloseChannelAssertPending(hn *node.HarnessNode,
        cp *lnrpc.ChannelPoint, force bool,
        opts ...CloseChanOpt) (rpc.CloseChanClient, *lnrpc.CloseStatusUpdate) {

        closeOpts := defaultCloseOpts()
        for _, optFunc := range opts {
                optFunc(closeOpts)
        }

        // Call the RPC to close the channel.
        closeReq := &lnrpc.CloseChannelRequest{
                ChannelPoint: cp,
                Force:        force,
                NoWait:       true,
        }

        closeOpts.feeRate.WhenSome(func(feeRate chainfee.SatPerVByte) {
                closeReq.SatPerVbyte = uint64(feeRate)
        })

        var (
                stream rpc.CloseChanClient
                event  *lnrpc.CloseStatusUpdate
                err    error
        )

        // Consume the "channel close" update in order to wait for the closing
        // transaction to be broadcast, then wait for the closing tx to be seen
        // within the network.
        stream = hn.RPC.CloseChannel(closeReq)
        _, err = h.ReceiveCloseChannelUpdate(stream)
        require.NoError(h, err, "close channel update got error: %v", err)

        var closeTxid *chainhash.Hash
        for {
                event, err = h.ReceiveCloseChannelUpdate(stream)
                if err != nil {
                        h.Logf("Test: %s, close channel got error: %v",
                                h.manager.currentTestCase, err)
                }
                if err != nil && closeOpts.errString == "" {
                        require.NoError(h, err, "retry closing channel failed")
                } else if err != nil && closeOpts.errString != "" {
                        require.ErrorContains(h, err, closeOpts.errString)
                        return nil, nil
                }

                pendingClose, ok := event.Update.(*lnrpc.CloseStatusUpdate_ClosePending) //nolint:ll
                require.Truef(h, ok, "expected channel close "+
                        "update, instead got %v", pendingClose)

                if !pendingClose.ClosePending.LocalCloseTx &&
                        closeOpts.localTxOnly {

                        continue
                }

                notifyRate := pendingClose.ClosePending.FeePerVbyte
                if closeOpts.localTxOnly &&
                        notifyRate != int64(closeReq.SatPerVbyte) {

                        continue
                }

                closeTxid, err = chainhash.NewHash(
                        pendingClose.ClosePending.Txid,
                )
                require.NoErrorf(h, err, "unable to decode closeTxid: %v",
                        pendingClose.ClosePending.Txid)

                break
        }

        if !closeOpts.skipMempoolCheck {
                // Assert the closing tx is in the mempool.
                h.miner.AssertTxInMempool(*closeTxid)
        }

        return stream, event
}

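// Example usage (editor's illustrative sketch, not part of the original
// file): after the pending close is asserted, a test typically mines a block
// and asserts the channel is fully closed via the same stream,
//
//        stream, _ := h.CloseChannelAssertPending(alice, chanPoint, false)
//        closingTxid := h.AssertStreamChannelCoopClosed(
//                alice, chanPoint, false, stream,
//        )
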
// CloseChannel attempts to coop close a non-anchored channel identified by the
// passed channel point owned by the passed harness node. The following items
// are asserted,
//  1. a close pending event is sent from the close channel client.
//  2. the closing tx is found in the mempool.
//  3. the node reports the channel as waiting to close.
//  4. a block is mined and the closing tx should be found in it.
//  5. the node reports zero waiting close channels.
//  6. the node receives a topology update regarding the channel close.
func (h *HarnessTest) CloseChannel(hn *node.HarnessNode,
        cp *lnrpc.ChannelPoint) chainhash.Hash {

        stream, _ := h.CloseChannelAssertPending(hn, cp, false)

        return h.AssertStreamChannelCoopClosed(hn, cp, false, stream)
}

// ForceCloseChannel attempts to force close a non-anchored channel identified
// by the passed channel point owned by the passed harness node. The following
// items are asserted,
//  1. a close pending event is sent from the close channel client.
//  2. the closing tx is found in the mempool.
//  3. the node reports the channel as waiting to close.
//  4. a block is mined and the closing tx should be found in it.
//  5. the node reports zero waiting close channels.
//  6. the node receives a topology update regarding the channel close.
//  7. mine DefaultCSV-1 blocks.
//  8. the node reports zero pending force close channels.
func (h *HarnessTest) ForceCloseChannel(hn *node.HarnessNode,
        cp *lnrpc.ChannelPoint) chainhash.Hash {

        stream, _ := h.CloseChannelAssertPending(hn, cp, true)

        closingTxid := h.AssertStreamChannelForceClosed(hn, cp, false, stream)

        // Cleanup the force close.
        h.CleanupForceClose(hn)

        return closingTxid
}

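// Example usage (editor's illustrative sketch, not part of the original
// file): force closing from alice's side and capturing the closing txid,
//
//        closingTxid := h.ForceCloseChannel(alice, chanPoint)
//
// Note that ForceCloseChannel already calls CleanupForceClose internally, so
// the mempool is left clean afterwards.
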
// CloseChannelAssertErr closes the given channel and asserts that an error is
// returned.
func (h *HarnessTest) CloseChannelAssertErr(hn *node.HarnessNode,
        req *lnrpc.CloseChannelRequest) error {

        // Call the RPC to close the channel.
        stream := hn.RPC.CloseChannel(req)

        // Consume the "channel close" update in order to wait for the closing
        // transaction to be broadcast, then wait for the closing tx to be seen
        // within the network.
        _, err := h.ReceiveCloseChannelUpdate(stream)
        require.Errorf(h, err, "%s: expect close channel to return an error",
                hn.Name())

        return err
}

// IsNeutrinoBackend returns a bool indicating whether the node is using
// neutrino as its backend. This is useful when we want to skip certain tests
// which cannot be done with a neutrino backend.
func (h *HarnessTest) IsNeutrinoBackend() bool {
        return h.manager.chainBackend.Name() == NeutrinoBackendName
}

// fundCoins attempts to send amt satoshis from the internal mining node to the
// targeted lightning node. The confirmed boolean indicates whether the
// transaction that pays to the target should confirm. For the neutrino
// backend, the unconfirmed balance check is skipped since neutrino doesn't
// support unconfirmed outputs.
func (h *HarnessTest) fundCoins(amt btcutil.Amount, target *node.HarnessNode,
        addrType lnrpc.AddressType, confirmed bool) *wire.MsgTx {

        initialBalance := target.RPC.WalletBalance()

        // First, obtain an address from the target lightning node, preferring
        // to receive a p2wkh address such that the output can immediately be
        // used as an input to a funding transaction.
        req := &lnrpc.NewAddressRequest{Type: addrType}
        resp := target.RPC.NewAddress(req)
        addr := h.DecodeAddress(resp.Address)
        addrScript := h.PayToAddrScript(addr)

        // Generate a transaction which creates an output to the target
        // pkScript of the desired amount.
        output := &wire.TxOut{
                PkScript: addrScript,
                Value:    int64(amt),
        }
        txid := h.miner.SendOutput(output, defaultMinerFeeRate)

        // Get the funding tx.
        tx := h.GetRawTransaction(*txid)
        msgTx := tx.MsgTx()

        // Since neutrino doesn't support unconfirmed outputs, skip this check.
        if !h.IsNeutrinoBackend() {
                expectedBalance := btcutil.Amount(
                        initialBalance.UnconfirmedBalance,
                ) + amt
                h.WaitForBalanceUnconfirmed(target, expectedBalance)
        }

        // If the transaction should remain unconfirmed, then we'll wait until
        // the target node's unconfirmed balance reflects the expected balance
        // and exit.
        if !confirmed {
                return msgTx
        }

        // Otherwise, we'll generate 1 new block to ensure the output gains a
        // sufficient number of confirmations and wait for the balance to
        // reflect what's expected.
        h.MineBlockWithTx(msgTx)

        expectedBalance := btcutil.Amount(initialBalance.ConfirmedBalance) + amt
        h.WaitForBalanceConfirmed(target, expectedBalance)

        return msgTx
}

// FundCoins attempts to send amt satoshis from the internal mining node to the
// targeted lightning node using a P2WKH address. 1 block is mined afterwards
// to confirm the transaction.
func (h *HarnessTest) FundCoins(amt btcutil.Amount,
        hn *node.HarnessNode) *wire.MsgTx {

        return h.fundCoins(amt, hn, lnrpc.AddressType_WITNESS_PUBKEY_HASH, true)
}

// FundCoinsUnconfirmed attempts to send amt satoshis from the internal mining
// node to the targeted lightning node using a P2WKH address. No blocks are
// mined afterwards and the UTXOs are unconfirmed.
func (h *HarnessTest) FundCoinsUnconfirmed(amt btcutil.Amount,
        hn *node.HarnessNode) *wire.MsgTx {

        return h.fundCoins(
                amt, hn, lnrpc.AddressType_WITNESS_PUBKEY_HASH, false,
        )
}

// FundCoinsNP2WKH attempts to send amt satoshis from the internal mining node
// to the targeted lightning node using an NP2WKH address.
func (h *HarnessTest) FundCoinsNP2WKH(amt btcutil.Amount,
        target *node.HarnessNode) *wire.MsgTx {

        return h.fundCoins(
                amt, target, lnrpc.AddressType_NESTED_PUBKEY_HASH, true,
        )
}

// FundCoinsP2TR attempts to send amt satoshis from the internal mining node to
// the targeted lightning node using a P2TR address.
func (h *HarnessTest) FundCoinsP2TR(amt btcutil.Amount,
        target *node.HarnessNode) *wire.MsgTx {

        return h.fundCoins(amt, target, lnrpc.AddressType_TAPROOT_PUBKEY, true)
}

// FundNumCoins attempts to send the given number of UTXOs from the internal
// mining node to the targeted lightning node using a P2WKH address. Each UTXO
// has an amount of 1 BTC. 1 block is mined to confirm the tx.
func (h *HarnessTest) FundNumCoins(hn *node.HarnessNode, num int) {
        // Get the initial balance first.
        resp := hn.RPC.WalletBalance()
        initialBalance := btcutil.Amount(resp.ConfirmedBalance)

        const fundAmount = 1 * btcutil.SatoshiPerBitcoin

        // Send out the outputs from the miner.
        for i := 0; i < num; i++ {
                h.createAndSendOutput(
                        hn, fundAmount, lnrpc.AddressType_WITNESS_PUBKEY_HASH,
                )
        }

        // Wait for ListUnspent to show the correct number of unconfirmed
        // UTXOs.
        //
        // Since neutrino doesn't support unconfirmed outputs, skip this check.
        if !h.IsNeutrinoBackend() {
                h.AssertNumUTXOsUnconfirmed(hn, num)
        }

        // Mine a block to confirm the transactions.
        h.MineBlocksAndAssertNumTxes(1, num)

        // Now block until the wallet has fully synced up.
        totalAmount := btcutil.Amount(fundAmount * num)
        expectedBalance := initialBalance + totalAmount
        h.WaitForBalanceConfirmed(hn, expectedBalance)
}

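// Example usage (editor's illustrative sketch, not part of the original file;
// alice, bob, carol and dave are hypothetical nodes): the funding helpers
// above differ only in address type and confirmation behavior,
//
//        h.FundCoins(btcutil.SatoshiPerBitcoin, alice)   // P2WKH, confirmed.
//        h.FundCoinsP2TR(btcutil.SatoshiPerBitcoin, bob) // P2TR, confirmed.
//        h.FundCoinsUnconfirmed(btcutil.SatoshiPerBitcoin, carol) // mempool.
//        h.FundNumCoins(dave, 3) // three confirmed 1-BTC UTXOs.
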
// completePaymentRequestsAssertStatus sends payments from a node to complete
// all payment requests. This function does not return until all payments
// have reached the specified status.
func (h *HarnessTest) completePaymentRequestsAssertStatus(hn *node.HarnessNode,
        paymentRequests []string, status lnrpc.Payment_PaymentStatus,
        opts ...HarnessOpt) {

        payOpts := defaultHarnessOpts()
        for _, opt := range opts {
                opt(&payOpts)
        }

        // Create a buffered chan to signal the results.
        results := make(chan rpc.PaymentClient, len(paymentRequests))

        // send sends a payment and signals the resulting payment stream via
        // the results channel.
        send := func(payReq string) {
                req := &routerrpc.SendPaymentRequest{
                        PaymentRequest: payReq,
                        TimeoutSeconds: int32(wait.PaymentTimeout.Seconds()),
                        FeeLimitMsat:   noFeeLimitMsat,
                        Amp:            payOpts.useAMP,
                }
                stream := hn.RPC.SendPayment(req)

                // Signal that the send succeeded.
                results <- stream
        }

        // Launch all payments simultaneously.
        for _, payReq := range paymentRequests {
                payReqCopy := payReq
                go send(payReqCopy)
        }

        // Wait for all payments to report the expected status, sharing one
        // overall deadline across the whole batch.
        timer := time.After(wait.PaymentTimeout)
        for range paymentRequests {
                select {
                case stream := <-results:
                        h.AssertPaymentStatusFromStream(stream, status)

                case <-timer:
                        require.Failf(h, "timeout", "%s: waiting payment "+
                                "results timeout", hn.Name())
                }
        }
}

// CompletePaymentRequests sends payments from a node to complete all payment
// requests. This function does not return until all payments successfully
// complete without errors.
func (h *HarnessTest) CompletePaymentRequests(hn *node.HarnessNode,
        paymentRequests []string, opts ...HarnessOpt) {

        h.completePaymentRequestsAssertStatus(
                hn, paymentRequests, lnrpc.Payment_SUCCEEDED, opts...,
        )
}

// CompletePaymentRequestsNoWait sends payments from a node to complete all
// payment requests without waiting for the results. Instead, it checks that
// the number of updates in the specified channel has increased.
func (h *HarnessTest) CompletePaymentRequestsNoWait(hn *node.HarnessNode,
        paymentRequests []string, chanPoint *lnrpc.ChannelPoint) {

        // We start by getting the current state of the client's channels. This
        // is needed to ensure the payments actually have been committed before
        // we return.
        oldResp := h.GetChannelByChanPoint(hn, chanPoint)

        // Send payments and assert they are in-flight.
        h.completePaymentRequestsAssertStatus(
                hn, paymentRequests, lnrpc.Payment_IN_FLIGHT,
        )

        // We are not waiting for feedback in the form of a response, but we
        // should still wait long enough for the server to receive and handle
        // the send before cancelling the request. We wait until the number of
        // updates to the given channel has increased before we return.
        err := wait.NoError(func() error {
                newResp := h.GetChannelByChanPoint(hn, chanPoint)

                // If this channel has an increased number of updates, we
                // assume the payments are committed, and we can return.
                if newResp.NumUpdates > oldResp.NumUpdates {
                        return nil
                }

                // Otherwise return an error as the NumUpdates has not
                // increased.
                return fmt.Errorf("%s: channel:%v not updated after sending "+
                        "payments, old updates: %v, new updates: %v", hn.Name(),
                        chanPoint, oldResp.NumUpdates, newResp.NumUpdates)
        }, DefaultTimeout)
        require.NoError(h, err, "timeout while checking for channel updates")
}

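// Example usage (editor's illustrative sketch, not part of the original
// file): paying a batch of invoices and waiting for them all to succeed,
//
//        payReqs, _, _ := h.CreatePayReqs(bob, 10_000, 3)
//        h.CompletePaymentRequests(alice, payReqs)
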
// OpenChannelPsbt attempts to open a channel between srcNode and destNode with
// the passed channel funding parameters. It will assert if the expected step
// of funding the PSBT is not received from the source node.
func (h *HarnessTest) OpenChannelPsbt(srcNode, destNode *node.HarnessNode,
        p OpenChannelParams) (rpc.OpenChanClient, []byte) {

        // Wait until srcNode and destNode have the latest chain synced.
        // Otherwise, we may run into a check within the funding manager that
        // prevents any funding workflows from being kicked off if the chain
        // isn't yet synced.
        h.WaitForBlockchainSync(srcNode)
        h.WaitForBlockchainSync(destNode)

        // Send the request to open a channel to the source node now. This will
        // open a long-lived stream where we'll receive status updates about
        // the progress of the channel.
        req := &lnrpc.OpenChannelRequest{
                NodePubkey:         destNode.PubKey[:],
                LocalFundingAmount: int64(p.Amt),
                PushSat:            int64(p.PushAmt),
                Private:            p.Private,
                SpendUnconfirmed:   p.SpendUnconfirmed,
                MinHtlcMsat:        int64(p.MinHtlc),
                FundingShim:        p.FundingShim,
                CommitmentType:     p.CommitmentType,
        }
        respStream := srcNode.RPC.OpenChannel(req)

        // Consume the "PSBT funding ready" update. This waits until the node
        // notifies us that the PSBT can now be funded.
        resp := h.ReceiveOpenChannelUpdate(respStream)
        upd, ok := resp.Update.(*lnrpc.OpenStatusUpdate_PsbtFund)
        require.Truef(h, ok, "expected PSBT funding update, got %v", resp)

        // Make sure the channel funding address has the correct type for the
        // given commitment type.
        fundingAddr, err := btcutil.DecodeAddress(
                upd.PsbtFund.FundingAddress, miner.HarnessNetParams,
        )
        require.NoError(h, err)

        switch p.CommitmentType {
        case lnrpc.CommitmentType_SIMPLE_TAPROOT:
                require.IsType(h, &btcutil.AddressTaproot{}, fundingAddr)

        default:
                require.IsType(
                        h, &btcutil.AddressWitnessScriptHash{}, fundingAddr,
                )
        }

        return respStream, upd.PsbtFund.Psbt
}

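// Example usage (editor's illustrative sketch, not part of the original
// file): the returned PSBT is funded externally, then the test typically
// verifies and finalizes it via the source node's FundingStateStep RPC
// before waiting on the stream for the final channel open update. The
// funding wallet used below is an assumption left to the caller.
//
//        respStream, psbtBytes := h.OpenChannelPsbt(alice, bob, p)
//        // ...fund psbtBytes with an external wallet, verify and finalize
//        // it via FundingStateStep, then consume the channel open update
//        // from respStream.
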
// CleanupForceClose mines blocks to clean up the force close process. This is
// used for tests that are not asserting the expected behavior is found during
// the force close process, e.g., num of sweeps, etc. Instead, it provides a
// shortcut to move the test forward with a clean mempool.
func (h *HarnessTest) CleanupForceClose(hn *node.HarnessNode) {
        // Wait for the channel to be marked pending force close.
        h.AssertNumPendingForceClose(hn, 1)

        // Mine enough blocks for the node to sweep its funds from the force
        // closed channel. The commit sweep resolver offers the input to the
        // sweeper when the channel is force closed, and the sweep tx is
        // broadcast at defaultCSV-1.
        //
        // NOTE: we might mine empty blocks here as we don't know the exact
        // number of blocks to mine. This may end up mining more blocks than
        // needed.
        h.MineEmptyBlocks(node.DefaultCSV - 1)

        // Assert there is one pending sweep.
        h.AssertNumPendingSweeps(hn, 1)

        // The node should now sweep the funds, clean up by mining the sweeping
        // tx.
        h.MineBlocksAndAssertNumTxes(1, 1)

        // Mine blocks to get any second level HTLC resolved. If there are no
        // HTLCs, this will behave like h.AssertNumPendingCloseChannels.
        h.mineTillForceCloseResolved(hn)
}

// CreatePayReqs is a helper method that will create a slice of payment
// requests for the given node.
func (h *HarnessTest) CreatePayReqs(hn *node.HarnessNode,
        paymentAmt btcutil.Amount, numInvoices int,
        routeHints ...*lnrpc.RouteHint) ([]string, [][]byte, []*lnrpc.Invoice) {

        payReqs := make([]string, numInvoices)
        rHashes := make([][]byte, numInvoices)
        invoices := make([]*lnrpc.Invoice, numInvoices)
        for i := 0; i < numInvoices; i++ {
                preimage := h.Random32Bytes()

                invoice := &lnrpc.Invoice{
                        Memo:       "testing",
                        RPreimage:  preimage,
                        Value:      int64(paymentAmt),
                        RouteHints: routeHints,
                }
                resp := hn.RPC.AddInvoice(invoice)

                // Set the payment address in the invoice so the caller can
                // properly use it.
                invoice.PaymentAddr = resp.PaymentAddr

                payReqs[i] = resp.PaymentRequest
                rHashes[i] = resp.RHash
                invoices[i] = invoice
        }

        return payReqs, rHashes, invoices
}

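// Example usage (editor's illustrative sketch, not part of the original
// file): creating a single 50k-sat invoice and extracting its fields,
//
//        payReqs, rHashes, invoices := h.CreatePayReqs(bob, 50_000, 1)
//        payReq, rHash, invoice := payReqs[0], rHashes[0], invoices[0]
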
// BackupDB creates a backup of the current database. It will stop the node
// first, copy the database files, and restart the node.
func (h *HarnessTest) BackupDB(hn *node.HarnessNode) {
        restart := h.SuspendNode(hn)

        err := hn.BackupDB()
        require.NoErrorf(h, err, "%s: failed to backup db", hn.Name())

        err = restart()
        require.NoErrorf(h, err, "%s: failed to restart", hn.Name())
}

// RestartNodeAndRestoreDB restarts a given node with a callback to restore the
// db.
func (h *HarnessTest) RestartNodeAndRestoreDB(hn *node.HarnessNode) {
        cb := func() error { return hn.RestoreDB() }
        err := h.manager.restartNode(h.runCtx, hn, cb)
        require.NoErrorf(h, err, "failed to restart node %s", hn.Name())

        err = h.manager.unlockNode(hn)
        require.NoErrorf(h, err, "failed to unlock node %s", hn.Name())

        // Give the node some time to catch up with the chain before we
        // continue with the tests.
        h.WaitForBlockchainSync(hn)
}

// CleanShutDown is used to quickly end a test by shutting down all non-standby
// nodes and mining blocks to empty the mempool.
//
// NOTE: this method provides a faster exit for a test that involves force
// closures as the caller doesn't need to mine all the blocks to make sure the
// mempool is empty.
func (h *HarnessTest) CleanShutDown() {
        // First, shutdown all nodes to prevent new transactions being created
        // and fed into the mempool.
        h.shutdownAllNodes()

        // Now mine blocks till the mempool is empty.
        h.cleanMempool()
}

// QueryChannelByChanPoint tries to find a channel matching the channel point
// and asserts. It returns the channel found.
func (h *HarnessTest) QueryChannelByChanPoint(hn *node.HarnessNode,
        chanPoint *lnrpc.ChannelPoint,
        opts ...ListChannelOption) *lnrpc.Channel {

        channel, err := h.findChannel(hn, chanPoint, opts...)
        require.NoError(h, err, "failed to query channel")

        return channel
}

// SendPaymentAndAssertStatus sends a payment from the passed node and asserts
// the desired status is reached.
func (h *HarnessTest) SendPaymentAndAssertStatus(hn *node.HarnessNode,
        req *routerrpc.SendPaymentRequest,
        status lnrpc.Payment_PaymentStatus) *lnrpc.Payment {

        stream := hn.RPC.SendPayment(req)
        return h.AssertPaymentStatusFromStream(stream, status)
}

// SendPaymentAssertFail sends a payment from the passed node and asserts the
// payment is failed with the specified failure reason.
func (h *HarnessTest) SendPaymentAssertFail(hn *node.HarnessNode,
        req *routerrpc.SendPaymentRequest,
        reason lnrpc.PaymentFailureReason) *lnrpc.Payment {

        payment := h.SendPaymentAndAssertStatus(hn, req, lnrpc.Payment_FAILED)
        require.Equal(h, reason, payment.FailureReason,
                "payment failureReason not matched")

        return payment
}

// SendPaymentAssertSettled sends a payment from the passed node and asserts
// the payment is settled.
func (h *HarnessTest) SendPaymentAssertSettled(hn *node.HarnessNode,
        req *routerrpc.SendPaymentRequest) *lnrpc.Payment {

        return h.SendPaymentAndAssertStatus(hn, req, lnrpc.Payment_SUCCEEDED)
}

// SendPaymentAssertInflight sends a payment from the passed node and asserts
// the payment is inflight.
func (h *HarnessTest) SendPaymentAssertInflight(hn *node.HarnessNode,
        req *routerrpc.SendPaymentRequest) *lnrpc.Payment {

        return h.SendPaymentAndAssertStatus(hn, req, lnrpc.Payment_IN_FLIGHT)
}

// OpenChannelRequest is used to open a channel using the method
// OpenMultiChannelsAsync.
type OpenChannelRequest struct {
        // Local is the funding node.
        Local *node.HarnessNode

        // Remote is the receiving node.
        Remote *node.HarnessNode

        // Param is the open channel params.
        Param OpenChannelParams

        // stream is the client created after calling OpenChannel RPC.
        stream rpc.OpenChanClient

        // result is a channel used to send the channel point once the funding
        // has succeeded.
        result chan *lnrpc.ChannelPoint
}

// OpenMultiChannelsAsync takes a list of OpenChannelRequest and opens them in
// batch. The channel points are returned in the same order as the requests
// once all of the channel opens succeed.
//
// NOTE: compared to opening multiple channels sequentially, this method will
// be faster as it doesn't need to mine 6 blocks for each channel open.
// However, it does make debugging the logs more difficult as messages are
// intertwined.
func (h *HarnessTest) OpenMultiChannelsAsync(
        reqs []*OpenChannelRequest) []*lnrpc.ChannelPoint {

        // openChannel opens a channel based on the request.
        openChannel := func(req *OpenChannelRequest) {
                stream := h.OpenChannelAssertStream(
                        req.Local, req.Remote, req.Param,
                )
                req.stream = stream
        }

        // assertChannelOpen is a helper closure that asserts a channel is
        // open.
        assertChannelOpen := func(req *OpenChannelRequest) {
                // Wait for the channel open event from the stream.
                cp := h.WaitForChannelOpenEvent(req.stream)

                if !req.Param.Private {
                        // Check that both the local and remote nodes have
                        // seen the channel in their network graph.
                        h.AssertChannelInGraph(req.Local, cp)
                        h.AssertChannelInGraph(req.Remote, cp)
                }

                // Finally, check that the channel can be seen in their
                // ListChannels.
                h.AssertChannelExists(req.Local, cp)
                h.AssertChannelExists(req.Remote, cp)

                req.result <- cp
        }

        // Go through the requests and make the OpenChannel RPC call.
        for _, r := range reqs {
                openChannel(r)
        }

        // Mine one block to confirm all the funding transactions.
        h.MineBlocksAndAssertNumTxes(1, len(reqs))

        // Mine 5 more blocks so all the public channels are announced to the
        // network.
        h.MineBlocks(numBlocksOpenChannel - 1)

        // Once the blocks are mined, we fire a goroutine for each of the
        // requests to watch for the channel opening.
        for _, r := range reqs {
                r.result = make(chan *lnrpc.ChannelPoint, 1)
                go assertChannelOpen(r)
        }

        // Finally, collect the results.
        channelPoints := make([]*lnrpc.ChannelPoint, 0)
        for _, r := range reqs {
                select {
                case cp := <-r.result:
                        channelPoints = append(channelPoints, cp)

                case <-time.After(wait.ChannelOpenTimeout):
                        require.Failf(h, "timeout", "wait channel point "+
                                "timeout for channel %s=>%s", r.Local.Name(),
                                r.Remote.Name())
                }
        }

        // Assert that we have the expected number of channel points.
        require.Len(h, channelPoints, len(reqs),
                "returned channel points do not match")

        return channelPoints
}

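// Example usage (editor's illustrative sketch, not part of the original file;
// alice, bob, carol and amt are hypothetical): opening alice->bob and
// bob->carol in one batch so only 6 blocks are mined in total,
//
//        reqs := []*OpenChannelRequest{
//                {Local: alice, Remote: bob, Param: OpenChannelParams{Amt: amt}},
//                {Local: bob, Remote: carol, Param: OpenChannelParams{Amt: amt}},
//        }
//        chanPoints := h.OpenMultiChannelsAsync(reqs)
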
// ReceiveInvoiceUpdate waits until a message is received on the subscribe
// invoice stream or the timeout is reached.
func (h *HarnessTest) ReceiveInvoiceUpdate(
        stream rpc.InvoiceUpdateClient) *lnrpc.Invoice {

        chanMsg := make(chan *lnrpc.Invoice)
        errChan := make(chan error)
        go func() {
                // Consume one message. This will block until the message is
                // received.
                resp, err := stream.Recv()
                if err != nil {
                        errChan <- err
                        return
                }
                chanMsg <- resp
        }()

        select {
        case <-time.After(DefaultTimeout):
                require.Fail(h, "timeout", "timeout receiving invoice update")

        case err := <-errChan:
                require.Failf(h, "err from stream",
                        "received err from stream: %v", err)

        case updateMsg := <-chanMsg:
                return updateMsg
        }

        return nil
}

// CalculateTxFee retrieves parent transactions and reconstructs the fee paid.
func (h *HarnessTest) CalculateTxFee(tx *wire.MsgTx) btcutil.Amount {
        var balance btcutil.Amount
        for _, in := range tx.TxIn {
                parentHash := in.PreviousOutPoint.Hash
                rawTx := h.miner.GetRawTransaction(parentHash)
                parent := rawTx.MsgTx()
                value := parent.TxOut[in.PreviousOutPoint.Index].Value

                balance += btcutil.Amount(value)
        }

        for _, out := range tx.TxOut {
                balance -= btcutil.Amount(out.Value)
        }

        return balance
}

// CalculateTxWeight calculates the weight for a given tx.
//
// TODO(yy): use weight estimator to get more accurate result.
func (h *HarnessTest) CalculateTxWeight(tx *wire.MsgTx) lntypes.WeightUnit {
        utx := btcutil.NewTx(tx)
        return lntypes.WeightUnit(blockchain.GetTransactionWeight(utx))
}

// CalculateTxFeeRate calculates the fee rate for a given tx.
func (h *HarnessTest) CalculateTxFeeRate(
        tx *wire.MsgTx) chainfee.SatPerKWeight {

        w := h.CalculateTxWeight(tx)
        fee := h.CalculateTxFee(tx)

        return chainfee.NewSatPerKWeight(fee, w)
}

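// For instance (editor's note, numbers hypothetical): a tx paying a total
// fee of 2,500 sats with a weight of 1,000 WU yields a fee rate of
//
//        2,500 sat / 1,000 WU * 1,000 = 2,500 sat/kw
//
// which is what chainfee.NewSatPerKWeight(fee, w) computes above.
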
// CalculateTxesFeeRate takes a list of transactions and estimates the fee rate
// used to sweep them.
//
// NOTE: only used in the current test file.
func (h *HarnessTest) CalculateTxesFeeRate(txns []*wire.MsgTx) int64 {
        const scale = 1000

        var totalWeight, totalFee int64
        for _, tx := range txns {
                utx := btcutil.NewTx(tx)
                totalWeight += blockchain.GetTransactionWeight(utx)

                fee := h.CalculateTxFee(tx)
                totalFee += int64(fee)
        }
        feeRate := totalFee * scale / totalWeight

        return feeRate
}

// AssertSweepFound looks up a sweep in a node's list of broadcast sweeps and
// asserts it's found.
//
// NOTE: Does not account for the node's internal state.
func (h *HarnessTest) AssertSweepFound(hn *node.HarnessNode,
        sweep string, verbose bool, startHeight int32) {

        err := wait.NoError(func() error {
                // List all sweeps that the node has broadcast.
                sweepResp := hn.RPC.ListSweeps(verbose, startHeight)

                var found bool
                if verbose {
                        found = findSweepInDetails(h, sweep, sweepResp)
                } else {
                        found = findSweepInTxids(h, sweep, sweepResp)
                }

                if found {
                        return nil
                }

                return fmt.Errorf("sweep tx %v not found in resp %v", sweep,
                        sweepResp)
        }, wait.DefaultTimeout)
        require.NoError(h, err, "%s: timeout checking sweep tx", hn.Name())
}

func findSweepInTxids(ht *HarnessTest, sweepTxid string,
        sweepResp *walletrpc.ListSweepsResponse) bool {

        sweepTxIDs := sweepResp.GetTransactionIds()
        require.NotNil(ht, sweepTxIDs, "expected transaction ids")
        require.Nil(ht, sweepResp.GetTransactionDetails())

        // Check that the sweep tx we have just produced is present.
        for _, tx := range sweepTxIDs.TransactionIds {
                if tx == sweepTxid {
                        return true
                }
        }

        return false
}

func findSweepInDetails(ht *HarnessTest, sweepTxid string,
        sweepResp *walletrpc.ListSweepsResponse) bool {

        sweepDetails := sweepResp.GetTransactionDetails()
        require.NotNil(ht, sweepDetails, "expected transaction details")
        require.Nil(ht, sweepResp.GetTransactionIds())

        for _, tx := range sweepDetails.Transactions {
                if tx.TxHash == sweepTxid {
                        return true
                }
        }

        return false
}

// QueryRoutesAndRetry attempts to keep querying a route until the timeout is
// reached.
//
// NOTE: when a channel is opened, we may need to query multiple times to get
// it in our QueryRoutes RPC. This happens even after we check the channel is
// heard by the node using ht.AssertChannelOpen. Deep down, this is because our
// GraphTopologySubscription and QueryRoutes give different results regarding a
// specific channel, with the former reporting it as open while the latter does
// not, resulting in GraphTopologySubscription acting "faster" than
// QueryRoutes.
// TODO(yy): make sure related subsystems share the same view on a given
// channel.
func (h *HarnessTest) QueryRoutesAndRetry(hn *node.HarnessNode,
        req *lnrpc.QueryRoutesRequest) *lnrpc.QueryRoutesResponse {

        var routes *lnrpc.QueryRoutesResponse
        err := wait.NoError(func() error {
                ctxt, cancel := context.WithCancel(h.runCtx)
                defer cancel()

                resp, err := hn.RPC.LN.QueryRoutes(ctxt, req)
                if err != nil {
                        return fmt.Errorf("%s: failed to query route: %w",
                                hn.Name(), err)
                }

                routes = resp

                return nil
        }, DefaultTimeout)

        require.NoError(h, err, "timeout querying routes")

        return routes
}

// ReceiveHtlcInterceptor waits until a message is received on the htlc
// interceptor stream or the timeout is reached.
func (h *HarnessTest) ReceiveHtlcInterceptor(
        stream rpc.InterceptorClient) *routerrpc.ForwardHtlcInterceptRequest {

        chanMsg := make(chan *routerrpc.ForwardHtlcInterceptRequest)
        errChan := make(chan error)
        go func() {
                // Consume one message. This will block until the message is
                // received.
                resp, err := stream.Recv()
                if err != nil {
                        errChan <- err
                        return
                }
                chanMsg <- resp
        }()

        select {
        case <-time.After(DefaultTimeout):
                require.Fail(h, "timeout", "timeout intercepting htlc")

        case err := <-errChan:
                require.Failf(h, "err from HTLC interceptor stream",
                        "received err from HTLC interceptor stream: %v", err)

        case updateMsg := <-chanMsg:
                return updateMsg
        }

        return nil
}

// ReceiveInvoiceHtlcModification waits until a message is received on the
// invoice HTLC modifier stream or the timeout is reached.
func (h *HarnessTest) ReceiveInvoiceHtlcModification(
        stream rpc.InvoiceHtlcModifierClient) *invoicesrpc.HtlcModifyRequest {

        chanMsg := make(chan *invoicesrpc.HtlcModifyRequest)
        errChan := make(chan error)
        go func() {
                // Consume one message. This will block until the message is
                // received.
                resp, err := stream.Recv()
                if err != nil {
                        errChan <- err
                        return
                }
                chanMsg <- resp
        }()

        select {
        case <-time.After(DefaultTimeout):
                require.Fail(h, "timeout", "timeout invoice HTLC modifier")

        case err := <-errChan:
                require.Failf(h, "err from invoice HTLC modifier stream",
                        "received err from invoice HTLC modifier stream: %v",
                        err)

        case updateMsg := <-chanMsg:
                return updateMsg
        }

        return nil
}

// ReceiveChannelEvent waits until a message is received from the
// ChannelEventsClient stream or the timeout is reached.
func (h *HarnessTest) ReceiveChannelEvent(
        stream rpc.ChannelEventsClient) *lnrpc.ChannelEventUpdate {

        chanMsg := make(chan *lnrpc.ChannelEventUpdate)
        errChan := make(chan error)
        go func() {
                // Consume one message. This will block until the message is
                // received.
                resp, err := stream.Recv()
                if err != nil {
                        errChan <- err
                        return
                }
                chanMsg <- resp
        }()

        select {
        case <-time.After(DefaultTimeout):
                require.Fail(h, "timeout", "timeout receiving channel event")

        case err := <-errChan:
                require.Failf(h, "err from stream",
                        "received err from stream: %v", err)

        case updateMsg := <-chanMsg:
                return updateMsg
        }

        return nil
}

// GetOutputIndex returns the output index of the given address in the given
// transaction.
func (h *HarnessTest) GetOutputIndex(txid chainhash.Hash, addr string) int {
        // We'll then extract the raw transaction from the mempool in order to
        // determine the index of the output paying to the given address.
        tx := h.miner.GetRawTransaction(txid)

        outputIndex := -1
        for i, txOut := range tx.MsgTx().TxOut {
                _, addrs, _, err := txscript.ExtractPkScriptAddrs(
                        txOut.PkScript, h.miner.ActiveNet,
                )
                require.NoError(h, err)

                if addrs[0].String() == addr {
                        outputIndex = i
                }
        }
        require.Greater(h, outputIndex, -1)

        return outputIndex
}

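// Example usage (editor's illustrative sketch, not part of the original
// file): locating the output paying to a given address in order to build an
// outpoint for later spends,
//
//        idx := h.GetOutputIndex(txid, addr)
//        op := wire.OutPoint{Hash: txid, Index: uint32(idx)}
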
// SendCoins sends a coin from node A to node B with the given amount, returns
2370
// the sending tx.
2371
func (h *HarnessTest) SendCoins(a, b *node.HarnessNode,
2372
        amt btcutil.Amount) *wire.MsgTx {
×
2373

×
2374
        // Create an address for Bob receive the coins.
×
2375
        req := &lnrpc.NewAddressRequest{
×
2376
                Type: lnrpc.AddressType_TAPROOT_PUBKEY,
×
2377
        }
×
2378
        resp := b.RPC.NewAddress(req)
×
2379

×
2380
        // Send the coins from Alice to Bob. We should expect a tx to be
×
2381
        // broadcast and seen in the mempool.
×
2382
        sendReq := &lnrpc.SendCoinsRequest{
×
2383
                Addr:       resp.Address,
×
2384
                Amount:     int64(amt),
×
2385
                TargetConf: 6,
×
2386
        }
×
2387
        a.RPC.SendCoins(sendReq)
×
2388
        tx := h.GetNumTxsFromMempool(1)[0]
×
2389

×
2390
        return tx
×
2391
}
×
2392

2393
// SendCoins sends all coins from node A to node B, returns the sending tx.
2394
func (h *HarnessTest) SendAllCoins(a, b *node.HarnessNode) *wire.MsgTx {
×
2395
        // Create an address for Bob receive the coins.
×
2396
        req := &lnrpc.NewAddressRequest{
×
2397
                Type: lnrpc.AddressType_TAPROOT_PUBKEY,
×
2398
        }
×
2399
        resp := b.RPC.NewAddress(req)
×
2400

×
2401
        // Send the coins from Alice to Bob. We should expect a tx to be
×
2402
        // broadcast and seen in the mempool.
×
2403
        sendReq := &lnrpc.SendCoinsRequest{
×
2404
                Addr:             resp.Address,
×
2405
                TargetConf:       6,
×
2406
                SendAll:          true,
×
2407
                SpendUnconfirmed: true,
×
2408
        }
×
2409
        a.RPC.SendCoins(sendReq)
×
2410
        tx := h.GetNumTxsFromMempool(1)[0]
×
2411

×
2412
        return tx
×
2413
}
×
2414

2415
// CreateSimpleNetwork creates the number of nodes specified by the number of
2416
// configs and makes a topology of `node1 -> node2 -> node3...`. Each node is
2417
// created using the specified config, the neighbors are connected, and the
2418
// channels are opened. Each node will be funded with a single UTXO of 1 BTC
2419
// except the last one.
2420
//
2421
// For instance, to create a network with 2 nodes that share the same node
2422
// config,
2423
//
2424
//        cfg := []string{"--protocol.anchors"}
2425
//        cfgs := [][]string{cfg, cfg}
2426
//        params := OpenChannelParams{...}
2427
//        chanPoints, nodes := ht.CreateSimpleNetwork(cfgs, params)
2428
//
2429
// This will create two nodes and open an anchor channel between them.
2430
func (h *HarnessTest) CreateSimpleNetwork(nodeCfgs [][]string,
2431
        p OpenChannelParams) ([]*lnrpc.ChannelPoint, []*node.HarnessNode) {
×
2432

×
2433
        // Create new nodes.
×
2434
        nodes := h.createNodes(nodeCfgs)
×
2435

×
2436
        var resp []*lnrpc.ChannelPoint
×
2437

×
2438
        // Open zero-conf channels if specified.
×
2439
        if p.ZeroConf {
×
2440
                resp = h.openZeroConfChannelsForNodes(nodes, p)
×
2441
        } else {
×
2442
                // Open channels between the nodes.
×
2443
                resp = h.openChannelsForNodes(nodes, p)
×
2444
        }
×
2445

2446
        return resp, nodes
×
2447
}
2448

2449
// acceptChannel is used to accept a single channel that comes across. This
2450
// should be run in a goroutine and is used to test nodes with the zero-conf
2451
// feature bit.
2452
func acceptChannel(t *testing.T, zeroConf bool, stream rpc.AcceptorClient) {
×
2453
        req, err := stream.Recv()
×
2454
        require.NoError(t, err)
×
2455

×
2456
        resp := &lnrpc.ChannelAcceptResponse{
×
2457
                Accept:        true,
×
2458
                PendingChanId: req.PendingChanId,
×
2459
                ZeroConf:      zeroConf,
×
2460
        }
×
2461
        err = stream.Send(resp)
×
2462
        require.NoError(t, err)
×
2463
}
×
2464

2465
// nodeNames defines a slice of human-reable names for the nodes created in the
2466
// `createNodes` method. 8 nodes are defined here as by default we can only
2467
// create this many nodes in one test.
2468
var nodeNames = []string{
2469
        "Alice", "Bob", "Carol", "Dave", "Eve", "Frank", "Grace", "Heidi",
2470
}
2471

2472
// createNodes creates the number of nodes specified by the number of configs.
2473
// Each node is created using the specified config, the neighbors are
2474
// connected.
2475
func (h *HarnessTest) createNodes(nodeCfgs [][]string) []*node.HarnessNode {
×
2476
        // Get the number of nodes.
×
2477
        numNodes := len(nodeCfgs)
×
2478

×
2479
        // Make sure we are creating a reasonable number of nodes.
×
2480
        require.LessOrEqual(h, numNodes, len(nodeNames), "too many nodes")
×
2481

×
2482
        // Make a slice of nodes.
×
2483
        nodes := make([]*node.HarnessNode, numNodes)
×
2484

×
2485
        // Create new nodes.
×
2486
        for i, nodeCfg := range nodeCfgs {
×
2487
                nodeName := nodeNames[i]
×
2488
                n := h.NewNode(nodeName, nodeCfg)
×
2489
                nodes[i] = n
×
2490
        }
×
2491

2492
        // Connect the nodes in a chain.
2493
        for i := 1; i < len(nodes); i++ {
×
2494
                nodeA := nodes[i-1]
×
2495
                nodeB := nodes[i]
×
2496
                h.EnsureConnected(nodeA, nodeB)
×
2497
        }
×
2498

2499
        // Fund all the nodes expect the last one.
2500
        for i := 0; i < len(nodes)-1; i++ {
×
2501
                node := nodes[i]
×
2502
                h.FundCoinsUnconfirmed(btcutil.SatoshiPerBitcoin, node)
×
2503
        }
×
2504

2505
        // Mine 1 block to get the above coins confirmed.
2506
        h.MineBlocksAndAssertNumTxes(1, numNodes-1)
×
2507

×
2508
        return nodes
×
2509
}

// openChannelsForNodes takes a list of nodes and makes a topology of `node1 ->
// node2 -> node3...`.
func (h *HarnessTest) openChannelsForNodes(nodes []*node.HarnessNode,
        p OpenChannelParams) []*lnrpc.ChannelPoint {

        // Sanity check the params.
        require.Greater(h, len(nodes), 1, "need at least 2 nodes")

        // attachFundingShim is a helper closure that optionally attaches a
        // funding shim to the open channel params and returns it.
        attachFundingShim := func(
                nodeA, nodeB *node.HarnessNode) OpenChannelParams {

                // If this channel is not a script enforced lease channel,
                // we'll do nothing and return the params.
                leasedType := lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE
                if p.CommitmentType != leasedType {
                        return p
                }

                // Otherwise derive the funding shim, attach it to the original
                // open channel params and return it.
                minerHeight := h.CurrentHeight()
                thawHeight := minerHeight + thawHeightDelta
                fundingShim, _ := h.DeriveFundingShim(
                        nodeA, nodeB, p.Amt, thawHeight, true, leasedType,
                )

                p.FundingShim = fundingShim

                return p
        }

        // Open channels in batch to save blocks mined.
        reqs := make([]*OpenChannelRequest, 0, len(nodes)-1)
        for i := 0; i < len(nodes)-1; i++ {
                nodeA := nodes[i]
                nodeB := nodes[i+1]

                // Optionally attach a funding shim to the open channel params.
                p = attachFundingShim(nodeA, nodeB)

                req := &OpenChannelRequest{
                        Local:  nodeA,
                        Remote: nodeB,
                        Param:  p,
                }
                reqs = append(reqs, req)
        }
        resp := h.OpenMultiChannelsAsync(reqs)

        // If the channels are private, make sure the channel participants know
        // the relevant channels.
        if p.Private {
                for i, chanPoint := range resp {
                        // Get the channel participants - for n channels we
                        // would have n+1 nodes.
                        nodeA, nodeB := nodes[i], nodes[i+1]
                        h.AssertChannelInGraph(nodeA, chanPoint)
                        h.AssertChannelInGraph(nodeB, chanPoint)
                }
        } else {
                // Make sure all the nodes know all the channels if they are
                // public.
                for _, node := range nodes {
                        for _, chanPoint := range resp {
                                h.AssertChannelInGraph(node, chanPoint)
                        }

                        // Make sure every node has updated its cached graph
                        // about the edges as indicated in `DescribeGraph`.
                        h.AssertNumEdges(node, len(resp), false)
                }
        }

        return resp
}
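
// A minimal sketch of how openChannelsForNodes is typically driven
// (illustrative only; the channel size is an assumption):
//
//      nodes := h.createNodes([][]string{nil, nil, nil})
//
//      // Open Alice->Bob and Bob->Carol in one batch; the returned channel
//      // points follow the node ordering.
//      chanPoints := h.openChannelsForNodes(nodes, OpenChannelParams{
//              Amt: btcutil.Amount(1_000_000),
//      })
//      require.Len(h, chanPoints, len(nodes)-1)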

// openZeroConfChannelsForNodes takes a list of nodes and makes a topology of
// `node1 -> node2 -> node3...` with zero-conf channels.
func (h *HarnessTest) openZeroConfChannelsForNodes(nodes []*node.HarnessNode,
        p OpenChannelParams) []*lnrpc.ChannelPoint {

        // Sanity check the params.
        require.True(h, p.ZeroConf, "zero-conf channels must be enabled")
        require.Greater(h, len(nodes), 1, "need at least 2 nodes")

        // We are opening numNodes-1 channels.
        cancels := make([]context.CancelFunc, 0, len(nodes)-1)

        // Create the channel acceptors.
        for _, node := range nodes[1:] {
                acceptor, cancel := node.RPC.ChannelAcceptor()
                go acceptChannel(h.T, true, acceptor)

                cancels = append(cancels, cancel)
        }

        // Open channels between the nodes.
        resp := h.openChannelsForNodes(nodes, p)

        for _, cancel := range cancels {
                cancel()
        }

        return resp
}
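
// A sketch of the zero-conf variant (illustrative only; the exact protocol
// flags and parameters are assumptions, since zero-conf channels require
// the scid-alias and zero-conf feature bits to be negotiated):
//
//      cfg := []string{"--protocol.option-scid-alias", "--protocol.zero-conf"}
//      nodes := h.createNodes([][]string{cfg, cfg, cfg})
//
//      chanPoints := h.openZeroConfChannelsForNodes(nodes, OpenChannelParams{
//              Amt:      btcutil.Amount(1_000_000),
//              ZeroConf: true,
//      })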

// DeriveFundingShim creates a channel funding shim by deriving the necessary
// keys on both sides.
func (h *HarnessTest) DeriveFundingShim(alice, bob *node.HarnessNode,
        chanSize btcutil.Amount, thawHeight uint32, publish bool,
        commitType lnrpc.CommitmentType) (*lnrpc.FundingShim,
        *lnrpc.ChannelPoint) {

        // NOTE: in the naming below, alice plays the role of Carol (the
        // initiator) and bob plays the role of Dave (the responder).
        keyLoc := &signrpc.KeyLocator{KeyFamily: 9999}
        carolFundingKey := alice.RPC.DeriveKey(keyLoc)
        daveFundingKey := bob.RPC.DeriveKey(keyLoc)

        // Now that we have the multi-sig keys for each party, we can manually
        // construct the funding transaction. We'll instruct the backend to
        // immediately create and broadcast a transaction paying out an exact
        // amount. Normally this would reside in the mempool, but we just
        // confirm it now for simplicity.
        var (
                fundingOutput *wire.TxOut
                musig2        bool
                err           error
        )

        if commitType == lnrpc.CommitmentType_SIMPLE_TAPROOT ||
                commitType == lnrpc.CommitmentType_SIMPLE_TAPROOT_OVERLAY {

                var carolKey, daveKey *btcec.PublicKey
                carolKey, err = btcec.ParsePubKey(carolFundingKey.RawKeyBytes)
                require.NoError(h, err)
                daveKey, err = btcec.ParsePubKey(daveFundingKey.RawKeyBytes)
                require.NoError(h, err)

                _, fundingOutput, err = input.GenTaprootFundingScript(
                        carolKey, daveKey, int64(chanSize),
                        fn.None[chainhash.Hash](),
                )
                require.NoError(h, err)

                musig2 = true
        } else {
                _, fundingOutput, err = input.GenFundingPkScript(
                        carolFundingKey.RawKeyBytes, daveFundingKey.RawKeyBytes,
                        int64(chanSize),
                )
                require.NoError(h, err)
        }

        var txid *chainhash.Hash
        targetOutputs := []*wire.TxOut{fundingOutput}
        if publish {
                txid = h.SendOutputsWithoutChange(targetOutputs, 5)
        } else {
                tx := h.CreateTransaction(targetOutputs, 5)

                txHash := tx.TxHash()
                txid = &txHash
        }

        // At this point, we can begin our external channel funding workflow.
        // We'll start by generating a pending channel ID externally that will
        // be used to track this new funding type.
        pendingChanID := h.Random32Bytes()

        // Now that we have the pending channel ID, Dave (our responder) will
        // register the intent to receive a new channel funding workflow using
        // the pending channel ID.
        chanPoint := &lnrpc.ChannelPoint{
                FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
                        FundingTxidBytes: txid[:],
                },
        }
        chanPointShim := &lnrpc.ChanPointShim{
                Amt:       int64(chanSize),
                ChanPoint: chanPoint,
                LocalKey: &lnrpc.KeyDescriptor{
                        RawKeyBytes: daveFundingKey.RawKeyBytes,
                        KeyLoc: &lnrpc.KeyLocator{
                                KeyFamily: daveFundingKey.KeyLoc.KeyFamily,
                                KeyIndex:  daveFundingKey.KeyLoc.KeyIndex,
                        },
                },
                RemoteKey:     carolFundingKey.RawKeyBytes,
                PendingChanId: pendingChanID,
                ThawHeight:    thawHeight,
                Musig2:        musig2,
        }
        fundingShim := &lnrpc.FundingShim{
                Shim: &lnrpc.FundingShim_ChanPointShim{
                        ChanPointShim: chanPointShim,
                },
        }
        bob.RPC.FundingStateStep(&lnrpc.FundingTransitionMsg{
                Trigger: &lnrpc.FundingTransitionMsg_ShimRegister{
                        ShimRegister: fundingShim,
                },
        })

        // If we attempt to register the same shim (with the same pending chan
        // ID), then we should get an error.
        bob.RPC.FundingStateStepAssertErr(&lnrpc.FundingTransitionMsg{
                Trigger: &lnrpc.FundingTransitionMsg_ShimRegister{
                        ShimRegister: fundingShim,
                },
        })

        // We'll take the chan point shim we just registered for Dave (the
        // responder), and swap the local/remote keys before we feed it in as
        // Carol's funding shim as the initiator.
        fundingShim.GetChanPointShim().LocalKey = &lnrpc.KeyDescriptor{
                RawKeyBytes: carolFundingKey.RawKeyBytes,
                KeyLoc: &lnrpc.KeyLocator{
                        KeyFamily: carolFundingKey.KeyLoc.KeyFamily,
                        KeyIndex:  carolFundingKey.KeyLoc.KeyIndex,
                },
        }
        fundingShim.GetChanPointShim().RemoteKey = daveFundingKey.RawKeyBytes

        return fundingShim, chanPoint
}
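
// A sketch of driving DeriveFundingShim directly (illustrative only; the
// node handles, channel size and thaw height below are assumptions). The
// returned shim is fed into OpenChannelParams so the funding flow uses the
// externally created funding output, while the returned channel point
// identifies that pre-created outpoint:
//
//      fundingShim, chanPoint := h.DeriveFundingShim(
//              carol, dave, chanSize, thawHeight, true,
//              lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE,
//      )
//      h.OpenChannel(carol, dave, OpenChannelParams{
//              Amt:         chanSize,
//              FundingShim: fundingShim,
//      })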