
lightningnetwork / lnd / build 12379012817

17 Dec 2024 06:18PM UTC coverage: 57.508% (+0.02%) from 57.489%

Pull Request #9307: Beat itest [2/3]: document and fix itest flakes
yyforyongyu: itest: document a flake found in `SendToRoute`

1 of 34 new or added lines in 5 files covered. (2.94%)
24 existing lines in 4 files now uncovered.
102408 of 178075 relevant lines covered (57.51%)
24802.2 hits per line

Source File: /lntest/harness.go
package lntest

import (
        "context"
        "encoding/hex"
        "fmt"
        "strings"
        "testing"
        "time"

        "github.com/btcsuite/btcd/blockchain"
        "github.com/btcsuite/btcd/btcec/v2"
        "github.com/btcsuite/btcd/btcutil"
        "github.com/btcsuite/btcd/chaincfg/chainhash"
        "github.com/btcsuite/btcd/txscript"
        "github.com/btcsuite/btcd/wire"
        "github.com/go-errors/errors"
        "github.com/lightningnetwork/lnd/fn/v2"
        "github.com/lightningnetwork/lnd/input"
        "github.com/lightningnetwork/lnd/kvdb/etcd"
        "github.com/lightningnetwork/lnd/lnrpc"
        "github.com/lightningnetwork/lnd/lnrpc/invoicesrpc"
        "github.com/lightningnetwork/lnd/lnrpc/routerrpc"
        "github.com/lightningnetwork/lnd/lnrpc/signrpc"
        "github.com/lightningnetwork/lnd/lnrpc/walletrpc"
        "github.com/lightningnetwork/lnd/lntest/miner"
        "github.com/lightningnetwork/lnd/lntest/node"
        "github.com/lightningnetwork/lnd/lntest/rpc"
        "github.com/lightningnetwork/lnd/lntest/wait"
        "github.com/lightningnetwork/lnd/lntypes"
        "github.com/lightningnetwork/lnd/lnwallet/chainfee"
        "github.com/lightningnetwork/lnd/lnwire"
        "github.com/lightningnetwork/lnd/routing"
        "github.com/stretchr/testify/require"
)

const (
        // defaultMinerFeeRate specifies the fee rate in sats when sending
        // outputs from the miner.
        defaultMinerFeeRate = 7500

        // numBlocksSendOutput specifies the number of blocks to mine after
        // sending outputs from the miner.
        numBlocksSendOutput = 2

        // numBlocksOpenChannel specifies the number of blocks mined when
        // opening a channel.
        numBlocksOpenChannel = 6

        // lndErrorChanSize specifies the buffer size used to receive errors
        // from the lnd process.
        lndErrorChanSize = 10

        // maxBlocksAllowed specifies the max allowed value to be used when
        // mining blocks.
        maxBlocksAllowed = 100

        finalCltvDelta  = routing.MinCLTVDelta // 18.
        thawHeightDelta = finalCltvDelta * 2   // 36.
)

// TestCase defines a test case that's been used in the integration test.
type TestCase struct {
        // Name specifies the test name.
        Name string

        // TestFunc is the test case wrapped in a function.
        TestFunc func(t *HarnessTest)
}

// HarnessTest builds on top of a testing.T with enhanced error detection. It
// is responsible for managing the interactions among different nodes, and
// providing easy-to-use assertions.
type HarnessTest struct {
        *testing.T

        // miner is a reference to a running full node that can be used to
        // create new blocks on the network.
        miner *miner.HarnessMiner

        // manager handles the start and stop of a given node.
        manager *nodeManager

        // feeService is a web service that provides external fee estimates to
        // lnd.
        feeService WebFeeService

        // Channel for transmitting stderr output from failed lightning node
        // to main process.
        lndErrorChan chan error

        // runCtx is a context with cancel method. It's used to signal when
        // the node needs to quit, and used as the parent context when
        // spawning children contexts for RPC requests.
        runCtx context.Context //nolint:containedctx
        cancel context.CancelFunc

        // stopChainBackend points to the cleanup function returned by the
        // chainBackend.
        stopChainBackend func()

        // cleaned specifies whether the cleanup has been applied for the
        // current HarnessTest.
        cleaned bool

        // currentHeight is the current height of the chain backend.
        currentHeight uint32
}

// harnessOpts contains functional options to modify the behavior of the
// various harness calls.
type harnessOpts struct {
        useAMP bool
}

// defaultHarnessOpts returns a new instance of the harnessOpts with default
// values specified.
func defaultHarnessOpts() harnessOpts {
        return harnessOpts{
                useAMP: false,
        }
}

// HarnessOpt is a functional option that can be used to modify the behavior
// of harness functionality.
type HarnessOpt func(*harnessOpts)

// WithAMP is a functional option that can be used to enable the AMP feature
// for sending payments.
func WithAMP() HarnessOpt {
        return func(h *harnessOpts) {
                h.useAMP = true
        }
}
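// The following sketch shows how a harness call might consume HarnessOpt
// values using the functional-option pattern above. The helper name
// applyHarnessOpts is illustrative only and is not part of the harness API.
func applyHarnessOpts(options ...HarnessOpt) harnessOpts {
        // Start from the defaults, then let each option mutate the struct.
        opts := defaultHarnessOpts()
        for _, opt := range options {
                opt(&opts)
        }

        return opts
}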

// NewHarnessTest creates a new instance of a harnessTest from a regular
// testing.T instance.
func NewHarnessTest(t *testing.T, lndBinary string, feeService WebFeeService,
        dbBackend node.DatabaseBackend, nativeSQL bool) *HarnessTest {

        t.Helper()

        // Create the run context.
        ctxt, cancel := context.WithCancel(context.Background())

        manager := newNodeManager(lndBinary, dbBackend, nativeSQL)

        return &HarnessTest{
                T:          t,
                manager:    manager,
                feeService: feeService,
                runCtx:     ctxt,
                cancel:     cancel,
                // We need to use a buffered channel here as we don't want to
                // block sending errors.
                lndErrorChan: make(chan error, lndErrorChanSize),
        }
}

// Start will assemble the chain backend and the miner for the HarnessTest. It
// also starts the fee service and watches for lnd process errors.
func (h *HarnessTest) Start(chain node.BackendConfig,
        miner *miner.HarnessMiner) {

        // Spawn a new goroutine to watch for any fatal errors that any of the
        // running lnd processes encounter. If an error occurs, then the test
        // case should naturally fail as a result and we log the server error
        // here to help debug.
        go func() {
                select {
                case err, more := <-h.lndErrorChan:
                        if !more {
                                return
                        }
                        h.Logf("lnd finished with error (stderr):\n%v", err)

                case <-h.runCtx.Done():
                        return
                }
        }()

        // Start the fee service.
        err := h.feeService.Start()
        require.NoError(h, err, "failed to start fee service")

        // Assemble the node manager with chainBackend and feeServiceURL.
        h.manager.chainBackend = chain
        h.manager.feeServiceURL = h.feeService.URL()

        // Assemble the miner.
        h.miner = miner

        // Update block height.
        h.updateCurrentHeight()
}

// ChainBackendName returns the chain backend name used in the test.
func (h *HarnessTest) ChainBackendName() string {
        return h.manager.chainBackend.Name()
}

// Context returns the run context used in this test. Usually it should be
// managed by the test itself; otherwise undefined behaviors will occur. It
// can be used, however, when a test needs to have its own context being
// managed differently. In that case, instead of using a background context,
// the run context should be used such that the test context scope can be
// fully controlled.
func (h *HarnessTest) Context() context.Context {
        return h.runCtx
}

// setupWatchOnlyNode initializes a node with the watch-only accounts of an
// associated remote signing instance.
func (h *HarnessTest) setupWatchOnlyNode(name string,
        signerNode *node.HarnessNode, password []byte) *node.HarnessNode {

        // Prepare arguments for watch-only node connected to the remote
        // signer.
        remoteSignerArgs := []string{
                "--remotesigner.enable",
                fmt.Sprintf("--remotesigner.rpchost=localhost:%d",
                        signerNode.Cfg.RPCPort),
                fmt.Sprintf("--remotesigner.tlscertpath=%s",
                        signerNode.Cfg.TLSCertPath),
                fmt.Sprintf("--remotesigner.macaroonpath=%s",
                        signerNode.Cfg.AdminMacPath),
        }

        // Fetch watch-only accounts from the signer node.
        resp := signerNode.RPC.ListAccounts(&walletrpc.ListAccountsRequest{})
        watchOnlyAccounts, err := walletrpc.AccountsToWatchOnly(resp.Accounts)
        require.NoErrorf(h, err, "unable to find watch only accounts for %s",
                name)

        // Create a new watch-only node with remote signer configuration.
        return h.NewNodeRemoteSigner(
                name, remoteSignerArgs, password,
                &lnrpc.WatchOnly{
                        MasterKeyBirthdayTimestamp: 0,
                        MasterKeyFingerprint:       nil,
                        Accounts:                   watchOnlyAccounts,
                },
        )
}

// createAndSendOutput sends amt satoshis from the internal mining node to the
// targeted lightning node using a P2WKH address. No blocks are mined so the
// transactions will sit unconfirmed in the mempool.
func (h *HarnessTest) createAndSendOutput(target *node.HarnessNode,
        amt btcutil.Amount, addrType lnrpc.AddressType) {

        req := &lnrpc.NewAddressRequest{Type: addrType}
        resp := target.RPC.NewAddress(req)
        addr := h.DecodeAddress(resp.Address)
        addrScript := h.PayToAddrScript(addr)

        output := &wire.TxOut{
                PkScript: addrScript,
                Value:    int64(amt),
        }
        h.miner.SendOutput(output, defaultMinerFeeRate)
}

// Stop stops the test harness.
func (h *HarnessTest) Stop() {
        // Do nothing if it's not started.
        if h.runCtx == nil {
                h.Log("HarnessTest is not started")
                return
        }

        h.shutdownAllNodes()

        close(h.lndErrorChan)

        // Stop the fee service.
        err := h.feeService.Stop()
        require.NoError(h, err, "failed to stop fee service")

        // Stop the chainBackend.
        h.stopChainBackend()

        // Stop the miner.
        h.miner.Stop()
}
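// A minimal sketch of the harness lifecycle around NewHarnessTest, Start and
// Stop. The chain backend and miner are assumed to be constructed elsewhere
// by the test setup; the bbolt backend choice here is illustrative only.
func exampleLifecycle(t *testing.T, lndBinary string, fee WebFeeService,
        chain node.BackendConfig, m *miner.HarnessMiner) {

        h := NewHarnessTest(t, lndBinary, fee, node.BackendBbolt, false)
        h.Start(chain, m)
        defer h.Stop()
}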

// RunTestCase executes a harness test case. Any errors or panics will be
// represented as fatal.
func (h *HarnessTest) RunTestCase(testCase *TestCase) {
        defer func() {
                if err := recover(); err != nil {
                        description := errors.Wrap(err, 2).ErrorStack()
                        h.Fatalf("Failed: (%v) panic with: \n%v",
                                testCase.Name, description)
                }
        }()

        testCase.TestFunc(h)
}
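// A short sketch of how a TestCase is typically defined and dispatched
// through RunTestCase; the name and test body are illustrative only.
func exampleRunTestCase(h *HarnessTest) {
        tc := &TestCase{
                Name: "example test",
                TestFunc: func(t *HarnessTest) {
                        // The test body runs against the harness and can use
                        // all of its assertions.
                        t.Log("running example test body")
                },
        }

        h.RunTestCase(tc)
}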

// Subtest creates a child HarnessTest, which inherits the harness net and
// standby nodes created by the parent test. It will return a cleanup function
// which resets all the standby nodes' configs back to their original state
// and creates snapshots of each node's internal state.
func (h *HarnessTest) Subtest(t *testing.T) *HarnessTest {
        t.Helper()

        st := &HarnessTest{
                T:            t,
                manager:      h.manager,
                miner:        h.miner,
                feeService:   h.feeService,
                lndErrorChan: make(chan error, lndErrorChanSize),
        }

        // Inherit context from the main test.
        st.runCtx, st.cancel = context.WithCancel(h.runCtx)

        // Inherit the subtest for the miner.
        st.miner.T = st.T

        // Reset fee estimator.
        st.feeService.Reset()

        // Record block height.
        h.updateCurrentHeight()
        startHeight := int32(h.CurrentHeight())

        st.Cleanup(func() {
                _, endHeight := h.GetBestBlock()

                st.Logf("finished test: %s, start height=%d, end height=%d, "+
                        "mined blocks=%d", st.manager.currentTestCase,
                        startHeight, endHeight, endHeight-startHeight)

                // Don't bother running the cleanups if the test failed.
                if st.Failed() {
                        st.Log("test failed, skipped cleanup")
                        st.shutdownAllNodes()
                        return
                }

                // Don't run cleanup if it's already done. This can happen if
                // we have multiple levels of inheritance of the parent
                // harness test. For instance, a `Subtest(st)`.
                if st.cleaned {
                        st.Log("test already cleaned, skipped cleanup")
                        return
                }

                // If there are running nodes, shut them down.
                st.shutdownAllNodes()

                // We require the mempool to be cleaned from the test.
                require.Empty(st, st.miner.GetRawMempool(), "mempool not "+
                        "cleaned, please mine blocks to clean them all.")

                // Finally, cancel the run context. We have to do it here
                // because we need to keep the context alive for the above
                // assertions used in cleanup.
                st.cancel()

                // We now want to mark the parent harness as cleaned to avoid
                // running cleanup again since its internal state has been
                // cleaned up by its child harness tests.
                h.cleaned = true
        })

        return st
}
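// A brief sketch of how Subtest pairs with the standard library's subtest
// runner; the subtest and node names are illustrative only.
func exampleSubtest(h *HarnessTest) {
        h.Run("my subtest", func(t *testing.T) {
                st := h.Subtest(t)

                // Nodes created on the child harness are shut down by the
                // cleanup function it registers.
                alice := st.NewNode("Alice", nil)
                _ = alice
        })
}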

// shutdownAllNodes will shut down all running nodes.
func (h *HarnessTest) shutdownAllNodes() {
        for _, node := range h.manager.activeNodes {
                // The process may not be in a state to always shutdown
                // immediately, so we'll retry up to a hard limit to ensure we
                // eventually shutdown.
                err := wait.NoError(func() error {
                        return h.manager.shutdownNode(node)
                }, DefaultTimeout)

                if err == nil {
                        continue
                }

                // Instead of returning the error, we will log it. This is
                // needed so other nodes can continue their shutdown
                // processes.
                h.Logf("unable to shutdown %s, got err: %v", node.Name(), err)
        }
}

// cleanupStandbyNode is a function that should be called with defer whenever
// a subtest is created. It will reset the standby nodes' configs, snapshot
// the states, and validate the node has a clean state.
func (h *HarnessTest) cleanupStandbyNode(hn *node.HarnessNode) {
        // Remove connections made from this test.
        h.removeConnectionns(hn)

        // Delete all payments made from this test.
        hn.RPC.DeleteAllPayments()

        // Check the node's current state with timeout.
        //
        // NOTE: we need to do this in a `wait` because it takes some time for
        // the node to update its internal state. Once the RPCs are synced we
        // can then remove this wait.
        err := wait.NoError(func() error {
                // Update the node's internal state.
                hn.UpdateState()

                // Check the node is in a clean state for the following tests.
                return h.validateNodeState(hn)
        }, wait.DefaultTimeout)
        require.NoError(h, err, "timeout checking node's state")
}
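// A tiny sketch of the defer pattern described above for cleanupStandbyNode;
// the node handle is assumed to be a standby node such as Alice.
func exampleCleanupStandby(h *HarnessTest, alice *node.HarnessNode) {
        defer h.cleanupStandbyNode(alice)

        // The test body that connects peers and sends payments goes here.
}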

// removeConnectionns will remove all connections made on the standby nodes
// except the connections between Alice and Bob.
func (h *HarnessTest) removeConnectionns(hn *node.HarnessNode) {
        resp := hn.RPC.ListPeers()
        for _, peer := range resp.Peers {
                hn.RPC.DisconnectPeer(peer.PubKey)
        }
}

// SetTestName sets the test case name.
func (h *HarnessTest) SetTestName(name string) {
        cleanTestCaseName := strings.ReplaceAll(name, " ", "_")
        h.manager.currentTestCase = cleanTestCaseName
}

// NewNode creates a new node and asserts its creation. The node is guaranteed
// to have finished its initialization and all its subservers are started.
func (h *HarnessTest) NewNode(name string,
        extraArgs []string) *node.HarnessNode {

        node, err := h.manager.newNode(h.T, name, extraArgs, nil, false)
        require.NoErrorf(h, err, "unable to create new node for %s", name)

        // Start the node.
        err = node.Start(h.runCtx)
        require.NoError(h, err, "failed to start node %s", node.Name())

        return node
}

// NewNodeWithCoins creates a new node and asserts its creation. The node is
// guaranteed to have finished its initialization and all its subservers are
// started. In addition, 5 UTXOs of 1 BTC each are sent to the node.
func (h *HarnessTest) NewNodeWithCoins(name string,
        extraArgs []string) *node.HarnessNode {

        node, err := h.manager.newNode(h.T, name, extraArgs, nil, false)
        require.NoErrorf(h, err, "unable to create new node for %s", name)

        // Start the node.
        err = node.Start(h.runCtx)
        require.NoError(h, err, "failed to start node %s", node.Name())

        // Load up the wallet of the node with 5 outputs of 1 BTC each.
        const (
                numOutputs  = 5
                fundAmount  = 1 * btcutil.SatoshiPerBitcoin
                totalAmount = fundAmount * numOutputs
        )

        for i := 0; i < numOutputs; i++ {
                h.createAndSendOutput(
                        node, fundAmount,
                        lnrpc.AddressType_WITNESS_PUBKEY_HASH,
                )
        }

        // Mine a block to confirm the transactions.
        h.MineBlocksAndAssertNumTxes(1, numOutputs)

        // Now block until the wallet has fully synced up.
        h.WaitForBalanceConfirmed(node, totalAmount)

        return node
}

// Shutdown shuts down the given node and asserts that no errors occur.
func (h *HarnessTest) Shutdown(node *node.HarnessNode) {
        // The process may not be in a state to always shutdown immediately,
        // so we'll retry up to a hard limit to ensure we eventually shutdown.
        err := wait.NoError(func() error {
                return h.manager.shutdownNode(node)
        }, DefaultTimeout)

        require.NoErrorf(h, err, "unable to shutdown %v in %v", node.Name(),
                h.manager.currentTestCase)
}

// SuspendNode stops the given node and returns a callback that can be used to
// start it again.
func (h *HarnessTest) SuspendNode(node *node.HarnessNode) func() error {
        err := node.Stop()
        require.NoErrorf(h, err, "failed to stop %s", node.Name())

        // Remove the node from active nodes.
        delete(h.manager.activeNodes, node.Cfg.NodeID)

        return func() error {
                h.manager.registerNode(node)

                if err := node.Start(h.runCtx); err != nil {
                        return err
                }
                h.WaitForBlockchainSync(node)

                return nil
        }
}
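// A brief sketch of the suspend/resume pattern SuspendNode enables; the node
// handle here is assumed to come from an earlier NewNode call.
func exampleSuspendResume(h *HarnessTest, alice *node.HarnessNode) {
        restart := h.SuspendNode(alice)

        // Exercise behavior that requires the node to be offline here.

        require.NoError(h, restart(), "failed to restart suspended node")
}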

// RestartNode restarts a given node, unlocks it and asserts it's successfully
// started.
func (h *HarnessTest) RestartNode(hn *node.HarnessNode) {
        err := h.manager.restartNode(h.runCtx, hn, nil)
        require.NoErrorf(h, err, "failed to restart node %s", hn.Name())

        err = h.manager.unlockNode(hn)
        require.NoErrorf(h, err, "failed to unlock node %s", hn.Name())

        if !hn.Cfg.SkipUnlock {
                // Give the node some time to catch up with the chain before
                // we continue with the tests.
                h.WaitForBlockchainSync(hn)
        }
}

// RestartNodeNoUnlock restarts a given node without unlocking its wallet.
func (h *HarnessTest) RestartNodeNoUnlock(hn *node.HarnessNode) {
        err := h.manager.restartNode(h.runCtx, hn, nil)
        require.NoErrorf(h, err, "failed to restart node %s", hn.Name())
}

// RestartNodeWithChanBackups restarts a given node with the specified channel
// backups.
func (h *HarnessTest) RestartNodeWithChanBackups(hn *node.HarnessNode,
        chanBackups ...*lnrpc.ChanBackupSnapshot) {

        err := h.manager.restartNode(h.runCtx, hn, nil)
        require.NoErrorf(h, err, "failed to restart node %s", hn.Name())

        err = h.manager.unlockNode(hn, chanBackups...)
        require.NoErrorf(h, err, "failed to unlock node %s", hn.Name())

        // Give the node some time to catch up with the chain before we
        // continue with the tests.
        h.WaitForBlockchainSync(hn)
}

// RestartNodeWithExtraArgs updates the node's config and restarts it.
func (h *HarnessTest) RestartNodeWithExtraArgs(hn *node.HarnessNode,
        extraArgs []string) {

        hn.SetExtraArgs(extraArgs)
        h.RestartNode(hn)
}

// NewNodeWithSeed fully initializes a new HarnessNode after creating a fresh
// aezeed. The provided password is used as both the aezeed password and the
// wallet password. The generated mnemonic is returned along with the
// initialized harness node.
func (h *HarnessTest) NewNodeWithSeed(name string,
        extraArgs []string, password []byte,
        statelessInit bool) (*node.HarnessNode, []string, []byte) {

        // Create a request to generate a new aezeed. The new seed will have
        // the same password as the internal wallet.
        req := &lnrpc.GenSeedRequest{
                AezeedPassphrase: password,
                SeedEntropy:      nil,
        }

        return h.newNodeWithSeed(name, extraArgs, req, statelessInit)
}

// newNodeWithSeed creates and initializes a new HarnessNode such that it'll
// be ready to accept RPC calls. A `GenSeedRequest` is needed to generate the
// seed.
func (h *HarnessTest) newNodeWithSeed(name string,
        extraArgs []string, req *lnrpc.GenSeedRequest,
        statelessInit bool) (*node.HarnessNode, []string, []byte) {

        node, err := h.manager.newNode(
                h.T, name, extraArgs, req.AezeedPassphrase, true,
        )
        require.NoErrorf(h, err, "unable to create new node for %s", name)

        // Start the node with seed only, which will only create the `State`
        // and `WalletUnlocker` clients.
        err = node.StartWithNoAuth(h.runCtx)
        require.NoErrorf(h, err, "failed to start node %s", node.Name())

        // Generate a new seed.
        genSeedResp := node.RPC.GenSeed(req)

        // With the seed created, construct the init request to the node,
        // including the newly generated seed.
        initReq := &lnrpc.InitWalletRequest{
                WalletPassword:     req.AezeedPassphrase,
                CipherSeedMnemonic: genSeedResp.CipherSeedMnemonic,
                AezeedPassphrase:   req.AezeedPassphrase,
                StatelessInit:      statelessInit,
        }

        // Pass the init request via rpc to finish unlocking the node. This
        // will also initialize the macaroon-authenticated LightningClient.
        adminMac, err := h.manager.initWalletAndNode(node, initReq)
        require.NoErrorf(h, err, "failed to unlock and init node %s",
                node.Name())

        // In stateless initialization mode we get a macaroon back that we
        // have to return to the test, otherwise gRPC calls won't be possible
        // since there are no macaroon files created in that mode.
        // In stateful init the admin macaroon will just be nil.
        return node, genSeedResp.CipherSeedMnemonic, adminMac
}

// RestoreNodeWithSeed fully initializes a HarnessNode using a chosen
// mnemonic, password, recovery window, and optionally a set of static channel
// backups. After providing the initialization request to unlock the node,
// this method will finish initializing the LightningClient such that the
// HarnessNode can be used for regular rpc operations.
func (h *HarnessTest) RestoreNodeWithSeed(name string, extraArgs []string,
        password []byte, mnemonic []string, rootKey string,
        recoveryWindow int32,
        chanBackups *lnrpc.ChanBackupSnapshot) *node.HarnessNode {

        n, err := h.manager.newNode(h.T, name, extraArgs, password, true)
        require.NoErrorf(h, err, "unable to create new node for %s", name)

        // Start the node with seed only, which will only create the `State`
        // and `WalletUnlocker` clients.
        err = n.StartWithNoAuth(h.runCtx)
        require.NoErrorf(h, err, "failed to start node %s", n.Name())

        // Create the wallet.
        initReq := &lnrpc.InitWalletRequest{
                WalletPassword:     password,
                CipherSeedMnemonic: mnemonic,
                AezeedPassphrase:   password,
                ExtendedMasterKey:  rootKey,
                RecoveryWindow:     recoveryWindow,
                ChannelBackups:     chanBackups,
        }
        _, err = h.manager.initWalletAndNode(n, initReq)
        require.NoErrorf(h, err, "failed to unlock and init node %s",
                n.Name())

        return n
}
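// A compact sketch of the seed round trip the two helpers above enable:
// create a node with a fresh aezeed, tear it down, then restore it from the
// mnemonic. The password, node name and recovery window are illustrative
// only.
func exampleSeedRestore(h *HarnessTest) {
        password := []byte("example-password")

        carol, mnemonic, _ := h.NewNodeWithSeed("Carol", nil, password, false)
        h.Shutdown(carol)

        restored := h.RestoreNodeWithSeed(
                "Carol", nil, password, mnemonic, "", 1000, nil,
        )
        _ = restored
}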

// NewNodeEtcd starts a new node with seed that'll use an external etcd
// database as its storage. The passed cluster flag indicates that we'd like
// the node to join the cluster leader election. We won't wait until RPC is
// available (this is useful when the node is not expected to become the
// leader right away).
func (h *HarnessTest) NewNodeEtcd(name string, etcdCfg *etcd.Config,
        password []byte, cluster bool,
        leaderSessionTTL int) *node.HarnessNode {

        // We don't want to use the embedded etcd instance.
        h.manager.dbBackend = node.BackendBbolt

        extraArgs := node.ExtraArgsEtcd(
                etcdCfg, name, cluster, leaderSessionTTL,
        )
        node, err := h.manager.newNode(h.T, name, extraArgs, password, true)
        require.NoError(h, err, "failed to create new node with etcd")

        // Start the node daemon only.
        err = node.StartLndCmd(h.runCtx)
        require.NoError(h, err, "failed to start node %s", node.Name())

        return node
}

// NewNodeWithSeedEtcd starts a new node with seed that'll use an external
// etcd database as its storage. The passed cluster flag indicates that we'd
// like the node to join the cluster leader election.
func (h *HarnessTest) NewNodeWithSeedEtcd(name string, etcdCfg *etcd.Config,
        password []byte, statelessInit, cluster bool,
        leaderSessionTTL int) (*node.HarnessNode, []string, []byte) {

        // We don't want to use the embedded etcd instance.
        h.manager.dbBackend = node.BackendBbolt

        // Create a request to generate a new aezeed. The new seed will have
        // the same password as the internal wallet.
        req := &lnrpc.GenSeedRequest{
                AezeedPassphrase: password,
                SeedEntropy:      nil,
        }

        extraArgs := node.ExtraArgsEtcd(
                etcdCfg, name, cluster, leaderSessionTTL,
        )

        return h.newNodeWithSeed(name, extraArgs, req, statelessInit)
}

// NewNodeRemoteSigner creates a new remote signer node and asserts its
// creation.
func (h *HarnessTest) NewNodeRemoteSigner(name string, extraArgs []string,
        password []byte, watchOnly *lnrpc.WatchOnly) *node.HarnessNode {

        hn, err := h.manager.newNode(h.T, name, extraArgs, password, true)
        require.NoErrorf(h, err, "unable to create new node for %s", name)

        err = hn.StartWithNoAuth(h.runCtx)
        require.NoError(h, err, "failed to start node %s", name)

        // Construct the init request to the node, including the watch-only
        // account information.
        initReq := &lnrpc.InitWalletRequest{
                WalletPassword: password,
                WatchOnly:      watchOnly,
        }

        // Pass the init request via rpc to finish unlocking the node. This
        // will also initialize the macaroon-authenticated LightningClient.
        _, err = h.manager.initWalletAndNode(hn, initReq)
        require.NoErrorf(h, err, "failed to init node %s", name)

        return hn
}

// KillNode kills the node and waits for the node process to stop.
func (h *HarnessTest) KillNode(hn *node.HarnessNode) {
        h.Logf("Manually killing the node %s", hn.Name())
        require.NoErrorf(h, hn.KillAndWait(), "%s: kill got error", hn.Name())
        delete(h.manager.activeNodes, hn.Cfg.NodeID)
}

// SetFeeEstimate sets a fee rate to be returned from fee estimator.
//
// NOTE: this method will set the fee rate for a conf target of 1, which is
// the fallback fee rate for a `WebAPIEstimator` if a higher conf target's fee
// rate is not set. This means if the fee rate for conf target 6 is set, the
// fee estimator will use that value instead.
func (h *HarnessTest) SetFeeEstimate(fee chainfee.SatPerKWeight) {
        h.feeService.SetFeeRate(fee, 1)
}

// SetFeeEstimateWithConf sets a fee rate of a specified conf target to be
// returned from fee estimator.
func (h *HarnessTest) SetFeeEstimateWithConf(
        fee chainfee.SatPerKWeight, conf uint32) {

        h.feeService.SetFeeRate(fee, conf)
}

// SetMinRelayFeerate sets a min relay fee rate to be returned from fee
// estimator.
func (h *HarnessTest) SetMinRelayFeerate(fee chainfee.SatPerKVByte) {
        h.feeService.SetMinRelayFeerate(fee)
}
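// A minimal sketch of priming the external fee service before a test runs;
// the rates shown are arbitrary example values.
func exampleFeeSetup(h *HarnessTest) {
        // Fallback rate used for conf target 1.
        h.SetFeeEstimate(chainfee.SatPerKWeight(12500))

        // A cheaper rate for a 6-block conf target.
        h.SetFeeEstimateWithConf(chainfee.SatPerKWeight(2500), 6)

        // Raise the advertised min relay fee rate.
        h.SetMinRelayFeerate(chainfee.SatPerKVByte(2000))
}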

// validateNodeState checks that the node doesn't have any uncleaned states
// which will affect its following tests.
func (h *HarnessTest) validateNodeState(hn *node.HarnessNode) error {
        errStr := func(subject string) error {
                return fmt.Errorf("%s: found %s channels, please close "+
                        "them properly", hn.Name(), subject)
        }
        // If the node still has open channels, it's most likely that the
        // current test didn't close them properly.
        if hn.State.OpenChannel.Active != 0 {
                return errStr("active")
        }
        if hn.State.OpenChannel.Public != 0 {
                return errStr("public")
        }
        if hn.State.OpenChannel.Private != 0 {
                return errStr("private")
        }
        if hn.State.OpenChannel.Pending != 0 {
                return errStr("pending open")
        }

        // The number of pending force close channels should be zero.
        if hn.State.CloseChannel.PendingForceClose != 0 {
                return errStr("pending force")
        }

        // The number of waiting close channels should be zero.
        if hn.State.CloseChannel.WaitingClose != 0 {
                return errStr("waiting close")
        }

        // The number of payments should be zero.
        if hn.State.Payment.Total != 0 {
                return fmt.Errorf("%s: found uncleaned payments, please "+
                        "delete all of them properly", hn.Name())
        }

        // The number of public edges should be zero.
        if hn.State.Edge.Public != 0 {
                return fmt.Errorf("%s: found active public edges, please "+
                        "clean them properly", hn.Name())
        }

        // The number of edges should be zero.
        if hn.State.Edge.Total != 0 {
                return fmt.Errorf("%s: found active edges, please "+
                        "clean them properly", hn.Name())
        }

        return nil
}

// GetChanPointFundingTxid takes a channel point and converts it into a chain
// hash.
func (h *HarnessTest) GetChanPointFundingTxid(
        cp *lnrpc.ChannelPoint) chainhash.Hash {

        txid, err := lnrpc.GetChanPointFundingTxid(cp)
        require.NoError(h, err, "unable to get txid")

        return *txid
}

// OutPointFromChannelPoint creates an outpoint from a given channel point.
func (h *HarnessTest) OutPointFromChannelPoint(
        cp *lnrpc.ChannelPoint) wire.OutPoint {

        txid := h.GetChanPointFundingTxid(cp)
        return wire.OutPoint{
                Hash:  txid,
                Index: cp.OutputIndex,
        }
}

// OpenChannelParams houses the params to specify when opening a new channel.
type OpenChannelParams struct {
        // Amt is the local amount being put into the channel.
        Amt btcutil.Amount

        // PushAmt is the amount that should be pushed to the remote when the
        // channel is opened.
        PushAmt btcutil.Amount

        // Private is a boolean indicating whether the opened channel should
        // be private.
        Private bool

        // SpendUnconfirmed is a boolean indicating whether we can utilize
        // unconfirmed outputs to fund the channel.
        SpendUnconfirmed bool

        // MinHtlc is the htlc_minimum_msat value set when opening the
        // channel.
        MinHtlc lnwire.MilliSatoshi

        // RemoteMaxHtlcs is the remote_max_htlcs value set when opening the
        // channel, restricting the number of concurrent HTLCs the remote
        // party can add to a commitment.
        RemoteMaxHtlcs uint16

        // FundingShim is an optional funding shim that the caller can specify
        // in order to modify the channel funding workflow.
        FundingShim *lnrpc.FundingShim

        // SatPerVByte is the amount of satoshis to spend in chain fees per
        // virtual byte of the transaction.
        SatPerVByte btcutil.Amount

        // ConfTarget is the number of blocks that the funding transaction
        // should be confirmed in.
        ConfTarget fn.Option[int32]

        // CommitmentType is the commitment type that should be used for the
        // channel to be opened.
        CommitmentType lnrpc.CommitmentType

        // ZeroConf is used to determine if the channel will be a zero-conf
        // channel. This only works if the explicit negotiation is used with
        // anchors or script enforced leases.
        ZeroConf bool

        // ScidAlias denotes whether the channel will be an option-scid-alias
        // channel type negotiation.
        ScidAlias bool

        // BaseFee is the channel base fee applied during the channel
        // announcement phase.
        BaseFee uint64

        // FeeRate is the channel fee rate in ppm applied during the channel
        // announcement phase.
        FeeRate uint64

        // UseBaseFee, if set, instructs the downstream logic to apply the
        // user-specified channel base fee to the channel update announcement.
        // If set to false it avoids applying a base fee of 0 and instead
        // activates the default configured base fee.
        UseBaseFee bool

        // UseFeeRate, if set, instructs the downstream logic to apply the
        // user-specified channel fee rate to the channel update announcement.
        // If set to false it avoids applying a fee rate of 0 and instead
        // activates the default configured fee rate.
        UseFeeRate bool

        // FundMax is a boolean indicating whether the channel should be
        // funded with the maximum possible amount from the wallet.
        FundMax bool

        // Memo is an optional note-to-self containing some useful information
        // about the channel. This is stored locally only, and is purely for
        // reference. It has no bearing on the channel's operation. Max
        // allowed length is 500 characters.
        Memo string

        // Outpoints is a list of client-selected outpoints that should be
        // used for funding a channel. If Amt is specified then this amount is
        // allocated from the sum of outpoints towards funding. If the FundMax
        // flag is specified the entirety of selected funds is allocated
        // towards channel funding.
        Outpoints []*lnrpc.OutPoint

        // CloseAddress sets the upfront_shutdown_script parameter during
        // channel open. It is expected to be encoded as a bitcoin address.
        CloseAddress string
}
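// A short sketch of filling in OpenChannelParams for a typical channel open
// and coop close; the amounts and node handles are illustrative only.
func exampleOpenChannelParams(h *HarnessTest, alice, bob *node.HarnessNode) {
        chanPoint := h.OpenChannel(alice, bob, OpenChannelParams{
                Amt:     btcutil.Amount(1_000_000),
                PushAmt: btcutil.Amount(250_000),
                Private: false,
        })

        // Coop close the channel once the test is done with it.
        h.CloseChannel(alice, chanPoint)
}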

// prepareOpenChannel waits for both nodes to be synced to chain and returns
// an OpenChannelRequest.
func (h *HarnessTest) prepareOpenChannel(srcNode, destNode *node.HarnessNode,
        p OpenChannelParams) *lnrpc.OpenChannelRequest {

        // Wait until srcNode and destNode have the latest chain synced.
        // Otherwise, we may run into a check within the funding manager that
        // prevents any funding workflows from being kicked off if the chain
        // isn't yet synced.
        h.WaitForBlockchainSync(srcNode)
        h.WaitForBlockchainSync(destNode)

        // Specify the minimal confirmations of the UTXOs used for channel
        // funding.
        minConfs := int32(1)
        if p.SpendUnconfirmed {
                minConfs = 0
        }

        // Get the requested conf target. If not set, default to 6.
        confTarget := p.ConfTarget.UnwrapOr(6)

        // If there's a fee rate set, unset the conf target.
        if p.SatPerVByte != 0 {
                confTarget = 0
        }

        // Prepare the request.
        return &lnrpc.OpenChannelRequest{
                NodePubkey:         destNode.PubKey[:],
                LocalFundingAmount: int64(p.Amt),
                PushSat:            int64(p.PushAmt),
                Private:            p.Private,
                TargetConf:         confTarget,
                MinConfs:           minConfs,
                SpendUnconfirmed:   p.SpendUnconfirmed,
                MinHtlcMsat:        int64(p.MinHtlc),
                RemoteMaxHtlcs:     uint32(p.RemoteMaxHtlcs),
                FundingShim:        p.FundingShim,
                SatPerVbyte:        uint64(p.SatPerVByte),
                CommitmentType:     p.CommitmentType,
                ZeroConf:           p.ZeroConf,
                ScidAlias:          p.ScidAlias,
                BaseFee:            p.BaseFee,
                FeeRate:            p.FeeRate,
                UseBaseFee:         p.UseBaseFee,
                UseFeeRate:         p.UseFeeRate,
                FundMax:            p.FundMax,
                Memo:               p.Memo,
                Outpoints:          p.Outpoints,
                CloseAddress:       p.CloseAddress,
        }
}

// openChannelAssertPending attempts to open a channel between srcNode and
// destNode with the passed channel funding parameters. Once the
// `OpenChannel` is called, it will consume the first event it receives from
// the open channel client and asserts it's a channel pending event.
func (h *HarnessTest) openChannelAssertPending(srcNode,
        destNode *node.HarnessNode,
        p OpenChannelParams) (*lnrpc.PendingUpdate, rpc.OpenChanClient) {

        // Prepare the request and open the channel.
        openReq := h.prepareOpenChannel(srcNode, destNode, p)
        respStream := srcNode.RPC.OpenChannel(openReq)

        // Consume the "channel pending" update. This waits until the node
        // notifies us that the final message in the channel funding workflow
        // has been sent to the remote node.
        resp := h.ReceiveOpenChannelUpdate(respStream)

        // Check that the update is channel pending.
        update, ok := resp.Update.(*lnrpc.OpenStatusUpdate_ChanPending)
        require.Truef(h, ok, "expected channel pending update, instead got %v",
                resp)

        return update.ChanPending, respStream
}

// OpenChannelAssertPending attempts to open a channel between srcNode and
// destNode with the passed channel funding parameters. Once the
// `OpenChannel` is called, it will consume the first event it receives from
// the open channel client and asserts it's a channel pending event. It
// returns the `PendingUpdate`.
func (h *HarnessTest) OpenChannelAssertPending(srcNode,
        destNode *node.HarnessNode, p OpenChannelParams) *lnrpc.PendingUpdate {

        resp, _ := h.openChannelAssertPending(srcNode, destNode, p)
        return resp
}

// OpenChannelAssertStream attempts to open a channel between srcNode and
// destNode with the passed channel funding parameters. Once the
// `OpenChannel` is called, it will consume the first event it receives from
// the open channel client and asserts it's a channel pending event. It
// returns the open channel stream.
func (h *HarnessTest) OpenChannelAssertStream(srcNode,
        destNode *node.HarnessNode, p OpenChannelParams) rpc.OpenChanClient {

        _, stream := h.openChannelAssertPending(srcNode, destNode, p)
        return stream
}

// OpenChannel attempts to open a channel with the specified parameters
// extended from Alice to Bob. Additionally, for public channels, it will mine
// extra blocks so they are announced to the network. Specifically, the
// following items are asserted,
//   - for a non-zero conf channel, 1 block will be mined to confirm the
//     funding tx.
//   - both nodes should see the channel edge update in their network graph.
//   - both nodes can report the status of the new channel from ListChannels.
//   - extra blocks are mined if it's a public channel.
func (h *HarnessTest) OpenChannel(alice, bob *node.HarnessNode,
        p OpenChannelParams) *lnrpc.ChannelPoint {

        // First, open the channel without announcing it.
        cp := h.OpenChannelNoAnnounce(alice, bob, p)

        // If this is a private channel, there's no need to mine extra blocks
        // since it will never be announced to the network.
        if p.Private {
                return cp
        }

        // Mine extra blocks to announce the channel.
        if p.ZeroConf {
                // For a zero-conf channel, no blocks have been mined so we
                // need to mine 6 blocks.
                //
                // Mine 1 block to confirm the funding transaction.
                h.MineBlocksAndAssertNumTxes(numBlocksOpenChannel, 1)
        } else {
                // For a regular channel, 1 block has already been mined to
                // confirm the funding transaction, so we mine 5 blocks.
                h.MineBlocks(numBlocksOpenChannel - 1)
        }

        return cp
}

// OpenChannelNoAnnounce attempts to open a channel with the specified
// parameters extended from Alice to Bob without mining the necessary blocks
// to announce the channel. Additionally, the following items are asserted,
//   - for a non-zero conf channel, 1 block will be mined to confirm the
//     funding tx.
//   - both nodes should see the channel edge update in their network graph.
//   - both nodes can report the status of the new channel from ListChannels.
func (h *HarnessTest) OpenChannelNoAnnounce(alice, bob *node.HarnessNode,
        p OpenChannelParams) *lnrpc.ChannelPoint {

        chanOpenUpdate := h.OpenChannelAssertStream(alice, bob, p)

        // Open a zero conf channel.
        if p.ZeroConf {
                return h.openChannelZeroConf(alice, bob, chanOpenUpdate)
        }

        // Open a non-zero conf channel.
        return h.openChannel(alice, bob, chanOpenUpdate)
}

// openChannel attempts to open a channel with the specified parameters
// extended from Alice to Bob. Additionally, the following items are asserted,
//   - 1 block is mined and the funding transaction should be found in it.
//   - both nodes should see the channel edge update in their network graph.
//   - both nodes can report the status of the new channel from ListChannels.
func (h *HarnessTest) openChannel(alice, bob *node.HarnessNode,
        stream rpc.OpenChanClient) *lnrpc.ChannelPoint {

        // Mine 1 block to confirm the funding transaction.
        block := h.MineBlocksAndAssertNumTxes(1, 1)[0]

        // Wait for the channel open event.
        fundingChanPoint := h.WaitForChannelOpenEvent(stream)

        // Check that the funding tx is found in the first block.
        fundingTxID := h.GetChanPointFundingTxid(fundingChanPoint)
        h.AssertTxInBlock(block, fundingTxID)

        // Check that both alice and bob have seen the channel from their
        // network topology.
        h.AssertChannelInGraph(alice, fundingChanPoint)
        h.AssertChannelInGraph(bob, fundingChanPoint)

        // Check that the channel can be seen in their ListChannels.
        h.AssertChannelExists(alice, fundingChanPoint)
        h.AssertChannelExists(bob, fundingChanPoint)

        return fundingChanPoint
}

// openChannelZeroConf attempts to open a channel with the specified
// parameters extended from Alice to Bob. Additionally, the following items
// are asserted,
//   - both nodes should see the channel edge update in their network graph.
//   - both nodes can report the status of the new channel from ListChannels.
func (h *HarnessTest) openChannelZeroConf(alice, bob *node.HarnessNode,
        stream rpc.OpenChanClient) *lnrpc.ChannelPoint {

        // Wait for the channel open event.
        fundingChanPoint := h.WaitForChannelOpenEvent(stream)

        // Check that both alice and bob have seen the channel from their
        // network topology.
        h.AssertChannelInGraph(alice, fundingChanPoint)
        h.AssertChannelInGraph(bob, fundingChanPoint)

        // Finally, check that the channel can be seen in their ListChannels.
        h.AssertChannelExists(alice, fundingChanPoint)
        h.AssertChannelExists(bob, fundingChanPoint)

        return fundingChanPoint
}

// OpenChannelAssertErr opens a channel between srcNode and destNode and
// asserts that the expected error is returned from the channel opening.
func (h *HarnessTest) OpenChannelAssertErr(srcNode, destNode *node.HarnessNode,
        p OpenChannelParams, expectedErr error) {

        // Prepare the request and open the channel.
        openReq := h.prepareOpenChannel(srcNode, destNode, p)
        respStream := srcNode.RPC.OpenChannel(openReq)

        // Receive an error to be sent from the stream.
        _, err := h.receiveOpenChannelUpdate(respStream)
        require.NotNil(h, err, "expected channel opening to fail")

        // Use string comparison here as we haven't codified all the RPC
        // errors yet.
        require.Containsf(h, err.Error(), expectedErr.Error(), "unexpected "+
                "error returned, want %v, got %v", expectedErr, err)
}

// CloseChannelAssertPending attempts to close the channel indicated by the
// passed channel point, initiated by the passed node. Once the CloseChannel
// rpc is called, it will consume one event and assert it's a close pending
// event. In addition, it will check that the closing tx can be found in the
// mempool.
func (h *HarnessTest) CloseChannelAssertPending(hn *node.HarnessNode,
        cp *lnrpc.ChannelPoint,
        force bool) (rpc.CloseChanClient, chainhash.Hash) {

        // Calls the rpc to close the channel.
        closeReq := &lnrpc.CloseChannelRequest{
                ChannelPoint: cp,
                Force:        force,
                NoWait:       true,
        }

        // For coop close, we use a default conf target of 6.
        if !force {
                closeReq.TargetConf = 6
        }

        var (
                stream rpc.CloseChanClient
                event  *lnrpc.CloseStatusUpdate
                err    error
        )

        // Consume the "channel close" update in order to wait for the closing
        // transaction to be broadcast, then wait for the closing tx to be
        // seen within the network.
        stream = hn.RPC.CloseChannel(closeReq)
        _, err = h.ReceiveCloseChannelUpdate(stream)
        require.NoError(h, err, "close channel update got error: %v", err)

        event, err = h.ReceiveCloseChannelUpdate(stream)
        if err != nil {
                h.Logf("Test: %s, close channel got error: %v",
                        h.manager.currentTestCase, err)
        }
        require.NoError(h, err, "retry closing channel failed")

        pendingClose, ok := event.Update.(*lnrpc.CloseStatusUpdate_ClosePending)
        require.Truef(h, ok, "expected channel close update, instead got %v",
                pendingClose)

        closeTxid, err := chainhash.NewHash(pendingClose.ClosePending.Txid)
        require.NoErrorf(h, err, "unable to decode closeTxid: %v",
                pendingClose.ClosePending.Txid)

        // Assert the closing tx is in the mempool.
        h.miner.AssertTxInMempool(*closeTxid)

        return stream, *closeTxid
}

// CloseChannel attempts to coop close a non-anchored channel identified by
// the passed channel point owned by the passed harness node. The following
// items are asserted,
//  1. a close pending event is sent from the close channel client.
//  2. the closing tx is found in the mempool.
//  3. the node reports the channel being waiting to close.
//  4. a block is mined and the closing tx should be found in it.
//  5. the node reports zero waiting close channels.
//  6. the node receives a topology update regarding the channel close.
func (h *HarnessTest) CloseChannel(hn *node.HarnessNode,
        cp *lnrpc.ChannelPoint) chainhash.Hash {

        stream, _ := h.CloseChannelAssertPending(hn, cp, false)

        return h.AssertStreamChannelCoopClosed(hn, cp, false, stream)
}

// ForceCloseChannel attempts to force close a non-anchored channel identified
// by the passed channel point owned by the passed harness node. The following
// items are asserted,
//  1. a close pending event is sent from the close channel client.
//  2. the closing tx is found in the mempool.
//  3. the node reports the channel being waiting to close.
//  4. a block is mined and the closing tx should be found in it.
//  5. the node reports zero waiting close channels.
//  6. the node receives a topology update regarding the channel close.
//  7. mine DefaultCSV-1 blocks.
//  8. the node reports zero pending force close channels.
func (h *HarnessTest) ForceCloseChannel(hn *node.HarnessNode,
        cp *lnrpc.ChannelPoint) chainhash.Hash {

        stream, _ := h.CloseChannelAssertPending(hn, cp, true)

        closingTxid := h.AssertStreamChannelForceClosed(hn, cp, false, stream)

        // Cleanup the force close.
        h.CleanupForceClose(hn)

        return closingTxid
}
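// A minimal sketch of the open/force-close lifecycle using the helpers
// above; the node handles and amount are illustrative only.
func exampleForceCloseLifecycle(h *HarnessTest, alice, bob *node.HarnessNode) {
        cp := h.OpenChannel(alice, bob, OpenChannelParams{
                Amt: btcutil.Amount(500_000),
        })

        // Force close and clean up the resulting pending close state.
        closingTxid := h.ForceCloseChannel(alice, cp)
        h.Logf("channel %v force closed in tx %v", cp, closingTxid)
}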

1257
// CloseChannelAssertErr closes the given channel and asserts an error
1258
// returned.
1259
func (h *HarnessTest) CloseChannelAssertErr(hn *node.HarnessNode,
1260
        cp *lnrpc.ChannelPoint, force bool) error {
×
1261

×
1262
        // Calls the rpc to close the channel.
×
1263
        closeReq := &lnrpc.CloseChannelRequest{
×
1264
                ChannelPoint: cp,
×
1265
                Force:        force,
×
1266
        }
×
1267
        stream := hn.RPC.CloseChannel(closeReq)
×
1268

×
1269
        // Consume the "channel close" update in order to wait for the closing
×
1270
        // transaction to be broadcast, then wait for the closing tx to be seen
×
1271
        // within the network.
×
1272
        _, err := h.ReceiveCloseChannelUpdate(stream)
×
1273
        require.Errorf(h, err, "%s: expect close channel to return an error",
×
1274
                hn.Name())
×
1275

×
1276
        return err
×
1277
}
×

// IsNeutrinoBackend returns a bool indicating whether the node is using
// neutrino as its backend. This is useful when we want to skip certain tests
// which cannot be done with a neutrino backend.
func (h *HarnessTest) IsNeutrinoBackend() bool {
        return h.manager.chainBackend.Name() == NeutrinoBackendName
}

// fundCoins attempts to send amt satoshis from the internal mining node to the
// targeted lightning node. The confirmed boolean indicates whether the
// transaction that pays to the target should confirm. For neutrino backend,
// the `confirmed` param is ignored.
func (h *HarnessTest) fundCoins(amt btcutil.Amount, target *node.HarnessNode,
        addrType lnrpc.AddressType, confirmed bool) {

        initialBalance := target.RPC.WalletBalance()

        // First, obtain an address from the target lightning node, preferring
        // to receive a p2wkh address s.t. the output can immediately be used
        // as an input to a funding transaction.
        req := &lnrpc.NewAddressRequest{Type: addrType}
        resp := target.RPC.NewAddress(req)
        addr := h.DecodeAddress(resp.Address)
        addrScript := h.PayToAddrScript(addr)

        // Generate a transaction which creates an output to the target
        // pkScript of the desired amount.
        output := &wire.TxOut{
                PkScript: addrScript,
                Value:    int64(amt),
        }
        h.miner.SendOutput(output, defaultMinerFeeRate)

        // Encode the pkScript in hex as this is the format in which it will
        // be returned via rpc.
        expPkScriptStr := hex.EncodeToString(addrScript)

        // Now, wait for ListUnspent to show the unconfirmed transaction
        // containing the correct pkscript.
        //
        // Since neutrino doesn't support unconfirmed outputs, skip this check.
        if !h.IsNeutrinoBackend() {
                utxos := h.AssertNumUTXOsUnconfirmed(target, 1)

                // Assert that the lone unconfirmed utxo contains the same
                // pkscript as the output generated above.
                pkScriptStr := utxos[0].PkScript
                require.Equal(h, pkScriptStr, expPkScriptStr,
                        "pkscript mismatch")

                expectedBalance := btcutil.Amount(
                        initialBalance.UnconfirmedBalance,
                ) + amt
                h.WaitForBalanceUnconfirmed(target, expectedBalance)
        }

        // If the transaction should remain unconfirmed, then we'll wait until
        // the target node's unconfirmed balance reflects the expected balance
        // and exit.
        if !confirmed {
                return
        }

        // Otherwise, we'll generate 1 new block to ensure the output gains a
        // sufficient number of confirmations and wait for the balance to
        // reflect what's expected.
        h.MineBlocksAndAssertNumTxes(1, 1)

        expectedBalance := btcutil.Amount(initialBalance.ConfirmedBalance) + amt
        h.WaitForBalanceConfirmed(target, expectedBalance)
}

// FundCoins attempts to send amt satoshis from the internal mining node to the
// targeted lightning node using a P2WKH address. One block is mined afterward
// to confirm the transaction.
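//
// A minimal usage sketch (illustrative only; funds the assumed node `alice`
// with 1 BTC and waits for the confirmed balance to update):
//
//        h.FundCoins(btcutil.SatoshiPerBitcoin, alice)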
func (h *HarnessTest) FundCoins(amt btcutil.Amount, hn *node.HarnessNode) {
        h.fundCoins(amt, hn, lnrpc.AddressType_WITNESS_PUBKEY_HASH, true)
}

// FundCoinsUnconfirmed attempts to send amt satoshis from the internal mining
// node to the targeted lightning node using a P2WKH address. No blocks are
// mined afterward and the UTXOs are unconfirmed.
func (h *HarnessTest) FundCoinsUnconfirmed(amt btcutil.Amount,
        hn *node.HarnessNode) {

        h.fundCoins(amt, hn, lnrpc.AddressType_WITNESS_PUBKEY_HASH, false)
}

// FundCoinsNP2WKH attempts to send amt satoshis from the internal mining node
// to the targeted lightning node using a NP2WKH address.
func (h *HarnessTest) FundCoinsNP2WKH(amt btcutil.Amount,
        target *node.HarnessNode) {

        h.fundCoins(amt, target, lnrpc.AddressType_NESTED_PUBKEY_HASH, true)
}

// FundCoinsP2TR attempts to send amt satoshis from the internal mining node to
// the targeted lightning node using a P2TR address.
func (h *HarnessTest) FundCoinsP2TR(amt btcutil.Amount,
        target *node.HarnessNode) {

        h.fundCoins(amt, target, lnrpc.AddressType_TAPROOT_PUBKEY, true)
}

// completePaymentRequestsAssertStatus sends payments from a node to complete
// all payment requests. This function does not return until all payments
// have reached the specified status.
func (h *HarnessTest) completePaymentRequestsAssertStatus(hn *node.HarnessNode,
        paymentRequests []string, status lnrpc.Payment_PaymentStatus,
        opts ...HarnessOpt) {

        payOpts := defaultHarnessOpts()
        for _, opt := range opts {
                opt(&payOpts)
        }

        // Create a buffered chan to signal the results.
        results := make(chan rpc.PaymentClient, len(paymentRequests))

        // send sends a payment and signals the created payment stream via the
        // results channel.
        send := func(payReq string) {
                req := &routerrpc.SendPaymentRequest{
                        PaymentRequest: payReq,
                        TimeoutSeconds: int32(wait.PaymentTimeout.Seconds()),
                        FeeLimitMsat:   noFeeLimitMsat,
                        Amp:            payOpts.useAMP,
                }
                stream := hn.RPC.SendPayment(req)

                // Signal that the send has succeeded.
                results <- stream
        }

        // Launch all payments simultaneously.
        for _, payReq := range paymentRequests {
                payReqCopy := payReq
                go send(payReqCopy)
        }

        // Wait for all payments to report the expected status.
        timer := time.After(wait.PaymentTimeout)
        for range paymentRequests {
                select {
                case stream := <-results:
                        h.AssertPaymentStatusFromStream(stream, status)

                case <-timer:
                        require.Fail(h, "timeout",
                                "waiting payment results timeout")
                }
        }
}

// CompletePaymentRequests sends payments from a node to complete all payment
// requests. This function does not return until all payments successfully
// complete without errors.
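//
// A minimal usage sketch (illustrative only; `payReqs` would come from
// CreatePayReqs on the receiving node):
//
//        payReqs, _, _ := h.CreatePayReqs(bob, 10_000, 3)
//        h.CompletePaymentRequests(alice, payReqs)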
func (h *HarnessTest) CompletePaymentRequests(hn *node.HarnessNode,
        paymentRequests []string, opts ...HarnessOpt) {

        h.completePaymentRequestsAssertStatus(
                hn, paymentRequests, lnrpc.Payment_SUCCEEDED, opts...,
        )
}

// CompletePaymentRequestsNoWait sends payments from a node to complete all
// payment requests without waiting for the results. Instead, it checks that
// the number of updates in the specified channel has increased.
func (h *HarnessTest) CompletePaymentRequestsNoWait(hn *node.HarnessNode,
        paymentRequests []string, chanPoint *lnrpc.ChannelPoint) {

        // We start by getting the current state of the client's channels. This
        // is needed to ensure the payments actually have been committed before
        // we return.
        oldResp := h.GetChannelByChanPoint(hn, chanPoint)

        // Send payments and assert they are in-flight.
        h.completePaymentRequestsAssertStatus(
                hn, paymentRequests, lnrpc.Payment_IN_FLIGHT,
        )

        // We are not waiting for feedback in the form of a response, but we
        // should still wait long enough for the server to receive and handle
        // the send before cancelling the request. We wait until the number of
        // updates to one of our channels has increased before we return.
        err := wait.NoError(func() error {
                newResp := h.GetChannelByChanPoint(hn, chanPoint)

                // If this channel has an increased number of updates, we
                // assume the payments are committed, and we can return.
                if newResp.NumUpdates > oldResp.NumUpdates {
                        return nil
                }

                // Otherwise return an error as NumUpdates has not increased.
                return fmt.Errorf("%s: channel:%v not updated after sending "+
                        "payments, old updates: %v, new updates: %v", hn.Name(),
                        chanPoint, oldResp.NumUpdates, newResp.NumUpdates)
        }, DefaultTimeout)
        require.NoError(h, err, "timeout while checking for channel updates")
}

// OpenChannelPsbt attempts to open a channel between srcNode and destNode with
// the passed channel funding parameters. It will assert if the expected step
// of funding the PSBT is not received from the source node.
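//
// A minimal usage sketch (illustrative only; `chanAmt` and `psbtShim` are
// assumed to be prepared by the caller, and the returned PSBT still needs to
// be funded, verified and finalized):
//
//        stream, psbtBytes := h.OpenChannelPsbt(alice, bob, OpenChannelParams{
//                Amt:         chanAmt,
//                FundingShim: psbtShim,
//        })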
func (h *HarnessTest) OpenChannelPsbt(srcNode, destNode *node.HarnessNode,
        p OpenChannelParams) (rpc.OpenChanClient, []byte) {

        // Wait until srcNode and destNode have the latest chain synced.
        // Otherwise, we may run into a check within the funding manager that
        // prevents any funding workflows from being kicked off if the chain
        // isn't yet synced.
        h.WaitForBlockchainSync(srcNode)
        h.WaitForBlockchainSync(destNode)

        // Send the request to open a channel to the source node now. This will
        // open a long-lived stream where we'll receive status updates about
        // the progress of the channel.
        req := &lnrpc.OpenChannelRequest{
                NodePubkey:         destNode.PubKey[:],
                LocalFundingAmount: int64(p.Amt),
                PushSat:            int64(p.PushAmt),
                Private:            p.Private,
                SpendUnconfirmed:   p.SpendUnconfirmed,
                MinHtlcMsat:        int64(p.MinHtlc),
                FundingShim:        p.FundingShim,
                CommitmentType:     p.CommitmentType,
        }
        respStream := srcNode.RPC.OpenChannel(req)

        // Consume the "PSBT funding ready" update. This waits until the node
        // notifies us that the PSBT can now be funded.
        resp := h.ReceiveOpenChannelUpdate(respStream)
        upd, ok := resp.Update.(*lnrpc.OpenStatusUpdate_PsbtFund)
        require.Truef(h, ok, "expected PSBT funding update, got %v", resp)

        // Make sure the channel funding address has the correct type for the
        // given commitment type.
        fundingAddr, err := btcutil.DecodeAddress(
                upd.PsbtFund.FundingAddress, miner.HarnessNetParams,
        )
        require.NoError(h, err)

        switch p.CommitmentType {
        case lnrpc.CommitmentType_SIMPLE_TAPROOT:
                require.IsType(h, &btcutil.AddressTaproot{}, fundingAddr)

        default:
                require.IsType(
                        h, &btcutil.AddressWitnessScriptHash{}, fundingAddr,
                )
        }

        return respStream, upd.PsbtFund.Psbt
}

// CleanupForceClose mines blocks to clean up the force close process. This is
// used for tests that are not asserting the expected behavior is found during
// the force close process, e.g., num of sweeps, etc. Instead, it provides a
// shortcut to move the test forward with a clean mempool.
func (h *HarnessTest) CleanupForceClose(hn *node.HarnessNode) {
        // Wait for the channel to be marked pending force close.
        h.AssertNumPendingForceClose(hn, 1)

        // Mine enough blocks for the node to sweep its funds from the force
        // closed channel. The commit sweep resolver offers the input to the
        // sweeper when the channel is force closed, and the sweep tx is
        // broadcast at defaultCSV-1.
        //
        // NOTE: we might mine empty blocks here as we don't know the exact
        // number of blocks to mine. This may end up mining more blocks than
        // needed.
        h.MineEmptyBlocks(node.DefaultCSV - 1)

        // Assert there is one pending sweep.
        h.AssertNumPendingSweeps(hn, 1)

        // The node should now sweep the funds, clean up by mining the sweeping
        // tx.
        h.MineBlocksAndAssertNumTxes(1, 1)

        // Mine blocks to get any second level HTLC resolved. If there are no
        // HTLCs, this will behave like h.AssertNumPendingCloseChannels.
        h.mineTillForceCloseResolved(hn)
}

// CreatePayReqs is a helper method that will create a slice of payment
// requests for the given node.
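//
// A minimal usage sketch (illustrative only; creates three 10k-sat invoices on
// the assumed node `bob`, whose hashes and invoices can be used for later
// assertions):
//
//        payReqs, rHashes, invoices := h.CreatePayReqs(bob, 10_000, 3)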
func (h *HarnessTest) CreatePayReqs(hn *node.HarnessNode,
        paymentAmt btcutil.Amount, numInvoices int,
        routeHints ...*lnrpc.RouteHint) ([]string, [][]byte, []*lnrpc.Invoice) {

        payReqs := make([]string, numInvoices)
        rHashes := make([][]byte, numInvoices)
        invoices := make([]*lnrpc.Invoice, numInvoices)
        for i := 0; i < numInvoices; i++ {
                preimage := h.Random32Bytes()

                invoice := &lnrpc.Invoice{
                        Memo:       "testing",
                        RPreimage:  preimage,
                        Value:      int64(paymentAmt),
                        RouteHints: routeHints,
                }
                resp := hn.RPC.AddInvoice(invoice)

                // Set the payment address in the invoice so the caller can
                // properly use it.
                invoice.PaymentAddr = resp.PaymentAddr

                payReqs[i] = resp.PaymentRequest
                rHashes[i] = resp.RHash
                invoices[i] = invoice
        }

        return payReqs, rHashes, invoices
}

// BackupDB creates a backup of the current database. It will stop the node
// first, copy the database files, and restart the node.
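//
// A minimal usage sketch (illustrative only; pairs with
// RestartNodeAndRestoreDB to roll the assumed node `carol` back to the
// backed-up state):
//
//        h.BackupDB(carol)
//        // ...mutate state that should later be discarded...
//        h.RestartNodeAndRestoreDB(carol)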
func (h *HarnessTest) BackupDB(hn *node.HarnessNode) {
        restart := h.SuspendNode(hn)

        err := hn.BackupDB()
        require.NoErrorf(h, err, "%s: failed to backup db", hn.Name())

        err = restart()
        require.NoErrorf(h, err, "%s: failed to restart", hn.Name())
}

// RestartNodeAndRestoreDB restarts a given node with a callback to restore the
// db.
func (h *HarnessTest) RestartNodeAndRestoreDB(hn *node.HarnessNode) {
        cb := func() error { return hn.RestoreDB() }
        err := h.manager.restartNode(h.runCtx, hn, cb)
        require.NoErrorf(h, err, "failed to restart node %s", hn.Name())

        err = h.manager.unlockNode(hn)
        require.NoErrorf(h, err, "failed to unlock node %s", hn.Name())

        // Give the node some time to catch up with the chain before we
        // continue with the tests.
        h.WaitForBlockchainSync(hn)
}

// CleanShutDown is used to quickly end a test by shutting down all non-standby
// nodes and mining blocks to empty the mempool.
//
// NOTE: this method provides a faster exit for a test that involves force
// closures as the caller doesn't need to mine all the blocks to make sure the
// mempool is empty.
func (h *HarnessTest) CleanShutDown() {
        // First, shutdown all nodes to prevent new transactions being created
        // and fed into the mempool.
        h.shutdownAllNodes()

        // Now mine blocks till the mempool is empty.
        h.cleanMempool()
}

// QueryChannelByChanPoint tries to find a channel matching the channel point
// and asserts. It returns the channel found.
func (h *HarnessTest) QueryChannelByChanPoint(hn *node.HarnessNode,
        chanPoint *lnrpc.ChannelPoint,
        opts ...ListChannelOption) *lnrpc.Channel {

        channel, err := h.findChannel(hn, chanPoint, opts...)
        require.NoError(h, err, "failed to query channel")

        return channel
}

// SendPaymentAndAssertStatus sends a payment from the passed node and asserts
// the desired status is reached.
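//
// A minimal usage sketch (illustrative only; `payReq` is an assumed invoice
// and the request fields mirror those used elsewhere in this harness):
//
//        req := &routerrpc.SendPaymentRequest{
//                PaymentRequest: payReq,
//                TimeoutSeconds: int32(wait.PaymentTimeout.Seconds()),
//                FeeLimitMsat:   noFeeLimitMsat,
//        }
//        payment := h.SendPaymentAndAssertStatus(
//                alice, req, lnrpc.Payment_SUCCEEDED,
//        )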
func (h *HarnessTest) SendPaymentAndAssertStatus(hn *node.HarnessNode,
        req *routerrpc.SendPaymentRequest,
        status lnrpc.Payment_PaymentStatus) *lnrpc.Payment {

        stream := hn.RPC.SendPayment(req)
        return h.AssertPaymentStatusFromStream(stream, status)
}

// SendPaymentAssertFail sends a payment from the passed node and asserts the
// payment fails with the specified failure reason.
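//
// A minimal usage sketch (illustrative only; expecting a no-route failure for
// the assumed request `req`):
//
//        payment := h.SendPaymentAssertFail(
//                alice, req,
//                lnrpc.PaymentFailureReason_FAILURE_REASON_NO_ROUTE,
//        )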
func (h *HarnessTest) SendPaymentAssertFail(hn *node.HarnessNode,
        req *routerrpc.SendPaymentRequest,
        reason lnrpc.PaymentFailureReason) *lnrpc.Payment {

        payment := h.SendPaymentAndAssertStatus(hn, req, lnrpc.Payment_FAILED)
        require.Equal(h, reason, payment.FailureReason,
                "payment failureReason not matched")

        return payment
}

// SendPaymentAssertSettled sends a payment from the passed node and asserts
// the payment is settled.
func (h *HarnessTest) SendPaymentAssertSettled(hn *node.HarnessNode,
        req *routerrpc.SendPaymentRequest) *lnrpc.Payment {

        return h.SendPaymentAndAssertStatus(hn, req, lnrpc.Payment_SUCCEEDED)
}

// SendPaymentAssertInflight sends a payment from the passed node and asserts
// the payment is inflight.
func (h *HarnessTest) SendPaymentAssertInflight(hn *node.HarnessNode,
        req *routerrpc.SendPaymentRequest) *lnrpc.Payment {

        return h.SendPaymentAndAssertStatus(hn, req, lnrpc.Payment_IN_FLIGHT)
}

// OpenChannelRequest is used to open a channel using the method
// OpenMultiChannelsAsync.
type OpenChannelRequest struct {
        // Local is the funding node.
        Local *node.HarnessNode

        // Remote is the receiving node.
        Remote *node.HarnessNode

        // Param is the open channel params.
        Param OpenChannelParams

        // stream is the client created after calling OpenChannel RPC.
        stream rpc.OpenChanClient

        // result is a channel used to send the channel point once the funding
        // has succeeded.
        result chan *lnrpc.ChannelPoint
}

// OpenMultiChannelsAsync takes a list of OpenChannelRequest and opens them in
// batch. The channel points are returned in the same order as the requests
// once all of the channel opens have succeeded.
//
// NOTE: compared to opening multiple channels sequentially, this method is
// faster as it doesn't need to mine 6 blocks for each channel open. However,
// it does make debugging the logs more difficult as messages are intertwined.
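//
// A minimal usage sketch (illustrative only; opens alice->bob and bob->carol
// in a single batch with shared params `p`):
//
//        reqs := []*OpenChannelRequest{
//                {Local: alice, Remote: bob, Param: p},
//                {Local: bob, Remote: carol, Param: p},
//        }
//        chanPoints := h.OpenMultiChannelsAsync(reqs)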
func (h *HarnessTest) OpenMultiChannelsAsync(
        reqs []*OpenChannelRequest) []*lnrpc.ChannelPoint {

        // openChannel opens a channel based on the request.
        openChannel := func(req *OpenChannelRequest) {
                stream := h.OpenChannelAssertStream(
                        req.Local, req.Remote, req.Param,
                )
                req.stream = stream
        }

        // assertChannelOpen is a helper closure that asserts a channel is
        // open.
        assertChannelOpen := func(req *OpenChannelRequest) {
                // Wait for the channel open event from the stream.
                cp := h.WaitForChannelOpenEvent(req.stream)

                if !req.Param.Private {
                        // Check that both alice and bob have seen the channel
                        // from their channel watch request.
                        h.AssertChannelInGraph(req.Local, cp)
                        h.AssertChannelInGraph(req.Remote, cp)
                }

                // Finally, check that the channel can be seen in their
                // ListChannels.
                h.AssertChannelExists(req.Local, cp)
                h.AssertChannelExists(req.Remote, cp)

                req.result <- cp
        }

        // Go through the requests and make the OpenChannel RPC call.
        for _, r := range reqs {
                openChannel(r)
        }

        // Mine one block to confirm all the funding transactions.
        h.MineBlocksAndAssertNumTxes(1, len(reqs))

        // Mine 5 more blocks so all the public channels are announced to the
        // network.
        h.MineBlocks(numBlocksOpenChannel - 1)

        // Once the blocks are mined, we fire goroutines for each of the
        // requests to watch for the channel opening.
        for _, r := range reqs {
                r.result = make(chan *lnrpc.ChannelPoint, 1)
                go assertChannelOpen(r)
        }

        // Finally, collect the results.
        channelPoints := make([]*lnrpc.ChannelPoint, 0)
        for _, r := range reqs {
                select {
                case cp := <-r.result:
                        channelPoints = append(channelPoints, cp)

                case <-time.After(wait.ChannelOpenTimeout):
                        require.Failf(h, "timeout", "wait channel point "+
                                "timeout for channel %s=>%s", r.Local.Name(),
                                r.Remote.Name())
                }
        }

        // Assert that we have the expected num of channel points.
        require.Len(h, channelPoints, len(reqs),
                "returned channel points not match")

        return channelPoints
}

// ReceiveInvoiceUpdate waits until a message is received on the subscribe
// invoice stream or the timeout is reached.
func (h *HarnessTest) ReceiveInvoiceUpdate(
        stream rpc.InvoiceUpdateClient) *lnrpc.Invoice {

        chanMsg := make(chan *lnrpc.Invoice)
        errChan := make(chan error)
        go func() {
                // Consume one message. This will block until the message is
                // received.
                resp, err := stream.Recv()
                if err != nil {
                        errChan <- err
                        return
                }
                chanMsg <- resp
        }()

        select {
        case <-time.After(DefaultTimeout):
                require.Fail(h, "timeout", "timeout receiving invoice update")

        case err := <-errChan:
                require.Failf(h, "err from stream",
                        "received err from stream: %v", err)

        case updateMsg := <-chanMsg:
                return updateMsg
        }

        return nil
}

// CalculateTxFee retrieves parent transactions and reconstructs the fee paid.
func (h *HarnessTest) CalculateTxFee(tx *wire.MsgTx) btcutil.Amount {
        var balance btcutil.Amount
        for _, in := range tx.TxIn {
                parentHash := in.PreviousOutPoint.Hash
                rawTx := h.miner.GetRawTransaction(parentHash)
                parent := rawTx.MsgTx()
                value := parent.TxOut[in.PreviousOutPoint.Index].Value

                balance += btcutil.Amount(value)
        }

        for _, out := range tx.TxOut {
                balance -= btcutil.Amount(out.Value)
        }

        return balance
}

// CalculateTxWeight calculates the weight for a given tx.
//
// TODO(yy): use weight estimator to get more accurate result.
func (h *HarnessTest) CalculateTxWeight(tx *wire.MsgTx) lntypes.WeightUnit {
        utx := btcutil.NewTx(tx)
        return lntypes.WeightUnit(blockchain.GetTransactionWeight(utx))
}

// CalculateTxFeeRate calculates the fee rate for a given tx.
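//
// A minimal usage sketch (illustrative only; measures the fee rate of the
// only tx currently in the mempool):
//
//        tx := h.GetNumTxsFromMempool(1)[0]
//        feeRate := h.CalculateTxFeeRate(tx)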
func (h *HarnessTest) CalculateTxFeeRate(
        tx *wire.MsgTx) chainfee.SatPerKWeight {

        w := h.CalculateTxWeight(tx)
        fee := h.CalculateTxFee(tx)

        return chainfee.NewSatPerKWeight(fee, w)
}

// CalculateTxesFeeRate takes a list of transactions and estimates the fee rate
// used to sweep them, computed as the total fee paid divided by the total
// weight, in sat/kw.
//
// NOTE: only used in current test file.
func (h *HarnessTest) CalculateTxesFeeRate(txns []*wire.MsgTx) int64 {
        const scale = 1000

        var totalWeight, totalFee int64
        for _, tx := range txns {
                utx := btcutil.NewTx(tx)
                totalWeight += blockchain.GetTransactionWeight(utx)

                fee := h.CalculateTxFee(tx)
                totalFee += int64(fee)
        }
        feeRate := totalFee * scale / totalWeight

        return feeRate
}

// AssertSweepFound looks up a sweep in a node's list of broadcast sweeps and
// asserts it's found.
//
// NOTE: Does not account for node's internal state.
func (h *HarnessTest) AssertSweepFound(hn *node.HarnessNode,
        sweep string, verbose bool, startHeight int32) {

        err := wait.NoError(func() error {
                // List all sweeps that the node had broadcast.
                sweepResp := hn.RPC.ListSweeps(verbose, startHeight)

                var found bool
                if verbose {
                        found = findSweepInDetails(h, sweep, sweepResp)
                } else {
                        found = findSweepInTxids(h, sweep, sweepResp)
                }

                if found {
                        return nil
                }

                return fmt.Errorf("sweep tx %v not found in resp %v", sweep,
                        sweepResp)
        }, wait.DefaultTimeout)
        require.NoError(h, err, "%s: timeout checking sweep tx", hn.Name())
}
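// findSweepInTxids returns true if the given sweep txid is found in the
// node's list of sweep txids.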
func findSweepInTxids(ht *HarnessTest, sweepTxid string,
        sweepResp *walletrpc.ListSweepsResponse) bool {

        sweepTxIDs := sweepResp.GetTransactionIds()
        require.NotNil(ht, sweepTxIDs, "expected transaction ids")
        require.Nil(ht, sweepResp.GetTransactionDetails())

        // Check that the sweep tx we have just produced is present.
        for _, tx := range sweepTxIDs.TransactionIds {
                if tx == sweepTxid {
                        return true
                }
        }

        return false
}
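// findSweepInDetails returns true if the given sweep txid is found in the
// node's sweep transaction details.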
func findSweepInDetails(ht *HarnessTest, sweepTxid string,
        sweepResp *walletrpc.ListSweepsResponse) bool {

        sweepDetails := sweepResp.GetTransactionDetails()
        require.NotNil(ht, sweepDetails, "expected transaction details")
        require.Nil(ht, sweepResp.GetTransactionIds())

        for _, tx := range sweepDetails.Transactions {
                if tx.TxHash == sweepTxid {
                        return true
                }
        }

        return false
}

// QueryRoutesAndRetry attempts to keep querying a route until timeout is
// reached.
//
// NOTE: when a channel is opened, we may need to query multiple times to get
// it in our QueryRoutes RPC. This happens even after we check the channel is
// heard by the node using ht.AssertChannelOpen. Deep down, this is because our
// GraphTopologySubscription and QueryRoutes give different results regarding a
// specific channel, with the former reporting it as open while the latter does
// not, resulting in GraphTopologySubscription acting "faster" than
// QueryRoutes.
// TODO(yy): make sure related subsystems share the same view on a given
// channel.
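//
// A minimal usage sketch (illustrative only; `bob.PubKeyStr` and `paymentAmt`
// are assumptions for the query):
//
//        routes := h.QueryRoutesAndRetry(alice, &lnrpc.QueryRoutesRequest{
//                PubKey: bob.PubKeyStr,
//                Amt:    int64(paymentAmt),
//        })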
func (h *HarnessTest) QueryRoutesAndRetry(hn *node.HarnessNode,
        req *lnrpc.QueryRoutesRequest) *lnrpc.QueryRoutesResponse {

        var routes *lnrpc.QueryRoutesResponse
        err := wait.NoError(func() error {
                ctxt, cancel := context.WithCancel(h.runCtx)
                defer cancel()

                resp, err := hn.RPC.LN.QueryRoutes(ctxt, req)
                if err != nil {
                        return fmt.Errorf("%s: failed to query route: %w",
                                hn.Name(), err)
                }

                routes = resp

                return nil
        }, DefaultTimeout)

        require.NoError(h, err, "timeout querying routes")

        return routes
}

// ReceiveHtlcInterceptor waits until a message is received on the htlc
// interceptor stream or the timeout is reached.
func (h *HarnessTest) ReceiveHtlcInterceptor(
        stream rpc.InterceptorClient) *routerrpc.ForwardHtlcInterceptRequest {

        chanMsg := make(chan *routerrpc.ForwardHtlcInterceptRequest)
        errChan := make(chan error)
        go func() {
                // Consume one message. This will block until the message is
                // received.
                resp, err := stream.Recv()
                if err != nil {
                        errChan <- err
                        return
                }
                chanMsg <- resp
        }()

        select {
        case <-time.After(DefaultTimeout):
                require.Fail(h, "timeout", "timeout intercepting htlc")

        case err := <-errChan:
                require.Failf(h, "err from HTLC interceptor stream",
                        "received err from HTLC interceptor stream: %v", err)

        case updateMsg := <-chanMsg:
                return updateMsg
        }

        return nil
}

// ReceiveInvoiceHtlcModification waits until a message is received on the
// invoice HTLC modifier stream or the timeout is reached.
func (h *HarnessTest) ReceiveInvoiceHtlcModification(
        stream rpc.InvoiceHtlcModifierClient) *invoicesrpc.HtlcModifyRequest {

        chanMsg := make(chan *invoicesrpc.HtlcModifyRequest)
        errChan := make(chan error)
        go func() {
                // Consume one message. This will block until the message is
                // received.
                resp, err := stream.Recv()
                if err != nil {
                        errChan <- err
                        return
                }
                chanMsg <- resp
        }()

        select {
        case <-time.After(DefaultTimeout):
                require.Fail(h, "timeout", "timeout invoice HTLC modifier")

        case err := <-errChan:
                require.Failf(h, "err from invoice HTLC modifier stream",
                        "received err from invoice HTLC modifier stream: %v",
                        err)

        case updateMsg := <-chanMsg:
                return updateMsg
        }

        return nil
}

// ReceiveChannelEvent waits until a message is received from the
// ChannelEventsClient stream or the timeout is reached.
func (h *HarnessTest) ReceiveChannelEvent(
        stream rpc.ChannelEventsClient) *lnrpc.ChannelEventUpdate {

        chanMsg := make(chan *lnrpc.ChannelEventUpdate)
        errChan := make(chan error)
        go func() {
                // Consume one message. This will block until the message is
                // received.
                resp, err := stream.Recv()
                if err != nil {
                        errChan <- err
                        return
                }
                chanMsg <- resp
        }()

        select {
        case <-time.After(DefaultTimeout):
                require.Fail(h, "timeout", "timeout receiving channel event")

        case err := <-errChan:
                require.Failf(h, "err from stream",
                        "received err from stream: %v", err)

        case updateMsg := <-chanMsg:
                return updateMsg
        }

        return nil
}

// GetOutputIndex returns the output index of the given address in the given
// transaction.
func (h *HarnessTest) GetOutputIndex(txid chainhash.Hash, addr string) int {
        // We'll then extract the raw transaction from the mempool in order to
        // determine the index of the p2tr output.
        tx := h.miner.GetRawTransaction(txid)

        p2trOutputIndex := -1
        for i, txOut := range tx.MsgTx().TxOut {
                _, addrs, _, err := txscript.ExtractPkScriptAddrs(
                        txOut.PkScript, h.miner.ActiveNet,
                )
                require.NoError(h, err)

                if addrs[0].String() == addr {
                        p2trOutputIndex = i
                }
        }
        require.Greater(h, p2trOutputIndex, -1)

        return p2trOutputIndex
}

// SendCoins sends a coin from node A to node B with the given amount, and
// returns the sending tx.
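//
// A minimal usage sketch (illustrative only; sends 1 BTC from alice to bob
// and inspects the fee paid by the sending tx):
//
//        tx := h.SendCoins(alice, bob, btcutil.SatoshiPerBitcoin)
//        fee := h.CalculateTxFee(tx)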
func (h *HarnessTest) SendCoins(a, b *node.HarnessNode,
        amt btcutil.Amount) *wire.MsgTx {

        // Create an address for Bob to receive the coins.
        req := &lnrpc.NewAddressRequest{
                Type: lnrpc.AddressType_TAPROOT_PUBKEY,
        }
        resp := b.RPC.NewAddress(req)

        // Send the coins from Alice to Bob. We should expect a tx to be
        // broadcast and seen in the mempool.
        sendReq := &lnrpc.SendCoinsRequest{
                Addr:       resp.Address,
                Amount:     int64(amt),
                TargetConf: 6,
        }
        a.RPC.SendCoins(sendReq)
        tx := h.GetNumTxsFromMempool(1)[0]

        return tx
}

// CreateSimpleNetwork creates the number of nodes specified by the number of
// configs and makes a topology of `node1 -> node2 -> node3...`. Each node is
// created using the specified config, the neighbors are connected, and the
// channels are opened. Each node will be funded with a single UTXO of 1 BTC
// except the last one.
//
// For instance, to create a network with 2 nodes that share the same node
// config,
//
//        cfg := []string{"--protocol.anchors"}
//        cfgs := [][]string{cfg, cfg}
//        params := OpenChannelParams{...}
//        chanPoints, nodes := ht.CreateSimpleNetwork(cfgs, params)
//
// This will create two nodes and open an anchor channel between them.
func (h *HarnessTest) CreateSimpleNetwork(nodeCfgs [][]string,
        p OpenChannelParams) ([]*lnrpc.ChannelPoint, []*node.HarnessNode) {

        // Create new nodes.
        nodes := h.createNodes(nodeCfgs)

        var resp []*lnrpc.ChannelPoint

        // Open zero-conf channels if specified.
        if p.ZeroConf {
                resp = h.openZeroConfChannelsForNodes(nodes, p)
        } else {
                // Open channels between the nodes.
                resp = h.openChannelsForNodes(nodes, p)
        }

        return resp, nodes
}

// acceptChannel is used to accept a single channel that comes across. This
// should be run in a goroutine and is used to test nodes with the zero-conf
// feature bit.
func acceptChannel(t *testing.T, zeroConf bool, stream rpc.AcceptorClient) {
        req, err := stream.Recv()
        require.NoError(t, err)

        resp := &lnrpc.ChannelAcceptResponse{
                Accept:        true,
                PendingChanId: req.PendingChanId,
                ZeroConf:      zeroConf,
        }
        err = stream.Send(resp)
        require.NoError(t, err)
}

// nodeNames defines a slice of human-readable names for the nodes created in
// the `createNodes` method. 8 nodes are defined here as by default we can only
// create this many nodes in one test.
var nodeNames = []string{
        "Alice", "Bob", "Carol", "Dave", "Eve", "Frank", "Grace", "Heidi",
}

// createNodes creates the number of nodes specified by the number of configs.
// Each node is created using the specified config, and the neighbors are
// connected.
func (h *HarnessTest) createNodes(nodeCfgs [][]string) []*node.HarnessNode {
        // Get the number of nodes.
        numNodes := len(nodeCfgs)

        // Make sure we are creating a reasonable number of nodes.
        require.LessOrEqual(h, numNodes, len(nodeNames), "too many nodes")

        // Make a slice of nodes.
        nodes := make([]*node.HarnessNode, numNodes)

        // Create new nodes.
        for i, nodeCfg := range nodeCfgs {
                nodeName := nodeNames[i]
                n := h.NewNode(nodeName, nodeCfg)
                nodes[i] = n
        }

        // Connect the nodes in a chain.
        for i := 1; i < len(nodes); i++ {
                nodeA := nodes[i-1]
                nodeB := nodes[i]
                h.EnsureConnected(nodeA, nodeB)
        }

        // Fund all the nodes except the last one.
        for i := 0; i < len(nodes)-1; i++ {
                node := nodes[i]
                h.FundCoinsUnconfirmed(btcutil.SatoshiPerBitcoin, node)
        }

        // Mine 1 block to get the above coins confirmed.
        h.MineBlocksAndAssertNumTxes(1, numNodes-1)

        return nodes
}

// openChannelsForNodes takes a list of nodes and makes a topology of `node1 ->
// node2 -> node3...`.
func (h *HarnessTest) openChannelsForNodes(nodes []*node.HarnessNode,
        p OpenChannelParams) []*lnrpc.ChannelPoint {

        // Sanity check the params.
        require.Greater(h, len(nodes), 1, "need at least 2 nodes")

        // attachFundingShim is a helper closure that optionally attaches a
        // funding shim to the open channel params and returns it.
        attachFundingShim := func(
                nodeA, nodeB *node.HarnessNode) OpenChannelParams {

                // If this channel is not a script enforced lease channel,
                // we'll do nothing and return the params.
                leasedType := lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE
                if p.CommitmentType != leasedType {
                        return p
                }

                // Otherwise derive the funding shim, attach it to the original
                // open channel params and return it.
                minerHeight := h.CurrentHeight()
                thawHeight := minerHeight + thawHeightDelta
                fundingShim, _ := h.deriveFundingShim(
                        nodeA, nodeB, p.Amt, thawHeight, true, leasedType,
                )

                p.FundingShim = fundingShim

                return p
        }

        // Open channels in batch to save blocks mined.
        reqs := make([]*OpenChannelRequest, 0, len(nodes)-1)
        for i := 0; i < len(nodes)-1; i++ {
                nodeA := nodes[i]
                nodeB := nodes[i+1]

                // Optionally attach a funding shim to the open channel params.
                p = attachFundingShim(nodeA, nodeB)

                req := &OpenChannelRequest{
                        Local:  nodeA,
                        Remote: nodeB,
                        Param:  p,
                }
                reqs = append(reqs, req)
        }
        resp := h.OpenMultiChannelsAsync(reqs)

        // If the channels are private, make sure the channel participants know
        // the relevant channel.
        if p.Private {
                for i, chanPoint := range resp {
                        // Get the channel participants - for n channels we
                        // would have n+1 nodes.
                        nodeA, nodeB := nodes[i], nodes[i+1]
                        h.AssertChannelInGraph(nodeA, chanPoint)
                        h.AssertChannelInGraph(nodeB, chanPoint)
                }
        } else {
                // Make sure all the nodes know all the channels if they are
                // public.
                for _, node := range nodes {
                        for _, chanPoint := range resp {
                                h.AssertChannelInGraph(node, chanPoint)
                        }

                        // Make sure every node has updated its cached graph
                        // about the edges as indicated in `DescribeGraph`.
                        h.AssertNumEdges(node, len(resp), false)
                }
        }

        return resp
}

// openZeroConfChannelsForNodes takes a list of nodes and makes a topology of
// `node1 -> node2 -> node3...` with zero-conf channels.
func (h *HarnessTest) openZeroConfChannelsForNodes(nodes []*node.HarnessNode,
        p OpenChannelParams) []*lnrpc.ChannelPoint {

        // Sanity check the params.
        require.True(h, p.ZeroConf, "zero-conf channels must be enabled")
        require.Greater(h, len(nodes), 1, "need at least 2 nodes")

        // We are opening numNodes-1 channels.
        cancels := make([]context.CancelFunc, 0, len(nodes)-1)

        // Create the channel acceptors.
        for _, node := range nodes[1:] {
                acceptor, cancel := node.RPC.ChannelAcceptor()
                go acceptChannel(h.T, true, acceptor)

                cancels = append(cancels, cancel)
        }

        // Open channels between the nodes.
        resp := h.openChannelsForNodes(nodes, p)

        for _, cancel := range cancels {
                cancel()
        }

        return resp
}

// deriveFundingShim creates a channel funding shim by deriving the necessary
// keys on both sides.
func (h *HarnessTest) deriveFundingShim(alice, bob *node.HarnessNode,
        chanSize btcutil.Amount, thawHeight uint32, publish bool,
        commitType lnrpc.CommitmentType) (*lnrpc.FundingShim,
        *lnrpc.ChannelPoint) {

        keyLoc := &signrpc.KeyLocator{KeyFamily: 9999}
        carolFundingKey := alice.RPC.DeriveKey(keyLoc)
        daveFundingKey := bob.RPC.DeriveKey(keyLoc)

        // Now that we have the multi-sig keys for each party, we can manually
        // construct the funding transaction. We'll instruct the backend to
        // immediately create and broadcast a transaction paying out an exact
        // amount. Normally this would reside in the mempool, but we just
        // confirm it now for simplicity.
        var (
                fundingOutput *wire.TxOut
                musig2        bool
                err           error
        )
        if commitType == lnrpc.CommitmentType_SIMPLE_TAPROOT {
                var carolKey, daveKey *btcec.PublicKey
                carolKey, err = btcec.ParsePubKey(carolFundingKey.RawKeyBytes)
                require.NoError(h, err)
                daveKey, err = btcec.ParsePubKey(daveFundingKey.RawKeyBytes)
                require.NoError(h, err)

                _, fundingOutput, err = input.GenTaprootFundingScript(
                        carolKey, daveKey, int64(chanSize),
                        fn.None[chainhash.Hash](),
                )
                require.NoError(h, err)

                musig2 = true
        } else {
                _, fundingOutput, err = input.GenFundingPkScript(
                        carolFundingKey.RawKeyBytes, daveFundingKey.RawKeyBytes,
                        int64(chanSize),
                )
                require.NoError(h, err)
        }

        var txid *chainhash.Hash
        targetOutputs := []*wire.TxOut{fundingOutput}
        if publish {
                txid = h.SendOutputsWithoutChange(targetOutputs, 5)
        } else {
                tx := h.CreateTransaction(targetOutputs, 5)

                txHash := tx.TxHash()
                txid = &txHash
        }

        // At this point, we can begin our external channel funding workflow.
        // We'll start by generating a pending channel ID externally that will
        // be used to track this new funding type.
        pendingChanID := h.Random32Bytes()

        // Now that we have the pending channel ID, Dave (our responder) will
        // register the intent to receive a new channel funding workflow using
        // the pending channel ID.
        chanPoint := &lnrpc.ChannelPoint{
                FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
                        FundingTxidBytes: txid[:],
                },
        }
        chanPointShim := &lnrpc.ChanPointShim{
                Amt:       int64(chanSize),
                ChanPoint: chanPoint,
                LocalKey: &lnrpc.KeyDescriptor{
                        RawKeyBytes: daveFundingKey.RawKeyBytes,
                        KeyLoc: &lnrpc.KeyLocator{
                                KeyFamily: daveFundingKey.KeyLoc.KeyFamily,
                                KeyIndex:  daveFundingKey.KeyLoc.KeyIndex,
                        },
                },
                RemoteKey:     carolFundingKey.RawKeyBytes,
                PendingChanId: pendingChanID,
                ThawHeight:    thawHeight,
                Musig2:        musig2,
        }
        fundingShim := &lnrpc.FundingShim{
                Shim: &lnrpc.FundingShim_ChanPointShim{
                        ChanPointShim: chanPointShim,
                },
        }
        bob.RPC.FundingStateStep(&lnrpc.FundingTransitionMsg{
                Trigger: &lnrpc.FundingTransitionMsg_ShimRegister{
                        ShimRegister: fundingShim,
                },
        })

        // If we attempt to register the same shim (has the same pending chan
        // ID), then we should get an error.
        bob.RPC.FundingStateStepAssertErr(&lnrpc.FundingTransitionMsg{
                Trigger: &lnrpc.FundingTransitionMsg_ShimRegister{
                        ShimRegister: fundingShim,
                },
        })

        // We'll take the chan point shim we just registered for Dave (the
        // responder), and swap the local/remote keys before we feed it in as
        // Carol's funding shim as the initiator.
        fundingShim.GetChanPointShim().LocalKey = &lnrpc.KeyDescriptor{
                RawKeyBytes: carolFundingKey.RawKeyBytes,
                KeyLoc: &lnrpc.KeyLocator{
                        KeyFamily: carolFundingKey.KeyLoc.KeyFamily,
                        KeyIndex:  carolFundingKey.KeyLoc.KeyIndex,
                },
        }
        fundingShim.GetChanPointShim().RemoteKey = daveFundingKey.RawKeyBytes

        return fundingShim, chanPoint
}