• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

lightningnetwork / lnd / 12426855566

20 Dec 2024 06:42AM UTC coverage: 58.73% (+0.09%) from 58.64%
12426855566

Pull #9315

github

yyforyongyu
contractcourt: include custom records on replayed htlc

Add another case in addition to #9357.
Pull Request #9315: Implement `blockbeat`

2262 of 2729 new or added lines in 35 files covered. (82.89%)

132 existing lines in 25 files now uncovered.

135298 of 230373 relevant lines covered (58.73%)

19195.08 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

0.0
/lntest/harness.go
1
package lntest
2

3
import (
4
        "context"
5
        "encoding/hex"
6
        "fmt"
7
        "testing"
8
        "time"
9

10
        "github.com/btcsuite/btcd/blockchain"
11
        "github.com/btcsuite/btcd/btcec/v2"
12
        "github.com/btcsuite/btcd/btcutil"
13
        "github.com/btcsuite/btcd/chaincfg/chainhash"
14
        "github.com/btcsuite/btcd/txscript"
15
        "github.com/btcsuite/btcd/wire"
16
        "github.com/go-errors/errors"
17
        "github.com/lightningnetwork/lnd/fn/v2"
18
        "github.com/lightningnetwork/lnd/input"
19
        "github.com/lightningnetwork/lnd/kvdb/etcd"
20
        "github.com/lightningnetwork/lnd/lnrpc"
21
        "github.com/lightningnetwork/lnd/lnrpc/invoicesrpc"
22
        "github.com/lightningnetwork/lnd/lnrpc/routerrpc"
23
        "github.com/lightningnetwork/lnd/lnrpc/signrpc"
24
        "github.com/lightningnetwork/lnd/lnrpc/walletrpc"
25
        "github.com/lightningnetwork/lnd/lntest/miner"
26
        "github.com/lightningnetwork/lnd/lntest/node"
27
        "github.com/lightningnetwork/lnd/lntest/rpc"
28
        "github.com/lightningnetwork/lnd/lntest/wait"
29
        "github.com/lightningnetwork/lnd/lntypes"
30
        "github.com/lightningnetwork/lnd/lnwallet/chainfee"
31
        "github.com/lightningnetwork/lnd/lnwire"
32
        "github.com/lightningnetwork/lnd/routing"
33
        "github.com/stretchr/testify/require"
34
)
35

36
const (
	// defaultMinerFeeRate specifies the fee rate in sats when sending
	// outputs from the miner.
	defaultMinerFeeRate = 7500

	// numBlocksSendOutput specifies the number of blocks to mine after
	// sending outputs from the miner.
	numBlocksSendOutput = 2

	// numBlocksOpenChannel specifies the number of blocks mined when
	// opening a channel.
	numBlocksOpenChannel = 6

	// lndErrorChanSize specifies the buffer size used to receive errors
	// from lnd process.
	lndErrorChanSize = 10

	// maxBlocksAllowed specifies the max allowed value to be used when
	// mining blocks.
	maxBlocksAllowed = 100

	// finalCltvDelta is the CLTV delta used on the final hop, pinned to
	// the minimum allowed by the routing package.
	finalCltvDelta  = routing.MinCLTVDelta // 18.

	// thawHeightDelta is twice finalCltvDelta and is used as the relative
	// thaw height for frozen channels in tests.
	thawHeightDelta = finalCltvDelta * 2   // 36.
)
60

61
// TestCase defines a test case that's been used in the integration test.
type TestCase struct {
	// Name specifies the test name.
	Name string

	// TestFunc is the test case wrapped in a function. It receives the
	// harness so the test body can drive nodes and make assertions.
	TestFunc func(t *HarnessTest)
}
69

70
// standbyNodes are a list of nodes which are created during the initialization
// of the test and used across all test cases.
type standbyNodes struct {
	// Alice and Bob are the initial seeder nodes that are automatically
	// created to be the initial participants of the test network.
	Alice *node.HarnessNode
	Bob   *node.HarnessNode
}
78

79
// HarnessTest builds on top of a testing.T with enhanced error detection. It
// is responsible for managing the interactions among different nodes, and
// providing easy-to-use assertions.
type HarnessTest struct {
	*testing.T

	// Embed the standbyNodes so we can easily access them via `ht.Alice`.
	standbyNodes

	// miner is a reference to a running full node that can be used to
	// create new blocks on the network.
	miner *miner.HarnessMiner

	// manager handles the start and stop of a given node.
	manager *nodeManager

	// feeService is a web service that provides external fee estimates to
	// lnd.
	feeService WebFeeService

	// Channel for transmitting stderr output from failed lightning node
	// to main process. Buffered (lndErrorChanSize) so senders don't block.
	lndErrorChan chan error

	// runCtx is a context with cancel method. It's used to signal when the
	// node needs to quit, and used as the parent context when spawning
	// children contexts for RPC requests.
	runCtx context.Context //nolint:containedctx
	cancel context.CancelFunc

	// stopChainBackend points to the cleanup function returned by the
	// chainBackend.
	stopChainBackend func()

	// cleaned specifies whether the cleanup has been applied for the
	// current HarnessTest.
	cleaned bool

	// currentHeight is the current height of the chain backend. It is
	// refreshed via updateCurrentHeight (see Start and Subtest).
	currentHeight uint32
}
120

121
// harnessOpts contains functional option to modify the behavior of the various
// harness calls.
type harnessOpts struct {
	// useAMP, when true, instructs payment-sending helpers to use AMP.
	useAMP bool
}
126

127
// defaultHarnessOpts returns a new instance of the harnessOpts with default
128
// values specified.
129
func defaultHarnessOpts() harnessOpts {
×
130
        return harnessOpts{
×
131
                useAMP: false,
×
132
        }
×
133
}
×
134

135
// HarnessOpt is a functional option that can be used to modify the behavior of
// harness functionality. Options mutate a harnessOpts value before a harness
// call executes.
type HarnessOpt func(*harnessOpts)
138

139
// WithAMP is a functional option that can be used to enable the AMP feature
140
// for sending payments.
141
func WithAMP() HarnessOpt {
×
142
        return func(h *harnessOpts) {
×
143
                h.useAMP = true
×
144
        }
×
145
}
146

147
// NewHarnessTest creates a new instance of a harnessTest from a regular
148
// testing.T instance.
149
func NewHarnessTest(t *testing.T, lndBinary string, feeService WebFeeService,
150
        dbBackend node.DatabaseBackend, nativeSQL bool) *HarnessTest {
×
151

×
152
        t.Helper()
×
153

×
154
        // Create the run context.
×
155
        ctxt, cancel := context.WithCancel(context.Background())
×
156

×
157
        manager := newNodeManager(lndBinary, dbBackend, nativeSQL)
×
158

×
159
        return &HarnessTest{
×
160
                T:          t,
×
161
                manager:    manager,
×
162
                feeService: feeService,
×
163
                runCtx:     ctxt,
×
164
                cancel:     cancel,
×
165
                // We need to use buffered channel here as we don't want to
×
166
                // block sending errors.
×
167
                lndErrorChan: make(chan error, lndErrorChanSize),
×
168
        }
×
169
}
×
170

171
// Start will assemble the chain backend and the miner for the HarnessTest. It
// also starts the fee service and watches lnd process error.
func (h *HarnessTest) Start(chain node.BackendConfig,
	miner *miner.HarnessMiner) {

	// Spawn a new goroutine to watch for any fatal errors that any of the
	// running lnd processes encounter. If an error occurs, then the test
	// case should naturally fail as a result and we log the server error
	// here to help debug.
	//
	// NOTE: the select runs only once, so only the first error received
	// (or the run context being canceled) is observed; the goroutine then
	// exits.
	go func() {
		select {
		case err, more := <-h.lndErrorChan:
			if !more {
				return
			}
			h.Logf("lnd finished with error (stderr):\n%v", err)

		case <-h.runCtx.Done():
			return
		}
	}()

	// Start the fee service.
	err := h.feeService.Start()
	require.NoError(h, err, "failed to start fee service")

	// Assemble the node manager with chainBackend and feeServiceURL.
	h.manager.chainBackend = chain
	h.manager.feeServiceURL = h.feeService.URL()

	// Assemble the miner.
	h.miner = miner

	// Update block height.
	h.updateCurrentHeight()
}
207

208
// ChainBackendName returns the chain backend name used in the test.
209
func (h *HarnessTest) ChainBackendName() string {
×
210
        return h.manager.chainBackend.Name()
×
211
}
×
212

213
// Context returns the run context used in this test. Usually it should be
// managed by the test itself otherwise undefined behaviors will occur. It can
// be used, however, when a test needs to have its own context being managed
// differently. In that case, instead of using a background context, the run
// context should be used such that the test context scope can be fully
// controlled.
func (h *HarnessTest) Context() context.Context {
	return h.runCtx
}
222

223
// setupWatchOnlyNode initializes a node with the watch-only accounts of an
224
// associated remote signing instance.
225
func (h *HarnessTest) setupWatchOnlyNode(name string,
226
        signerNode *node.HarnessNode, password []byte) *node.HarnessNode {
×
227

×
228
        // Prepare arguments for watch-only node connected to the remote signer.
×
229
        remoteSignerArgs := []string{
×
230
                "--remotesigner.enable",
×
231
                fmt.Sprintf("--remotesigner.rpchost=localhost:%d",
×
232
                        signerNode.Cfg.RPCPort),
×
233
                fmt.Sprintf("--remotesigner.tlscertpath=%s",
×
234
                        signerNode.Cfg.TLSCertPath),
×
235
                fmt.Sprintf("--remotesigner.macaroonpath=%s",
×
236
                        signerNode.Cfg.AdminMacPath),
×
237
        }
×
238

×
239
        // Fetch watch-only accounts from the signer node.
×
240
        resp := signerNode.RPC.ListAccounts(&walletrpc.ListAccountsRequest{})
×
241
        watchOnlyAccounts, err := walletrpc.AccountsToWatchOnly(resp.Accounts)
×
242
        require.NoErrorf(h, err, "unable to find watch only accounts for %s",
×
243
                name)
×
244

×
245
        // Create a new watch-only node with remote signer configuration.
×
246
        return h.NewNodeRemoteSigner(
×
247
                name, remoteSignerArgs, password,
×
248
                &lnrpc.WatchOnly{
×
249
                        MasterKeyBirthdayTimestamp: 0,
×
250
                        MasterKeyFingerprint:       nil,
×
251
                        Accounts:                   watchOnlyAccounts,
×
252
                },
×
253
        )
×
254
}
×
255

256
// createAndSendOutput send amt satoshis from the internal mining node to the
257
// targeted lightning node using a P2WKH address. No blocks are mined so
258
// transactions will sit unconfirmed in mempool.
259
func (h *HarnessTest) createAndSendOutput(target *node.HarnessNode,
260
        amt btcutil.Amount, addrType lnrpc.AddressType) {
×
261

×
262
        req := &lnrpc.NewAddressRequest{Type: addrType}
×
263
        resp := target.RPC.NewAddress(req)
×
264
        addr := h.DecodeAddress(resp.Address)
×
265
        addrScript := h.PayToAddrScript(addr)
×
266

×
267
        output := &wire.TxOut{
×
268
                PkScript: addrScript,
×
269
                Value:    int64(amt),
×
270
        }
×
271
        h.miner.SendOutput(output, defaultMinerFeeRate)
×
272
}
×
273

274
// SetupRemoteSigningStandbyNodes starts the initial seeder nodes within the
275
// test harness in a remote signing configuration. The initial node's wallets
276
// will be funded wallets with 100x1 BTC outputs each.
277
func (h *HarnessTest) SetupRemoteSigningStandbyNodes() {
×
278
        h.Log("Setting up standby nodes Alice and Bob with remote " +
×
279
                "signing configurations...")
×
280
        defer h.Log("Finished the setup, now running tests...")
×
281

×
282
        password := []byte("itestpassword")
×
283

×
284
        // Setup remote signing nodes for Alice and Bob.
×
285
        signerAlice := h.NewNode("SignerAlice", nil)
×
286
        signerBob := h.NewNode("SignerBob", nil)
×
287

×
288
        // Setup watch-only nodes for Alice and Bob, each configured with their
×
289
        // own remote signing instance.
×
290
        h.Alice = h.setupWatchOnlyNode("Alice", signerAlice, password)
×
291
        h.Bob = h.setupWatchOnlyNode("Bob", signerBob, password)
×
292

×
293
        // Fund each node with 100 BTC (using 100 separate transactions).
×
294
        const fundAmount = 1 * btcutil.SatoshiPerBitcoin
×
295
        const numOutputs = 100
×
296
        const totalAmount = fundAmount * numOutputs
×
297
        for _, node := range []*node.HarnessNode{h.Alice, h.Bob} {
×
298
                h.manager.standbyNodes[node.Cfg.NodeID] = node
×
299
                for i := 0; i < numOutputs; i++ {
×
300
                        h.createAndSendOutput(
×
301
                                node, fundAmount,
×
302
                                lnrpc.AddressType_WITNESS_PUBKEY_HASH,
×
303
                        )
×
304
                }
×
305
        }
306

307
        // We generate several blocks in order to give the outputs created
308
        // above a good number of confirmations.
309
        const totalTxes = 200
×
310
        h.MineBlocksAndAssertNumTxes(numBlocksSendOutput, totalTxes)
×
311

×
312
        // Now we want to wait for the nodes to catch up.
×
313
        h.WaitForBlockchainSync(h.Alice)
×
314
        h.WaitForBlockchainSync(h.Bob)
×
315

×
316
        // Now block until both wallets have fully synced up.
×
317
        h.WaitForBalanceConfirmed(h.Alice, totalAmount)
×
318
        h.WaitForBalanceConfirmed(h.Bob, totalAmount)
×
319
}
320

321
// SetupStandbyNodes starts the initial seeder nodes within the test harness.
// The initial nodes' wallets are each funded with 100 outputs of 1 BTC.
func (h *HarnessTest) SetupStandbyNodes() {
	h.Log("Setting up standby nodes Alice and Bob...")
	defer h.Log("Finished the setup, now running tests...")

	lndArgs := []string{
		"--default-remote-max-htlcs=483",
		"--channel-max-fee-exposure=5000000",
	}

	// Start the initial seeder nodes within the test network.
	h.Alice = h.NewNode("Alice", lndArgs)
	h.Bob = h.NewNode("Bob", lndArgs)

	// Load up the wallets of the seeder nodes with 100 outputs of 1 BTC
	// each.
	const fundAmount = 1 * btcutil.SatoshiPerBitcoin
	const numOutputs = 100
	const totalAmount = fundAmount * numOutputs
	for _, node := range []*node.HarnessNode{h.Alice, h.Bob} {
		h.manager.standbyNodes[node.Cfg.NodeID] = node
		for i := 0; i < numOutputs; i++ {
			h.createAndSendOutput(
				node, fundAmount,
				lnrpc.AddressType_WITNESS_PUBKEY_HASH,
			)
		}
	}

	// We generate several blocks in order to give the outputs created
	// above a good number of confirmations.
	const totalTxes = 200
	h.MineBlocksAndAssertNumTxes(numBlocksSendOutput, totalTxes)

	// Now we want to wait for the nodes to catch up.
	h.WaitForBlockchainSync(h.Alice)
	h.WaitForBlockchainSync(h.Bob)

	// Now block until both wallets have fully synced up.
	h.WaitForBalanceConfirmed(h.Alice, totalAmount)
	h.WaitForBalanceConfirmed(h.Bob, totalAmount)
}
364

365
// Stop stops the test harness.
366
func (h *HarnessTest) Stop() {
×
367
        // Do nothing if it's not started.
×
368
        if h.runCtx == nil {
×
369
                h.Log("HarnessTest is not started")
×
370
                return
×
371
        }
×
372

373
        h.shutdownAllNodes()
×
374

×
375
        close(h.lndErrorChan)
×
376

×
377
        // Stop the fee service.
×
378
        err := h.feeService.Stop()
×
379
        require.NoError(h, err, "failed to stop fee service")
×
380

×
381
        // Stop the chainBackend.
×
382
        h.stopChainBackend()
×
383

×
384
        // Stop the miner.
×
385
        h.miner.Stop()
×
386
}
387

388
// RunTestCase executes a harness test case. Any errors or panics will be
389
// represented as fatal.
390
func (h *HarnessTest) RunTestCase(testCase *TestCase) {
×
391
        defer func() {
×
392
                if err := recover(); err != nil {
×
393
                        description := errors.Wrap(err, 2).ErrorStack()
×
394
                        h.Fatalf("Failed: (%v) panic with: \n%v",
×
395
                                testCase.Name, description)
×
396
                }
×
397
        }()
398

399
        testCase.TestFunc(h)
×
400
}
401

402
// resetStandbyNodes resets all standby nodes by attaching the new testing.T
403
// and restarting them with the original config.
404
func (h *HarnessTest) resetStandbyNodes(t *testing.T) {
×
405
        t.Helper()
×
406

×
407
        for _, hn := range h.manager.standbyNodes {
×
408
                // Inherit the testing.T.
×
409
                h.T = t
×
410

×
411
                // Reset the config so the node will be using the default
×
412
                // config for the coming test. This will also inherit the
×
413
                // test's running context.
×
414
                h.RestartNodeWithExtraArgs(hn, hn.Cfg.OriginalExtraArgs)
×
415

×
416
                hn.AddToLogf("Finished test case %v", h.manager.currentTestCase)
×
417
        }
×
418
}
419

420
// Subtest creates a child HarnessTest, which inherits the harness net and
// standby nodes created by the parent test. It registers a cleanup function
// which resets all the standby nodes' configs back to their original state
// and creates snapshots of each node's internal state.
func (h *HarnessTest) Subtest(t *testing.T) *HarnessTest {
	t.Helper()

	// The child shares the parent's manager, miner, standby nodes and fee
	// service, but gets its own testing.T and error channel.
	st := &HarnessTest{
		T:            t,
		manager:      h.manager,
		miner:        h.miner,
		standbyNodes: h.standbyNodes,
		feeService:   h.feeService,
		lndErrorChan: make(chan error, lndErrorChanSize),
	}

	// Inherit context from the main test.
	st.runCtx, st.cancel = context.WithCancel(h.runCtx)

	// Inherit the subtest for the miner.
	st.miner.T = st.T

	// Reset the standby nodes.
	st.resetStandbyNodes(t)

	// Reset fee estimator.
	st.feeService.Reset()

	// Record block height.
	h.updateCurrentHeight()
	startHeight := int32(h.CurrentHeight())

	st.Cleanup(func() {
		_, endHeight := h.GetBestBlock()

		st.Logf("finished test: %s, start height=%d, end height=%d, "+
			"mined blocks=%d", st.manager.currentTestCase,
			startHeight, endHeight, endHeight-startHeight)

		// Don't bother running the cleanups if the test failed; just
		// shut everything down.
		if st.Failed() {
			st.Log("test failed, skipped cleanup")
			st.shutdownAllNodes()
			return
		}

		// Don't run cleanup if it's already done. This can happen if
		// we have multiple level inheritance of the parent harness
		// test. For instance, a `Subtest(st)`.
		if st.cleaned {
			st.Log("test already cleaned, skipped cleanup")
			return
		}

		// When we finish the test, reset the nodes' configs and take a
		// snapshot of each of the nodes' internal states.
		for _, node := range st.manager.standbyNodes {
			st.cleanupStandbyNode(node)
		}

		// If found running nodes, shut them down.
		st.shutdownNonStandbyNodes()

		// We require the mempool to be cleaned from the test.
		require.Empty(st, st.miner.GetRawMempool(), "mempool not "+
			"cleaned, please mine blocks to clean them all.")

		// Finally, cancel the run context. We have to do it here
		// because we need to keep the context alive for the above
		// assertions used in cleanup.
		st.cancel()

		// We now want to mark the parent harness as cleaned to avoid
		// running cleanup again since its internal state has been
		// cleaned up by its child harness tests.
		h.cleaned = true
	})

	return st
}
500

501
// shutdownNonStandbyNodes will shutdown any non-standby nodes, leaving the
// standby nodes running.
func (h *HarnessTest) shutdownNonStandbyNodes() {
	// true: skip nodes registered as standby.
	h.shutdownNodes(true)
}
505

506
// shutdownAllNodes will shutdown all running nodes, standby nodes included.
func (h *HarnessTest) shutdownAllNodes() {
	// false: do not skip the standby nodes.
	h.shutdownNodes(false)
}
510

511
// shutdownNodes will shutdown any non-standby nodes. If skipStandby is false,
512
// all the standby nodes will be shutdown too.
513
func (h *HarnessTest) shutdownNodes(skipStandby bool) {
×
514
        for nid, node := range h.manager.activeNodes {
×
515
                // If it's a standby node, skip.
×
516
                _, ok := h.manager.standbyNodes[nid]
×
517
                if ok && skipStandby {
×
518
                        continue
×
519
                }
520

521
                // The process may not be in a state to always shutdown
522
                // immediately, so we'll retry up to a hard limit to ensure we
523
                // eventually shutdown.
524
                err := wait.NoError(func() error {
×
525
                        return h.manager.shutdownNode(node)
×
526
                }, DefaultTimeout)
×
527

528
                if err == nil {
×
529
                        continue
×
530
                }
531

532
                // Instead of returning the error, we will log it instead. This
533
                // is needed so other nodes can continue their shutdown
534
                // processes.
535
                h.Logf("unable to shutdown %s, got err: %v", node.Name(), err)
×
536
        }
537
}
538

539
// cleanupStandbyNode is a function that should be called with defer whenever
// a subtest is created. It will reset the standby node's configs, snapshot
// the states, and validate the node has a clean state.
func (h *HarnessTest) cleanupStandbyNode(hn *node.HarnessNode) {
	// Remove connections made from this test.
	h.removeConnectionns(hn)

	// Delete all payments made from this test.
	hn.RPC.DeleteAllPayments()

	// Check the node's current state with timeout.
	//
	// NOTE: we need to do this in a `wait` because it takes some time for
	// the node to update its internal state. Once the RPCs are synced we
	// can then remove this wait.
	err := wait.NoError(func() error {
		// Update the node's internal state.
		hn.UpdateState()

		// Check the node is in a clean state for the following tests.
		return h.validateNodeState(hn)
	}, wait.DefaultTimeout)
	require.NoError(h, err, "timeout checking node's state")
}
563

564
// removeConnectionns will remove all connections made on the standby nodes
// except the connections between Alice and Bob.
//
// NOTE: the name carries a typo ("Connectionns"); it is kept as-is since
// renaming would break existing callers.
func (h *HarnessTest) removeConnectionns(hn *node.HarnessNode) {
	resp := hn.RPC.ListPeers()
	for _, peer := range resp.Peers {
		// Skip disconnecting Alice and Bob.
		switch peer.PubKey {
		case h.Alice.PubKeyStr:
			continue
		case h.Bob.PubKeyStr:
			continue
		}

		hn.RPC.DisconnectPeer(peer.PubKey)
	}
}
580

581
// SetTestName set the test case name.
582
func (h *HarnessTest) SetTestName(name string) {
×
583
        h.manager.currentTestCase = name
×
584

×
585
        // Overwrite the old log filename so we can create new log files.
×
586
        for _, node := range h.manager.standbyNodes {
×
587
                node.Cfg.LogFilenamePrefix = name
×
588
        }
×
589
}
590

591
// NewNode creates a new node and asserts its creation. The node is guaranteed
592
// to have finished its initialization and all its subservers are started.
593
func (h *HarnessTest) NewNode(name string,
594
        extraArgs []string) *node.HarnessNode {
×
595

×
596
        node, err := h.manager.newNode(h.T, name, extraArgs, nil, false)
×
597
        require.NoErrorf(h, err, "unable to create new node for %s", name)
×
598

×
599
        // Start the node.
×
600
        err = node.Start(h.runCtx)
×
601
        require.NoError(h, err, "failed to start node %s", node.Name())
×
602

×
603
        return node
×
604
}
×
605

606
// Shutdown shuts down the given node and asserts that no errors occur.
607
func (h *HarnessTest) Shutdown(node *node.HarnessNode) {
×
608
        // The process may not be in a state to always shutdown immediately, so
×
609
        // we'll retry up to a hard limit to ensure we eventually shutdown.
×
610
        err := wait.NoError(func() error {
×
611
                return h.manager.shutdownNode(node)
×
612
        }, DefaultTimeout)
×
613

614
        require.NoErrorf(h, err, "unable to shutdown %v in %v", node.Name(),
×
615
                h.manager.currentTestCase)
×
616
}
617

618
// SuspendNode stops the given node and returns a callback that can be used to
// start it again.
func (h *HarnessTest) SuspendNode(node *node.HarnessNode) func() error {
	err := node.Stop()
	require.NoErrorf(h, err, "failed to stop %s", node.Name())

	// Remove the node from active nodes so shutdown helpers won't try to
	// stop it again.
	delete(h.manager.activeNodes, node.Cfg.NodeID)

	// The returned restart callback re-registers the node, starts it with
	// the harness's run context, and waits for it to sync to the chain.
	return func() error {
		h.manager.registerNode(node)

		if err := node.Start(h.runCtx); err != nil {
			return err
		}
		h.WaitForBlockchainSync(node)

		return nil
	}
}
638

639
// RestartNode restarts a given node, unlocks it and asserts it's successfully
// started.
func (h *HarnessTest) RestartNode(hn *node.HarnessNode) {
	err := h.manager.restartNode(h.runCtx, hn, nil)
	require.NoErrorf(h, err, "failed to restart node %s", hn.Name())

	// NOTE(review): unlockNode is called even when SkipUnlock is set —
	// presumably it is a no-op in that case; confirm in nodeManager.
	err = h.manager.unlockNode(hn)
	require.NoErrorf(h, err, "failed to unlock node %s", hn.Name())

	if !hn.Cfg.SkipUnlock {
		// Give the node some time to catch up with the chain before we
		// continue with the tests.
		h.WaitForBlockchainSync(hn)
	}
}
654

655
// RestartNodeNoUnlock restarts a given node without unlocking its wallet.
func (h *HarnessTest) RestartNodeNoUnlock(hn *node.HarnessNode) {
	err := h.manager.restartNode(h.runCtx, hn, nil)
	require.NoErrorf(h, err, "failed to restart node %s", hn.Name())
}
660

661
// RestartNodeWithChanBackups restarts a given node with the specified channel
662
// backups.
663
func (h *HarnessTest) RestartNodeWithChanBackups(hn *node.HarnessNode,
664
        chanBackups ...*lnrpc.ChanBackupSnapshot) {
×
665

×
666
        err := h.manager.restartNode(h.runCtx, hn, nil)
×
667
        require.NoErrorf(h, err, "failed to restart node %s", hn.Name())
×
668

×
669
        err = h.manager.unlockNode(hn, chanBackups...)
×
670
        require.NoErrorf(h, err, "failed to unlock node %s", hn.Name())
×
671

×
672
        // Give the node some time to catch up with the chain before we
×
673
        // continue with the tests.
×
674
        h.WaitForBlockchainSync(hn)
×
675
}
×
676

677
// RestartNodeWithExtraArgs updates the node's config and restarts it.
func (h *HarnessTest) RestartNodeWithExtraArgs(hn *node.HarnessNode,
	extraArgs []string) {

	// Persist the new args on the node's config, then go through the
	// regular restart-and-unlock path.
	hn.SetExtraArgs(extraArgs)
	h.RestartNode(hn)
}
684

685
// NewNodeWithSeed fully initializes a new HarnessNode after creating a fresh
686
// aezeed. The provided password is used as both the aezeed password and the
687
// wallet password. The generated mnemonic is returned along with the
688
// initialized harness node.
689
func (h *HarnessTest) NewNodeWithSeed(name string,
690
        extraArgs []string, password []byte,
691
        statelessInit bool) (*node.HarnessNode, []string, []byte) {
×
692

×
693
        // Create a request to generate a new aezeed. The new seed will have
×
694
        // the same password as the internal wallet.
×
695
        req := &lnrpc.GenSeedRequest{
×
696
                AezeedPassphrase: password,
×
697
                SeedEntropy:      nil,
×
698
        }
×
699

×
700
        return h.newNodeWithSeed(name, extraArgs, req, statelessInit)
×
701
}
×
702

703
// newNodeWithSeed creates and initializes a new HarnessNode such that it'll be
704
// ready to accept RPC calls. A `GenSeedRequest` is needed to generate the
705
// seed.
706
func (h *HarnessTest) newNodeWithSeed(name string,
707
        extraArgs []string, req *lnrpc.GenSeedRequest,
708
        statelessInit bool) (*node.HarnessNode, []string, []byte) {
×
709

×
710
        node, err := h.manager.newNode(
×
711
                h.T, name, extraArgs, req.AezeedPassphrase, true,
×
712
        )
×
713
        require.NoErrorf(h, err, "unable to create new node for %s", name)
×
714

×
715
        // Start the node with seed only, which will only create the `State`
×
716
        // and `WalletUnlocker` clients.
×
717
        err = node.StartWithNoAuth(h.runCtx)
×
718
        require.NoErrorf(h, err, "failed to start node %s", node.Name())
×
719

×
720
        // Generate a new seed.
×
721
        genSeedResp := node.RPC.GenSeed(req)
×
722

×
723
        // With the seed created, construct the init request to the node,
×
724
        // including the newly generated seed.
×
725
        initReq := &lnrpc.InitWalletRequest{
×
726
                WalletPassword:     req.AezeedPassphrase,
×
727
                CipherSeedMnemonic: genSeedResp.CipherSeedMnemonic,
×
728
                AezeedPassphrase:   req.AezeedPassphrase,
×
729
                StatelessInit:      statelessInit,
×
730
        }
×
731

×
732
        // Pass the init request via rpc to finish unlocking the node. This
×
733
        // will also initialize the macaroon-authenticated LightningClient.
×
734
        adminMac, err := h.manager.initWalletAndNode(node, initReq)
×
735
        require.NoErrorf(h, err, "failed to unlock and init node %s",
×
736
                node.Name())
×
737

×
738
        // In stateless initialization mode we get a macaroon back that we have
×
739
        // to return to the test, otherwise gRPC calls won't be possible since
×
740
        // there are no macaroon files created in that mode.
×
741
        // In stateful init the admin macaroon will just be nil.
×
742
        return node, genSeedResp.CipherSeedMnemonic, adminMac
×
743
}
×
744

745
// RestoreNodeWithSeed fully initializes a HarnessNode using a chosen mnemonic,
746
// password, recovery window, and optionally a set of static channel backups.
747
// After providing the initialization request to unlock the node, this method
748
// will finish initializing the LightningClient such that the HarnessNode can
749
// be used for regular rpc operations.
750
func (h *HarnessTest) RestoreNodeWithSeed(name string, extraArgs []string,
751
        password []byte, mnemonic []string, rootKey string,
752
        recoveryWindow int32,
753
        chanBackups *lnrpc.ChanBackupSnapshot) *node.HarnessNode {
×
754

×
755
        n, err := h.manager.newNode(h.T, name, extraArgs, password, true)
×
756
        require.NoErrorf(h, err, "unable to create new node for %s", name)
×
757

×
758
        // Start the node with seed only, which will only create the `State`
×
759
        // and `WalletUnlocker` clients.
×
760
        err = n.StartWithNoAuth(h.runCtx)
×
761
        require.NoErrorf(h, err, "failed to start node %s", n.Name())
×
762

×
763
        // Create the wallet.
×
764
        initReq := &lnrpc.InitWalletRequest{
×
765
                WalletPassword:     password,
×
766
                CipherSeedMnemonic: mnemonic,
×
767
                AezeedPassphrase:   password,
×
768
                ExtendedMasterKey:  rootKey,
×
769
                RecoveryWindow:     recoveryWindow,
×
770
                ChannelBackups:     chanBackups,
×
771
        }
×
772
        _, err = h.manager.initWalletAndNode(n, initReq)
×
773
        require.NoErrorf(h, err, "failed to unlock and init node %s",
×
774
                n.Name())
×
775

×
776
        return n
×
777
}
×
778

779
// NewNodeEtcd starts a new node with seed that'll use an external etcd
780
// database as its storage. The passed cluster flag indicates that we'd like
781
// the node to join the cluster leader election. We won't wait until RPC is
782
// available (this is useful when the node is not expected to become the leader
783
// right away).
784
func (h *HarnessTest) NewNodeEtcd(name string, etcdCfg *etcd.Config,
785
        password []byte, cluster bool,
786
        leaderSessionTTL int) *node.HarnessNode {
×
787

×
788
        // We don't want to use the embedded etcd instance.
×
789
        h.manager.dbBackend = node.BackendBbolt
×
790

×
791
        extraArgs := node.ExtraArgsEtcd(
×
792
                etcdCfg, name, cluster, leaderSessionTTL,
×
793
        )
×
794
        node, err := h.manager.newNode(h.T, name, extraArgs, password, true)
×
795
        require.NoError(h, err, "failed to create new node with etcd")
×
796

×
797
        // Start the node daemon only.
×
798
        err = node.StartLndCmd(h.runCtx)
×
799
        require.NoError(h, err, "failed to start node %s", node.Name())
×
800

×
801
        return node
×
802
}
×
803

804
// NewNodeWithSeedEtcd starts a new node with seed that'll use an external etcd
805
// database as its storage. The passed cluster flag indicates that we'd like
806
// the node to join the cluster leader election.
807
func (h *HarnessTest) NewNodeWithSeedEtcd(name string, etcdCfg *etcd.Config,
808
        password []byte, statelessInit, cluster bool,
809
        leaderSessionTTL int) (*node.HarnessNode, []string, []byte) {
×
810

×
811
        // We don't want to use the embedded etcd instance.
×
812
        h.manager.dbBackend = node.BackendBbolt
×
813

×
814
        // Create a request to generate a new aezeed. The new seed will have
×
815
        // the same password as the internal wallet.
×
816
        req := &lnrpc.GenSeedRequest{
×
817
                AezeedPassphrase: password,
×
818
                SeedEntropy:      nil,
×
819
        }
×
820

×
821
        extraArgs := node.ExtraArgsEtcd(
×
822
                etcdCfg, name, cluster, leaderSessionTTL,
×
823
        )
×
824

×
825
        return h.newNodeWithSeed(name, extraArgs, req, statelessInit)
×
826
}
×
827

828
// NewNodeRemoteSigner creates a new remote signer node and asserts its
829
// creation.
830
func (h *HarnessTest) NewNodeRemoteSigner(name string, extraArgs []string,
831
        password []byte, watchOnly *lnrpc.WatchOnly) *node.HarnessNode {
×
832

×
833
        hn, err := h.manager.newNode(h.T, name, extraArgs, password, true)
×
834
        require.NoErrorf(h, err, "unable to create new node for %s", name)
×
835

×
836
        err = hn.StartWithNoAuth(h.runCtx)
×
837
        require.NoError(h, err, "failed to start node %s", name)
×
838

×
839
        // With the seed created, construct the init request to the node,
×
840
        // including the newly generated seed.
×
841
        initReq := &lnrpc.InitWalletRequest{
×
842
                WalletPassword: password,
×
843
                WatchOnly:      watchOnly,
×
844
        }
×
845

×
846
        // Pass the init request via rpc to finish unlocking the node. This
×
847
        // will also initialize the macaroon-authenticated LightningClient.
×
848
        _, err = h.manager.initWalletAndNode(hn, initReq)
×
849
        require.NoErrorf(h, err, "failed to init node %s", name)
×
850

×
851
        return hn
×
852
}
×
853

854
// KillNode kills the node and waits for the node process to stop.
855
func (h *HarnessTest) KillNode(hn *node.HarnessNode) {
×
856
        h.Logf("Manually killing the node %s", hn.Name())
×
857
        require.NoErrorf(h, hn.KillAndWait(), "%s: kill got error", hn.Name())
×
858
        delete(h.manager.activeNodes, hn.Cfg.NodeID)
×
859
}
×
860

861
// SetFeeEstimate sets a fee rate to be returned from fee estimator.
862
//
863
// NOTE: this method will set the fee rate for a conf target of 1, which is the
864
// fallback fee rate for a `WebAPIEstimator` if a higher conf target's fee rate
865
// is not set. This means if the fee rate for conf target 6 is set, the fee
866
// estimator will use that value instead.
867
func (h *HarnessTest) SetFeeEstimate(fee chainfee.SatPerKWeight) {
×
868
        h.feeService.SetFeeRate(fee, 1)
×
869
}
×
870

871
// SetFeeEstimateWithConf sets a fee rate of a specified conf target to be
872
// returned from fee estimator.
873
func (h *HarnessTest) SetFeeEstimateWithConf(
874
        fee chainfee.SatPerKWeight, conf uint32) {
×
875

×
876
        h.feeService.SetFeeRate(fee, conf)
×
877
}
×
878

879
// SetMinRelayFeerate sets a min relay fee rate to be returned from fee
880
// estimator.
881
func (h *HarnessTest) SetMinRelayFeerate(fee chainfee.SatPerKVByte) {
×
882
        h.feeService.SetMinRelayFeerate(fee)
×
883
}
×
884

885
// validateNodeState checks that the node doesn't have any uncleaned states
886
// which will affect its following tests.
887
func (h *HarnessTest) validateNodeState(hn *node.HarnessNode) error {
×
888
        errStr := func(subject string) error {
×
889
                return fmt.Errorf("%s: found %s channels, please close "+
×
890
                        "them properly", hn.Name(), subject)
×
891
        }
×
892
        // If the node still has open channels, it's most likely that the
893
        // current test didn't close it properly.
894
        if hn.State.OpenChannel.Active != 0 {
×
895
                return errStr("active")
×
896
        }
×
897
        if hn.State.OpenChannel.Public != 0 {
×
898
                return errStr("public")
×
899
        }
×
900
        if hn.State.OpenChannel.Private != 0 {
×
901
                return errStr("private")
×
902
        }
×
903
        if hn.State.OpenChannel.Pending != 0 {
×
904
                return errStr("pending open")
×
905
        }
×
906

907
        // The number of pending force close channels should be zero.
908
        if hn.State.CloseChannel.PendingForceClose != 0 {
×
909
                return errStr("pending force")
×
910
        }
×
911

912
        // The number of waiting close channels should be zero.
913
        if hn.State.CloseChannel.WaitingClose != 0 {
×
914
                return errStr("waiting close")
×
915
        }
×
916

917
        // Ths number of payments should be zero.
918
        if hn.State.Payment.Total != 0 {
×
919
                return fmt.Errorf("%s: found uncleaned payments, please "+
×
920
                        "delete all of them properly", hn.Name())
×
921
        }
×
922

923
        // The number of public edges should be zero.
924
        if hn.State.Edge.Public != 0 {
×
925
                return fmt.Errorf("%s: found active public egdes, please "+
×
926
                        "clean them properly", hn.Name())
×
927
        }
×
928

929
        // The number of edges should be zero.
930
        if hn.State.Edge.Total != 0 {
×
931
                return fmt.Errorf("%s: found active edges, please "+
×
932
                        "clean them properly", hn.Name())
×
933
        }
×
934

935
        return nil
×
936
}
937

938
// GetChanPointFundingTxid takes a channel point and converts it into a chain
939
// hash.
940
func (h *HarnessTest) GetChanPointFundingTxid(
941
        cp *lnrpc.ChannelPoint) chainhash.Hash {
×
942

×
943
        txid, err := lnrpc.GetChanPointFundingTxid(cp)
×
944
        require.NoError(h, err, "unable to get txid")
×
945

×
946
        return *txid
×
947
}
×
948

949
// OutPointFromChannelPoint creates an outpoint from a given channel point.
950
func (h *HarnessTest) OutPointFromChannelPoint(
951
        cp *lnrpc.ChannelPoint) wire.OutPoint {
×
952

×
953
        txid := h.GetChanPointFundingTxid(cp)
×
954
        return wire.OutPoint{
×
955
                Hash:  txid,
×
956
                Index: cp.OutputIndex,
×
957
        }
×
958
}
×
959

960
// OpenChannelParams houses the params to specify when opening a new channel.
type OpenChannelParams struct {
	// Amt is the local amount being put into the channel.
	Amt btcutil.Amount

	// PushAmt is the amount that should be pushed to the remote when the
	// channel is opened.
	PushAmt btcutil.Amount

	// Private is a boolean indicating whether the opened channel should be
	// private.
	Private bool

	// SpendUnconfirmed is a boolean indicating whether we can utilize
	// unconfirmed outputs to fund the channel.
	SpendUnconfirmed bool

	// MinHtlc is the htlc_minimum_msat value set when opening the channel.
	MinHtlc lnwire.MilliSatoshi

	// RemoteMaxHtlcs is the remote_max_htlcs value set when opening the
	// channel, restricting the number of concurrent HTLCs the remote party
	// can add to a commitment.
	RemoteMaxHtlcs uint16

	// FundingShim is an optional funding shim that the caller can specify
	// in order to modify the channel funding workflow.
	FundingShim *lnrpc.FundingShim

	// SatPerVByte is the amount of satoshis to spend in chain fees per
	// virtual byte of the transaction.
	SatPerVByte btcutil.Amount

	// ConfTarget is the number of blocks that the funding transaction
	// should be confirmed in.
	ConfTarget fn.Option[int32]

	// CommitmentType is the commitment type that should be used for the
	// channel to be opened.
	CommitmentType lnrpc.CommitmentType

	// ZeroConf is used to determine if the channel will be a zero-conf
	// channel. This only works if the explicit negotiation is used with
	// anchors or script enforced leases.
	ZeroConf bool

	// ScidAlias denotes whether the channel will be an option-scid-alias
	// channel type negotiation.
	ScidAlias bool

	// BaseFee is the channel base fee applied during the channel
	// announcement phase.
	BaseFee uint64

	// FeeRate is the channel fee rate in ppm applied during the channel
	// announcement phase.
	FeeRate uint64

	// UseBaseFee, if set, instructs the downstream logic to apply the
	// user-specified channel base fee to the channel update announcement.
	// If set to false it avoids applying a base fee of 0 and instead
	// activates the default configured base fee.
	UseBaseFee bool

	// UseFeeRate, if set, instructs the downstream logic to apply the
	// user-specified channel fee rate to the channel update announcement.
	// If set to false it avoids applying a fee rate of 0 and instead
	// activates the default configured fee rate.
	UseFeeRate bool

	// FundMax is a boolean indicating whether the channel should be funded
	// with the maximum possible amount from the wallet.
	FundMax bool

	// Memo is an optional note-to-self containing some useful information
	// about the channel. This is stored locally only, and is purely for
	// reference. It has no bearing on the channel's operation. Max allowed
	// length is 500 characters.
	Memo string

	// Outpoints is a list of client-selected outpoints that should be used
	// for funding a channel. If Amt is specified then this amount is
	// allocated from the sum of outpoints towards funding. If the
	// FundMax flag is specified the entirety of selected funds is
	// allocated towards channel funding.
	Outpoints []*lnrpc.OutPoint

	// CloseAddress sets the upfront_shutdown_script parameter during
	// channel open. It is expected to be encoded as a bitcoin address.
	CloseAddress string
}
1051

1052
// prepareOpenChannel waits for both nodes to be synced to chain and returns an
// OpenChannelRequest populated from the given OpenChannelParams.
func (h *HarnessTest) prepareOpenChannel(srcNode, destNode *node.HarnessNode,
	p OpenChannelParams) *lnrpc.OpenChannelRequest {

	// Wait until srcNode and destNode have the latest chain synced.
	// Otherwise, we may run into a check within the funding manager that
	// prevents any funding workflows from being kicked off if the chain
	// isn't yet synced.
	h.WaitForBlockchainSync(srcNode)
	h.WaitForBlockchainSync(destNode)

	// Specify the minimal confirmations of the UTXOs used for channel
	// funding. Spending unconfirmed outputs means zero required confs.
	minConfs := int32(1)
	if p.SpendUnconfirmed {
		minConfs = 0
	}

	// Get the requested conf target. If not set, default to 6.
	confTarget := p.ConfTarget.UnwrapOr(6)

	// If there's a fee rate set, unset the conf target since an explicit
	// fee rate takes precedence over target-based estimation.
	if p.SatPerVByte != 0 {
		confTarget = 0
	}

	// Prepare the request by mapping each param onto the RPC field.
	return &lnrpc.OpenChannelRequest{
		NodePubkey:         destNode.PubKey[:],
		LocalFundingAmount: int64(p.Amt),
		PushSat:            int64(p.PushAmt),
		Private:            p.Private,
		TargetConf:         confTarget,
		MinConfs:           minConfs,
		SpendUnconfirmed:   p.SpendUnconfirmed,
		MinHtlcMsat:        int64(p.MinHtlc),
		RemoteMaxHtlcs:     uint32(p.RemoteMaxHtlcs),
		FundingShim:        p.FundingShim,
		SatPerVbyte:        uint64(p.SatPerVByte),
		CommitmentType:     p.CommitmentType,
		ZeroConf:           p.ZeroConf,
		ScidAlias:          p.ScidAlias,
		BaseFee:            p.BaseFee,
		FeeRate:            p.FeeRate,
		UseBaseFee:         p.UseBaseFee,
		UseFeeRate:         p.UseFeeRate,
		FundMax:            p.FundMax,
		Memo:               p.Memo,
		Outpoints:          p.Outpoints,
		CloseAddress:       p.CloseAddress,
	}
}
1105

1106
// openChannelAssertPending attempts to open a channel between srcNode and
// destNode with the passed channel funding parameters. Once the `OpenChannel`
// is called, it will consume the first event it receives from the open channel
// client and asserts it's a channel pending event. It returns the pending
// update together with the open channel stream.
func (h *HarnessTest) openChannelAssertPending(srcNode,
	destNode *node.HarnessNode,
	p OpenChannelParams) (*lnrpc.PendingUpdate, rpc.OpenChanClient) {

	// Prepare the request and open the channel.
	openReq := h.prepareOpenChannel(srcNode, destNode, p)
	respStream := srcNode.RPC.OpenChannel(openReq)

	// Consume the "channel pending" update. This waits until the node
	// notifies us that the final message in the channel funding workflow
	// has been sent to the remote node.
	resp := h.ReceiveOpenChannelUpdate(respStream)

	// Check that the update is channel pending.
	update, ok := resp.Update.(*lnrpc.OpenStatusUpdate_ChanPending)
	require.Truef(h, ok, "expected channel pending: update, instead got %v",
		resp)

	return update.ChanPending, respStream
}
×
1130

1131
// OpenChannelAssertPending attempts to open a channel between srcNode and
1132
// destNode with the passed channel funding parameters. Once the `OpenChannel`
1133
// is called, it will consume the first event it receives from the open channel
1134
// client and asserts it's a channel pending event. It returns the
1135
// `PendingUpdate`.
1136
func (h *HarnessTest) OpenChannelAssertPending(srcNode,
1137
        destNode *node.HarnessNode, p OpenChannelParams) *lnrpc.PendingUpdate {
×
1138

×
1139
        resp, _ := h.openChannelAssertPending(srcNode, destNode, p)
×
1140
        return resp
×
1141
}
×
1142

1143
// OpenChannelAssertStream attempts to open a channel between srcNode and
1144
// destNode with the passed channel funding parameters. Once the `OpenChannel`
1145
// is called, it will consume the first event it receives from the open channel
1146
// client and asserts it's a channel pending event. It returns the open channel
1147
// stream.
1148
func (h *HarnessTest) OpenChannelAssertStream(srcNode,
1149
        destNode *node.HarnessNode, p OpenChannelParams) rpc.OpenChanClient {
×
1150

×
1151
        _, stream := h.openChannelAssertPending(srcNode, destNode, p)
×
1152
        return stream
×
1153
}
×
1154

1155
// OpenChannel attempts to open a channel with the specified parameters
// extended from Alice to Bob. Additionally, for public channels, it will mine
// extra blocks so they are announced to the network. In specific, the
// following items are asserted,
//   - for non-zero conf channel, one block will be mined to confirm the
//     funding tx.
//   - both nodes should see the channel edge update in their network graph.
//   - both nodes can report the status of the new channel from ListChannels.
//   - extra blocks are mined if it's a public channel.
func (h *HarnessTest) OpenChannel(alice, bob *node.HarnessNode,
	p OpenChannelParams) *lnrpc.ChannelPoint {

	// First, open the channel without announcing it.
	cp := h.OpenChannelNoAnnounce(alice, bob, p)

	// If this is a private channel, there's no need to mine extra blocks
	// since it will never be announced to the network.
	if p.Private {
		return cp
	}

	// Mine extra blocks to announce the channel.
	if p.ZeroConf {
		// For a zero-conf channel, no blocks have been mined so we
		// need to mine 6 blocks.
		//
		// Mine 1 block to confirm the funding transaction.
		h.MineBlocksAndAssertNumTxes(numBlocksOpenChannel, 1)
	} else {
		// For a regular channel, 1 block has already been mined to
		// confirm the funding transaction, so we mine 5 blocks.
		h.MineBlocks(numBlocksOpenChannel - 1)
	}

	return cp
}
1191

1192
// OpenChannelNoAnnounce attempts to open a channel with the specified
1193
// parameters extended from Alice to Bob without mining the necessary blocks to
1194
// announce the channel. Additionally, the following items are asserted,
1195
//   - for non-zero conf channel, 1 blocks will be mined to confirm the funding
1196
//     tx.
1197
//   - both nodes should see the channel edge update in their network graph.
1198
//   - both nodes can report the status of the new channel from ListChannels.
1199
func (h *HarnessTest) OpenChannelNoAnnounce(alice, bob *node.HarnessNode,
1200
        p OpenChannelParams) *lnrpc.ChannelPoint {
×
1201

×
1202
        chanOpenUpdate := h.OpenChannelAssertStream(alice, bob, p)
×
1203

×
1204
        // Open a zero conf channel.
×
1205
        if p.ZeroConf {
×
1206
                return h.openChannelZeroConf(alice, bob, chanOpenUpdate)
×
1207
        }
×
1208

1209
        // Open a non-zero conf channel.
1210
        return h.openChannel(alice, bob, chanOpenUpdate)
×
1211
}
1212

1213
// openChannel attempts to open a channel with the specified parameters
1214
// extended from Alice to Bob. Additionally, the following items are asserted,
1215
//   - 1 block is mined and the funding transaction should be found in it.
1216
//   - both nodes should see the channel edge update in their network graph.
1217
//   - both nodes can report the status of the new channel from ListChannels.
1218
func (h *HarnessTest) openChannel(alice, bob *node.HarnessNode,
1219
        stream rpc.OpenChanClient) *lnrpc.ChannelPoint {
×
1220

×
1221
        // Mine 1 block to confirm the funding transaction.
×
1222
        block := h.MineBlocksAndAssertNumTxes(1, 1)[0]
×
1223

×
1224
        // Wait for the channel open event.
×
1225
        fundingChanPoint := h.WaitForChannelOpenEvent(stream)
×
1226

×
1227
        // Check that the funding tx is found in the first block.
×
1228
        fundingTxID := h.GetChanPointFundingTxid(fundingChanPoint)
×
1229
        h.AssertTxInBlock(block, fundingTxID)
×
1230

×
1231
        // Check that both alice and bob have seen the channel from their
×
1232
        // network topology.
×
1233
        h.AssertChannelInGraph(alice, fundingChanPoint)
×
1234
        h.AssertChannelInGraph(bob, fundingChanPoint)
×
1235

×
1236
        // Check that the channel can be seen in their ListChannels.
×
1237
        h.AssertChannelExists(alice, fundingChanPoint)
×
1238
        h.AssertChannelExists(bob, fundingChanPoint)
×
1239

×
1240
        return fundingChanPoint
×
1241
}
×
1242

1243
// openChannelZeroConf attempts to open a channel with the specified parameters
1244
// extended from Alice to Bob. Additionally, the following items are asserted,
1245
//   - both nodes should see the channel edge update in their network graph.
1246
//   - both nodes can report the status of the new channel from ListChannels.
1247
func (h *HarnessTest) openChannelZeroConf(alice, bob *node.HarnessNode,
1248
        stream rpc.OpenChanClient) *lnrpc.ChannelPoint {
×
1249

×
1250
        // Wait for the channel open event.
×
1251
        fundingChanPoint := h.WaitForChannelOpenEvent(stream)
×
1252

×
1253
        // Check that both alice and bob have seen the channel from their
×
1254
        // network topology.
×
1255
        h.AssertChannelInGraph(alice, fundingChanPoint)
×
1256
        h.AssertChannelInGraph(bob, fundingChanPoint)
×
1257

×
1258
        // Finally, check that the channel can be seen in their ListChannels.
×
1259
        h.AssertChannelExists(alice, fundingChanPoint)
×
1260
        h.AssertChannelExists(bob, fundingChanPoint)
×
1261

×
1262
        return fundingChanPoint
×
1263
}
×
1264

1265
// OpenChannelAssertErr opens a channel between node srcNode and destNode,
1266
// asserts that the expected error is returned from the channel opening.
1267
func (h *HarnessTest) OpenChannelAssertErr(srcNode, destNode *node.HarnessNode,
1268
        p OpenChannelParams, expectedErr error) {
×
1269

×
1270
        // Prepare the request and open the channel.
×
1271
        openReq := h.prepareOpenChannel(srcNode, destNode, p)
×
1272
        respStream := srcNode.RPC.OpenChannel(openReq)
×
1273

×
1274
        // Receive an error to be sent from the stream.
×
1275
        _, err := h.receiveOpenChannelUpdate(respStream)
×
1276
        require.NotNil(h, err, "expected channel opening to fail")
×
1277

×
1278
        // Use string comparison here as we haven't codified all the RPC errors
×
1279
        // yet.
×
1280
        require.Containsf(h, err.Error(), expectedErr.Error(), "unexpected "+
×
1281
                "error returned, want %v, got %v", expectedErr, err)
×
1282
}
×
1283

1284
// CloseChannelAssertPending attempts to close the channel indicated by the
1285
// passed channel point, initiated by the passed node. Once the CloseChannel
1286
// rpc is called, it will consume one event and assert it's a close pending
1287
// event. In addition, it will check that the closing tx can be found in the
1288
// mempool.
1289
func (h *HarnessTest) CloseChannelAssertPending(hn *node.HarnessNode,
1290
        cp *lnrpc.ChannelPoint,
1291
        force bool) (rpc.CloseChanClient, chainhash.Hash) {
×
1292

×
1293
        // Calls the rpc to close the channel.
×
1294
        closeReq := &lnrpc.CloseChannelRequest{
×
1295
                ChannelPoint: cp,
×
1296
                Force:        force,
×
1297
                NoWait:       true,
×
1298
        }
×
1299

×
1300
        // For coop close, we use a default confg target of 6.
×
1301
        if !force {
×
1302
                closeReq.TargetConf = 6
×
1303
        }
×
1304

1305
        var (
×
1306
                stream rpc.CloseChanClient
×
1307
                event  *lnrpc.CloseStatusUpdate
×
1308
                err    error
×
1309
        )
×
1310

×
1311
        // Consume the "channel close" update in order to wait for the closing
×
1312
        // transaction to be broadcast, then wait for the closing tx to be seen
×
1313
        // within the network.
×
1314
        stream = hn.RPC.CloseChannel(closeReq)
×
1315
        _, err = h.ReceiveCloseChannelUpdate(stream)
×
1316
        require.NoError(h, err, "close channel update got error: %v", err)
×
1317

×
1318
        event, err = h.ReceiveCloseChannelUpdate(stream)
×
1319
        if err != nil {
×
1320
                h.Logf("Test: %s, close channel got error: %v",
×
1321
                        h.manager.currentTestCase, err)
×
1322
        }
×
1323
        require.NoError(h, err, "retry closing channel failed")
×
1324

×
1325
        pendingClose, ok := event.Update.(*lnrpc.CloseStatusUpdate_ClosePending)
×
1326
        require.Truef(h, ok, "expected channel close update, instead got %v",
×
1327
                pendingClose)
×
1328

×
1329
        closeTxid, err := chainhash.NewHash(pendingClose.ClosePending.Txid)
×
1330
        require.NoErrorf(h, err, "unable to decode closeTxid: %v",
×
1331
                pendingClose.ClosePending.Txid)
×
1332

×
1333
        // Assert the closing tx is in the mempool.
×
1334
        h.miner.AssertTxInMempool(*closeTxid)
×
1335

×
1336
        return stream, *closeTxid
×
1337
}
1338

1339
// CloseChannel attempts to coop close a non-anchored channel identified by the
1340
// passed channel point owned by the passed harness node. The following items
1341
// are asserted,
1342
//  1. a close pending event is sent from the close channel client.
1343
//  2. the closing tx is found in the mempool.
1344
//  3. the node reports the channel being waiting to close.
1345
//  4. a block is mined and the closing tx should be found in it.
1346
//  5. the node reports zero waiting close channels.
1347
//  6. the node receives a topology update regarding the channel close.
1348
func (h *HarnessTest) CloseChannel(hn *node.HarnessNode,
1349
        cp *lnrpc.ChannelPoint) chainhash.Hash {
×
1350

×
1351
        stream, _ := h.CloseChannelAssertPending(hn, cp, false)
×
1352

×
1353
        return h.AssertStreamChannelCoopClosed(hn, cp, false, stream)
×
1354
}
×
1355

1356
// ForceCloseChannel attempts to force close a non-anchored channel identified
1357
// by the passed channel point owned by the passed harness node. The following
1358
// items are asserted,
1359
//  1. a close pending event is sent from the close channel client.
1360
//  2. the closing tx is found in the mempool.
1361
//  3. the node reports the channel being waiting to close.
1362
//  4. a block is mined and the closing tx should be found in it.
1363
//  5. the node reports zero waiting close channels.
1364
//  6. the node receives a topology update regarding the channel close.
1365
//  7. mine DefaultCSV-1 blocks.
1366
//  8. the node reports zero pending force close channels.
1367
func (h *HarnessTest) ForceCloseChannel(hn *node.HarnessNode,
1368
        cp *lnrpc.ChannelPoint) chainhash.Hash {
×
1369

×
1370
        stream, _ := h.CloseChannelAssertPending(hn, cp, true)
×
1371

×
1372
        closingTxid := h.AssertStreamChannelForceClosed(hn, cp, false, stream)
×
1373

×
1374
        // Cleanup the force close.
×
1375
        h.CleanupForceClose(hn)
×
1376

×
1377
        return closingTxid
×
1378
}
×
1379

1380
// CloseChannelAssertErr closes the given channel and asserts an error
1381
// returned.
1382
func (h *HarnessTest) CloseChannelAssertErr(hn *node.HarnessNode,
1383
        cp *lnrpc.ChannelPoint, force bool) error {
×
1384

×
1385
        // Calls the rpc to close the channel.
×
1386
        closeReq := &lnrpc.CloseChannelRequest{
×
1387
                ChannelPoint: cp,
×
1388
                Force:        force,
×
1389
        }
×
1390
        stream := hn.RPC.CloseChannel(closeReq)
×
1391

×
1392
        // Consume the "channel close" update in order to wait for the closing
×
1393
        // transaction to be broadcast, then wait for the closing tx to be seen
×
1394
        // within the network.
×
1395
        _, err := h.ReceiveCloseChannelUpdate(stream)
×
1396
        require.Errorf(h, err, "%s: expect close channel to return an error",
×
1397
                hn.Name())
×
1398

×
1399
        return err
×
1400
}
×
1401

1402
// IsNeutrinoBackend returns a bool indicating whether the node is using a
1403
// neutrino as its backend. This is useful when we want to skip certain tests
1404
// which cannot be done with a neutrino backend.
1405
func (h *HarnessTest) IsNeutrinoBackend() bool {
×
1406
        return h.manager.chainBackend.Name() == NeutrinoBackendName
×
1407
}
×
1408

1409
// fundCoins attempts to send amt satoshis from the internal mining node to the
// targeted lightning node. The confirmed boolean indicates whether the
// transaction that pays to the target should confirm. For neutrino backend,
// the `confirmed` param is ignored for the unconfirmed-balance checks since
// neutrino doesn't support unconfirmed outputs.
func (h *HarnessTest) fundCoins(amt btcutil.Amount, target *node.HarnessNode,
        addrType lnrpc.AddressType, confirmed bool) {

        // Record the starting balance so expected balances below can be
        // computed relative to it.
        initialBalance := target.RPC.WalletBalance()

        // First, obtain an address from the target lightning node, preferring
        // to receive a p2wkh address s.t the output can immediately be used as
        // an input to a funding transaction.
        req := &lnrpc.NewAddressRequest{Type: addrType}
        resp := target.RPC.NewAddress(req)
        addr := h.DecodeAddress(resp.Address)
        addrScript := h.PayToAddrScript(addr)

        // Generate a transaction which creates an output to the target
        // pkScript of the desired amount.
        output := &wire.TxOut{
                PkScript: addrScript,
                Value:    int64(amt),
        }
        h.miner.SendOutput(output, defaultMinerFeeRate)

        // Encode the pkScript in hex as this is the format that it will be
        // returned via rpc.
        expPkScriptStr := hex.EncodeToString(addrScript)

        // Now, wait for ListUnspent to show the unconfirmed transaction
        // containing the correct pkscript.
        //
        // Since neutrino doesn't support unconfirmed outputs, skip this check.
        if !h.IsNeutrinoBackend() {
                utxos := h.AssertNumUTXOsUnconfirmed(target, 1)

                // Assert that the lone unconfirmed utxo contains the same
                // pkscript as the output generated above.
                pkScriptStr := utxos[0].PkScript
                require.Equal(h, pkScriptStr, expPkScriptStr,
                        "pkscript mismatch")

                expectedBalance := btcutil.Amount(
                        initialBalance.UnconfirmedBalance,
                ) + amt
                h.WaitForBalanceUnconfirmed(target, expectedBalance)
        }

        // If the transaction should remain unconfirmed, then we'll wait until
        // the target node's unconfirmed balance reflects the expected balance
        // and exit.
        if !confirmed {
                return
        }

        // Otherwise, we'll generate 1 new block to ensure the output gains a
        // sufficient number of confirmations and wait for the balance to
        // reflect what's expected.
        h.MineBlocksAndAssertNumTxes(1, 1)

        expectedBalance := btcutil.Amount(initialBalance.ConfirmedBalance) + amt
        h.WaitForBalanceConfirmed(target, expectedBalance)
}
1472

1473
// FundCoins attempts to send amt satoshis from the internal mining node to the
// targeted lightning node using a P2WKH address. One block is mined after in
// order to confirm the transaction.
func (h *HarnessTest) FundCoins(amt btcutil.Amount, hn *node.HarnessNode) {
        h.fundCoins(amt, hn, lnrpc.AddressType_WITNESS_PUBKEY_HASH, true)
}
×
1479

1480
// FundCoinsUnconfirmed attempts to send amt satoshis from the internal mining
// node to the targeted lightning node using a P2WKH address. No blocks are
// mined after, so the resulting UTXO remains unconfirmed.
func (h *HarnessTest) FundCoinsUnconfirmed(amt btcutil.Amount,
        hn *node.HarnessNode) {

        h.fundCoins(amt, hn, lnrpc.AddressType_WITNESS_PUBKEY_HASH, false)
}
×
1488

1489
// FundCoinsNP2WKH attempts to send amt satoshis from the internal mining node
// to the targeted lightning node using a NP2WKH (nested P2WKH) address. One
// block is mined to confirm the transaction.
func (h *HarnessTest) FundCoinsNP2WKH(amt btcutil.Amount,
        target *node.HarnessNode) {

        h.fundCoins(amt, target, lnrpc.AddressType_NESTED_PUBKEY_HASH, true)
}
×
1496

1497
// FundCoinsP2TR attempts to send amt satoshis from the internal mining node to
// the targeted lightning node using a P2TR (taproot) address. One block is
// mined to confirm the transaction.
func (h *HarnessTest) FundCoinsP2TR(amt btcutil.Amount,
        target *node.HarnessNode) {

        h.fundCoins(amt, target, lnrpc.AddressType_TAPROOT_PUBKEY, true)
}
×
1504

1505
// completePaymentRequestsAssertStatus sends payments from a node to complete
1506
// all payment requests. This function does not return until all payments
1507
// have reached the specified status.
1508
func (h *HarnessTest) completePaymentRequestsAssertStatus(hn *node.HarnessNode,
1509
        paymentRequests []string, status lnrpc.Payment_PaymentStatus,
1510
        opts ...HarnessOpt) {
×
1511

×
1512
        payOpts := defaultHarnessOpts()
×
1513
        for _, opt := range opts {
×
1514
                opt(&payOpts)
×
1515
        }
×
1516

1517
        // Create a buffered chan to signal the results.
1518
        results := make(chan rpc.PaymentClient, len(paymentRequests))
×
1519

×
1520
        // send sends a payment and asserts if it doesn't succeeded.
×
1521
        send := func(payReq string) {
×
1522
                req := &routerrpc.SendPaymentRequest{
×
1523
                        PaymentRequest: payReq,
×
1524
                        TimeoutSeconds: int32(wait.PaymentTimeout.Seconds()),
×
1525
                        FeeLimitMsat:   noFeeLimitMsat,
×
1526
                        Amp:            payOpts.useAMP,
×
1527
                }
×
1528
                stream := hn.RPC.SendPayment(req)
×
1529

×
1530
                // Signal sent succeeded.
×
1531
                results <- stream
×
1532
        }
×
1533

1534
        // Launch all payments simultaneously.
1535
        for _, payReq := range paymentRequests {
×
1536
                payReqCopy := payReq
×
1537
                go send(payReqCopy)
×
1538
        }
×
1539

1540
        // Wait for all payments to report the expected status.
1541
        timer := time.After(wait.PaymentTimeout)
×
1542
        select {
×
1543
        case stream := <-results:
×
1544
                h.AssertPaymentStatusFromStream(stream, status)
×
1545

1546
        case <-timer:
×
1547
                require.Fail(h, "timeout", "waiting payment results timeout")
×
1548
        }
1549
}
1550

1551
// CompletePaymentRequests sends payments from a node to complete all payment
// requests. This function does not return until all payments successfully
// complete without errors.
func (h *HarnessTest) CompletePaymentRequests(hn *node.HarnessNode,
        paymentRequests []string, opts ...HarnessOpt) {

        // Delegate to the generic helper, requiring the SUCCEEDED status.
        h.completePaymentRequestsAssertStatus(
                hn, paymentRequests, lnrpc.Payment_SUCCEEDED, opts...,
        )
}
×
1561

1562
// CompletePaymentRequestsNoWait sends payments from a node to complete all
// payment requests without waiting for the final results. Instead, it checks
// that the number of updates in the specified channel has increased, which is
// taken as evidence that the payments have been committed.
func (h *HarnessTest) CompletePaymentRequestsNoWait(hn *node.HarnessNode,
        paymentRequests []string, chanPoint *lnrpc.ChannelPoint) {

        // We start by getting the current state of the client's channels. This
        // is needed to ensure the payments actually have been committed before
        // we return.
        oldResp := h.GetChannelByChanPoint(hn, chanPoint)

        // Send payments and assert they are in-flight.
        h.completePaymentRequestsAssertStatus(
                hn, paymentRequests, lnrpc.Payment_IN_FLIGHT,
        )

        // We are not waiting for feedback in the form of a response, but we
        // should still wait long enough for the server to receive and handle
        // the send before cancelling the request. We wait until the number of
        // updates to one of our channels has increased before we return.
        err := wait.NoError(func() error {
                newResp := h.GetChannelByChanPoint(hn, chanPoint)

                // If this channel has an increased number of updates, we
                // assume the payments are committed, and we can return.
                if newResp.NumUpdates > oldResp.NumUpdates {
                        return nil
                }

                // Otherwise return an error as the NumUpdates are not
                // increased.
                return fmt.Errorf("%s: channel:%v not updated after sending "+
                        "payments, old updates: %v, new updates: %v", hn.Name(),
                        chanPoint, oldResp.NumUpdates, newResp.NumUpdates)
        }, DefaultTimeout)
        require.NoError(h, err, "timeout while checking for channel updates")
}
1599

1600
// OpenChannelPsbt attempts to open a channel between srcNode and destNode with
// the passed channel funding parameters. It will assert if the expected step
// of funding the PSBT is not received from the source node. It returns the
// long-lived open-channel stream plus the raw PSBT bytes to be funded by the
// caller.
func (h *HarnessTest) OpenChannelPsbt(srcNode, destNode *node.HarnessNode,
        p OpenChannelParams) (rpc.OpenChanClient, []byte) {

        // Wait until srcNode and destNode have the latest chain synced.
        // Otherwise, we may run into a check within the funding manager that
        // prevents any funding workflows from being kicked off if the chain
        // isn't yet synced.
        h.WaitForBlockchainSync(srcNode)
        h.WaitForBlockchainSync(destNode)

        // Send the request to open a channel to the source node now. This will
        // open a long-lived stream where we'll receive status updates about
        // the progress of the channel.
        req := &lnrpc.OpenChannelRequest{
                NodePubkey:         destNode.PubKey[:],
                LocalFundingAmount: int64(p.Amt),
                PushSat:            int64(p.PushAmt),
                Private:            p.Private,
                SpendUnconfirmed:   p.SpendUnconfirmed,
                MinHtlcMsat:        int64(p.MinHtlc),
                FundingShim:        p.FundingShim,
                CommitmentType:     p.CommitmentType,
        }
        respStream := srcNode.RPC.OpenChannel(req)

        // Consume the "PSBT funding ready" update. This waits until the node
        // notifies us that the PSBT can now be funded.
        resp := h.ReceiveOpenChannelUpdate(respStream)
        upd, ok := resp.Update.(*lnrpc.OpenStatusUpdate_PsbtFund)
        require.Truef(h, ok, "expected PSBT funding update, got %v", resp)

        // Make sure the channel funding address has the correct type for the
        // given commitment type.
        fundingAddr, err := btcutil.DecodeAddress(
                upd.PsbtFund.FundingAddress, miner.HarnessNetParams,
        )
        require.NoError(h, err)

        switch p.CommitmentType {
        // Taproot channels fund to a P2TR output.
        case lnrpc.CommitmentType_SIMPLE_TAPROOT:
                require.IsType(h, &btcutil.AddressTaproot{}, fundingAddr)

        // All other commitment types fund to a P2WSH output.
        default:
                require.IsType(
                        h, &btcutil.AddressWitnessScriptHash{}, fundingAddr,
                )
        }

        return respStream, upd.PsbtFund.Psbt
}
1654

1655
// CleanupForceClose mines blocks to clean up the force close process. This is
// used for tests that are not asserting the expected behavior is found during
// the force close process, e.g., num of sweeps, etc. Instead, it provides a
// shortcut to move the test forward with a clean mempool.
func (h *HarnessTest) CleanupForceClose(hn *node.HarnessNode) {
        // Wait for the channel to be marked pending force close.
        h.AssertNumPendingForceClose(hn, 1)

        // Mine enough blocks for the node to sweep its funds from the force
        // closed channel. The commit sweep resolver offers the input to the
        // sweeper when the channel is force closed, and the sweep tx is
        // broadcast at DefaultCSV-1.
        //
        // NOTE: we may mine empty blocks here as we don't know the exact
        // number of blocks needed. This may end up mining more blocks than
        // necessary.
        h.MineEmptyBlocks(node.DefaultCSV - 1)

        // Assert there is one pending sweep.
        h.AssertNumPendingSweeps(hn, 1)

        // The node should now sweep the funds, clean up by mining the sweeping
        // tx.
        h.MineBlocksAndAssertNumTxes(1, 1)

        // Mine blocks to get any second level HTLC resolved. If there are no
        // HTLCs, this will behave like h.AssertNumPendingCloseChannels.
        h.mineTillForceCloseResolved(hn)
}
×
1683

1684
// CreatePayReqs is a helper method that will create a slice of payment
1685
// requests for the given node.
1686
func (h *HarnessTest) CreatePayReqs(hn *node.HarnessNode,
1687
        paymentAmt btcutil.Amount, numInvoices int,
1688
        routeHints ...*lnrpc.RouteHint) ([]string, [][]byte, []*lnrpc.Invoice) {
×
1689

×
1690
        payReqs := make([]string, numInvoices)
×
1691
        rHashes := make([][]byte, numInvoices)
×
1692
        invoices := make([]*lnrpc.Invoice, numInvoices)
×
1693
        for i := 0; i < numInvoices; i++ {
×
1694
                preimage := h.Random32Bytes()
×
1695

×
1696
                invoice := &lnrpc.Invoice{
×
1697
                        Memo:       "testing",
×
1698
                        RPreimage:  preimage,
×
1699
                        Value:      int64(paymentAmt),
×
1700
                        RouteHints: routeHints,
×
1701
                }
×
1702
                resp := hn.RPC.AddInvoice(invoice)
×
1703

×
1704
                // Set the payment address in the invoice so the caller can
×
1705
                // properly use it.
×
1706
                invoice.PaymentAddr = resp.PaymentAddr
×
1707

×
1708
                payReqs[i] = resp.PaymentRequest
×
1709
                rHashes[i] = resp.RHash
×
1710
                invoices[i] = invoice
×
1711
        }
×
1712

1713
        return payReqs, rHashes, invoices
×
1714
}
1715

1716
// BackupDB creates a backup of the current database. It will stop the node
// first, copy the database files, and restart the node.
func (h *HarnessTest) BackupDB(hn *node.HarnessNode) {
        // Suspend the node first; the returned closure restarts it.
        restart := h.SuspendNode(hn)

        err := hn.BackupDB()
        require.NoErrorf(h, err, "%s: failed to backup db", hn.Name())

        // Bring the node back up once the backup has been taken.
        err = restart()
        require.NoErrorf(h, err, "%s: failed to restart", hn.Name())
}
×
1727

1728
// RestartNodeAndRestoreDB restarts a given node, passing a callback that
// restores the db as part of the restart, then unlocks the node and waits for
// it to sync to chain.
func (h *HarnessTest) RestartNodeAndRestoreDB(hn *node.HarnessNode) {
        // The restore runs inside the restart flow via this callback.
        cb := func() error { return hn.RestoreDB() }
        err := h.manager.restartNode(h.runCtx, hn, cb)
        require.NoErrorf(h, err, "failed to restart node %s", hn.Name())

        err = h.manager.unlockNode(hn)
        require.NoErrorf(h, err, "failed to unlock node %s", hn.Name())

        // Give the node some time to catch up with the chain before we
        // continue with the tests.
        h.WaitForBlockchainSync(hn)
}
1742

1743
// CleanShutDown is used to quickly end a test by shutting down all non-standby
// nodes and mining blocks to empty the mempool.
//
// NOTE: this method provides a faster exit for a test that involves force
// closures as the caller doesn't need to mine all the blocks to make sure the
// mempool is empty.
func (h *HarnessTest) CleanShutDown() {
        // First, shutdown all non-standby nodes to prevent new transactions
        // being created and fed into the mempool.
        h.shutdownNonStandbyNodes()

        // Now mine blocks till the mempool is empty.
        h.cleanMempool()
}
×
1757

1758
// QueryChannelByChanPoint tries to find a channel matching the channel point
// on the given node and asserts that no error occurred. It returns the
// channel found.
func (h *HarnessTest) QueryChannelByChanPoint(hn *node.HarnessNode,
        chanPoint *lnrpc.ChannelPoint,
        opts ...ListChannelOption) *lnrpc.Channel {

        channel, err := h.findChannel(hn, chanPoint, opts...)
        require.NoError(h, err, "failed to query channel")

        return channel
}
×
1769

1770
// SendPaymentAndAssertStatus sends a payment from the passed node and asserts
// the desired status is reached. It returns the resulting payment record.
func (h *HarnessTest) SendPaymentAndAssertStatus(hn *node.HarnessNode,
        req *routerrpc.SendPaymentRequest,
        status lnrpc.Payment_PaymentStatus) *lnrpc.Payment {

        stream := hn.RPC.SendPayment(req)
        return h.AssertPaymentStatusFromStream(stream, status)
}
×
1779

1780
// SendPaymentAssertFail sends a payment from the passed node and asserts the
// payment fails with the specified failure reason. The failed payment record
// is returned.
func (h *HarnessTest) SendPaymentAssertFail(hn *node.HarnessNode,
        req *routerrpc.SendPaymentRequest,
        reason lnrpc.PaymentFailureReason) *lnrpc.Payment {

        payment := h.SendPaymentAndAssertStatus(hn, req, lnrpc.Payment_FAILED)
        require.Equal(h, reason, payment.FailureReason,
                "payment failureReason not matched")

        return payment
}
×
1792

1793
// SendPaymentAssertSettled sends a payment from the passed node and asserts
// the payment is settled (SUCCEEDED), returning the payment record.
func (h *HarnessTest) SendPaymentAssertSettled(hn *node.HarnessNode,
        req *routerrpc.SendPaymentRequest) *lnrpc.Payment {

        return h.SendPaymentAndAssertStatus(hn, req, lnrpc.Payment_SUCCEEDED)
}
×
1800

1801
// SendPaymentAssertInflight sends a payment from the passed node and asserts
// the payment reaches the IN_FLIGHT status, returning the payment record.
func (h *HarnessTest) SendPaymentAssertInflight(hn *node.HarnessNode,
        req *routerrpc.SendPaymentRequest) *lnrpc.Payment {

        return h.SendPaymentAndAssertStatus(hn, req, lnrpc.Payment_IN_FLIGHT)
}
×
1808

1809
// OpenChannelRequest is used to open a channel using the method
// OpenMultiChannelsAsync.
type OpenChannelRequest struct {
        // Local is the funding node.
        Local *node.HarnessNode

        // Remote is the receiving node.
        Remote *node.HarnessNode

        // Param is the open channel params.
        Param OpenChannelParams

        // stream is the client created after calling the OpenChannel RPC;
        // populated internally by OpenMultiChannelsAsync.
        stream rpc.OpenChanClient

        // result is a channel used to send the channel point once the funding
        // has succeeded; populated internally by OpenMultiChannelsAsync.
        result chan *lnrpc.ChannelPoint
}
1828

1829
// OpenMultiChannelsAsync takes a list of OpenChannelRequest and opens them in
// batch. The channel points are returned in the same order as the requests
// once all of the channel opens have succeeded.
//
// NOTE: compared to opening multiple channels sequentially, this method will
// be faster as it doesn't need to mine 6 blocks for each channel open.
// However, it does make debugging the logs more difficult as messages are
// intertwined.
func (h *HarnessTest) OpenMultiChannelsAsync(
        reqs []*OpenChannelRequest) []*lnrpc.ChannelPoint {

        // openChannel opens a channel based on the request and stores the
        // resulting stream on the request for later consumption.
        openChannel := func(req *OpenChannelRequest) {
                stream := h.OpenChannelAssertStream(
                        req.Local, req.Remote, req.Param,
                )
                req.stream = stream
        }

        // assertChannelOpen is a helper closure that asserts a channel is
        // open and delivers its channel point on req.result.
        assertChannelOpen := func(req *OpenChannelRequest) {
                // Wait for the channel open event from the stream.
                cp := h.WaitForChannelOpenEvent(req.stream)

                if !req.Param.Private {
                        // Check that both sides have seen the channel
                        // from their channel watch request.
                        h.AssertChannelInGraph(req.Local, cp)
                        h.AssertChannelInGraph(req.Remote, cp)
                }

                // Finally, check that the channel can be seen in their
                // ListChannels.
                h.AssertChannelExists(req.Local, cp)
                h.AssertChannelExists(req.Remote, cp)

                req.result <- cp
        }

        // Go through the requests and make the OpenChannel RPC call.
        for _, r := range reqs {
                openChannel(r)
        }

        // Mine one block to confirm all the funding transactions.
        h.MineBlocksAndAssertNumTxes(1, len(reqs))

        // Mine 5 more blocks so all the public channels are announced to the
        // network.
        h.MineBlocks(numBlocksOpenChannel - 1)

        // Once the blocks are mined, we fire goroutines for each of the
        // requests to watch for the channel opening.
        for _, r := range reqs {
                r.result = make(chan *lnrpc.ChannelPoint, 1)
                go assertChannelOpen(r)
        }

        // Finally, collect the results.
        channelPoints := make([]*lnrpc.ChannelPoint, 0)
        for _, r := range reqs {
                select {
                case cp := <-r.result:
                        channelPoints = append(channelPoints, cp)

                case <-time.After(wait.ChannelOpenTimeout):
                        require.Failf(h, "timeout", "wait channel point "+
                                "timeout for channel %s=>%s", r.Local.Name(),
                                r.Remote.Name())
                }
        }

        // Assert that we have the expected num of channel points.
        require.Len(h, channelPoints, len(reqs),
                "returned channel points not match")

        return channelPoints
}
1907

1908
// ReceiveInvoiceUpdate waits until a message is received on the subscribe
// invoice stream or the timeout is reached. The test fails (and nil is
// returned) on timeout or on a stream error.
func (h *HarnessTest) ReceiveInvoiceUpdate(
        stream rpc.InvoiceUpdateClient) *lnrpc.Invoice {

        chanMsg := make(chan *lnrpc.Invoice)
        errChan := make(chan error)
        go func() {
                // Consume one message. This will block until the message is
                // received.
                resp, err := stream.Recv()
                if err != nil {
                        errChan <- err
                        return
                }
                chanMsg <- resp
        }()

        select {
        case <-time.After(DefaultTimeout):
                require.Fail(h, "timeout", "timeout receiving invoice update")

        case err := <-errChan:
                require.Failf(h, "err from stream",
                        "received err from stream: %v", err)

        case updateMsg := <-chanMsg:
                return updateMsg
        }

        // Only reached if one of the failing requires above did not abort the
        // test; returned to satisfy the compiler.
        return nil
}
1940

1941
// CalculateTxFee retrieves parent transactions and reconstructs the fee paid.
1942
func (h *HarnessTest) CalculateTxFee(tx *wire.MsgTx) btcutil.Amount {
×
1943
        var balance btcutil.Amount
×
1944
        for _, in := range tx.TxIn {
×
1945
                parentHash := in.PreviousOutPoint.Hash
×
1946
                rawTx := h.miner.GetRawTransaction(parentHash)
×
1947
                parent := rawTx.MsgTx()
×
1948
                value := parent.TxOut[in.PreviousOutPoint.Index].Value
×
1949

×
1950
                balance += btcutil.Amount(value)
×
1951
        }
×
1952

1953
        for _, out := range tx.TxOut {
×
1954
                balance -= btcutil.Amount(out.Value)
×
1955
        }
×
1956

1957
        return balance
×
1958
}
1959

1960
// CalculateTxWeight calculates the weight for a given tx.
1961
//
1962
// TODO(yy): use weight estimator to get more accurate result.
1963
func (h *HarnessTest) CalculateTxWeight(tx *wire.MsgTx) lntypes.WeightUnit {
×
1964
        utx := btcutil.NewTx(tx)
×
1965
        return lntypes.WeightUnit(blockchain.GetTransactionWeight(utx))
×
1966
}
×
1967

1968
// CalculateTxFeeRate calculates the fee rate for a given tx.
1969
func (h *HarnessTest) CalculateTxFeeRate(
1970
        tx *wire.MsgTx) chainfee.SatPerKWeight {
×
1971

×
1972
        w := h.CalculateTxWeight(tx)
×
1973
        fee := h.CalculateTxFee(tx)
×
1974

×
1975
        return chainfee.NewSatPerKWeight(fee, w)
×
1976
}
×
1977

1978
// CalculateTxesFeeRate takes a list of transactions and estimates the fee rate
1979
// used to sweep them.
1980
//
1981
// NOTE: only used in current test file.
1982
func (h *HarnessTest) CalculateTxesFeeRate(txns []*wire.MsgTx) int64 {
×
1983
        const scale = 1000
×
1984

×
1985
        var totalWeight, totalFee int64
×
1986
        for _, tx := range txns {
×
1987
                utx := btcutil.NewTx(tx)
×
1988
                totalWeight += blockchain.GetTransactionWeight(utx)
×
1989

×
1990
                fee := h.CalculateTxFee(tx)
×
1991
                totalFee += int64(fee)
×
1992
        }
×
1993
        feeRate := totalFee * scale / totalWeight
×
1994

×
1995
        return feeRate
×
1996
}
1997

1998
// AssertSweepFound looks up a sweep in a node's list of broadcast sweeps and
// asserts it's found, retrying until the timeout.
//
// NOTE: Does not account for node's internal state.
func (h *HarnessTest) AssertSweepFound(hn *node.HarnessNode,
        sweep string, verbose bool, startHeight int32) {

        err := wait.NoError(func() error {
                // List all sweeps that the node has broadcast.
                sweepResp := hn.RPC.ListSweeps(verbose, startHeight)

                // The response shape depends on verbose: detailed records vs
                // plain txids, so dispatch to the matching finder.
                var found bool
                if verbose {
                        found = findSweepInDetails(h, sweep, sweepResp)
                } else {
                        found = findSweepInTxids(h, sweep, sweepResp)
                }

                if found {
                        return nil
                }

                return fmt.Errorf("sweep tx %v not found in resp %v", sweep,
                        sweepResp)
        }, wait.DefaultTimeout)
        require.NoError(h, err, "%s: timeout checking sweep tx", hn.Name())
}
2025

2026
func findSweepInTxids(ht *HarnessTest, sweepTxid string,
2027
        sweepResp *walletrpc.ListSweepsResponse) bool {
×
2028

×
2029
        sweepTxIDs := sweepResp.GetTransactionIds()
×
2030
        require.NotNil(ht, sweepTxIDs, "expected transaction ids")
×
2031
        require.Nil(ht, sweepResp.GetTransactionDetails())
×
2032

×
2033
        // Check that the sweep tx we have just produced is present.
×
2034
        for _, tx := range sweepTxIDs.TransactionIds {
×
2035
                if tx == sweepTxid {
×
2036
                        return true
×
2037
                }
×
2038
        }
2039

2040
        return false
×
2041
}
2042

2043
func findSweepInDetails(ht *HarnessTest, sweepTxid string,
2044
        sweepResp *walletrpc.ListSweepsResponse) bool {
×
2045

×
2046
        sweepDetails := sweepResp.GetTransactionDetails()
×
2047
        require.NotNil(ht, sweepDetails, "expected transaction details")
×
2048
        require.Nil(ht, sweepResp.GetTransactionIds())
×
2049

×
2050
        for _, tx := range sweepDetails.Transactions {
×
2051
                if tx.TxHash == sweepTxid {
×
2052
                        return true
×
2053
                }
×
2054
        }
2055

2056
        return false
×
2057
}
2058

2059
// QueryRoutesAndRetry attempts to keep querying a route until timeout is
// reached.
//
// NOTE: when a channel is opened, we may need to query multiple times to get
// it in our QueryRoutes RPC. This happens even after we check the channel is
// heard by the node using ht.AssertChannelOpen. Deep down, this is because our
// GraphTopologySubscription and QueryRoutes give different results regarding a
// specific channel, with the former reporting it being open while the latter
// not, resulting in GraphTopologySubscription acting "faster" than
// QueryRoutes.
// TODO(yy): make sure related subsystems share the same view on a given
// channel.
func (h *HarnessTest) QueryRoutesAndRetry(hn *node.HarnessNode,
        req *lnrpc.QueryRoutesRequest) *lnrpc.QueryRoutesResponse {

        var routes *lnrpc.QueryRoutesResponse
        err := wait.NoError(func() error {
                // Derive a cancellable context from the harness context so
                // each attempt is cleaned up when it returns.
                ctxt, cancel := context.WithCancel(h.runCtx)
                defer cancel()

                resp, err := hn.RPC.LN.QueryRoutes(ctxt, req)
                if err != nil {
                        return fmt.Errorf("%s: failed to query route: %w",
                                hn.Name(), err)
                }

                routes = resp

                return nil
        }, DefaultTimeout)

        require.NoError(h, err, "timeout querying routes")

        return routes
}
2093

2094
// ReceiveHtlcInterceptor waits until a message is received on the htlc
// interceptor stream or the timeout is reached. The test fails (and nil is
// returned) on timeout or on a stream error.
func (h *HarnessTest) ReceiveHtlcInterceptor(
        stream rpc.InterceptorClient) *routerrpc.ForwardHtlcInterceptRequest {

        chanMsg := make(chan *routerrpc.ForwardHtlcInterceptRequest)
        errChan := make(chan error)
        go func() {
                // Consume one message. This will block until the message is
                // received.
                resp, err := stream.Recv()
                if err != nil {
                        errChan <- err
                        return
                }
                chanMsg <- resp
        }()

        select {
        case <-time.After(DefaultTimeout):
                require.Fail(h, "timeout", "timeout intercepting htlc")

        case err := <-errChan:
                require.Failf(h, "err from HTLC interceptor stream",
                        "received err from HTLC interceptor stream: %v", err)

        case updateMsg := <-chanMsg:
                return updateMsg
        }

        // Only reached if one of the failing requires above did not abort the
        // test; returned to satisfy the compiler.
        return nil
}
2126

2127
// ReceiveInvoiceHtlcModification waits until a message is received on the
2128
// invoice HTLC modifier stream or the timeout is reached.
2129
func (h *HarnessTest) ReceiveInvoiceHtlcModification(
2130
        stream rpc.InvoiceHtlcModifierClient) *invoicesrpc.HtlcModifyRequest {
×
2131

×
2132
        chanMsg := make(chan *invoicesrpc.HtlcModifyRequest)
×
2133
        errChan := make(chan error)
×
2134
        go func() {
×
2135
                // Consume one message. This will block until the message is
×
2136
                // received.
×
2137
                resp, err := stream.Recv()
×
2138
                if err != nil {
×
2139
                        errChan <- err
×
2140
                        return
×
2141
                }
×
2142
                chanMsg <- resp
×
2143
        }()
2144

2145
        select {
×
2146
        case <-time.After(DefaultTimeout):
×
2147
                require.Fail(h, "timeout", "timeout invoice HTLC modifier")
×
2148

2149
        case err := <-errChan:
×
2150
                require.Failf(h, "err from invoice HTLC modifier stream",
×
2151
                        "received err from invoice HTLC modifier stream: %v",
×
2152
                        err)
×
2153

2154
        case updateMsg := <-chanMsg:
×
2155
                return updateMsg
×
2156
        }
2157

2158
        return nil
×
2159
}
2160

2161
// ReceiveChannelEvent waits until a message is received from the
2162
// ChannelEventsClient stream or the timeout is reached.
2163
func (h *HarnessTest) ReceiveChannelEvent(
2164
        stream rpc.ChannelEventsClient) *lnrpc.ChannelEventUpdate {
×
2165

×
2166
        chanMsg := make(chan *lnrpc.ChannelEventUpdate)
×
2167
        errChan := make(chan error)
×
2168
        go func() {
×
2169
                // Consume one message. This will block until the message is
×
2170
                // received.
×
2171
                resp, err := stream.Recv()
×
2172
                if err != nil {
×
2173
                        errChan <- err
×
2174
                        return
×
2175
                }
×
2176
                chanMsg <- resp
×
2177
        }()
2178

2179
        select {
×
2180
        case <-time.After(DefaultTimeout):
×
2181
                require.Fail(h, "timeout", "timeout intercepting htlc")
×
2182

2183
        case err := <-errChan:
×
2184
                require.Failf(h, "err from stream",
×
2185
                        "received err from stream: %v", err)
×
2186

2187
        case updateMsg := <-chanMsg:
×
2188
                return updateMsg
×
2189
        }
2190

2191
        return nil
×
2192
}
2193

2194
// GetOutputIndex returns the output index of the given address in the given
2195
// transaction.
2196
func (h *HarnessTest) GetOutputIndex(txid chainhash.Hash, addr string) int {
×
2197
        // We'll then extract the raw transaction from the mempool in order to
×
2198
        // determine the index of the p2tr output.
×
2199
        tx := h.miner.GetRawTransaction(txid)
×
2200

×
2201
        p2trOutputIndex := -1
×
2202
        for i, txOut := range tx.MsgTx().TxOut {
×
2203
                _, addrs, _, err := txscript.ExtractPkScriptAddrs(
×
2204
                        txOut.PkScript, h.miner.ActiveNet,
×
2205
                )
×
2206
                require.NoError(h, err)
×
2207

×
2208
                if addrs[0].String() == addr {
×
2209
                        p2trOutputIndex = i
×
2210
                }
×
2211
        }
2212
        require.Greater(h, p2trOutputIndex, -1)
×
2213

×
2214
        return p2trOutputIndex
×
2215
}
2216

2217
// SendCoins sends a coin from node A to node B with the given amount, returns
2218
// the sending tx.
2219
func (h *HarnessTest) SendCoins(a, b *node.HarnessNode,
2220
        amt btcutil.Amount) *wire.MsgTx {
×
2221

×
2222
        // Create an address for Bob receive the coins.
×
2223
        req := &lnrpc.NewAddressRequest{
×
2224
                Type: lnrpc.AddressType_TAPROOT_PUBKEY,
×
2225
        }
×
2226
        resp := b.RPC.NewAddress(req)
×
2227

×
2228
        // Send the coins from Alice to Bob. We should expect a tx to be
×
2229
        // broadcast and seen in the mempool.
×
2230
        sendReq := &lnrpc.SendCoinsRequest{
×
2231
                Addr:       resp.Address,
×
2232
                Amount:     int64(amt),
×
2233
                TargetConf: 6,
×
2234
        }
×
2235
        a.RPC.SendCoins(sendReq)
×
2236
        tx := h.GetNumTxsFromMempool(1)[0]
×
2237

×
2238
        return tx
×
2239
}
×
2240

2241
// CreateSimpleNetwork creates the number of nodes specified by the number of
2242
// configs and makes a topology of `node1 -> node2 -> node3...`. Each node is
2243
// created using the specified config, the neighbors are connected, and the
2244
// channels are opened. Each node will be funded with a single UTXO of 1 BTC
2245
// except the last one.
2246
//
2247
// For instance, to create a network with 2 nodes that share the same node
2248
// config,
2249
//
2250
//        cfg := []string{"--protocol.anchors"}
2251
//        cfgs := [][]string{cfg, cfg}
2252
//        params := OpenChannelParams{...}
2253
//        chanPoints, nodes := ht.CreateSimpleNetwork(cfgs, params)
2254
//
2255
// This will create two nodes and open an anchor channel between them.
2256
func (h *HarnessTest) CreateSimpleNetwork(nodeCfgs [][]string,
2257
        p OpenChannelParams) ([]*lnrpc.ChannelPoint, []*node.HarnessNode) {
×
2258

×
2259
        // Create new nodes.
×
2260
        nodes := h.createNodes(nodeCfgs)
×
2261

×
2262
        var resp []*lnrpc.ChannelPoint
×
2263

×
2264
        // Open zero-conf channels if specified.
×
2265
        if p.ZeroConf {
×
2266
                resp = h.openZeroConfChannelsForNodes(nodes, p)
×
2267
        } else {
×
2268
                // Open channels between the nodes.
×
2269
                resp = h.openChannelsForNodes(nodes, p)
×
2270
        }
×
2271

2272
        return resp, nodes
×
2273
}
2274

2275
// acceptChannel is used to accept a single channel that comes across. This
2276
// should be run in a goroutine and is used to test nodes with the zero-conf
2277
// feature bit.
2278
func acceptChannel(t *testing.T, zeroConf bool, stream rpc.AcceptorClient) {
×
2279
        req, err := stream.Recv()
×
2280
        require.NoError(t, err)
×
2281

×
2282
        resp := &lnrpc.ChannelAcceptResponse{
×
2283
                Accept:        true,
×
2284
                PendingChanId: req.PendingChanId,
×
2285
                ZeroConf:      zeroConf,
×
2286
        }
×
2287
        err = stream.Send(resp)
×
2288
        require.NoError(t, err)
×
2289
}
×
2290

2291
// createNodes creates the number of nodes specified by the number of configs.
// Each node is created using the specified config, the neighbors are
// connected in a chain (node[i-1] <-> node[i]), and every node except the
// last one is funded with a single confirmed 1 BTC UTXO.
func (h *HarnessTest) createNodes(nodeCfgs [][]string) []*node.HarnessNode {
        // Get the number of nodes.
        numNodes := len(nodeCfgs)

        // Make a slice of nodes.
        nodes := make([]*node.HarnessNode, numNodes)

        // Create new nodes, named alphabetically from 'A'.
        // NOTE(review): %q quotes the letter, yielding names like `Node"A"` —
        // confirm the quoting is intended rather than %c/%s.
        for i, nodeCfg := range nodeCfgs {
                nodeName := fmt.Sprintf("Node%q", string(rune('A'+i)))
                n := h.NewNode(nodeName, nodeCfg)
                nodes[i] = n
        }

        // Connect the nodes in a chain.
        for i := 1; i < len(nodes); i++ {
                nodeA := nodes[i-1]
                nodeB := nodes[i]
                h.EnsureConnected(nodeA, nodeB)
        }

        // Fund all the nodes except the last one.
        for i := 0; i < len(nodes)-1; i++ {
                node := nodes[i]
                h.FundCoinsUnconfirmed(btcutil.SatoshiPerBitcoin, node)
        }

        // Mine 1 block to get the above coins confirmed. We expect exactly
        // one funding tx per funded node, i.e. numNodes-1 txns.
        h.MineBlocksAndAssertNumTxes(1, numNodes-1)

        return nodes
}
2326

2327
// openChannelsForNodes takes a list of nodes and makes a topology of `node1 ->
// node2 -> node3...`. All channels are opened in a single batch to minimize
// the number of blocks mined. If the params request a script-enforced lease
// commitment type, a funding shim is derived and attached for each pair. The
// channel points are returned in the same order as the node pairs.
func (h *HarnessTest) openChannelsForNodes(nodes []*node.HarnessNode,
        p OpenChannelParams) []*lnrpc.ChannelPoint {

        // Sanity check the params.
        require.Greater(h, len(nodes), 1, "need at least 2 nodes")

        // attachFundingShim is a helper closure that optionally attaches a
        // funding shim to the open channel params and returns it. Note that
        // it reads and returns a modified copy of the captured params `p`.
        attachFundingShim := func(
                nodeA, nodeB *node.HarnessNode) OpenChannelParams {

                // If this channel is not a script enforced lease channel,
                // we'll do nothing and return the params.
                leasedType := lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE
                if p.CommitmentType != leasedType {
                        return p
                }

                // Otherwise derive the funding shim, attach it to the original
                // open channel params and return it.
                minerHeight := h.CurrentHeight()
                thawHeight := minerHeight + thawHeightDelta
                fundingShim, _ := h.DeriveFundingShim(
                        nodeA, nodeB, p.Amt, thawHeight, true, leasedType,
                )

                p.FundingShim = fundingShim

                return p
        }

        // Open channels in batch to save blocks mined.
        reqs := make([]*OpenChannelRequest, 0, len(nodes)-1)
        for i := 0; i < len(nodes)-1; i++ {
                nodeA := nodes[i]
                nodeB := nodes[i+1]

                // Optionally attach a funding shim to the open channel params.
                p = attachFundingShim(nodeA, nodeB)

                req := &OpenChannelRequest{
                        Local:  nodeA,
                        Remote: nodeB,
                        Param:  p,
                }
                reqs = append(reqs, req)
        }
        resp := h.OpenMultiChannelsAsync(reqs)

        // Make sure the nodes know each other's channels if they are public.
        if !p.Private {
                for _, node := range nodes {
                        for _, chanPoint := range resp {
                                h.AssertChannelInGraph(node, chanPoint)
                        }
                }
        }

        return resp
}
2389

2390
// openZeroConfChannelsForNodes takes a list of nodes and makes a topology of
2391
// `node1 -> node2 -> node3...` with zero-conf channels.
2392
func (h *HarnessTest) openZeroConfChannelsForNodes(nodes []*node.HarnessNode,
2393
        p OpenChannelParams) []*lnrpc.ChannelPoint {
×
2394

×
2395
        // Sanity check the params.
×
2396
        require.True(h, p.ZeroConf, "zero-conf channels must be enabled")
×
2397
        require.Greater(h, len(nodes), 1, "need at least 2 nodes")
×
2398

×
2399
        // We are opening numNodes-1 channels.
×
2400
        cancels := make([]context.CancelFunc, 0, len(nodes)-1)
×
2401

×
2402
        // Create the channel acceptors.
×
2403
        for _, node := range nodes[1:] {
×
2404
                acceptor, cancel := node.RPC.ChannelAcceptor()
×
2405
                go acceptChannel(h.T, true, acceptor)
×
2406

×
2407
                cancels = append(cancels, cancel)
×
2408
        }
×
2409

2410
        // Open channels between the nodes.
2411
        resp := h.openChannelsForNodes(nodes, p)
×
2412

×
2413
        for _, cancel := range cancels {
×
2414
                cancel()
×
2415
        }
×
2416

2417
        return resp
×
2418
}
2419

2420
// DeriveFundingShim creates a channel funding shim by deriving the necessary
// keys on both sides.
//
// It derives a multi-sig funding key on each node, builds the funding output
// for `chanSize` (a taproot/musig2 output for simple-taproot commitment
// types, a regular p2wsh funding script otherwise), and either broadcasts the
// funding transaction (publish=true) or only constructs it locally. The shim
// is registered with `bob` as the responder, then re-pointed at `alice` as
// the initiator by swapping the local/remote keys. It returns the initiator's
// funding shim and the channel point of the funding output.
func (h *HarnessTest) DeriveFundingShim(alice, bob *node.HarnessNode,
        chanSize btcutil.Amount, thawHeight uint32, publish bool,
        commitType lnrpc.CommitmentType) (*lnrpc.FundingShim,
        *lnrpc.ChannelPoint) {

        // Derive a dedicated (non-default) key family for the funding keys.
        keyLoc := &signrpc.KeyLocator{KeyFamily: 9999}
        carolFundingKey := alice.RPC.DeriveKey(keyLoc)
        daveFundingKey := bob.RPC.DeriveKey(keyLoc)

        // Now that we have the multi-sig keys for each party, we can manually
        // construct the funding transaction. We'll instruct the backend to
        // immediately create and broadcast a transaction paying out an exact
        // amount. Normally this would reside in the mempool, but we just
        // confirm it now for simplicity.
        var (
                fundingOutput *wire.TxOut
                musig2        bool
                err           error
        )

        if commitType == lnrpc.CommitmentType_SIMPLE_TAPROOT ||
                commitType == lnrpc.CommitmentType_SIMPLE_TAPROOT_OVERLAY {

                // Taproot channels aggregate the two keys via musig2, so the
                // raw key bytes must be parsed into public keys first.
                var carolKey, daveKey *btcec.PublicKey
                carolKey, err = btcec.ParsePubKey(carolFundingKey.RawKeyBytes)
                require.NoError(h, err)
                daveKey, err = btcec.ParsePubKey(daveFundingKey.RawKeyBytes)
                require.NoError(h, err)

                _, fundingOutput, err = input.GenTaprootFundingScript(
                        carolKey, daveKey, int64(chanSize),
                        fn.None[chainhash.Hash](),
                )
                require.NoError(h, err)

                musig2 = true
        } else {
                // Legacy/anchor channels use a plain 2-of-2 funding script.
                _, fundingOutput, err = input.GenFundingPkScript(
                        carolFundingKey.RawKeyBytes, daveFundingKey.RawKeyBytes,
                        int64(chanSize),
                )
                require.NoError(h, err)
        }

        // Either broadcast the funding transaction, or just build it locally
        // so we know its txid without publishing.
        var txid *chainhash.Hash
        targetOutputs := []*wire.TxOut{fundingOutput}
        if publish {
                txid = h.SendOutputsWithoutChange(targetOutputs, 5)
        } else {
                tx := h.CreateTransaction(targetOutputs, 5)

                txHash := tx.TxHash()
                txid = &txHash
        }

        // At this point, we can begin our external channel funding workflow.
        // We'll start by generating a pending channel ID externally that will
        // be used to track this new funding type.
        pendingChanID := h.Random32Bytes()

        // Now that we have the pending channel ID, Dave (our responder) will
        // register the intent to receive a new channel funding workflow using
        // the pending channel ID.
        chanPoint := &lnrpc.ChannelPoint{
                FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
                        FundingTxidBytes: txid[:],
                },
        }
        chanPointShim := &lnrpc.ChanPointShim{
                Amt:       int64(chanSize),
                ChanPoint: chanPoint,
                LocalKey: &lnrpc.KeyDescriptor{
                        RawKeyBytes: daveFundingKey.RawKeyBytes,
                        KeyLoc: &lnrpc.KeyLocator{
                                KeyFamily: daveFundingKey.KeyLoc.KeyFamily,
                                KeyIndex:  daveFundingKey.KeyLoc.KeyIndex,
                        },
                },
                RemoteKey:     carolFundingKey.RawKeyBytes,
                PendingChanId: pendingChanID,
                ThawHeight:    thawHeight,
                Musig2:        musig2,
        }
        fundingShim := &lnrpc.FundingShim{
                Shim: &lnrpc.FundingShim_ChanPointShim{
                        ChanPointShim: chanPointShim,
                },
        }
        bob.RPC.FundingStateStep(&lnrpc.FundingTransitionMsg{
                Trigger: &lnrpc.FundingTransitionMsg_ShimRegister{
                        ShimRegister: fundingShim,
                },
        })

        // If we attempt to register the same shim (has the same pending chan
        // ID), then we should get an error.
        bob.RPC.FundingStateStepAssertErr(&lnrpc.FundingTransitionMsg{
                Trigger: &lnrpc.FundingTransitionMsg_ShimRegister{
                        ShimRegister: fundingShim,
                },
        })

        // We'll take the chan point shim we just registered for Dave (the
        // responder), and swap the local/remote keys before we feed it in as
        // Carol's funding shim as the initiator.
        fundingShim.GetChanPointShim().LocalKey = &lnrpc.KeyDescriptor{
                RawKeyBytes: carolFundingKey.RawKeyBytes,
                KeyLoc: &lnrpc.KeyLocator{
                        KeyFamily: carolFundingKey.KeyLoc.KeyFamily,
                        KeyIndex:  carolFundingKey.KeyLoc.KeyIndex,
                },
        }
        fundingShim.GetChanPointShim().RemoteKey = daveFundingKey.RawKeyBytes

        return fundingShim, chanPoint
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc