lightningnetwork / lnd / 11963610117

21 Nov 2024 11:38PM UTC coverage: 59.117% (+0.1%) from 58.98%

Pull #8754

ViktorTigerstrom
itest: wrap deriveCustomScopeAccounts at 80 chars

This commit fixes the word wrapping for the deriveCustomScopeAccounts
function docs, and ensures that it wraps at 80 characters or less.
Pull Request #8754: Add `Outbound` Remote Signer implementation

1950 of 2984 new or added lines in 44 files covered. (65.35%)

200 existing lines in 39 files now uncovered.

134504 of 227522 relevant lines covered (59.12%)

19449.04 hits per line

Source File

/lntest/harness.go (0.0% covered)
1
package lntest
2

3
import (
4
        "context"
5
        "encoding/hex"
6
        "fmt"
7
        "testing"
8
        "time"
9

10
        "github.com/btcsuite/btcd/blockchain"
11
        "github.com/btcsuite/btcd/btcutil"
12
        "github.com/btcsuite/btcd/chaincfg/chainhash"
13
        "github.com/btcsuite/btcd/txscript"
14
        "github.com/btcsuite/btcd/wire"
15
        "github.com/go-errors/errors"
16
        "github.com/lightningnetwork/lnd/fn"
17
        "github.com/lightningnetwork/lnd/kvdb/etcd"
18
        "github.com/lightningnetwork/lnd/lnrpc"
19
        "github.com/lightningnetwork/lnd/lnrpc/invoicesrpc"
20
        "github.com/lightningnetwork/lnd/lnrpc/routerrpc"
21
        "github.com/lightningnetwork/lnd/lnrpc/walletrpc"
22
        "github.com/lightningnetwork/lnd/lntest/miner"
23
        "github.com/lightningnetwork/lnd/lntest/node"
24
        "github.com/lightningnetwork/lnd/lntest/rpc"
25
        "github.com/lightningnetwork/lnd/lntest/wait"
26
        "github.com/lightningnetwork/lnd/lntypes"
27
        "github.com/lightningnetwork/lnd/lnwallet/chainfee"
28
        "github.com/lightningnetwork/lnd/lnwire"
29
        "github.com/stretchr/testify/require"
30
)
31

32
const (
33
        // defaultMinerFeeRate specifies the fee rate in sats when sending
34
        // outputs from the miner.
35
        defaultMinerFeeRate = 7500
36

37
        // numBlocksSendOutput specifies the number of blocks to mine after
38
        // sending outputs from the miner.
39
        numBlocksSendOutput = 2
40

41
        // numBlocksOpenChannel specifies the number of blocks mined when
42
        // opening a channel.
43
        numBlocksOpenChannel = 6
44

45
        // lndErrorChanSize specifies the buffer size used to receive errors
46
        // from lnd process.
47
        lndErrorChanSize = 10
48

49
        // maxBlocksAllowed specifies the max allowed value to be used when
50
        // mining blocks.
51
        maxBlocksAllowed = 100
52
)
53

54
// TestCase defines a test case that's used in the integration test.
55
type TestCase struct {
56
        // Name specifies the test name.
57
        Name string
58

59
        // TestFunc is the test case wrapped in a function.
60
        TestFunc func(t *HarnessTest)
61
}
62

63
// standbyNodes is a list of nodes which are created during the initialization
64
// of the test and used across all test cases.
65
type standbyNodes struct {
66
        // Alice and Bob are the initial seeder nodes that are automatically
67
        // created to be the initial participants of the test network.
68
        Alice *node.HarnessNode
69
        Bob   *node.HarnessNode
70
}
71

72
// HarnessTest builds on top of a testing.T with enhanced error detection. It
73
// is responsible for managing the interactions among different nodes, and
74
// providing easy-to-use assertions.
75
type HarnessTest struct {
76
        *testing.T
77

78
        // Embed the standbyNodes so we can easily access them via `ht.Alice`.
79
        standbyNodes
80

81
        // miner is a reference to a running full node that can be used to
82
        // create new blocks on the network.
83
        miner *miner.HarnessMiner
84

85
        // manager handles the start and stop of a given node.
86
        manager *nodeManager
87

88
        // feeService is a web service that provides external fee estimates to
89
        // lnd.
90
        feeService WebFeeService
91

92
        // Channel for transmitting stderr output from a failed lightning node
93
        // to the main process.
94
        lndErrorChan chan error
95

96
        // runCtx is a context with cancel method. It's used to signal when the
97
        // node needs to quit, and used as the parent context when spawning
98
        // children contexts for RPC requests.
99
        runCtx context.Context //nolint:containedctx
100
        cancel context.CancelFunc
101

102
        // stopChainBackend points to the cleanup function returned by the
103
        // chainBackend.
104
        stopChainBackend func()
105

106
        // cleaned specifies whether the cleanup has been applied for the
107
        // current HarnessTest.
108
        cleaned bool
109

110
        // currentHeight is the current height of the chain backend.
111
        currentHeight uint32
112
}
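// Illustrative sketch, not part of the original file: how a TestCase is
// typically declared against this harness. The case name and body below are
// hypothetical placeholders.
var exampleTestCase = &TestCase{
        Name: "example case",
        TestFunc: func(ht *HarnessTest) {
                // The standby nodes are reachable through the embedded
                // standbyNodes, e.g. ht.Alice and ht.Bob.
                ht.Log("running against", ht.Alice.Name(), ht.Bob.Name())
        },
}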
113

114
// harnessOpts contains functional options to modify the behavior of the various
115
// harness calls.
116
type harnessOpts struct {
117
        useAMP bool
118
}
119

120
// defaultHarnessOpts returns a new instance of the harnessOpts with default
121
// values specified.
122
func defaultHarnessOpts() harnessOpts {
×
123
        return harnessOpts{
×
124
                useAMP: false,
×
125
        }
×
126
}
×
127

128
// HarnessOpt is a functional option that can be used to modify the behavior of
129
// harness functionality.
130
type HarnessOpt func(*harnessOpts)
131

132
// WithAMP is a functional option that can be used to enable the AMP feature
133
// for sending payments.
134
func WithAMP() HarnessOpt {
×
135
        return func(h *harnessOpts) {
×
136
                h.useAMP = true
×
137
        }
×
138
}
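// Illustrative sketch, not part of the original file: how HarnessOpts are
// meant to be consumed by a harness call. The applyHarnessOpts helper is
// hypothetical, e.g. applyHarnessOpts(WithAMP()).useAMP reports true.
func applyHarnessOpts(opts ...HarnessOpt) harnessOpts {
        o := defaultHarnessOpts()
        for _, opt := range opts {
                opt(&o)
        }

        return o
}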
139

140
// NewHarnessTest creates a new instance of a harnessTest from a regular
141
// testing.T instance.
142
func NewHarnessTest(t *testing.T, lndBinary string, feeService WebFeeService,
143
        dbBackend node.DatabaseBackend, nativeSQL bool) *HarnessTest {
×
144

×
145
        t.Helper()
×
146

×
147
        // Create the run context.
×
148
        ctxt, cancel := context.WithCancel(context.Background())
×
149

×
150
        manager := newNodeManager(lndBinary, dbBackend, nativeSQL)
×
151

×
152
        return &HarnessTest{
×
153
                T:          t,
×
154
                manager:    manager,
×
155
                feeService: feeService,
×
156
                runCtx:     ctxt,
×
157
                cancel:     cancel,
×
158
                // We need to use a buffered channel here as we don't want to
×
159
                // block sending errors.
×
160
                lndErrorChan: make(chan error, lndErrorChanSize),
×
161
        }
×
162
}
×
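// Illustrative sketch, not part of the original file: the typical lifecycle of
// a HarnessTest. The exampleLifecycle name is hypothetical, and the fee
// service, chain backend and miner are assumed to be provided by the caller.
func exampleLifecycle(t *testing.T, lndBinary string, fee WebFeeService,
        chain node.BackendConfig, m *miner.HarnessMiner) {

        ht := NewHarnessTest(t, lndBinary, fee, node.BackendBbolt, false)
        ht.Start(chain, m)
        defer ht.Stop()

        // Create and fund the standby nodes Alice and Bob.
        ht.SetupStandbyNodes()
}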
163

164
// Start will assemble the chain backend and the miner for the HarnessTest. It
165
// also starts the fee service and watches lnd process error.
166
func (h *HarnessTest) Start(chain node.BackendConfig,
167
        miner *miner.HarnessMiner) {
×
168

×
169
        // Spawn a new goroutine to watch for any fatal errors that any of the
×
170
        // running lnd processes encounter. If an error occurs, then the test
×
171
        // case should naturally fail as a result and we log the server error here
×
172
        // to help debug.
×
173
        go func() {
×
174
                select {
×
175
                case err, more := <-h.lndErrorChan:
×
176
                        if !more {
×
177
                                return
×
178
                        }
×
179
                        h.Logf("lnd finished with error (stderr):\n%v", err)
×
180

181
                case <-h.runCtx.Done():
×
182
                        return
×
183
                }
184
        }()
185

186
        // Start the fee service.
187
        err := h.feeService.Start()
×
188
        require.NoError(h, err, "failed to start fee service")
×
189

×
190
        // Assemble the node manager with chainBackend and feeServiceURL.
×
191
        h.manager.chainBackend = chain
×
192
        h.manager.feeServiceURL = h.feeService.URL()
×
193

×
194
        // Assemble the miner.
×
195
        h.miner = miner
×
196

×
197
        // Update block height.
×
198
        h.updateCurrentHeight()
×
199
}
200

201
// ChainBackendName returns the chain backend name used in the test.
202
func (h *HarnessTest) ChainBackendName() string {
×
203
        return h.manager.chainBackend.Name()
×
204
}
×
205

206
// Context returns the run context used in this test. Usually it should be
207
// managed by the test itself otherwise undefined behaviors will occur. It can
208
// be used, however, when a test needs to have its own context being managed
209
// differently. In that case, instead of using a background context, the run
210
// context should be used such that the test context scope can be fully
211
// controlled.
212
func (h *HarnessTest) Context() context.Context {
×
213
        return h.runCtx
×
214
}
×
215

216
// setupWatchOnlyNode initializes a node with the watch-only accounts of an
217
// associated remote signing instance.
218
func (h *HarnessTest) setupWatchOnlyNode(name string,
219
        signerNode *node.HarnessNode, password []byte) *node.HarnessNode {
×
220

×
221
        // Prepare arguments for watch-only node connected to the remote signer.
×
222
        remoteSignerArgs := []string{
×
223
                "--remotesigner.enable",
×
224
                fmt.Sprintf("--remotesigner.rpchost=localhost:%d",
×
225
                        signerNode.Cfg.RPCPort),
×
226
                fmt.Sprintf("--remotesigner.tlscertpath=%s",
×
227
                        signerNode.Cfg.TLSCertPath),
×
228
                fmt.Sprintf("--remotesigner.macaroonpath=%s",
×
229
                        signerNode.Cfg.AdminMacPath),
×
230
        }
×
231

×
232
        // Fetch watch-only accounts from the signer node.
×
233
        resp := signerNode.RPC.ListAccounts(&walletrpc.ListAccountsRequest{})
×
234
        watchOnlyAccounts, err := walletrpc.AccountsToWatchOnly(resp.Accounts)
×
235
        require.NoErrorf(h, err, "unable to find watch only accounts for %s",
×
236
                name)
×
237

×
238
        // Create a new watch-only node with remote signer configuration.
×
NEW
239
        return h.NewNodeWatchOnly(
×
240
                name, remoteSignerArgs, password,
×
241
                &lnrpc.WatchOnly{
×
242
                        MasterKeyBirthdayTimestamp: 0,
×
243
                        MasterKeyFingerprint:       nil,
×
244
                        Accounts:                   watchOnlyAccounts,
×
245
                },
×
246
        )
×
247
}
×
248

249
// createAndSendOutput sends amt satoshis from the internal mining node to the
250
// targeted lightning node using a P2WKH address. No blocks are mined so
251
// transactions will sit unconfirmed in mempool.
252
func (h *HarnessTest) createAndSendOutput(target *node.HarnessNode,
253
        amt btcutil.Amount, addrType lnrpc.AddressType) {
×
254

×
255
        req := &lnrpc.NewAddressRequest{Type: addrType}
×
256
        resp := target.RPC.NewAddress(req)
×
257
        addr := h.DecodeAddress(resp.Address)
×
258
        addrScript := h.PayToAddrScript(addr)
×
259

×
260
        output := &wire.TxOut{
×
261
                PkScript: addrScript,
×
262
                Value:    int64(amt),
×
263
        }
×
264
        h.miner.SendOutput(output, defaultMinerFeeRate)
×
265
}
×
266

267
// SetupRemoteSigningStandbyNodes starts the initial seeder nodes within the
268
// test harness in a remote signing configuration. The initial node's wallets
269
// will be funded with 100x1 BTC outputs each.
270
func (h *HarnessTest) SetupRemoteSigningStandbyNodes() {
×
271
        h.Log("Setting up standby nodes Alice and Bob with remote " +
×
272
                "signing configurations...")
×
273
        defer h.Log("Finished the setup, now running tests...")
×
274

×
275
        password := []byte("itestpassword")
×
276

×
277
        // Setup remote signing nodes for Alice and Bob.
×
278
        signerAlice := h.NewNode("SignerAlice", nil)
×
279
        signerBob := h.NewNode("SignerBob", nil)
×
280

×
281
        // Setup watch-only nodes for Alice and Bob, each configured with their
×
282
        // own remote signing instance.
×
283
        h.Alice = h.setupWatchOnlyNode("Alice", signerAlice, password)
×
284
        h.Bob = h.setupWatchOnlyNode("Bob", signerBob, password)
×
285

×
286
        // Fund each node with 100 BTC (using 100 separate transactions).
×
287
        const fundAmount = 1 * btcutil.SatoshiPerBitcoin
×
288
        const numOutputs = 100
×
289
        const totalAmount = fundAmount * numOutputs
×
290
        for _, node := range []*node.HarnessNode{h.Alice, h.Bob} {
×
291
                h.manager.standbyNodes[node.Cfg.NodeID] = node
×
292
                for i := 0; i < numOutputs; i++ {
×
293
                        h.createAndSendOutput(
×
294
                                node, fundAmount,
×
295
                                lnrpc.AddressType_WITNESS_PUBKEY_HASH,
×
296
                        )
×
297
                }
×
298
        }
299

300
        // We generate several blocks in order to give the outputs created
301
        // above a good number of confirmations.
302
        const totalTxes = 200
×
303
        h.MineBlocksAndAssertNumTxes(numBlocksSendOutput, totalTxes)
×
304

×
305
        // Now we want to wait for the nodes to catch up.
×
306
        h.WaitForBlockchainSync(h.Alice)
×
307
        h.WaitForBlockchainSync(h.Bob)
×
308

×
309
        // Now block until both wallets have fully synced up.
×
310
        h.WaitForBalanceConfirmed(h.Alice, totalAmount)
×
311
        h.WaitForBalanceConfirmed(h.Bob, totalAmount)
×
312
}
313

314
// SetupStandbyNodes starts the initial seeder nodes within the test harness.
315
// The initial node's wallets will be funded with 100x1 BTC outputs each.
316
func (h *HarnessTest) SetupStandbyNodes() {
×
317
        h.Log("Setting up standby nodes Alice and Bob...")
×
318
        defer h.Log("Finished the setup, now running tests...")
×
319

×
320
        lndArgs := []string{
×
321
                "--default-remote-max-htlcs=483",
×
322
                "--channel-max-fee-exposure=5000000",
×
323
        }
×
324

×
325
        // Start the initial seeder nodes within the test network.
×
326
        h.Alice = h.NewNode("Alice", lndArgs)
×
327
        h.Bob = h.NewNode("Bob", lndArgs)
×
328

×
329
        // Load up the wallets of the seeder nodes with 100 outputs of 1 BTC
×
330
        // each.
×
331
        const fundAmount = 1 * btcutil.SatoshiPerBitcoin
×
332
        const numOutputs = 100
×
333
        const totalAmount = fundAmount * numOutputs
×
334
        for _, node := range []*node.HarnessNode{h.Alice, h.Bob} {
×
335
                h.manager.standbyNodes[node.Cfg.NodeID] = node
×
336
                for i := 0; i < numOutputs; i++ {
×
337
                        h.createAndSendOutput(
×
338
                                node, fundAmount,
×
339
                                lnrpc.AddressType_WITNESS_PUBKEY_HASH,
×
340
                        )
×
341
                }
×
342
        }
343

344
        // We generate several blocks in order to give the outputs created
345
        // above a good number of confirmations.
346
        const totalTxes = 200
×
347
        h.MineBlocksAndAssertNumTxes(numBlocksSendOutput, totalTxes)
×
348

×
349
        // Now we want to wait for the nodes to catch up.
×
350
        h.WaitForBlockchainSync(h.Alice)
×
351
        h.WaitForBlockchainSync(h.Bob)
×
352

×
353
        // Now block until both wallets have fully synced up.
×
354
        h.WaitForBalanceConfirmed(h.Alice, totalAmount)
×
355
        h.WaitForBalanceConfirmed(h.Bob, totalAmount)
×
356
}
357

358
// Stop stops the test harness.
359
func (h *HarnessTest) Stop() {
×
360
        // Do nothing if it's not started.
×
361
        if h.runCtx == nil {
×
362
                h.Log("HarnessTest is not started")
×
363
                return
×
364
        }
×
365

366
        h.shutdownAllNodes()
×
367

×
368
        close(h.lndErrorChan)
×
369

×
370
        // Stop the fee service.
×
371
        err := h.feeService.Stop()
×
372
        require.NoError(h, err, "failed to stop fee service")
×
373

×
374
        // Stop the chainBackend.
×
375
        h.stopChainBackend()
×
376

×
377
        // Stop the miner.
×
378
        h.miner.Stop()
×
379
}
380

381
// RunTestCase executes a harness test case. Any errors or panics will be
382
// represented as fatal.
383
func (h *HarnessTest) RunTestCase(testCase *TestCase) {
×
384
        defer func() {
×
385
                if err := recover(); err != nil {
×
386
                        description := errors.Wrap(err, 2).ErrorStack()
×
387
                        h.Fatalf("Failed: (%v) panic with: \n%v",
×
388
                                testCase.Name, description)
×
389
                }
×
390
        }()
391

392
        testCase.TestFunc(h)
×
393
}
394

395
// resetStandbyNodes resets all standby nodes by attaching the new testing.T
396
// and restarting them with the original config.
397
func (h *HarnessTest) resetStandbyNodes(t *testing.T) {
×
398
        t.Helper()
×
399

×
400
        for _, hn := range h.manager.standbyNodes {
×
401
                // Inherit the testing.T.
×
402
                h.T = t
×
403

×
404
                // Reset the config so the node will be using the default
×
405
                // config for the coming test. This will also inherit the
×
406
                // test's running context.
×
407
                h.RestartNodeWithExtraArgs(hn, hn.Cfg.OriginalExtraArgs)
×
408

×
409
                hn.AddToLogf("Finished test case %v", h.manager.currentTestCase)
×
410
        }
×
411
}
412

413
// Subtest creates a child HarnessTest, which inherits the harness net and
414
// standby nodes created by the parent test. It will return a cleanup function
415
// which resets all the standby nodes' configs back to their original state and
416
// creates snapshots of each node's internal state.
417
func (h *HarnessTest) Subtest(t *testing.T) *HarnessTest {
×
418
        t.Helper()
×
419

×
420
        st := &HarnessTest{
×
421
                T:            t,
×
422
                manager:      h.manager,
×
423
                miner:        h.miner,
×
424
                standbyNodes: h.standbyNodes,
×
425
                feeService:   h.feeService,
×
426
                lndErrorChan: make(chan error, lndErrorChanSize),
×
427
        }
×
428

×
429
        // Inherit context from the main test.
×
430
        st.runCtx, st.cancel = context.WithCancel(h.runCtx)
×
431

×
432
        // Inherit the subtest for the miner.
×
433
        st.miner.T = st.T
×
434

×
435
        // Reset the standby nodes.
×
436
        st.resetStandbyNodes(t)
×
437

×
438
        // Reset fee estimator.
×
439
        st.feeService.Reset()
×
440

×
441
        // Record block height.
×
442
        h.updateCurrentHeight()
×
443
        startHeight := int32(h.CurrentHeight())
×
444

×
445
        st.Cleanup(func() {
×
446
                _, endHeight := h.GetBestBlock()
×
447

×
448
                st.Logf("finished test: %s, start height=%d, end height=%d, "+
×
449
                        "mined blocks=%d", st.manager.currentTestCase,
×
450
                        startHeight, endHeight, endHeight-startHeight)
×
451

×
452
                // Don't bother running the cleanups if the test failed.
×
453
                if st.Failed() {
×
454
                        st.Log("test failed, skipped cleanup")
×
455
                        st.shutdownAllNodes()
×
456
                        return
×
457
                }
×
458

459
                // Don't run cleanup if it's already done. This can happen if
460
                // we have multiple levels of inheritance of the parent harness
461
                // test. For instance, a `Subtest(st)`.
462
                if st.cleaned {
×
463
                        st.Log("test already cleaned, skipped cleanup")
×
464
                        return
×
465
                }
×
466

467
                // When we finish the test, reset the nodes' configs and take a
468
                // snapshot of each of the nodes' internal states.
469
                for _, node := range st.manager.standbyNodes {
×
470
                        st.cleanupStandbyNode(node)
×
471
                }
×
472

473
                // If found running nodes, shut them down.
474
                st.shutdownNonStandbyNodes()
×
475

×
476
                // We require the mempool to be cleaned from the test.
×
477
                require.Empty(st, st.miner.GetRawMempool(), "mempool not "+
×
478
                        "cleaned, please mine blocks to clean them all.")
×
479

×
480
                // Finally, cancel the run context. We have to do it here
×
481
                // because we need to keep the context alive for the above
×
482
                // assertions used in cleanup.
×
483
                st.cancel()
×
484

×
485
                // We now want to mark the parent harness as cleaned to avoid
×
486
                // running cleanup again since its internal state has been
×
487
                // cleaned up by its child harness tests.
×
488
                h.cleaned = true
×
489
        })
490

491
        return st
×
492
}
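// Illustrative sketch, not part of the original file: driving a TestCase
// through a child harness created by Subtest. The runTestCase wrapper name is
// hypothetical.
func runTestCase(t *testing.T, ht *HarnessTest, tc *TestCase) {
        t.Run(tc.Name, func(t *testing.T) {
                st := ht.Subtest(t)
                st.SetTestName(tc.Name)
                st.RunTestCase(tc)
        })
}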
493

494
// shutdownNonStandbyNodes will shutdown any non-standby nodes.
495
func (h *HarnessTest) shutdownNonStandbyNodes() {
×
496
        h.shutdownNodes(true)
×
497
}
×
498

499
// shutdownAllNodes will shutdown all running nodes.
500
func (h *HarnessTest) shutdownAllNodes() {
×
501
        h.shutdownNodes(false)
×
502
}
×
503

504
// shutdownNodes will shutdown any non-standby nodes. If skipStandby is false,
505
// all the standby nodes will be shutdown too.
506
func (h *HarnessTest) shutdownNodes(skipStandby bool) {
×
507
        for nid, node := range h.manager.activeNodes {
×
508
                // If it's a standby node, skip.
×
509
                _, ok := h.manager.standbyNodes[nid]
×
510
                if ok && skipStandby {
×
511
                        continue
×
512
                }
513

514
                // The process may not be in a state to always shutdown
515
                // immediately, so we'll retry up to a hard limit to ensure we
516
                // eventually shutdown.
517
                err := wait.NoError(func() error {
×
518
                        return h.manager.shutdownNode(node)
×
519
                }, DefaultTimeout)
×
520

521
                if err == nil {
×
522
                        continue
×
523
                }
524

525
                // Instead of returning the error, we will log it. This
526
                // is needed so other nodes can continue their shutdown
527
                // processes.
528
                h.Logf("unable to shutdown %s, got err: %v", node.Name(), err)
×
529
        }
530
}
531

532
// cleanupStandbyNode is a function that should be called with defer whenever a
533
// subtest is created. It will reset the standby nodes configs, snapshot the
534
// states, and validate the node has a clean state.
535
func (h *HarnessTest) cleanupStandbyNode(hn *node.HarnessNode) {
×
536
        // Remove connections made from this test.
×
537
        h.removeConnectionns(hn)
×
538

×
539
        // Delete all payments made from this test.
×
540
        hn.RPC.DeleteAllPayments()
×
541

×
542
        // Check the node's current state with timeout.
×
543
        //
×
544
        // NOTE: we need to do this in a `wait` because it takes some time for
×
545
        // the node to update its internal state. Once the RPCs are synced we
×
546
        // can then remove this wait.
×
547
        err := wait.NoError(func() error {
×
548
                // Update the node's internal state.
×
549
                hn.UpdateState()
×
550

×
551
                // Check the node is in a clean state for the following tests.
×
552
                return h.validateNodeState(hn)
×
553
        }, wait.DefaultTimeout)
×
554
        require.NoError(h, err, "timeout checking node's state")
×
555
}
556

557
// removeConnectionns will remove all connections made on the standby nodes
558
// except the connections between Alice and Bob.
559
func (h *HarnessTest) removeConnectionns(hn *node.HarnessNode) {
×
560
        resp := hn.RPC.ListPeers()
×
561
        for _, peer := range resp.Peers {
×
562
                // Skip disconnecting Alice and Bob.
×
563
                switch peer.PubKey {
×
564
                case h.Alice.PubKeyStr:
×
565
                        continue
×
566
                case h.Bob.PubKeyStr:
×
567
                        continue
×
568
                }
569

570
                hn.RPC.DisconnectPeer(peer.PubKey)
×
571
        }
572
}
573

574
// SetTestName sets the test case name.
575
func (h *HarnessTest) SetTestName(name string) {
×
576
        h.manager.currentTestCase = name
×
577

×
578
        // Overwrite the old log filename so we can create new log files.
×
579
        for _, node := range h.manager.standbyNodes {
×
580
                node.Cfg.LogFilenamePrefix = name
×
581
        }
×
582
}
583

584
// NewNode creates a new node and asserts its creation. The node is guaranteed
585
// to have finished its initialization and all its subservers are started.
586
func (h *HarnessTest) NewNode(name string,
587
        extraArgs []string) *node.HarnessNode {
×
588

×
589
        node, err := h.manager.newNode(h.T, name, extraArgs, nil, false)
×
590
        require.NoErrorf(h, err, "unable to create new node for %s", name)
×
591

×
592
        // Start the node.
×
593
        err = node.Start(h.runCtx)
×
594
        require.NoError(h, err, "failed to start node %s", node.Name())
×
595

×
596
        return node
×
597
}
×
598

599
// Shutdown shuts down the given node and asserts that no errors occur.
600
func (h *HarnessTest) Shutdown(node *node.HarnessNode) {
×
601
        // The process may not be in a state to always shutdown immediately, so
×
602
        // we'll retry up to a hard limit to ensure we eventually shutdown.
×
603
        err := wait.NoError(func() error {
×
604
                return h.manager.shutdownNode(node)
×
605
        }, DefaultTimeout)
×
606

607
        require.NoErrorf(h, err, "unable to shutdown %v in %v", node.Name(),
×
608
                h.manager.currentTestCase)
×
609
}
610

611
// SuspendNode stops the given node and returns a callback that can be used to
612
// start it again.
613
func (h *HarnessTest) SuspendNode(node *node.HarnessNode) func() error {
×
614
        err := node.Stop()
×
615
        require.NoErrorf(h, err, "failed to stop %s", node.Name())
×
616

×
617
        // Remove the node from active nodes.
×
618
        delete(h.manager.activeNodes, node.Cfg.NodeID)
×
619

×
620
        return func() error {
×
621
                h.manager.registerNode(node)
×
622

×
623
                if err := node.Start(h.runCtx); err != nil {
×
624
                        return err
×
625
                }
×
626
                h.WaitForBlockchainSync(node)
×
627

×
628
                return nil
×
629
        }
630
}
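// Illustrative sketch, not part of the original file: temporarily taking a
// node offline with SuspendNode and bringing it back via the returned
// callback. The exampleSuspend name is hypothetical.
func exampleSuspend(ht *HarnessTest, hn *node.HarnessNode) {
        restart := ht.SuspendNode(hn)

        // Exercise whatever behavior requires hn to be offline here.

        require.NoError(ht, restart(), "failed to restart suspended node")
}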
631

632
// RestartNode restarts a given node, unlocks it and asserts it's successfully
633
// started.
634
func (h *HarnessTest) RestartNode(hn *node.HarnessNode) {
×
635
        err := h.manager.restartNode(h.runCtx, hn, nil)
×
636
        require.NoErrorf(h, err, "failed to restart node %s", hn.Name())
×
637

×
638
        err = h.manager.unlockNode(hn)
×
639
        require.NoErrorf(h, err, "failed to unlock node %s", hn.Name())
×
640

×
641
        if !hn.Cfg.SkipUnlock {
×
642
                // Give the node some time to catch up with the chain before we
×
643
                // continue with the tests.
×
644
                h.WaitForBlockchainSync(hn)
×
645
        }
×
646
}
647

648
// RestartNodeNoUnlock restarts a given node without unlocking its wallet.
649
func (h *HarnessTest) RestartNodeNoUnlock(hn *node.HarnessNode) {
×
650
        err := h.manager.restartNode(h.runCtx, hn, nil)
×
651
        require.NoErrorf(h, err, "failed to restart node %s", hn.Name())
×
652
}
×
653

654
// RestartNodeWithChanBackups restarts a given node with the specified channel
655
// backups.
656
func (h *HarnessTest) RestartNodeWithChanBackups(hn *node.HarnessNode,
657
        chanBackups ...*lnrpc.ChanBackupSnapshot) {
×
658

×
659
        err := h.manager.restartNode(h.runCtx, hn, nil)
×
660
        require.NoErrorf(h, err, "failed to restart node %s", hn.Name())
×
661

×
662
        err = h.manager.unlockNode(hn, chanBackups...)
×
663
        require.NoErrorf(h, err, "failed to unlock node %s", hn.Name())
×
664

×
665
        // Give the node some time to catch up with the chain before we
×
666
        // continue with the tests.
×
667
        h.WaitForBlockchainSync(hn)
×
668
}
×
669

670
// RestartNodeWithExtraArgs updates the node's config and restarts it.
671
func (h *HarnessTest) RestartNodeWithExtraArgs(hn *node.HarnessNode,
672
        extraArgs []string) {
×
673

×
674
        hn.SetExtraArgs(extraArgs)
×
675
        h.RestartNode(hn)
×
676
}
×
677

678
// NewNodeWithSeed fully initializes a new HarnessNode after creating a fresh
679
// aezeed. The provided password is used as both the aezeed password and the
680
// wallet password. The generated mnemonic is returned along with the
681
// initialized harness node.
682
func (h *HarnessTest) NewNodeWithSeed(name string,
683
        extraArgs []string, password []byte,
684
        statelessInit bool) (*node.HarnessNode, []string, []byte) {
×
685

×
686
        // Create a request to generate a new aezeed. The new seed will have
×
687
        // the same password as the internal wallet.
×
688
        req := &lnrpc.GenSeedRequest{
×
689
                AezeedPassphrase: password,
×
690
                SeedEntropy:      nil,
×
691
        }
×
692

×
693
        return h.newNodeWithSeed(name, extraArgs, req, statelessInit)
×
694
}
×
695

696
// newNodeWithSeed creates and initializes a new HarnessNode such that it'll be
697
// ready to accept RPC calls. A `GenSeedRequest` is needed to generate the
698
// seed.
699
func (h *HarnessTest) newNodeWithSeed(name string,
700
        extraArgs []string, req *lnrpc.GenSeedRequest,
701
        statelessInit bool) (*node.HarnessNode, []string, []byte) {
×
702

×
703
        node, err := h.manager.newNode(
×
704
                h.T, name, extraArgs, req.AezeedPassphrase, true,
×
705
        )
×
706
        require.NoErrorf(h, err, "unable to create new node for %s", name)
×
707

×
708
        // Start the node with seed only, which will only create the `State`
×
709
        // and `WalletUnlocker` clients.
×
710
        err = node.StartWithNoAuth(h.runCtx)
×
711
        require.NoErrorf(h, err, "failed to start node %s", node.Name())
×
712

×
713
        // Generate a new seed.
×
714
        genSeedResp := node.RPC.GenSeed(req)
×
715

×
716
        // With the seed created, construct the init request to the node,
×
717
        // including the newly generated seed.
×
718
        initReq := &lnrpc.InitWalletRequest{
×
719
                WalletPassword:     req.AezeedPassphrase,
×
720
                CipherSeedMnemonic: genSeedResp.CipherSeedMnemonic,
×
721
                AezeedPassphrase:   req.AezeedPassphrase,
×
722
                StatelessInit:      statelessInit,
×
723
        }
×
724

×
725
        // Pass the init request via rpc to finish unlocking the node. This
×
726
        // will also initialize the macaroon-authenticated LightningClient.
×
727
        adminMac, err := h.manager.initWalletAndNode(node, initReq)
×
728
        require.NoErrorf(h, err, "failed to unlock and init node %s",
×
729
                node.Name())
×
730

×
731
        // In stateless initialization mode we get a macaroon back that we have
×
732
        // to return to the test, otherwise gRPC calls won't be possible since
×
733
        // there are no macaroon files created in that mode.
×
734
        // In stateful init the admin macaroon will just be nil.
×
735
        return node, genSeedResp.CipherSeedMnemonic, adminMac
×
736
}
×
737

738
// RestoreNodeWithSeed fully initializes a HarnessNode using a chosen mnemonic,
739
// password, recovery window, and optionally a set of static channel backups.
740
// After providing the initialization request to unlock the node, this method
741
// will finish initializing the LightningClient such that the HarnessNode can
742
// be used for regular rpc operations.
743
func (h *HarnessTest) RestoreNodeWithSeed(name string, extraArgs []string,
744
        password []byte, mnemonic []string, rootKey string,
745
        recoveryWindow int32,
746
        chanBackups *lnrpc.ChanBackupSnapshot) *node.HarnessNode {
×
747

×
748
        n, err := h.manager.newNode(h.T, name, extraArgs, password, true)
×
749
        require.NoErrorf(h, err, "unable to create new node for %s", name)
×
750

×
751
        // Start the node with seed only, which will only create the `State`
×
752
        // and `WalletUnlocker` clients.
×
753
        err = n.StartWithNoAuth(h.runCtx)
×
754
        require.NoErrorf(h, err, "failed to start node %s", n.Name())
×
755

×
756
        // Create the wallet.
×
757
        initReq := &lnrpc.InitWalletRequest{
×
758
                WalletPassword:     password,
×
759
                CipherSeedMnemonic: mnemonic,
×
760
                AezeedPassphrase:   password,
×
761
                ExtendedMasterKey:  rootKey,
×
762
                RecoveryWindow:     recoveryWindow,
×
763
                ChannelBackups:     chanBackups,
×
764
        }
×
765
        _, err = h.manager.initWalletAndNode(n, initReq)
×
766
        require.NoErrorf(h, err, "failed to unlock and init node %s",
×
767
                n.Name())
×
768

×
769
        return n
×
770
}
×
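// Illustrative sketch, not part of the original file: creating a node from a
// fresh seed and later restoring it from the returned mnemonic. The
// exampleSeedRestore name and the "Carol" node are hypothetical.
func exampleSeedRestore(ht *HarnessTest, password []byte) {
        carol, mnemonic, _ := ht.NewNodeWithSeed("Carol", nil, password, false)

        // Tear the node down, then recreate it from the same seed with a zero
        // recovery window and no channel backups.
        ht.Shutdown(carol)
        _ = ht.RestoreNodeWithSeed(
                "Carol", nil, password, mnemonic, "", 0, nil,
        )
}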
771

772
// NewNodeEtcd starts a new node with seed that'll use an external etcd
773
// database as its storage. The passed cluster flag indicates that we'd like
774
// the node to join the cluster leader election. We won't wait until RPC is
775
// available (this is useful when the node is not expected to become the leader
776
// right away).
777
func (h *HarnessTest) NewNodeEtcd(name string, etcdCfg *etcd.Config,
778
        password []byte, cluster bool,
779
        leaderSessionTTL int) *node.HarnessNode {
×
780

×
781
        // We don't want to use the embedded etcd instance.
×
782
        h.manager.dbBackend = node.BackendBbolt
×
783

×
784
        extraArgs := node.ExtraArgsEtcd(
×
785
                etcdCfg, name, cluster, leaderSessionTTL,
×
786
        )
×
787
        node, err := h.manager.newNode(h.T, name, extraArgs, password, true)
×
788
        require.NoError(h, err, "failed to create new node with etcd")
×
789

×
790
        // Start the node daemon only.
×
791
        err = node.StartLndCmd(h.runCtx)
×
792
        require.NoError(h, err, "failed to start node %s", node.Name())
×
793

×
794
        return node
×
795
}
×
796

797
// NewNodeWithSeedEtcd starts a new node with seed that'll use an external etcd
798
// database as its storage. The passed cluster flag indicates that we'd like
799
// the node to join the cluster leader election.
800
func (h *HarnessTest) NewNodeWithSeedEtcd(name string, etcdCfg *etcd.Config,
801
        password []byte, statelessInit, cluster bool,
802
        leaderSessionTTL int) (*node.HarnessNode, []string, []byte) {
×
803

×
804
        // We don't want to use the embedded etcd instance.
×
805
        h.manager.dbBackend = node.BackendBbolt
×
806

×
807
        // Create a request to generate a new aezeed. The new seed will have
×
808
        // the same password as the internal wallet.
×
809
        req := &lnrpc.GenSeedRequest{
×
810
                AezeedPassphrase: password,
×
811
                SeedEntropy:      nil,
×
812
        }
×
813

×
814
        extraArgs := node.ExtraArgsEtcd(
×
815
                etcdCfg, name, cluster, leaderSessionTTL,
×
816
        )
×
817

×
818
        return h.newNodeWithSeed(name, extraArgs, req, statelessInit)
×
819
}
×
820

821
// NewNodeWatchOnly creates a new watch-only node and asserts its
822
// creation.
823
func (h *HarnessTest) NewNodeWatchOnly(name string, extraArgs []string,
824
        password []byte, watchOnly *lnrpc.WatchOnly) *node.HarnessNode {
×
825

×
NEW
826
        hn := h.CreateNewNode(name, extraArgs, password, true)
×
NEW
827

×
NEW
828
        h.StartWatchOnly(hn, name, password, watchOnly)
×
NEW
829

×
NEW
830
        return hn
×
NEW
831
}
×
832

833
// CreateNewNode creates a new node and asserts its creation. The function
834
// will only create the node and will not start it.
835
func (h *HarnessTest) CreateNewNode(name string, extraArgs []string,
NEW
836
        password []byte, noAuth bool) *node.HarnessNode {
×
NEW
837

×
NEW
838
        hn, err := h.manager.newNode(h.T, name, extraArgs, password, noAuth)
×
839
        require.NoErrorf(h, err, "unable to create new node for %s", name)
×
840

×
NEW
841
        return hn
×
NEW
842
}
×
843

844
// StartWatchOnly starts the passed node in watch-only mode. The function will
845
// assert that the node is started and that the initialization is successful.
846
func (h *HarnessTest) StartWatchOnly(hn *node.HarnessNode, name string,
NEW
847
        password []byte, watchOnly *lnrpc.WatchOnly) {
×
NEW
848

×
NEW
849
        err := hn.StartWithNoAuth(h.runCtx)
×
850
        require.NoError(h, err, "failed to start node %s", name)
×
851

×
852
        // With the seed created, construct the init request to the node,
×
853
        // including the newly generated seed.
×
854
        initReq := &lnrpc.InitWalletRequest{
×
855
                WalletPassword: password,
×
856
                WatchOnly:      watchOnly,
×
857
        }
×
858

×
859
        // Pass the init request via rpc to finish unlocking the node. This
×
860
        // will also initialize the macaroon-authenticated LightningClient.
×
861
        _, err = h.manager.initWalletAndNode(hn, initReq)
×
862
        require.NoErrorf(h, err, "failed to init node %s", name)
×
863
}
×
864

865
// KillNode kills the node and waits for the node process to stop.
866
func (h *HarnessTest) KillNode(hn *node.HarnessNode) {
×
867
        h.Logf("Manually killing the node %s", hn.Name())
×
868
        require.NoErrorf(h, hn.KillAndWait(), "%s: kill got error", hn.Name())
×
869
        delete(h.manager.activeNodes, hn.Cfg.NodeID)
×
870
}
×
871

872
// SetFeeEstimate sets a fee rate to be returned from fee estimator.
873
//
874
// NOTE: this method will set the fee rate for a conf target of 1, which is the
875
// fallback fee rate for a `WebAPIEstimator` if a higher conf target's fee rate
876
// is not set. This means if the fee rate for conf target 6 is set, the fee
877
// estimator will use that value instead.
878
func (h *HarnessTest) SetFeeEstimate(fee chainfee.SatPerKWeight) {
×
879
        h.feeService.SetFeeRate(fee, 1)
×
880
}
×
881

882
// SetFeeEstimateWithConf sets a fee rate of a specified conf target to be
883
// returned from fee estimator.
884
func (h *HarnessTest) SetFeeEstimateWithConf(
885
        fee chainfee.SatPerKWeight, conf uint32) {
×
886

×
887
        h.feeService.SetFeeRate(fee, conf)
×
888
}
×
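// Illustrative sketch, not part of the original file: seeding the external fee
// estimator before exercising fee-sensitive behavior. The exampleFees name and
// the fee values are arbitrary.
func exampleFees(ht *HarnessTest) {
        // Fallback fee rate, i.e. conf target 1.
        ht.SetFeeEstimate(chainfee.SatPerKWeight(12500))

        // Fee rate for a specific conf target.
        ht.SetFeeEstimateWithConf(chainfee.SatPerKWeight(2500), 6)

        // Minimum relay fee rate.
        ht.SetMinRelayFeerate(chainfee.SatPerKVByte(1000))
}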
889

890
// SetMinRelayFeerate sets a min relay fee rate to be returned from fee
891
// estimator.
892
func (h *HarnessTest) SetMinRelayFeerate(fee chainfee.SatPerKVByte) {
×
893
        h.feeService.SetMinRelayFeerate(fee)
×
894
}
×
895

896
// validateNodeState checks that the node doesn't have any uncleaned states
897
// which will affect its following tests.
898
func (h *HarnessTest) validateNodeState(hn *node.HarnessNode) error {
×
899
        errStr := func(subject string) error {
×
900
                return fmt.Errorf("%s: found %s channels, please close "+
×
901
                        "them properly", hn.Name(), subject)
×
902
        }
×
903
        // If the node still has open channels, it's most likely that the
904
        // current test didn't close it properly.
905
        if hn.State.OpenChannel.Active != 0 {
×
906
                return errStr("active")
×
907
        }
×
908
        if hn.State.OpenChannel.Public != 0 {
×
909
                return errStr("public")
×
910
        }
×
911
        if hn.State.OpenChannel.Private != 0 {
×
912
                return errStr("private")
×
913
        }
×
914
        if hn.State.OpenChannel.Pending != 0 {
×
915
                return errStr("pending open")
×
916
        }
×
917

918
        // The number of pending force close channels should be zero.
919
        if hn.State.CloseChannel.PendingForceClose != 0 {
×
920
                return errStr("pending force")
×
921
        }
×
922

923
        // The number of waiting close channels should be zero.
924
        if hn.State.CloseChannel.WaitingClose != 0 {
×
925
                return errStr("waiting close")
×
926
        }
×
927

928
        // The number of payments should be zero.
929
        if hn.State.Payment.Total != 0 {
×
930
                return fmt.Errorf("%s: found uncleaned payments, please "+
×
931
                        "delete all of them properly", hn.Name())
×
932
        }
×
933

934
        // The number of public edges should be zero.
935
        if hn.State.Edge.Public != 0 {
×
936
                return fmt.Errorf("%s: found active public egdes, please "+
×
937
                        "clean them properly", hn.Name())
×
938
        }
×
939

940
        // The number of edges should be zero.
941
        if hn.State.Edge.Total != 0 {
×
942
                return fmt.Errorf("%s: found active edges, please "+
×
943
                        "clean them properly", hn.Name())
×
944
        }
×
945

946
        return nil
×
947
}
948

949
// GetChanPointFundingTxid takes a channel point and converts it into a chain
950
// hash.
951
func (h *HarnessTest) GetChanPointFundingTxid(
952
        cp *lnrpc.ChannelPoint) chainhash.Hash {
×
953

×
954
        txid, err := lnrpc.GetChanPointFundingTxid(cp)
×
955
        require.NoError(h, err, "unable to get txid")
×
956

×
957
        return *txid
×
958
}
×
959

960
// OutPointFromChannelPoint creates an outpoint from a given channel point.
961
func (h *HarnessTest) OutPointFromChannelPoint(
962
        cp *lnrpc.ChannelPoint) wire.OutPoint {
×
963

×
964
        txid := h.GetChanPointFundingTxid(cp)
×
965
        return wire.OutPoint{
×
966
                Hash:  txid,
×
967
                Index: cp.OutputIndex,
×
968
        }
×
969
}
×
970

971
// OpenChannelParams houses the params to specify when opening a new channel.
972
type OpenChannelParams struct {
973
        // Amt is the local amount being put into the channel.
974
        Amt btcutil.Amount
975

976
        // PushAmt is the amount that should be pushed to the remote when the
977
        // channel is opened.
978
        PushAmt btcutil.Amount
979

980
        // Private is a boolean indicating whether the opened channel should be
981
        // private.
982
        Private bool
983

984
        // SpendUnconfirmed is a boolean indicating whether we can utilize
985
        // unconfirmed outputs to fund the channel.
986
        SpendUnconfirmed bool
987

988
        // MinHtlc is the htlc_minimum_msat value set when opening the channel.
989
        MinHtlc lnwire.MilliSatoshi
990

991
        // RemoteMaxHtlcs is the remote_max_htlcs value set when opening the
992
        // channel, restricting the number of concurrent HTLCs the remote party
993
        // can add to a commitment.
994
        RemoteMaxHtlcs uint16
995

996
        // FundingShim is an optional funding shim that the caller can specify
997
        // in order to modify the channel funding workflow.
998
        FundingShim *lnrpc.FundingShim
999

1000
        // SatPerVByte is the amount of satoshis to spend in chain fees per
1001
        // virtual byte of the transaction.
1002
        SatPerVByte btcutil.Amount
1003

1004
        // ConfTarget is the number of blocks that the funding transaction
1005
        // should be confirmed in.
1006
        ConfTarget fn.Option[int32]
1007

1008
        // CommitmentType is the commitment type that should be used for the
1009
        // channel to be opened.
1010
        CommitmentType lnrpc.CommitmentType
1011

1012
        // ZeroConf is used to determine if the channel will be a zero-conf
1013
        // channel. This only works if the explicit negotiation is used with
1014
        // anchors or script enforced leases.
1015
        ZeroConf bool
1016

1017
        // ScidAlias denotes whether the channel will be an option-scid-alias
1018
        // channel type negotiation.
1019
        ScidAlias bool
1020

1021
        // BaseFee is the channel base fee applied during the channel
1022
        // announcement phase.
1023
        BaseFee uint64
1024

1025
        // FeeRate is the channel fee rate in ppm applied during the channel
1026
        // announcement phase.
1027
        FeeRate uint64
1028

1029
        // UseBaseFee, if set, instructs the downstream logic to apply the
1030
        // user-specified channel base fee to the channel update announcement.
1031
        // If set to false it avoids applying a base fee of 0 and instead
1032
        // activates the default configured base fee.
1033
        UseBaseFee bool
1034

1035
        // UseFeeRate, if set, instructs the downstream logic to apply the
1036
        // user-specified channel fee rate to the channel update announcement.
1037
        // If set to false it avoids applying a fee rate of 0 and instead
1038
        // activates the default configured fee rate.
1039
        UseFeeRate bool
1040

1041
        // FundMax is a boolean indicating whether the channel should be funded
1042
        // with the maximum possible amount from the wallet.
1043
        FundMax bool
1044

1045
        // An optional note-to-self containing some useful information about the
1046
        // channel. This is stored locally only, and is purely for reference. It
1047
        // has no bearing on the channel's operation. Max allowed length is 500
1048
        // characters.
1049
        Memo string
1050

1051
        // Outpoints is a list of client-selected outpoints that should be used
1052
        // for funding a channel. If Amt is specified then this amount is
1053
        // allocated from the sum of outpoints towards funding. If the
1054
        // FundMax flag is specified the entirety of selected funds is
1055
        // allocated towards channel funding.
1056
        Outpoints []*lnrpc.OutPoint
1057

1058
        // CloseAddress sets the upfront_shutdown_script parameter during
1059
        // channel open. It is expected to be encoded as a bitcoin address.
1060
        CloseAddress string
1061
}
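// Illustrative sketch, not part of the original file: a minimal
// OpenChannelParams literal used with OpenChannel (defined further below). The
// exampleOpenChannel name and the amounts are hypothetical.
func exampleOpenChannel(ht *HarnessTest, alice, bob *node.HarnessNode) {
        chanPoint := ht.OpenChannel(alice, bob, OpenChannelParams{
                Amt:     btcutil.Amount(1_000_000),
                PushAmt: btcutil.Amount(250_000),
                Private: true,
        })

        ht.Logf("opened channel %v", chanPoint)
}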
1062

1063
// prepareOpenChannel waits for both nodes to be synced to chain and returns an
1064
// OpenChannelRequest.
1065
func (h *HarnessTest) prepareOpenChannel(srcNode, destNode *node.HarnessNode,
1066
        p OpenChannelParams) *lnrpc.OpenChannelRequest {
×
1067

×
1068
        // Wait until srcNode and destNode have the latest chain synced.
×
1069
        // Otherwise, we may run into a check within the funding manager that
×
1070
        // prevents any funding workflows from being kicked off if the chain
×
1071
        // isn't yet synced.
×
1072
        h.WaitForBlockchainSync(srcNode)
×
1073
        h.WaitForBlockchainSync(destNode)
×
1074

×
1075
        // Specify the minimal confirmations of the UTXOs used for channel
×
1076
        // funding.
×
1077
        minConfs := int32(1)
×
1078
        if p.SpendUnconfirmed {
×
1079
                minConfs = 0
×
1080
        }
×
1081

1082
        // Get the requested conf target. If not set, default to 6.
1083
        confTarget := p.ConfTarget.UnwrapOr(6)
×
1084

×
1085
        // If there's fee rate set, unset the conf target.
×
1086
        if p.SatPerVByte != 0 {
×
1087
                confTarget = 0
×
1088
        }
×
1089

1090
        // Prepare the request.
1091
        return &lnrpc.OpenChannelRequest{
×
1092
                NodePubkey:         destNode.PubKey[:],
×
1093
                LocalFundingAmount: int64(p.Amt),
×
1094
                PushSat:            int64(p.PushAmt),
×
1095
                Private:            p.Private,
×
1096
                TargetConf:         confTarget,
×
1097
                MinConfs:           minConfs,
×
1098
                SpendUnconfirmed:   p.SpendUnconfirmed,
×
1099
                MinHtlcMsat:        int64(p.MinHtlc),
×
1100
                RemoteMaxHtlcs:     uint32(p.RemoteMaxHtlcs),
×
1101
                FundingShim:        p.FundingShim,
×
1102
                SatPerVbyte:        uint64(p.SatPerVByte),
×
1103
                CommitmentType:     p.CommitmentType,
×
1104
                ZeroConf:           p.ZeroConf,
×
1105
                ScidAlias:          p.ScidAlias,
×
1106
                BaseFee:            p.BaseFee,
×
1107
                FeeRate:            p.FeeRate,
×
1108
                UseBaseFee:         p.UseBaseFee,
×
1109
                UseFeeRate:         p.UseFeeRate,
×
1110
                FundMax:            p.FundMax,
×
1111
                Memo:               p.Memo,
×
1112
                Outpoints:          p.Outpoints,
×
1113
                CloseAddress:       p.CloseAddress,
×
1114
        }
×
1115
}
1116

1117
// openChannelAssertPending attempts to open a channel between srcNode and
1118
// destNode with the passed channel funding parameters. Once the `OpenChannel`
1119
// is called, it will consume the first event it receives from the open channel
1120
// client and asserts it's a channel pending event.
1121
func (h *HarnessTest) openChannelAssertPending(srcNode,
1122
        destNode *node.HarnessNode,
1123
        p OpenChannelParams) (*lnrpc.PendingUpdate, rpc.OpenChanClient) {
×
1124

×
1125
        // Prepare the request and open the channel.
×
1126
        openReq := h.prepareOpenChannel(srcNode, destNode, p)
×
1127
        respStream := srcNode.RPC.OpenChannel(openReq)
×
1128

×
1129
        // Consume the "channel pending" update. This waits until the node
×
1130
        // notifies us that the final message in the channel funding workflow
×
1131
        // has been sent to the remote node.
×
1132
        resp := h.ReceiveOpenChannelUpdate(respStream)
×
1133

×
1134
        // Check that the update is channel pending.
×
1135
        update, ok := resp.Update.(*lnrpc.OpenStatusUpdate_ChanPending)
×
1136
        require.Truef(h, ok, "expected channel pending: update, instead got %v",
×
1137
                resp)
×
1138

×
1139
        return update.ChanPending, respStream
×
1140
}
×
1141

1142
// OpenChannelAssertPending attempts to open a channel between srcNode and
1143
// destNode with the passed channel funding parameters. Once the `OpenChannel`
1144
// is called, it will consume the first event it receives from the open channel
1145
// client and asserts it's a channel pending event. It returns the
1146
// `PendingUpdate`.
1147
func (h *HarnessTest) OpenChannelAssertPending(srcNode,
1148
        destNode *node.HarnessNode, p OpenChannelParams) *lnrpc.PendingUpdate {
×
1149

×
1150
        resp, _ := h.openChannelAssertPending(srcNode, destNode, p)
×
1151
        return resp
×
1152
}
×
1153

1154
// OpenChannelAssertStream attempts to open a channel between srcNode and
1155
// destNode with the passed channel funding parameters. Once the `OpenChannel`
1156
// is called, it will consume the first event it receives from the open channel
1157
// client and asserts it's a channel pending event. It returns the open channel
1158
// stream.
1159
func (h *HarnessTest) OpenChannelAssertStream(srcNode,
1160
        destNode *node.HarnessNode, p OpenChannelParams) rpc.OpenChanClient {
×
1161

×
1162
        _, stream := h.openChannelAssertPending(srcNode, destNode, p)
×
1163
        return stream
×
1164
}
×
1165

1166
// OpenChannel attempts to open a channel with the specified parameters
1167
// extended from Alice to Bob. Additionally, for public channels, it will mine
1168
// extra blocks so they are announced to the network. Specifically, the
1169
// following items are asserted,
1170
//   - for a non-zero conf channel, 1 block will be mined to confirm the funding
1171
//     tx.
1172
//   - both nodes should see the channel edge update in their network graph.
1173
//   - both nodes can report the status of the new channel from ListChannels.
1174
//   - extra blocks are mined if it's a public channel.
1175
func (h *HarnessTest) OpenChannel(alice, bob *node.HarnessNode,
1176
        p OpenChannelParams) *lnrpc.ChannelPoint {
×
1177

×
1178
        // First, open the channel without announcing it.
×
1179
        cp := h.OpenChannelNoAnnounce(alice, bob, p)
×
1180

×
1181
        // If this is a private channel, there's no need to mine extra blocks
×
1182
        // since it will never be announced to the network.
×
1183
        if p.Private {
×
1184
                return cp
×
1185
        }
×
1186

1187
        // Mine extra blocks to announce the channel.
1188
        if p.ZeroConf {
×
1189
                // For a zero-conf channel, no blocks have been mined so we
×
1190
                // need to mine 6 blocks.
×
1191
                //
×
1192
                // Mine 6 blocks, which also confirms the funding transaction.
×
1193
                h.MineBlocksAndAssertNumTxes(numBlocksOpenChannel, 1)
×
1194
        } else {
×
1195
                // For a regular channel, 1 block has already been mined to
×
1196
                // confirm the funding transaction, so we mine 5 blocks.
×
1197
                h.MineBlocks(numBlocksOpenChannel - 1)
×
1198
        }
×
1199

1200
        return cp
×
1201
}
1202

1203
// OpenChannelNoAnnounce attempts to open a channel with the specified
1204
// parameters extended from Alice to Bob without mining the necessary blocks to
1205
// announce the channel. Additionally, the following items are asserted,
1206
//   - for a non-zero conf channel, 1 block will be mined to confirm the funding
1207
//     tx.
1208
//   - both nodes should see the channel edge update in their network graph.
1209
//   - both nodes can report the status of the new channel from ListChannels.
1210
func (h *HarnessTest) OpenChannelNoAnnounce(alice, bob *node.HarnessNode,
1211
        p OpenChannelParams) *lnrpc.ChannelPoint {
×
1212

×
1213
        chanOpenUpdate := h.OpenChannelAssertStream(alice, bob, p)
×
1214

×
1215
        // Open a zero conf channel.
×
1216
        if p.ZeroConf {
×
1217
                return h.openChannelZeroConf(alice, bob, chanOpenUpdate)
×
1218
        }
×
1219

1220
        // Open a non-zero conf channel.
1221
        return h.openChannel(alice, bob, chanOpenUpdate)
×
1222
}
1223

1224
// openChannel attempts to open a channel with the specified parameters
1225
// extended from Alice to Bob. Additionally, the following items are asserted,
1226
//   - 1 block is mined and the funding transaction should be found in it.
1227
//   - both nodes should see the channel edge update in their network graph.
1228
//   - both nodes can report the status of the new channel from ListChannels.
1229
func (h *HarnessTest) openChannel(alice, bob *node.HarnessNode,
1230
        stream rpc.OpenChanClient) *lnrpc.ChannelPoint {
×
1231

×
1232
        // Mine 1 block to confirm the funding transaction.
×
1233
        block := h.MineBlocksAndAssertNumTxes(1, 1)[0]
×
1234

×
1235
        // Wait for the channel open event.
×
1236
        fundingChanPoint := h.WaitForChannelOpenEvent(stream)
×
1237

×
1238
        // Check that the funding tx is found in the first block.
×
1239
        fundingTxID := h.GetChanPointFundingTxid(fundingChanPoint)
×
1240
        h.AssertTxInBlock(block, fundingTxID)
×
1241

×
1242
        // Check that both alice and bob have seen the channel from their
×
1243
        // network topology.
×
1244
        h.AssertChannelInGraph(alice, fundingChanPoint)
×
1245
        h.AssertChannelInGraph(bob, fundingChanPoint)
×
1246

×
1247
        // Check that the channel can be seen in their ListChannels.
×
1248
        h.AssertChannelExists(alice, fundingChanPoint)
×
1249
        h.AssertChannelExists(bob, fundingChanPoint)
×
1250

×
1251
        return fundingChanPoint
×
1252
}
×
1253

1254
// openChannelZeroConf attempts to open a channel with the specified parameters
1255
// extended from Alice to Bob. Additionally, the following items are asserted,
1256
//   - both nodes should see the channel edge update in their network graph.
1257
//   - both nodes can report the status of the new channel from ListChannels.
1258
func (h *HarnessTest) openChannelZeroConf(alice, bob *node.HarnessNode,
1259
        stream rpc.OpenChanClient) *lnrpc.ChannelPoint {
×
1260

×
1261
        // Wait for the channel open event.
×
1262
        fundingChanPoint := h.WaitForChannelOpenEvent(stream)
×
1263

×
1264
        // Check that both alice and bob have seen the channel from their
×
1265
        // network topology.
×
1266
        h.AssertChannelInGraph(alice, fundingChanPoint)
×
1267
        h.AssertChannelInGraph(bob, fundingChanPoint)
×
1268

×
1269
        // Finally, check that the channel can be seen in their ListChannels.
×
1270
        h.AssertChannelExists(alice, fundingChanPoint)
×
1271
        h.AssertChannelExists(bob, fundingChanPoint)
×
1272

×
1273
        return fundingChanPoint
×
1274
}
×
1275

1276
// OpenChannelAssertErr opens a channel between node srcNode and destNode,
1277
// asserts that the expected error is returned from the channel opening.
1278
func (h *HarnessTest) OpenChannelAssertErr(srcNode, destNode *node.HarnessNode,
1279
        p OpenChannelParams, expectedErr error) {
×
1280

×
1281
        // Prepare the request and open the channel.
×
1282
        openReq := h.prepareOpenChannel(srcNode, destNode, p)
×
1283
        respStream := srcNode.RPC.OpenChannel(openReq)
×
1284

×
1285
        // Receive an error to be sent from the stream.
×
1286
        _, err := h.receiveOpenChannelUpdate(respStream)
×
1287
        require.NotNil(h, err, "expected channel opening to fail")
×
1288

×
1289
        // Use string comparison here as we haven't codified all the RPC errors
×
1290
        // yet.
×
1291
        require.Containsf(h, err.Error(), expectedErr.Error(), "unexpected "+
×
1292
                "error returned, want %v, got %v", expectedErr, err)
×
1293
}
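
// NOTE (editor's example, not part of the original file): a minimal sketch of
// using OpenChannelAssertErr. The node handles and the expected error value
// are assumptions supplied by the caller; only the text of expectedErr is
// matched as a substring of the RPC error, and the amount is illustrative.
func exampleOpenChannelAssertErr(ht *HarnessTest,
        srcNode, destNode *node.HarnessNode, expectedErr error) {

        // Try to open a channel with the given params and assert that the
        // funding flow fails with an error containing expectedErr's text.
        ht.OpenChannelAssertErr(srcNode, destNode, OpenChannelParams{
                Amt: btcutil.Amount(1_000_000),
        }, expectedErr)
}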

// CloseChannelAssertPending attempts to close the channel indicated by the
// passed channel point, initiated by the passed node. Once the CloseChannel
// rpc is called, it will consume one event and assert it's a close pending
// event. In addition, it will check that the closing tx can be found in the
// mempool.
func (h *HarnessTest) CloseChannelAssertPending(hn *node.HarnessNode,
        cp *lnrpc.ChannelPoint,
        force bool) (rpc.CloseChanClient, chainhash.Hash) {

        // Call the rpc to close the channel.
        closeReq := &lnrpc.CloseChannelRequest{
                ChannelPoint: cp,
                Force:        force,
                NoWait:       true,
        }

        // For coop close, we use a default conf target of 6.
        if !force {
                closeReq.TargetConf = 6
        }

        var (
                stream rpc.CloseChanClient
                event  *lnrpc.CloseStatusUpdate
                err    error
        )

        // Consume the "channel close" update in order to wait for the closing
        // transaction to be broadcast, then wait for the closing tx to be seen
        // within the network.
        stream = hn.RPC.CloseChannel(closeReq)
        _, err = h.ReceiveCloseChannelUpdate(stream)
        require.NoError(h, err, "close channel update got error: %v", err)

        event, err = h.ReceiveCloseChannelUpdate(stream)
        if err != nil {
                h.Logf("Test: %s, close channel got error: %v",
                        h.manager.currentTestCase, err)
        }
        require.NoError(h, err, "retry closing channel failed")

        pendingClose, ok := event.Update.(*lnrpc.CloseStatusUpdate_ClosePending)
        require.Truef(h, ok, "expected channel close update, instead got %v",
                pendingClose)

        closeTxid, err := chainhash.NewHash(pendingClose.ClosePending.Txid)
        require.NoErrorf(h, err, "unable to decode closeTxid: %v",
                pendingClose.ClosePending.Txid)

        // Assert the closing tx is in the mempool.
        h.miner.AssertTxInMempool(*closeTxid)

        return stream, *closeTxid
}
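
// NOTE (editor's example, not part of the original file): a sketch showing how
// CloseChannelAssertPending pairs with AssertStreamChannelCoopClosed. It
// mirrors what CloseChannel does internally; a real test would typically add
// extra assertions between the two calls. The node handle and channel point
// are assumptions of the caller.
func exampleCoopClosePending(ht *HarnessTest, alice *node.HarnessNode,
        cp *lnrpc.ChannelPoint) chainhash.Hash {

        // Initiate a cooperative close; this returns once the close pending
        // update is received and the closing tx has entered the mempool.
        stream, _ := ht.CloseChannelAssertPending(alice, cp, false)

        // ... additional assertions about the pending close go here ...

        // Mine the closing tx and assert the coop close completes, reusing
        // the same close update stream.
        return ht.AssertStreamChannelCoopClosed(alice, cp, false, stream)
}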

// CloseChannel attempts to coop close a non-anchored channel identified by the
// passed channel point owned by the passed harness node. The following items
// are asserted,
//  1. a close pending event is sent from the close channel client.
//  2. the closing tx is found in the mempool.
//  3. the node reports the channel being waiting to close.
//  4. a block is mined and the closing tx should be found in it.
//  5. the node reports zero waiting close channels.
//  6. the node receives a topology update regarding the channel close.
func (h *HarnessTest) CloseChannel(hn *node.HarnessNode,
        cp *lnrpc.ChannelPoint) chainhash.Hash {

        stream, _ := h.CloseChannelAssertPending(hn, cp, false)

        return h.AssertStreamChannelCoopClosed(hn, cp, false, stream)
}

// ForceCloseChannel attempts to force close a non-anchored channel identified
// by the passed channel point owned by the passed harness node. The following
// items are asserted,
//  1. a close pending event is sent from the close channel client.
//  2. the closing tx is found in the mempool.
//  3. the node reports the channel being waiting to close.
//  4. a block is mined and the closing tx should be found in it.
//  5. the node reports zero waiting close channels.
//  6. the node receives a topology update regarding the channel close.
//  7. mine DefaultCSV-1 blocks.
//  8. the node reports zero pending force close channels.
func (h *HarnessTest) ForceCloseChannel(hn *node.HarnessNode,
        cp *lnrpc.ChannelPoint) chainhash.Hash {

        stream, _ := h.CloseChannelAssertPending(hn, cp, true)

        closingTxid := h.AssertStreamChannelForceClosed(hn, cp, false, stream)

        // Cleanup the force close.
        h.CleanupForceClose(hn)

        return closingTxid
}

// CloseChannelAssertErr closes the given channel and asserts that an error is
// returned.
func (h *HarnessTest) CloseChannelAssertErr(hn *node.HarnessNode,
        cp *lnrpc.ChannelPoint, force bool) error {

        // Call the rpc to close the channel.
        closeReq := &lnrpc.CloseChannelRequest{
                ChannelPoint: cp,
                Force:        force,
        }
        stream := hn.RPC.CloseChannel(closeReq)

        // Consume the "channel close" update and assert that an error is
        // returned instead of a close pending event.
        _, err := h.ReceiveCloseChannelUpdate(stream)
        require.Errorf(h, err, "%s: expect close channel to return an error",
                hn.Name())

        return err
}

// IsNeutrinoBackend returns a bool indicating whether the node is using
// neutrino as its backend. This is useful when we want to skip certain tests
// which cannot be done with a neutrino backend.
func (h *HarnessTest) IsNeutrinoBackend() bool {
        return h.manager.chainBackend.Name() == NeutrinoBackendName
}
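
// NOTE (editor's example, not part of the original file): a minimal sketch of
// the common pattern for guarding tests that cannot run against a neutrino
// backend. Using Skipf on the embedded testing.T is an assumption about how
// an individual test chooses to skip.
func exampleSkipOnNeutrino(ht *HarnessTest) {
        if ht.IsNeutrinoBackend() {
                ht.T.Skipf("skipping test: not supported on neutrino backend")
        }
}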

// fundCoins attempts to send amt satoshis from the internal mining node to the
// targeted lightning node. The confirmed boolean indicates whether the
// transaction that pays to the target should confirm. For the neutrino
// backend, the unconfirmed UTXO check is skipped.
func (h *HarnessTest) fundCoins(amt btcutil.Amount, target *node.HarnessNode,
        addrType lnrpc.AddressType, confirmed bool) {

        initialBalance := target.RPC.WalletBalance()

        // First, obtain an address from the target lightning node, preferring
        // to receive a p2wkh address s.t the output can immediately be used as
        // an input to a funding transaction.
        req := &lnrpc.NewAddressRequest{Type: addrType}
        resp := target.RPC.NewAddress(req)
        addr := h.DecodeAddress(resp.Address)
        addrScript := h.PayToAddrScript(addr)

        // Generate a transaction which creates an output to the target
        // pkScript of the desired amount.
        output := &wire.TxOut{
                PkScript: addrScript,
                Value:    int64(amt),
        }
        h.miner.SendOutput(output, defaultMinerFeeRate)

        // Encode the pkScript in hex as this is the format that it will be
        // returned via rpc.
        expPkScriptStr := hex.EncodeToString(addrScript)

        // Now, wait for ListUnspent to show the unconfirmed transaction
        // containing the correct pkscript.
        //
        // Since neutrino doesn't support unconfirmed outputs, skip this check.
        if !h.IsNeutrinoBackend() {
                utxos := h.AssertNumUTXOsUnconfirmed(target, 1)

                // Assert that the lone unconfirmed utxo contains the same
                // pkscript as the output generated above.
                pkScriptStr := utxos[0].PkScript
                require.Equal(h, pkScriptStr, expPkScriptStr,
                        "pkscript mismatch")

                expectedBalance := btcutil.Amount(
                        initialBalance.UnconfirmedBalance,
                ) + amt
                h.WaitForBalanceUnconfirmed(target, expectedBalance)
        }

        // If the transaction should remain unconfirmed, then we'll wait until
        // the target node's unconfirmed balance reflects the expected balance
        // and exit.
        if !confirmed {
                return
        }

        // Otherwise, we'll generate 1 new block to ensure the output gains a
        // sufficient number of confirmations and wait for the balance to
        // reflect what's expected.
        h.MineBlocksAndAssertNumTxes(1, 1)

        expectedBalance := btcutil.Amount(initialBalance.ConfirmedBalance) + amt
        h.WaitForBalanceConfirmed(target, expectedBalance)
}

// FundCoins attempts to send amt satoshis from the internal mining node to the
// targeted lightning node using a P2WKH address. 1 block is mined after in
// order to confirm the transaction.
func (h *HarnessTest) FundCoins(amt btcutil.Amount, hn *node.HarnessNode) {
        h.fundCoins(amt, hn, lnrpc.AddressType_WITNESS_PUBKEY_HASH, true)
}

// FundCoinsUnconfirmed attempts to send amt satoshis from the internal mining
// node to the targeted lightning node using a P2WKH address. No blocks are
// mined afterwards and the UTXOs are left unconfirmed.
func (h *HarnessTest) FundCoinsUnconfirmed(amt btcutil.Amount,
        hn *node.HarnessNode) {

        h.fundCoins(amt, hn, lnrpc.AddressType_WITNESS_PUBKEY_HASH, false)
}

// FundCoinsNP2WKH attempts to send amt satoshis from the internal mining node
// to the targeted lightning node using a NP2WKH address.
func (h *HarnessTest) FundCoinsNP2WKH(amt btcutil.Amount,
        target *node.HarnessNode) {

        h.fundCoins(amt, target, lnrpc.AddressType_NESTED_PUBKEY_HASH, true)
}

// FundCoinsP2TR attempts to send amt satoshis from the internal mining node to
// the targeted lightning node using a P2TR address.
func (h *HarnessTest) FundCoinsP2TR(amt btcutil.Amount,
        target *node.HarnessNode) {

        h.fundCoins(amt, target, lnrpc.AddressType_TAPROOT_PUBKEY, true)
}
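
// NOTE (editor's example, not part of the original file): a minimal funding
// sketch. It assumes `alice` is a harness node that is about to open
// channels; the 1 BTC amount is illustrative only.
func exampleFundNode(ht *HarnessTest, alice *node.HarnessNode) {
        // Give alice one confirmed P2WKH UTXO of 1 BTC to fund channel opens.
        ht.FundCoins(btcutil.SatoshiPerBitcoin, alice)

        // Alternatively, leave the output unconfirmed, e.g. when the test
        // wants to exercise spending unconfirmed funds.
        ht.FundCoinsUnconfirmed(btcutil.SatoshiPerBitcoin, alice)
}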

// completePaymentRequestsAssertStatus sends payments from a node to complete
// all payment requests. This function does not return until all payments
// have reached the specified status.
func (h *HarnessTest) completePaymentRequestsAssertStatus(hn *node.HarnessNode,
        paymentRequests []string, status lnrpc.Payment_PaymentStatus,
        opts ...HarnessOpt) {

        payOpts := defaultHarnessOpts()
        for _, opt := range opts {
                opt(&payOpts)
        }

        // Create a buffered chan to signal the results.
        results := make(chan rpc.PaymentClient, len(paymentRequests))

        // send sends a payment and signals the resulting payment stream via
        // the results channel.
        send := func(payReq string) {
                req := &routerrpc.SendPaymentRequest{
                        PaymentRequest: payReq,
                        TimeoutSeconds: int32(wait.PaymentTimeout.Seconds()),
                        FeeLimitMsat:   noFeeLimitMsat,
                        Amp:            payOpts.useAMP,
                }
                stream := hn.RPC.SendPayment(req)

                // Signal that the send succeeded.
                results <- stream
        }

        // Launch all payments simultaneously.
        for _, payReq := range paymentRequests {
                payReqCopy := payReq
                go send(payReqCopy)
        }

        // Wait for all payments to report the expected status.
        timer := time.After(wait.PaymentTimeout)
        select {
        case stream := <-results:
                h.AssertPaymentStatusFromStream(stream, status)

        case <-timer:
                require.Fail(h, "timeout", "waiting payment results timeout")
        }
}

// CompletePaymentRequests sends payments from a node to complete all payment
// requests. This function does not return until all payments successfully
// complete without errors.
func (h *HarnessTest) CompletePaymentRequests(hn *node.HarnessNode,
        paymentRequests []string, opts ...HarnessOpt) {

        h.completePaymentRequestsAssertStatus(
                hn, paymentRequests, lnrpc.Payment_SUCCEEDED, opts...,
        )
}

// CompletePaymentRequestsNoWait sends payments from a node to complete all
// payment requests without waiting for the results. Instead, it checks that
// the number of updates in the specified channel has increased.
func (h *HarnessTest) CompletePaymentRequestsNoWait(hn *node.HarnessNode,
        paymentRequests []string, chanPoint *lnrpc.ChannelPoint) {

        // We start by getting the current state of the client's channels. This
        // is needed to ensure the payments actually have been committed before
        // we return.
        oldResp := h.GetChannelByChanPoint(hn, chanPoint)

        // Send payments and assert they are in-flight.
        h.completePaymentRequestsAssertStatus(
                hn, paymentRequests, lnrpc.Payment_IN_FLIGHT,
        )

        // We are not waiting for feedback in the form of a response, but we
        // should still wait long enough for the server to receive and handle
        // the send before cancelling the request. We wait for the number of
        // updates on one of our channels to increase before we return.
        err := wait.NoError(func() error {
                newResp := h.GetChannelByChanPoint(hn, chanPoint)

                // If this channel has an increased number of updates, we
                // assume the payments are committed, and we can return.
                if newResp.NumUpdates > oldResp.NumUpdates {
                        return nil
                }

                // Otherwise return an error as NumUpdates has not increased.
                return fmt.Errorf("%s: channel:%v not updated after sending "+
                        "payments, old updates: %v, new updates: %v", hn.Name(),
                        chanPoint, oldResp.NumUpdates, newResp.NumUpdates)
        }, DefaultTimeout)
        require.NoError(h, err, "timeout while checking for channel updates")
}

// OpenChannelPsbt attempts to open a channel between srcNode and destNode with
// the passed channel funding parameters. It will fail the test if the expected
// PSBT funding step is not received from the source node.
func (h *HarnessTest) OpenChannelPsbt(srcNode, destNode *node.HarnessNode,
        p OpenChannelParams) (rpc.OpenChanClient, []byte) {

        // Wait until srcNode and destNode have the latest chain synced.
        // Otherwise, we may run into a check within the funding manager that
        // prevents any funding workflows from being kicked off if the chain
        // isn't yet synced.
        h.WaitForBlockchainSync(srcNode)
        h.WaitForBlockchainSync(destNode)

        // Send the request to open a channel to the source node now. This will
        // open a long-lived stream where we'll receive status updates about
        // the progress of the channel.
        // respStream := h.OpenChannelStreamAndAssert(srcNode, destNode, p)
        req := &lnrpc.OpenChannelRequest{
                NodePubkey:         destNode.PubKey[:],
                LocalFundingAmount: int64(p.Amt),
                PushSat:            int64(p.PushAmt),
                Private:            p.Private,
                SpendUnconfirmed:   p.SpendUnconfirmed,
                MinHtlcMsat:        int64(p.MinHtlc),
                FundingShim:        p.FundingShim,
                CommitmentType:     p.CommitmentType,
        }
        respStream := srcNode.RPC.OpenChannel(req)

        // Consume the "PSBT funding ready" update. This waits until the node
        // notifies us that the PSBT can now be funded.
        resp := h.ReceiveOpenChannelUpdate(respStream)
        upd, ok := resp.Update.(*lnrpc.OpenStatusUpdate_PsbtFund)
        require.Truef(h, ok, "expected PSBT funding update, got %v", resp)

        // Make sure the channel funding address has the correct type for the
        // given commitment type.
        fundingAddr, err := btcutil.DecodeAddress(
                upd.PsbtFund.FundingAddress, miner.HarnessNetParams,
        )
        require.NoError(h, err)

        switch p.CommitmentType {
        case lnrpc.CommitmentType_SIMPLE_TAPROOT:
                require.IsType(h, &btcutil.AddressTaproot{}, fundingAddr)

        default:
                require.IsType(
                        h, &btcutil.AddressWitnessScriptHash{}, fundingAddr,
                )
        }

        return respStream, upd.PsbtFund.Psbt
}
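
// NOTE (editor's example, not part of the original file): a hedged sketch of
// kicking off a PSBT channel funding flow. The follow-up steps (funding,
// signing and finalizing the returned PSBT, typically via the
// FundingStateStep RPC) depend on the wallet the test uses and are omitted.
// The funding shim and amount are assumptions supplied by the caller.
func exampleOpenChannelPsbtStart(ht *HarnessTest,
        alice, bob *node.HarnessNode,
        shim *lnrpc.FundingShim) (rpc.OpenChanClient, []byte) {

        // Start the funding flow; the returned bytes are the unsigned funding
        // PSBT that still needs to be funded and finalized externally.
        return ht.OpenChannelPsbt(alice, bob, OpenChannelParams{
                Amt:         btcutil.Amount(1_000_000),
                FundingShim: shim,
        })
}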

// CleanupForceClose mines blocks to clean up the force close process. This is
// used for tests that are not asserting the expected behavior is found during
// the force close process, e.g., num of sweeps, etc. Instead, it provides a
// shortcut to move the test forward with a clean mempool.
func (h *HarnessTest) CleanupForceClose(hn *node.HarnessNode) {
        // Wait for the channel to be marked pending force close.
        h.AssertNumPendingForceClose(hn, 1)

        // Mine blocks to get any second level HTLC resolved. If there are no
        // HTLCs, this will behave like h.AssertNumPendingCloseChannels.
        h.mineTillForceCloseResolved(hn)
}

// CreatePayReqs is a helper method that will create a slice of payment
// requests for the given node.
func (h *HarnessTest) CreatePayReqs(hn *node.HarnessNode,
        paymentAmt btcutil.Amount, numInvoices int,
        routeHints ...*lnrpc.RouteHint) ([]string, [][]byte, []*lnrpc.Invoice) {

        payReqs := make([]string, numInvoices)
        rHashes := make([][]byte, numInvoices)
        invoices := make([]*lnrpc.Invoice, numInvoices)
        for i := 0; i < numInvoices; i++ {
                preimage := h.Random32Bytes()

                invoice := &lnrpc.Invoice{
                        Memo:       "testing",
                        RPreimage:  preimage,
                        Value:      int64(paymentAmt),
                        RouteHints: routeHints,
                }
                resp := hn.RPC.AddInvoice(invoice)

                // Set the payment address in the invoice so the caller can
                // properly use it.
                invoice.PaymentAddr = resp.PaymentAddr

                payReqs[i] = resp.PaymentRequest
                rHashes[i] = resp.RHash
                invoices[i] = invoice
        }

        return payReqs, rHashes, invoices
}
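
// NOTE (editor's example, not part of the original file): a sketch combining
// CreatePayReqs with CompletePaymentRequests. It assumes `alice` has a route
// to `bob` with enough outbound liquidity; the amount and invoice count are
// illustrative only.
func examplePayInvoices(ht *HarnessTest, alice, bob *node.HarnessNode) {
        // Create 3 invoices of 10k sats each on bob.
        payReqs, _, _ := ht.CreatePayReqs(bob, 10_000, 3)

        // Pay them all from alice and block until every payment succeeds.
        ht.CompletePaymentRequests(alice, payReqs)
}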

// BackupDB creates a backup of the current database. It will stop the node
// first, copy the database files, and restart the node.
func (h *HarnessTest) BackupDB(hn *node.HarnessNode) {
        restart := h.SuspendNode(hn)

        err := hn.BackupDB()
        require.NoErrorf(h, err, "%s: failed to backup db", hn.Name())

        err = restart()
        require.NoErrorf(h, err, "%s: failed to restart", hn.Name())
}

// RestartNodeAndRestoreDB restarts a given node with a callback to restore the
// db.
func (h *HarnessTest) RestartNodeAndRestoreDB(hn *node.HarnessNode) {
        cb := func() error { return hn.RestoreDB() }
        err := h.manager.restartNode(h.runCtx, hn, cb)
        require.NoErrorf(h, err, "failed to restart node %s", hn.Name())

        err = h.manager.unlockNode(hn)
        require.NoErrorf(h, err, "failed to unlock node %s", hn.Name())

        // Give the node some time to catch up with the chain before we
        // continue with the tests.
        h.WaitForBlockchainSync(hn)
}
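
// NOTE (editor's example, not part of the original file): a sketch of the
// backup/restore pattern BackupDB and RestartNodeAndRestoreDB are built for,
// e.g. to put a node back onto stale state when testing data loss protection.
// The steps between the backup and the restore are test specific and omitted.
func exampleBackupAndRestore(ht *HarnessTest, carol *node.HarnessNode) {
        // Snapshot carol's database while the node is suspended.
        ht.BackupDB(carol)

        // ... advance channel state here so the backup becomes stale ...

        // Restart carol from the backed up database and wait for chain sync.
        ht.RestartNodeAndRestoreDB(carol)
}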

// CleanShutDown is used to quickly end a test by shutting down all non-standby
// nodes and mining blocks to empty the mempool.
//
// NOTE: this method provides a faster exit for a test that involves force
// closures as the caller doesn't need to mine all the blocks to make sure the
// mempool is empty.
func (h *HarnessTest) CleanShutDown() {
        // First, shutdown all non-standby nodes to prevent new transactions
        // from being created and fed into the mempool.
        h.shutdownNonStandbyNodes()

        // Now mine blocks till the mempool is empty.
        h.cleanMempool()
}

// QueryChannelByChanPoint tries to find a channel matching the channel point
// and asserts that it is found. It returns the channel found.
func (h *HarnessTest) QueryChannelByChanPoint(hn *node.HarnessNode,
        chanPoint *lnrpc.ChannelPoint,
        opts ...ListChannelOption) *lnrpc.Channel {

        channel, err := h.findChannel(hn, chanPoint, opts...)
        require.NoError(h, err, "failed to query channel")

        return channel
}

// SendPaymentAndAssertStatus sends a payment from the passed node and asserts
// the desired status is reached.
func (h *HarnessTest) SendPaymentAndAssertStatus(hn *node.HarnessNode,
        req *routerrpc.SendPaymentRequest,
        status lnrpc.Payment_PaymentStatus) *lnrpc.Payment {

        stream := hn.RPC.SendPayment(req)
        return h.AssertPaymentStatusFromStream(stream, status)
}

// SendPaymentAssertFail sends a payment from the passed node and asserts the
// payment fails with the specified failure reason.
func (h *HarnessTest) SendPaymentAssertFail(hn *node.HarnessNode,
        req *routerrpc.SendPaymentRequest,
        reason lnrpc.PaymentFailureReason) *lnrpc.Payment {

        payment := h.SendPaymentAndAssertStatus(hn, req, lnrpc.Payment_FAILED)
        require.Equal(h, reason, payment.FailureReason,
                "payment failureReason not matched")

        return payment
}

// SendPaymentAssertSettled sends a payment from the passed node and asserts
// the payment is settled.
func (h *HarnessTest) SendPaymentAssertSettled(hn *node.HarnessNode,
        req *routerrpc.SendPaymentRequest) *lnrpc.Payment {

        return h.SendPaymentAndAssertStatus(hn, req, lnrpc.Payment_SUCCEEDED)
}

// SendPaymentAssertInflight sends a payment from the passed node and asserts
// the payment is inflight.
func (h *HarnessTest) SendPaymentAssertInflight(hn *node.HarnessNode,
        req *routerrpc.SendPaymentRequest) *lnrpc.Payment {

        return h.SendPaymentAndAssertStatus(hn, req, lnrpc.Payment_IN_FLIGHT)
}

// OpenChannelRequest is used to open a channel using the method
// OpenMultiChannelsAsync.
type OpenChannelRequest struct {
        // Local is the funding node.
        Local *node.HarnessNode

        // Remote is the receiving node.
        Remote *node.HarnessNode

        // Param is the open channel params.
        Param OpenChannelParams

        // stream is the client created after calling OpenChannel RPC.
        stream rpc.OpenChanClient

        // result is a channel used to send the channel point once the funding
        // has succeeded.
        result chan *lnrpc.ChannelPoint
}

// OpenMultiChannelsAsync takes a list of OpenChannelRequest and opens them in
// batch. The channel points are returned in the same order as the requests
// once all of the channel opens succeed.
//
// NOTE: compared to opening multiple channels sequentially, this method will
// be faster as it doesn't need to mine 6 blocks for each channel open.
// However, it does make debugging the logs more difficult as messages are
// intertwined.
func (h *HarnessTest) OpenMultiChannelsAsync(
        reqs []*OpenChannelRequest) []*lnrpc.ChannelPoint {

        // openChannel opens a channel based on the request.
        openChannel := func(req *OpenChannelRequest) {
                stream := h.OpenChannelAssertStream(
                        req.Local, req.Remote, req.Param,
                )
                req.stream = stream
        }

        // assertChannelOpen is a helper closure that asserts a channel is
        // open.
        assertChannelOpen := func(req *OpenChannelRequest) {
                // Wait for the channel open event from the stream.
                cp := h.WaitForChannelOpenEvent(req.stream)

                if !req.Param.Private {
                        // Check that both the local and remote nodes have seen
                        // the channel in their network graph.
                        h.AssertChannelInGraph(req.Local, cp)
                        h.AssertChannelInGraph(req.Remote, cp)
                }

                // Finally, check that the channel can be seen in their
                // ListChannels.
                h.AssertChannelExists(req.Local, cp)
                h.AssertChannelExists(req.Remote, cp)

                req.result <- cp
        }

        // Go through the requests and make the OpenChannel RPC call.
        for _, r := range reqs {
                openChannel(r)
        }

        // Mine one block to confirm all the funding transactions.
        h.MineBlocksAndAssertNumTxes(1, len(reqs))

        // Mine 5 more blocks so all the public channels are announced to the
        // network.
        h.MineBlocks(numBlocksOpenChannel - 1)

        // Once the blocks are mined, we fire goroutines for each of the
        // requests to watch for the channel opening.
        for _, r := range reqs {
                r.result = make(chan *lnrpc.ChannelPoint, 1)
                go assertChannelOpen(r)
        }

        // Finally, collect the results.
        channelPoints := make([]*lnrpc.ChannelPoint, 0)
        for _, r := range reqs {
                select {
                case cp := <-r.result:
                        channelPoints = append(channelPoints, cp)

                case <-time.After(wait.ChannelOpenTimeout):
                        require.Failf(h, "timeout", "wait channel point "+
                                "timeout for channel %s=>%s", r.Local.Name(),
                                r.Remote.Name())
                }
        }

        // Assert that we have the expected num of channel points.
        require.Len(h, channelPoints, len(reqs),
                "returned channel points not match")

        return channelPoints
}
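
// NOTE (editor's example, not part of the original file): a sketch that opens
// two channels alice->bob and bob->carol in one batch. It assumes the three
// nodes are already connected and that alice and bob each hold a confirmed
// UTXO large enough for the requested capacity; the amount is illustrative.
func exampleOpenChannelsInBatch(ht *HarnessTest,
        alice, bob, carol *node.HarnessNode) []*lnrpc.ChannelPoint {

        p := OpenChannelParams{Amt: btcutil.Amount(1_000_000)}
        reqs := []*OpenChannelRequest{
                {Local: alice, Remote: bob, Param: p},
                {Local: bob, Remote: carol, Param: p},
        }

        // Only one confirmation block plus five announcement blocks are mined
        // for the whole batch.
        return ht.OpenMultiChannelsAsync(reqs)
}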

// ReceiveInvoiceUpdate waits until a message is received on the subscribe
// invoice stream or the timeout is reached.
func (h *HarnessTest) ReceiveInvoiceUpdate(
        stream rpc.InvoiceUpdateClient) *lnrpc.Invoice {

        chanMsg := make(chan *lnrpc.Invoice)
        errChan := make(chan error)
        go func() {
                // Consume one message. This will block until the message is
                // received.
                resp, err := stream.Recv()
                if err != nil {
                        errChan <- err
                        return
                }
                chanMsg <- resp
        }()

        select {
        case <-time.After(DefaultTimeout):
                require.Fail(h, "timeout", "timeout receiving invoice update")

        case err := <-errChan:
                require.Failf(h, "err from stream",
                        "received err from stream: %v", err)

        case updateMsg := <-chanMsg:
                return updateMsg
        }

        return nil
}
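
// NOTE (editor's example, not part of the original file): a sketch of
// consuming an invoice update. It assumes the node's RPC wrapper exposes a
// SubscribeInvoices helper that returns an rpc.InvoiceUpdateClient; the
// payment that settles the invoice is omitted.
func exampleWaitForSettledInvoice(ht *HarnessTest, bob *node.HarnessNode) {
        // Subscribe to all invoice updates on bob.
        stream := bob.RPC.SubscribeInvoices(&lnrpc.InvoiceSubscription{})

        // ... a payment to one of bob's invoices happens here ...

        // Block until the next invoice update or fail after DefaultTimeout.
        invoice := ht.ReceiveInvoiceUpdate(stream)
        require.Equal(ht, lnrpc.Invoice_SETTLED, invoice.State,
                "expected settled invoice")
}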

// CalculateTxFee retrieves parent transactions and reconstructs the fee paid.
func (h *HarnessTest) CalculateTxFee(tx *wire.MsgTx) btcutil.Amount {
        var balance btcutil.Amount
        for _, in := range tx.TxIn {
                parentHash := in.PreviousOutPoint.Hash
                rawTx := h.miner.GetRawTransaction(parentHash)
                parent := rawTx.MsgTx()
                value := parent.TxOut[in.PreviousOutPoint.Index].Value

                balance += btcutil.Amount(value)
        }

        for _, out := range tx.TxOut {
                balance -= btcutil.Amount(out.Value)
        }

        return balance
}

// CalculateTxWeight calculates the weight for a given tx.
//
// TODO(yy): use weight estimator to get more accurate result.
func (h *HarnessTest) CalculateTxWeight(tx *wire.MsgTx) lntypes.WeightUnit {
        utx := btcutil.NewTx(tx)
        return lntypes.WeightUnit(blockchain.GetTransactionWeight(utx))
}

// CalculateTxFeeRate calculates the fee rate for a given tx.
func (h *HarnessTest) CalculateTxFeeRate(
        tx *wire.MsgTx) chainfee.SatPerKWeight {

        w := h.CalculateTxWeight(tx)
        fee := h.CalculateTxFee(tx)

        return chainfee.NewSatPerKWeight(fee, w)
}
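
// NOTE (editor's example, not part of the original file): a small worked
// example of the fee rate conversion used above. A fee of 2,530 sats on a tx
// of 1,012 weight units corresponds to 2,530 * 1,000 / 1,012 = 2,500 sat/kw.
// The numbers are illustrative only.
func exampleFeeRateMath() chainfee.SatPerKWeight {
        fee := btcutil.Amount(2_530)
        weight := lntypes.WeightUnit(1_012)

        // NewSatPerKWeight scales the fee to a per-1000-weight-unit rate,
        // giving 2,500 sat/kw here.
        return chainfee.NewSatPerKWeight(fee, weight)
}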

// CalculateTxesFeeRate takes a list of transactions and estimates the fee rate
// used to sweep them.
//
// NOTE: only used in current test file.
func (h *HarnessTest) CalculateTxesFeeRate(txns []*wire.MsgTx) int64 {
        const scale = 1000

        var totalWeight, totalFee int64
        for _, tx := range txns {
                utx := btcutil.NewTx(tx)
                totalWeight += blockchain.GetTransactionWeight(utx)

                fee := h.CalculateTxFee(tx)
                totalFee += int64(fee)
        }
        feeRate := totalFee * scale / totalWeight

        return feeRate
}

// AssertSweepFound looks up a sweep in a node's list of broadcast sweeps and
// asserts it's found.
//
// NOTE: Does not account for node's internal state.
func (h *HarnessTest) AssertSweepFound(hn *node.HarnessNode,
        sweep string, verbose bool, startHeight int32) {

        err := wait.NoError(func() error {
                // List all sweeps that the node has broadcast.
                sweepResp := hn.RPC.ListSweeps(verbose, startHeight)

                var found bool
                if verbose {
                        found = findSweepInDetails(h, sweep, sweepResp)
                } else {
                        found = findSweepInTxids(h, sweep, sweepResp)
                }

                if found {
                        return nil
                }

                return fmt.Errorf("sweep tx %v not found", sweep)
        }, wait.DefaultTimeout)
        require.NoError(h, err, "%s: timeout checking sweep tx", hn.Name())
}

func findSweepInTxids(ht *HarnessTest, sweepTxid string,
        sweepResp *walletrpc.ListSweepsResponse) bool {

        sweepTxIDs := sweepResp.GetTransactionIds()
        require.NotNil(ht, sweepTxIDs, "expected transaction ids")
        require.Nil(ht, sweepResp.GetTransactionDetails())

        // Check that the sweep tx we have just produced is present.
        for _, tx := range sweepTxIDs.TransactionIds {
                if tx == sweepTxid {
                        return true
                }
        }

        return false
}

func findSweepInDetails(ht *HarnessTest, sweepTxid string,
        sweepResp *walletrpc.ListSweepsResponse) bool {

        sweepDetails := sweepResp.GetTransactionDetails()
        require.NotNil(ht, sweepDetails, "expected transaction details")
        require.Nil(ht, sweepResp.GetTransactionIds())

        for _, tx := range sweepDetails.Transactions {
                if tx.TxHash == sweepTxid {
                        return true
                }
        }

        return false
}

// QueryRoutesAndRetry attempts to keep querying a route until timeout is
// reached.
//
// NOTE: when a channel is opened, we may need to query multiple times to get
// it in our QueryRoutes RPC. This happens even after we check the channel is
// heard by the node using ht.AssertChannelOpen. Deep down, this is because our
// GraphTopologySubscription and QueryRoutes give different results regarding a
// specific channel, with the former reporting it as open while the latter
// does not, resulting in GraphTopologySubscription acting "faster" than
// QueryRoutes.
// TODO(yy): make sure related subsystems share the same view on a given
// channel.
func (h *HarnessTest) QueryRoutesAndRetry(hn *node.HarnessNode,
        req *lnrpc.QueryRoutesRequest) *lnrpc.QueryRoutesResponse {

        var routes *lnrpc.QueryRoutesResponse
        err := wait.NoError(func() error {
                ctxt, cancel := context.WithCancel(h.runCtx)
                defer cancel()

                resp, err := hn.RPC.LN.QueryRoutes(ctxt, req)
                if err != nil {
                        return fmt.Errorf("%s: failed to query route: %w",
                                hn.Name(), err)
                }

                routes = resp

                return nil
        }, DefaultTimeout)

        require.NoError(h, err, "timeout querying routes")

        return routes
}

// ReceiveHtlcInterceptor waits until a message is received on the htlc
// interceptor stream or the timeout is reached.
func (h *HarnessTest) ReceiveHtlcInterceptor(
        stream rpc.InterceptorClient) *routerrpc.ForwardHtlcInterceptRequest {

        chanMsg := make(chan *routerrpc.ForwardHtlcInterceptRequest)
        errChan := make(chan error)
        go func() {
                // Consume one message. This will block until the message is
                // received.
                resp, err := stream.Recv()
                if err != nil {
                        errChan <- err
                        return
                }
                chanMsg <- resp
        }()

        select {
        case <-time.After(DefaultTimeout):
                require.Fail(h, "timeout", "timeout intercepting htlc")

        case err := <-errChan:
                require.Failf(h, "err from HTLC interceptor stream",
                        "received err from HTLC interceptor stream: %v", err)

        case updateMsg := <-chanMsg:
                return updateMsg
        }

        return nil
}

// ReceiveInvoiceHtlcModification waits until a message is received on the
// invoice HTLC modifier stream or the timeout is reached.
func (h *HarnessTest) ReceiveInvoiceHtlcModification(
        stream rpc.InvoiceHtlcModifierClient) *invoicesrpc.HtlcModifyRequest {

        chanMsg := make(chan *invoicesrpc.HtlcModifyRequest)
        errChan := make(chan error)
        go func() {
                // Consume one message. This will block until the message is
                // received.
                resp, err := stream.Recv()
                if err != nil {
                        errChan <- err
                        return
                }
                chanMsg <- resp
        }()

        select {
        case <-time.After(DefaultTimeout):
                require.Fail(h, "timeout", "timeout invoice HTLC modifier")

        case err := <-errChan:
                require.Failf(h, "err from invoice HTLC modifier stream",
                        "received err from invoice HTLC modifier stream: %v",
                        err)

        case updateMsg := <-chanMsg:
                return updateMsg
        }

        return nil
}

// ReceiveChannelEvent waits until a message is received from the
// ChannelEventsClient stream or the timeout is reached.
func (h *HarnessTest) ReceiveChannelEvent(
        stream rpc.ChannelEventsClient) *lnrpc.ChannelEventUpdate {

        chanMsg := make(chan *lnrpc.ChannelEventUpdate)
        errChan := make(chan error)
        go func() {
                // Consume one message. This will block until the message is
                // received.
                resp, err := stream.Recv()
                if err != nil {
                        errChan <- err
                        return
                }
                chanMsg <- resp
        }()

        select {
        case <-time.After(DefaultTimeout):
                require.Fail(h, "timeout", "timeout receiving channel event")

        case err := <-errChan:
                require.Failf(h, "err from stream",
                        "received err from stream: %v", err)

        case updateMsg := <-chanMsg:
                return updateMsg
        }

        return nil
}

// GetOutputIndex returns the output index of the given address in the given
// transaction.
func (h *HarnessTest) GetOutputIndex(txid chainhash.Hash, addr string) int {
        // We'll first extract the raw transaction from the mempool in order to
        // determine the index of the output paying to the target address.
        tx := h.miner.GetRawTransaction(txid)

        p2trOutputIndex := -1
        for i, txOut := range tx.MsgTx().TxOut {
                _, addrs, _, err := txscript.ExtractPkScriptAddrs(
                        txOut.PkScript, h.miner.ActiveNet,
                )
                require.NoError(h, err)

                if addrs[0].String() == addr {
                        p2trOutputIndex = i
                }
        }
        require.Greater(h, p2trOutputIndex, -1)

        return p2trOutputIndex
}
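
// NOTE (editor's example, not part of the original file): a sketch of turning
// GetOutputIndex into an outpoint, e.g. for a later funding shim or PSBT
// input. The txid and address are assumptions supplied by the caller.
func exampleOutpointForAddr(ht *HarnessTest, txid chainhash.Hash,
        addr string) wire.OutPoint {

        // Resolve which output of the tx pays to addr.
        index := ht.GetOutputIndex(txid, addr)

        return wire.OutPoint{Hash: txid, Index: uint32(index)}
}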

// SendCoins sends coins from node A to node B with the given amount and
// returns the sending tx.
func (h *HarnessTest) SendCoins(a, b *node.HarnessNode,
        amt btcutil.Amount) *wire.MsgTx {

        // Create an address for node b to receive the coins.
        req := &lnrpc.NewAddressRequest{
                Type: lnrpc.AddressType_TAPROOT_PUBKEY,
        }
        resp := b.RPC.NewAddress(req)

        // Send the coins from node a to node b. We should expect a tx to be
        // broadcast and seen in the mempool.
        sendReq := &lnrpc.SendCoinsRequest{
                Addr:       resp.Address,
                Amount:     int64(amt),
                TargetConf: 6,
        }
        a.RPC.SendCoins(sendReq)
        tx := h.GetNumTxsFromMempool(1)[0]

        return tx
}

// CreateSimpleNetwork creates the number of nodes specified by the number of
// configs and makes a topology of `node1 -> node2 -> node3...`. Each node is
// created using the specified config, the neighbors are connected, and the
// channels are opened. Each node will be funded with a single UTXO of 1 BTC
// except the last one.
//
// For instance, to create a network with 2 nodes that share the same node
// config,
//
//        cfg := []string{"--protocol.anchors"}
//        cfgs := [][]string{cfg, cfg}
//        params := OpenChannelParams{...}
//        chanPoints, nodes := ht.CreateSimpleNetwork(cfgs, params)
//
// This will create two nodes and open an anchor channel between them.
func (h *HarnessTest) CreateSimpleNetwork(nodeCfgs [][]string,
        p OpenChannelParams) ([]*lnrpc.ChannelPoint, []*node.HarnessNode) {

        // Create new nodes.
        nodes := h.createNodes(nodeCfgs)

        var resp []*lnrpc.ChannelPoint

        // Open zero-conf channels if specified.
        if p.ZeroConf {
                resp = h.openZeroConfChannelsForNodes(nodes, p)
        } else {
                // Open channels between the nodes.
                resp = h.openChannelsForNodes(nodes, p)
        }

        return resp, nodes
}

// acceptChannel is used to accept a single channel that comes across. This
// should be run in a goroutine and is used to test nodes with the zero-conf
// feature bit.
func acceptChannel(t *testing.T, zeroConf bool, stream rpc.AcceptorClient) {
        req, err := stream.Recv()
        require.NoError(t, err)

        resp := &lnrpc.ChannelAcceptResponse{
                Accept:        true,
                PendingChanId: req.PendingChanId,
                ZeroConf:      zeroConf,
        }
        err = stream.Send(resp)
        require.NoError(t, err)
}
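
// NOTE (editor's example, not part of the original file): a sketch of wiring
// up acceptChannel for a single zero-conf open. It mirrors how
// openZeroConfChannelsForNodes below uses the channel acceptor; `bob` is
// assumed to be the node accepting the incoming channel.
func exampleZeroConfAcceptor(ht *HarnessTest, bob *node.HarnessNode) {
        // Register a channel acceptor on bob and answer the next incoming
        // channel request with zero-conf enabled.
        acceptor, cancel := bob.RPC.ChannelAcceptor()
        defer cancel()

        go acceptChannel(ht.T, true, acceptor)

        // ... the zero-conf channel open towards bob happens here ...
}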

// createNodes creates the number of nodes specified by the number of configs.
// Each node is created using the specified config, and the neighbors are
// connected.
func (h *HarnessTest) createNodes(nodeCfgs [][]string) []*node.HarnessNode {
        // Get the number of nodes.
        numNodes := len(nodeCfgs)

        // Make a slice of nodes.
        nodes := make([]*node.HarnessNode, numNodes)

        // Create new nodes.
        for i, nodeCfg := range nodeCfgs {
                nodeName := fmt.Sprintf("Node%q", string(rune('A'+i)))
                n := h.NewNode(nodeName, nodeCfg)
                nodes[i] = n
        }

        // Connect the nodes in a chain.
        for i := 1; i < len(nodes); i++ {
                nodeA := nodes[i-1]
                nodeB := nodes[i]
                h.EnsureConnected(nodeA, nodeB)
        }

        // Fund all the nodes except the last one.
        for i := 0; i < len(nodes)-1; i++ {
                node := nodes[i]
                h.FundCoinsUnconfirmed(btcutil.SatoshiPerBitcoin, node)
        }

        // Mine 1 block to get the above coins confirmed.
        h.MineBlocksAndAssertNumTxes(1, numNodes-1)

        return nodes
}

// openChannelsForNodes takes a list of nodes and makes a topology of `node1 ->
// node2 -> node3...`.
func (h *HarnessTest) openChannelsForNodes(nodes []*node.HarnessNode,
        p OpenChannelParams) []*lnrpc.ChannelPoint {

        // Sanity check the params.
        require.Greater(h, len(nodes), 1, "need at least 2 nodes")

        // Open channels in batch to save blocks mined.
        reqs := make([]*OpenChannelRequest, 0, len(nodes)-1)
        for i := 0; i < len(nodes)-1; i++ {
                nodeA := nodes[i]
                nodeB := nodes[i+1]

                req := &OpenChannelRequest{
                        Local:  nodeA,
                        Remote: nodeB,
                        Param:  p,
                }
                reqs = append(reqs, req)
        }
        resp := h.OpenMultiChannelsAsync(reqs)

        // Make sure the nodes know each other's channels if they are public.
        if !p.Private {
                for _, node := range nodes {
                        for _, chanPoint := range resp {
                                h.AssertChannelInGraph(node, chanPoint)
                        }
                }
        }

        return resp
}

// openZeroConfChannelsForNodes takes a list of nodes and makes a topology of
// `node1 -> node2 -> node3...` with zero-conf channels.
func (h *HarnessTest) openZeroConfChannelsForNodes(nodes []*node.HarnessNode,
        p OpenChannelParams) []*lnrpc.ChannelPoint {

        // Sanity check the params.
        require.True(h, p.ZeroConf, "zero-conf channels must be enabled")
        require.Greater(h, len(nodes), 1, "need at least 2 nodes")

        // We are opening numNodes-1 channels.
        cancels := make([]context.CancelFunc, 0, len(nodes)-1)

        // Create the channel acceptors.
        for _, node := range nodes[1:] {
                acceptor, cancel := node.RPC.ChannelAcceptor()
                go acceptChannel(h.T, true, acceptor)

                cancels = append(cancels, cancel)
        }

        // Open channels between the nodes.
        resp := h.openChannelsForNodes(nodes, p)

        for _, cancel := range cancels {
                cancel()
        }

        return resp
}