
lightningnetwork / lnd · build 12986279612

27 Jan 2025 09:51AM UTC · coverage: 57.652% (-1.1%) from 58.788%

Pull Request #9447: sweep: start tracking input spending status in the fee bumper
Author: yyforyongyu · commit: sweep: rename methods for clarity (via github)

We now rename "third party" to "unknown", as the inputs can be spent via an
older sweeping tx, a third party (anchor), or a remote party (pin). In the fee
bumper we don't have the info to distinguish the above cases, so we leave them
to be further handled by the sweeper, which has more context.

83 of 87 new or added lines in 2 files covered (95.4%).
19578 existing lines in 256 files are now uncovered.
103448 of 179434 relevant lines covered (57.65%).
24884.58 hits per line.

Source file: /chainntnfs/neutrinonotify/neutrino.go · 82.23% of lines covered

package neutrinonotify

import (
        "errors"
        "fmt"
        "strings"
        "sync"
        "sync/atomic"
        "time"

        "github.com/btcsuite/btcd/btcjson"
        "github.com/btcsuite/btcd/btcutil"
        "github.com/btcsuite/btcd/btcutil/gcs/builder"
        "github.com/btcsuite/btcd/chaincfg/chainhash"
        "github.com/btcsuite/btcd/rpcclient"
        "github.com/btcsuite/btcd/txscript"
        "github.com/btcsuite/btcd/wire"
        "github.com/lightninglabs/neutrino"
        "github.com/lightninglabs/neutrino/headerfs"
        "github.com/lightningnetwork/lnd/blockcache"
        "github.com/lightningnetwork/lnd/chainntnfs"
        "github.com/lightningnetwork/lnd/lntypes"
        "github.com/lightningnetwork/lnd/queue"
)

const (
        // notifierType uniquely identifies this concrete implementation of the
        // ChainNotifier interface.
        notifierType = "neutrino"
)

// NeutrinoNotifier is a version of ChainNotifier that's backed by the neutrino
// Bitcoin light client. Unlike other implementations, this implementation
// speaks directly to the p2p network. As a result, this implementation of the
// ChainNotifier interface is much more lightweight than other implementations
// which rely on receiving notifications over an RPC interface backed by a
// running full node.
//
// TODO(roasbeef): heavily consolidate with NeutrinoNotifier code
//   - maybe combine into single package?
type NeutrinoNotifier struct {
        epochClientCounter uint64 // To be used atomically.

        start   sync.Once
        active  int32 // To be used atomically.
        stopped int32 // To be used atomically.

        bestBlockMtx sync.RWMutex
        bestBlock    chainntnfs.BlockEpoch

        p2pNode   *neutrino.ChainService
        chainView *neutrino.Rescan

        chainConn *NeutrinoChainConn

        notificationCancels  chan interface{}
        notificationRegistry chan interface{}

        txNotifier *chainntnfs.TxNotifier

        blockEpochClients map[uint64]*blockEpochRegistration

        rescanErr <-chan error

        chainUpdates chan *filteredBlock

        txUpdates *queue.ConcurrentQueue

        // spendHintCache is a cache used to query and update the latest height
        // hints for an outpoint. Each height hint represents the earliest
        // height at which the outpoint could have been spent within the chain.
        spendHintCache chainntnfs.SpendHintCache

        // confirmHintCache is a cache used to query the latest height hints for
        // a transaction. Each height hint represents the earliest height at
        // which the transaction could have confirmed within the chain.
        confirmHintCache chainntnfs.ConfirmHintCache

        // blockCache is an LRU block cache.
        blockCache *blockcache.BlockCache

        wg   sync.WaitGroup
        quit chan struct{}
}

// Ensure NeutrinoNotifier implements the ChainNotifier interface at compile time.
var _ chainntnfs.ChainNotifier = (*NeutrinoNotifier)(nil)

// New creates a new instance of the NeutrinoNotifier concrete implementation
// of the ChainNotifier interface.
//
// NOTE: The passed neutrino node should already be running and active before
// being passed into this function.
func New(node *neutrino.ChainService, spendHintCache chainntnfs.SpendHintCache,
        confirmHintCache chainntnfs.ConfirmHintCache,
        blockCache *blockcache.BlockCache) *NeutrinoNotifier {

        return &NeutrinoNotifier{
                notificationCancels:  make(chan interface{}),
                notificationRegistry: make(chan interface{}),

                blockEpochClients: make(map[uint64]*blockEpochRegistration),

                p2pNode:   node,
                chainConn: &NeutrinoChainConn{node},

                rescanErr: make(chan error),

                chainUpdates: make(chan *filteredBlock, 100),

                txUpdates: queue.NewConcurrentQueue(10),

                spendHintCache:   spendHintCache,
                confirmHintCache: confirmHintCache,

                blockCache: blockCache,

                quit: make(chan struct{}),
        }
}

// Start contacts the running neutrino light client and kicks off an initial
// empty rescan.
func (n *NeutrinoNotifier) Start() error {
        var startErr error
        n.start.Do(func() {
                startErr = n.startNotifier()
        })
        return startErr
}

// Stop shuts down the NeutrinoNotifier.
func (n *NeutrinoNotifier) Stop() error {
        // Already shutting down?
        if atomic.AddInt32(&n.stopped, 1) != 1 {
                return nil
        }

        chainntnfs.Log.Info("neutrino notifier shutting down...")
        defer chainntnfs.Log.Debug("neutrino notifier shutdown complete")

        close(n.quit)
        n.wg.Wait()

        n.txUpdates.Stop()

        // Notify all pending clients of our shutdown by closing the related
        // notification channels.
        for _, epochClient := range n.blockEpochClients {
                close(epochClient.cancelChan)
                epochClient.wg.Wait()

                close(epochClient.epochChan)
        }

        // The txNotifier is only initialized in the start method, so we need
        // to make sure we don't access a nil pointer here.
        if n.txNotifier != nil {
                n.txNotifier.TearDown()
        }

        return nil
}

// Started returns true if this instance has been started, and false otherwise.
func (n *NeutrinoNotifier) Started() bool {
        return atomic.LoadInt32(&n.active) != 0
}
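
// exampleLifecycle is a minimal usage sketch (not part of lnd itself) of the
// constructor/Start/Stop contract above. The neutrino node, hint caches and
// block cache are assumed to be built and already running elsewhere by the
// caller.
func exampleLifecycle(node *neutrino.ChainService,
        spendHints chainntnfs.SpendHintCache,
        confHints chainntnfs.ConfirmHintCache,
        blocks *blockcache.BlockCache) error {

        // New only wires up the struct; the notifier is inert until Start
        // kicks off the initial rescan.
        notifier := New(node, spendHints, confHints, blocks)
        if err := notifier.Start(); err != nil {
                return err
        }
        defer notifier.Stop()

        // Registrations (spends, confirmations, block epochs) may now be
        // made against the running notifier.
        return nil
}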

func (n *NeutrinoNotifier) startNotifier() error {
        // Start our concurrent queues before starting the rescan, to ensure
        // the onFilteredBlockConnected and onRelevantTx callbacks won't be
        // blocked.
        n.txUpdates.Start()

        // First, we'll obtain the latest block height of the p2p node. We'll
        // start the auto-rescan from this point. Once a caller actually wishes
        // to register a chain view, the rescan state will be rewound
        // accordingly.
        startingPoint, err := n.p2pNode.BestBlock()
        if err != nil {
                n.txUpdates.Stop()
                return err
        }
        startingHeader, err := n.p2pNode.GetBlockHeader(
                &startingPoint.Hash,
        )
        if err != nil {
                n.txUpdates.Stop()
                return err
        }

        n.bestBlock.Hash = &startingPoint.Hash
        n.bestBlock.Height = startingPoint.Height
        n.bestBlock.BlockHeader = startingHeader

        n.txNotifier = chainntnfs.NewTxNotifier(
                uint32(n.bestBlock.Height), chainntnfs.ReorgSafetyLimit,
                n.confirmHintCache, n.spendHintCache,
        )

        // Next, we'll create our set of rescan options. Currently it's
        // required that a user MUST set an addr/outpoint/txid when creating a
        // rescan. To get around this, we'll add a "zero" outpoint, that won't
        // actually be matched.
        var zeroInput neutrino.InputWithScript
        rescanOptions := []neutrino.RescanOption{
                neutrino.StartBlock(startingPoint),
                neutrino.QuitChan(n.quit),
                neutrino.NotificationHandlers(
                        rpcclient.NotificationHandlers{
                                OnFilteredBlockConnected:    n.onFilteredBlockConnected,
                                OnFilteredBlockDisconnected: n.onFilteredBlockDisconnected,
                                OnRedeemingTx:               n.onRelevantTx,
                        },
                ),
                neutrino.WatchInputs(zeroInput),
        }

        // Finally, we'll create our rescan struct, start it, and launch all
        // the goroutines we need to operate this ChainNotifier instance.
        n.chainView = neutrino.NewRescan(
                &neutrino.RescanChainSource{
                        ChainService: n.p2pNode,
                },
                rescanOptions...,
        )
        n.rescanErr = n.chainView.Start()

        n.wg.Add(1)
        go n.notificationDispatcher()

        // Set the active flag now that we've completed the full startup.
        atomic.StoreInt32(&n.active, 1)

        return nil
}

// filteredBlock represents a new block which has been connected to the main
// chain. The slice of transactions will only be populated if the block
// includes a transaction that confirmed one of our watched txids, or spends
// one of the outputs currently being watched.
type filteredBlock struct {
        header *wire.BlockHeader
        hash   chainhash.Hash
        height uint32
        txns   []*btcutil.Tx

        // connect is true if this update is for a newly connected block and
        // false if it is for a disconnected block.
        connect bool
}

// rescanFilterUpdate represents a request that will be sent to the
// notificationRegistry in order to prevent race conditions between the filter
// update and new block notifications.
type rescanFilterUpdate struct {
        updateOptions []neutrino.UpdateOption
        errChan       chan error
}

// onFilteredBlockConnected is a callback which is executed each time a new
// block is connected to the end of the main chain.
func (n *NeutrinoNotifier) onFilteredBlockConnected(height int32,
        header *wire.BlockHeader, txns []*btcutil.Tx) {

        // Append this new chain update to the end of the queue of new chain
        // updates.
        select {
        case n.chainUpdates <- &filteredBlock{
                hash:    header.BlockHash(),
                height:  uint32(height),
                txns:    txns,
                header:  header,
                connect: true,
        }:
        case <-n.quit:
        }
}

// onFilteredBlockDisconnected is a callback which is executed each time a new
// block has been disconnected from the end of the main chain due to a re-org.
func (n *NeutrinoNotifier) onFilteredBlockDisconnected(height int32,
        header *wire.BlockHeader) {

        // Append this new chain update to the end of the queue of new chain
        // disconnects.
        select {
        case n.chainUpdates <- &filteredBlock{
                hash:    header.BlockHash(),
                height:  uint32(height),
                connect: false,
        }:
        case <-n.quit:
        }
}

// relevantTx represents a relevant transaction to the notifier that fulfills
// any outstanding spend requests.
type relevantTx struct {
        tx      *btcutil.Tx
        details *btcjson.BlockDetails
}

// onRelevantTx is a callback that proxies relevant transaction notifications
// from the backend to the notifier's main event handler.
func (n *NeutrinoNotifier) onRelevantTx(tx *btcutil.Tx, details *btcjson.BlockDetails) {
        select {
        case n.txUpdates.ChanIn() <- &relevantTx{tx, details}:
        case <-n.quit:
        }
}

// connectFilteredBlock is called when we receive a filteredBlock from the
// backend. If the block is ahead of what we're expecting, we'll attempt to
// catch up and then process the block.
func (n *NeutrinoNotifier) connectFilteredBlock(update *filteredBlock) {
        n.bestBlockMtx.Lock()
        defer n.bestBlockMtx.Unlock()

        if update.height != uint32(n.bestBlock.Height+1) {
                chainntnfs.Log.Infof("Missed blocks, attempting to catch up")

                _, missedBlocks, err := chainntnfs.HandleMissedBlocks(
                        n.chainConn, n.txNotifier, n.bestBlock,
                        int32(update.height), false,
                )
                if err != nil {
                        chainntnfs.Log.Error(err)
                        return
                }

                for _, block := range missedBlocks {
                        filteredBlock, err := n.getFilteredBlock(block)
                        if err != nil {
                                chainntnfs.Log.Error(err)
                                return
                        }
                        err = n.handleBlockConnected(filteredBlock)
                        if err != nil {
                                chainntnfs.Log.Error(err)
                                return
                        }
                }
        }

        err := n.handleBlockConnected(update)
        if err != nil {
                chainntnfs.Log.Error(err)
        }
}

// disconnectFilteredBlock is called when our disconnected filtered block
// callback is fired. It attempts to rewind the chain to before the
// disconnection and updates our best block.
func (n *NeutrinoNotifier) disconnectFilteredBlock(update *filteredBlock) {
        n.bestBlockMtx.Lock()
        defer n.bestBlockMtx.Unlock()

        if update.height != uint32(n.bestBlock.Height) {
                chainntnfs.Log.Infof("Missed disconnected blocks, attempting" +
                        " to catch up")
        }
        newBestBlock, err := chainntnfs.RewindChain(n.chainConn, n.txNotifier,
                n.bestBlock, int32(update.height-1),
        )
        if err != nil {
                chainntnfs.Log.Errorf("Unable to rewind chain from height %d "+
                        "to height %d: %v", n.bestBlock.Height,
                        update.height-1, err,
                )
        }

        n.bestBlock = newBestBlock
}

// drainChainUpdates is called after updating the filter. It reads every
// buffered item off the chan and returns when no more are available. It is
// used to ensure that callers performing a historical scan properly update
// their EndHeight to scan blocks that did not have the filter applied at
// processing time. Without this, a race condition exists that could allow a
// spend or confirmation notification to be missed. It is unlikely this would
// occur in a real-world scenario, and instead would manifest itself in tests.
func (n *NeutrinoNotifier) drainChainUpdates() {
        for {
                select {
                case update := <-n.chainUpdates:
                        if update.connect {
                                n.connectFilteredBlock(update)
                                break
                        }
                        n.disconnectFilteredBlock(update)
                default:
                        return
                }
        }
}

// notificationDispatcher is the primary goroutine which handles client
// notification registrations, as well as notification dispatches.
func (n *NeutrinoNotifier) notificationDispatcher() {
        defer n.wg.Done()

        for {
                select {
                case cancelMsg := <-n.notificationCancels:
                        switch msg := cancelMsg.(type) {
                        case *epochCancel:
                                chainntnfs.Log.Infof("Cancelling epoch "+
                                        "notification, epoch_id=%v", msg.epochID)

                                // First, we'll lookup the original
                                // registration in order to stop the active
                                // queue goroutine.
                                reg := n.blockEpochClients[msg.epochID]
                                reg.epochQueue.Stop()

                                // Next, close the cancel channel for this
                                // specific client, and wait for the client to
                                // exit.
                                close(n.blockEpochClients[msg.epochID].cancelChan)
                                n.blockEpochClients[msg.epochID].wg.Wait()

                                // Once the client has exited, we can then
                                // safely close the channel used to send epoch
                                // notifications, in order to notify any
                                // listeners that the intent has been
                                // canceled.
                                close(n.blockEpochClients[msg.epochID].epochChan)
                                delete(n.blockEpochClients, msg.epochID)
                        }

                case registerMsg := <-n.notificationRegistry:
                        switch msg := registerMsg.(type) {
                        case *chainntnfs.HistoricalConfDispatch:
                                // We'll start a historical rescan of the
                                // chain asynchronously to prevent blocking
                                // potentially long rescans.
                                n.wg.Add(1)

                                //nolint:ll
                                go func(msg *chainntnfs.HistoricalConfDispatch) {
                                        defer n.wg.Done()

                                        confDetails, err := n.historicalConfDetails(
                                                msg.ConfRequest,
                                                msg.StartHeight, msg.EndHeight,
                                        )
                                        if err != nil {
                                                chainntnfs.Log.Error(err)
                                                return
                                        }

                                        // If the historical dispatch finished
                                        // without error, we will invoke
                                        // UpdateConfDetails even if none were
                                        // found. This allows the notifier to
                                        // begin safely updating the height hint
                                        // cache at tip, since any pending
                                        // rescans have now completed.
                                        err = n.txNotifier.UpdateConfDetails(
                                                msg.ConfRequest, confDetails,
                                        )
                                        if err != nil {
                                                chainntnfs.Log.Error(err)
                                        }
                                }(msg)

                        case *blockEpochRegistration:
                                chainntnfs.Log.Infof("New block epoch subscription")

                                n.blockEpochClients[msg.epochID] = msg

                                // If the client did not provide their best
                                // known block, then we'll immediately dispatch
                                // a notification for the current tip.
                                if msg.bestBlock == nil {
                                        n.notifyBlockEpochClient(
                                                msg, n.bestBlock.Height,
                                                n.bestBlock.Hash,
                                                n.bestBlock.BlockHeader,
                                        )

                                        msg.errorChan <- nil
                                        continue
                                }

                                // Otherwise, we'll attempt to deliver the
                                // backlog of notifications from their best
                                // known block.
                                n.bestBlockMtx.Lock()
                                bestHeight := n.bestBlock.Height
                                n.bestBlockMtx.Unlock()

                                missedBlocks, err := chainntnfs.GetClientMissedBlocks(
                                        n.chainConn, msg.bestBlock, bestHeight,
                                        false,
                                )
                                if err != nil {
                                        msg.errorChan <- err
                                        continue
                                }

                                for _, block := range missedBlocks {
                                        n.notifyBlockEpochClient(
                                                msg, block.Height, block.Hash,
                                                block.BlockHeader,
                                        )
                                }

                                msg.errorChan <- nil

                        case *rescanFilterUpdate:
                                err := n.chainView.Update(msg.updateOptions...)
                                if err != nil {
                                        chainntnfs.Log.Errorf("Unable to "+
                                                "update rescan filter: %v", err)
                                }

                                // Drain the chainUpdates chan so the caller
                                // listening on errChan can be sure that
                                // updates after receiving the error will have
                                // the filter applied. This allows the caller
                                // to update their EndHeight if they're
                                // performing a historical scan.
                                n.drainChainUpdates()

                                // After draining, send the error to the
                                // caller.
                                msg.errChan <- err
                        }

                case item := <-n.chainUpdates:
                        update := item
                        if update.connect {
                                n.connectFilteredBlock(update)
                                continue
                        }

                        n.disconnectFilteredBlock(update)

                case txUpdate := <-n.txUpdates.ChanOut():
                        // A new relevant transaction notification has been
                        // received from the backend. We'll attempt to process
                        // it to determine if it fulfills any outstanding
                        // confirmation and/or spend requests and dispatch
                        // notifications for them.
                        update := txUpdate.(*relevantTx)
                        err := n.txNotifier.ProcessRelevantSpendTx(
                                update.tx, uint32(update.details.Height),
                        )
                        if err != nil {
                                chainntnfs.Log.Errorf("Unable to process "+
                                        "transaction %v: %v", update.tx.Hash(),
                                        err)
                        }

                case err := <-n.rescanErr:
                        chainntnfs.Log.Errorf("Error during rescan: %v", err)

                case <-n.quit:
                        return
                }
        }
}

// historicalConfDetails looks up whether a confirmation request (txid/output
// script) has already been included in a block in the active chain and, if so,
// returns details about said block.
func (n *NeutrinoNotifier) historicalConfDetails(confRequest chainntnfs.ConfRequest,
        startHeight, endHeight uint32) (*chainntnfs.TxConfirmation, error) {

        // Starting from the end height, we'll walk backwards in the chain to
        // see if this transaction/output script has already been confirmed.
        for scanHeight := endHeight; scanHeight >= startHeight && scanHeight > 0; scanHeight-- {
                // Ensure we haven't been requested to shut down before
                // processing the next height.
                select {
                case <-n.quit:
                        return nil, chainntnfs.ErrChainNotifierShuttingDown
                default:
                }

                // First, we'll fetch the block header for this height so we
                // can compute the current block hash.
                blockHash, err := n.p2pNode.GetBlockHash(int64(scanHeight))
                if err != nil {
                        return nil, fmt.Errorf("unable to get header for "+
                                "height=%v: %w", scanHeight, err)
                }

                // With the hash computed, we can now fetch the basic filter for this
                // height. Since the range of required items is known we avoid
                // roundtrips by requesting a batched response and save bandwidth by
                // limiting the max number of items per batch. Since neutrino populates
                // its underlying filter cache with the batch response, the next call
                // will execute a network query only once per batch and not on every
                // iteration.
                regFilter, err := n.p2pNode.GetCFilter(
                        *blockHash, wire.GCSFilterRegular,
                        neutrino.NumRetries(5),
                        neutrino.OptimisticReverseBatch(),
                        neutrino.MaxBatchSize(int64(scanHeight-startHeight+1)),
                )
                if err != nil {
                        return nil, fmt.Errorf("unable to retrieve regular "+
                                "filter for height=%v: %w", scanHeight, err)
                }

                // In the case that the filter exists, we'll attempt to see if
                // any element in it matches our target public key script.
                key := builder.DeriveKey(blockHash)
                match, err := regFilter.Match(key, confRequest.PkScript.Script())
                if err != nil {
                        return nil, fmt.Errorf("unable to query filter: %w",
                                err)
                }

                // If there's no match, then we can continue forward to the
                // next block.
                if !match {
                        continue
                }

                // In the case that we do have a match, we'll fetch the block
                // from the network so we can find the positional data required
                // to send the proper response.
                block, err := n.GetBlock(*blockHash)
                if err != nil {
                        return nil, fmt.Errorf("unable to get block from "+
                                "network: %w", err)
                }

                // For every transaction in the block, check which one matches
                // our request. If we find one that does, we can dispatch its
                // confirmation details.
                for i, tx := range block.Transactions() {
                        if !confRequest.MatchesTx(tx.MsgTx()) {
                                continue
                        }

                        return &chainntnfs.TxConfirmation{
                                Tx:          tx.MsgTx().Copy(),
                                BlockHash:   blockHash,
                                BlockHeight: scanHeight,
                                TxIndex:     uint32(i),
                                Block:       block.MsgBlock(),
                        }, nil
                }
        }

        return nil, nil
}

// handleBlockConnected applies a chain update for a new block. Any watched
// transactions included in this block will be processed to either send
// notifications now or after numConfirmations confs.
//
// NOTE: This method must be called with the bestBlockMtx lock held.
func (n *NeutrinoNotifier) handleBlockConnected(newBlock *filteredBlock) error {
        // We'll extend the txNotifier's height with the information of this
        // new block, which will handle all of the notification logic for us.
        //
        // We actually need the _full_ block here as well in order to be able
        // to send the full block back up to the client. The neutrino client
        // itself will only dispatch a block if one of the items we're looking
        // for matches, so ultimately passing it the full block will still only
        // result in the items we care about being dispatched.
        rawBlock, err := n.GetBlock(newBlock.hash)
        if err != nil {
                return fmt.Errorf("unable to get full block: %w", err)
        }
        err = n.txNotifier.ConnectTip(rawBlock, newBlock.height)
        if err != nil {
                return fmt.Errorf("unable to connect tip: %w", err)
        }

        chainntnfs.Log.Infof("New block: height=%v, sha=%v", newBlock.height,
                newBlock.hash)

        // Now that we've guaranteed the new block extends the txNotifier's
        // current tip, we'll proceed to dispatch notifications to all of our
        // registered clients who have had notifications fulfilled. Before
        // doing so, we'll make sure to update our in-memory state in order to
        // satisfy any client requests based upon the new block.
        n.bestBlock.Hash = &newBlock.hash
        n.bestBlock.Height = int32(newBlock.height)
        n.bestBlock.BlockHeader = newBlock.header

        err = n.txNotifier.NotifyHeight(newBlock.height)
        if err != nil {
                return fmt.Errorf("unable to notify height: %w", err)
        }

        n.notifyBlockEpochs(
                int32(newBlock.height), &newBlock.hash, newBlock.header,
        )

        return nil
}

// getFilteredBlock is a utility to retrieve the full filtered block from a block epoch.
func (n *NeutrinoNotifier) getFilteredBlock(epoch chainntnfs.BlockEpoch) (*filteredBlock, error) {
        rawBlock, err := n.GetBlock(*epoch.Hash)
        if err != nil {
                return nil, fmt.Errorf("unable to get block: %w", err)
        }

        txns := rawBlock.Transactions()

        block := &filteredBlock{
                hash:    *epoch.Hash,
                height:  uint32(epoch.Height),
                header:  &rawBlock.MsgBlock().Header,
                txns:    txns,
                connect: true,
        }
        return block, nil
}

// notifyBlockEpochs notifies all registered block epoch clients of the newly
// connected block to the main chain.
func (n *NeutrinoNotifier) notifyBlockEpochs(newHeight int32, newSha *chainhash.Hash,
        blockHeader *wire.BlockHeader) {

        for _, client := range n.blockEpochClients {
                n.notifyBlockEpochClient(client, newHeight, newSha, blockHeader)
        }
}

// notifyBlockEpochClient sends a registered block epoch client a notification
// about a specific block.
func (n *NeutrinoNotifier) notifyBlockEpochClient(epochClient *blockEpochRegistration,
        height int32, sha *chainhash.Hash, blockHeader *wire.BlockHeader) {

        epoch := &chainntnfs.BlockEpoch{
                Height:      height,
                Hash:        sha,
                BlockHeader: blockHeader,
        }

        select {
        case epochClient.epochQueue.ChanIn() <- epoch:
        case <-epochClient.cancelChan:
        case <-n.quit:
        }
}

// RegisterSpendNtfn registers an intent to be notified once the target
// outpoint/output script has been spent by a transaction on-chain. When
// intending to be notified of the spend of an output script, a nil outpoint
// must be used. The heightHint should represent the earliest height in the
// chain of the transaction that spent the outpoint/output script.
//
// Once a spend has been detected, the details of the spending event will be
// sent across the 'Spend' channel.
func (n *NeutrinoNotifier) RegisterSpendNtfn(outpoint *wire.OutPoint,
        pkScript []byte, heightHint uint32) (*chainntnfs.SpendEvent, error) {

        // Register the spend notification with the TxNotifier. A non-nil
        // value for `HistoricalDispatch` will be returned if we are required
        // to perform a manual scan for the spend. Otherwise the notifier will
        // begin watching at tip for the outpoint/output script to be spent.
        ntfn, err := n.txNotifier.RegisterSpend(outpoint, pkScript, heightHint)
        if err != nil {
                return nil, err
        }

        // To determine whether this outpoint has been spent on-chain, we'll
        // update our filter to watch for the transaction at tip and we'll also
        // dispatch a historical rescan to determine if it has been spent in the
        // past.
        //
        // We'll update our filter first to ensure we can immediately detect the
        // spend at tip.
        if outpoint == nil {
                outpoint = &chainntnfs.ZeroOutPoint
        }
        inputToWatch := neutrino.InputWithScript{
                OutPoint: *outpoint,
                PkScript: pkScript,
        }
        updateOptions := []neutrino.UpdateOption{
                neutrino.AddInputs(inputToWatch),
                neutrino.DisableDisconnectedNtfns(true),
        }

        // We'll use the txNotifier's tip as the starting point of our filter
        // update. In the case of an output script spend request, we'll check if
        // we should perform a historical rescan and start from there, as we
        // cannot do so with GetUtxo since it matches outpoints.
        rewindHeight := ntfn.Height
        if ntfn.HistoricalDispatch != nil && *outpoint == chainntnfs.ZeroOutPoint {
                rewindHeight = ntfn.HistoricalDispatch.StartHeight
        }
        updateOptions = append(updateOptions, neutrino.Rewind(rewindHeight))

        errChan := make(chan error, 1)
        select {
        case n.notificationRegistry <- &rescanFilterUpdate{
                updateOptions: updateOptions,
                errChan:       errChan,
        }:
        case <-n.quit:
                return nil, chainntnfs.ErrChainNotifierShuttingDown
        }

        select {
        case err = <-errChan:
        case <-n.quit:
                return nil, chainntnfs.ErrChainNotifierShuttingDown
        }
        if err != nil {
                return nil, fmt.Errorf("unable to update filter: %w", err)
        }

        // If the txNotifier didn't return any details to perform a historical
        // scan of the chain, or if we already performed one like in the case of
        // output script spend requests, then we can return early as there's
        // nothing left for us to do.
        if ntfn.HistoricalDispatch == nil || *outpoint == chainntnfs.ZeroOutPoint {
                return ntfn.Event, nil
        }

        // Grab the current best height as the height may have been updated
        // while we were draining the chainUpdates queue.
        n.bestBlockMtx.RLock()
        currentHeight := uint32(n.bestBlock.Height)
        n.bestBlockMtx.RUnlock()

        ntfn.HistoricalDispatch.EndHeight = currentHeight

        // With the filter updated, we'll dispatch our historical rescan to
        // ensure we detect the spend if it happened in the past.
        n.wg.Add(1)
        go func() {
                defer n.wg.Done()

                // We'll ensure that neutrino is caught up to the starting
                // height before we attempt to fetch the UTXO from the chain.
                // If we're behind, then we may miss a notification dispatch.
                for {
                        n.bestBlockMtx.RLock()
                        currentHeight := uint32(n.bestBlock.Height)
                        n.bestBlockMtx.RUnlock()

                        if currentHeight >= ntfn.HistoricalDispatch.StartHeight {
                                break
                        }

                        select {
                        case <-time.After(time.Millisecond * 200):
                        case <-n.quit:
                                return
                        }
                }

                spendReport, err := n.p2pNode.GetUtxo(
                        neutrino.WatchInputs(inputToWatch),
                        neutrino.StartBlock(&headerfs.BlockStamp{
                                Height: int32(ntfn.HistoricalDispatch.StartHeight),
                        }),
                        neutrino.EndBlock(&headerfs.BlockStamp{
                                Height: int32(ntfn.HistoricalDispatch.EndHeight),
                        }),
                        neutrino.ProgressHandler(func(processedHeight uint32) {
                                // We persist the rescan progress to achieve incremental
                                // behavior across restarts, otherwise long rescans may
                                // start from the beginning with every restart.
                                err := n.spendHintCache.CommitSpendHint(
                                        processedHeight,
                                        ntfn.HistoricalDispatch.SpendRequest)
                                if err != nil {
                                        chainntnfs.Log.Errorf("Failed to update rescan "+
                                                "progress: %v", err)
                                }
                        }),
                        neutrino.QuitChan(n.quit),
                )
                if err != nil && !strings.Contains(err.Error(), "not found") {
                        chainntnfs.Log.Errorf("Failed getting UTXO: %v", err)
                        return
                }

                // If a spend report was returned, and the transaction is present, then
                // this means that the output is already spent.
                var spendDetails *chainntnfs.SpendDetail
                if spendReport != nil && spendReport.SpendingTx != nil {
                        spendingTxHash := spendReport.SpendingTx.TxHash()
                        spendDetails = &chainntnfs.SpendDetail{
                                SpentOutPoint:     outpoint,
                                SpenderTxHash:     &spendingTxHash,
                                SpendingTx:        spendReport.SpendingTx,
                                SpenderInputIndex: spendReport.SpendingInputIndex,
                                SpendingHeight:    int32(spendReport.SpendingTxHeight),
                        }
                }

                // Finally, no matter whether the rescan found a spend in the past or
                // not, we'll mark our historical rescan as complete to ensure the
                // outpoint's spend hint gets updated upon connected/disconnected
                // blocks.
                err = n.txNotifier.UpdateSpendDetails(
                        ntfn.HistoricalDispatch.SpendRequest, spendDetails,
                )
                if err != nil {
                        chainntnfs.Log.Errorf("Failed to update spend details: %v", err)
                        return
                }
        }()

        return ntfn.Event, nil
}
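
// exampleWatchSpend is a minimal usage sketch (not part of lnd itself) of the
// RegisterSpendNtfn flow above: the outpoint, pkScript and heightHint are
// assumed to come from the caller's own wallet state, and the result is read
// from the SpendEvent's Spend channel named in the doc comment.
func exampleWatchSpend(n *NeutrinoNotifier, op *wire.OutPoint,
        pkScript []byte, heightHint uint32) error {

        spendEvent, err := n.RegisterSpendNtfn(op, pkScript, heightHint)
        if err != nil {
                return err
        }

        // Block until the spending details are delivered. The ok check
        // guards against the channel being closed on cancel/shutdown.
        detail, ok := <-spendEvent.Spend
        if !ok {
                return errors.New("spend registration terminated")
        }

        chainntnfs.Log.Infof("Outpoint %v spent by %v at height %v",
                detail.SpentOutPoint, detail.SpenderTxHash,
                detail.SpendingHeight)

        return nil
}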

// RegisterConfirmationsNtfn registers an intent to be notified once the target
// txid/output script has reached numConfs confirmations on-chain. When
// intending to be notified of the confirmation of an output script, a nil txid
// must be used. The heightHint should represent the earliest height at which
// the txid/output script could have been included in the chain.
//
// Progress on the number of confirmations left can be read from the 'Updates'
// channel. Once it has reached all of its confirmations, a notification will be
// sent across the 'Confirmed' channel.
func (n *NeutrinoNotifier) RegisterConfirmationsNtfn(txid *chainhash.Hash,
        pkScript []byte, numConfs, heightHint uint32,
        opts ...chainntnfs.NotifierOption) (*chainntnfs.ConfirmationEvent, error) {

        // Register the conf notification with the TxNotifier. A non-nil value
        // for `HistoricalDispatch` will be returned if we are required to
        // perform a manual scan for the confirmation. Otherwise the notifier
        // will begin watching at tip for the transaction to confirm.
        ntfn, err := n.txNotifier.RegisterConf(
                txid, pkScript, numConfs, heightHint, opts...,
        )
        if err != nil {
                return nil, err
        }

        // To determine whether this transaction has confirmed on-chain, we'll
        // update our filter to watch for the transaction at tip and we'll also
        // dispatch a historical rescan to determine if it has confirmed in the
        // past.
        //
        // We'll update our filter first to ensure we can immediately detect the
        // confirmation at tip. To do so, we'll map the script into an address
        // type so we can instruct neutrino to match if the transaction
        // containing the script is found in a block.
        params := n.p2pNode.ChainParams()
        _, addrs, _, err := txscript.ExtractPkScriptAddrs(pkScript, &params)
        if err != nil {
                return nil, fmt.Errorf("unable to extract script: %w", err)
        }

        // We'll send the filter update request to the notifier's main event
        // handler and wait for its response.
        errChan := make(chan error, 1)
        select {
        case n.notificationRegistry <- &rescanFilterUpdate{
                updateOptions: []neutrino.UpdateOption{
                        neutrino.AddAddrs(addrs...),
                        neutrino.Rewind(ntfn.Height),
                        neutrino.DisableDisconnectedNtfns(true),
                },
                errChan: errChan,
        }:
        case <-n.quit:
                return nil, chainntnfs.ErrChainNotifierShuttingDown
        }

        select {
        case err = <-errChan:
        case <-n.quit:
                return nil, chainntnfs.ErrChainNotifierShuttingDown
        }
        if err != nil {
                return nil, fmt.Errorf("unable to update filter: %w", err)
        }

        // If a historical rescan was not requested by the txNotifier, then we
        // can return to the caller.
        if ntfn.HistoricalDispatch == nil {
                return ntfn.Event, nil
        }

        // Grab the current best height as the height may have been updated
        // while we were draining the chainUpdates queue.
        n.bestBlockMtx.RLock()
        currentHeight := uint32(n.bestBlock.Height)
        n.bestBlockMtx.RUnlock()

        ntfn.HistoricalDispatch.EndHeight = currentHeight

        // Finally, with the filter updated, we can dispatch the historical
        // rescan to ensure we can detect if the event happened in the past.
        select {
        case n.notificationRegistry <- ntfn.HistoricalDispatch:
        case <-n.quit:
                return nil, chainntnfs.ErrChainNotifierShuttingDown
        }

        return ntfn.Event, nil
}
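
// exampleWaitForConf is a minimal usage sketch (not part of lnd itself) of
// the RegisterConfirmationsNtfn flow above, draining the Updates and
// Confirmed channels named in the doc comment. txid, pkScript, numConfs and
// heightHint are assumed to be supplied by the caller.
func exampleWaitForConf(n *NeutrinoNotifier, txid *chainhash.Hash,
        pkScript []byte, numConfs, heightHint uint32) error {

        confEvent, err := n.RegisterConfirmationsNtfn(
                txid, pkScript, numConfs, heightHint,
        )
        if err != nil {
                return err
        }

        for {
                select {
                // Each update reports how many confirmations are still
                // outstanding.
                case confsLeft, ok := <-confEvent.Updates:
                        if !ok {
                                return errors.New("conf registration terminated")
                        }
                        chainntnfs.Log.Debugf("Still waiting on %v confs",
                                confsLeft)

                // Once numConfs is reached, the confirming block's details
                // arrive here.
                case conf, ok := <-confEvent.Confirmed:
                        if !ok {
                                return errors.New("conf registration terminated")
                        }
                        chainntnfs.Log.Infof("Confirmed in block %v "+
                                "(height=%v)", conf.BlockHash, conf.BlockHeight)
                        return nil
                }
        }
}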

// GetBlock is used to retrieve the block with the given hash. Since the block
// cache used by neutrino will be the same as that used by LND (since it is
// passed to neutrino on initialisation), the neutrino GetBlock method can be
// called directly since it already uses the block cache. However, neutrino
// does not lock the block cache mutex for the given block hash and so that is
// done here.
func (n *NeutrinoNotifier) GetBlock(hash chainhash.Hash) (
        *btcutil.Block, error) {

        n.blockCache.HashMutex.Lock(lntypes.Hash(hash))
        defer n.blockCache.HashMutex.Unlock(lntypes.Hash(hash))

        return n.p2pNode.GetBlock(hash)
}

// blockEpochRegistration represents a client's intent to receive a
// notification with each newly connected block.
type blockEpochRegistration struct {
        epochID uint64

        epochChan chan *chainntnfs.BlockEpoch

        epochQueue *queue.ConcurrentQueue

        cancelChan chan struct{}

        bestBlock *chainntnfs.BlockEpoch

        errorChan chan error

        wg sync.WaitGroup
}

// epochCancel is a message sent to the NeutrinoNotifier when a client wishes
// to cancel an outstanding epoch notification that has yet to be dispatched.
type epochCancel struct {
        epochID uint64
}

// RegisterBlockEpochNtfn returns a BlockEpochEvent which subscribes the
// caller to receive notifications of each new block connected to the main
// chain. Clients have the option of passing in their best known block, which
// the notifier uses to check if they are behind on blocks and catch them up. If
// they do not provide one, then a notification will be dispatched immediately
// for the current tip of the chain upon a successful registration.
func (n *NeutrinoNotifier) RegisterBlockEpochNtfn(
        bestBlock *chainntnfs.BlockEpoch) (*chainntnfs.BlockEpochEvent, error) {

        reg := &blockEpochRegistration{
                epochQueue: queue.NewConcurrentQueue(20),
                epochChan:  make(chan *chainntnfs.BlockEpoch, 20),
                cancelChan: make(chan struct{}),
                epochID:    atomic.AddUint64(&n.epochClientCounter, 1),
                bestBlock:  bestBlock,
                errorChan:  make(chan error, 1),
        }
        reg.epochQueue.Start()

        // Before we send the request to the main goroutine, we'll launch a new
        // goroutine to proxy items added to our queue to the client itself.
        // This ensures that all notifications are received *in order*.
        reg.wg.Add(1)
        go func() {
                defer reg.wg.Done()

                for {
                        select {
                        case ntfn := <-reg.epochQueue.ChanOut():
                                blockNtfn := ntfn.(*chainntnfs.BlockEpoch)
                                select {
                                case reg.epochChan <- blockNtfn:

                                case <-reg.cancelChan:
                                        return

                                case <-n.quit:
                                        return
                                }

                        case <-reg.cancelChan:
                                return

                        case <-n.quit:
                                return
                        }
                }
        }()

        select {
        case <-n.quit:
                // As we're exiting before the registration could be sent,
                // we'll stop the queue now ourselves.
                reg.epochQueue.Stop()

                return nil, errors.New("chainntnfs: system interrupt while " +
                        "attempting to register for block epoch notification.")
        case n.notificationRegistry <- reg:
                return &chainntnfs.BlockEpochEvent{
                        Epochs: reg.epochChan,
                        Cancel: func() {
                                cancel := &epochCancel{
                                        epochID: reg.epochID,
                                }

                                // Submit epoch cancellation to notification dispatcher.
                                select {
                                case n.notificationCancels <- cancel:
                                        // Cancellation is being handled, drain the epoch channel until it is
                                        // closed before yielding to caller.
                                        for {
                                                select {
                                                case _, ok := <-reg.epochChan:
                                                        if !ok {
                                                                return
                                                        }
                                                case <-n.quit:
                                                        return
                                                }
                                        }
                                case <-n.quit:
                                }
                        },
                }, nil
        }
}
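
// exampleBlockEpochs is a minimal usage sketch (not part of lnd itself) of
// the RegisterBlockEpochNtfn flow above: passing a nil best block yields an
// immediate notification for the current tip, after which each connected
// block is read from the Epochs channel.
func exampleBlockEpochs(n *NeutrinoNotifier, blocksToRead int) error {
        epochEvent, err := n.RegisterBlockEpochNtfn(nil)
        if err != nil {
                return err
        }
        // Cancel tears down this registration once we're done with it.
        defer epochEvent.Cancel()

        for i := 0; i < blocksToRead; i++ {
                epoch, ok := <-epochEvent.Epochs
                if !ok {
                        return errors.New("epoch registration terminated")
                }
                chainntnfs.Log.Infof("New tip: height=%v hash=%v",
                        epoch.Height, epoch.Hash)
        }

        return nil
}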

// NeutrinoChainConn is a wrapper around neutrino's chain backend in order
// to satisfy the chainntnfs.ChainConn interface.
type NeutrinoChainConn struct {
        p2pNode *neutrino.ChainService
}

// GetBlockHeader returns the block header for a hash.
func (n *NeutrinoChainConn) GetBlockHeader(blockHash *chainhash.Hash) (*wire.BlockHeader, error) {
        return n.p2pNode.GetBlockHeader(blockHash)
}

// GetBlockHeaderVerbose returns a verbose block header result for a hash. This
// result only contains the height with a nil hash.
func (n *NeutrinoChainConn) GetBlockHeaderVerbose(blockHash *chainhash.Hash) (
        *btcjson.GetBlockHeaderVerboseResult, error) {

        height, err := n.p2pNode.GetBlockHeight(blockHash)
        if err != nil {
                return nil, err
        }
        // Since only the height is used from the result, leave the hash nil.
        return &btcjson.GetBlockHeaderVerboseResult{Height: int32(height)}, nil
}

// GetBlockHash returns the hash from a block height.
func (n *NeutrinoChainConn) GetBlockHash(blockHeight int64) (*chainhash.Hash, error) {
        return n.p2pNode.GetBlockHash(blockHeight)
}