• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

lightningnetwork / lnd / 10204896993

01 Aug 2024 07:57PM UTC coverage: 58.591% (-0.08%) from 58.674%
10204896993

push

github

web-flow
Merge pull request #8962 from ProofOfKeags/refactor/quiescence-micro-spinoffs

[NANO]: Refactor/quiescence micro spinoffs

3 of 4 new or added lines in 2 files covered. (75.0%)

242 existing lines in 26 files now uncovered.

125214 of 213710 relevant lines covered (58.59%)

28092.24 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

84.26
/chainntnfs/neutrinonotify/neutrino.go
1
package neutrinonotify
2

3
import (
4
        "errors"
5
        "fmt"
6
        "strings"
7
        "sync"
8
        "sync/atomic"
9
        "time"
10

11
        "github.com/btcsuite/btcd/btcjson"
12
        "github.com/btcsuite/btcd/btcutil"
13
        "github.com/btcsuite/btcd/btcutil/gcs/builder"
14
        "github.com/btcsuite/btcd/chaincfg/chainhash"
15
        "github.com/btcsuite/btcd/rpcclient"
16
        "github.com/btcsuite/btcd/txscript"
17
        "github.com/btcsuite/btcd/wire"
18
        "github.com/lightninglabs/neutrino"
19
        "github.com/lightninglabs/neutrino/headerfs"
20
        "github.com/lightningnetwork/lnd/blockcache"
21
        "github.com/lightningnetwork/lnd/chainntnfs"
22
        "github.com/lightningnetwork/lnd/lntypes"
23
        "github.com/lightningnetwork/lnd/queue"
24
)
25

26
const (
        // notifierType uniquely identifies this concrete implementation of the
        // ChainNotifier interface. It is used as the registration key for this
        // backend.
        notifierType = "neutrino"
)
31

32
// NeutrinoNotifier is a version of ChainNotifier that's backed by the neutrino
// Bitcoin light client. Unlike other implementations, this implementation
// speaks directly to the p2p network. As a result, this implementation of the
// ChainNotifier interface is much more lightweight than other implementations
// which rely on receiving notifications over an RPC interface backed by a
// running full node.
//
// TODO(roasbeef): heavily consolidate with NeutrinoNotifier code
//   - maybe combine into single package?
type NeutrinoNotifier struct {
        // epochClientCounter is the source of unique IDs handed out to block
        // epoch clients. To be used atomically.
        epochClientCounter uint64 // To be used atomically.

        // start ensures startNotifier only ever runs once (see Start).
        start   sync.Once
        active  int32 // To be used atomically.
        stopped int32 // To be used atomically.

        // bestBlockMtx guards bestBlock, which tracks the notifier's current
        // view of the chain tip.
        bestBlockMtx sync.RWMutex
        bestBlock    chainntnfs.BlockEpoch

        // p2pNode is the backing neutrino light client; chainView is the
        // rescan we run against it to receive block/tx notifications.
        p2pNode   *neutrino.ChainService
        chainView *neutrino.Rescan

        // chainConn adapts the neutrino node to the interface expected by the
        // chainntnfs helper routines (HandleMissedBlocks, RewindChain, ...).
        chainConn *NeutrinoChainConn

        // notificationCancels and notificationRegistry feed cancellation and
        // registration requests into the notificationDispatcher goroutine.
        notificationCancels  chan interface{}
        notificationRegistry chan interface{}

        // txNotifier implements the confirmation/spend notification logic.
        // NOTE: only assigned once startNotifier has run.
        txNotifier *chainntnfs.TxNotifier

        // blockEpochClients holds the active block epoch registrations, keyed
        // by their unique epoch ID.
        blockEpochClients map[uint64]*blockEpochRegistration

        // rescanErr receives any error produced by the running rescan.
        rescanErr <-chan error

        // chainUpdates buffers filtered block (dis)connect events delivered by
        // the rescan callbacks.
        chainUpdates chan *filteredBlock

        // txUpdates queues relevant transaction notifications from the
        // backend.
        txUpdates *queue.ConcurrentQueue

        // spendHintCache is a cache used to query and update the latest height
        // hints for an outpoint. Each height hint represents the earliest
        // height at which the outpoint could have been spent within the chain.
        spendHintCache chainntnfs.SpendHintCache

        // confirmHintCache is a cache used to query the latest height hints for
        // a transaction. Each height hint represents the earliest height at
        // which the transaction could have confirmed within the chain.
        confirmHintCache chainntnfs.ConfirmHintCache

        // blockCache is an LRU block cache.
        blockCache *blockcache.BlockCache

        wg   sync.WaitGroup
        quit chan struct{}
}
85

86
// Ensure NeutrinoNotifier implements the ChainNotifier interface at compile time.
var _ chainntnfs.ChainNotifier = (*NeutrinoNotifier)(nil)
88

89
// New creates a new instance of the NeutrinoNotifier concrete implementation
90
// of the ChainNotifier interface.
91
//
92
// NOTE: The passed neutrino node should already be running and active before
93
// being passed into this function.
94
func New(node *neutrino.ChainService, spendHintCache chainntnfs.SpendHintCache,
95
        confirmHintCache chainntnfs.ConfirmHintCache,
96
        blockCache *blockcache.BlockCache) *NeutrinoNotifier {
5✔
97

5✔
98
        return &NeutrinoNotifier{
5✔
99
                notificationCancels:  make(chan interface{}),
5✔
100
                notificationRegistry: make(chan interface{}),
5✔
101

5✔
102
                blockEpochClients: make(map[uint64]*blockEpochRegistration),
5✔
103

5✔
104
                p2pNode:   node,
5✔
105
                chainConn: &NeutrinoChainConn{node},
5✔
106

5✔
107
                rescanErr: make(chan error),
5✔
108

5✔
109
                chainUpdates: make(chan *filteredBlock, 100),
5✔
110

5✔
111
                txUpdates: queue.NewConcurrentQueue(10),
5✔
112

5✔
113
                spendHintCache:   spendHintCache,
5✔
114
                confirmHintCache: confirmHintCache,
5✔
115

5✔
116
                blockCache: blockCache,
5✔
117

5✔
118
                quit: make(chan struct{}),
5✔
119
        }
5✔
120
}
5✔
121

122
// Start contacts the running neutrino light client and kicks off an initial
123
// empty rescan.
124
func (n *NeutrinoNotifier) Start() error {
2✔
125
        var startErr error
2✔
126
        n.start.Do(func() {
4✔
127
                startErr = n.startNotifier()
2✔
128
        })
2✔
129
        return startErr
2✔
130
}
131

132
// Stop shuts down the NeutrinoNotifier.
133
func (n *NeutrinoNotifier) Stop() error {
4✔
134
        // Already shutting down?
4✔
135
        if atomic.AddInt32(&n.stopped, 1) != 1 {
4✔
136
                return nil
×
137
        }
×
138

139
        chainntnfs.Log.Info("neutrino notifier shutting down...")
4✔
140
        defer chainntnfs.Log.Debug("neutrino notifier shutdown complete")
4✔
141

4✔
142
        close(n.quit)
4✔
143
        n.wg.Wait()
4✔
144

4✔
145
        n.txUpdates.Stop()
4✔
146

4✔
147
        // Notify all pending clients of our shutdown by closing the related
4✔
148
        // notification channels.
4✔
149
        for _, epochClient := range n.blockEpochClients {
23✔
150
                close(epochClient.cancelChan)
19✔
151
                epochClient.wg.Wait()
19✔
152

19✔
153
                close(epochClient.epochChan)
19✔
154
        }
19✔
155
        n.txNotifier.TearDown()
4✔
156

4✔
157
        return nil
4✔
158
}
159

160
// Started returns true if this instance has been started, and false otherwise.
161
func (n *NeutrinoNotifier) Started() bool {
1✔
162
        return atomic.LoadInt32(&n.active) != 0
1✔
163
}
1✔
164

165
// startNotifier performs the one-time startup sequence: it starts the tx
// update queue, snapshots the p2p node's current tip as our best block,
// creates the TxNotifier, configures and starts the neutrino rescan, and
// finally launches the notificationDispatcher goroutine.
func (n *NeutrinoNotifier) startNotifier() error {
        // Start our concurrent queues before starting the rescan, to ensure
        // onFilteredBlockConnected and onRelevantTx callbacks won't be
        // blocked.
        n.txUpdates.Start()

        // First, we'll obtain the latest block height of the p2p node. We'll
        // start the auto-rescan from this point. Once a caller actually wishes
        // to register a chain view, the rescan state will be rewound
        // accordingly.
        startingPoint, err := n.p2pNode.BestBlock()
        if err != nil {
                // Undo the queue start so a failed startup leaves no running
                // goroutines behind.
                n.txUpdates.Stop()
                return err
        }
        startingHeader, err := n.p2pNode.GetBlockHeader(
                &startingPoint.Hash,
        )
        if err != nil {
                n.txUpdates.Stop()
                return err
        }

        // No lock needed here: the dispatcher goroutine that shares bestBlock
        // hasn't been launched yet.
        n.bestBlock.Hash = &startingPoint.Hash
        n.bestBlock.Height = startingPoint.Height
        n.bestBlock.BlockHeader = startingHeader

        n.txNotifier = chainntnfs.NewTxNotifier(
                uint32(n.bestBlock.Height), chainntnfs.ReorgSafetyLimit,
                n.confirmHintCache, n.spendHintCache,
        )

        // Next, we'll create our set of rescan options. Currently it's
        // required that a user MUST set an addr/outpoint/txid when creating a
        // rescan. To get around this, we'll add a "zero" outpoint, that won't
        // actually be matched.
        var zeroInput neutrino.InputWithScript
        rescanOptions := []neutrino.RescanOption{
                neutrino.StartBlock(startingPoint),
                neutrino.QuitChan(n.quit),
                neutrino.NotificationHandlers(
                        rpcclient.NotificationHandlers{
                                OnFilteredBlockConnected:    n.onFilteredBlockConnected,
                                OnFilteredBlockDisconnected: n.onFilteredBlockDisconnected,
                                OnRedeemingTx:               n.onRelevantTx,
                        },
                ),
                neutrino.WatchInputs(zeroInput),
        }

        // Finally, we'll create our rescan struct, start it, and launch all
        // the goroutines we need to operate this ChainNotifier instance.
        n.chainView = neutrino.NewRescan(
                &neutrino.RescanChainSource{
                        ChainService: n.p2pNode,
                },
                rescanOptions...,
        )
        n.rescanErr = n.chainView.Start()

        n.wg.Add(1)
        go n.notificationDispatcher()

        // Set the active flag now that we've completed the full
        // startup.
        atomic.StoreInt32(&n.active, 1)

        return nil
}
234

235
// filteredBlock represents a new block which has been connected to the main
// chain. The slice of transactions will only be populated if the block
// includes a transaction that confirmed one of our watched txids, or spends
// one of the outputs currently being watched.
type filteredBlock struct {
        header *wire.BlockHeader
        hash   chainhash.Hash
        height uint32
        txns   []*btcutil.Tx

        // connect is true if this update is a new block and false if it is a
        // disconnected block.
        connect bool
}
249

250
// rescanFilterUpdate represents a request that will be sent to the
// notificationRegistry in order to prevent race conditions between the filter
// update and new block notifications.
type rescanFilterUpdate struct {
        // updateOptions are forwarded to the rescan's Update call.
        updateOptions []neutrino.UpdateOption

        // errChan receives the result of the filter update once any buffered
        // chain updates have been drained.
        errChan chan error
}
257

258
// onFilteredBlockConnected is a callback which is executed each a new block is
259
// connected to the end of the main chain.
260
func (n *NeutrinoNotifier) onFilteredBlockConnected(height int32,
261
        header *wire.BlockHeader, txns []*btcutil.Tx) {
179✔
262

179✔
263
        // Append this new chain update to the end of the queue of new chain
179✔
264
        // updates.
179✔
265
        select {
179✔
266
        case n.chainUpdates <- &filteredBlock{
267
                hash:    header.BlockHash(),
268
                height:  uint32(height),
269
                txns:    txns,
270
                header:  header,
271
                connect: true,
272
        }:
179✔
273
        case <-n.quit:
×
274
        }
275
}
276

277
// onFilteredBlockDisconnected is a callback which is executed each time a new
278
// block has been disconnected from the end of the mainchain due to a re-org.
279
func (n *NeutrinoNotifier) onFilteredBlockDisconnected(height int32,
280
        header *wire.BlockHeader) {
4✔
281

4✔
282
        // Append this new chain update to the end of the queue of new chain
4✔
283
        // disconnects.
4✔
284
        select {
4✔
285
        case n.chainUpdates <- &filteredBlock{
286
                hash:    header.BlockHash(),
287
                height:  uint32(height),
288
                connect: false,
289
        }:
4✔
290
        case <-n.quit:
×
291
        }
292
}
293

294
// relevantTx represents a relevant transaction to the notifier that fulfills
// any outstanding spend requests.
type relevantTx struct {
        // tx is the relevant transaction itself.
        tx *btcutil.Tx

        // details describes the block the transaction was mined in.
        details *btcjson.BlockDetails
}
300

301
// onRelevantTx is a callback that proxies relevant transaction notifications
302
// from the backend to the notifier's main event handler.
303
func (n *NeutrinoNotifier) onRelevantTx(tx *btcutil.Tx, details *btcjson.BlockDetails) {
11✔
304
        select {
11✔
305
        case n.txUpdates.ChanIn() <- &relevantTx{tx, details}:
11✔
306
        case <-n.quit:
×
307
        }
308
}
309

310
// connectFilteredBlock is called when we receive a filteredBlock from the
// backend. If the block is ahead of what we're expecting, we'll attempt to
// catch up and then process the block.
func (n *NeutrinoNotifier) connectFilteredBlock(update *filteredBlock) {
        n.bestBlockMtx.Lock()
        defer n.bestBlockMtx.Unlock()

        // If this block doesn't directly extend our current tip, we've missed
        // one or more blocks (or a reorg occurred) and must first reconcile
        // our view of the chain before applying this update.
        if update.height != uint32(n.bestBlock.Height+1) {
                chainntnfs.Log.Infof("Missed blocks, attempting to catch up")

                _, missedBlocks, err := chainntnfs.HandleMissedBlocks(
                        n.chainConn, n.txNotifier, n.bestBlock,
                        int32(update.height), false,
                )
                if err != nil {
                        // Bail out: we can't safely process the new block
                        // without first catching up.
                        chainntnfs.Log.Error(err)
                        return
                }

                // Replay every missed block, in order, through the normal
                // block-connected path so no notifications are skipped.
                for _, block := range missedBlocks {
                        filteredBlock, err := n.getFilteredBlock(block)
                        if err != nil {
                                chainntnfs.Log.Error(err)
                                return
                        }
                        err = n.handleBlockConnected(filteredBlock)
                        if err != nil {
                                chainntnfs.Log.Error(err)
                                return
                        }
                }
        }

        // Finally, connect the block that triggered this call. The bestBlock
        // mutex is still held, as handleBlockConnected requires.
        err := n.handleBlockConnected(update)
        if err != nil {
                chainntnfs.Log.Error(err)
        }
}
348

349
// disconnectFilteredBlock is called when our disconnected filtered block
350
// callback is fired. It attempts to rewind the chain to before the
351
// disconnection and updates our best block.
352
func (n *NeutrinoNotifier) disconnectFilteredBlock(update *filteredBlock) {
4✔
353
        n.bestBlockMtx.Lock()
4✔
354
        defer n.bestBlockMtx.Unlock()
4✔
355

4✔
356
        if update.height != uint32(n.bestBlock.Height) {
4✔
357
                chainntnfs.Log.Infof("Missed disconnected blocks, attempting" +
×
358
                        " to catch up")
×
359
        }
×
360
        newBestBlock, err := chainntnfs.RewindChain(n.chainConn, n.txNotifier,
4✔
361
                n.bestBlock, int32(update.height-1),
4✔
362
        )
4✔
363
        if err != nil {
4✔
364
                chainntnfs.Log.Errorf("Unable to rewind chain from height %d"+
×
365
                        "to height %d: %v", n.bestBlock.Height,
×
366
                        update.height-1, err,
×
367
                )
×
368
        }
×
369

370
        n.bestBlock = newBestBlock
4✔
371
}
372

373
// drainChainUpdates is called after updating the filter. It reads every
374
// buffered item off the chan and returns when no more are available. It is
375
// used to ensure that callers performing a historical scan properly update
376
// their EndHeight to scan blocks that did not have the filter applied at
377
// processing time. Without this, a race condition exists that could allow a
378
// spend or confirmation notification to be missed. It is unlikely this would
379
// occur in a real-world scenario, and instead would manifest itself in tests.
380
func (n *NeutrinoNotifier) drainChainUpdates() {
75✔
381
        for {
151✔
382
                select {
76✔
383
                case update := <-n.chainUpdates:
2✔
384
                        if update.connect {
4✔
385
                                n.connectFilteredBlock(update)
2✔
386
                                break
2✔
387
                        }
388
                        n.disconnectFilteredBlock(update)
×
389
                default:
75✔
390
                        return
75✔
391
                }
392
        }
393
}
394

395
// notificationDispatcher is the primary goroutine which handles client
// notification registrations, as well as notification dispatches. It owns
// blockEpochClients and serializes all registration, cancellation, chain
// update, and relevant-tx processing, exiting once quit is closed.
func (n *NeutrinoNotifier) notificationDispatcher() {
        defer n.wg.Done()

        for {
                select {
                case cancelMsg := <-n.notificationCancels:
                        switch msg := cancelMsg.(type) {
                        case *epochCancel:
                                chainntnfs.Log.Infof("Cancelling epoch "+
                                        "notification, epoch_id=%v", msg.epochID)

                                // First, we'll lookup the original
                                // registration in order to stop the active
                                // queue goroutine.
                                reg := n.blockEpochClients[msg.epochID]
                                reg.epochQueue.Stop()

                                // Next, close the cancel channel for this
                                // specific client, and wait for the client to
                                // exit.
                                close(n.blockEpochClients[msg.epochID].cancelChan)
                                n.blockEpochClients[msg.epochID].wg.Wait()

                                // Once the client has exited, we can then
                                // safely close the channel used to send epoch
                                // notifications, in order to notify any
                                // listeners that the intent has been
                                // canceled.
                                close(n.blockEpochClients[msg.epochID].epochChan)
                                delete(n.blockEpochClients, msg.epochID)
                        }

                case registerMsg := <-n.notificationRegistry:
                        switch msg := registerMsg.(type) {
                        case *chainntnfs.HistoricalConfDispatch:
                                // We'll start a historical rescan of the
                                // chain asynchronously to prevent blocking
                                // potentially long rescans.
                                n.wg.Add(1)

                                //nolint:lll
                                go func(msg *chainntnfs.HistoricalConfDispatch) {
                                        defer n.wg.Done()

                                        confDetails, err := n.historicalConfDetails(
                                                msg.ConfRequest,
                                                msg.StartHeight, msg.EndHeight,
                                        )
                                        if err != nil {
                                                chainntnfs.Log.Error(err)
                                                return
                                        }

                                        // If the historical dispatch finished
                                        // without error, we will invoke
                                        // UpdateConfDetails even if none were
                                        // found. This allows the notifier to
                                        // begin safely updating the height hint
                                        // cache at tip, since any pending
                                        // rescans have now completed.
                                        err = n.txNotifier.UpdateConfDetails(
                                                msg.ConfRequest, confDetails,
                                        )
                                        if err != nil {
                                                chainntnfs.Log.Error(err)
                                        }
                                }(msg)

                        case *blockEpochRegistration:
                                chainntnfs.Log.Infof("New block epoch subscription")

                                n.blockEpochClients[msg.epochID] = msg

                                // If the client did not provide their best
                                // known block, then we'll immediately dispatch
                                // a notification for the current tip.
                                if msg.bestBlock == nil {
                                        n.notifyBlockEpochClient(
                                                msg, n.bestBlock.Height,
                                                n.bestBlock.Hash,
                                                n.bestBlock.BlockHeader,
                                        )

                                        msg.errorChan <- nil
                                        continue
                                }

                                // Otherwise, we'll attempt to deliver the
                                // backlog of notifications from their best
                                // known block.
                                n.bestBlockMtx.Lock()
                                bestHeight := n.bestBlock.Height
                                n.bestBlockMtx.Unlock()

                                missedBlocks, err := chainntnfs.GetClientMissedBlocks(
                                        n.chainConn, msg.bestBlock, bestHeight,
                                        false,
                                )
                                if err != nil {
                                        msg.errorChan <- err
                                        continue
                                }

                                for _, block := range missedBlocks {
                                        n.notifyBlockEpochClient(
                                                msg, block.Height, block.Hash,
                                                block.BlockHeader,
                                        )
                                }

                                msg.errorChan <- nil

                        case *rescanFilterUpdate:
                                err := n.chainView.Update(msg.updateOptions...)
                                if err != nil {
                                        chainntnfs.Log.Errorf("Unable to "+
                                                "update rescan filter: %v", err)
                                }

                                // Drain the chainUpdates chan so the caller
                                // listening on errChan can be sure that
                                // updates after receiving the error will have
                                // the filter applied. This allows the caller
                                // to update their EndHeight if they're
                                // performing a historical scan.
                                n.drainChainUpdates()

                                // After draining, send the error to the
                                // caller.
                                msg.errChan <- err
                        }

                case item := <-n.chainUpdates:
                        // A filtered block (dis)connect arrived from the
                        // rescan callbacks; route it to the matching handler.
                        update := item
                        if update.connect {
                                n.connectFilteredBlock(update)
                                continue
                        }

                        n.disconnectFilteredBlock(update)

                case txUpdate := <-n.txUpdates.ChanOut():
                        // A new relevant transaction notification has been
                        // received from the backend. We'll attempt to process
                        // it to determine if it fulfills any outstanding
                        // confirmation and/or spend requests and dispatch
                        // notifications for them.
                        update := txUpdate.(*relevantTx)
                        err := n.txNotifier.ProcessRelevantSpendTx(
                                update.tx, uint32(update.details.Height),
                        )
                        if err != nil {
                                chainntnfs.Log.Errorf("Unable to process "+
                                        "transaction %v: %v", update.tx.Hash(),
                                        err)
                        }

                case err := <-n.rescanErr:
                        // Errors from the underlying rescan are logged only;
                        // the rescan itself is responsible for recovery.
                        chainntnfs.Log.Errorf("Error during rescan: %v", err)

                case <-n.quit:
                        return

                }
        }
}
563

564
// historicalConfDetails looks up whether a confirmation request (txid/output
// script) has already been included in a block in the active chain and, if so,
// returns details about said block. A nil TxConfirmation with a nil error
// means no match was found in the scanned range.
func (n *NeutrinoNotifier) historicalConfDetails(confRequest chainntnfs.ConfRequest,
        startHeight, endHeight uint32) (*chainntnfs.TxConfirmation, error) {

        // Starting from the end height, we'll walk backwards in the chain to
        // see if this transaction/output script has already been confirmed.
        for scanHeight := endHeight; scanHeight >= startHeight && scanHeight > 0; scanHeight-- {
                // Ensure we haven't been requested to shut down before
                // processing the next height.
                select {
                case <-n.quit:
                        return nil, chainntnfs.ErrChainNotifierShuttingDown
                default:
                }

                // First, we'll fetch the block header for this height so we
                // can compute the current block hash.
                blockHash, err := n.p2pNode.GetBlockHash(int64(scanHeight))
                if err != nil {
                        return nil, fmt.Errorf("unable to get header for "+
                                "height=%v: %w", scanHeight, err)
                }

                // With the hash computed, we can now fetch the basic filter for this
                // height. Since the range of required items is known we avoid
                // roundtrips by requesting a batched response and save bandwidth by
                // limiting the max number of items per batch. Since neutrino populates
                // its underline filters cache with the batch response, the next call
                // will execute a network query only once per batch and not on every
                // iteration.
                regFilter, err := n.p2pNode.GetCFilter(
                        *blockHash, wire.GCSFilterRegular,
                        neutrino.NumRetries(5),
                        neutrino.OptimisticReverseBatch(),
                        neutrino.MaxBatchSize(int64(scanHeight-startHeight+1)),
                )
                if err != nil {
                        return nil, fmt.Errorf("unable to retrieve regular "+
                                "filter for height=%v: %w", scanHeight, err)
                }

                // In the case that the filter exists, we'll attempt to see if
                // any element in it matches our target public key script.
                key := builder.DeriveKey(blockHash)
                match, err := regFilter.Match(key, confRequest.PkScript.Script())
                if err != nil {
                        return nil, fmt.Errorf("unable to query filter: %w",
                                err)
                }

                // If there's no match, then we can continue forward to the
                // next block.
                if !match {
                        continue
                }

                // In the case that we do have a match, we'll fetch the block
                // from the network so we can find the positional data required
                // to send the proper response.
                block, err := n.GetBlock(*blockHash)
                if err != nil {
                        return nil, fmt.Errorf("unable to get block from "+
                                "network: %w", err)
                }

                // For every transaction in the block, check which one matches
                // our request. If we find one that does, we can dispatch its
                // confirmation details. Filter matches can be false positives,
                // so finding no matching tx here simply continues the scan.
                for i, tx := range block.Transactions() {
                        if !confRequest.MatchesTx(tx.MsgTx()) {
                                continue
                        }

                        return &chainntnfs.TxConfirmation{
                                Tx:          tx.MsgTx().Copy(),
                                BlockHash:   blockHash,
                                BlockHeight: scanHeight,
                                TxIndex:     uint32(i),
                                Block:       block.MsgBlock(),
                        }, nil
                }
        }

        return nil, nil
}
651

652
// handleBlockConnected applies a chain update for a new block. Any watched
653
// transactions included this block will processed to either send notifications
654
// now or after numConfirmations confs.
655
//
656
// NOTE: This method must be called with the bestBlockMtx lock held.
657
func (n *NeutrinoNotifier) handleBlockConnected(newBlock *filteredBlock) error {
166✔
658
        // We'll extend the txNotifier's height with the information of this
166✔
659
        // new block, which will handle all of the notification logic for us.
166✔
660
        //
166✔
661
        // We actually need the _full_ block here as well in order to be able
166✔
662
        // to send the full block back up to the client. The neutrino client
166✔
663
        // itself will only dispatch a block if one of the items we're looking
166✔
664
        // for matches, so ultimately passing it the full block will still only
166✔
665
        // result in the items we care about being dispatched.
166✔
666
        rawBlock, err := n.GetBlock(newBlock.hash)
166✔
667
        if err != nil {
166✔
668
                return fmt.Errorf("unable to get full block: %w", err)
×
669
        }
×
670
        err = n.txNotifier.ConnectTip(rawBlock, newBlock.height)
166✔
671
        if err != nil {
166✔
672
                return fmt.Errorf("unable to connect tip: %w", err)
×
673
        }
×
674

675
        chainntnfs.Log.Infof("New block: height=%v, sha=%v", newBlock.height,
166✔
676
                newBlock.hash)
166✔
677

166✔
678
        // Now that we've guaranteed the new block extends the txNotifier's
166✔
679
        // current tip, we'll proceed to dispatch notifications to all of our
166✔
680
        // registered clients whom have had notifications fulfilled. Before
166✔
681
        // doing so, we'll make sure update our in memory state in order to
166✔
682
        // satisfy any client requests based upon the new block.
166✔
683
        n.bestBlock.Hash = &newBlock.hash
166✔
684
        n.bestBlock.Height = int32(newBlock.height)
166✔
685
        n.bestBlock.BlockHeader = newBlock.header
166✔
686

166✔
687
        n.notifyBlockEpochs(
166✔
688
                int32(newBlock.height), &newBlock.hash, newBlock.header,
166✔
689
        )
166✔
690
        return n.txNotifier.NotifyHeight(newBlock.height)
166✔
691
}
692

693
// getFilteredBlock is a utility to retrieve the full filtered block from a block epoch.
694
func (n *NeutrinoNotifier) getFilteredBlock(epoch chainntnfs.BlockEpoch) (*filteredBlock, error) {
10✔
695
        rawBlock, err := n.GetBlock(*epoch.Hash)
10✔
696
        if err != nil {
10✔
697
                return nil, fmt.Errorf("unable to get block: %w", err)
×
698
        }
×
699

700
        txns := rawBlock.Transactions()
10✔
701

10✔
702
        block := &filteredBlock{
10✔
703
                hash:    *epoch.Hash,
10✔
704
                height:  uint32(epoch.Height),
10✔
705
                header:  &rawBlock.MsgBlock().Header,
10✔
706
                txns:    txns,
10✔
707
                connect: true,
10✔
708
        }
10✔
709
        return block, nil
10✔
710
}
711

712
// notifyBlockEpochs notifies all registered block epoch clients of the newly
713
// connected block to the main chain.
714
func (n *NeutrinoNotifier) notifyBlockEpochs(newHeight int32, newSha *chainhash.Hash,
715
        blockHeader *wire.BlockHeader) {
166✔
716

166✔
717
        for _, client := range n.blockEpochClients {
378✔
718
                n.notifyBlockEpochClient(client, newHeight, newSha, blockHeader)
212✔
719
        }
212✔
720
}
721

722
// notifyBlockEpochClient sends a registered block epoch client a notification
723
// about a specific block.
724
func (n *NeutrinoNotifier) notifyBlockEpochClient(epochClient *blockEpochRegistration,
725
        height int32, sha *chainhash.Hash, blockHeader *wire.BlockHeader) {
276✔
726

276✔
727
        epoch := &chainntnfs.BlockEpoch{
276✔
728
                Height:      height,
276✔
729
                Hash:        sha,
276✔
730
                BlockHeader: blockHeader,
276✔
731
        }
276✔
732

276✔
733
        select {
276✔
734
        case epochClient.epochQueue.ChanIn() <- epoch:
276✔
735
        case <-epochClient.cancelChan:
×
736
        case <-n.quit:
×
737
        }
738
}
739

740
// RegisterSpendNtfn registers an intent to be notified once the target
// outpoint/output script has been spent by a transaction on-chain. When
// intending to be notified of the spend of an output script, a nil outpoint
// must be used. The heightHint should represent the earliest height in the
// chain of the transaction that spent the outpoint/output script.
//
// Once a spend has been detected, the details of the spending event will be
// sent across the 'Spend' channel.
func (n *NeutrinoNotifier) RegisterSpendNtfn(outpoint *wire.OutPoint,
	pkScript []byte, heightHint uint32) (*chainntnfs.SpendEvent, error) {

	// Register the spend notification with the TxNotifier. A non-nil
	// HistoricalDispatch will be returned if we are required to perform a
	// manual scan for the spend. Otherwise the notifier will begin
	// watching at tip for the outpoint/script to be spent.
	ntfn, err := n.txNotifier.RegisterSpend(outpoint, pkScript, heightHint)
	if err != nil {
		return nil, err
	}

	// To determine whether this outpoint has been spent on-chain, we'll
	// update our filter to watch for the transaction at tip and we'll also
	// dispatch a historical rescan to determine if it has been spent in the
	// past.
	//
	// We'll update our filter first to ensure we can immediately detect the
	// spend at tip.
	//
	// A nil outpoint denotes a script-only spend request; the zero
	// outpoint acts as its sentinel value throughout the rest of this
	// method.
	if outpoint == nil {
		outpoint = &chainntnfs.ZeroOutPoint
	}
	inputToWatch := neutrino.InputWithScript{
		OutPoint: *outpoint,
		PkScript: pkScript,
	}
	updateOptions := []neutrino.UpdateOption{
		neutrino.AddInputs(inputToWatch),
		neutrino.DisableDisconnectedNtfns(true),
	}

	// We'll use the txNotifier's tip as the starting point of our filter
	// update. In the case of an output script spend request, we'll check if
	// we should perform a historical rescan and start from there, as we
	// cannot do so with GetUtxo since it matches outpoints.
	rewindHeight := ntfn.Height
	if ntfn.HistoricalDispatch != nil && *outpoint == chainntnfs.ZeroOutPoint {
		rewindHeight = ntfn.HistoricalDispatch.StartHeight
	}
	updateOptions = append(updateOptions, neutrino.Rewind(rewindHeight))

	// Send the filter update to the notifier's main event handler and
	// wait for its response; bail out if the notifier is shutting down.
	errChan := make(chan error, 1)
	select {
	case n.notificationRegistry <- &rescanFilterUpdate{
		updateOptions: updateOptions,
		errChan:       errChan,
	}:
	case <-n.quit:
		return nil, chainntnfs.ErrChainNotifierShuttingDown
	}

	select {
	case err = <-errChan:
	case <-n.quit:
		return nil, chainntnfs.ErrChainNotifierShuttingDown
	}
	if err != nil {
		return nil, fmt.Errorf("unable to update filter: %w", err)
	}

	// If the txNotifier didn't return any details to perform a historical
	// scan of the chain, or if we already performed one like in the case of
	// output script spend requests, then we can return early as there's
	// nothing left for us to do.
	if ntfn.HistoricalDispatch == nil || *outpoint == chainntnfs.ZeroOutPoint {
		return ntfn.Event, nil
	}

	// Grab the current best height as the height may have been updated
	// while we were draining the chainUpdates queue.
	n.bestBlockMtx.RLock()
	currentHeight := uint32(n.bestBlock.Height)
	n.bestBlockMtx.RUnlock()

	ntfn.HistoricalDispatch.EndHeight = currentHeight

	// With the filter updated, we'll dispatch our historical rescan to
	// ensure we detect the spend if it happened in the past.
	n.wg.Add(1)
	go func() {
		defer n.wg.Done()

		// We'll ensure that neutrino is caught up to the starting
		// height before we attempt to fetch the UTXO from the chain.
		// If we're behind, then we may miss a notification dispatch.
		// Note: this currentHeight intentionally shadows the one
		// captured above; it is re-read on each poll iteration.
		for {
			n.bestBlockMtx.RLock()
			currentHeight := uint32(n.bestBlock.Height)
			n.bestBlockMtx.RUnlock()

			if currentHeight >= ntfn.HistoricalDispatch.StartHeight {
				break
			}

			// Poll every 200ms, but abort promptly on shutdown.
			select {
			case <-time.After(time.Millisecond * 200):
			case <-n.quit:
				return
			}
		}

		spendReport, err := n.p2pNode.GetUtxo(
			neutrino.WatchInputs(inputToWatch),
			neutrino.StartBlock(&headerfs.BlockStamp{
				Height: int32(ntfn.HistoricalDispatch.StartHeight),
			}),
			neutrino.EndBlock(&headerfs.BlockStamp{
				Height: int32(ntfn.HistoricalDispatch.EndHeight),
			}),
			neutrino.ProgressHandler(func(processedHeight uint32) {
				// We persist the rescan progress to achieve incremental
				// behavior across restarts, otherwise long rescans may
				// start from the beginning with every restart.
				err := n.spendHintCache.CommitSpendHint(
					processedHeight,
					ntfn.HistoricalDispatch.SpendRequest)
				if err != nil {
					chainntnfs.Log.Errorf("Failed to update rescan "+
						"progress: %v", err)
				}
			}),
			neutrino.QuitChan(n.quit),
		)
		// A "not found" error simply means the UTXO is unspent, so it
		// is treated as a non-error; presumably neutrino exposes no
		// typed sentinel for this — TODO confirm against the neutrino
		// GetUtxo API before tightening this string match.
		if err != nil && !strings.Contains(err.Error(), "not found") {
			chainntnfs.Log.Errorf("Failed getting UTXO: %v", err)
			return
		}

		// If a spend report was returned, and the transaction is present, then
		// this means that the output is already spent.
		var spendDetails *chainntnfs.SpendDetail
		if spendReport != nil && spendReport.SpendingTx != nil {
			spendingTxHash := spendReport.SpendingTx.TxHash()
			spendDetails = &chainntnfs.SpendDetail{
				SpentOutPoint:     outpoint,
				SpenderTxHash:     &spendingTxHash,
				SpendingTx:        spendReport.SpendingTx,
				SpenderInputIndex: spendReport.SpendingInputIndex,
				SpendingHeight:    int32(spendReport.SpendingTxHeight),
			}
		}

		// Finally, no matter whether the rescan found a spend in the past or
		// not, we'll mark our historical rescan as complete to ensure the
		// outpoint's spend hint gets updated upon connected/disconnected
		// blocks. A nil spendDetails signals "not spent" here.
		err = n.txNotifier.UpdateSpendDetails(
			ntfn.HistoricalDispatch.SpendRequest, spendDetails,
		)
		if err != nil {
			chainntnfs.Log.Errorf("Failed to update spend details: %v", err)
			return
		}
	}()

	return ntfn.Event, nil
}
905

906
// RegisterConfirmationsNtfn registers an intent to be notified once the target
// txid/output script has reached numConfs confirmations on-chain. When
// intending to be notified of the confirmation of an output script, a nil txid
// must be used. The heightHint should represent the earliest height at which
// the txid/output script could have been included in the chain.
//
// Progress on the number of confirmations left can be read from the 'Updates'
// channel. Once it has reached all of its confirmations, a notification will be
// sent across the 'Confirmed' channel.
func (n *NeutrinoNotifier) RegisterConfirmationsNtfn(txid *chainhash.Hash,
	pkScript []byte, numConfs, heightHint uint32,
	opts ...chainntnfs.NotifierOption) (*chainntnfs.ConfirmationEvent, error) {

	// Register the conf notification with the TxNotifier. A non-nil value
	// for `dispatch` will be returned if we are required to perform a
	// manual scan for the confirmation. Otherwise the notifier will begin
	// watching at tip for the transaction to confirm.
	ntfn, err := n.txNotifier.RegisterConf(
		txid, pkScript, numConfs, heightHint, opts...,
	)
	if err != nil {
		return nil, err
	}

	// To determine whether this transaction has confirmed on-chain, we'll
	// update our filter to watch for the transaction at tip and we'll also
	// dispatch a historical rescan to determine if it has confirmed in the
	// past.
	//
	// We'll update our filter first to ensure we can immediately detect the
	// confirmation at tip. To do so, we'll map the script into an address
	// type so we can instruct neutrino to match if the transaction
	// containing the script is found in a block.
	params := n.p2pNode.ChainParams()
	_, addrs, _, err := txscript.ExtractPkScriptAddrs(pkScript, &params)
	if err != nil {
		return nil, fmt.Errorf("unable to extract script: %w", err)
	}

	// We'll send the filter update request to the notifier's main event
	// handler and wait for its response. The registration rewinds to the
	// txNotifier's tip so no block between registration and the filter
	// update can be missed.
	errChan := make(chan error, 1)
	select {
	case n.notificationRegistry <- &rescanFilterUpdate{
		updateOptions: []neutrino.UpdateOption{
			neutrino.AddAddrs(addrs...),
			neutrino.Rewind(ntfn.Height),
			neutrino.DisableDisconnectedNtfns(true),
		},
		errChan: errChan,
	}:
	case <-n.quit:
		return nil, chainntnfs.ErrChainNotifierShuttingDown
	}

	select {
	case err = <-errChan:
	case <-n.quit:
		return nil, chainntnfs.ErrChainNotifierShuttingDown
	}
	if err != nil {
		return nil, fmt.Errorf("unable to update filter: %w", err)
	}

	// If a historical rescan was not requested by the txNotifier, then we
	// can return to the caller.
	if ntfn.HistoricalDispatch == nil {
		return ntfn.Event, nil
	}

	// Grab the current best height as the height may have been updated
	// while we were draining the chainUpdates queue.
	n.bestBlockMtx.RLock()
	currentHeight := uint32(n.bestBlock.Height)
	n.bestBlockMtx.RUnlock()

	ntfn.HistoricalDispatch.EndHeight = currentHeight

	// Finally, with the filter updated, we can dispatch the historical
	// rescan to ensure we can detect if the event happened in the past.
	select {
	case n.notificationRegistry <- ntfn.HistoricalDispatch:
	case <-n.quit:
		return nil, chainntnfs.ErrChainNotifierShuttingDown
	}

	return ntfn.Event, nil
}
994

995
// GetBlock is used to retrieve the block with the given hash. Since the block
996
// cache used by neutrino will be the same as that used by LND (since it is
997
// passed to neutrino on initialisation), the neutrino GetBlock method can be
998
// called directly since it already uses the block cache. However, neutrino
999
// does not lock the block cache mutex for the given block hash and so that is
1000
// done here.
1001
func (n *NeutrinoNotifier) GetBlock(hash chainhash.Hash) (
1002
        *btcutil.Block, error) {
182✔
1003

182✔
1004
        n.blockCache.HashMutex.Lock(lntypes.Hash(hash))
182✔
1005
        defer n.blockCache.HashMutex.Unlock(lntypes.Hash(hash))
182✔
1006

182✔
1007
        return n.p2pNode.GetBlock(hash)
182✔
1008
}
182✔
1009

1010
// blockEpochRegistration represents a client's intent to receive a
// notification with each newly connected block.
type blockEpochRegistration struct {
	// epochID uniquely identifies this registration and is used to match
	// cancellation requests against it.
	epochID uint64

	// epochChan is the buffered channel on which the client ultimately
	// receives its block epoch notifications.
	epochChan chan *chainntnfs.BlockEpoch

	// epochQueue is a concurrent queue that decouples the notifier's
	// dispatch path from the client while preserving notification order.
	epochQueue *queue.ConcurrentQueue

	// cancelChan signals cancellation of this registration; pending
	// deliveries abort when it fires.
	cancelChan chan struct{}

	// bestBlock is the client's best known block at registration time;
	// if non-nil, the notifier uses it to catch the client up on blocks
	// it has missed.
	bestBlock *chainntnfs.BlockEpoch

	// errorChan carries an error back to the registrant; presumably
	// populated during registration handling — not written in this view.
	errorChan chan error

	// wg tracks the goroutine that proxies notifications from epochQueue
	// to epochChan.
	wg sync.WaitGroup
}
1027

1028
// epochCancel is a message sent to the NeutrinoNotifier when a client wishes
// to cancel an outstanding epoch notification that has yet to be dispatched.
type epochCancel struct {
	// epochID identifies which blockEpochRegistration to cancel.
	epochID uint64
}
1033

1034
// RegisterBlockEpochNtfn returns a BlockEpochEvent which subscribes the
// caller to receive notifications, of each new block connected to the main
// chain. Clients have the option of passing in their best known block, which
// the notifier uses to check if they are behind on blocks and catch them up. If
// they do not provide one, then a notification will be dispatched immediately
// for the current tip of the chain upon a successful registration.
func (n *NeutrinoNotifier) RegisterBlockEpochNtfn(
	bestBlock *chainntnfs.BlockEpoch) (*chainntnfs.BlockEpochEvent, error) {

	reg := &blockEpochRegistration{
		epochQueue: queue.NewConcurrentQueue(20),
		epochChan:  make(chan *chainntnfs.BlockEpoch, 20),
		cancelChan: make(chan struct{}),
		epochID:    atomic.AddUint64(&n.epochClientCounter, 1),
		bestBlock:  bestBlock,
		errorChan:  make(chan error, 1),
	}
	reg.epochQueue.Start()

	// Before we send the request to the main goroutine, we'll launch a new
	// goroutine to proxy items added to our queue to the client itself.
	// This ensures that all notifications are received *in order*.
	reg.wg.Add(1)
	go func() {
		defer reg.wg.Done()

		for {
			select {
			case ntfn := <-reg.epochQueue.ChanOut():
				blockNtfn := ntfn.(*chainntnfs.BlockEpoch)
				// Forward to the client, unless the client
				// cancels or the notifier shuts down first.
				select {
				case reg.epochChan <- blockNtfn:

				case <-reg.cancelChan:
					return

				case <-n.quit:
					return
				}

			case <-reg.cancelChan:
				return

			case <-n.quit:
				return
			}
		}
	}()

	select {
	case <-n.quit:
		// As we're exiting before the registration could be sent,
		// we'll stop the queue now ourselves.
		reg.epochQueue.Stop()

		return nil, errors.New("chainntnfs: system interrupt while " +
			"attempting to register for block epoch notification.")
	case n.notificationRegistry <- reg:
		return &chainntnfs.BlockEpochEvent{
			Epochs: reg.epochChan,
			Cancel: func() {
				cancel := &epochCancel{
					epochID: reg.epochID,
				}

				// Submit epoch cancellation to notification dispatcher.
				select {
				case n.notificationCancels <- cancel:
					// Cancellation is being handled, drain the epoch channel until it is
					// closed before yielding to caller. The close is
					// presumably performed by the dispatcher once it
					// processes the cancel — confirm in the main event
					// loop (outside this view).
					for {
						select {
						case _, ok := <-reg.epochChan:
							if !ok {
								return
							}
						case <-n.quit:
							return
						}
					}
				case <-n.quit:
				}
			},
		}, nil
	}
}
1120

1121
// NeutrinoChainConn is a wrapper around neutrino's chain backend in order
// to satisfy the chainntnfs.ChainConn interface.
type NeutrinoChainConn struct {
	// p2pNode is the backing neutrino light client that serves all header
	// and block hash queries.
	p2pNode *neutrino.ChainService
}
1126

1127
// GetBlockHeader returns the block header for a hash. It delegates directly
// to the backing neutrino node.
//
// NOTE: This is part of the chainntnfs.ChainConn interface.
func (n *NeutrinoChainConn) GetBlockHeader(blockHash *chainhash.Hash) (*wire.BlockHeader, error) {
	return n.p2pNode.GetBlockHeader(blockHash)
}
64✔
1131

1132
// GetBlockHeaderVerbose returns a verbose block header result for a hash. This
1133
// result only contains the height with a nil hash.
1134
func (n *NeutrinoChainConn) GetBlockHeaderVerbose(blockHash *chainhash.Hash) (
1135
        *btcjson.GetBlockHeaderVerboseResult, error) {
×
1136

×
1137
        height, err := n.p2pNode.GetBlockHeight(blockHash)
×
1138
        if err != nil {
×
1139
                return nil, err
×
1140
        }
×
1141
        // Since only the height is used from the result, leave the hash nil.
1142
        return &btcjson.GetBlockHeaderVerboseResult{Height: int32(height)}, nil
×
1143
}
1144

1145
// GetBlockHash returns the hash from a block height. It delegates directly to
// the backing neutrino node.
//
// NOTE: This is part of the chainntnfs.ChainConn interface.
func (n *NeutrinoChainConn) GetBlockHash(blockHeight int64) (*chainhash.Hash, error) {
	return n.p2pNode.GetBlockHash(blockHeight)
}
64✔
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc