lightningnetwork / lnd — build 13180337775

06 Feb 2025 01:54PM UTC coverage: 57.708% (+8.4%) from 49.279%

Pull #9470 (via github) — ziggie1984
walletrpc+lncli: add new param. to bumpfee rpc.

Add new parameter deadline-delta to the bumpfee request and only
allow it to be used when the budget value is used as well.

Pull Request #9470: Make sure we fail the bump fee request as long as no budget is specified

103606 of 179536 relevant lines covered (57.71%)

24866.63 hits per line
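
The guard described in the pull request is simple: the new deadline-delta
parameter is only meaningful when a budget is supplied, so a request that
sets one without the other is rejected. The Go sketch below illustrates
that rule only; the function and parameter names are hypothetical
placeholders, not lnd's actual walletrpc types.

// validateBumpFeeParams sketches the validation rule from PR #9470.
// Illustrative only: names are placeholders, not lnd's RPC fields, and
// the standard library errors package is assumed to be imported.
func validateBumpFeeParams(budget, deadlineDelta uint64) error {
        if deadlineDelta != 0 && budget == 0 {
                return errors.New("deadline-delta requires a budget to be set")
        }

        return nil
}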

Source File

/graph/builder.go (69.05% file coverage)
1
package graph
2

3
import (
4
        "bytes"
5
        "fmt"
6
        "strings"
7
        "sync"
8
        "sync/atomic"
9
        "time"
10

11
        "github.com/btcsuite/btcd/btcec/v2"
12
        "github.com/btcsuite/btcd/btcutil"
13
        "github.com/btcsuite/btcd/chaincfg/chainhash"
14
        "github.com/btcsuite/btcd/wire"
15
        "github.com/go-errors/errors"
16
        "github.com/lightningnetwork/lnd/batch"
17
        "github.com/lightningnetwork/lnd/chainntnfs"
18
        "github.com/lightningnetwork/lnd/fn/v2"
19
        graphdb "github.com/lightningnetwork/lnd/graph/db"
20
        "github.com/lightningnetwork/lnd/graph/db/models"
21
        "github.com/lightningnetwork/lnd/input"
22
        "github.com/lightningnetwork/lnd/kvdb"
23
        "github.com/lightningnetwork/lnd/lnutils"
24
        "github.com/lightningnetwork/lnd/lnwallet"
25
        "github.com/lightningnetwork/lnd/lnwallet/btcwallet"
26
        "github.com/lightningnetwork/lnd/lnwallet/chanvalidate"
27
        "github.com/lightningnetwork/lnd/lnwire"
28
        "github.com/lightningnetwork/lnd/multimutex"
29
        "github.com/lightningnetwork/lnd/netann"
30
        "github.com/lightningnetwork/lnd/routing/chainview"
31
        "github.com/lightningnetwork/lnd/routing/route"
32
        "github.com/lightningnetwork/lnd/ticker"
33
)
34

35
const (
36
        // DefaultChannelPruneExpiry is the default duration used to determine
37
        // if a channel should be pruned or not.
38
        DefaultChannelPruneExpiry = time.Hour * 24 * 14
39

40
        // DefaultFirstTimePruneDelay is the time we'll wait after startup
41
        // before attempting to prune the graph for zombie channels. We don't
42
        // do it immediately after startup to allow lnd to start up without
43
        // getting blocked by this job.
44
        DefaultFirstTimePruneDelay = 30 * time.Second
45

46
        // defaultStatInterval governs how often the router will log non-empty
47
        // stats related to processing new channels, updates, or node
48
        // announcements.
49
        defaultStatInterval = time.Minute
50
)
51

52
var (
53
        // ErrGraphBuilderShuttingDown is returned if the graph builder is in
54
        // the process of shutting down.
55
        ErrGraphBuilderShuttingDown = fmt.Errorf("graph builder shutting down")
56
)
57

58
// Config holds the configuration required by the Builder.
59
type Config struct {
60
        // SelfNode is the public key of the node that this channel router
61
        // belongs to.
62
        SelfNode route.Vertex
63

64
        // Graph is the channel graph that the ChannelRouter will use to gather
65
        // metrics from and also to carry out path finding queries.
66
        Graph DB
67

68
        // Chain is the router's source to the most up-to-date blockchain data.
69
        // All incoming advertised channels will be checked against the chain
70
        // to ensure that the channels advertised are still open.
71
        Chain lnwallet.BlockChainIO
72

73
        // ChainView is an instance of a FilteredChainView which is used to
74
        // watch the sub-set of the UTXO set (the set of active channels) that
75
        // we need in order to properly maintain the channel graph.
76
        ChainView chainview.FilteredChainView
77

78
        // Notifier is a reference to the ChainNotifier, used to grab
79
        // the latest blocks if the router is missing any.
80
        Notifier chainntnfs.ChainNotifier
81

82
        // ChannelPruneExpiry is the duration used to determine if a channel
83
        // should be pruned or not. If the delta between now and when the
84
        // channel was last updated is greater than ChannelPruneExpiry, then
85
        // the channel is marked as a zombie channel eligible for pruning.
86
        ChannelPruneExpiry time.Duration
87

88
        // GraphPruneInterval is used as an interval to determine how often we
89
        // should examine the channel graph to garbage collect zombie channels.
90
        GraphPruneInterval time.Duration
91

92
        // FirstTimePruneDelay is the time we'll wait after startup before
93
        // attempting to prune the graph for zombie channels. We don't do it
94
        // immediately after startup to allow lnd to start up without getting
95
        // blocked by this job.
96
        FirstTimePruneDelay time.Duration
97

98
        // AssumeChannelValid toggles whether the router will check for
99
        // spentness of channel outpoints. For neutrino, this saves long rescans
100
        // from blocking initial usage of the daemon.
101
        AssumeChannelValid bool
102

103
        // StrictZombiePruning determines if we attempt to prune zombie
104
        // channels according to a stricter criteria. If true, then we'll prune
105
        // a channel if only *one* of the edges is considered a zombie.
106
        // Otherwise, we'll only prune the channel when both edges have a very
107
        // dated last update.
108
        StrictZombiePruning bool
109

110
        // IsAlias returns whether a passed ShortChannelID is an alias. This is
111
        // only used for our local channels.
112
        IsAlias func(scid lnwire.ShortChannelID) bool
113
}
114

115
// Builder builds and maintains a view of the Lightning Network graph.
116
type Builder struct {
117
        started atomic.Bool
118
        stopped atomic.Bool
119

120
        ntfnClientCounter atomic.Uint64
121
        bestHeight        atomic.Uint32
122

123
        cfg *Config
124

125
        // newBlocks is a channel in which new blocks connected to the end of
126
        // the main chain are sent over, and blocks updated after a call to
127
        // UpdateFilter.
128
        newBlocks <-chan *chainview.FilteredBlock
129

130
        // staleBlocks is a channel in which blocks disconnected from the end
131
        // of our currently known best chain are sent over.
132
        staleBlocks <-chan *chainview.FilteredBlock
133

134
        // networkUpdates is a channel that carries new topology updates
135
        // messages from outside the Builder to be processed by the
136
        // networkHandler.
137
        networkUpdates chan *routingMsg
138

139
        // topologyClients maps a client's unique notification ID to a
140
        // topologyClient client that contains its notification dispatch
141
        // channel.
142
        topologyClients *lnutils.SyncMap[uint64, *topologyClient]
143

144
        // ntfnClientUpdates is a channel that's used to send new updates to
145
        // topology notification clients to the Builder. Updates either
146
        // add a new notification client, or cancel notifications for an
147
        // existing client.
148
        ntfnClientUpdates chan *topologyClientUpdate
149

150
        // channelEdgeMtx is a mutex we use to make sure we process only one
151
        // ChannelEdgePolicy at a time for a given channelID, to ensure
152
        // consistency between the various database accesses.
153
        channelEdgeMtx *multimutex.Mutex[uint64]
154

155
        // statTicker is a resumable ticker that logs the router's progress as
156
        // it discovers channels or receives updates.
157
        statTicker ticker.Ticker
158

159
        // stats tracks newly processed channels, updates, and node
160
        // announcements over a window of defaultStatInterval.
161
        stats *routerStats
162

163
        quit chan struct{}
164
        wg   sync.WaitGroup
165
}
166

167
// A compile time check to ensure Builder implements the
168
// ChannelGraphSource interface.
169
var _ ChannelGraphSource = (*Builder)(nil)
170

171
// NewBuilder constructs a new Builder.
172
func NewBuilder(cfg *Config) (*Builder, error) {
21✔
173
        return &Builder{
21✔
174
                cfg:               cfg,
21✔
175
                networkUpdates:    make(chan *routingMsg),
21✔
176
                topologyClients:   &lnutils.SyncMap[uint64, *topologyClient]{},
21✔
177
                ntfnClientUpdates: make(chan *topologyClientUpdate),
21✔
178
                channelEdgeMtx:    multimutex.NewMutex[uint64](),
21✔
179
                statTicker:        ticker.New(defaultStatInterval),
21✔
180
                stats:             new(routerStats),
21✔
181
                quit:              make(chan struct{}),
21✔
182
        }, nil
21✔
183
}
21✔
184

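For orientation, here is a short usage sketch (illustrative, not part of
builder.go) showing how a caller could wire a Config into NewBuilder and
drive the Start/Stop lifecycle. It assumes the chain, graph, chain view
and notifier backends are constructed elsewhere, and the prune interval is
an arbitrary example value; in lnd itself this wiring is done by the
server code.

// startGraphBuilder is an illustrative helper, not part of builder.go.
// All backend dependencies are assumed to be built by the caller.
func startGraphBuilder(self route.Vertex, db DB,
        chain lnwallet.BlockChainIO, chainView chainview.FilteredChainView,
        notifier chainntnfs.ChainNotifier) (*Builder, error) {

        builder, err := NewBuilder(&Config{
                SelfNode:            self,
                Graph:               db,
                Chain:               chain,
                ChainView:           chainView,
                Notifier:            notifier,
                ChannelPruneExpiry:  DefaultChannelPruneExpiry,
                // Prune cadence chosen arbitrarily for this example.
                GraphPruneInterval:  time.Hour,
                FirstTimePruneDelay: DefaultFirstTimePruneDelay,
                // For this sketch, no local channel is treated as an alias.
                IsAlias: func(_ lnwire.ShortChannelID) bool {
                        return false
                },
        })
        if err != nil {
                return nil, err
        }

        // Start launches the network handler goroutine; the owner is
        // expected to call Stop on shutdown, which blocks until that
        // goroutine has exited.
        if err := builder.Start(); err != nil {
                return nil, err
        }

        return builder, nil
}
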
185
// Start launches all the goroutines the Builder requires to carry out its
186
// duties. If the builder has already been started, then this method is a noop.
187
func (b *Builder) Start() error {
21✔
188
        if !b.started.CompareAndSwap(false, true) {
21✔
189
                return nil
×
190
        }
×
191

192
        log.Info("Builder starting")
21✔
193

21✔
194
        bestHash, bestHeight, err := b.cfg.Chain.GetBestBlock()
21✔
195
        if err != nil {
21✔
196
                return err
×
197
        }
×
198

199
        // If the graph has never been pruned, or hasn't fully been created yet,
200
        // then we don't treat this as an explicit error.
201
        if _, _, err := b.cfg.Graph.PruneTip(); err != nil {
40✔
202
                switch {
19✔
203
                case errors.Is(err, graphdb.ErrGraphNeverPruned):
19✔
204
                        fallthrough
19✔
205

206
                case errors.Is(err, graphdb.ErrGraphNotFound):
19✔
207
                        // If the graph has never been pruned, then we'll set
19✔
208
                        // the prune height to the current best height of the
19✔
209
                        // chain backend.
19✔
210
                        _, err = b.cfg.Graph.PruneGraph(
19✔
211
                                nil, bestHash, uint32(bestHeight),
19✔
212
                        )
19✔
213
                        if err != nil {
19✔
214
                                return err
×
215
                        }
×
216

217
                default:
×
218
                        return err
×
219
                }
220
        }
221

222
        // If AssumeChannelValid is present, then we won't rely on pruning
223
        // channels from the graph based on their spentness, but whether they
224
        // are considered zombies or not. We will start zombie pruning after a
225
        // small delay, to avoid slowing down startup of lnd.
226
        if b.cfg.AssumeChannelValid { //nolint:nestif
22✔
227
                time.AfterFunc(b.cfg.FirstTimePruneDelay, func() {
2✔
228
                        select {
1✔
229
                        case <-b.quit:
×
230
                                return
×
231
                        default:
1✔
232
                        }
233

234
                        log.Info("Initial zombie prune starting")
1✔
235
                        if err := b.pruneZombieChans(); err != nil {
1✔
236
                                log.Errorf("Unable to prune zombies: %v", err)
×
237
                        }
×
238
                })
239
        } else {
20✔
240
                // Otherwise, we'll use our filtered chain view to prune
20✔
241
                // channels as soon as they are detected as spent on-chain.
20✔
242
                if err := b.cfg.ChainView.Start(); err != nil {
20✔
243
                        return err
×
244
                }
×
245

246
                // Once the instance is active, we'll fetch the channels we'll
247
                // receive notifications over.
248
                b.newBlocks = b.cfg.ChainView.FilteredBlocks()
20✔
249
                b.staleBlocks = b.cfg.ChainView.DisconnectedBlocks()
20✔
250

20✔
251
                // Before we perform our manual block pruning, we'll construct
20✔
252
                // and apply a fresh chain filter to the active
20✔
253
                // FilteredChainView instance.  We do this before, as otherwise
20✔
254
                // we may miss on-chain events as the filter hasn't properly
20✔
255
                // been applied.
20✔
256
                channelView, err := b.cfg.Graph.ChannelView()
20✔
257
                if err != nil && !errors.Is(
20✔
258
                        err, graphdb.ErrGraphNoEdgesFound,
20✔
259
                ) {
20✔
260

×
261
                        return err
×
262
                }
×
263

264
                log.Infof("Filtering chain using %v channels active",
20✔
265
                        len(channelView))
20✔
266

20✔
267
                if len(channelView) != 0 {
27✔
268
                        err = b.cfg.ChainView.UpdateFilter(
7✔
269
                                channelView, uint32(bestHeight),
7✔
270
                        )
7✔
271
                        if err != nil {
7✔
272
                                return err
×
273
                        }
×
274
                }
275

276
                // The graph pruning might have taken a while and there could be
277
                // new blocks available.
278
                _, bestHeight, err = b.cfg.Chain.GetBestBlock()
20✔
279
                if err != nil {
20✔
280
                        return err
×
281
                }
×
282
                b.bestHeight.Store(uint32(bestHeight))
20✔
283

20✔
284
                // Before we begin normal operation of the router, we first need
20✔
285
                // to synchronize the channel graph to the latest state of the
20✔
286
                // UTXO set.
20✔
287
                if err := b.syncGraphWithChain(); err != nil {
20✔
288
                        return err
×
289
                }
×
290

291
                // Finally, before we proceed, we'll prune any unconnected nodes
292
                // from the graph in order to ensure we maintain a tight graph
293
                // of "useful" nodes.
294
                err = b.cfg.Graph.PruneGraphNodes()
20✔
295
                if err != nil &&
20✔
296
                        !errors.Is(err, graphdb.ErrGraphNodesNotFound) {
20✔
297

×
298
                        return err
×
299
                }
×
300
        }
301

302
        b.wg.Add(1)
21✔
303
        go b.networkHandler()
21✔
304

21✔
305
        log.Debug("Builder started")
21✔
306

21✔
307
        return nil
21✔
308
}
309

310
// Stop signals to the Builder that it should halt all routines. This method
311
// will *block* until all goroutines have exited. If the builder has already
312
// stopped then this method will return immediately.
313
func (b *Builder) Stop() error {
21✔
314
        if !b.stopped.CompareAndSwap(false, true) {
23✔
315
                return nil
2✔
316
        }
2✔
317

318
        log.Info("Builder shutting down...")
19✔
319

19✔
320
        // Our filtered chain view could've only been started if
19✔
321
        // AssumeChannelValid isn't present.
19✔
322
        if !b.cfg.AssumeChannelValid {
37✔
323
                if err := b.cfg.ChainView.Stop(); err != nil {
18✔
324
                        return err
×
325
                }
×
326
        }
327

328
        close(b.quit)
19✔
329
        b.wg.Wait()
19✔
330

19✔
331
        log.Debug("Builder shutdown complete")
19✔
332

19✔
333
        return nil
19✔
334
}
335

336
// syncGraphWithChain attempts to synchronize the current channel graph with
337
// the latest UTXO set state. This process involves pruning from the channel
338
// graph any channels which have been closed by spending their funding output
339
// since we've been down.
340
func (b *Builder) syncGraphWithChain() error {
20✔
341
        // First, we'll need to check to see if we're already in sync with the
20✔
342
        // latest state of the UTXO set.
20✔
343
        bestHash, bestHeight, err := b.cfg.Chain.GetBestBlock()
20✔
344
        if err != nil {
20✔
345
                return err
×
346
        }
×
347
        b.bestHeight.Store(uint32(bestHeight))
20✔
348

20✔
349
        pruneHash, pruneHeight, err := b.cfg.Graph.PruneTip()
20✔
350
        if err != nil {
20✔
351
                switch {
×
352
                // If the graph has never been pruned, or hasn't fully been
353
                // created yet, then we don't treat this as an explicit error.
354
                case errors.Is(err, graphdb.ErrGraphNeverPruned):
×
355
                case errors.Is(err, graphdb.ErrGraphNotFound):
×
356
                default:
×
357
                        return err
×
358
                }
359
        }
360

361
        log.Infof("Prune tip for Channel Graph: height=%v, hash=%v",
20✔
362
                pruneHeight, pruneHash)
20✔
363

20✔
364
        switch {
20✔
365
        // If the graph has never been pruned, then we can exit early as this
366
        // entails it's being created for the first time and hasn't seen any
367
        // block or created channels.
368
        case pruneHeight == 0 || pruneHash == nil:
4✔
369
                return nil
4✔
370

371
        // If the block hashes and heights match exactly, then we don't need to
372
        // prune the channel graph as we're already fully in sync.
373
        case bestHash.IsEqual(pruneHash) && uint32(bestHeight) == pruneHeight:
14✔
374
                return nil
14✔
375
        }
376

377
        // If the main chain blockhash at prune height is different from the
378
        // prune hash, this might indicate the database is on a stale branch.
379
        mainBlockHash, err := b.cfg.Chain.GetBlockHash(int64(pruneHeight))
2✔
380
        if err != nil {
2✔
381
                return err
×
382
        }
×
383

384
        // While we are on a stale branch of the chain, walk backwards to find
385
        // first common block.
386
        for !pruneHash.IsEqual(mainBlockHash) {
12✔
387
                log.Infof("channel graph is stale. Disconnecting block %v "+
10✔
388
                        "(hash=%v)", pruneHeight, pruneHash)
10✔
389
                // Prune the graph for every channel that was opened at height
10✔
390
                // >= pruneHeight.
10✔
391
                _, err := b.cfg.Graph.DisconnectBlockAtHeight(pruneHeight)
10✔
392
                if err != nil {
10✔
393
                        return err
×
394
                }
×
395

396
                pruneHash, pruneHeight, err = b.cfg.Graph.PruneTip()
10✔
397
                switch {
10✔
398
                // If at this point the graph has never been pruned, we can exit
399
                // as this entails we are back to the point where it hasn't seen
400
                // any block or created channels, so there's nothing left to
401
                // prune.
402
                case errors.Is(err, graphdb.ErrGraphNeverPruned):
×
403
                        return nil
×
404

405
                case errors.Is(err, graphdb.ErrGraphNotFound):
×
406
                        return nil
×
407

408
                case err != nil:
×
409
                        return err
×
410

411
                default:
10✔
412
                }
413

414
                mainBlockHash, err = b.cfg.Chain.GetBlockHash(
10✔
415
                        int64(pruneHeight),
10✔
416
                )
10✔
417
                if err != nil {
10✔
418
                        return err
×
419
                }
×
420
        }
421

422
        log.Infof("Syncing channel graph from height=%v (hash=%v) to "+
2✔
423
                "height=%v (hash=%v)", pruneHeight, pruneHash, bestHeight,
2✔
424
                bestHash)
2✔
425

2✔
426
        // If we're not yet caught up, then we'll walk forward in the chain
2✔
427
        // pruning the channel graph with each new block that hasn't yet been
2✔
428
        // consumed by the channel graph.
2✔
429
        var spentOutputs []*wire.OutPoint
2✔
430
        for nextHeight := pruneHeight + 1; nextHeight <= uint32(bestHeight); nextHeight++ { //nolint:ll
26✔
431
                // Break out of the rescan early if a shutdown has been
24✔
432
                // requested, otherwise long rescans will block the daemon from
24✔
433
                // shutting down promptly.
24✔
434
                select {
24✔
435
                case <-b.quit:
×
436
                        return ErrGraphBuilderShuttingDown
×
437
                default:
24✔
438
                }
439

440
                // Using the next height, request a manual block pruning from
441
                // the chainview for the particular block hash.
442
                log.Infof("Filtering block for closed channels, at height: %v",
24✔
443
                        int64(nextHeight))
24✔
444
                nextHash, err := b.cfg.Chain.GetBlockHash(int64(nextHeight))
24✔
445
                if err != nil {
24✔
446
                        return err
×
447
                }
×
448
                log.Tracef("Running block filter on block with hash: %v",
24✔
449
                        nextHash)
24✔
450
                filterBlock, err := b.cfg.ChainView.FilterBlock(nextHash)
24✔
451
                if err != nil {
24✔
452
                        return err
×
453
                }
×
454

455
                // We're only interested in all prior outputs that have been
456
                // spent in the block, so collate all the referenced previous
457
                // outpoints within each tx and input.
458
                for _, tx := range filterBlock.Transactions {
25✔
459
                        for _, txIn := range tx.TxIn {
2✔
460
                                spentOutputs = append(spentOutputs,
1✔
461
                                        &txIn.PreviousOutPoint)
1✔
462
                        }
1✔
463
                }
464
        }
465

466
        // With the spent outputs gathered, attempt to prune the channel graph,
467
        // also passing in the best hash+height so the prune tip can be updated.
468
        closedChans, err := b.cfg.Graph.PruneGraph(
2✔
469
                spentOutputs, bestHash, uint32(bestHeight),
2✔
470
        )
2✔
471
        if err != nil {
2✔
472
                return err
×
473
        }
×
474

475
        log.Infof("Graph pruning complete: %v channels were closed since "+
2✔
476
                "height %v", len(closedChans), pruneHeight)
2✔
477

2✔
478
        return nil
2✔
479
}
480

481
// isZombieChannel takes two edge policy updates and determines if the
482
// corresponding channel should be considered a zombie. The first boolean is
483
// true if the policy update from node 1 is considered a zombie, the second
484
// boolean is that of node 2, and the final boolean is true if the channel
485
// is considered a zombie.
486
func (b *Builder) isZombieChannel(e1,
487
        e2 *models.ChannelEdgePolicy) (bool, bool, bool) {
6✔
488

6✔
489
        chanExpiry := b.cfg.ChannelPruneExpiry
6✔
490

6✔
491
        e1Zombie := e1 == nil || time.Since(e1.LastUpdate) >= chanExpiry
6✔
492
        e2Zombie := e2 == nil || time.Since(e2.LastUpdate) >= chanExpiry
6✔
493

6✔
494
        var e1Time, e2Time time.Time
6✔
495
        if e1 != nil {
10✔
496
                e1Time = e1.LastUpdate
4✔
497
        }
4✔
498
        if e2 != nil {
12✔
499
                e2Time = e2.LastUpdate
6✔
500
        }
6✔
501

502
        return e1Zombie, e2Zombie, b.IsZombieChannel(e1Time, e2Time)
6✔
503
}
504

505
// IsZombieChannel takes the timestamps of the latest channel updates for a
506
// channel and returns true if the channel should be considered a zombie based
507
// on these timestamps.
508
func (b *Builder) IsZombieChannel(updateTime1,
509
        updateTime2 time.Time) bool {
6✔
510

6✔
511
        chanExpiry := b.cfg.ChannelPruneExpiry
6✔
512

6✔
513
        e1Zombie := updateTime1.IsZero() ||
6✔
514
                time.Since(updateTime1) >= chanExpiry
6✔
515

6✔
516
        e2Zombie := updateTime2.IsZero() ||
6✔
517
                time.Since(updateTime2) >= chanExpiry
6✔
518

6✔
519
        // If we're using strict zombie pruning, then a channel is only
6✔
520
        // considered live if both edges have a recent update we know of.
6✔
521
        if b.cfg.StrictZombiePruning {
9✔
522
                return e1Zombie || e2Zombie
3✔
523
        }
3✔
524

525
        // Otherwise, if we're using the less strict variant, then a channel is
526
        // considered live if either of the edges has a recent update.
527
        return e1Zombie && e2Zombie
3✔
528
}
529

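To make the strict versus lenient behaviour concrete, the short sketch
below (illustrative, not part of builder.go) considers one edge updated an
hour ago and the other silent for twice the configured ChannelPruneExpiry,
assuming the expiry is well above an hour (the default is 14 days).

// exampleZombieDecision is an illustrative sketch, not part of builder.go.
func exampleZombieDecision(b *Builder) {
        fresh := time.Now().Add(-time.Hour)
        stale := time.Now().Add(-2 * b.cfg.ChannelPruneExpiry)

        // With StrictZombiePruning=false the channel is a zombie only if
        // *both* edges are stale, so the fresh edge keeps it alive and
        // IsZombieChannel returns false for these timestamps.
        //
        // With StrictZombiePruning=true a single stale edge is enough and
        // the same timestamps yield true.
        isZombie := b.IsZombieChannel(fresh, stale)

        log.Debugf("strict=%v: zombie=%v", b.cfg.StrictZombiePruning,
                isZombie)
}
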
530
// pruneZombieChans is a method that will be called periodically to prune out
531
// any "zombie" channels. We consider channels zombies if *both* edges haven't
532
// been updated since our zombie horizon. If AssumeChannelValid is present,
533
// we'll also consider channels zombies if *both* edges are disabled. This
534
// usually signals that a channel has been closed on-chain. We do this
535
// periodically to keep a healthy, lively routing table.
536
func (b *Builder) pruneZombieChans() error {
5✔
537
        chansToPrune := make(map[uint64]struct{})
5✔
538
        chanExpiry := b.cfg.ChannelPruneExpiry
5✔
539

5✔
540
        log.Infof("Examining channel graph for zombie channels")
5✔
541

5✔
542
        // A helper method to detect if the channel belongs to this node
5✔
543
        isSelfChannelEdge := func(info *models.ChannelEdgeInfo) bool {
16✔
544
                return info.NodeKey1Bytes == b.cfg.SelfNode ||
11✔
545
                        info.NodeKey2Bytes == b.cfg.SelfNode
11✔
546
        }
11✔
547

548
        // First, we'll collect all the channels which are eligible for garbage
549
        // collection due to being zombies.
550
        filterPruneChans := func(info *models.ChannelEdgeInfo,
5✔
551
                e1, e2 *models.ChannelEdgePolicy) error {
13✔
552

8✔
553
                // Exit early in case this channel is already marked to be
8✔
554
                // pruned
8✔
555
                _, markedToPrune := chansToPrune[info.ChannelID]
8✔
556
                if markedToPrune {
8✔
557
                        return nil
×
558
                }
×
559

560
                // We'll ensure that we don't attempt to prune our *own*
561
                // channels from the graph, as in any case this should be
562
                // re-advertised by the sub-system above us.
563
                if isSelfChannelEdge(info) {
10✔
564
                        return nil
2✔
565
                }
2✔
566

567
                e1Zombie, e2Zombie, isZombieChan := b.isZombieChannel(e1, e2)
6✔
568

6✔
569
                if e1Zombie {
10✔
570
                        log.Tracef("Node1 pubkey=%x of chan_id=%v is zombie",
4✔
571
                                info.NodeKey1Bytes, info.ChannelID)
4✔
572
                }
4✔
573

574
                if e2Zombie {
12✔
575
                        log.Tracef("Node2 pubkey=%x of chan_id=%v is zombie",
6✔
576
                                info.NodeKey2Bytes, info.ChannelID)
6✔
577
                }
6✔
578

579
                // If either edge hasn't been updated for a period of
580
                // chanExpiry, then we'll mark the channel itself as eligible
581
                // for graph pruning.
582
                if !isZombieChan {
7✔
583
                        return nil
1✔
584
                }
1✔
585

586
                log.Debugf("ChannelID(%v) is a zombie, collecting to prune",
5✔
587
                        info.ChannelID)
5✔
588

5✔
589
                // TODO(roasbeef): add ability to delete single directional edge
5✔
590
                chansToPrune[info.ChannelID] = struct{}{}
5✔
591

5✔
592
                return nil
5✔
593
        }
594

595
        // If AssumeChannelValid is present we'll look at the disabled bit for
596
        // both edges. If they're both disabled, then we can interpret this as
597
        // the channel being closed and can prune it from our graph.
598
        if b.cfg.AssumeChannelValid {
7✔
599
                disabledChanIDs, err := b.cfg.Graph.DisabledChannelIDs()
2✔
600
                if err != nil {
2✔
601
                        return fmt.Errorf("unable to get disabled channels "+
×
602
                                "ids chans: %v", err)
×
603
                }
×
604

605
                disabledEdges, err := b.cfg.Graph.FetchChanInfos(
2✔
606
                        disabledChanIDs,
2✔
607
                )
2✔
608
                if err != nil {
2✔
609
                        return fmt.Errorf("unable to fetch disabled channels "+
×
610
                                "edges chans: %v", err)
×
611
                }
×
612

613
                // Ensuring we won't prune our own channel from the graph.
614
                for _, disabledEdge := range disabledEdges {
5✔
615
                        if !isSelfChannelEdge(disabledEdge.Info) {
4✔
616
                                chansToPrune[disabledEdge.Info.ChannelID] =
1✔
617
                                        struct{}{}
1✔
618
                        }
1✔
619
                }
620
        }
621

622
        startTime := time.Unix(0, 0)
5✔
623
        endTime := time.Now().Add(-1 * chanExpiry)
5✔
624
        oldEdges, err := b.cfg.Graph.ChanUpdatesInHorizon(startTime, endTime)
5✔
625
        if err != nil {
5✔
626
                return fmt.Errorf("unable to fetch expired channel updates "+
×
627
                        "chans: %v", err)
×
628
        }
×
629

630
        for _, u := range oldEdges {
13✔
631
                err = filterPruneChans(u.Info, u.Policy1, u.Policy2)
8✔
632
                if err != nil {
8✔
633
                        return fmt.Errorf("error filtering channels to "+
×
634
                                "prune: %w", err)
×
635
                }
×
636
        }
637

638
        log.Infof("Pruning %v zombie channels", len(chansToPrune))
5✔
639
        if len(chansToPrune) == 0 {
7✔
640
                return nil
2✔
641
        }
2✔
642

643
        // With the set of zombie-like channels obtained, we'll do another pass
644
        // to delete them from the channel graph.
645
        toPrune := make([]uint64, 0, len(chansToPrune))
3✔
646
        for chanID := range chansToPrune {
9✔
647
                toPrune = append(toPrune, chanID)
6✔
648
                log.Tracef("Pruning zombie channel with ChannelID(%v)", chanID)
6✔
649
        }
6✔
650
        err = b.cfg.Graph.DeleteChannelEdges(
3✔
651
                b.cfg.StrictZombiePruning, true, toPrune...,
3✔
652
        )
3✔
653
        if err != nil {
3✔
654
                return fmt.Errorf("unable to delete zombie channels: %w", err)
×
655
        }
×
656

657
        // With the channels pruned, we'll also attempt to prune any nodes that
658
        // were a part of them.
659
        err = b.cfg.Graph.PruneGraphNodes()
3✔
660
        if err != nil && !errors.Is(err, graphdb.ErrGraphNodesNotFound) {
3✔
661
                return fmt.Errorf("unable to prune graph nodes: %w", err)
×
662
        }
×
663

664
        return nil
3✔
665
}
666

667
// handleNetworkUpdate is responsible for processing the update message and
668
// notifying topology changes, if any.
669
//
670
// NOTE: must be run inside goroutine.
671
func (b *Builder) handleNetworkUpdate(update *routingMsg) {
30✔
672
        defer b.wg.Done()
30✔
673

30✔
674
        // Process the routing update to determine if this is either a new
30✔
675
        // update from our PoV or an update to a prior vertex/edge we
30✔
676
        // previously accepted.
30✔
677
        err := b.processUpdate(update.msg, update.op...)
30✔
678
        update.err <- err
30✔
679

30✔
680
        // If the error is not nil here, there's no need to send topology
30✔
681
        // change.
30✔
682
        if err != nil {
35✔
683
                // Log as a debug message if this is not an error we need to be
5✔
684
                // concerned about.
5✔
685
                if IsError(err, ErrIgnored, ErrOutdated) {
7✔
686
                        log.Debugf("process network updates got: %v", err)
2✔
687
                } else {
5✔
688
                        log.Errorf("process network updates got: %v", err)
3✔
689
                }
3✔
690

691
                return
5✔
692
        }
693

694
        // Otherwise, we'll send off a new notification for the newly accepted
695
        // update, if any.
696
        topChange := &TopologyChange{}
25✔
697
        err = addToTopologyChange(b.cfg.Graph, topChange, update.msg)
25✔
698
        if err != nil {
25✔
699
                log.Errorf("unable to update topology change notification: %v",
×
700
                        err)
×
701
                return
×
702
        }
×
703

704
        if !topChange.isEmpty() {
36✔
705
                b.notifyTopologyChange(topChange)
11✔
706
        }
11✔
707
}
708

709
// networkHandler is the primary goroutine for the Builder. The roles of
710
// this goroutine include answering queries related to the state of the
711
// network, pruning the graph on new block notification, applying network
712
// updates, and registering new topology clients.
713
//
714
// NOTE: This MUST be run as a goroutine.
715
func (b *Builder) networkHandler() {
21✔
716
        defer b.wg.Done()
21✔
717

21✔
718
        graphPruneTicker := time.NewTicker(b.cfg.GraphPruneInterval)
21✔
719
        defer graphPruneTicker.Stop()
21✔
720

21✔
721
        defer b.statTicker.Stop()
21✔
722

21✔
723
        b.stats.Reset()
21✔
724

21✔
725
        for {
151✔
726
                // If there are stats, resume the statTicker.
130✔
727
                if !b.stats.Empty() {
173✔
728
                        b.statTicker.Resume()
43✔
729
                }
43✔
730

731
                select {
130✔
732
                // A new fully validated network update has just arrived. As a
733
                // result we'll modify the channel graph accordingly depending
734
                // on the exact type of the message.
735
                case update := <-b.networkUpdates:
30✔
736
                        b.wg.Add(1)
30✔
737
                        go b.handleNetworkUpdate(update)
30✔
738

739
                        // TODO(roasbeef): remove all unconnected vertexes
740
                        // after N blocks pass with no corresponding
741
                        // announcements.
742

743
                case chainUpdate, ok := <-b.staleBlocks:
10✔
744
                        // If the channel has been closed, then this indicates
10✔
745
                        // the daemon is shutting down, so we exit ourselves.
10✔
746
                        if !ok {
10✔
747
                                return
×
748
                        }
×
749

750
                        // Since this block is stale, we update our best height
751
                        // to the previous block.
752
                        blockHeight := chainUpdate.Height
10✔
753
                        b.bestHeight.Store(blockHeight - 1)
10✔
754

10✔
755
                        // Update the channel graph to reflect that this block
10✔
756
                        // was disconnected.
10✔
757
                        _, err := b.cfg.Graph.DisconnectBlockAtHeight(
10✔
758
                                blockHeight,
10✔
759
                        )
10✔
760
                        if err != nil {
10✔
761
                                log.Errorf("unable to prune graph with stale "+
×
762
                                        "block: %v", err)
×
763
                                continue
×
764
                        }
765

766
                        // TODO(halseth): notify client about the reorg?
767

768
                // A new block has arrived, so we can prune the channel graph
769
                // of any channels which were closed in the block.
770
                case chainUpdate, ok := <-b.newBlocks:
64✔
771
                        // If the channel has been closed, then this indicates
64✔
772
                        // the daemon is shutting down, so we exit ourselves.
64✔
773
                        if !ok {
64✔
774
                                return
×
775
                        }
×
776

777
                        // We'll ensure that any new blocks received attach
778
                        // directly to the end of our main chain. If not, then
779
                        // we've somehow missed some blocks. Here we'll catch
780
                        // up the chain with the latest blocks.
781
                        currentHeight := b.bestHeight.Load()
64✔
782
                        switch {
64✔
783
                        case chainUpdate.Height == currentHeight+1:
58✔
784
                                err := b.updateGraphWithClosedChannels(
58✔
785
                                        chainUpdate,
58✔
786
                                )
58✔
787
                                if err != nil {
58✔
788
                                        log.Errorf("unable to prune graph "+
×
789
                                                "with closed channels: %v", err)
×
790
                                }
×
791

792
                        case chainUpdate.Height > currentHeight+1:
1✔
793
                                log.Errorf("out of order block: expecting "+
1✔
794
                                        "height=%v, got height=%v",
1✔
795
                                        currentHeight+1, chainUpdate.Height)
1✔
796

1✔
797
                                err := b.getMissingBlocks(
1✔
798
                                        currentHeight, chainUpdate,
1✔
799
                                )
1✔
800
                                if err != nil {
1✔
801
                                        log.Errorf("unable to retrieve missing"+
×
802
                                                "blocks: %v", err)
×
803
                                }
×
804

805
                        case chainUpdate.Height < currentHeight+1:
5✔
806
                                log.Errorf("out of order block: expecting "+
5✔
807
                                        "height=%v, got height=%v",
5✔
808
                                        currentHeight+1, chainUpdate.Height)
5✔
809

5✔
810
                                log.Infof("Skipping channel pruning since "+
5✔
811
                                        "received block height %v was already"+
5✔
812
                                        " processed.", chainUpdate.Height)
5✔
813
                        }
814

815
                // A new notification client update has arrived. We're either
816
                // gaining a new client, or cancelling notifications for an
817
                // existing client.
818
                case ntfnUpdate := <-b.ntfnClientUpdates:
5✔
819
                        clientID := ntfnUpdate.clientID
5✔
820

5✔
821
                        if ntfnUpdate.cancel {
6✔
822
                                client, ok := b.topologyClients.LoadAndDelete(
1✔
823
                                        clientID,
1✔
824
                                )
1✔
825
                                if ok {
2✔
826
                                        close(client.exit)
1✔
827
                                        client.wg.Wait()
1✔
828

1✔
829
                                        close(client.ntfnChan)
1✔
830
                                }
1✔
831

832
                                continue
1✔
833
                        }
834

835
                        b.topologyClients.Store(clientID, &topologyClient{
4✔
836
                                ntfnChan: ntfnUpdate.ntfnChan,
4✔
837
                                exit:     make(chan struct{}),
4✔
838
                        })
4✔
839

840
                // The graph prune ticker has ticked, so we'll examine the
841
                // state of the known graph to filter out any zombie channels
842
                // for pruning.
843
                case <-graphPruneTicker.C:
×
844
                        if err := b.pruneZombieChans(); err != nil {
×
845
                                log.Errorf("Unable to prune zombies: %v", err)
×
846
                        }
×
847

848
                // Log any stats if we've processed a non-empty number of
849
                // channels, updates, or nodes. We'll only pause the ticker if
850
                // the last window contained no updates to avoid resuming and
851
                // pausing while consecutive windows contain new info.
852
                case <-b.statTicker.Ticks():
×
853
                        if !b.stats.Empty() {
×
854
                                log.Infof(b.stats.String())
×
855
                        } else {
×
856
                                b.statTicker.Pause()
×
857
                        }
×
858
                        b.stats.Reset()
×
859

860
                // The router has been signalled to exit, so we exit our main
861
                // loop so the wait group can be decremented.
862
                case <-b.quit:
19✔
863
                        return
19✔
864
                }
865
        }
866
}
867

868
// getMissingBlocks walks through all missing blocks and updates the graph
869
// with any channels closed in them.
870
func (b *Builder) getMissingBlocks(currentHeight uint32,
871
        chainUpdate *chainview.FilteredBlock) error {
1✔
872

1✔
873
        outdatedHash, err := b.cfg.Chain.GetBlockHash(int64(currentHeight))
1✔
874
        if err != nil {
1✔
875
                return err
×
876
        }
×
877

878
        outdatedBlock := &chainntnfs.BlockEpoch{
1✔
879
                Height: int32(currentHeight),
1✔
880
                Hash:   outdatedHash,
1✔
881
        }
1✔
882

1✔
883
        epochClient, err := b.cfg.Notifier.RegisterBlockEpochNtfn(
1✔
884
                outdatedBlock,
1✔
885
        )
1✔
886
        if err != nil {
1✔
887
                return err
×
888
        }
×
889
        defer epochClient.Cancel()
1✔
890

1✔
891
        blockDifference := int(chainUpdate.Height - currentHeight)
1✔
892

1✔
893
        // We'll walk through all the outdated blocks and make sure we're able
1✔
894
        // to update the graph with any closed channels from them.
1✔
895
        for i := 0; i < blockDifference; i++ {
6✔
896
                var (
5✔
897
                        missingBlock *chainntnfs.BlockEpoch
5✔
898
                        ok           bool
5✔
899
                )
5✔
900

5✔
901
                select {
5✔
902
                case missingBlock, ok = <-epochClient.Epochs:
5✔
903
                        if !ok {
5✔
904
                                return nil
×
905
                        }
×
906

907
                case <-b.quit:
×
908
                        return nil
×
909
                }
910

911
                filteredBlock, err := b.cfg.ChainView.FilterBlock(
5✔
912
                        missingBlock.Hash,
5✔
913
                )
5✔
914
                if err != nil {
5✔
915
                        return err
×
916
                }
×
917

918
                err = b.updateGraphWithClosedChannels(
5✔
919
                        filteredBlock,
5✔
920
                )
5✔
921
                if err != nil {
5✔
922
                        return err
×
923
                }
×
924
        }
925

926
        return nil
1✔
927
}
928

929
// updateGraphWithClosedChannels prunes the channel graph of closed channels
930
// that are no longer needed.
931
func (b *Builder) updateGraphWithClosedChannels(
932
        chainUpdate *chainview.FilteredBlock) error {
63✔
933

63✔
934
        // Once a new block arrives, we update our running track of the height
63✔
935
        // of the chain tip.
63✔
936
        blockHeight := chainUpdate.Height
63✔
937

63✔
938
        b.bestHeight.Store(blockHeight)
63✔
939
        log.Infof("Pruning channel graph using block %v (height=%v)",
63✔
940
                chainUpdate.Hash, blockHeight)
63✔
941

63✔
942
        // We're only interested in all prior outputs that have been spent in
63✔
943
        // the block, so collate all the referenced previous outpoints within
63✔
944
        // each tx and input.
63✔
945
        var spentOutputs []*wire.OutPoint
63✔
946
        for _, tx := range chainUpdate.Transactions {
64✔
947
                for _, txIn := range tx.TxIn {
2✔
948
                        spentOutputs = append(spentOutputs,
1✔
949
                                &txIn.PreviousOutPoint)
1✔
950
                }
1✔
951
        }
952

953
        // With the spent outputs gathered, attempt to prune the channel graph,
954
        // also passing in the hash+height of the block being pruned so the
955
        // prune tip can be updated.
956
        chansClosed, err := b.cfg.Graph.PruneGraph(spentOutputs,
63✔
957
                &chainUpdate.Hash, chainUpdate.Height)
63✔
958
        if err != nil {
63✔
959
                log.Errorf("unable to prune routing table: %v", err)
×
960
                return err
×
961
        }
×
962

963
        log.Infof("Block %v (height=%v) closed %v channels", chainUpdate.Hash,
63✔
964
                blockHeight, len(chansClosed))
63✔
965

63✔
966
        if len(chansClosed) == 0 {
125✔
967
                return err
62✔
968
        }
62✔
969

970
        // Notify all currently registered clients of the newly closed channels.
971
        closeSummaries := createCloseSummaries(blockHeight, chansClosed...)
1✔
972
        b.notifyTopologyChange(&TopologyChange{
1✔
973
                ClosedChannels: closeSummaries,
1✔
974
        })
1✔
975

1✔
976
        return nil
1✔
977
}
978

979
// assertNodeAnnFreshness returns a non-nil error if we have an announcement in
980
// the database for the passed node with a timestamp newer than the passed
981
// timestamp. ErrIgnored will be returned if we already have the node, and
982
// ErrOutdated will be returned if we have a timestamp that's after the new
983
// timestamp.
984
func (b *Builder) assertNodeAnnFreshness(node route.Vertex,
985
        msgTimestamp time.Time) error {
10✔
986

10✔
987
        // If we are not already aware of this node, it means that we don't
10✔
988
        // know about any channel using this node. To avoid a DoS attack by
10✔
989
        // node announcements, we will ignore such nodes. If we do know about
10✔
990
        // this node, check that this update brings info newer than what we
10✔
991
        // already have.
10✔
992
        lastUpdate, exists, err := b.cfg.Graph.HasLightningNode(node)
10✔
993
        if err != nil {
10✔
994
                return errors.Errorf("unable to query for the "+
×
995
                        "existence of node: %v", err)
×
996
        }
×
997
        if !exists {
11✔
998
                return NewErrf(ErrIgnored, "Ignoring node announcement"+
1✔
999
                        " for node not found in channel graph (%x)",
1✔
1000
                        node[:])
1✔
1001
        }
1✔
1002

1003
        // If we've reached this point then we're aware of the vertex being
1004
        // advertised. So we now check if the new message has a new time stamp,
1005
        // if not then we won't accept the new data as it would override newer
1006
        // data.
1007
        if !lastUpdate.Before(msgTimestamp) {
10✔
1008
                return NewErrf(ErrOutdated, "Ignoring outdated "+
1✔
1009
                        "announcement for %x", node[:])
1✔
1010
        }
1✔
1011

1012
        return nil
8✔
1013
}
1014

1015
// addZombieEdge adds a channel that failed complete validation into the zombie
1016
// index so we can avoid having to re-validate it in the future.
1017
func (b *Builder) addZombieEdge(chanID uint64) error {
3✔
1018
        // If the edge fails validation we'll mark the edge itself as a zombie
3✔
1019
        // so we don't continue to request it. We use the "zero key" for both
3✔
1020
        // node pubkeys so this edge can't be resurrected.
3✔
1021
        var zeroKey [33]byte
3✔
1022
        err := b.cfg.Graph.MarkEdgeZombie(chanID, zeroKey, zeroKey)
3✔
1023
        if err != nil {
3✔
1024
                return fmt.Errorf("unable to mark spent chan(id=%v) as a "+
×
1025
                        "zombie: %w", chanID, err)
×
1026
        }
×
1027

1028
        return nil
3✔
1029
}
1030

1031
// makeFundingScript is used to make the funding script for both segwit v0 and
1032
// segwit v1 (taproot) channels.
1033
//
1034
// TODO(roasbeef): export and use elsewhere?
1035
func makeFundingScript(bitcoinKey1, bitcoinKey2 []byte, chanFeatures []byte,
1036
        tapscriptRoot fn.Option[chainhash.Hash]) ([]byte, error) {
16✔
1037

16✔
1038
        legacyFundingScript := func() ([]byte, error) {
32✔
1039
                witnessScript, err := input.GenMultiSigScript(
16✔
1040
                        bitcoinKey1, bitcoinKey2,
16✔
1041
                )
16✔
1042
                if err != nil {
16✔
1043
                        return nil, err
×
1044
                }
×
1045
                pkScript, err := input.WitnessScriptHash(witnessScript)
16✔
1046
                if err != nil {
16✔
1047
                        return nil, err
×
1048
                }
×
1049

1050
                return pkScript, nil
16✔
1051
        }
1052

1053
        if len(chanFeatures) == 0 {
32✔
1054
                return legacyFundingScript()
16✔
1055
        }
16✔
1056

1057
        // In order to make the correct funding script, we'll need to parse the
1058
        // chanFeatures bytes into a feature vector we can interact with.
1059
        rawFeatures := lnwire.NewRawFeatureVector()
×
1060
        err := rawFeatures.Decode(bytes.NewReader(chanFeatures))
×
1061
        if err != nil {
×
1062
                return nil, fmt.Errorf("unable to parse chan feature "+
×
1063
                        "bits: %w", err)
×
1064
        }
×
1065

1066
        chanFeatureBits := lnwire.NewFeatureVector(
×
1067
                rawFeatures, lnwire.Features,
×
1068
        )
×
1069
        if chanFeatureBits.HasFeature(
×
1070
                lnwire.SimpleTaprootChannelsOptionalStaging,
×
1071
        ) {
×
1072

×
1073
                pubKey1, err := btcec.ParsePubKey(bitcoinKey1)
×
1074
                if err != nil {
×
1075
                        return nil, err
×
1076
                }
×
1077
                pubKey2, err := btcec.ParsePubKey(bitcoinKey2)
×
1078
                if err != nil {
×
1079
                        return nil, err
×
1080
                }
×
1081

1082
                fundingScript, _, err := input.GenTaprootFundingScript(
×
1083
                        pubKey1, pubKey2, 0, tapscriptRoot,
×
1084
                )
×
1085
                if err != nil {
×
1086
                        return nil, err
×
1087
                }
×
1088

1089
                // TODO(roasbeef): add tapscript root to gossip v1.5
1090

1091
                return fundingScript, nil
×
1092
        }
1093

1094
        return legacyFundingScript()
×
1095
}
1096

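As a usage note (illustrative, not part of builder.go), the sketch below
exercises both paths of makeFundingScript: empty channel features select
the legacy P2WSH 2-of-2 script, while a feature vector advertising
SimpleTaprootChannelsOptionalStaging selects the taproot funding script.

// exampleFundingScripts is an illustrative sketch, not part of builder.go.
// bitcoinKey1 and bitcoinKey2 are assumed to be valid 33-byte compressed
// public keys obtained elsewhere.
func exampleFundingScripts(bitcoinKey1, bitcoinKey2 []byte) error {
        // Legacy (segwit v0) channel: no feature bytes, so the helper
        // returns the P2WSH 2-of-2 multisig script.
        p2wsh, err := makeFundingScript(
                bitcoinKey1, bitcoinKey2, nil, fn.None[chainhash.Hash](),
        )
        if err != nil {
                return err
        }

        // Taproot (segwit v1) channel: encode a feature vector carrying
        // the staging taproot channel bit and pass it as chanFeatures.
        rawFeatures := lnwire.NewRawFeatureVector(
                lnwire.SimpleTaprootChannelsOptionalStaging,
        )
        var buf bytes.Buffer
        if err := rawFeatures.Encode(&buf); err != nil {
                return err
        }

        p2tr, err := makeFundingScript(
                bitcoinKey1, bitcoinKey2, buf.Bytes(),
                fn.None[chainhash.Hash](),
        )
        if err != nil {
                return err
        }

        log.Debugf("p2wsh=%x, p2tr=%x", p2wsh, p2tr)

        return nil
}
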
1097
// processUpdate processes a new authenticated channel/edge, node, or
1098
// channel/edge update network update. If the update didn't affect the internal
1099
// state of the graph due to being out of date, invalid, or redundant,
1100
// then an error is returned.
1101
//
1102
//nolint:funlen
1103
func (b *Builder) processUpdate(msg interface{},
1104
        op ...batch.SchedulerOption) error {
30✔
1105

30✔
1106
        switch msg := msg.(type) {
30✔
1107
        case *models.LightningNode:
7✔
1108
                // Before we add the node to the database, we'll check to see
7✔
1109
                // if the announcement is "fresh" or not. If it isn't, then
7✔
1110
                // we'll return an error.
7✔
1111
                err := b.assertNodeAnnFreshness(msg.PubKeyBytes, msg.LastUpdate)
7✔
1112
                if err != nil {
8✔
1113
                        return err
1✔
1114
                }
1✔
1115

1116
                if err := b.cfg.Graph.AddLightningNode(msg, op...); err != nil {
6✔
1117
                        return errors.Errorf("unable to add node %x to the "+
×
1118
                                "graph: %v", msg.PubKeyBytes, err)
×
1119
                }
×
1120

1121
                log.Tracef("Updated vertex data for node=%x", msg.PubKeyBytes)
6✔
1122
                b.stats.incNumNodeUpdates()
6✔
1123

1124
        case *models.ChannelEdgeInfo:
17✔
1125
                log.Debugf("Received ChannelEdgeInfo for channel %v",
17✔
1126
                        msg.ChannelID)
17✔
1127

17✔
1128
                // Prior to processing the announcement we first check if we
17✔
1129
                // already know of this channel, if so, then we can exit early.
17✔
1130
                _, _, exists, isZombie, err := b.cfg.Graph.HasChannelEdge(
17✔
1131
                        msg.ChannelID,
17✔
1132
                )
17✔
1133
                if err != nil &&
17✔
1134
                        !errors.Is(err, graphdb.ErrGraphNoEdgesFound) {
17✔
1135

×
1136
                        return errors.Errorf("unable to check for edge "+
×
1137
                                "existence: %v", err)
×
1138
                }
×
1139
                if isZombie {
17✔
1140
                        return NewErrf(ErrIgnored, "ignoring msg for zombie "+
×
1141
                                "chan_id=%v", msg.ChannelID)
×
1142
                }
×
1143
                if exists {
17✔
1144
                        return NewErrf(ErrIgnored, "ignoring msg for known "+
×
1145
                                "chan_id=%v", msg.ChannelID)
×
1146
                }
×
1147

1148
                // If AssumeChannelValid is present, then we are unable to
1149
                // perform any of the expensive checks below, so we'll
1150
                // short-circuit our path straight to adding the edge to our
1151
                // graph. If the passed ShortChannelID is an alias, then we'll
1152
                // skip validation as it will not map to a legitimate tx. This
1153
                // is not a DoS vector as only we can add an alias
1154
                // ChannelAnnouncement from the gossiper.
1155
                scid := lnwire.NewShortChanIDFromInt(msg.ChannelID)
17✔
1156
                if b.cfg.AssumeChannelValid || b.cfg.IsAlias(scid) {
17✔
1157
                        err := b.cfg.Graph.AddChannelEdge(msg, op...)
×
1158
                        if err != nil {
×
1159
                                return fmt.Errorf("unable to add edge: %w", err)
×
1160
                        }
×
1161
                        log.Tracef("New channel discovered! Link "+
×
1162
                                "connects %x and %x with ChannelID(%v)",
×
1163
                                msg.NodeKey1Bytes, msg.NodeKey2Bytes,
×
1164
                                msg.ChannelID)
×
1165
                        b.stats.incNumEdgesDiscovered()
×
1166

×
1167
                        break
×
1168
                }
1169

1170
                // Before we can add the channel to the channel graph, we need
1171
                // to obtain the full funding outpoint that's encoded within
1172
                // the channel ID.
1173
                channelID := lnwire.NewShortChanIDFromInt(msg.ChannelID)
17✔
1174
                fundingTx, err := lnwallet.FetchFundingTxWrapper(
17✔
1175
                        b.cfg.Chain, &channelID, b.quit,
17✔
1176
                )
17✔
1177
                if err != nil {
18✔
1178
                        //nolint:ll
1✔
1179
                        //
1✔
1180
                        // In order to ensure we don't erroneously mark a
1✔
1181
                        // channel as a zombie due to an RPC failure, we'll
1✔
1182
                        // attempt to string match for the relevant errors.
1✔
1183
                        //
1✔
1184
                        // * btcd:
1✔
1185
                        //    * https://github.com/btcsuite/btcd/blob/master/rpcserver.go#L1316
1✔
1186
                        //    * https://github.com/btcsuite/btcd/blob/master/rpcserver.go#L1086
1✔
1187
                        // * bitcoind:
1✔
1188
                        //    * https://github.com/bitcoin/bitcoin/blob/7fcf53f7b4524572d1d0c9a5fdc388e87eb02416/src/rpc/blockchain.cpp#L770
1✔
1189
                        //     * https://github.com/bitcoin/bitcoin/blob/7fcf53f7b4524572d1d0c9a5fdc388e87eb02416/src/rpc/blockchain.cpp#L954
1✔
1190
                        switch {
1✔
1191
                        case strings.Contains(err.Error(), "not found"):
×
1192
                                fallthrough
×
1193

1194
                        case strings.Contains(err.Error(), "out of range"):
1✔
1195
                                // If the funding transaction isn't found at
1✔
1196
                                // all, then we'll mark the edge itself as a
1✔
1197
                                // zombie so we don't continue to request it.
1✔
1198
                                // We use the "zero key" for both node pubkeys
1✔
1199
                                // so this edge can't be resurrected.
1✔
1200
                                zErr := b.addZombieEdge(msg.ChannelID)
1✔
1201
                                if zErr != nil {
1✔
1202
                                        return zErr
×
1203
                                }
×
1204

1205
                        default:
×
1206
                        }
1207

1208
                        return NewErrf(ErrNoFundingTransaction, "unable to "+
1✔
1209
                                "locate funding tx: %v", err)
1✔
1210
                }
1211

1212
                // Recreate witness output to be sure that declared in channel
1213
                // edge bitcoin keys and channel value corresponds to the
1214
                // reality.
1215
                fundingPkScript, err := makeFundingScript(
16✔
1216
                        msg.BitcoinKey1Bytes[:], msg.BitcoinKey2Bytes[:],
16✔
1217
                        msg.Features, msg.TapscriptRoot,
16✔
1218
                )
16✔
1219
                if err != nil {
16✔
1220
                        return err
×
1221
                }
×
1222

1223
                // Next we'll validate that this channel is actually well
1224
                // formed. If this check fails, then this channel either
1225
                // doesn't exist, or isn't the one that was meant to be created
1226
                // according to the passed channel proofs.
1227
                fundingPoint, err := chanvalidate.Validate(
16✔
1228
                        &chanvalidate.Context{
16✔
1229
                                Locator: &chanvalidate.ShortChanIDChanLocator{
16✔
1230
                                        ID: channelID,
16✔
1231
                                },
16✔
1232
                                MultiSigPkScript: fundingPkScript,
16✔
1233
                                FundingTx:        fundingTx,
16✔
1234
                        },
16✔
1235
                )
16✔
1236
                if err != nil {
17✔
1237
                        // Mark the edge as a zombie so we won't try to
1✔
1238
                        // re-validate it on start up.
1✔
1239
                        if err := b.addZombieEdge(msg.ChannelID); err != nil {
1✔
1240
                                return err
×
1241
                        }
×
1242

1243
                        return NewErrf(ErrInvalidFundingOutput, "output "+
1✔
1244
                                "failed validation: %w", err)
1✔
1245
                }
1246

1247
                // Now that we have the funding outpoint of the channel, ensure
1248
                // that it hasn't yet been spent. If so, then this channel has
1249
                // been closed so we'll ignore it.
1250
                chanUtxo, err := b.cfg.Chain.GetUtxo(
15✔
1251
                        fundingPoint, fundingPkScript, channelID.BlockHeight,
15✔
1252
                        b.quit,
15✔
1253
                )
15✔
1254
                if err != nil {
16✔
1255
                        if errors.Is(err, btcwallet.ErrOutputSpent) {
2✔
1256
                                zErr := b.addZombieEdge(msg.ChannelID)
1✔
1257
                                if zErr != nil {
1✔
1258
                                        return zErr
×
1259
                                }
×
1260
                        }
1261

1262
                        return NewErrf(ErrChannelSpent, "unable to fetch utxo "+
1✔
1263
                                "for chan_id=%v, chan_point=%v: %v",
1✔
1264
                                msg.ChannelID, fundingPoint, err)
1✔
1265
                }
1266

1267
                // TODO(roasbeef): this is a hack, needs to be removed
1268
                // after commitment fees are dynamic.
1269
                msg.Capacity = btcutil.Amount(chanUtxo.Value)
14✔
1270
                msg.ChannelPoint = *fundingPoint
14✔
1271
                if err := b.cfg.Graph.AddChannelEdge(msg, op...); err != nil {
14✔
1272
                        return errors.Errorf("unable to add edge: %v", err)
×
1273
                }
×
1274

1275
                log.Debugf("New channel discovered! Link "+
14✔
1276
                        "connects %x and %x with ChannelPoint(%v): "+
14✔
1277
                        "chan_id=%v, capacity=%v",
14✔
1278
                        msg.NodeKey1Bytes, msg.NodeKey2Bytes,
14✔
1279
                        fundingPoint, msg.ChannelID, msg.Capacity)
14✔
1280
                b.stats.incNumEdgesDiscovered()
14✔
1281

14✔
1282
                // As a new edge has been added to the channel graph, we'll
14✔
1283
                // update the current UTXO filter within our active
14✔
1284
                // FilteredChainView so we are notified if/when this channel is
14✔
1285
                // closed.
14✔
1286
                filterUpdate := []graphdb.EdgePoint{
14✔
1287
                        {
14✔
1288
                                FundingPkScript: fundingPkScript,
14✔
1289
                                OutPoint:        *fundingPoint,
14✔
1290
                        },
14✔
1291
                }
14✔
1292
                err = b.cfg.ChainView.UpdateFilter(
14✔
1293
                        filterUpdate, b.bestHeight.Load(),
14✔
1294
                )
14✔
1295
                if err != nil {
14✔
1296
                        return errors.Errorf("unable to update chain "+
×
1297
                                "view: %v", err)
×
1298
                }
×
1299

1300
        case *models.ChannelEdgePolicy:
6✔
1301
                log.Debugf("Received ChannelEdgePolicy for channel %v",
6✔
1302
                        msg.ChannelID)
6✔
1303

6✔
1304
                // We make sure to hold the mutex for this channel ID,
6✔
1305
                // such that no other goroutine is concurrently doing
6✔
1306
                // database accesses for the same channel ID.
6✔
1307
                b.channelEdgeMtx.Lock(msg.ChannelID)
6✔
1308
                defer b.channelEdgeMtx.Unlock(msg.ChannelID)
6✔
1309

6✔
1310
                edge1Timestamp, edge2Timestamp, exists, isZombie, err :=
6✔
1311
                        b.cfg.Graph.HasChannelEdge(msg.ChannelID)
6✔
1312
                if err != nil && !errors.Is(
6✔
1313
                        err, graphdb.ErrGraphNoEdgesFound,
6✔
1314
                ) {
6✔
1315

×
1316
                        return errors.Errorf("unable to check for edge "+
×
1317
                                "existence: %v", err)
×
1318
                }
×
1319

1320
                // If the channel is marked as a zombie in our database, and
1321
                // we consider this a stale update, then we should not apply the
1322
                // policy.
1323
                isStaleUpdate := time.Since(msg.LastUpdate) >
6✔
1324
                        b.cfg.ChannelPruneExpiry
6✔
1325

6✔
1326
                if isZombie && isStaleUpdate {
6✔
1327
                        return NewErrf(ErrIgnored, "ignoring stale update "+
×
1328
                                "(flags=%v|%v) for zombie chan_id=%v",
×
1329
                                msg.MessageFlags, msg.ChannelFlags,
×
1330
                                msg.ChannelID)
×
1331
                }
×
1332

1333
                // If the channel doesn't exist in our database, we cannot
1334
                // apply the updated policy.
1335
                if !exists {
7✔
1336
                        return NewErrf(ErrIgnored, "ignoring update "+
1✔
1337
                                "(flags=%v|%v) for unknown chan_id=%v",
1✔
1338
                                msg.MessageFlags, msg.ChannelFlags,
1✔
1339
                                msg.ChannelID)
1✔
1340
                }
1✔
1341

1342
                log.Debugf("Found edge1Timestamp=%v, edge2Timestamp=%v",
5✔
1343
                        edge1Timestamp, edge2Timestamp)
5✔
1344

5✔
1345
                // As edges are directional edge node has a unique policy for
5✔
1346
                // the direction of the edge they control. Therefore, we first
5✔
1347
                // check if we already have the most up-to-date information for
5✔
1348
                // that edge. If this message has a timestamp not strictly
5✔
1349
                // newer than what we already know of we can exit early.
5✔
1350
                switch msg.ChannelFlags & lnwire.ChanUpdateDirection {
5✔
1351
                // A flag set of 0 indicates this is an announcement for the
1352
                // "first" node in the channel.
1353
                case 0:
3✔
1354
                        // Ignore outdated message.
3✔
1355
                        if !edge1Timestamp.Before(msg.LastUpdate) {
3✔
1356
                                return NewErrf(ErrOutdated, "Ignoring "+
×
1357
                                        "outdated update (flags=%v|%v) for "+
×
1358
                                        "known chan_id=%v", msg.MessageFlags,
×
1359
                                        msg.ChannelFlags, msg.ChannelID)
×
1360
                        }
×
1361

1362
                // Similarly, a flag set of 1 indicates this is an announcement
1363
                // for the "second" node in the channel.
1364
                case 1:
2✔
1365
                        // Ignore outdated message.
2✔
1366
                        if !edge2Timestamp.Before(msg.LastUpdate) {
2✔
1367
                                return NewErrf(ErrOutdated, "Ignoring "+
×
1368
                                        "outdated update (flags=%v|%v) for "+
×
1369
                                        "known chan_id=%v", msg.MessageFlags,
×
1370
                                        msg.ChannelFlags, msg.ChannelID)
×
1371
                        }
×
1372
                }
1373

1374
                // Now that we know this isn't a stale update, we'll apply the
1375
                // new edge policy to the proper directional edge within the
1376
                // channel graph.
1377
                if err = b.cfg.Graph.UpdateEdgePolicy(msg, op...); err != nil {
5✔
1378
                        err := errors.Errorf("unable to add channel: %v", err)
×
1379
                        log.Error(err)
×
1380
                        return err
×
1381
                }
×
1382

1383
                log.Tracef("New channel update applied: %v",
5✔
1384
                        lnutils.SpewLogClosure(msg))
5✔
1385
                b.stats.incNumChannelUpdates()
5✔
1386

1387
        default:
×
1388
                return errors.Errorf("wrong routing update message type")
×
1389
        }
1390

1391
        return nil
25✔
1392
}
1393

1394
// routingMsg couples a routing related routing topology update to the
1395
// error channel.
1396
type routingMsg struct {
1397
        msg interface{}
1398
        op  []batch.SchedulerOption
1399
        err chan error
1400
}
1401
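
// networkUpdateLoopSketch is an illustrative sketch added by the editor; it is
// not part of the upstream builder.go. It shows how the routingMsg
// request/response pattern is typically consumed: a single goroutine drains
// networkUpdates, processes each payload, and reports the result on the
// message's buffered err channel, while callers (AddNode, AddEdge and
// UpdateEdge below) block on that channel or on quit. The process callback is
// a hypothetical stand-in for the builder's own update handler.
func (b *Builder) networkUpdateLoopSketch(
        process func(msg interface{}, op ...batch.SchedulerOption) error) {

        for {
                select {
                case update := <-b.networkUpdates:
                        // Hand the result back to the caller blocked in
                        // AddNode, AddEdge or UpdateEdge. The err channel is
                        // buffered, so this send never blocks.
                        update.err <- process(update.msg, update.op...)

                case <-b.quit:
                        return
                }
        }
}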

// ApplyChannelUpdate validates a channel update and, if valid, applies it to
// the database. It returns a bool indicating whether the update was
// successful.
func (b *Builder) ApplyChannelUpdate(msg *lnwire.ChannelUpdate1) bool {
        ch, _, _, err := b.GetChannelByID(msg.ShortChannelID)
        if err != nil {
                log.Errorf("Unable to retrieve channel by id: %v", err)
                return false
        }

        var pubKey *btcec.PublicKey

        switch msg.ChannelFlags & lnwire.ChanUpdateDirection {
        case 0:
                pubKey, _ = ch.NodeKey1()

        case 1:
                pubKey, _ = ch.NodeKey2()
        }

        // Exit early if the pubkey cannot be decided.
        if pubKey == nil {
                log.Errorf("Unable to decide pubkey with ChannelFlags=%v",
                        msg.ChannelFlags)
                return false
        }

        err = netann.ValidateChannelUpdateAnn(pubKey, ch.Capacity, msg)
        if err != nil {
                log.Errorf("Unable to validate channel update: %v", err)
                return false
        }

        err = b.UpdateEdge(&models.ChannelEdgePolicy{
                SigBytes:                  msg.Signature.ToSignatureBytes(),
                ChannelID:                 msg.ShortChannelID.ToUint64(),
                LastUpdate:                time.Unix(int64(msg.Timestamp), 0),
                MessageFlags:              msg.MessageFlags,
                ChannelFlags:              msg.ChannelFlags,
                TimeLockDelta:             msg.TimeLockDelta,
                MinHTLC:                   msg.HtlcMinimumMsat,
                MaxHTLC:                   msg.HtlcMaximumMsat,
                FeeBaseMSat:               lnwire.MilliSatoshi(msg.BaseFee),
                FeeProportionalMillionths: lnwire.MilliSatoshi(msg.FeeRate),
                ExtraOpaqueData:           msg.ExtraOpaqueData,
        })
        if err != nil && !IsError(err, ErrIgnored, ErrOutdated) {
                log.Errorf("Unable to apply channel update: %v", err)
                return false
        }

        return true
}
1455
// AddNode is used to add information about a node to the router database. If
1456
// the node with this pubkey is not present in an existing channel, it will
1457
// be ignored.
1458
//
1459
// NOTE: This method is part of the ChannelGraphSource interface.
1460
func (b *Builder) AddNode(node *models.LightningNode,
1461
        op ...batch.SchedulerOption) error {
7✔
1462

7✔
1463
        rMsg := &routingMsg{
7✔
1464
                msg: node,
7✔
1465
                op:  op,
7✔
1466
                err: make(chan error, 1),
7✔
1467
        }
7✔
1468

7✔
1469
        select {
7✔
1470
        case b.networkUpdates <- rMsg:
7✔
1471
                select {
7✔
1472
                case err := <-rMsg.err:
7✔
1473
                        return err
7✔
1474
                case <-b.quit:
×
1475
                        return ErrGraphBuilderShuttingDown
×
1476
                }
1477
        case <-b.quit:
×
1478
                return ErrGraphBuilderShuttingDown
×
1479
        }
1480
}
1481

1482
// AddEdge is used to add edge/channel to the topology of the router, after all
1483
// information about channel will be gathered this edge/channel might be used
1484
// in construction of payment path.
1485
//
1486
// NOTE: This method is part of the ChannelGraphSource interface.
1487
func (b *Builder) AddEdge(edge *models.ChannelEdgeInfo,
1488
        op ...batch.SchedulerOption) error {
17✔
1489

17✔
1490
        rMsg := &routingMsg{
17✔
1491
                msg: edge,
17✔
1492
                op:  op,
17✔
1493
                err: make(chan error, 1),
17✔
1494
        }
17✔
1495

17✔
1496
        select {
17✔
1497
        case b.networkUpdates <- rMsg:
17✔
1498
                select {
17✔
1499
                case err := <-rMsg.err:
17✔
1500
                        return err
17✔
1501
                case <-b.quit:
×
1502
                        return ErrGraphBuilderShuttingDown
×
1503
                }
1504
        case <-b.quit:
×
1505
                return ErrGraphBuilderShuttingDown
×
1506
        }
1507
}
1508

1509
// UpdateEdge is used to update edge information, without this message edge
1510
// considered as not fully constructed.
1511
//
1512
// NOTE: This method is part of the ChannelGraphSource interface.
1513
func (b *Builder) UpdateEdge(update *models.ChannelEdgePolicy,
1514
        op ...batch.SchedulerOption) error {
6✔
1515

6✔
1516
        rMsg := &routingMsg{
6✔
1517
                msg: update,
6✔
1518
                op:  op,
6✔
1519
                err: make(chan error, 1),
6✔
1520
        }
6✔
1521

6✔
1522
        select {
6✔
1523
        case b.networkUpdates <- rMsg:
6✔
1524
                select {
6✔
1525
                case err := <-rMsg.err:
6✔
1526
                        return err
6✔
1527
                case <-b.quit:
×
1528
                        return ErrGraphBuilderShuttingDown
×
1529
                }
1530
        case <-b.quit:
×
1531
                return ErrGraphBuilderShuttingDown
×
1532
        }
1533
}
1534
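
// seedGraphSketch is an illustrative example added by the editor; it is not
// part of the upstream builder.go. It shows the typical call order against
// the ChannelGraphSource methods above: nodes first, then the channel edge,
// then a directional policy. The arguments are assumed to be fully populated
// and signed by the caller, and the function name is hypothetical.
func seedGraphSketch(b *Builder, node *models.LightningNode,
        edge *models.ChannelEdgeInfo, policy *models.ChannelEdgePolicy) error {

        if err := b.AddNode(node); err != nil {
                return fmt.Errorf("unable to add node: %w", err)
        }

        if err := b.AddEdge(edge); err != nil {
                return fmt.Errorf("unable to add edge: %w", err)
        }

        // Each direction of the channel carries its own policy; this applies
        // one of them.
        if err := b.UpdateEdge(policy); err != nil {
                return fmt.Errorf("unable to update edge: %w", err)
        }

        return nil
}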

// CurrentBlockHeight returns the block height from the PoV of the router
// subsystem.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (b *Builder) CurrentBlockHeight() (uint32, error) {
        _, height, err := b.cfg.Chain.GetBestBlock()
        return uint32(height), err
}

// SyncedHeight returns the block height to which the router subsystem is
// currently synced. This can differ from the above chain height if the
// goroutine responsible for processing the blocks isn't yet up to speed.
func (b *Builder) SyncedHeight() uint32 {
        return b.bestHeight.Load()
}

// GetChannelByID returns the channel by the channel id.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (b *Builder) GetChannelByID(chanID lnwire.ShortChannelID) (
        *models.ChannelEdgeInfo,
        *models.ChannelEdgePolicy,
        *models.ChannelEdgePolicy, error) {

        return b.cfg.Graph.FetchChannelEdgesByID(chanID.ToUint64())
}

// FetchLightningNode attempts to look up a target node by its identity public
// key. graphdb.ErrGraphNodeNotFound is returned if the node doesn't exist
// within the graph.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (b *Builder) FetchLightningNode(
        node route.Vertex) (*models.LightningNode, error) {

        return b.cfg.Graph.FetchLightningNode(node)
}

// ForEachNode is used to iterate over every node in the router topology.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (b *Builder) ForEachNode(
        cb func(*models.LightningNode) error) error {

        return b.cfg.Graph.ForEachNode(
                func(_ kvdb.RTx, n *models.LightningNode) error {
                        return cb(n)
                })
}

// ForAllOutgoingChannels is used to iterate over all outgoing channels owned
// by the router.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (b *Builder) ForAllOutgoingChannels(cb func(*models.ChannelEdgeInfo,
        *models.ChannelEdgePolicy) error) error {

        return b.cfg.Graph.ForEachNodeChannel(b.cfg.SelfNode,
                func(_ kvdb.RTx, c *models.ChannelEdgeInfo,
                        e *models.ChannelEdgePolicy,
                        _ *models.ChannelEdgePolicy) error {

                        if e == nil {
                                return fmt.Errorf("channel from self node " +
                                        "has no policy")
                        }

                        return cb(c, e)
                },
        )
}
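
// logOwnPoliciesSketch is an illustrative example added by the editor; it is
// not part of the upstream builder.go. It demonstrates ForAllOutgoingChannels
// by logging the outgoing fee policy of every channel owned by the source
// node. The function name is hypothetical.
func logOwnPoliciesSketch(b *Builder) error {
        return b.ForAllOutgoingChannels(func(info *models.ChannelEdgeInfo,
                policy *models.ChannelEdgePolicy) error {

                // Each entry is one of our channels together with our own
                // outgoing policy for it.
                log.Debugf("chan_id=%v base_fee=%v fee_rate=%v",
                        info.ChannelID, policy.FeeBaseMSat,
                        policy.FeeProportionalMillionths)

                return nil
        })
}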

// AddProof updates the channel edge info with the proof which is needed to
// properly announce the edge to the rest of the network.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (b *Builder) AddProof(chanID lnwire.ShortChannelID,
        proof *models.ChannelAuthProof) error {

        info, _, _, err := b.cfg.Graph.FetchChannelEdgesByID(chanID.ToUint64())
        if err != nil {
                return err
        }

        info.AuthProof = proof

        return b.cfg.Graph.UpdateChannelEdge(info)
}

// IsStaleNode returns true if the graph source has a node announcement for the
// target node with a more recent timestamp.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (b *Builder) IsStaleNode(node route.Vertex,
        timestamp time.Time) bool {

        // If our attempt to assert that the node announcement is fresh fails,
        // then we know that this is actually a stale announcement.
        err := b.assertNodeAnnFreshness(node, timestamp)
        if err != nil {
                log.Debugf("Checking stale node %x got %v", node, err)
                return true
        }

        return false
}

// IsPublicNode determines whether the given vertex is seen as a public node in
// the graph from the graph's source node's point of view.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (b *Builder) IsPublicNode(node route.Vertex) (bool, error) {
        return b.cfg.Graph.IsPublicNode(node)
}

// IsKnownEdge returns true if the graph source already knows of the passed
// channel ID either as a live or zombie edge.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (b *Builder) IsKnownEdge(chanID lnwire.ShortChannelID) bool {
        _, _, exists, isZombie, _ := b.cfg.Graph.HasChannelEdge(
                chanID.ToUint64(),
        )

        return exists || isZombie
}

// IsStaleEdgePolicy returns true if the graph source has a channel edge for
// the passed channel ID (and flags) that has a more recent timestamp.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (b *Builder) IsStaleEdgePolicy(chanID lnwire.ShortChannelID,
        timestamp time.Time, flags lnwire.ChanUpdateChanFlags) bool {

        edge1Timestamp, edge2Timestamp, exists, isZombie, err :=
                b.cfg.Graph.HasChannelEdge(chanID.ToUint64())
        if err != nil {
                log.Debugf("Check stale edge policy got error: %v", err)
                return false
        }

        // If we know of the edge as a zombie, then we'll make some additional
        // checks to determine if the new policy is fresh.
        if isZombie {
                // When running with AssumeChannelValid, we also prune channels
                // if both of their edges are disabled. We'll mark the new
                // policy as stale if it remains disabled.
                if b.cfg.AssumeChannelValid {
                        isDisabled := flags&lnwire.ChanUpdateDisabled ==
                                lnwire.ChanUpdateDisabled
                        if isDisabled {
                                return true
                        }
                }

                // Otherwise, we'll fall back to our usual ChannelPruneExpiry.
                return time.Since(timestamp) > b.cfg.ChannelPruneExpiry
        }

        // If we don't know of the edge, then it means it's fresh (thus not
        // stale).
        if !exists {
                return false
        }

        // As edges are directional, each node has a unique policy for the
        // direction of the edge it controls. Therefore, we first check if we
        // already have the most up-to-date information for that edge. If so,
        // then we can exit early.
        switch {
        // A flag set of 0 indicates this is an announcement for the "first"
        // node in the channel.
        case flags&lnwire.ChanUpdateDirection == 0:
                return !edge1Timestamp.Before(timestamp)

        // Similarly, a flag set of 1 indicates this is an announcement for the
        // "second" node in the channel.
        case flags&lnwire.ChanUpdateDirection == 1:
                return !edge2Timestamp.Before(timestamp)
        }

        return false
}
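
// isFreshPolicySketch is an illustrative helper added by the editor; it is not
// part of the upstream builder.go. It mirrors how a gossip consumer would use
// IsStaleEdgePolicy: an incoming channel update is only worth processing when
// the graph source does not already hold a policy for that channel and
// direction with an equal or newer timestamp. The helper name is hypothetical.
func isFreshPolicySketch(b *Builder, upd *lnwire.ChannelUpdate1) bool {
        ts := time.Unix(int64(upd.Timestamp), 0)
        return !b.IsStaleEdgePolicy(upd.ShortChannelID, ts, upd.ChannelFlags)
}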

// MarkEdgeLive clears an edge from our zombie index, deeming it as live.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (b *Builder) MarkEdgeLive(chanID lnwire.ShortChannelID) error {
        return b.cfg.Graph.MarkEdgeLive(chanID.ToUint64())
}