lightningnetwork / lnd, build 13725358077

07 Mar 2025 04:51PM UTC coverage: 58.224% (-10.4%) from 68.615%

Pull Request #9458: multi+server.go: add initial permissions for some peers
Merge bf4c6625f into ab2dc09eb

346 of 549 new or added lines in 10 files covered (63.02%)
27466 existing lines in 443 files now uncovered
94609 of 162492 relevant lines covered (58.22%)
1.81 hits per line

Source file: /graph/builder.go (59.33% of lines covered)

package graph

import (
        "fmt"
        "sync"
        "sync/atomic"
        "time"

        "github.com/btcsuite/btcd/btcec/v2"
        "github.com/btcsuite/btcd/wire"
        "github.com/go-errors/errors"
        "github.com/lightningnetwork/lnd/batch"
        "github.com/lightningnetwork/lnd/chainntnfs"
        graphdb "github.com/lightningnetwork/lnd/graph/db"
        "github.com/lightningnetwork/lnd/graph/db/models"
        "github.com/lightningnetwork/lnd/kvdb"
        "github.com/lightningnetwork/lnd/lnutils"
        "github.com/lightningnetwork/lnd/lnwallet"
        "github.com/lightningnetwork/lnd/lnwire"
        "github.com/lightningnetwork/lnd/multimutex"
        "github.com/lightningnetwork/lnd/netann"
        "github.com/lightningnetwork/lnd/routing/chainview"
        "github.com/lightningnetwork/lnd/routing/route"
        "github.com/lightningnetwork/lnd/ticker"
)

const (
        // DefaultChannelPruneExpiry is the default duration used to determine
        // if a channel should be pruned or not.
        DefaultChannelPruneExpiry = time.Hour * 24 * 14

        // DefaultFirstTimePruneDelay is the time we'll wait after startup
        // before attempting to prune the graph for zombie channels. We don't
        // do it immediately after startup to allow lnd to start up without
        // getting blocked by this job.
        DefaultFirstTimePruneDelay = 30 * time.Second

        // defaultStatInterval governs how often the router will log non-empty
        // stats related to processing new channels, updates, or node
        // announcements.
        defaultStatInterval = time.Minute
)

var (
        // ErrGraphBuilderShuttingDown is returned if the graph builder is in
        // the process of shutting down.
        ErrGraphBuilderShuttingDown = fmt.Errorf("graph builder shutting down")
)

// Config holds the configuration required by the Builder.
type Config struct {
        // SelfNode is the public key of the node that this channel router
        // belongs to.
        SelfNode route.Vertex

        // Graph is the channel graph that the ChannelRouter will use to gather
        // metrics from and also to carry out path finding queries.
        Graph DB

        // Chain is the router's source to the most up-to-date blockchain data.
        // All incoming advertised channels will be checked against the chain
        // to ensure that the channels advertised are still open.
        Chain lnwallet.BlockChainIO

        // ChainView is an instance of a FilteredChainView which is used to
        // watch the sub-set of the UTXO set (the set of active channels) that
        // we need in order to properly maintain the channel graph.
        ChainView chainview.FilteredChainView

        // Notifier is a reference to the ChainNotifier, used to grab
        // the latest blocks if the router is missing any.
        Notifier chainntnfs.ChainNotifier

        // ChannelPruneExpiry is the duration used to determine if a channel
        // should be pruned or not. If the delta between now and when the
        // channel was last updated is greater than ChannelPruneExpiry, then
        // the channel is marked as a zombie channel eligible for pruning.
        ChannelPruneExpiry time.Duration

        // GraphPruneInterval is used as an interval to determine how often we
        // should examine the channel graph to garbage collect zombie channels.
        GraphPruneInterval time.Duration

        // FirstTimePruneDelay is the time we'll wait after startup before
        // attempting to prune the graph for zombie channels. We don't do it
        // immediately after startup to allow lnd to start up without getting
        // blocked by this job.
        FirstTimePruneDelay time.Duration

        // AssumeChannelValid toggles whether the builder will prune channels
        // based on their spentness vs using the fact that they are considered
        // zombies.
        AssumeChannelValid bool

        // StrictZombiePruning determines if we attempt to prune zombie
        // channels according to a stricter criteria. If true, then we'll prune
        // a channel if only *one* of the edges is considered a zombie.
        // Otherwise, we'll only prune the channel when both edges have a very
        // dated last update.
        StrictZombiePruning bool

        // IsAlias returns whether a passed ShortChannelID is an alias. This is
        // only used for our local channels.
        IsAlias func(scid lnwire.ShortChannelID) bool
}

// Builder builds and maintains a view of the Lightning Network graph.
type Builder struct {
        started atomic.Bool
        stopped atomic.Bool

        ntfnClientCounter atomic.Uint64
        bestHeight        atomic.Uint32

        cfg *Config

        // newBlocks is a channel in which new blocks connected to the end of
        // the main chain are sent over, and blocks updated after a call to
        // UpdateFilter.
        newBlocks <-chan *chainview.FilteredBlock

        // staleBlocks is a channel in which blocks disconnected from the end
        // of our currently known best chain are sent over.
        staleBlocks <-chan *chainview.FilteredBlock

        // topologyUpdates is a channel that carries new topology updates
        // messages from outside the Builder to be processed by the
        // networkHandler.
        topologyUpdates chan any

        // topologyClients maps a client's unique notification ID to a
        // topologyClient client that contains its notification dispatch
        // channel.
        topologyClients *lnutils.SyncMap[uint64, *topologyClient]

        // ntfnClientUpdates is a channel that's used to send new updates to
        // topology notification clients to the Builder. Updates either
        // add a new notification client, or cancel notifications for an
        // existing client.
        ntfnClientUpdates chan *topologyClientUpdate

        // channelEdgeMtx is a mutex we use to make sure we process only one
        // ChannelEdgePolicy at a time for a given channelID, to ensure
        // consistency between the various database accesses.
        channelEdgeMtx *multimutex.Mutex[uint64]

        // statTicker is a resumable ticker that logs the router's progress as
        // it discovers channels or receives updates.
        statTicker ticker.Ticker

        // stats tracks newly processed channels, updates, and node
        // announcements over a window of defaultStatInterval.
        stats *builderStats

        quit chan struct{}
        wg   sync.WaitGroup
}

// A compile time check to ensure Builder implements the
// ChannelGraphSource interface.
var _ ChannelGraphSource = (*Builder)(nil)

// NewBuilder constructs a new Builder.
func NewBuilder(cfg *Config) (*Builder, error) {
        return &Builder{
                cfg:               cfg,
                topologyUpdates:   make(chan any),
                topologyClients:   &lnutils.SyncMap[uint64, *topologyClient]{},
                ntfnClientUpdates: make(chan *topologyClientUpdate),
                channelEdgeMtx:    multimutex.NewMutex[uint64](),
                statTicker:        ticker.New(defaultStatInterval),
                stats:             new(builderStats),
                quit:              make(chan struct{}),
        }, nil
}

// Start launches all the goroutines the Builder requires to carry out its
// duties. If the builder has already been started, then this method is a noop.
func (b *Builder) Start() error {
        if !b.started.CompareAndSwap(false, true) {
                return nil
        }

        log.Info("Builder starting")

        bestHash, bestHeight, err := b.cfg.Chain.GetBestBlock()
        if err != nil {
                return err
        }

        // If the graph has never been pruned, or hasn't fully been created yet,
        // then we don't treat this as an explicit error.
        if _, _, err := b.cfg.Graph.PruneTip(); err != nil {
                switch {
                case errors.Is(err, graphdb.ErrGraphNeverPruned):
                        fallthrough

                case errors.Is(err, graphdb.ErrGraphNotFound):
                        // If the graph has never been pruned, then we'll set
                        // the prune height to the current best height of the
                        // chain backend.
                        _, err = b.cfg.Graph.PruneGraph(
                                nil, bestHash, uint32(bestHeight),
                        )
                        if err != nil {
                                return err
                        }

                default:
                        return err
                }
        }

        // If AssumeChannelValid is present, then we won't rely on pruning
        // channels from the graph based on their spentness, but whether they
        // are considered zombies or not. We will start zombie pruning after a
        // small delay, to avoid slowing down startup of lnd.
        if b.cfg.AssumeChannelValid { //nolint:nestif
                time.AfterFunc(b.cfg.FirstTimePruneDelay, func() {
                        select {
                        case <-b.quit:
                                return
                        default:
                        }

                        log.Info("Initial zombie prune starting")
                        if err := b.pruneZombieChans(); err != nil {
                                log.Errorf("Unable to prune zombies: %v", err)
                        }
                })
        } else {
                // Otherwise, we'll use our filtered chain view to prune
                // channels as soon as they are detected as spent on-chain.
                if err := b.cfg.ChainView.Start(); err != nil {
                        return err
                }

                // Once the instance is active, we'll fetch the channel we'll
                // receive notifications over.
                b.newBlocks = b.cfg.ChainView.FilteredBlocks()
                b.staleBlocks = b.cfg.ChainView.DisconnectedBlocks()

                // Before we perform our manual block pruning, we'll construct
                // and apply a fresh chain filter to the active
                // FilteredChainView instance.  We do this before, as otherwise
                // we may miss on-chain events as the filter hasn't properly
                // been applied.
                channelView, err := b.cfg.Graph.ChannelView()
                if err != nil && !errors.Is(
                        err, graphdb.ErrGraphNoEdgesFound,
                ) {

                        return err
                }

                log.Infof("Filtering chain using %v channels active",
                        len(channelView))

                if len(channelView) != 0 {
                        err = b.cfg.ChainView.UpdateFilter(
                                channelView, uint32(bestHeight),
                        )
                        if err != nil {
                                return err
                        }
                }

                // The graph pruning might have taken a while and there could be
                // new blocks available.
                _, bestHeight, err = b.cfg.Chain.GetBestBlock()
                if err != nil {
                        return err
                }
                b.bestHeight.Store(uint32(bestHeight))

                // Before we begin normal operation of the router, we first need
                // to synchronize the channel graph to the latest state of the
                // UTXO set.
                if err := b.syncGraphWithChain(); err != nil {
                        return err
                }

                // Finally, before we proceed, we'll prune any unconnected nodes
                // from the graph in order to ensure we maintain a tight graph
                // of "useful" nodes.
                err = b.cfg.Graph.PruneGraphNodes()
                if err != nil &&
                        !errors.Is(err, graphdb.ErrGraphNodesNotFound) {

                        return err
                }
        }

        b.wg.Add(1)
        go b.networkHandler()

        log.Debug("Builder started")

        return nil
}

// Stop signals to the Builder that it should halt all routines. This method
// will *block* until all goroutines have exited. If the builder has already
// stopped then this method will return immediately.
func (b *Builder) Stop() error {
        if !b.stopped.CompareAndSwap(false, true) {
                return nil
        }

        log.Info("Builder shutting down...")

        // Our filtered chain view could've only been started if
        // AssumeChannelValid isn't present.
        if !b.cfg.AssumeChannelValid {
                if err := b.cfg.ChainView.Stop(); err != nil {
                        return err
                }
        }

        close(b.quit)
        b.wg.Wait()

        log.Debug("Builder shutdown complete")

        return nil
}
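
// The sketch below is illustrative only and is not part of builder.go. It
// shows how a Builder is typically wired up and run, assuming that concrete
// implementations of the DB, lnwallet.BlockChainIO,
// chainview.FilteredChainView and chainntnfs.ChainNotifier dependencies are
// already available as graphDB, chainIO, chainView and notifier, and that
// selfVertex holds this node's route.Vertex:
//
//	builder, err := NewBuilder(&Config{
//	        SelfNode:            selfVertex,
//	        Graph:               graphDB,
//	        Chain:               chainIO,
//	        ChainView:           chainView,
//	        Notifier:            notifier,
//	        ChannelPruneExpiry:  DefaultChannelPruneExpiry,
//	        GraphPruneInterval:  time.Hour,
//	        FirstTimePruneDelay: DefaultFirstTimePruneDelay,
//	        IsAlias: func(scid lnwire.ShortChannelID) bool {
//	                return false
//	        },
//	})
//	if err != nil {
//	        return err
//	}
//	if err := builder.Start(); err != nil {
//	        return err
//	}
//	// Stop blocks until all of the builder's goroutines have exited.
//	defer func() { _ = builder.Stop() }()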

// syncGraphWithChain attempts to synchronize the current channel graph with
// the latest UTXO set state. This process involves pruning from the channel
// graph any channels which have been closed by spending their funding output
// since we've been down.
func (b *Builder) syncGraphWithChain() error {
        // First, we'll need to check to see if we're already in sync with the
        // latest state of the UTXO set.
        bestHash, bestHeight, err := b.cfg.Chain.GetBestBlock()
        if err != nil {
                return err
        }
        b.bestHeight.Store(uint32(bestHeight))

        pruneHash, pruneHeight, err := b.cfg.Graph.PruneTip()
        if err != nil {
                switch {
                // If the graph has never been pruned, or hasn't fully been
                // created yet, then we don't treat this as an explicit error.
                case errors.Is(err, graphdb.ErrGraphNeverPruned):
                case errors.Is(err, graphdb.ErrGraphNotFound):
                default:
                        return err
                }
        }

        log.Infof("Prune tip for Channel Graph: height=%v, hash=%v",
                pruneHeight, pruneHash)

        switch {
        // If the graph has never been pruned, then we can exit early as this
        // entails it's being created for the first time and hasn't seen any
        // block or created channels.
        case pruneHeight == 0 || pruneHash == nil:
                return nil

        // If the block hashes and heights match exactly, then we don't need to
        // prune the channel graph as we're already fully in sync.
        case bestHash.IsEqual(pruneHash) && uint32(bestHeight) == pruneHeight:
                return nil
        }

        // If the main chain blockhash at prune height is different from the
        // prune hash, this might indicate the database is on a stale branch.
        mainBlockHash, err := b.cfg.Chain.GetBlockHash(int64(pruneHeight))
        if err != nil {
                return err
        }

        // While we are on a stale branch of the chain, walk backwards to find
        // first common block.
        for !pruneHash.IsEqual(mainBlockHash) {
                log.Infof("channel graph is stale. Disconnecting block %v "+
                        "(hash=%v)", pruneHeight, pruneHash)
                // Prune the graph for every channel that was opened at height
                // >= pruneHeight.
                _, err := b.cfg.Graph.DisconnectBlockAtHeight(pruneHeight)
                if err != nil {
                        return err
                }

                pruneHash, pruneHeight, err = b.cfg.Graph.PruneTip()
                switch {
                // If at this point the graph has never been pruned, we can exit
                // as this entails we are back to the point where it hasn't seen
                // any block or created channels, alas there's nothing left to
                // prune.
                case errors.Is(err, graphdb.ErrGraphNeverPruned):
                        return nil

                case errors.Is(err, graphdb.ErrGraphNotFound):
                        return nil

                case err != nil:
                        return err

                default:
                }

                mainBlockHash, err = b.cfg.Chain.GetBlockHash(
                        int64(pruneHeight),
                )
                if err != nil {
                        return err
                }
        }

        log.Infof("Syncing channel graph from height=%v (hash=%v) to "+
                "height=%v (hash=%v)", pruneHeight, pruneHash, bestHeight,
                bestHash)

        // If we're not yet caught up, then we'll walk forward in the chain
        // pruning the channel graph with each new block that hasn't yet been
        // consumed by the channel graph.
        var spentOutputs []*wire.OutPoint
        for nextHeight := pruneHeight + 1; nextHeight <= uint32(bestHeight); nextHeight++ { //nolint:ll
                // Break out of the rescan early if a shutdown has been
                // requested, otherwise long rescans will block the daemon from
                // shutting down promptly.
                select {
                case <-b.quit:
                        return ErrGraphBuilderShuttingDown
                default:
                }

                // Using the next height, request a manual block pruning from
                // the chainview for the particular block hash.
                log.Infof("Filtering block for closed channels, at height: %v",
                        int64(nextHeight))
                nextHash, err := b.cfg.Chain.GetBlockHash(int64(nextHeight))
                if err != nil {
                        return err
                }
                log.Tracef("Running block filter on block with hash: %v",
                        nextHash)
                filterBlock, err := b.cfg.ChainView.FilterBlock(nextHash)
                if err != nil {
                        return err
                }

                // We're only interested in all prior outputs that have been
                // spent in the block, so collate all the referenced previous
                // outpoints within each tx and input.
                for _, tx := range filterBlock.Transactions {
                        for _, txIn := range tx.TxIn {
                                spentOutputs = append(spentOutputs,
                                        &txIn.PreviousOutPoint)
                        }
                }
        }

        // With the spent outputs gathered, attempt to prune the channel graph,
        // also passing in the best hash+height so the prune tip can be updated.
        closedChans, err := b.cfg.Graph.PruneGraph(
                spentOutputs, bestHash, uint32(bestHeight),
        )
        if err != nil {
                return err
        }

        log.Infof("Graph pruning complete: %v channels were closed since "+
                "height %v", len(closedChans), pruneHeight)

        return nil
}

// isZombieChannel takes two edge policy updates and determines if the
// corresponding channel should be considered a zombie. The first boolean is
// true if the policy update from node 1 is considered a zombie, the second
// boolean is that of node 2, and the final boolean is true if the channel
// is considered a zombie.
func (b *Builder) isZombieChannel(e1,
        e2 *models.ChannelEdgePolicy) (bool, bool, bool) {

        chanExpiry := b.cfg.ChannelPruneExpiry

        e1Zombie := e1 == nil || time.Since(e1.LastUpdate) >= chanExpiry
        e2Zombie := e2 == nil || time.Since(e2.LastUpdate) >= chanExpiry

        var e1Time, e2Time time.Time
        if e1 != nil {
                e1Time = e1.LastUpdate
        }
        if e2 != nil {
                e2Time = e2.LastUpdate
        }

        return e1Zombie, e2Zombie, b.IsZombieChannel(e1Time, e2Time)
}

// IsZombieChannel takes the timestamps of the latest channel updates for a
// channel and returns true if the channel should be considered a zombie based
// on these timestamps.
func (b *Builder) IsZombieChannel(updateTime1,
        updateTime2 time.Time) bool {

        chanExpiry := b.cfg.ChannelPruneExpiry

        e1Zombie := updateTime1.IsZero() ||
                time.Since(updateTime1) >= chanExpiry

        e2Zombie := updateTime2.IsZero() ||
                time.Since(updateTime2) >= chanExpiry

        // If we're using strict zombie pruning, then a channel is only
        // considered live if both edges have a recent update we know of.
        if b.cfg.StrictZombiePruning {
                return e1Zombie || e2Zombie
        }

        // Otherwise, if we're using the less strict variant, then a channel is
        // considered live if either of the edges have a recent update.
        return e1Zombie && e2Zombie
}
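
// Illustrative example (not part of the original file): with
// ChannelPruneExpiry left at DefaultChannelPruneExpiry (14 days), consider a
// channel whose node-1 policy was refreshed an hour ago while its node-2
// policy is three weeks old. Under the default, lenient rule the channel is
// still live because one fresh edge is enough; under StrictZombiePruning a
// single stale edge already makes it a zombie:
//
//	t1 := time.Now().Add(-time.Hour)           // fresh edge
//	t2 := time.Now().Add(-21 * 24 * time.Hour) // stale edge
//
//	b.cfg.StrictZombiePruning = false
//	_ = b.IsZombieChannel(t1, t2) // false: both edges must be stale
//
//	b.cfg.StrictZombiePruning = true
//	_ = b.IsZombieChannel(t1, t2) // true: one stale edge suffices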

// pruneZombieChans is a method that will be called periodically to prune out
// any "zombie" channels. We consider channels zombies if *both* edges haven't
// been updated since our zombie horizon. If AssumeChannelValid is present,
// we'll also consider channels zombies if *both* edges are disabled. This
// usually signals that a channel has been closed on-chain. We do this
// periodically to keep a healthy, lively routing table.
func (b *Builder) pruneZombieChans() error {
        chansToPrune := make(map[uint64]struct{})
        chanExpiry := b.cfg.ChannelPruneExpiry

        log.Infof("Examining channel graph for zombie channels")

        // A helper method to detect if the channel belongs to this node
        isSelfChannelEdge := func(info *models.ChannelEdgeInfo) bool {
                return info.NodeKey1Bytes == b.cfg.SelfNode ||
                        info.NodeKey2Bytes == b.cfg.SelfNode
        }

        // First, we'll collect all the channels which are eligible for garbage
        // collection due to being zombies.
        filterPruneChans := func(info *models.ChannelEdgeInfo,
                e1, e2 *models.ChannelEdgePolicy) error {

                // Exit early in case this channel is already marked to be
                // pruned
                _, markedToPrune := chansToPrune[info.ChannelID]
                if markedToPrune {
                        return nil
                }

                // We'll ensure that we don't attempt to prune our *own*
                // channels from the graph, as in any case this should be
                // re-advertised by the sub-system above us.
                if isSelfChannelEdge(info) {
                        return nil
                }

                e1Zombie, e2Zombie, isZombieChan := b.isZombieChannel(e1, e2)

                if e1Zombie {
                        log.Tracef("Node1 pubkey=%x of chan_id=%v is zombie",
                                info.NodeKey1Bytes, info.ChannelID)
                }

                if e2Zombie {
                        log.Tracef("Node2 pubkey=%x of chan_id=%v is zombie",
                                info.NodeKey2Bytes, info.ChannelID)
                }

                // If either edge hasn't been updated for a period of
                // chanExpiry, then we'll mark the channel itself as eligible
                // for graph pruning.
                if !isZombieChan {
                        return nil
                }

                log.Debugf("ChannelID(%v) is a zombie, collecting to prune",
                        info.ChannelID)

                // TODO(roasbeef): add ability to delete single directional edge
                chansToPrune[info.ChannelID] = struct{}{}

                return nil
        }

        // If AssumeChannelValid is present we'll look at the disabled bit for
        // both edges. If they're both disabled, then we can interpret this as
        // the channel being closed and can prune it from our graph.
        if b.cfg.AssumeChannelValid {
                disabledChanIDs, err := b.cfg.Graph.DisabledChannelIDs()
                if err != nil {
                        return fmt.Errorf("unable to get disabled channels "+
                                "ids chans: %v", err)
                }

                disabledEdges, err := b.cfg.Graph.FetchChanInfos(
                        disabledChanIDs,
                )
                if err != nil {
                        return fmt.Errorf("unable to fetch disabled channels "+
                                "edges chans: %v", err)
                }

                // Ensuring we won't prune our own channel from the graph.
                for _, disabledEdge := range disabledEdges {
                        if !isSelfChannelEdge(disabledEdge.Info) {
                                chansToPrune[disabledEdge.Info.ChannelID] =
                                        struct{}{}
                        }
                }
        }

        startTime := time.Unix(0, 0)
        endTime := time.Now().Add(-1 * chanExpiry)
        oldEdges, err := b.cfg.Graph.ChanUpdatesInHorizon(startTime, endTime)
        if err != nil {
                return fmt.Errorf("unable to fetch expired channel updates "+
                        "chans: %v", err)
        }

        for _, u := range oldEdges {
                err = filterPruneChans(u.Info, u.Policy1, u.Policy2)
                if err != nil {
                        return fmt.Errorf("error filtering channels to "+
                                "prune: %w", err)
                }
        }

        log.Infof("Pruning %v zombie channels", len(chansToPrune))
        if len(chansToPrune) == 0 {
                return nil
        }

        // With the set of zombie-like channels obtained, we'll do another pass
        // to delete them from the channel graph.
        toPrune := make([]uint64, 0, len(chansToPrune))
        for chanID := range chansToPrune {
                toPrune = append(toPrune, chanID)
                log.Tracef("Pruning zombie channel with ChannelID(%v)", chanID)
        }
        err = b.cfg.Graph.DeleteChannelEdges(
                b.cfg.StrictZombiePruning, true, toPrune...,
        )
        if err != nil {
                return fmt.Errorf("unable to delete zombie channels: %w", err)
        }

        // With the channels pruned, we'll also attempt to prune any nodes that
        // were a part of them.
        err = b.cfg.Graph.PruneGraphNodes()
        if err != nil && !errors.Is(err, graphdb.ErrGraphNodesNotFound) {
                return fmt.Errorf("unable to prune graph nodes: %w", err)
        }

        return nil
}

// handleTopologyUpdate is responsible for sending any topology changes
// notifications to registered clients.
//
// NOTE: must be run inside goroutine.
func (b *Builder) handleTopologyUpdate(update any) {
        defer b.wg.Done()

        topChange := &TopologyChange{}
        err := addToTopologyChange(b.cfg.Graph, topChange, update)
        if err != nil {
                log.Errorf("unable to update topology change notification: %v",
                        err)
                return
        }

        if topChange.isEmpty() {
                return
        }

        b.notifyTopologyChange(topChange)
}

// networkHandler is the primary goroutine for the Builder. The roles of
// this goroutine include answering queries related to the state of the
// network, pruning the graph on new block notification, applying network
// updates, and registering new topology clients.
//
// NOTE: This MUST be run as a goroutine.
func (b *Builder) networkHandler() {
        defer b.wg.Done()

        graphPruneTicker := time.NewTicker(b.cfg.GraphPruneInterval)
        defer graphPruneTicker.Stop()

        defer b.statTicker.Stop()

        b.stats.Reset()

        for {
                // If there are stats, resume the statTicker.
                if !b.stats.Empty() {
                        b.statTicker.Resume()
                }

                select {
                // A new fully validated topology update has just arrived.
                // We'll notify any registered clients.
                case update := <-b.topologyUpdates:
                        b.wg.Add(1)
                        go b.handleTopologyUpdate(update)

                        // TODO(roasbeef): remove all unconnected vertexes
                        // after N blocks pass with no corresponding
                        // announcements.

                case chainUpdate, ok := <-b.staleBlocks:
                        // If the channel has been closed, then this indicates
                        // the daemon is shutting down, so we exit ourselves.
                        if !ok {
                                return
                        }

                        // Since this block is stale, we update our best height
                        // to the previous block.
                        blockHeight := chainUpdate.Height
                        b.bestHeight.Store(blockHeight - 1)

                        // Update the channel graph to reflect that this block
                        // was disconnected.
                        _, err := b.cfg.Graph.DisconnectBlockAtHeight(
                                blockHeight,
                        )
                        if err != nil {
                                log.Errorf("unable to prune graph with stale "+
                                        "block: %v", err)
                                continue
                        }

                        // TODO(halseth): notify client about the reorg?

                // A new block has arrived, so we can prune the channel graph
                // of any channels which were closed in the block.
                case chainUpdate, ok := <-b.newBlocks:
                        // If the channel has been closed, then this indicates
                        // the daemon is shutting down, so we exit ourselves.
                        if !ok {
                                return
                        }

                        // We'll ensure that any new blocks received attach
                        // directly to the end of our main chain. If not, then
                        // we've somehow missed some blocks. Here we'll catch
                        // up the chain with the latest blocks.
                        currentHeight := b.bestHeight.Load()
                        switch {
                        case chainUpdate.Height == currentHeight+1:
                                err := b.updateGraphWithClosedChannels(
                                        chainUpdate,
                                )
                                if err != nil {
                                        log.Errorf("unable to prune graph "+
                                                "with closed channels: %v", err)
                                }

                        case chainUpdate.Height > currentHeight+1:
                                log.Errorf("out of order block: expecting "+
                                        "height=%v, got height=%v",
                                        currentHeight+1, chainUpdate.Height)

                                err := b.getMissingBlocks(
                                        currentHeight, chainUpdate,
                                )
                                if err != nil {
                                        log.Errorf("unable to retrieve missing"+
                                                "blocks: %v", err)
                                }

                        case chainUpdate.Height < currentHeight+1:
                                log.Errorf("out of order block: expecting "+
                                        "height=%v, got height=%v",
                                        currentHeight+1, chainUpdate.Height)

                                log.Infof("Skipping channel pruning since "+
                                        "received block height %v was already"+
                                        " processed.", chainUpdate.Height)
                        }

                // A new notification client update has arrived. We're either
                // gaining a new client, or cancelling notifications for an
                // existing client.
                case ntfnUpdate := <-b.ntfnClientUpdates:
                        clientID := ntfnUpdate.clientID

                        if ntfnUpdate.cancel {
                                client, ok := b.topologyClients.LoadAndDelete(
                                        clientID,
                                )
                                if ok {
                                        close(client.exit)
                                        client.wg.Wait()

                                        close(client.ntfnChan)
                                }

                                continue
                        }

                        b.topologyClients.Store(clientID, &topologyClient{
                                ntfnChan: ntfnUpdate.ntfnChan,
                                exit:     make(chan struct{}),
                        })

                // The graph prune ticker has ticked, so we'll examine the
                // state of the known graph to filter out any zombie channels
                // for pruning.
                case <-graphPruneTicker.C:
                        if err := b.pruneZombieChans(); err != nil {
                                log.Errorf("Unable to prune zombies: %v", err)
                        }

                // Log any stats if we've processed a non-empty number of
                // channels, updates, or nodes. We'll only pause the ticker if
                // the last window contained no updates to avoid resuming and
                // pausing while consecutive windows contain new info.
                case <-b.statTicker.Ticks():
                        if !b.stats.Empty() {
                                log.Infof(b.stats.String())
                        } else {
                                b.statTicker.Pause()
                        }
                        b.stats.Reset()

                // The router has been signalled to exit, so we exit our main
                // loop so the wait group can be decremented.
                case <-b.quit:
                        return
                }
        }
}

// getMissingBlocks walks through all missing blocks and updates the graph
// closed channels accordingly.
func (b *Builder) getMissingBlocks(currentHeight uint32,
        chainUpdate *chainview.FilteredBlock) error {

        outdatedHash, err := b.cfg.Chain.GetBlockHash(int64(currentHeight))
        if err != nil {
                return err
        }

        outdatedBlock := &chainntnfs.BlockEpoch{
                Height: int32(currentHeight),
                Hash:   outdatedHash,
        }

        epochClient, err := b.cfg.Notifier.RegisterBlockEpochNtfn(
                outdatedBlock,
        )
        if err != nil {
                return err
        }
        defer epochClient.Cancel()

        blockDifference := int(chainUpdate.Height - currentHeight)

        // We'll walk through all the outdated blocks and make sure we're able
        // to update the graph with any closed channels from them.
        for i := 0; i < blockDifference; i++ {
                var (
                        missingBlock *chainntnfs.BlockEpoch
                        ok           bool
                )

                select {
                case missingBlock, ok = <-epochClient.Epochs:
                        if !ok {
                                return nil
                        }

                case <-b.quit:
                        return nil
                }

                filteredBlock, err := b.cfg.ChainView.FilterBlock(
                        missingBlock.Hash,
                )
                if err != nil {
                        return err
                }

                err = b.updateGraphWithClosedChannels(
                        filteredBlock,
                )
                if err != nil {
                        return err
                }
        }

        return nil
}

// updateGraphWithClosedChannels prunes the channel graph of closed channels
// that are no longer needed.
func (b *Builder) updateGraphWithClosedChannels(
        chainUpdate *chainview.FilteredBlock) error {

        // Once a new block arrives, we update our running track of the height
        // of the chain tip.
        blockHeight := chainUpdate.Height

        b.bestHeight.Store(blockHeight)
        log.Infof("Pruning channel graph using block %v (height=%v)",
                chainUpdate.Hash, blockHeight)

        // We're only interested in all prior outputs that have been spent in
        // the block, so collate all the referenced previous outpoints within
        // each tx and input.
        var spentOutputs []*wire.OutPoint
        for _, tx := range chainUpdate.Transactions {
                for _, txIn := range tx.TxIn {
                        spentOutputs = append(spentOutputs,
                                &txIn.PreviousOutPoint)
                }
        }

        // With the spent outputs gathered, attempt to prune the channel graph,
        // also passing in the hash+height of the block being pruned so the
        // prune tip can be updated.
        chansClosed, err := b.cfg.Graph.PruneGraph(spentOutputs,
                &chainUpdate.Hash, chainUpdate.Height)
        if err != nil {
                log.Errorf("unable to prune routing table: %v", err)
                return err
        }

        log.Infof("Block %v (height=%v) closed %v channels", chainUpdate.Hash,
                blockHeight, len(chansClosed))

        if len(chansClosed) == 0 {
                return err
        }

        // Notify all currently registered clients of the newly closed channels.
        closeSummaries := createCloseSummaries(blockHeight, chansClosed...)
        b.notifyTopologyChange(&TopologyChange{
                ClosedChannels: closeSummaries,
        })

        return nil
}

// assertNodeAnnFreshness returns a non-nil error if we have an announcement in
// the database for the passed node with a timestamp newer than the passed
// timestamp. ErrIgnored will be returned if we already have the node, and
// ErrOutdated will be returned if we have a timestamp that's after the new
// timestamp.
func (b *Builder) assertNodeAnnFreshness(node route.Vertex,
        msgTimestamp time.Time) error {

        // If we are not already aware of this node, it means that we don't
        // know about any channel using this node. To avoid a DoS attack by
        // node announcements, we will ignore such nodes. If we do know about
        // this node, check that this update brings info newer than what we
        // already have.
        lastUpdate, exists, err := b.cfg.Graph.HasLightningNode(node)
        if err != nil {
                return errors.Errorf("unable to query for the "+
                        "existence of node: %v", err)
        }
        if !exists {
                return NewErrf(ErrIgnored, "Ignoring node announcement"+
                        " for node not found in channel graph (%x)",
                        node[:])
        }

        // If we've reached this point then we're aware of the vertex being
        // advertised. So we now check if the new message has a new time stamp,
        // if not then we won't accept the new data as it would override newer
        // data.
        if !lastUpdate.Before(msgTimestamp) {
                return NewErrf(ErrOutdated, "Ignoring outdated "+
                        "announcement for %x", node[:])
        }

        return nil
}

// MarkZombieEdge adds a channel that failed complete validation into the zombie
// index so we can avoid having to re-validate it in the future.
func (b *Builder) MarkZombieEdge(chanID uint64) error {
        // If the edge fails validation we'll mark the edge itself as a zombie
        // so we don't continue to request it. We use the "zero key" for both
        // node pubkeys so this edge can't be resurrected.
        var zeroKey [33]byte
        err := b.cfg.Graph.MarkEdgeZombie(chanID, zeroKey, zeroKey)
        if err != nil {
                return fmt.Errorf("unable to mark spent chan(id=%v) as a "+
                        "zombie: %w", chanID, err)
        }

        return nil
}

// ApplyChannelUpdate validates a channel update and if valid, applies it to the
// database. It returns a bool indicating whether the updates were successful.
func (b *Builder) ApplyChannelUpdate(msg *lnwire.ChannelUpdate1) bool {
        ch, _, _, err := b.GetChannelByID(msg.ShortChannelID)
        if err != nil {
                log.Errorf("Unable to retrieve channel by id: %v", err)
                return false
        }

        var pubKey *btcec.PublicKey

        switch msg.ChannelFlags & lnwire.ChanUpdateDirection {
        case 0:
                pubKey, _ = ch.NodeKey1()

        case 1:
                pubKey, _ = ch.NodeKey2()
        }

        // Exit early if the pubkey cannot be decided.
        if pubKey == nil {
                log.Errorf("Unable to decide pubkey with ChannelFlags=%v",
                        msg.ChannelFlags)
                return false
        }

        err = netann.ValidateChannelUpdateAnn(pubKey, ch.Capacity, msg)
        if err != nil {
                log.Errorf("Unable to validate channel update: %v", err)
                return false
        }

        err = b.UpdateEdge(&models.ChannelEdgePolicy{
                SigBytes:                  msg.Signature.ToSignatureBytes(),
                ChannelID:                 msg.ShortChannelID.ToUint64(),
                LastUpdate:                time.Unix(int64(msg.Timestamp), 0),
                MessageFlags:              msg.MessageFlags,
                ChannelFlags:              msg.ChannelFlags,
                TimeLockDelta:             msg.TimeLockDelta,
                MinHTLC:                   msg.HtlcMinimumMsat,
                MaxHTLC:                   msg.HtlcMaximumMsat,
                FeeBaseMSat:               lnwire.MilliSatoshi(msg.BaseFee),
                FeeProportionalMillionths: lnwire.MilliSatoshi(msg.FeeRate),
                ExtraOpaqueData:           msg.ExtraOpaqueData,
        })
        if err != nil && !IsError(err, ErrIgnored, ErrOutdated) {
                log.Errorf("Unable to apply channel update: %v", err)
                return false
        }

        return true
}

// AddNode is used to add information about a node to the router database. If
// the node with this pubkey is not present in an existing channel, it will
// be ignored.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (b *Builder) AddNode(node *models.LightningNode,
        op ...batch.SchedulerOption) error {

        err := b.addNode(node, op...)
        if err != nil {
                logNetworkMsgProcessError(err)

                return err
        }

        select {
        case b.topologyUpdates <- node:
        case <-b.quit:
                return ErrGraphBuilderShuttingDown
        }

        return nil
}
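
// Illustrative sketch (not part of the original file): a caller that has
// already validated a node announcement hands it to the builder as a
// models.LightningNode. The names builder, announcedVertex and
// announcementTime are assumptions for this sketch, and only the fields used
// by the freshness check are shown:
//
//	node := &models.LightningNode{
//	        PubKeyBytes: announcedVertex,  // pubkey of the announcing node
//	        LastUpdate:  announcementTime, // timestamp from the announcement
//	        // remaining announcement fields omitted
//	}
//	if err := builder.AddNode(node); err != nil {
//	        // Stale or unknown announcements surface as ErrIgnored or
//	        // ErrOutdated and can usually be ignored by the caller.
//	        log.Debugf("Node announcement not applied: %v", err)
//	}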
1078

1079
// addNode does some basic checks on the given LightningNode against what we
1080
// currently have persisted in the graph, and then adds it to the graph. If we
1081
// already know about the node, then we only update our DB if the new update
1082
// has a newer timestamp than the last one we received.
1083
func (b *Builder) addNode(node *models.LightningNode,
1084
        op ...batch.SchedulerOption) error {
3✔
1085

3✔
1086
        // Before we add the node to the database, we'll check to see if the
3✔
1087
        // announcement is "fresh" or not. If it isn't, then we'll return an
3✔
1088
        // error.
3✔
1089
        err := b.assertNodeAnnFreshness(node.PubKeyBytes, node.LastUpdate)
3✔
1090
        if err != nil {
6✔
1091
                return err
3✔
1092
        }
3✔
1093

1094
        if err := b.cfg.Graph.AddLightningNode(node, op...); err != nil {
3✔
1095
                return errors.Errorf("unable to add node %x to the "+
×
1096
                        "graph: %v", node.PubKeyBytes, err)
×
1097
        }
×
1098

1099
        log.Tracef("Updated vertex data for node=%x", node.PubKeyBytes)
3✔
1100
        b.stats.incNumNodeUpdates()
3✔
1101

3✔
1102
        return nil
3✔
1103
}
1104

1105
// AddEdge is used to add edge/channel to the topology of the router, after all
1106
// information about channel will be gathered this edge/channel might be used
1107
// in construction of payment path.
1108
//
1109
// NOTE: This method is part of the ChannelGraphSource interface.
1110
func (b *Builder) AddEdge(edge *models.ChannelEdgeInfo,
1111
        op ...batch.SchedulerOption) error {
3✔
1112

3✔
1113
        err := b.addEdge(edge, op...)
3✔
1114
        if err != nil {
6✔
1115
                logNetworkMsgProcessError(err)
3✔
1116

3✔
1117
                return err
3✔
1118
        }
3✔
1119

1120
        select {
3✔
1121
        case b.topologyUpdates <- edge:
3✔
1122
        case <-b.quit:
×
1123
                return ErrGraphBuilderShuttingDown
×
1124
        }
1125

1126
        return nil
3✔
1127
}
1128

1129
// addEdge does some validation on the new channel edge against what we
1130
// currently have persisted in the graph, and then adds it to the graph. The
1131
// Chain View is updated with the new edge if it is successfully added to the
1132
// graph. We only persist the channel if we currently dont have it at all in
1133
// our graph.
1134
//
1135
// TODO(elle): this currently also does funding-transaction validation. But this
1136
// should be moved to the gossiper instead.
func (b *Builder) addEdge(edge *models.ChannelEdgeInfo,
        op ...batch.SchedulerOption) error {

        log.Debugf("Received ChannelEdgeInfo for channel %v", edge.ChannelID)

        // Prior to processing the announcement, we first check if we already
        // know of this channel; if so, we can exit early.
        _, _, exists, isZombie, err := b.cfg.Graph.HasChannelEdge(
                edge.ChannelID,
        )
        if err != nil && !errors.Is(err, graphdb.ErrGraphNoEdgesFound) {
                return errors.Errorf("unable to check for edge existence: %v",
                        err)
        }
        if isZombie {
                return NewErrf(ErrIgnored, "ignoring msg for zombie chan_id=%v",
                        edge.ChannelID)
        }
        if exists {
                return NewErrf(ErrIgnored, "ignoring msg for known chan_id=%v",
                        edge.ChannelID)
        }

        if err := b.cfg.Graph.AddChannelEdge(edge, op...); err != nil {
                return fmt.Errorf("unable to add edge: %w", err)
        }

        b.stats.incNumEdgesDiscovered()

        // If AssumeChannelValid is set, or if the SCID is an alias, then the
        // gossiper would not have done the expensive work of fetching the
        // funding transaction and validating it. In that case we have neither
        // the channel capacity nor the funding script, so we just log and
        // return here.
        scid := lnwire.NewShortChanIDFromInt(edge.ChannelID)
        if b.cfg.AssumeChannelValid || b.cfg.IsAlias(scid) {
                log.Tracef("New channel discovered! Link connects %x and %x "+
                        "with ChannelID(%v)", edge.NodeKey1Bytes,
                        edge.NodeKey2Bytes, edge.ChannelID)

                return nil
        }

        log.Debugf("New channel discovered! Link connects %x and %x with "+
                "ChannelPoint(%v): chan_id=%v, capacity=%v", edge.NodeKey1Bytes,
                edge.NodeKey2Bytes, edge.ChannelPoint, edge.ChannelID,
                edge.Capacity)

        // Otherwise, we expect the funding script to be present on the edge
        // since it would have been fetched when the gossiper validated the
        // announcement.
        fundingPkScript, err := edge.FundingScript.UnwrapOrErr(fmt.Errorf(
                "expected the funding transaction script to be set",
        ))
        if err != nil {
                return err
        }

        // As a new edge has been added to the channel graph, we'll update the
        // current UTXO filter within our active FilteredChainView so we are
        // notified if/when this channel is closed.
        filterUpdate := []graphdb.EdgePoint{
                {
                        FundingPkScript: fundingPkScript,
                        OutPoint:        edge.ChannelPoint,
                },
        }

        err = b.cfg.ChainView.UpdateFilter(filterUpdate, b.bestHeight.Load())
        if err != nil {
                return errors.Errorf("unable to update chain "+
                        "view: %v", err)
        }

        return nil
}
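
// Illustrative sketch (not part of the original file): a caller in this
// package might funnel a validated channel announcement into addEdge and
// treat ErrIgnored as a benign duplicate rather than a failure. The helper
// name and its wiring are assumptions made purely for illustration.
func exampleAddChannelAnnouncement(b *Builder,
        edge *models.ChannelEdgeInfo) error {

        err := b.addEdge(edge)
        switch {
        // Duplicate and zombie channels surface as ErrIgnored; they are not
        // fatal, so the announcement is simply dropped.
        case IsError(err, ErrIgnored):
                log.Debugf("Skipping channel %v: %v", edge.ChannelID, err)
                return nil

        case err != nil:
                return fmt.Errorf("unable to add channel edge: %w", err)
        }

        return nil
}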

// UpdateEdge is used to update edge information; without this message the
// edge is considered not fully constructed.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (b *Builder) UpdateEdge(update *models.ChannelEdgePolicy,
        op ...batch.SchedulerOption) error {

        err := b.updateEdge(update, op...)
        if err != nil {
                logNetworkMsgProcessError(err)

                return err
        }

        select {
        case b.topologyUpdates <- update:
        case <-b.quit:
                return ErrGraphBuilderShuttingDown
        }

        return nil
}

// updateEdge validates the new edge policy against what we currently have
// persisted in the graph, and then applies it to the graph if the update is
// considered fresh enough and if we actually have a channel persisted for the
// given update.
func (b *Builder) updateEdge(policy *models.ChannelEdgePolicy,
        op ...batch.SchedulerOption) error {

        log.Debugf("Received ChannelEdgePolicy for channel %v",
                policy.ChannelID)

        // We make sure to hold the mutex for this channel ID, such that no
        // other goroutine is concurrently doing database accesses for the same
        // channel ID.
        b.channelEdgeMtx.Lock(policy.ChannelID)
        defer b.channelEdgeMtx.Unlock(policy.ChannelID)

        edge1Timestamp, edge2Timestamp, exists, isZombie, err :=
                b.cfg.Graph.HasChannelEdge(policy.ChannelID)
        if err != nil && !errors.Is(err, graphdb.ErrGraphNoEdgesFound) {
                return errors.Errorf("unable to check for edge existence: %v",
                        err)
        }

        // If the channel is marked as a zombie in our database, and we
        // consider this a stale update, then we should not apply the policy.
        isStaleUpdate := time.Since(policy.LastUpdate) >
                b.cfg.ChannelPruneExpiry

        if isZombie && isStaleUpdate {
                return NewErrf(ErrIgnored, "ignoring stale update "+
                        "(flags=%v|%v) for zombie chan_id=%v",
                        policy.MessageFlags, policy.ChannelFlags,
                        policy.ChannelID)
        }

        // If the channel doesn't exist in our database, we cannot apply the
        // updated policy.
        if !exists {
                return NewErrf(ErrIgnored, "ignoring update (flags=%v|%v) for "+
                        "unknown chan_id=%v", policy.MessageFlags,
                        policy.ChannelFlags, policy.ChannelID)
        }

        log.Debugf("Found edge1Timestamp=%v, edge2Timestamp=%v",
                edge1Timestamp, edge2Timestamp)

        // As edges are directional, each node has a unique policy for the
        // direction of the edge it controls. Therefore, we first check if we
        // already have the most up-to-date information for that edge. If this
        // message has a timestamp that is not strictly newer than what we
        // already know of, we can exit early.
        switch policy.ChannelFlags & lnwire.ChanUpdateDirection {
        // A flag set of 0 indicates this is an announcement for the "first"
        // node in the channel.
        case 0:
                // Ignore outdated message.
                if !edge1Timestamp.Before(policy.LastUpdate) {
                        return NewErrf(ErrOutdated, "Ignoring "+
                                "outdated update (flags=%v|%v) for "+
                                "known chan_id=%v", policy.MessageFlags,
                                policy.ChannelFlags, policy.ChannelID)
                }

        // Similarly, a flag set of 1 indicates this is an announcement
        // for the "second" node in the channel.
        case 1:
                // Ignore outdated message.
                if !edge2Timestamp.Before(policy.LastUpdate) {
                        return NewErrf(ErrOutdated, "Ignoring "+
                                "outdated update (flags=%v|%v) for "+
                                "known chan_id=%v", policy.MessageFlags,
                                policy.ChannelFlags, policy.ChannelID)
                }
        }

        // Now that we know this isn't a stale update, we'll apply the new edge
        // policy to the proper directional edge within the channel graph.
        if err = b.cfg.Graph.UpdateEdgePolicy(policy, op...); err != nil {
                err := errors.Errorf("unable to add channel: %v", err)
                log.Error(err)
                return err
        }

        log.Tracef("New channel update applied: %v",
                lnutils.SpewLogClosure(policy))
        b.stats.incNumChannelUpdates()

        return nil
}
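
// Illustrative sketch (not part of the original file): the direction bit of
// ChannelFlags decides whether a policy belongs to the "first" or "second"
// node of a channel, mirroring the switch in updateEdge above. The helper
// below exists only for illustration.
func exampleEdgeDirection(policy *models.ChannelEdgePolicy) string {
        // ChanUpdateDirection is the least significant bit of the channel
        // flags: 0 selects the policy of the first node, 1 the second.
        if policy.ChannelFlags&lnwire.ChanUpdateDirection == 0 {
                return "edge1 (first node)"
        }

        return "edge2 (second node)"
}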

// logNetworkMsgProcessError logs the error received from processing a network
// message. It logs as a debug message if the error is not critical.
func logNetworkMsgProcessError(err error) {
        if IsError(err, ErrIgnored, ErrOutdated) {
                log.Debugf("process network updates got: %v", err)

                return
        }

        log.Errorf("process network updates got: %v", err)
}

// CurrentBlockHeight returns the block height from the point of view of the
// router subsystem.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (b *Builder) CurrentBlockHeight() (uint32, error) {
        _, height, err := b.cfg.Chain.GetBestBlock()
        return uint32(height), err
}

// SyncedHeight returns the block height to which the router subsystem is
// currently synced. This can differ from the above chain height if the
// goroutine responsible for processing the blocks isn't yet up to speed.
func (b *Builder) SyncedHeight() uint32 {
        return b.bestHeight.Load()
}
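
// Illustrative sketch (not part of the original file): comparing the chain's
// best height with the height the builder has processed gives a rough
// "caught up" signal. The helper is an assumption made for illustration only.
func exampleIsCaughtUp(b *Builder) (bool, error) {
        chainHeight, err := b.CurrentBlockHeight()
        if err != nil {
                return false, err
        }

        // SyncedHeight lags chainHeight while the block-processing goroutine
        // is still working through a backlog.
        return b.SyncedHeight() >= chainHeight, nil
}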

// GetChannelByID returns the channel by the channel id.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (b *Builder) GetChannelByID(chanID lnwire.ShortChannelID) (
        *models.ChannelEdgeInfo,
        *models.ChannelEdgePolicy,
        *models.ChannelEdgePolicy, error) {

        return b.cfg.Graph.FetchChannelEdgesByID(chanID.ToUint64())
}
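
// Illustrative sketch (not part of the original file): fetching a channel by
// its short channel ID yields the static edge info plus one policy per
// direction. Treating a nil policy as "no update received yet for that
// direction" is an assumption stated here for illustration.
func exampleDescribeChannel(b *Builder, chanID lnwire.ShortChannelID) error {
        info, policy1, policy2, err := b.GetChannelByID(chanID)
        if err != nil {
                return err
        }

        log.Debugf("Channel %v has capacity %v", info.ChannelID, info.Capacity)

        if policy1 == nil || policy2 == nil {
                log.Debugf("Channel %v is missing at least one directional "+
                        "policy", info.ChannelID)
        }

        return nil
}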

// FetchLightningNode attempts to look up a target node by its identity public
// key. graphdb.ErrGraphNodeNotFound is returned if the node doesn't exist
// within the graph.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (b *Builder) FetchLightningNode(
        node route.Vertex) (*models.LightningNode, error) {

        return b.cfg.Graph.FetchLightningNode(node)
}
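
// Illustrative sketch (not part of the original file): a lookup wrapper that
// treats graphdb.ErrGraphNodeNotFound as "unknown node" rather than a hard
// failure. The helper name is an assumption for illustration only.
func exampleNodeKnown(b *Builder, node route.Vertex) (bool, error) {
        _, err := b.FetchLightningNode(node)
        switch {
        // An unknown node is not an error for this caller.
        case errors.Is(err, graphdb.ErrGraphNodeNotFound):
                return false, nil

        case err != nil:
                return false, err
        }

        return true, nil
}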

// ForAllOutgoingChannels is used to iterate over all outgoing channels owned by
// the router.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (b *Builder) ForAllOutgoingChannels(cb func(*models.ChannelEdgeInfo,
        *models.ChannelEdgePolicy) error) error {

        return b.cfg.Graph.ForEachNodeChannel(b.cfg.SelfNode,
                func(_ kvdb.RTx, c *models.ChannelEdgeInfo,
                        e *models.ChannelEdgePolicy,
                        _ *models.ChannelEdgePolicy) error {

                        if e == nil {
                                return fmt.Errorf("channel from self node " +
                                        "has no policy")
                        }

                        return cb(c, e)
                },
        )
}
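
// Illustrative sketch (not part of the original file): using the callback to
// count our own channels while logging their capacities. The helper exists
// only for illustration.
func exampleCountOutgoingChannels(b *Builder) (int, error) {
        var numChans int
        err := b.ForAllOutgoingChannels(func(info *models.ChannelEdgeInfo,
                policy *models.ChannelEdgePolicy) error {

                numChans++
                log.Tracef("Outgoing channel %v with capacity %v",
                        info.ChannelID, info.Capacity)

                return nil
        })

        return numChans, err
}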

// AddProof updates the channel edge info with proof which is needed to
// properly announce the edge to the rest of the network.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (b *Builder) AddProof(chanID lnwire.ShortChannelID,
        proof *models.ChannelAuthProof) error {

        return b.cfg.Graph.AddEdgeProof(chanID, proof)
}

// IsStaleNode returns true if the graph source has a node announcement for the
// target node with a more recent timestamp.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (b *Builder) IsStaleNode(node route.Vertex,
        timestamp time.Time) bool {

        // If our attempt to assert that the node announcement is fresh fails,
        // then we know that this is actually a stale announcement.
        err := b.assertNodeAnnFreshness(node, timestamp)
        if err != nil {
                log.Debugf("Checking stale node %x got %v", node, err)
                return true
        }

        return false
}

// IsPublicNode determines whether the given vertex is seen as a public node in
// the graph from the graph's source node's point of view.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (b *Builder) IsPublicNode(node route.Vertex) (bool, error) {
        return b.cfg.Graph.IsPublicNode(node)
}

// IsKnownEdge returns true if the graph source already knows of the passed
// channel ID either as a live or zombie edge.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (b *Builder) IsKnownEdge(chanID lnwire.ShortChannelID) bool {
        _, _, exists, isZombie, _ := b.cfg.Graph.HasChannelEdge(
                chanID.ToUint64(),
        )

        return exists || isZombie
}

// IsZombieEdge returns true if the graph source has marked the given channel ID
// as a zombie edge.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (b *Builder) IsZombieEdge(chanID lnwire.ShortChannelID) (bool, error) {
        _, _, _, isZombie, err := b.cfg.Graph.HasChannelEdge(chanID.ToUint64())

        return isZombie, err
}
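
// Illustrative sketch (not part of the original file): a gossip-style caller
// could combine IsKnownEdge and IsZombieEdge to decide whether a channel
// announcement needs further processing. The helper name is an assumption for
// illustration only.
func exampleShouldProcessAnnouncement(b *Builder,
        chanID lnwire.ShortChannelID) (bool, error) {

        // Completely unknown channels always need processing.
        if !b.IsKnownEdge(chanID) {
                return true, nil
        }

        // Known channels only warrant another look if they were previously
        // marked as zombies and may now be resurrected.
        return b.IsZombieEdge(chanID)
}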

// IsStaleEdgePolicy returns true if the graph source has a channel edge for
// the passed channel ID (and flags) that has a more recent timestamp.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (b *Builder) IsStaleEdgePolicy(chanID lnwire.ShortChannelID,
        timestamp time.Time, flags lnwire.ChanUpdateChanFlags) bool {

        edge1Timestamp, edge2Timestamp, exists, isZombie, err :=
                b.cfg.Graph.HasChannelEdge(chanID.ToUint64())
        if err != nil {
                log.Debugf("Check stale edge policy got error: %v", err)
                return false
        }

        // If we know of the edge as a zombie, then we'll make some additional
        // checks to determine if the new policy is fresh.
        if isZombie {
                // When running with AssumeChannelValid, we also prune channels
                // if both of their edges are disabled. We'll mark the new
                // policy as stale if it remains disabled.
                if b.cfg.AssumeChannelValid {
                        isDisabled := flags&lnwire.ChanUpdateDisabled ==
                                lnwire.ChanUpdateDisabled
                        if isDisabled {
                                return true
                        }
                }

                // Otherwise, we'll fall back to our usual ChannelPruneExpiry.
                return time.Since(timestamp) > b.cfg.ChannelPruneExpiry
        }

        // If we don't know of the edge, then it means it's fresh (thus not
        // stale).
        if !exists {
                return false
        }

        // As edges are directional, each node has a unique policy for the
        // direction of the edge it controls. Therefore, we first check if we
        // already have the most up-to-date information for that edge. If so,
        // then we can exit early.
        switch {
        // A flag set of 0 indicates this is an announcement for the "first"
        // node in the channel.
        case flags&lnwire.ChanUpdateDirection == 0:
                return !edge1Timestamp.Before(timestamp)

        // Similarly, a flag set of 1 indicates this is an announcement for the
        // "second" node in the channel.
        case flags&lnwire.ChanUpdateDirection == 1:
                return !edge2Timestamp.Before(timestamp)
        }

        return false
}
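
// Illustrative sketch (not part of the original file): before doing any
// expensive validation of a channel update, a caller can drop it early when
// the graph already holds a newer policy for the same direction. The wrapper
// below exists only for illustration.
func exampleKeepChannelUpdate(b *Builder, chanID lnwire.ShortChannelID,
        timestamp time.Time, flags lnwire.ChanUpdateChanFlags) bool {

        if b.IsStaleEdgePolicy(chanID, timestamp, flags) {
                log.Debugf("Dropping stale update for chan_id=%v",
                        chanID.ToUint64())

                return false
        }

        // The update is fresh (or the channel is unknown), so keep it.
        return true
}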

// MarkEdgeLive clears an edge from our zombie index, deeming it as live.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (b *Builder) MarkEdgeLive(chanID lnwire.ShortChannelID) error {
        return b.cfg.Graph.MarkEdgeLive(chanID.ToUint64())
}
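
// Illustrative sketch (not part of the original file): resurrecting a channel
// that was previously marked as a zombie once fresh evidence of it arrives.
// The helper is an assumption made for illustration only.
func exampleResurrectIfZombie(b *Builder, chanID lnwire.ShortChannelID) error {
        isZombie, err := b.IsZombieEdge(chanID)
        if err != nil {
                return err
        }

        if !isZombie {
                return nil
        }

        // Clear the zombie marker so the edge is treated as live again.
        return b.MarkEdgeLive(chanID)
}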