• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

lightningnetwork / lnd / 13152857719

05 Feb 2025 08:07AM UTC coverage: 58.808% (+0.01%) from 58.798%
13152857719

Pull #9476

github

ellemouton
docs: update release notes
Pull Request #9476: graph: refactor `graph.Builder` update handling

221 of 274 new or added lines in 4 files covered. (80.66%)

46 existing lines in 16 files now uncovered.

136151 of 231519 relevant lines covered (58.81%)

19234.33 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

78.98
/graph/builder.go
1
package graph
2

3
import (
4
        "bytes"
5
        "fmt"
6
        "strings"
7
        "sync"
8
        "sync/atomic"
9
        "time"
10

11
        "github.com/btcsuite/btcd/btcec/v2"
12
        "github.com/btcsuite/btcd/btcutil"
13
        "github.com/btcsuite/btcd/chaincfg/chainhash"
14
        "github.com/btcsuite/btcd/wire"
15
        "github.com/go-errors/errors"
16
        "github.com/lightningnetwork/lnd/batch"
17
        "github.com/lightningnetwork/lnd/chainntnfs"
18
        "github.com/lightningnetwork/lnd/fn/v2"
19
        graphdb "github.com/lightningnetwork/lnd/graph/db"
20
        "github.com/lightningnetwork/lnd/graph/db/models"
21
        "github.com/lightningnetwork/lnd/input"
22
        "github.com/lightningnetwork/lnd/kvdb"
23
        "github.com/lightningnetwork/lnd/lnutils"
24
        "github.com/lightningnetwork/lnd/lnwallet"
25
        "github.com/lightningnetwork/lnd/lnwallet/btcwallet"
26
        "github.com/lightningnetwork/lnd/lnwallet/chanvalidate"
27
        "github.com/lightningnetwork/lnd/lnwire"
28
        "github.com/lightningnetwork/lnd/multimutex"
29
        "github.com/lightningnetwork/lnd/netann"
30
        "github.com/lightningnetwork/lnd/routing/chainview"
31
        "github.com/lightningnetwork/lnd/routing/route"
32
        "github.com/lightningnetwork/lnd/ticker"
33
)
34

35
const (
36
        // DefaultChannelPruneExpiry is the default duration used to determine
37
        // if a channel should be pruned or not.
38
        DefaultChannelPruneExpiry = time.Hour * 24 * 14
39

40
        // DefaultFirstTimePruneDelay is the time we'll wait after startup
41
        // before attempting to prune the graph for zombie channels. We don't
42
        // do it immediately after startup to allow lnd to start up without
43
        // getting blocked by this job.
44
        DefaultFirstTimePruneDelay = 30 * time.Second
45

46
        // defaultStatInterval governs how often the router will log non-empty
47
        // stats related to processing new channels, updates, or node
48
        // announcements.
49
        defaultStatInterval = time.Minute
50
)
51

52
var (
53
        // ErrGraphBuilderShuttingDown is returned if the graph builder is in
54
        // the process of shutting down.
55
        ErrGraphBuilderShuttingDown = fmt.Errorf("graph builder shutting down")
56
)
57

58
// Config holds the configuration required by the Builder.
59
type Config struct {
60
        // SelfNode is the public key of the node that this channel router
61
        // belongs to.
62
        SelfNode route.Vertex
63

64
        // Graph is the channel graph that the ChannelRouter will use to gather
65
        // metrics from and also to carry out path finding queries.
66
        Graph DB
67

68
        // Chain is the router's source to the most up-to-date blockchain data.
69
        // All incoming advertised channels will be checked against the chain
70
        // to ensure that the channels advertised are still open.
71
        Chain lnwallet.BlockChainIO
72

73
        // ChainView is an instance of a FilteredChainView which is used to
74
        // watch the sub-set of the UTXO set (the set of active channels) that
75
        // we need in order to properly maintain the channel graph.
76
        ChainView chainview.FilteredChainView
77

78
        // Notifier is a reference to the ChainNotifier, used to grab
79
        // the latest blocks if the router is missing any.
80
        Notifier chainntnfs.ChainNotifier
81

82
        // ChannelPruneExpiry is the duration used to determine if a channel
83
        // should be pruned or not. If the delta between now and when the
84
        // channel was last updated is greater than ChannelPruneExpiry, then
85
        // the channel is marked as a zombie channel eligible for pruning.
86
        ChannelPruneExpiry time.Duration
87

88
        // GraphPruneInterval is used as an interval to determine how often we
89
        // should examine the channel graph to garbage collect zombie channels.
90
        GraphPruneInterval time.Duration
91

92
        // FirstTimePruneDelay is the time we'll wait after startup before
93
        // attempting to prune the graph for zombie channels. We don't do it
94
        // immediately after startup to allow lnd to start up without getting
95
        // blocked by this job.
96
        FirstTimePruneDelay time.Duration
97

98
        // AssumeChannelValid toggles whether the router will check for
99
        // spentness of channel outpoints. For neutrino, this saves long rescans
100
        // from blocking initial usage of the daemon.
101
        AssumeChannelValid bool
102

103
        // StrictZombiePruning determines if we attempt to prune zombie
104
        // channels according to a stricter criteria. If true, then we'll prune
105
        // a channel if only *one* of the edges is considered a zombie.
106
        // Otherwise, we'll only prune the channel when both edges have a very
107
        // dated last update.
108
        StrictZombiePruning bool
109

110
        // IsAlias returns whether a passed ShortChannelID is an alias. This is
111
        // only used for our local channels.
112
        IsAlias func(scid lnwire.ShortChannelID) bool
113
}
114

115
// Builder builds and maintains a view of the Lightning Network graph.
116
type Builder struct {
117
        started atomic.Bool
118
        stopped atomic.Bool
119

120
        ntfnClientCounter atomic.Uint64
121
        bestHeight        atomic.Uint32
122

123
        cfg *Config
124

125
        // newBlocks is a channel in which new blocks connected to the end of
126
        // the main chain are sent over, and blocks updated after a call to
127
        // UpdateFilter.
128
        newBlocks <-chan *chainview.FilteredBlock
129

130
        // staleBlocks is a channel in which blocks disconnected from the end
131
        // of our currently known best chain are sent over.
132
        staleBlocks <-chan *chainview.FilteredBlock
133

134
        // topologyClients maps a client's unique notification ID to a
135
        // topologyClient client that contains its notification dispatch
136
        // channel.
137
        topologyClients *lnutils.SyncMap[uint64, *topologyClient]
138

139
        // ntfnClientUpdates is a channel that's used to send new updates to
140
        // topology notification clients to the Builder. Updates either
141
        // add a new notification client, or cancel notifications for an
142
        // existing client.
143
        ntfnClientUpdates chan *topologyClientUpdate
144

145
        // channelEdgeMtx is a mutex we use to make sure we process only one
146
        // ChannelEdgePolicy at a time for a given channelID, to ensure
147
        // consistency between the various database accesses.
148
        channelEdgeMtx *multimutex.Mutex[uint64]
149

150
        // statTicker is a resumable ticker that logs the router's progress as
151
        // it discovers channels or receives updates.
152
        statTicker ticker.Ticker
153

154
        // stats tracks newly processed channels, updates, and node
155
        // announcements over a window of defaultStatInterval.
156
        stats *builderStats
157

158
        quit chan struct{}
159
        wg   sync.WaitGroup
160
}
161

162
// A compile time check to ensure Builder implements the
163
// ChannelGraphSource interface.
164
var _ ChannelGraphSource = (*Builder)(nil)
165

166
// NewBuilder constructs a new Builder.
167
func NewBuilder(cfg *Config) (*Builder, error) {
24✔
168
        return &Builder{
24✔
169
                cfg:               cfg,
24✔
170
                topologyClients:   &lnutils.SyncMap[uint64, *topologyClient]{},
24✔
171
                ntfnClientUpdates: make(chan *topologyClientUpdate),
24✔
172
                channelEdgeMtx:    multimutex.NewMutex[uint64](),
24✔
173
                statTicker:        ticker.New(defaultStatInterval),
24✔
174
                stats:             new(builderStats),
24✔
175
                quit:              make(chan struct{}),
24✔
176
        }, nil
24✔
177
}
24✔
178

179
// Start launches all the goroutines the Builder requires to carry out its
180
// duties. If the builder has already been started, then this method is a noop.
181
func (b *Builder) Start() error {
24✔
182
        if !b.started.CompareAndSwap(false, true) {
24✔
183
                return nil
×
184
        }
×
185

186
        log.Info("Builder starting")
24✔
187

24✔
188
        bestHash, bestHeight, err := b.cfg.Chain.GetBestBlock()
24✔
189
        if err != nil {
24✔
190
                return err
×
191
        }
×
192

193
        // If the graph has never been pruned, or hasn't fully been created yet,
194
        // then we don't treat this as an explicit error.
195
        if _, _, err := b.cfg.Graph.PruneTip(); err != nil {
46✔
196
                switch {
22✔
197
                case errors.Is(err, graphdb.ErrGraphNeverPruned):
22✔
198
                        fallthrough
22✔
199

200
                case errors.Is(err, graphdb.ErrGraphNotFound):
22✔
201
                        // If the graph has never been pruned, then we'll set
22✔
202
                        // the prune height to the current best height of the
22✔
203
                        // chain backend.
22✔
204
                        _, err = b.cfg.Graph.PruneGraph(
22✔
205
                                nil, bestHash, uint32(bestHeight),
22✔
206
                        )
22✔
207
                        if err != nil {
22✔
208
                                return err
×
209
                        }
×
210

211
                default:
×
212
                        return err
×
213
                }
214
        }
215

216
        // If AssumeChannelValid is present, then we won't rely on pruning
217
        // channels from the graph based on their spentness, but whether they
218
        // are considered zombies or not. We will start zombie pruning after a
219
        // small delay, to avoid slowing down startup of lnd.
220
        if b.cfg.AssumeChannelValid { //nolint:nestif
25✔
221
                time.AfterFunc(b.cfg.FirstTimePruneDelay, func() {
2✔
222
                        select {
1✔
223
                        case <-b.quit:
×
224
                                return
×
225
                        default:
1✔
226
                        }
227

228
                        log.Info("Initial zombie prune starting")
1✔
229
                        if err := b.pruneZombieChans(); err != nil {
1✔
230
                                log.Errorf("Unable to prune zombies: %v", err)
×
231
                        }
×
232
                })
233
        } else {
23✔
234
                // Otherwise, we'll use our filtered chain view to prune
23✔
235
                // channels as soon as they are detected as spent on-chain.
23✔
236
                if err := b.cfg.ChainView.Start(); err != nil {
23✔
237
                        return err
×
238
                }
×
239

240
                // Once the instance is active, we'll fetch the channel we'll
241
                // receive notifications over.
242
                b.newBlocks = b.cfg.ChainView.FilteredBlocks()
23✔
243
                b.staleBlocks = b.cfg.ChainView.DisconnectedBlocks()
23✔
244

23✔
245
                // Before we perform our manual block pruning, we'll construct
23✔
246
                // and apply a fresh chain filter to the active
23✔
247
                // FilteredChainView instance.  We do this before, as otherwise
23✔
248
                // we may miss on-chain events as the filter hasn't properly
23✔
249
                // been applied.
23✔
250
                channelView, err := b.cfg.Graph.ChannelView()
23✔
251
                if err != nil && !errors.Is(
23✔
252
                        err, graphdb.ErrGraphNoEdgesFound,
23✔
253
                ) {
23✔
254

×
255
                        return err
×
256
                }
×
257

258
                log.Infof("Filtering chain using %v channels active",
23✔
259
                        len(channelView))
23✔
260

23✔
261
                if len(channelView) != 0 {
33✔
262
                        err = b.cfg.ChainView.UpdateFilter(
10✔
263
                                channelView, uint32(bestHeight),
10✔
264
                        )
10✔
265
                        if err != nil {
10✔
266
                                return err
×
267
                        }
×
268
                }
269

270
                // The graph pruning might have taken a while and there could be
271
                // new blocks available.
272
                _, bestHeight, err = b.cfg.Chain.GetBestBlock()
23✔
273
                if err != nil {
23✔
274
                        return err
×
275
                }
×
276
                b.bestHeight.Store(uint32(bestHeight))
23✔
277

23✔
278
                // Before we begin normal operation of the router, we first need
23✔
279
                // to synchronize the channel graph to the latest state of the
23✔
280
                // UTXO set.
23✔
281
                if err := b.syncGraphWithChain(); err != nil {
23✔
282
                        return err
×
283
                }
×
284

285
                // Finally, before we proceed, we'll prune any unconnected nodes
286
                // from the graph in order to ensure we maintain a tight graph
287
                // of "useful" nodes.
288
                err = b.cfg.Graph.PruneGraphNodes()
23✔
289
                if err != nil &&
23✔
290
                        !errors.Is(err, graphdb.ErrGraphNodesNotFound) {
23✔
291

×
292
                        return err
×
293
                }
×
294
        }
295

296
        b.wg.Add(1)
24✔
297
        go b.networkHandler()
24✔
298

24✔
299
        log.Debug("Builder started")
24✔
300

24✔
301
        return nil
24✔
302
}
303

304
// Stop signals to the Builder that it should halt all routines. This method
305
// will *block* until all goroutines have excited. If the builder has already
306
// stopped then this method will return immediately.
307
func (b *Builder) Stop() error {
24✔
308
        if !b.stopped.CompareAndSwap(false, true) {
26✔
309
                return nil
2✔
310
        }
2✔
311

312
        log.Info("Builder shutting down...")
22✔
313

22✔
314
        // Our filtered chain view could've only been started if
22✔
315
        // AssumeChannelValid isn't present.
22✔
316
        if !b.cfg.AssumeChannelValid {
43✔
317
                if err := b.cfg.ChainView.Stop(); err != nil {
21✔
318
                        return err
×
319
                }
×
320
        }
321

322
        close(b.quit)
22✔
323
        b.wg.Wait()
22✔
324

22✔
325
        log.Debug("Builder shutdown complete")
22✔
326

22✔
327
        return nil
22✔
328
}
329

330
// syncGraphWithChain attempts to synchronize the current channel graph with
331
// the latest UTXO set state. This process involves pruning from the channel
332
// graph any channels which have been closed by spending their funding output
333
// since we've been down.
334
func (b *Builder) syncGraphWithChain() error {
23✔
335
        // First, we'll need to check to see if we're already in sync with the
23✔
336
        // latest state of the UTXO set.
23✔
337
        bestHash, bestHeight, err := b.cfg.Chain.GetBestBlock()
23✔
338
        if err != nil {
23✔
339
                return err
×
340
        }
×
341
        b.bestHeight.Store(uint32(bestHeight))
23✔
342

23✔
343
        pruneHash, pruneHeight, err := b.cfg.Graph.PruneTip()
23✔
344
        if err != nil {
23✔
345
                switch {
×
346
                // If the graph has never been pruned, or hasn't fully been
347
                // created yet, then we don't treat this as an explicit error.
348
                case errors.Is(err, graphdb.ErrGraphNeverPruned):
×
349
                case errors.Is(err, graphdb.ErrGraphNotFound):
×
350
                default:
×
351
                        return err
×
352
                }
353
        }
354

355
        log.Infof("Prune tip for Channel Graph: height=%v, hash=%v",
23✔
356
                pruneHeight, pruneHash)
23✔
357

23✔
358
        switch {
23✔
359
        // If the graph has never been pruned, then we can exit early as this
360
        // entails it's being created for the first time and hasn't seen any
361
        // block or created channels.
362
        case pruneHeight == 0 || pruneHash == nil:
4✔
363
                return nil
4✔
364

365
        // If the block hashes and heights match exactly, then we don't need to
366
        // prune the channel graph as we're already fully in sync.
367
        case bestHash.IsEqual(pruneHash) && uint32(bestHeight) == pruneHeight:
17✔
368
                return nil
17✔
369
        }
370

371
        // If the main chain blockhash at prune height is different from the
372
        // prune hash, this might indicate the database is on a stale branch.
373
        mainBlockHash, err := b.cfg.Chain.GetBlockHash(int64(pruneHeight))
5✔
374
        if err != nil {
5✔
375
                return err
×
376
        }
×
377

378
        // While we are on a stale branch of the chain, walk backwards to find
379
        // first common block.
380
        for !pruneHash.IsEqual(mainBlockHash) {
15✔
381
                log.Infof("channel graph is stale. Disconnecting block %v "+
10✔
382
                        "(hash=%v)", pruneHeight, pruneHash)
10✔
383
                // Prune the graph for every channel that was opened at height
10✔
384
                // >= pruneHeight.
10✔
385
                _, err := b.cfg.Graph.DisconnectBlockAtHeight(pruneHeight)
10✔
386
                if err != nil {
10✔
387
                        return err
×
388
                }
×
389

390
                pruneHash, pruneHeight, err = b.cfg.Graph.PruneTip()
10✔
391
                switch {
10✔
392
                // If at this point the graph has never been pruned, we can exit
393
                // as this entails we are back to the point where it hasn't seen
394
                // any block or created channels, alas there's nothing left to
395
                // prune.
396
                case errors.Is(err, graphdb.ErrGraphNeverPruned):
×
397
                        return nil
×
398

399
                case errors.Is(err, graphdb.ErrGraphNotFound):
×
400
                        return nil
×
401

402
                case err != nil:
×
403
                        return err
×
404

405
                default:
10✔
406
                }
407

408
                mainBlockHash, err = b.cfg.Chain.GetBlockHash(
10✔
409
                        int64(pruneHeight),
10✔
410
                )
10✔
411
                if err != nil {
10✔
412
                        return err
×
413
                }
×
414
        }
415

416
        log.Infof("Syncing channel graph from height=%v (hash=%v) to "+
5✔
417
                "height=%v (hash=%v)", pruneHeight, pruneHash, bestHeight,
5✔
418
                bestHash)
5✔
419

5✔
420
        // If we're not yet caught up, then we'll walk forward in the chain
5✔
421
        // pruning the channel graph with each new block that hasn't yet been
5✔
422
        // consumed by the channel graph.
5✔
423
        var spentOutputs []*wire.OutPoint
5✔
424
        for nextHeight := pruneHeight + 1; nextHeight <= uint32(bestHeight); nextHeight++ { //nolint:ll
28✔
425
                // Break out of the rescan early if a shutdown has been
23✔
426
                // requested, otherwise long rescans will block the daemon from
23✔
427
                // shutting down promptly.
23✔
428
                select {
23✔
429
                case <-b.quit:
×
430
                        return ErrGraphBuilderShuttingDown
×
431
                default:
23✔
432
                }
433

434
                // Using the next height, request a manual block pruning from
435
                // the chainview for the particular block hash.
436
                log.Infof("Filtering block for closed channels, at height: %v",
23✔
437
                        int64(nextHeight))
23✔
438
                nextHash, err := b.cfg.Chain.GetBlockHash(int64(nextHeight))
23✔
439
                if err != nil {
23✔
440
                        return err
×
441
                }
×
442
                log.Tracef("Running block filter on block with hash: %v",
23✔
443
                        nextHash)
23✔
444
                filterBlock, err := b.cfg.ChainView.FilterBlock(nextHash)
23✔
445
                if err != nil {
23✔
446
                        return err
×
447
                }
×
448

449
                // We're only interested in all prior outputs that have been
450
                // spent in the block, so collate all the referenced previous
451
                // outpoints within each tx and input.
452
                for _, tx := range filterBlock.Transactions {
27✔
453
                        for _, txIn := range tx.TxIn {
8✔
454
                                spentOutputs = append(spentOutputs,
4✔
455
                                        &txIn.PreviousOutPoint)
4✔
456
                        }
4✔
457
                }
458
        }
459

460
        // With the spent outputs gathered, attempt to prune the channel graph,
461
        // also passing in the best hash+height so the prune tip can be updated.
462
        closedChans, err := b.cfg.Graph.PruneGraph(
5✔
463
                spentOutputs, bestHash, uint32(bestHeight),
5✔
464
        )
5✔
465
        if err != nil {
5✔
466
                return err
×
467
        }
×
468

469
        log.Infof("Graph pruning complete: %v channels were closed since "+
5✔
470
                "height %v", len(closedChans), pruneHeight)
5✔
471

5✔
472
        return nil
5✔
473
}
474

475
// isZombieChannel takes two edge policy updates and determines if the
476
// corresponding channel should be considered a zombie. The first boolean is
477
// true if the policy update from node 1 is considered a zombie, the second
478
// boolean is that of node 2, and the final boolean is true if the channel
479
// is considered a zombie.
480
func (b *Builder) isZombieChannel(e1,
481
        e2 *models.ChannelEdgePolicy) (bool, bool, bool) {
6✔
482

6✔
483
        chanExpiry := b.cfg.ChannelPruneExpiry
6✔
484

6✔
485
        e1Zombie := e1 == nil || time.Since(e1.LastUpdate) >= chanExpiry
6✔
486
        e2Zombie := e2 == nil || time.Since(e2.LastUpdate) >= chanExpiry
6✔
487

6✔
488
        var e1Time, e2Time time.Time
6✔
489
        if e1 != nil {
10✔
490
                e1Time = e1.LastUpdate
4✔
491
        }
4✔
492
        if e2 != nil {
12✔
493
                e2Time = e2.LastUpdate
6✔
494
        }
6✔
495

496
        return e1Zombie, e2Zombie, b.IsZombieChannel(e1Time, e2Time)
6✔
497
}
498

499
// IsZombieChannel takes the timestamps of the latest channel updates for a
500
// channel and returns true if the channel should be considered a zombie based
501
// on these timestamps.
502
func (b *Builder) IsZombieChannel(updateTime1,
503
        updateTime2 time.Time) bool {
9✔
504

9✔
505
        chanExpiry := b.cfg.ChannelPruneExpiry
9✔
506

9✔
507
        e1Zombie := updateTime1.IsZero() ||
9✔
508
                time.Since(updateTime1) >= chanExpiry
9✔
509

9✔
510
        e2Zombie := updateTime2.IsZero() ||
9✔
511
                time.Since(updateTime2) >= chanExpiry
9✔
512

9✔
513
        // If we're using strict zombie pruning, then a channel is only
9✔
514
        // considered live if both edges have a recent update we know of.
9✔
515
        if b.cfg.StrictZombiePruning {
13✔
516
                return e1Zombie || e2Zombie
4✔
517
        }
4✔
518

519
        // Otherwise, if we're using the less strict variant, then a channel is
520
        // considered live if either of the edges have a recent update.
521
        return e1Zombie && e2Zombie
5✔
522
}
523

524
// pruneZombieChans is a method that will be called periodically to prune out
525
// any "zombie" channels. We consider channels zombies if *both* edges haven't
526
// been updated since our zombie horizon. If AssumeChannelValid is present,
527
// we'll also consider channels zombies if *both* edges are disabled. This
528
// usually signals that a channel has been closed on-chain. We do this
529
// periodically to keep a healthy, lively routing table.
530
func (b *Builder) pruneZombieChans() error {
5✔
531
        chansToPrune := make(map[uint64]struct{})
5✔
532
        chanExpiry := b.cfg.ChannelPruneExpiry
5✔
533

5✔
534
        log.Infof("Examining channel graph for zombie channels")
5✔
535

5✔
536
        // A helper method to detect if the channel belongs to this node
5✔
537
        isSelfChannelEdge := func(info *models.ChannelEdgeInfo) bool {
16✔
538
                return info.NodeKey1Bytes == b.cfg.SelfNode ||
11✔
539
                        info.NodeKey2Bytes == b.cfg.SelfNode
11✔
540
        }
11✔
541

542
        // First, we'll collect all the channels which are eligible for garbage
543
        // collection due to being zombies.
544
        filterPruneChans := func(info *models.ChannelEdgeInfo,
5✔
545
                e1, e2 *models.ChannelEdgePolicy) error {
13✔
546

8✔
547
                // Exit early in case this channel is already marked to be
8✔
548
                // pruned
8✔
549
                _, markedToPrune := chansToPrune[info.ChannelID]
8✔
550
                if markedToPrune {
8✔
551
                        return nil
×
552
                }
×
553

554
                // We'll ensure that we don't attempt to prune our *own*
555
                // channels from the graph, as in any case this should be
556
                // re-advertised by the sub-system above us.
557
                if isSelfChannelEdge(info) {
10✔
558
                        return nil
2✔
559
                }
2✔
560

561
                e1Zombie, e2Zombie, isZombieChan := b.isZombieChannel(e1, e2)
6✔
562

6✔
563
                if e1Zombie {
10✔
564
                        log.Tracef("Node1 pubkey=%x of chan_id=%v is zombie",
4✔
565
                                info.NodeKey1Bytes, info.ChannelID)
4✔
566
                }
4✔
567

568
                if e2Zombie {
12✔
569
                        log.Tracef("Node2 pubkey=%x of chan_id=%v is zombie",
6✔
570
                                info.NodeKey2Bytes, info.ChannelID)
6✔
571
                }
6✔
572

573
                // If either edge hasn't been updated for a period of
574
                // chanExpiry, then we'll mark the channel itself as eligible
575
                // for graph pruning.
576
                if !isZombieChan {
7✔
577
                        return nil
1✔
578
                }
1✔
579

580
                log.Debugf("ChannelID(%v) is a zombie, collecting to prune",
5✔
581
                        info.ChannelID)
5✔
582

5✔
583
                // TODO(roasbeef): add ability to delete single directional edge
5✔
584
                chansToPrune[info.ChannelID] = struct{}{}
5✔
585

5✔
586
                return nil
5✔
587
        }
588

589
        // If AssumeChannelValid is present we'll look at the disabled bit for
590
        // both edges. If they're both disabled, then we can interpret this as
591
        // the channel being closed and can prune it from our graph.
592
        if b.cfg.AssumeChannelValid {
7✔
593
                disabledChanIDs, err := b.cfg.Graph.DisabledChannelIDs()
2✔
594
                if err != nil {
2✔
595
                        return fmt.Errorf("unable to get disabled channels "+
×
596
                                "ids chans: %v", err)
×
597
                }
×
598

599
                disabledEdges, err := b.cfg.Graph.FetchChanInfos(
2✔
600
                        disabledChanIDs,
2✔
601
                )
2✔
602
                if err != nil {
2✔
603
                        return fmt.Errorf("unable to fetch disabled channels "+
×
604
                                "edges chans: %v", err)
×
605
                }
×
606

607
                // Ensuring we won't prune our own channel from the graph.
608
                for _, disabledEdge := range disabledEdges {
5✔
609
                        if !isSelfChannelEdge(disabledEdge.Info) {
4✔
610
                                chansToPrune[disabledEdge.Info.ChannelID] =
1✔
611
                                        struct{}{}
1✔
612
                        }
1✔
613
                }
614
        }
615

616
        startTime := time.Unix(0, 0)
5✔
617
        endTime := time.Now().Add(-1 * chanExpiry)
5✔
618
        oldEdges, err := b.cfg.Graph.ChanUpdatesInHorizon(startTime, endTime)
5✔
619
        if err != nil {
5✔
620
                return fmt.Errorf("unable to fetch expired channel updates "+
×
621
                        "chans: %v", err)
×
622
        }
×
623

624
        for _, u := range oldEdges {
13✔
625
                err = filterPruneChans(u.Info, u.Policy1, u.Policy2)
8✔
626
                if err != nil {
8✔
627
                        return fmt.Errorf("error filtering channels to "+
×
628
                                "prune: %w", err)
×
629
                }
×
630
        }
631

632
        log.Infof("Pruning %v zombie channels", len(chansToPrune))
5✔
633
        if len(chansToPrune) == 0 {
7✔
634
                return nil
2✔
635
        }
2✔
636

637
        // With the set of zombie-like channels obtained, we'll do another pass
638
        // to delete them from the channel graph.
639
        toPrune := make([]uint64, 0, len(chansToPrune))
3✔
640
        for chanID := range chansToPrune {
9✔
641
                toPrune = append(toPrune, chanID)
6✔
642
                log.Tracef("Pruning zombie channel with ChannelID(%v)", chanID)
6✔
643
        }
6✔
644
        err = b.cfg.Graph.DeleteChannelEdges(
3✔
645
                b.cfg.StrictZombiePruning, true, toPrune...,
3✔
646
        )
3✔
647
        if err != nil {
3✔
648
                return fmt.Errorf("unable to delete zombie channels: %w", err)
×
649
        }
×
650

651
        // With the channels pruned, we'll also attempt to prune any nodes that
652
        // were a part of them.
653
        err = b.cfg.Graph.PruneGraphNodes()
3✔
654
        if err != nil && !errors.Is(err, graphdb.ErrGraphNodesNotFound) {
3✔
655
                return fmt.Errorf("unable to prune graph nodes: %w", err)
×
656
        }
×
657

658
        return nil
3✔
659
}
660

661
// handleNetworkUpdate is responsible for processing the update message and
662
// notifies topology changes, if any.
663
func (b *Builder) handleNetworkUpdate(msg any,
664
        op ...batch.SchedulerOption) error {
33✔
665

33✔
666
        select {
33✔
NEW
667
        case <-b.quit:
×
NEW
668
                return ErrGraphBuilderShuttingDown
×
669
        default:
33✔
670
        }
671

672
        // Process the routing update to determine if this is either a new
673
        // update from our PoV or an update to a prior vertex/edge we
674
        // previously accepted.
675
        var err error
33✔
676
        switch msg := msg.(type) {
33✔
677
        case *models.LightningNode:
10✔
678
                err = b.addNode(msg, op...)
10✔
679

680
        case *models.ChannelEdgeInfo:
20✔
681
                err = b.addEdge(msg, op...)
20✔
682

683
        case *models.ChannelEdgePolicy:
9✔
684
                err = b.updateEdge(msg, op...)
9✔
685

NEW
686
        default:
×
NEW
687
                return errors.Errorf("wrong routing update message type")
×
688
        }
689
        // If the error is not nil here, there's no need to send topology
690
        // change.
691
        if err != nil {
41✔
692
                // Log as a debug message if this is not an error we need to be
8✔
693
                // concerned about.
8✔
694
                if IsError(err, ErrIgnored, ErrOutdated) {
13✔
695
                        log.Debugf("process network updates got: %v", err)
5✔
696
                } else {
8✔
697
                        log.Errorf("process network updates got: %v", err)
3✔
698
                }
3✔
699

700
                return err
8✔
701
        }
702

703
        b.wg.Add(1)
28✔
704
        go func() {
56✔
705
                defer b.wg.Done()
28✔
706

28✔
707
                // Otherwise, we'll send off a new notification for the newly
28✔
708
                // accepted update, if any.
28✔
709
                topChange := &TopologyChange{}
28✔
710
                err = addToTopologyChange(b.cfg.Graph, topChange, msg)
28✔
711
                if err != nil {
28✔
NEW
712
                        log.Errorf("unable to update topology change "+
×
NEW
713
                                "notification: %v", err)
×
NEW
714
                        return
×
NEW
715
                }
×
716

717
                if !topChange.isEmpty() {
42✔
718
                        b.notifyTopologyChange(topChange)
14✔
719
                }
14✔
720
        }()
721

722
        return nil
28✔
723
}
724

725
// networkHandler is the primary goroutine for the Builder. The roles of
726
// this goroutine include answering queries related to the state of the
727
// network, pruning the graph on new block notification and registering new
728
// topology clients.
729
//
730
// NOTE: This MUST be run as a goroutine.
731
func (b *Builder) networkHandler() {
24✔
732
        defer b.wg.Done()
24✔
733

24✔
734
        graphPruneTicker := time.NewTicker(b.cfg.GraphPruneInterval)
24✔
735
        defer graphPruneTicker.Stop()
24✔
736

24✔
737
        defer b.statTicker.Stop()
24✔
738

24✔
739
        b.stats.Reset()
24✔
740

24✔
741
        for {
131✔
742
                // If there are stats, resume the statTicker.
107✔
743
                if !b.stats.Empty() {
138✔
744
                        b.statTicker.Resume()
31✔
745
                }
31✔
746

747
                select {
107✔
748
                case chainUpdate, ok := <-b.staleBlocks:
12✔
749
                        // If the channel has been closed, then this indicates
12✔
750
                        // the daemon is shutting down, so we exit ourselves.
12✔
751
                        if !ok {
12✔
752
                                return
×
753
                        }
×
754

755
                        // Since this block is stale, we update our best height
756
                        // to the previous block.
757
                        blockHeight := chainUpdate.Height
12✔
758
                        b.bestHeight.Store(blockHeight - 1)
12✔
759

12✔
760
                        // Update the channel graph to reflect that this block
12✔
761
                        // was disconnected.
12✔
762
                        _, err := b.cfg.Graph.DisconnectBlockAtHeight(
12✔
763
                                blockHeight,
12✔
764
                        )
12✔
765
                        if err != nil {
12✔
766
                                log.Errorf("unable to prune graph with stale "+
×
767
                                        "block: %v", err)
×
768
                                continue
×
769
                        }
770

771
                        // TODO(halseth): notify client about the reorg?
772

773
                // A new block has arrived, so we can prune the channel graph
774
                // of any channels which were closed in the block.
775
                case chainUpdate, ok := <-b.newBlocks:
71✔
776
                        // If the channel has been closed, then this indicates
71✔
777
                        // the daemon is shutting down, so we exit ourselves.
71✔
778
                        if !ok {
71✔
779
                                return
×
780
                        }
×
781

782
                        // We'll ensure that any new blocks received attach
783
                        // directly to the end of our main chain. If not, then
784
                        // we've somehow missed some blocks. Here we'll catch
785
                        // up the chain with the latest blocks.
786
                        currentHeight := b.bestHeight.Load()
71✔
787
                        switch {
71✔
788
                        case chainUpdate.Height == currentHeight+1:
65✔
789
                                err := b.updateGraphWithClosedChannels(
65✔
790
                                        chainUpdate,
65✔
791
                                )
65✔
792
                                if err != nil {
65✔
793
                                        log.Errorf("unable to prune graph "+
×
794
                                                "with closed channels: %v", err)
×
795
                                }
×
796

797
                        case chainUpdate.Height > currentHeight+1:
1✔
798
                                log.Errorf("out of order block: expecting "+
1✔
799
                                        "height=%v, got height=%v",
1✔
800
                                        currentHeight+1, chainUpdate.Height)
1✔
801

1✔
802
                                err := b.getMissingBlocks(
1✔
803
                                        currentHeight, chainUpdate,
1✔
804
                                )
1✔
805
                                if err != nil {
1✔
806
                                        log.Errorf("unable to retrieve missing"+
×
807
                                                "blocks: %v", err)
×
808
                                }
×
809

810
                        case chainUpdate.Height < currentHeight+1:
5✔
811
                                log.Errorf("out of order block: expecting "+
5✔
812
                                        "height=%v, got height=%v",
5✔
813
                                        currentHeight+1, chainUpdate.Height)
5✔
814

5✔
815
                                log.Infof("Skipping channel pruning since "+
5✔
816
                                        "received block height %v was already"+
5✔
817
                                        " processed.", chainUpdate.Height)
5✔
818
                        }
819

820
                // A new notification client update has arrived. We're either
821
                // gaining a new client, or cancelling notifications for an
822
                // existing client.
823
                case ntfnUpdate := <-b.ntfnClientUpdates:
8✔
824
                        clientID := ntfnUpdate.clientID
8✔
825

8✔
826
                        if ntfnUpdate.cancel {
12✔
827
                                client, ok := b.topologyClients.LoadAndDelete(
4✔
828
                                        clientID,
4✔
829
                                )
4✔
830
                                if ok {
8✔
831
                                        close(client.exit)
4✔
832
                                        client.wg.Wait()
4✔
833

4✔
834
                                        close(client.ntfnChan)
4✔
835
                                }
4✔
836

837
                                continue
4✔
838
                        }
839

840
                        b.topologyClients.Store(clientID, &topologyClient{
7✔
841
                                ntfnChan: ntfnUpdate.ntfnChan,
7✔
842
                                exit:     make(chan struct{}),
7✔
843
                        })
7✔
844

845
                // The graph prune ticker has ticked, so we'll examine the
846
                // state of the known graph to filter out any zombie channels
847
                // for pruning.
848
                case <-graphPruneTicker.C:
×
849
                        if err := b.pruneZombieChans(); err != nil {
×
850
                                log.Errorf("Unable to prune zombies: %v", err)
×
851
                        }
×
852

853
                // Log any stats if we've processed a non-empty number of
854
                // channels, updates, or nodes. We'll only pause the ticker if
855
                // the last window contained no updates to avoid resuming and
856
                // pausing while consecutive windows contain new info.
857
                case <-b.statTicker.Ticks():
2✔
858
                        if !b.stats.Empty() {
4✔
859
                                log.Infof(b.stats.String())
2✔
860
                        } else {
2✔
861
                                b.statTicker.Pause()
×
862
                        }
×
863
                        b.stats.Reset()
2✔
864

865
                // The router has been signalled to exit, to we exit our main
866
                // loop so the wait group can be decremented.
867
                case <-b.quit:
22✔
868
                        return
22✔
869
                }
870
        }
871
}
872

873
// getMissingBlocks walks through all missing blocks and updates the graph
874
// closed channels accordingly.
875
func (b *Builder) getMissingBlocks(currentHeight uint32,
876
        chainUpdate *chainview.FilteredBlock) error {
1✔
877

1✔
878
        outdatedHash, err := b.cfg.Chain.GetBlockHash(int64(currentHeight))
1✔
879
        if err != nil {
1✔
880
                return err
×
881
        }
×
882

883
        outdatedBlock := &chainntnfs.BlockEpoch{
1✔
884
                Height: int32(currentHeight),
1✔
885
                Hash:   outdatedHash,
1✔
886
        }
1✔
887

1✔
888
        epochClient, err := b.cfg.Notifier.RegisterBlockEpochNtfn(
1✔
889
                outdatedBlock,
1✔
890
        )
1✔
891
        if err != nil {
1✔
892
                return err
×
893
        }
×
894
        defer epochClient.Cancel()
1✔
895

1✔
896
        blockDifference := int(chainUpdate.Height - currentHeight)
1✔
897

1✔
898
        // We'll walk through all the outdated blocks and make sure we're able
1✔
899
        // to update the graph with any closed channels from them.
1✔
900
        for i := 0; i < blockDifference; i++ {
6✔
901
                var (
5✔
902
                        missingBlock *chainntnfs.BlockEpoch
5✔
903
                        ok           bool
5✔
904
                )
5✔
905

5✔
906
                select {
5✔
907
                case missingBlock, ok = <-epochClient.Epochs:
5✔
908
                        if !ok {
5✔
909
                                return nil
×
910
                        }
×
911

912
                case <-b.quit:
×
913
                        return nil
×
914
                }
915

916
                filteredBlock, err := b.cfg.ChainView.FilterBlock(
5✔
917
                        missingBlock.Hash,
5✔
918
                )
5✔
919
                if err != nil {
5✔
920
                        return err
×
921
                }
×
922

923
                err = b.updateGraphWithClosedChannels(
5✔
924
                        filteredBlock,
5✔
925
                )
5✔
926
                if err != nil {
5✔
927
                        return err
×
928
                }
×
929
        }
930

931
        return nil
1✔
932
}
933

934
// updateGraphWithClosedChannels prunes the channel graph of closed channels
935
// that are no longer needed.
936
func (b *Builder) updateGraphWithClosedChannels(
937
        chainUpdate *chainview.FilteredBlock) error {
70✔
938

70✔
939
        // Once a new block arrives, we update our running track of the height
70✔
940
        // of the chain tip.
70✔
941
        blockHeight := chainUpdate.Height
70✔
942

70✔
943
        b.bestHeight.Store(blockHeight)
70✔
944
        log.Infof("Pruning channel graph using block %v (height=%v)",
70✔
945
                chainUpdate.Hash, blockHeight)
70✔
946

70✔
947
        // We're only interested in all prior outputs that have been spent in
70✔
948
        // the block, so collate all the referenced previous outpoints within
70✔
949
        // each tx and input.
70✔
950
        var spentOutputs []*wire.OutPoint
70✔
951
        for _, tx := range chainUpdate.Transactions {
74✔
952
                for _, txIn := range tx.TxIn {
8✔
953
                        spentOutputs = append(spentOutputs,
4✔
954
                                &txIn.PreviousOutPoint)
4✔
955
                }
4✔
956
        }
957

958
        // With the spent outputs gathered, attempt to prune the channel graph,
959
        // also passing in the hash+height of the block being pruned so the
960
        // prune tip can be updated.
961
        chansClosed, err := b.cfg.Graph.PruneGraph(spentOutputs,
70✔
962
                &chainUpdate.Hash, chainUpdate.Height)
70✔
963
        if err != nil {
70✔
964
                log.Errorf("unable to prune routing table: %v", err)
×
965
                return err
×
966
        }
×
967

968
        log.Infof("Block %v (height=%v) closed %v channels", chainUpdate.Hash,
70✔
969
                blockHeight, len(chansClosed))
70✔
970

70✔
971
        if len(chansClosed) == 0 {
139✔
972
                return err
69✔
973
        }
69✔
974

975
        // Notify all currently registered clients of the newly closed channels.
976
        closeSummaries := createCloseSummaries(blockHeight, chansClosed...)
4✔
977
        b.notifyTopologyChange(&TopologyChange{
4✔
978
                ClosedChannels: closeSummaries,
4✔
979
        })
4✔
980

4✔
981
        return nil
4✔
982
}
983

984
// assertNodeAnnFreshness returns a non-nil error if we have an announcement in
985
// the database for the passed node with a timestamp newer than the passed
986
// timestamp. ErrIgnored will be returned if we already have the node, and
987
// ErrOutdated will be returned if we have a timestamp that's after the new
988
// timestamp.
989
func (b *Builder) assertNodeAnnFreshness(node route.Vertex,
990
        msgTimestamp time.Time) error {
13✔
991

13✔
992
        // If we are not already aware of this node, it means that we don't
13✔
993
        // know about any channel using this node. To avoid a DoS attack by
13✔
994
        // node announcements, we will ignore such nodes. If we do know about
13✔
995
        // this node, check that this update brings info newer than what we
13✔
996
        // already have.
13✔
997
        lastUpdate, exists, err := b.cfg.Graph.HasLightningNode(node)
13✔
998
        if err != nil {
13✔
999
                return errors.Errorf("unable to query for the "+
×
1000
                        "existence of node: %v", err)
×
1001
        }
×
1002
        if !exists {
17✔
1003
                return NewErrf(ErrIgnored, "Ignoring node announcement"+
4✔
1004
                        " for node not found in channel graph (%x)",
4✔
1005
                        node[:])
4✔
1006
        }
4✔
1007

1008
        // If we've reached this point then we're aware of the vertex being
1009
        // advertised. So we now check if the new message has a new time stamp,
1010
        // if not then we won't accept the new data as it would override newer
1011
        // data.
1012
        if !lastUpdate.Before(msgTimestamp) {
16✔
1013
                return NewErrf(ErrOutdated, "Ignoring outdated "+
4✔
1014
                        "announcement for %x", node[:])
4✔
1015
        }
4✔
1016

1017
        return nil
11✔
1018
}
1019

1020
// addZombieEdge adds a channel that failed complete validation into the zombie
1021
// index so we can avoid having to re-validate it in the future.
1022
func (b *Builder) addZombieEdge(chanID uint64) error {
3✔
1023
        // If the edge fails validation we'll mark the edge itself as a zombie
3✔
1024
        // so we don't continue to request it. We use the "zero key" for both
3✔
1025
        // node pubkeys so this edge can't be resurrected.
3✔
1026
        var zeroKey [33]byte
3✔
1027
        err := b.cfg.Graph.MarkEdgeZombie(chanID, zeroKey, zeroKey)
3✔
1028
        if err != nil {
3✔
1029
                return fmt.Errorf("unable to mark spent chan(id=%v) as a "+
×
1030
                        "zombie: %w", chanID, err)
×
1031
        }
×
1032

1033
        return nil
3✔
1034
}
1035

1036
// makeFundingScript is used to make the funding script for both segwit v0 and
1037
// segwit v1 (taproot) channels.
1038
//
1039
// TODO(roasbeef: export and use elsewhere?
1040
func makeFundingScript(bitcoinKey1, bitcoinKey2 []byte, chanFeatures []byte,
1041
        tapscriptRoot fn.Option[chainhash.Hash]) ([]byte, error) {
19✔
1042

19✔
1043
        legacyFundingScript := func() ([]byte, error) {
38✔
1044
                witnessScript, err := input.GenMultiSigScript(
19✔
1045
                        bitcoinKey1, bitcoinKey2,
19✔
1046
                )
19✔
1047
                if err != nil {
19✔
1048
                        return nil, err
×
1049
                }
×
1050
                pkScript, err := input.WitnessScriptHash(witnessScript)
19✔
1051
                if err != nil {
19✔
1052
                        return nil, err
×
1053
                }
×
1054

1055
                return pkScript, nil
19✔
1056
        }
1057

1058
        if len(chanFeatures) == 0 {
35✔
1059
                return legacyFundingScript()
16✔
1060
        }
16✔
1061

1062
        // In order to make the correct funding script, we'll need to parse the
1063
        // chanFeatures bytes into a feature vector we can interact with.
1064
        rawFeatures := lnwire.NewRawFeatureVector()
3✔
1065
        err := rawFeatures.Decode(bytes.NewReader(chanFeatures))
3✔
1066
        if err != nil {
3✔
1067
                return nil, fmt.Errorf("unable to parse chan feature "+
×
1068
                        "bits: %w", err)
×
1069
        }
×
1070

1071
        chanFeatureBits := lnwire.NewFeatureVector(
3✔
1072
                rawFeatures, lnwire.Features,
3✔
1073
        )
3✔
1074
        if chanFeatureBits.HasFeature(
3✔
1075
                lnwire.SimpleTaprootChannelsOptionalStaging,
3✔
1076
        ) {
6✔
1077

3✔
1078
                pubKey1, err := btcec.ParsePubKey(bitcoinKey1)
3✔
1079
                if err != nil {
3✔
1080
                        return nil, err
×
1081
                }
×
1082
                pubKey2, err := btcec.ParsePubKey(bitcoinKey2)
3✔
1083
                if err != nil {
3✔
1084
                        return nil, err
×
1085
                }
×
1086

1087
                fundingScript, _, err := input.GenTaprootFundingScript(
3✔
1088
                        pubKey1, pubKey2, 0, tapscriptRoot,
3✔
1089
                )
3✔
1090
                if err != nil {
3✔
1091
                        return nil, err
×
1092
                }
×
1093

1094
                // TODO(roasbeef): add tapscript root to gossip v1.5
1095

1096
                return fundingScript, nil
3✔
1097
        }
1098

1099
        return legacyFundingScript()
3✔
1100
}
1101

1102
// ApplyChannelUpdate validates a channel update and if valid, applies it to the
1103
// database. It returns a bool indicating whether the updates were successful.
1104
func (b *Builder) ApplyChannelUpdate(msg *lnwire.ChannelUpdate1) bool {
3✔
1105
        ch, _, _, err := b.GetChannelByID(msg.ShortChannelID)
3✔
1106
        if err != nil {
6✔
1107
                log.Errorf("Unable to retrieve channel by id: %v", err)
3✔
1108
                return false
3✔
1109
        }
3✔
1110

1111
        var pubKey *btcec.PublicKey
3✔
1112

3✔
1113
        switch msg.ChannelFlags & lnwire.ChanUpdateDirection {
3✔
1114
        case 0:
3✔
1115
                pubKey, _ = ch.NodeKey1()
3✔
1116

1117
        case 1:
3✔
1118
                pubKey, _ = ch.NodeKey2()
3✔
1119
        }
1120

1121
        // Exit early if the pubkey cannot be decided.
1122
        if pubKey == nil {
3✔
1123
                log.Errorf("Unable to decide pubkey with ChannelFlags=%v",
×
1124
                        msg.ChannelFlags)
×
1125
                return false
×
1126
        }
×
1127

1128
        err = netann.ValidateChannelUpdateAnn(pubKey, ch.Capacity, msg)
3✔
1129
        if err != nil {
3✔
1130
                log.Errorf("Unable to validate channel update: %v", err)
×
1131
                return false
×
1132
        }
×
1133

1134
        err = b.UpdateEdge(&models.ChannelEdgePolicy{
3✔
1135
                SigBytes:                  msg.Signature.ToSignatureBytes(),
3✔
1136
                ChannelID:                 msg.ShortChannelID.ToUint64(),
3✔
1137
                LastUpdate:                time.Unix(int64(msg.Timestamp), 0),
3✔
1138
                MessageFlags:              msg.MessageFlags,
3✔
1139
                ChannelFlags:              msg.ChannelFlags,
3✔
1140
                TimeLockDelta:             msg.TimeLockDelta,
3✔
1141
                MinHTLC:                   msg.HtlcMinimumMsat,
3✔
1142
                MaxHTLC:                   msg.HtlcMaximumMsat,
3✔
1143
                FeeBaseMSat:               lnwire.MilliSatoshi(msg.BaseFee),
3✔
1144
                FeeProportionalMillionths: lnwire.MilliSatoshi(msg.FeeRate),
3✔
1145
                ExtraOpaqueData:           msg.ExtraOpaqueData,
3✔
1146
        })
3✔
1147
        if err != nil && !IsError(err, ErrIgnored, ErrOutdated) {
3✔
1148
                log.Errorf("Unable to apply channel update: %v", err)
×
1149
                return false
×
1150
        }
×
1151

1152
        return true
3✔
1153
}
1154

1155
// AddNode is used to add information about a node to the router database. If
1156
// the node with this pubkey is not present in an existing channel, it will
1157
// be ignored.
1158
//
1159
// NOTE: This method is part of the ChannelGraphSource interface.
1160
func (b *Builder) AddNode(node *models.LightningNode,
1161
        op ...batch.SchedulerOption) error {
10✔
1162

10✔
1163
        return b.handleNetworkUpdate(node, op...)
10✔
1164
}
10✔
1165

1166
// addNode does some basic checks on the given LightningNode against what we
1167
// currently have persisted in the graph, and then adds it to the graph. If we
1168
// already know about the node, then we only update our DB if the new update
1169
// has a newer timestamp than the last one we received.
1170
func (b *Builder) addNode(node *models.LightningNode,
1171
        op ...batch.SchedulerOption) error {
10✔
1172

10✔
1173
        // Before we add the node to the database, we'll check to see if the
10✔
1174
        // announcement is "fresh" or not. If it isn't, then we'll return an
10✔
1175
        // error.
10✔
1176
        err := b.assertNodeAnnFreshness(node.PubKeyBytes, node.LastUpdate)
10✔
1177
        if err != nil {
14✔
1178
                return err
4✔
1179
        }
4✔
1180

1181
        if err := b.cfg.Graph.AddLightningNode(node, op...); err != nil {
9✔
NEW
1182
                return errors.Errorf("unable to add node %x to the "+
×
NEW
1183
                        "graph: %v", node.PubKeyBytes, err)
×
UNCOV
1184
        }
×
1185

1186
        log.Tracef("Updated vertex data for node=%x", node.PubKeyBytes)
9✔
1187
        b.stats.incNumNodeUpdates()
9✔
1188

9✔
1189
        return nil
9✔
1190
}
1191

1192
// AddEdge is used to add edge/channel to the topology of the router, after all
1193
// information about channel will be gathered this edge/channel might be used
1194
// in construction of payment path.
1195
//
1196
// NOTE: This method is part of the ChannelGraphSource interface.
1197
func (b *Builder) AddEdge(edge *models.ChannelEdgeInfo,
1198
        op ...batch.SchedulerOption) error {
20✔
1199

20✔
1200
        return b.handleNetworkUpdate(edge, op...)
20✔
1201
}
20✔
1202

1203
// addEdge does some validation on the new channel edge against what we
// currently have persisted in the graph, and then adds it to the graph. The
// Chain View is updated with the new edge if it is successfully added to the
// graph. We only persist the channel if we currently don't have it at all in
// our graph.
//
// TODO(elle): this currently also does funding-transaction validation. But this
// should be moved to the gossiper instead.
func (b *Builder) addEdge(edge *models.ChannelEdgeInfo,
        op ...batch.SchedulerOption) error {

        log.Debugf("Received ChannelEdgeInfo for channel %v", edge.ChannelID)

        // Prior to processing the announcement we first check if we
        // already know of this channel, if so, then we can exit early.
        _, _, exists, isZombie, err := b.cfg.Graph.HasChannelEdge(
                edge.ChannelID,
        )
        if err != nil && !errors.Is(err, graphdb.ErrGraphNoEdgesFound) {
                return errors.Errorf("unable to check for edge existence: %v",
                        err)
        }
        if isZombie {
                return NewErrf(ErrIgnored, "ignoring msg for zombie chan_id=%v",
                        edge.ChannelID)
        }
        if exists {
                return NewErrf(ErrIgnored, "ignoring msg for known chan_id=%v",
                        edge.ChannelID)
        }

        // If AssumeChannelValid is present, then we are unable to perform any
        // of the expensive checks below, so we'll short-circuit our path
        // straight to adding the edge to our graph. If the passed
        // ShortChannelID is an alias, then we'll skip validation as it will
        // not map to a legitimate tx. This is not a DoS vector as only we can
        // add an alias ChannelAnnouncement from the gossiper.
        scid := lnwire.NewShortChanIDFromInt(edge.ChannelID)
        if b.cfg.AssumeChannelValid || b.cfg.IsAlias(scid) {
                err := b.cfg.Graph.AddChannelEdge(edge, op...)
                if err != nil {
                        return fmt.Errorf("unable to add edge: %w", err)
                }
                log.Tracef("New channel discovered! Link connects %x and %x "+
                        "with ChannelID(%v)", edge.NodeKey1Bytes,
                        edge.NodeKey2Bytes, edge.ChannelID)
                b.stats.incNumEdgesDiscovered()

                return nil
        }

        // Before we can add the channel to the channel graph, we need to obtain
        // the full funding outpoint that's encoded within the channel ID.
        channelID := lnwire.NewShortChanIDFromInt(edge.ChannelID)
        fundingTx, err := lnwallet.FetchFundingTxWrapper(
                b.cfg.Chain, &channelID, b.quit,
        )
        if err != nil {
                //nolint:ll
                //
                // In order to ensure we don't erroneously mark a channel as a
                // zombie due to an RPC failure, we'll attempt to string match
                // for the relevant errors.
                //
                // * btcd:
                //    * https://github.com/btcsuite/btcd/blob/master/rpcserver.go#L1316
                //    * https://github.com/btcsuite/btcd/blob/master/rpcserver.go#L1086
                // * bitcoind:
                //    * https://github.com/bitcoin/bitcoin/blob/7fcf53f7b4524572d1d0c9a5fdc388e87eb02416/src/rpc/blockchain.cpp#L770
                //     * https://github.com/bitcoin/bitcoin/blob/7fcf53f7b4524572d1d0c9a5fdc388e87eb02416/src/rpc/blockchain.cpp#L954
                switch {
                case strings.Contains(err.Error(), "not found"):
                        fallthrough

                case strings.Contains(err.Error(), "out of range"):
                        // If the funding transaction isn't found at all, then
                        // we'll mark the edge itself as a zombie so we don't
                        // continue to request it. We use the "zero key" for
                        // both node pubkeys so this edge can't be resurrected.
                        zErr := b.addZombieEdge(edge.ChannelID)
                        if zErr != nil {
                                return zErr
                        }

                default:
                }

                return NewErrf(ErrNoFundingTransaction, "unable to "+
                        "locate funding tx: %v", err)
        }

        // Recreate the witness output to verify that the bitcoin keys and
        // channel value declared in the channel edge correspond to reality.
        fundingPkScript, err := makeFundingScript(
                edge.BitcoinKey1Bytes[:], edge.BitcoinKey2Bytes[:],
                edge.Features, edge.TapscriptRoot,
        )
        if err != nil {
                return err
        }

        // Next we'll validate that this channel is actually well formed. If
        // this check fails, then this channel either doesn't exist, or isn't
        // the one that was meant to be created according to the passed channel
        // proofs.
        fundingPoint, err := chanvalidate.Validate(
                &chanvalidate.Context{
                        Locator: &chanvalidate.ShortChanIDChanLocator{
                                ID: channelID,
                        },
                        MultiSigPkScript: fundingPkScript,
                        FundingTx:        fundingTx,
                },
        )
        if err != nil {
                // Mark the edge as a zombie so we won't try to re-validate it
                // on start up.
                if err := b.addZombieEdge(edge.ChannelID); err != nil {
                        return err
                }

                return NewErrf(ErrInvalidFundingOutput, "output failed "+
                        "validation: %w", err)
        }

        // Now that we have the funding outpoint of the channel, ensure
        // that it hasn't yet been spent. If so, then this channel has
        // been closed so we'll ignore it.
        chanUtxo, err := b.cfg.Chain.GetUtxo(
                fundingPoint, fundingPkScript, channelID.BlockHeight, b.quit,
        )
        if err != nil {
                if errors.Is(err, btcwallet.ErrOutputSpent) {
                        zErr := b.addZombieEdge(edge.ChannelID)
                        if zErr != nil {
                                return zErr
                        }
                }

                return NewErrf(ErrChannelSpent, "unable to fetch utxo for "+
                        "chan_id=%v, chan_point=%v: %v", edge.ChannelID,
                        fundingPoint, err)
        }

        // TODO(roasbeef): this is a hack, needs to be removed after commitment
        // fees are dynamic.
        edge.Capacity = btcutil.Amount(chanUtxo.Value)
        edge.ChannelPoint = *fundingPoint
        if err := b.cfg.Graph.AddChannelEdge(edge, op...); err != nil {
                return errors.Errorf("unable to add edge: %v", err)
        }

        log.Debugf("New channel discovered! Link connects %x and %x with "+
                "ChannelPoint(%v): chan_id=%v, capacity=%v", edge.NodeKey1Bytes,
                edge.NodeKey2Bytes, fundingPoint, edge.ChannelID, edge.Capacity)
        b.stats.incNumEdgesDiscovered()

        // As a new edge has been added to the channel graph, we'll update the
        // current UTXO filter within our active FilteredChainView so we are
        // notified if/when this channel is closed.
        filterUpdate := []graphdb.EdgePoint{
                {
                        FundingPkScript: fundingPkScript,
                        OutPoint:        *fundingPoint,
                },
        }

        err = b.cfg.ChainView.UpdateFilter(filterUpdate, b.bestHeight.Load())
        if err != nil {
                return errors.Errorf("unable to update chain "+
                        "view: %v", err)
        }

        return nil
}

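// Editorial sketch (not part of the original builder.go): the short channel ID
// that addEdge resolves into a funding outpoint simply encodes the on-chain
// location of the funding transaction. The numeric value below is made up for
// illustration only.
func exampleShortChannelID() {
        scid := lnwire.NewShortChanIDFromInt(824543612253110272)

        // A short channel ID packs the block height, the transaction index
        // within that block, and the index of the funding output.
        log.Debugf("block_height=%v, tx_index=%v, output_index=%v",
                scid.BlockHeight, scid.TxIndex, scid.TxPosition)
}
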
// UpdateEdge is used to update edge information. Without this message, the
// edge is considered as not fully constructed.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (b *Builder) UpdateEdge(update *models.ChannelEdgePolicy,
        op ...batch.SchedulerOption) error {

        return b.handleNetworkUpdate(update, op...)
}

// updateEdge validates the new edge policy against what we currently have
// persisted in the graph, and then applies it to the graph if the update is
// considered fresh enough and if we actually have a channel persisted for the
// given update.
func (b *Builder) updateEdge(policy *models.ChannelEdgePolicy,
        op ...batch.SchedulerOption) error {

        log.Debugf("Received ChannelEdgePolicy for channel %v",
                policy.ChannelID)

        // We make sure to hold the mutex for this channel ID, such that no
        // other goroutine is concurrently doing database accesses for the same
        // channel ID.
        b.channelEdgeMtx.Lock(policy.ChannelID)
        defer b.channelEdgeMtx.Unlock(policy.ChannelID)

        edge1Timestamp, edge2Timestamp, exists, isZombie, err :=
                b.cfg.Graph.HasChannelEdge(policy.ChannelID)
        if err != nil && !errors.Is(err, graphdb.ErrGraphNoEdgesFound) {
                return errors.Errorf("unable to check for edge existence: %v",
                        err)
        }

        // If the channel is marked as a zombie in our database, and
        // we consider this a stale update, then we should not apply the
        // policy.
        isStaleUpdate := time.Since(policy.LastUpdate) >
                b.cfg.ChannelPruneExpiry

        if isZombie && isStaleUpdate {
                return NewErrf(ErrIgnored, "ignoring stale update "+
                        "(flags=%v|%v) for zombie chan_id=%v",
                        policy.MessageFlags, policy.ChannelFlags,
                        policy.ChannelID)
        }

        // If the channel doesn't exist in our database, we cannot apply the
        // updated policy.
        if !exists {
                return NewErrf(ErrIgnored, "ignoring update (flags=%v|%v) for "+
                        "unknown chan_id=%v", policy.MessageFlags,
                        policy.ChannelFlags, policy.ChannelID)
        }

        log.Debugf("Found edge1Timestamp=%v, edge2Timestamp=%v",
                edge1Timestamp, edge2Timestamp)

        // As edges are directional, each node has a unique policy for the
        // direction of the edge it controls. Therefore, we first check if we
        // already have the most up-to-date information for that edge. If this
        // message has a timestamp that is not strictly newer than what we
        // already know of, we can exit early.
        switch policy.ChannelFlags & lnwire.ChanUpdateDirection {
        // A flag set of 0 indicates this is an announcement for the "first"
        // node in the channel.
        case 0:
                // Ignore outdated message.
                if !edge1Timestamp.Before(policy.LastUpdate) {
                        return NewErrf(ErrOutdated, "Ignoring "+
                                "outdated update (flags=%v|%v) for "+
                                "known chan_id=%v", policy.MessageFlags,
                                policy.ChannelFlags, policy.ChannelID)
                }

        // Similarly, a flag set of 1 indicates this is an announcement
        // for the "second" node in the channel.
        case 1:
                // Ignore outdated message.
                if !edge2Timestamp.Before(policy.LastUpdate) {
                        return NewErrf(ErrOutdated, "Ignoring "+
                                "outdated update (flags=%v|%v) for "+
                                "known chan_id=%v", policy.MessageFlags,
                                policy.ChannelFlags, policy.ChannelID)
                }
        }

        // Now that we know this isn't a stale update, we'll apply the new edge
        // policy to the proper directional edge within the channel graph.
        if err = b.cfg.Graph.UpdateEdgePolicy(policy, op...); err != nil {
                err := errors.Errorf("unable to add channel: %v", err)
                log.Error(err)
                return err
        }

        log.Tracef("New channel update applied: %v",
                lnutils.SpewLogClosure(policy))
        b.stats.incNumChannelUpdates()

        return nil
}

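// Editorial sketch (not part of the original builder.go): the direction bit of
// ChannelFlags selects which of the two stored policies an incoming update is
// compared against, and only a strictly newer timestamp is accepted.
// isNewerPolicyUpdate is a hypothetical helper that mirrors the rule used by
// updateEdge above.
func isNewerPolicyUpdate(flags lnwire.ChanUpdateChanFlags,
        edge1Timestamp, edge2Timestamp, update time.Time) bool {

        if flags&lnwire.ChanUpdateDirection == 0 {
                // Direction bit 0: the update belongs to the "first" node, so
                // it must be newer than the stored edge1 policy.
                return edge1Timestamp.Before(update)
        }

        // Direction bit 1: the update belongs to the "second" node, so it must
        // be newer than the stored edge2 policy.
        return edge2Timestamp.Before(update)
}
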
// CurrentBlockHeight returns the block height from the PoV of the router
// subsystem.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (b *Builder) CurrentBlockHeight() (uint32, error) {
        _, height, err := b.cfg.Chain.GetBestBlock()
        return uint32(height), err
}

// SyncedHeight returns the block height to which the router subsystem is
// currently synced. This can differ from the above chain height if the
// goroutine responsible for processing the blocks isn't yet up to speed.
func (b *Builder) SyncedHeight() uint32 {
        return b.bestHeight.Load()
}

// GetChannelByID returns the channel by the channel id.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (b *Builder) GetChannelByID(chanID lnwire.ShortChannelID) (
        *models.ChannelEdgeInfo,
        *models.ChannelEdgePolicy,
        *models.ChannelEdgePolicy, error) {

        return b.cfg.Graph.FetchChannelEdgesByID(chanID.ToUint64())
}

// FetchLightningNode attempts to look up a target node by its identity public
// key. graphdb.ErrGraphNodeNotFound is returned if the node doesn't exist
// within the graph.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (b *Builder) FetchLightningNode(
        node route.Vertex) (*models.LightningNode, error) {

        return b.cfg.Graph.FetchLightningNode(node)
}

// ForEachNode is used to iterate over every node in the router topology.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (b *Builder) ForEachNode(
        cb func(*models.LightningNode) error) error {

        return b.cfg.Graph.ForEachNode(
                func(_ kvdb.RTx, n *models.LightningNode) error {
                        return cb(n)
                })
}

// ForAllOutgoingChannels is used to iterate over all outgoing channels owned by
// the router.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (b *Builder) ForAllOutgoingChannels(cb func(*models.ChannelEdgeInfo,
        *models.ChannelEdgePolicy) error) error {

        return b.cfg.Graph.ForEachNodeChannel(b.cfg.SelfNode,
                func(_ kvdb.RTx, c *models.ChannelEdgeInfo,
                        e *models.ChannelEdgePolicy,
                        _ *models.ChannelEdgePolicy) error {

                        if e == nil {
                                return fmt.Errorf("channel from self node " +
                                        "has no policy")
                        }

                        return cb(c, e)
                },
        )
}

// AddProof updates the channel edge info with proof which is needed to
// properly announce the edge to the rest of the network.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (b *Builder) AddProof(chanID lnwire.ShortChannelID,
        proof *models.ChannelAuthProof) error {

        info, _, _, err := b.cfg.Graph.FetchChannelEdgesByID(chanID.ToUint64())
        if err != nil {
                return err
        }

        info.AuthProof = proof

        return b.cfg.Graph.UpdateChannelEdge(info)
}

// IsStaleNode returns true if the graph source has a node announcement for the
// target node with a more recent timestamp.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (b *Builder) IsStaleNode(node route.Vertex,
        timestamp time.Time) bool {

        // If our attempt to assert that the node announcement is fresh fails,
        // then we know that this is actually a stale announcement.
        err := b.assertNodeAnnFreshness(node, timestamp)
        if err != nil {
                log.Debugf("Checking stale node %x got %v", node, err)
                return true
        }

        return false
}

// IsPublicNode determines whether the given vertex is seen as a public node in
// the graph from the graph's source node's point of view.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (b *Builder) IsPublicNode(node route.Vertex) (bool, error) {
        return b.cfg.Graph.IsPublicNode(node)
}

// IsKnownEdge returns true if the graph source already knows of the passed
// channel ID either as a live or zombie edge.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (b *Builder) IsKnownEdge(chanID lnwire.ShortChannelID) bool {
        _, _, exists, isZombie, _ := b.cfg.Graph.HasChannelEdge(
                chanID.ToUint64(),
        )

        return exists || isZombie
}

// IsStaleEdgePolicy returns true if the graph source has a channel edge for
// the passed channel ID (and flags) that has a more recent timestamp.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (b *Builder) IsStaleEdgePolicy(chanID lnwire.ShortChannelID,
        timestamp time.Time, flags lnwire.ChanUpdateChanFlags) bool {

        edge1Timestamp, edge2Timestamp, exists, isZombie, err :=
                b.cfg.Graph.HasChannelEdge(chanID.ToUint64())
        if err != nil {
                log.Debugf("Check stale edge policy got error: %v", err)
                return false
        }

        // If we know of the edge as a zombie, then we'll make some additional
        // checks to determine if the new policy is fresh.
        if isZombie {
                // When running with AssumeChannelValid, we also prune channels
                // if both of their edges are disabled. We'll mark the new
                // policy as stale if it remains disabled.
                if b.cfg.AssumeChannelValid {
                        isDisabled := flags&lnwire.ChanUpdateDisabled ==
                                lnwire.ChanUpdateDisabled
                        if isDisabled {
                                return true
                        }
                }

                // Otherwise, we'll fall back to our usual ChannelPruneExpiry.
                return time.Since(timestamp) > b.cfg.ChannelPruneExpiry
        }

        // If we don't know of the edge, then it means it's fresh (thus not
        // stale).
        if !exists {
                return false
        }

        // As edges are directional, each node has a unique policy for the
        // direction of the edge it controls. Therefore, we first check if we
        // already have the most up-to-date information for that edge. If so,
        // then we can exit early.
        switch {
        // A flag set of 0 indicates this is an announcement for the "first"
        // node in the channel.
        case flags&lnwire.ChanUpdateDirection == 0:
                return !edge1Timestamp.Before(timestamp)

        // Similarly, a flag set of 1 indicates this is an announcement for the
        // "second" node in the channel.
        case flags&lnwire.ChanUpdateDirection == 1:
                return !edge2Timestamp.Before(timestamp)
        }

        return false
}

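// Editorial sketch (not part of the original builder.go): for a channel that
// is already marked as a zombie, an incoming policy is treated as stale once
// it is older than the prune expiry, mirroring the fallback used by
// IsStaleEdgePolicy above. isStaleZombieUpdate is a hypothetical helper, and
// DefaultChannelPruneExpiry (two weeks) stands in for the configured value.
func isStaleZombieUpdate(updateTimestamp time.Time) bool {
        // An update older than the prune expiry cannot resurrect the channel.
        return time.Since(updateTimestamp) > DefaultChannelPruneExpiry
}
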
// MarkEdgeLive clears an edge from our zombie index, deeming it as live.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (b *Builder) MarkEdgeLive(chanID lnwire.ShortChannelID) error {
        return b.cfg.Graph.MarkEdgeLive(chanID.ToUint64())
}