• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

lightningnetwork / lnd / 18197857992

02 Oct 2025 03:32PM UTC coverage: 66.622% (-0.02%) from 66.646%
18197857992

Pull #10267

github

web-flow
Merge 0d9bfccfe into 1c2ff4a7e
Pull Request #10267: [g175] multi: small G175 preparations

24 of 141 new or added lines in 12 files covered. (17.02%)

64 existing lines in 20 files now uncovered.

137216 of 205963 relevant lines covered (66.62%)

21302.01 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

78.32
/graph/db/kv_store.go
1
package graphdb
2

3
import (
4
        "bytes"
5
        "context"
6
        "crypto/sha256"
7
        "encoding/binary"
8
        "errors"
9
        "fmt"
10
        "io"
11
        "iter"
12
        "math"
13
        "net"
14
        "sort"
15
        "sync"
16
        "time"
17

18
        "github.com/btcsuite/btcd/btcec/v2"
19
        "github.com/btcsuite/btcd/chaincfg/chainhash"
20
        "github.com/btcsuite/btcd/txscript"
21
        "github.com/btcsuite/btcd/wire"
22
        "github.com/btcsuite/btcwallet/walletdb"
23
        "github.com/lightningnetwork/lnd/aliasmgr"
24
        "github.com/lightningnetwork/lnd/batch"
25
        "github.com/lightningnetwork/lnd/fn/v2"
26
        "github.com/lightningnetwork/lnd/graph/db/models"
27
        "github.com/lightningnetwork/lnd/input"
28
        "github.com/lightningnetwork/lnd/kvdb"
29
        "github.com/lightningnetwork/lnd/lnwire"
30
        "github.com/lightningnetwork/lnd/routing/route"
31
)
32

33
var (
	// nodeBucket is a bucket which houses all the vertices or nodes within
	// the channel graph. This bucket has a single sub-bucket which adds an
	// additional index from pubkey -> alias. Within the top-level of this
	// bucket, the key space maps a node's compressed public key to the
	// serialized information for that node. Additionally, there's a
	// special key "source" which stores the pubkey of the source node. The
	// source node is used as the starting point for all graph queries and
	// traversals. The graph is formed as a star-graph with the source node
	// at the center.
	//
	// maps: pubKey -> nodeInfo
	// maps: source -> selfPubKey
	nodeBucket = []byte("graph-node")

	// nodeUpdateIndexBucket is a sub-bucket of the nodeBucket. This bucket
	// will be used to quickly look up the "freshness" of a node's last
	// update to the network. The bucket only contains keys, and no values,
	// it's mapping:
	//
	// maps: updateTime || nodeID -> nil
	nodeUpdateIndexBucket = []byte("graph-node-update-index")

	// sourceKey is a special key that resides within the nodeBucket. The
	// sourceKey maps a key to the public key of the "self node".
	sourceKey = []byte("source")

	// aliasIndexBucket is a sub-bucket that's nested within the main
	// nodeBucket. This bucket maps the public key of a node to its
	// current alias. This bucket is provided as it can be used within a
	// future UI layer to add an additional degree of confirmation.
	aliasIndexBucket = []byte("alias")

	// edgeBucket is a bucket which houses all of the edge or channel
	// information within the channel graph. This bucket essentially acts
	// as an adjacency list, which in conjunction with a range scan, can be
	// used to iterate over all the incoming and outgoing edges for a
	// particular node. Keys in the bucket use a prefix scheme which leads
	// with the node's public key and ends with the compact edge ID.
	// For each chanID, there will be two entries within the bucket, as the
	// graph is directed: nodes may have different policies w.r.t to fees
	// for their respective directions.
	//
	// maps: pubKey || chanID -> channel edge policy for node
	edgeBucket = []byte("graph-edge")

	// unknownPolicy is represented as an empty slice. It is
	// used as the value in edgeBucket for unknown channel edge policies.
	// Unknown policies are still stored in the database to enable efficient
	// lookup of incoming channel edges.
	unknownPolicy = []byte{}

	// chanStart is an array of all zero bytes which is used to perform
	// range scans within the edgeBucket to obtain all of the outgoing
	// edges for a particular node.
	chanStart [8]byte

	// edgeIndexBucket is an index which can be used to iterate all edges
	// in the bucket, grouping them according to their in/out nodes.
	// Additionally, the items in this bucket also contain the complete
	// edge information for a channel. The edge information includes the
	// capacity of the channel, the nodes that made the channel, etc. This
	// bucket resides within the edgeBucket above. Creation of an edge
	// proceeds in two phases: first the edge is added to the edge index,
	// afterwards the edgeBucket can be updated with the latest details of
	// the edge as they are announced on the network.
	//
	// maps: chanID -> pubKey1 || pubKey2 || restofEdgeInfo
	edgeIndexBucket = []byte("edge-index")

	// edgeUpdateIndexBucket is a sub-bucket of the main edgeBucket. This
	// bucket contains an index which allows us to gauge the "freshness" of
	// a channel's last updates.
	//
	// maps: updateTime || chanID -> nil
	edgeUpdateIndexBucket = []byte("edge-update-index")

	// channelPointBucket maps a channel's full outpoint (txid:index) to
	// its short 8-byte channel ID. This bucket resides within the
	// edgeBucket above, and can be used to quickly remove an edge due to
	// the outpoint being spent, or to query for existence of a channel.
	//
	// maps: outPoint -> chanID
	channelPointBucket = []byte("chan-index")

	// zombieBucket is a sub-bucket of the main edgeBucket bucket
	// responsible for maintaining an index of zombie channels. Each entry
	// exists within the bucket as follows:
	//
	// maps: chanID -> pubKey1 || pubKey2
	//
	// The chanID represents the channel ID of the edge that is marked as a
	// zombie and is used as the key, which maps to the public keys of the
	// edge's participants.
	zombieBucket = []byte("zombie-index")

	// disabledEdgePolicyBucket is a sub-bucket of the main edgeBucket
	// bucket responsible for maintaining an index of disabled edge
	// policies. Each entry exists within the bucket as follows:
	//
	// maps: <chanID><direction> -> []byte{}
	//
	// The chanID represents the channel ID of the edge and the direction is
	// one byte representing the direction of the edge. The main purpose of
	// this index is to allow pruning disabled channels in a fast way
	// without the need to iterate all over the graph.
	disabledEdgePolicyBucket = []byte("disabled-edge-policy-index")

	// graphMetaBucket is a top-level bucket which stores various meta-data
	// related to the on-disk channel graph. Data stored in this bucket
	// includes the block to which the graph has been synced to, the total
	// number of channels, etc.
	graphMetaBucket = []byte("graph-meta")

	// pruneLogBucket is a bucket within the graphMetaBucket that stores
	// a mapping from the block height to the hash for the blocks used to
	// prune the graph.
	// Once a new block is discovered, any channels that have been closed
	// (by spending the outpoint) can safely be removed from the graph, and
	// the block is added to the prune log. We need to keep such a log for
	// the case where a reorg happens, and we must "rewind" the state of the
	// graph by removing channels that were previously confirmed. In such a
	// case we'll remove all entries from the prune log with a block height
	// that no longer exists.
	pruneLogBucket = []byte("prune-log")

	// closedScidBucket is a top-level bucket that stores scids for
	// channels that we know to be closed. This is used so that we don't
	// need to perform expensive validation checks if we receive a channel
	// announcement for the channel again.
	//
	// maps: scid -> []byte{}
	closedScidBucket = []byte("closed-scid")
)
167

168
const (
	// MaxAllowedExtraOpaqueBytes is the largest amount of opaque bytes that
	// we'll permit to be written to disk. We limit this as otherwise, it
	// would be possible for a node to create a ton of updates and slowly
	// fill our disk, and also waste bandwidth due to relaying.
	MaxAllowedExtraOpaqueBytes = 10000
)
175

176
// KVStore is a persistent, on-disk graph representation of the Lightning
// Network. This struct can be used to implement path finding algorithms on top
// of, and also to update a node's view based on information received from the
// p2p network. Internally, the graph is stored using a modified adjacency list
// representation with some added object interaction possible with each
// serialized edge/node. The graph stored is directed, meaning that there are
// two edges stored for each channel: an inbound/outbound edge for each node
// pair. Nodes, edges, and edge information can all be added to the graph
// independently. Edge removal results in the deletion of all edge information
// for that edge.
type KVStore struct {
	// db is the key/value backend that all graph data is persisted to.
	db kvdb.Backend

	// cacheMu guards all caches (rejectCache and chanCache). If
	// this mutex will be acquired at the same time as the DB mutex then
	// the cacheMu MUST be acquired first to prevent deadlock.
	cacheMu     sync.RWMutex
	rejectCache *rejectCache
	chanCache   *channelCache

	// chanScheduler and nodeScheduler batch multiple channel/node writes
	// into single DB transactions.
	chanScheduler batch.Scheduler[kvdb.RwTx]
	nodeScheduler batch.Scheduler[kvdb.RwTx]
}
199

200
// A compile-time assertion to ensure that the KVStore struct implements the
// V1Store interface.
var _ V1Store = (*KVStore)(nil)
203

204
// NewKVStore allocates a new KVStore backed by a DB instance. The
205
// returned instance has its own unique reject cache and channel cache.
206
func NewKVStore(db kvdb.Backend, options ...StoreOptionModifier) (*KVStore,
207
        error) {
184✔
208

184✔
209
        opts := DefaultOptions()
184✔
210
        for _, o := range options {
187✔
211
                o(opts)
3✔
212
        }
3✔
213

214
        if !opts.NoMigration {
368✔
215
                if err := initKVStore(db); err != nil {
184✔
216
                        return nil, err
×
217
                }
×
218
        }
219

220
        g := &KVStore{
184✔
221
                db:          db,
184✔
222
                rejectCache: newRejectCache(opts.RejectCacheSize),
184✔
223
                chanCache:   newChannelCache(opts.ChannelCacheSize),
184✔
224
        }
184✔
225
        g.chanScheduler = batch.NewTimeScheduler(
184✔
226
                batch.NewBoltBackend[kvdb.RwTx](db), &g.cacheMu,
184✔
227
                opts.BatchCommitInterval,
184✔
228
        )
184✔
229
        g.nodeScheduler = batch.NewTimeScheduler(
184✔
230
                batch.NewBoltBackend[kvdb.RwTx](db), nil,
184✔
231
                opts.BatchCommitInterval,
184✔
232
        )
184✔
233

184✔
234
        return g, nil
184✔
235
}
236

237
// channelMapKey is the key structure used for storing channel edge policies.
// It mirrors the on-disk edgeBucket key layout: pubKey (33 bytes) || chanID
// (8 bytes).
type channelMapKey struct {
	nodeKey route.Vertex
	chanID  [8]byte
}

// String returns a human-readable representation of the key.
func (c channelMapKey) String() string {
	return fmt.Sprintf("node=%v, chanID=%x", c.nodeKey, c.chanID)
}
×
247

248
// getChannelMap loads all channel edge policies from the database and stores
// them in a map keyed by (node pubkey, channel ID).
func getChannelMap(edges kvdb.RBucket) (
	map[channelMapKey]*models.ChannelEdgePolicy, error) {

	// Create a map to store all channel edge policies.
	channelMap := make(map[channelMapKey]*models.ChannelEdgePolicy)

	err := kvdb.ForAll(edges, func(k, edgeBytes []byte) error {
		// Skip embedded buckets: the edge bucket also houses several
		// nested index buckets whose names appear as keys here.
		if bytes.Equal(k, edgeIndexBucket) ||
			bytes.Equal(k, edgeUpdateIndexBucket) ||
			bytes.Equal(k, zombieBucket) ||
			bytes.Equal(k, disabledEdgePolicyBucket) ||
			bytes.Equal(k, channelPointBucket) {

			return nil
		}

		// Validate key length: pubKey (33 bytes) || chanID (8 bytes).
		if len(k) != 33+8 {
			return fmt.Errorf("invalid edge key %x encountered", k)
		}

		var key channelMapKey
		copy(key.nodeKey[:], k[:33])
		copy(key.chanID[:], k[33:])

		// No need to deserialize unknown policy.
		if bytes.Equal(edgeBytes, unknownPolicy) {
			return nil
		}

		edgeReader := bytes.NewReader(edgeBytes)
		edge, err := deserializeChanEdgePolicyRaw(
			edgeReader,
		)

		switch {
		// If the db policy was missing an expected optional field, we
		// return nil as if the policy was unknown.
		case errors.Is(err, ErrEdgePolicyOptionalFieldNotFound):
			return nil

		// We don't want a single policy with bad TLV data to stop us
		// from loading the rest of the data, so we just skip this
		// policy. This is for backwards compatibility since we did not
		// use to validate TLV data in the past before persisting it.
		case errors.Is(err, ErrParsingExtraTLVBytes):
			return nil

		case err != nil:
			return err
		}

		channelMap[key] = edge

		return nil
	})
	if err != nil {
		return nil, err
	}

	return channelMap, nil
}
313

314
// graphTopLevelBuckets is the set of top-level buckets that must exist for
// the graph store to operate; they are created by initKVStore.
var graphTopLevelBuckets = [][]byte{
	nodeBucket,
	edgeBucket,
	graphMetaBucket,
	closedScidBucket,
}
320

321
// createChannelDB creates and initializes a fresh version of  In
322
// the case that the target path has not yet been created or doesn't yet exist,
323
// then the path is created. Additionally, all required top-level buckets used
324
// within the database are created.
325
func initKVStore(db kvdb.Backend) error {
184✔
326
        err := kvdb.Update(db, func(tx kvdb.RwTx) error {
368✔
327
                for _, tlb := range graphTopLevelBuckets {
911✔
328
                        if _, err := tx.CreateTopLevelBucket(tlb); err != nil {
727✔
329
                                return err
×
330
                        }
×
331
                }
332

333
                nodes := tx.ReadWriteBucket(nodeBucket)
184✔
334
                _, err := nodes.CreateBucketIfNotExists(aliasIndexBucket)
184✔
335
                if err != nil {
184✔
336
                        return err
×
337
                }
×
338
                _, err = nodes.CreateBucketIfNotExists(nodeUpdateIndexBucket)
184✔
339
                if err != nil {
184✔
340
                        return err
×
341
                }
×
342

343
                edges := tx.ReadWriteBucket(edgeBucket)
184✔
344
                _, err = edges.CreateBucketIfNotExists(edgeIndexBucket)
184✔
345
                if err != nil {
184✔
346
                        return err
×
347
                }
×
348
                _, err = edges.CreateBucketIfNotExists(edgeUpdateIndexBucket)
184✔
349
                if err != nil {
184✔
350
                        return err
×
351
                }
×
352
                _, err = edges.CreateBucketIfNotExists(channelPointBucket)
184✔
353
                if err != nil {
184✔
354
                        return err
×
355
                }
×
356
                _, err = edges.CreateBucketIfNotExists(zombieBucket)
184✔
357
                if err != nil {
184✔
358
                        return err
×
359
                }
×
360

361
                graphMeta := tx.ReadWriteBucket(graphMetaBucket)
184✔
362
                _, err = graphMeta.CreateBucketIfNotExists(pruneLogBucket)
184✔
363

184✔
364
                return err
184✔
365
        }, func() {})
184✔
366
        if err != nil {
184✔
367
                return fmt.Errorf("unable to create new channel graph: %w", err)
×
368
        }
×
369

370
        return nil
184✔
371
}
372

373
// AddrsForNode returns all known addresses for the target node public key that
374
// the graph DB is aware of. The returned boolean indicates if the given node is
375
// unknown to the graph DB or not.
376
//
377
// NOTE: this is part of the channeldb.AddrSource interface.
378
func (c *KVStore) AddrsForNode(ctx context.Context,
379
        nodePub *btcec.PublicKey) (bool, []net.Addr, error) {
6✔
380

6✔
381
        pubKey, err := route.NewVertexFromBytes(nodePub.SerializeCompressed())
6✔
382
        if err != nil {
6✔
383
                return false, nil, err
×
384
        }
×
385

386
        node, err := c.FetchNode(ctx, pubKey)
6✔
387
        // We don't consider it an error if the graph is unaware of the node.
6✔
388
        switch {
6✔
389
        case err != nil && !errors.Is(err, ErrGraphNodeNotFound):
×
390
                return false, nil, err
×
391

392
        case errors.Is(err, ErrGraphNodeNotFound):
4✔
393
                return false, nil, nil
4✔
394
        }
395

396
        return true, node.Addresses, nil
5✔
397
}
398

399
// ForEachChannel iterates through all the channel edges stored within the
// graph and invokes the passed callback for each edge. The callback takes two
// edges as since this is a directed graph, both the in/out edges are visited.
// If the callback returns an error, then the transaction is aborted and the
// iteration stops early.
//
// NOTE: If an edge can't be found, or wasn't advertised, then a nil pointer
// for that particular channel edge routing policy will be passed into the
// callback.
func (c *KVStore) ForEachChannel(_ context.Context,
	cb func(*models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
		*models.ChannelEdgePolicy) error, reset func()) error {

	// Delegate to the package-level helper that operates on the backend.
	return forEachChannel(c.db, cb, reset)
}
10✔
414

415
// forEachChannel iterates through all the channel edges stored within the
// graph and invokes the passed callback for each edge. The callback takes two
// edges as since this is a directed graph, both the in/out edges are visited.
// If the callback returns an error, then the transaction is aborted and the
// iteration stops early.
//
// NOTE: If an edge can't be found, or wasn't advertised, then a nil pointer
// for that particular channel edge routing policy will be passed into the
// callback.
func forEachChannel(db kvdb.Backend, cb func(*models.ChannelEdgeInfo,
	*models.ChannelEdgePolicy, *models.ChannelEdgePolicy) error,
	reset func()) error {

	return db.View(func(tx kvdb.RTx) error {
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}

		// First, load all edges in memory indexed by node and channel
		// id.
		channelMap, err := getChannelMap(edges)
		if err != nil {
			return err
		}

		edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrGraphNoEdgesFound
		}

		// Load edge index, recombine each channel with the policies
		// loaded above and invoke the callback.
		return kvdb.ForAll(
			edgeIndex, func(k, edgeInfoBytes []byte) error {
				var chanID [8]byte
				copy(chanID[:], k)

				edgeInfoReader := bytes.NewReader(edgeInfoBytes)
				info, err := deserializeChanEdgeInfo(
					edgeInfoReader,
				)
				if err != nil {
					return err
				}

				// Look up both directions of the channel; a
				// missing entry yields a nil policy.
				policy1 := channelMap[channelMapKey{
					nodeKey: info.NodeKey1Bytes,
					chanID:  chanID,
				}]

				policy2 := channelMap[channelMapKey{
					nodeKey: info.NodeKey2Bytes,
					chanID:  chanID,
				}]

				return cb(&info, policy1, policy2)
			},
		)
	}, reset)
}
476

477
// ForEachChannelCacheable iterates through all the channel edges stored within
// the graph and invokes the passed callback for each edge. The callback takes
// two edges as since this is a directed graph, both the in/out edges are
// visited. If the callback returns an error, then the transaction is aborted
// and the iteration stops early.
//
// NOTE: If an edge can't be found, or wasn't advertised, then a nil pointer
// for that particular channel edge routing policy will be passed into the
// callback.
//
// NOTE: this method is like ForEachChannel but fetches only the data required
// for the graph cache.
func (c *KVStore) ForEachChannelCacheable(cb func(*models.CachedEdgeInfo,
	*models.CachedEdgePolicy, *models.CachedEdgePolicy) error,
	reset func()) error {

	return c.db.View(func(tx kvdb.RTx) error {
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}

		// First, load all edges in memory indexed by node and channel
		// id.
		channelMap, err := getChannelMap(edges)
		if err != nil {
			return err
		}

		edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrGraphNoEdgesFound
		}

		// Load edge index, recombine each channel with the policies
		// loaded above and invoke the callback.
		return kvdb.ForAll(
			edgeIndex, func(k, edgeInfoBytes []byte) error {
				var chanID [8]byte
				copy(chanID[:], k)

				edgeInfoReader := bytes.NewReader(edgeInfoBytes)
				info, err := deserializeChanEdgeInfo(
					edgeInfoReader,
				)
				if err != nil {
					return err
				}

				key1 := channelMapKey{
					nodeKey: info.NodeKey1Bytes,
					chanID:  chanID,
				}
				policy1 := channelMap[key1]

				key2 := channelMapKey{
					nodeKey: info.NodeKey2Bytes,
					chanID:  chanID,
				}
				policy2 := channelMap[key2]

				// We now create the cached edge policies, but
				// only when the above policies are found in the
				// `channelMap`.
				var (
					cachedPolicy1 *models.CachedEdgePolicy
					cachedPolicy2 *models.CachedEdgePolicy
				)

				if policy1 != nil {
					cachedPolicy1 = models.NewCachedPolicy(
						policy1,
					)
				}

				if policy2 != nil {
					cachedPolicy2 = models.NewCachedPolicy(
						policy2,
					)
				}

				return cb(
					models.NewCachedEdge(&info),
					cachedPolicy1, cachedPolicy2,
				)
			},
		)
	}, reset)
}
566

567
// forEachNodeDirectedChannel iterates through all channels of a given node,
// executing the passed callback on the directed edge representing the channel
// and its incoming policy. If the callback returns an error, then the iteration
// is halted with the error propagated back up to the caller. An optional read
// transaction may be provided. If none is provided, a new one will be created.
//
// Unknown policies are passed into the callback as nil values.
//
// NOTE: the reset param is only meaningful if the tx param is nil. If it is
// not nil, the caller is expected to have passed in a reset to the parent
// function's View/Update call which will then apply to the whole transaction.
func (c *KVStore) forEachNodeDirectedChannel(tx kvdb.RTx,
	node route.Vertex, cb func(channel *DirectedChannel) error,
	reset func()) error {

	// Fallback that uses the database.
	toNodeCallback := func() route.Vertex {
		return node
	}
	toNodeFeatures, err := c.fetchNodeFeatures(tx, node)
	if err != nil {
		return err
	}

	dbCallback := func(tx kvdb.RTx, e *models.ChannelEdgeInfo, p1,
		p2 *models.ChannelEdgePolicy) error {

		// p2 is the incoming policy; wrap it in the cached form and
		// attach the target node's identity and features.
		var cachedInPolicy *models.CachedEdgePolicy
		if p2 != nil {
			cachedInPolicy = models.NewCachedPolicy(p2)
			cachedInPolicy.ToNodePubKey = toNodeCallback
			cachedInPolicy.ToNodeFeatures = toNodeFeatures
		}

		directedChannel := &DirectedChannel{
			ChannelID:    e.ChannelID,
			IsNode1:      node == e.NodeKey1Bytes,
			OtherNode:    e.NodeKey2Bytes,
			Capacity:     e.Capacity,
			OutPolicySet: p1 != nil,
			InPolicy:     cachedInPolicy,
		}

		// Propagate the outgoing policy's inbound fee, if one is set.
		if p1 != nil {
			p1.InboundFee.WhenSome(func(fee lnwire.Fee) {
				directedChannel.InboundFee = fee
			})
		}

		// If the target node is the channel's second node, the "other"
		// node is actually the first one.
		if node == e.NodeKey2Bytes {
			directedChannel.OtherNode = e.NodeKey1Bytes
		}

		return cb(directedChannel)
	}

	return nodeTraversal(tx, node[:], c.db, dbCallback, reset)
}
625

626
// fetchNodeFeatures returns the features of a given node. If no features are
627
// known for the node, an empty feature vector is returned. An optional read
628
// transaction may be provided. If none is provided, a new one will be created.
629
func (c *KVStore) fetchNodeFeatures(tx kvdb.RTx,
630
        node route.Vertex) (*lnwire.FeatureVector, error) {
3,654✔
631

3,654✔
632
        // Fallback that uses the database.
3,654✔
633
        targetNode, err := c.fetchNodeTx(tx, node)
3,654✔
634
        switch {
3,654✔
635
        // If the node exists and has features, return them directly.
636
        case err == nil:
3,643✔
637
                return targetNode.Features, nil
3,643✔
638

639
        // If we couldn't find a node announcement, populate a blank feature
640
        // vector.
641
        case errors.Is(err, ErrGraphNodeNotFound):
11✔
642
                return lnwire.EmptyFeatureVector(), nil
11✔
643

644
        // Otherwise, bubble the error up.
645
        default:
×
646
                return nil, err
×
647
        }
648
}
649

650
// ForEachNodeDirectedChannel iterates through all channels of a given node,
// executing the passed callback on the directed edge representing the channel
// and its incoming policy. If the callback returns an error, then the iteration
// is halted with the error propagated back up to the caller.
//
// Unknown policies are passed into the callback as nil values.
//
// NOTE: this is part of the graphdb.NodeTraverser interface.
func (c *KVStore) ForEachNodeDirectedChannel(nodePub route.Vertex,
	cb func(channel *DirectedChannel) error, reset func()) error {

	// A nil transaction makes the helper open a fresh read transaction.
	return c.forEachNodeDirectedChannel(nil, nodePub, cb, reset)
}
26✔
663

664
// FetchNodeFeatures returns the features of the given node. If no features are
// known for the node, an empty feature vector is returned.
//
// NOTE: this is part of the graphdb.NodeTraverser interface.
func (c *KVStore) FetchNodeFeatures(nodePub route.Vertex) (
	*lnwire.FeatureVector, error) {

	// Delegate with a nil transaction so the helper opens its own read
	// transaction for the lookup.
	return c.fetchNodeFeatures(nil, nodePub)
}
4✔
673

674
// ForEachNodeCached is similar to forEachNode, but it returns DirectedChannel
// data to the call-back. For every node it visits, it builds the full map of
// that node's channels (keyed by channel ID) before invoking cb. When
// withAddrs is true, the node's advertised addresses are passed along as well.
//
// NOTE: The callback contents MUST not be modified.
func (c *KVStore) ForEachNodeCached(ctx context.Context, withAddrs bool,
	cb func(ctx context.Context, node route.Vertex, addrs []net.Addr,
		chans map[uint64]*DirectedChannel) error, reset func()) error {

	// Call back to a version that uses the database directly. We'll
	// iterate over each node, then the set of channels for each node, and
	// construct a similar callback function signature as the main function
	// expects.
	return forEachNode(c.db, func(tx kvdb.RTx,
		node *models.Node) error {

		channels := make(map[uint64]*DirectedChannel)

		err := c.forEachNodeChannelTx(tx, node.PubKeyBytes,
			func(tx kvdb.RTx, e *models.ChannelEdgeInfo,
				p1 *models.ChannelEdgePolicy,
				p2 *models.ChannelEdgePolicy) error {

				// The "to" node of the incoming policy is the
				// node currently being visited.
				toNodeCallback := func() route.Vertex {
					return node.PubKeyBytes
				}
				toNodeFeatures, err := c.fetchNodeFeatures(
					tx, node.PubKeyBytes,
				)
				if err != nil {
					return err
				}

				// Only materialize a cached policy when the
				// incoming policy (p2) is actually known.
				var cachedInPolicy *models.CachedEdgePolicy
				if p2 != nil {
					cachedInPolicy =
						models.NewCachedPolicy(p2)
					cachedInPolicy.ToNodePubKey =
						toNodeCallback
					cachedInPolicy.ToNodeFeatures =
						toNodeFeatures
				}

				directedChannel := &DirectedChannel{
					ChannelID: e.ChannelID,
					IsNode1: node.PubKeyBytes ==
						e.NodeKey1Bytes,
					OtherNode:    e.NodeKey2Bytes,
					Capacity:     e.Capacity,
					OutPolicySet: p1 != nil,
					InPolicy:     cachedInPolicy,
				}

				// OtherNode defaults to node 2 above; flip it
				// when the visited node is node 2 itself.
				if node.PubKeyBytes == e.NodeKey2Bytes {
					directedChannel.OtherNode =
						e.NodeKey1Bytes
				}

				channels[e.ChannelID] = directedChannel

				return nil
			}, reset,
		)
		if err != nil {
			return err
		}

		// Only surface the node's addresses when the caller asked for
		// them.
		var addrs []net.Addr
		if withAddrs {
			addrs = node.Addresses
		}

		return cb(ctx, node.PubKeyBytes, addrs, channels)
	}, reset)
}
748

749
// DisabledChannelIDs returns the channel ids of disabled channels.
// A channel is disabled when two of the associated ChannelEdgePolicies
// have their disabled bit on.
func (c *KVStore) DisabledChannelIDs() ([]uint64, error) {
	var disabledChanIDs []uint64
	// chanEdgeFound tracks channels for which exactly one disabled policy
	// has been seen so far; seeing a second one promotes the channel to
	// the result slice.
	var chanEdgeFound map[uint64]struct{}

	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}

		disabledEdgePolicyIndex := edges.NestedReadBucket(
			disabledEdgePolicyBucket,
		)
		// No disabled-policy index means no channel has ever had a
		// disabled policy, so there is nothing to report.
		if disabledEdgePolicyIndex == nil {
			return nil
		}

		// We iterate over all disabled policies and we add each channel
		// that has more than one disabled policy to disabledChanIDs
		// array.
		return disabledEdgePolicyIndex.ForEach(
			func(k, v []byte) error {
				// The first 8 bytes of the index key encode
				// the channel ID.
				chanID := byteOrder.Uint64(k[:8])
				_, edgeFound := chanEdgeFound[chanID]
				if edgeFound {
					// Second disabled policy for this
					// channel: both directions are
					// disabled, so record it.
					delete(chanEdgeFound, chanID)
					disabledChanIDs = append(
						disabledChanIDs, chanID,
					)

					return nil
				}

				// First disabled policy seen for this channel.
				chanEdgeFound[chanID] = struct{}{}

				return nil
			},
		)
	}, func() {
		// Reset accumulated state in case the transaction is retried.
		disabledChanIDs = nil
		chanEdgeFound = make(map[uint64]struct{})
	})
	if err != nil {
		return nil, err
	}

	return disabledChanIDs, nil
}
800

801
// ForEachNode iterates through all the stored vertices/nodes in the graph,
802
// executing the passed callback with each node encountered. If the callback
803
// returns an error, then the transaction is aborted and the iteration stops
804
// early.
805
//
806
// NOTE: this is part of the V1Store interface.
807
func (c *KVStore) ForEachNode(_ context.Context,
808
        cb func(*models.Node) error, reset func()) error {
13✔
809

13✔
810
        return forEachNode(c.db, func(tx kvdb.RTx,
13✔
811
                node *models.Node) error {
209✔
812

196✔
813
                return cb(node)
196✔
814
        }, reset)
196✔
815
}
816

817
// forEachNode iterates through all the stored vertices/nodes in the graph,
818
// executing the passed callback with each node encountered. If the callback
819
// returns an error, then the transaction is aborted and the iteration stops
820
// early.
821
//
822
// TODO(roasbeef): add iterator interface to allow for memory efficient graph
823
// traversal when graph gets mega.
824
func forEachNode(db kvdb.Backend,
825
        cb func(kvdb.RTx, *models.Node) error, reset func()) error {
133✔
826

133✔
827
        traversal := func(tx kvdb.RTx) error {
266✔
828
                // First grab the nodes bucket which stores the mapping from
133✔
829
                // pubKey to node information.
133✔
830
                nodes := tx.ReadBucket(nodeBucket)
133✔
831
                if nodes == nil {
133✔
832
                        return ErrGraphNotFound
×
833
                }
×
834

835
                return nodes.ForEach(func(pubKey, nodeBytes []byte) error {
1,578✔
836
                        // If this is the source key, then we skip this
1,445✔
837
                        // iteration as the value for this key is a pubKey
1,445✔
838
                        // rather than raw node information.
1,445✔
839
                        if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 {
1,711✔
840
                                return nil
266✔
841
                        }
266✔
842

843
                        nodeReader := bytes.NewReader(nodeBytes)
1,182✔
844
                        node, err := deserializeLightningNode(nodeReader)
1,182✔
845
                        if err != nil {
1,182✔
846
                                return err
×
847
                        }
×
848

849
                        // Execute the callback, the transaction will abort if
850
                        // this returns an error.
851
                        return cb(tx, &node)
1,182✔
852
                })
853
        }
854

855
        return kvdb.View(db, traversal, reset)
133✔
856
}
857

858
// ForEachNodeCacheable iterates through all the stored vertices/nodes in the
859
// graph, executing the passed callback with each node encountered. If the
860
// callback returns an error, then the transaction is aborted and the iteration
861
// stops early.
862
func (c *KVStore) ForEachNodeCacheable(_ context.Context,
863
        cb func(route.Vertex, *lnwire.FeatureVector) error,
864
        reset func()) error {
153✔
865

153✔
866
        traversal := func(tx kvdb.RTx) error {
306✔
867
                // First grab the nodes bucket which stores the mapping from
153✔
868
                // pubKey to node information.
153✔
869
                nodes := tx.ReadBucket(nodeBucket)
153✔
870
                if nodes == nil {
153✔
871
                        return ErrGraphNotFound
×
872
                }
×
873

874
                return nodes.ForEach(func(pubKey, nodeBytes []byte) error {
576✔
875
                        // If this is the source key, then we skip this
423✔
876
                        // iteration as the value for this key is a pubKey
423✔
877
                        // rather than raw node information.
423✔
878
                        if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 {
726✔
879
                                return nil
303✔
880
                        }
303✔
881

882
                        nodeReader := bytes.NewReader(nodeBytes)
123✔
883
                        node, features, err := deserializeLightningNodeCacheable( //nolint:ll
123✔
884
                                nodeReader,
123✔
885
                        )
123✔
886
                        if err != nil {
123✔
887
                                return err
×
888
                        }
×
889

890
                        // Execute the callback, the transaction will abort if
891
                        // this returns an error.
892
                        return cb(node, features)
123✔
893
                })
894
        }
895

896
        return kvdb.View(c.db, traversal, reset)
153✔
897
}
898

899
// SourceNode returns the source node of the graph. The source node is treated
// as the center node within a star-graph. This method may be used to kick off
// a path finding algorithm in order to explore the reachability of another
// node based off the source node.
func (c *KVStore) SourceNode(_ context.Context) (*models.Node, error) {
	// Delegate to the package-level helper which opens its own read
	// transaction.
	return sourceNode(c.db)
}
241✔
906

907
// sourceNode fetches the source node of the graph. The source node is treated
908
// as the center node within a star-graph.
909
func sourceNode(db kvdb.Backend) (*models.Node, error) {
241✔
910
        var source *models.Node
241✔
911
        err := kvdb.View(db, func(tx kvdb.RTx) error {
482✔
912
                // First grab the nodes bucket which stores the mapping from
241✔
913
                // pubKey to node information.
241✔
914
                nodes := tx.ReadBucket(nodeBucket)
241✔
915
                if nodes == nil {
241✔
916
                        return ErrGraphNotFound
×
917
                }
×
918

919
                node, err := sourceNodeWithTx(nodes)
241✔
920
                if err != nil {
245✔
921
                        return err
4✔
922
                }
4✔
923
                source = node
240✔
924

240✔
925
                return nil
240✔
926
        }, func() {
241✔
927
                source = nil
241✔
928
        })
241✔
929
        if err != nil {
245✔
930
                return nil, err
4✔
931
        }
4✔
932

933
        return source, nil
240✔
934
}
935

936
// sourceNodeWithTx uses an existing database transaction and returns the source
937
// node of the graph. The source node is treated as the center node within a
938
// star-graph. This method may be used to kick off a path finding algorithm in
939
// order to explore the reachability of another node based off the source node.
940
func sourceNodeWithTx(nodes kvdb.RBucket) (*models.Node, error) {
495✔
941
        selfPub := nodes.Get(sourceKey)
495✔
942
        if selfPub == nil {
499✔
943
                return nil, ErrSourceNodeNotSet
4✔
944
        }
4✔
945

946
        // With the pubKey of the source node retrieved, we're able to
947
        // fetch the full node information.
948
        node, err := fetchLightningNode(nodes, selfPub)
494✔
949
        if err != nil {
494✔
950
                return nil, err
×
951
        }
×
952

953
        return &node, nil
494✔
954
}
955

956
// SetSourceNode sets the source node within the graph database. The source
// node is to be used as the center of a star-graph within path finding
// algorithms. Both the source pointer and the node record itself are written
// atomically within a single update transaction.
func (c *KVStore) SetSourceNode(_ context.Context,
	node *models.Node) error {

	nodePubBytes := node.PubKeyBytes[:]

	return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
		// First grab the nodes bucket which stores the mapping from
		// pubKey to node information.
		nodes, err := tx.CreateTopLevelBucket(nodeBucket)
		if err != nil {
			return err
		}

		// Next we create the mapping from source to the targeted
		// public key.
		if err := nodes.Put(sourceKey, nodePubBytes); err != nil {
			return err
		}

		// Finally, we commit the information of the lightning node
		// itself.
		return addLightningNode(tx, node)
	}, func() {})
}
983

984
// AddNode adds a vertex/node to the graph database. If the node is not
// in the database from before, this will add a new, unconnected one to the
// graph. If it is present from before, this will update that node's
// information. Note that this method is expected to only be called to update an
// already present node from a node announcement, or to insert a node found in a
// channel update.
//
// The write is submitted to the node batch scheduler, so multiple concurrent
// calls may be coalesced into a single database transaction.
//
// TODO(roasbeef): also need sig of announcement.
func (c *KVStore) AddNode(ctx context.Context,
	node *models.Node, opts ...batch.SchedulerOption) error {

	r := &batch.Request[kvdb.RwTx]{
		Opts: batch.NewSchedulerOptions(opts...),
		Do: func(tx kvdb.RwTx) error {
			return addLightningNode(tx, node)
		},
	}

	return c.nodeScheduler.Execute(ctx, r)
}
1004

1005
func addLightningNode(tx kvdb.RwTx, node *models.Node) error {
1,168✔
1006
        nodes, err := tx.CreateTopLevelBucket(nodeBucket)
1,168✔
1007
        if err != nil {
1,168✔
1008
                return err
×
1009
        }
×
1010

1011
        aliases, err := nodes.CreateBucketIfNotExists(aliasIndexBucket)
1,168✔
1012
        if err != nil {
1,168✔
1013
                return err
×
1014
        }
×
1015

1016
        updateIndex, err := nodes.CreateBucketIfNotExists(
1,168✔
1017
                nodeUpdateIndexBucket,
1,168✔
1018
        )
1,168✔
1019
        if err != nil {
1,168✔
1020
                return err
×
1021
        }
×
1022

1023
        return putLightningNode(nodes, aliases, updateIndex, node)
1,168✔
1024
}
1025

1026
// LookupAlias attempts to return the alias as advertised by the target node.
1027
// TODO(roasbeef): currently assumes that aliases are unique...
1028
func (c *KVStore) LookupAlias(_ context.Context,
1029
        pub *btcec.PublicKey) (string, error) {
5✔
1030

5✔
1031
        var alias string
5✔
1032

5✔
1033
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
10✔
1034
                nodes := tx.ReadBucket(nodeBucket)
5✔
1035
                if nodes == nil {
5✔
1036
                        return ErrGraphNodesNotFound
×
1037
                }
×
1038

1039
                aliases := nodes.NestedReadBucket(aliasIndexBucket)
5✔
1040
                if aliases == nil {
5✔
1041
                        return ErrGraphNodesNotFound
×
1042
                }
×
1043

1044
                nodePub := pub.SerializeCompressed()
5✔
1045
                a := aliases.Get(nodePub)
5✔
1046
                if a == nil {
6✔
1047
                        return ErrNodeAliasNotFound
1✔
1048
                }
1✔
1049

1050
                // TODO(roasbeef): should actually be using the utf-8
1051
                // package...
1052
                alias = string(a)
4✔
1053

4✔
1054
                return nil
4✔
1055
        }, func() {
5✔
1056
                alias = ""
5✔
1057
        })
5✔
1058
        if err != nil {
6✔
1059
                return "", err
1✔
1060
        }
1✔
1061

1062
        return alias, nil
4✔
1063
}
1064

1065
// DeleteNode starts a new database transaction to remove a vertex/node
1066
// from the database according to the node's public key.
1067
func (c *KVStore) DeleteNode(_ context.Context,
1068
        nodePub route.Vertex) error {
4✔
1069

4✔
1070
        // TODO(roasbeef): ensure dangling edges are removed...
4✔
1071
        return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
8✔
1072
                nodes := tx.ReadWriteBucket(nodeBucket)
4✔
1073
                if nodes == nil {
4✔
1074
                        return ErrGraphNodeNotFound
×
1075
                }
×
1076

1077
                return c.deleteLightningNode(nodes, nodePub[:])
4✔
1078
        }, func() {})
4✔
1079
}
1080

1081
// deleteLightningNode uses an existing database transaction to remove a
// vertex/node from the database according to the node's public key. It also
// removes the node's alias-index and update-index entries. If any step fails,
// the enclosing transaction is expected to roll back all partial deletions.
func (c *KVStore) deleteLightningNode(nodes kvdb.RwBucket,
	compressedPubKey []byte) error {

	aliases := nodes.NestedReadWriteBucket(aliasIndexBucket)
	if aliases == nil {
		return ErrGraphNodesNotFound
	}

	if err := aliases.Delete(compressedPubKey); err != nil {
		return err
	}

	// Before we delete the node, we'll fetch its current state so we can
	// determine when its last update was to clear out the node update
	// index.
	node, err := fetchLightningNode(nodes, compressedPubKey)
	if err != nil {
		return err
	}

	if err := nodes.Delete(compressedPubKey); err != nil {
		return err
	}

	// Finally, we'll delete the index entry for the node within the
	// nodeUpdateIndexBucket as this node is no longer active, so we don't
	// need to track its last update.
	nodeUpdateIndex := nodes.NestedReadWriteBucket(nodeUpdateIndexBucket)
	if nodeUpdateIndex == nil {
		return ErrGraphNodesNotFound
	}

	// In order to delete the entry, we'll need to reconstruct the key for
	// its last update. The key layout is: 8-byte big-endian timestamp
	// followed by the 33-byte compressed pubkey.
	updateUnix := uint64(node.LastUpdate.Unix())
	var indexKey [8 + 33]byte
	byteOrder.PutUint64(indexKey[:8], updateUnix)
	copy(indexKey[8:], compressedPubKey)

	return nodeUpdateIndex.Delete(indexKey[:])
}
1124

1125
// AddChannelEdge adds a new (undirected, blank) edge to the graph database. An
// undirected edge from the two target nodes are created. The information stored
// denotes the static attributes of the channel, such as the channelID, the keys
// involved in creation of the channel, and the set of features that the channel
// supports. The chanPoint and chanID are used to uniquely identify the edge
// globally within the database.
//
// The write is submitted to the channel batch scheduler, so multiple
// concurrent calls may be coalesced into a single database transaction.
func (c *KVStore) AddChannelEdge(ctx context.Context,
	edge *models.ChannelEdgeInfo, opts ...batch.SchedulerOption) error {

	var alreadyExists bool
	r := &batch.Request[kvdb.RwTx]{
		Opts: batch.NewSchedulerOptions(opts...),
		// Reset clears per-attempt state in case the batch is retried.
		Reset: func() {
			alreadyExists = false
		},
		Do: func(tx kvdb.RwTx) error {
			err := c.addChannelEdge(tx, edge)

			// Silence ErrEdgeAlreadyExist so that the batch can
			// succeed, but propagate the error via local state.
			if errors.Is(err, ErrEdgeAlreadyExist) {
				alreadyExists = true
				return nil
			}

			return err
		},
		OnCommit: func(err error) error {
			switch {
			case err != nil:
				return err
			case alreadyExists:
				return ErrEdgeAlreadyExist
			default:
				// The edge was newly written: drop any stale
				// cache entries for this channel ID.
				c.rejectCache.remove(edge.ChannelID)
				c.chanCache.remove(edge.ChannelID)
				return nil
			}
		},
	}

	return c.chanScheduler.Execute(ctx, r)
}
1168

1169
// addChannelEdge is the private form of AddChannelEdge that allows callers to
1170
// utilize an existing db transaction.
1171
func (c *KVStore) addChannelEdge(tx kvdb.RwTx,
1172
        edge *models.ChannelEdgeInfo) error {
1,824✔
1173

1,824✔
1174
        // Construct the channel's primary key which is the 8-byte channel ID.
1,824✔
1175
        var chanKey [8]byte
1,824✔
1176
        binary.BigEndian.PutUint64(chanKey[:], edge.ChannelID)
1,824✔
1177

1,824✔
1178
        nodes, err := tx.CreateTopLevelBucket(nodeBucket)
1,824✔
1179
        if err != nil {
1,824✔
1180
                return err
×
1181
        }
×
1182
        edges, err := tx.CreateTopLevelBucket(edgeBucket)
1,824✔
1183
        if err != nil {
1,824✔
1184
                return err
×
1185
        }
×
1186
        edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket)
1,824✔
1187
        if err != nil {
1,824✔
1188
                return err
×
1189
        }
×
1190
        chanIndex, err := edges.CreateBucketIfNotExists(channelPointBucket)
1,824✔
1191
        if err != nil {
1,824✔
1192
                return err
×
1193
        }
×
1194

1195
        // First, attempt to check if this edge has already been created. If
1196
        // so, then we can exit early as this method is meant to be idempotent.
1197
        if edgeInfo := edgeIndex.Get(chanKey[:]); edgeInfo != nil {
2,061✔
1198
                return ErrEdgeAlreadyExist
237✔
1199
        }
237✔
1200

1201
        // Before we insert the channel into the database, we'll ensure that
1202
        // both nodes already exist in the channel graph. If either node
1203
        // doesn't, then we'll insert a "shell" node that just includes its
1204
        // public key, so subsequent validation and queries can work properly.
1205
        _, node1Err := fetchLightningNode(nodes, edge.NodeKey1Bytes[:])
1,587✔
1206
        switch {
1,587✔
1207
        case errors.Is(node1Err, ErrGraphNodeNotFound):
27✔
1208
                node1Shell := models.Node{
27✔
1209
                        PubKeyBytes:          edge.NodeKey1Bytes,
27✔
1210
                        HaveNodeAnnouncement: false,
27✔
1211
                }
27✔
1212
                err := addLightningNode(tx, &node1Shell)
27✔
1213
                if err != nil {
27✔
1214
                        return fmt.Errorf("unable to create shell node "+
×
1215
                                "for: %x: %w", edge.NodeKey1Bytes, err)
×
1216
                }
×
1217
        case node1Err != nil:
×
1218
                return node1Err
×
1219
        }
1220

1221
        _, node2Err := fetchLightningNode(nodes, edge.NodeKey2Bytes[:])
1,587✔
1222
        switch {
1,587✔
1223
        case errors.Is(node2Err, ErrGraphNodeNotFound):
60✔
1224
                node2Shell := models.Node{
60✔
1225
                        PubKeyBytes:          edge.NodeKey2Bytes,
60✔
1226
                        HaveNodeAnnouncement: false,
60✔
1227
                }
60✔
1228
                err := addLightningNode(tx, &node2Shell)
60✔
1229
                if err != nil {
60✔
1230
                        return fmt.Errorf("unable to create shell node "+
×
1231
                                "for: %x: %w", edge.NodeKey2Bytes, err)
×
1232
                }
×
1233
        case node2Err != nil:
×
1234
                return node2Err
×
1235
        }
1236

1237
        // If the edge hasn't been created yet, then we'll first add it to the
1238
        // edge index in order to associate the edge between two nodes and also
1239
        // store the static components of the channel.
1240
        if err := putChanEdgeInfo(edgeIndex, edge, chanKey); err != nil {
1,587✔
1241
                return err
×
1242
        }
×
1243

1244
        // Mark edge policies for both sides as unknown. This is to enable
1245
        // efficient incoming channel lookup for a node.
1246
        keys := []*[33]byte{
1,587✔
1247
                &edge.NodeKey1Bytes,
1,587✔
1248
                &edge.NodeKey2Bytes,
1,587✔
1249
        }
1,587✔
1250
        for _, key := range keys {
4,758✔
1251
                err := putChanEdgePolicyUnknown(edges, edge.ChannelID, key[:])
3,171✔
1252
                if err != nil {
3,171✔
1253
                        return err
×
1254
                }
×
1255
        }
1256

1257
        // Finally we add it to the channel index which maps channel points
1258
        // (outpoints) to the shorter channel ID's.
1259
        var b bytes.Buffer
1,587✔
1260
        if err := WriteOutpoint(&b, &edge.ChannelPoint); err != nil {
1,587✔
1261
                return err
×
1262
        }
×
1263

1264
        return chanIndex.Put(b.Bytes(), chanKey[:])
1,587✔
1265
}
1266

1267
// HasChannelEdge returns true if the database knows of a channel edge with the
// passed channel ID, and false otherwise. If an edge with that ID is found
// within the graph, then two time stamps representing the last time the edge
// was updated for both directed edges are returned along with the boolean. If
// it is not found, then the zombie index is checked and its result is returned
// as the second boolean.
//
// Results are served from (and written back to) the reject cache using a
// double-checked locking pattern: first under the shared lock, then under the
// exclusive lock, and only then from the database.
func (c *KVStore) HasChannelEdge(
	chanID uint64) (time.Time, time.Time, bool, bool, error) {

	var (
		upd1Time time.Time
		upd2Time time.Time
		exists   bool
		isZombie bool
	)

	// We'll query the cache with the shared lock held to allow multiple
	// readers to access values in the cache concurrently if they exist.
	c.cacheMu.RLock()
	if entry, ok := c.rejectCache.get(chanID); ok {
		c.cacheMu.RUnlock()
		upd1Time = time.Unix(entry.upd1Time, 0)
		upd2Time = time.Unix(entry.upd2Time, 0)
		exists, isZombie = entry.flags.unpack()

		return upd1Time, upd2Time, exists, isZombie, nil
	}
	c.cacheMu.RUnlock()

	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()

	// The item was not found with the shared lock, so we'll acquire the
	// exclusive lock and check the cache again in case another method added
	// the entry to the cache while no lock was held.
	if entry, ok := c.rejectCache.get(chanID); ok {
		upd1Time = time.Unix(entry.upd1Time, 0)
		upd2Time = time.Unix(entry.upd2Time, 0)
		exists, isZombie = entry.flags.unpack()

		return upd1Time, upd2Time, exists, isZombie, nil
	}

	// Cache miss: consult the database directly.
	if err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrGraphNoEdgesFound
		}

		var channelID [8]byte
		byteOrder.PutUint64(channelID[:], chanID)

		// If the edge doesn't exist, then we'll also check our zombie
		// index.
		if edgeIndex.Get(channelID[:]) == nil {
			exists = false
			zombieIndex := edges.NestedReadBucket(zombieBucket)
			if zombieIndex != nil {
				isZombie, _, _ = isZombieEdge(
					zombieIndex, chanID,
				)
			}

			return nil
		}

		exists = true
		isZombie = false

		// If the channel has been found in the graph, then retrieve
		// the edges itself so we can return the last updated
		// timestamps.
		nodes := tx.ReadBucket(nodeBucket)
		if nodes == nil {
			return ErrGraphNodeNotFound
		}

		e1, e2, err := fetchChanEdgePolicies(
			edgeIndex, edges, channelID[:],
		)
		if err != nil {
			return err
		}

		// As we may have only one of the edges populated, only set the
		// update time if the edge was found in the database.
		if e1 != nil {
			upd1Time = e1.LastUpdate
		}
		if e2 != nil {
			upd2Time = e2.LastUpdate
		}

		return nil
	}, func() {}); err != nil {
		return time.Time{}, time.Time{}, exists, isZombie, err
	}

	// Populate the reject cache so subsequent lookups for this channel ID
	// are served without hitting the database.
	c.rejectCache.insert(chanID, rejectCacheEntry{
		upd1Time: upd1Time.Unix(),
		upd2Time: upd2Time.Unix(),
		flags:    packRejectFlags(exists, isZombie),
	})

	return upd1Time, upd2Time, exists, isZombie, nil
}
1377

1378
// AddEdgeProof sets the proof of an existing edge in the graph database.
1379
func (c *KVStore) AddEdgeProof(chanID lnwire.ShortChannelID,
1380
        proof *models.ChannelAuthProof) error {
5✔
1381

5✔
1382
        // Construct the channel's primary key which is the 8-byte channel ID.
5✔
1383
        var chanKey [8]byte
5✔
1384
        binary.BigEndian.PutUint64(chanKey[:], chanID.ToUint64())
5✔
1385

5✔
1386
        return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
10✔
1387
                edges := tx.ReadWriteBucket(edgeBucket)
5✔
1388
                if edges == nil {
5✔
1389
                        return ErrEdgeNotFound
×
1390
                }
×
1391

1392
                edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
5✔
1393
                if edgeIndex == nil {
5✔
1394
                        return ErrEdgeNotFound
×
1395
                }
×
1396

1397
                edge, err := fetchChanEdgeInfo(edgeIndex, chanKey[:])
5✔
1398
                if err != nil {
5✔
1399
                        return err
×
1400
                }
×
1401

1402
                edge.AuthProof = proof
5✔
1403

5✔
1404
                return putChanEdgeInfo(edgeIndex, &edge, chanKey)
5✔
1405
        }, func() {})
5✔
1406
}
1407

1408
const (
	// pruneTipBytes is the total size of the value which stores a prune
	// entry of the graph in the prune log. The "prune tip" is the last
	// entry in the prune log, and indicates if the channel graph is in
	// sync with the current UTXO state. The structure of the value
	// is: blockHash, taking 32 bytes total.
	pruneTipBytes = 32
)
1416

1417
// PruneGraph prunes newly closed channels from the channel graph in response
// to a new block being solved on the network. Any transactions which spend
// the funding output of any known channels within the graph will be deleted.
// Additionally, the "prune tip", or the last block which has been used to
// prune the graph is stored so callers can ensure the graph is fully in sync
// with the current UTXO state. A slice of channels that have been closed by
// the target block along with any pruned nodes are returned if the function
// succeeds without error.
func (c *KVStore) PruneGraph(spentOutputs []*wire.OutPoint,
	blockHash *chainhash.Hash, blockHeight uint32) (
	[]*models.ChannelEdgeInfo, []route.Vertex, error) {

	// The cache mutex is held for the duration of the call so the reject
	// and channel caches stay consistent with the DB mutation below.
	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()

	var (
		chansClosed []*models.ChannelEdgeInfo
		prunedNodes []route.Vertex
	)

	err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
		// First grab the edges bucket which houses the information
		// we'd like to delete.
		edges, err := tx.CreateTopLevelBucket(edgeBucket)
		if err != nil {
			return err
		}

		// Next grab the two edge indexes which will also need to be
		// updated.
		edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket)
		if err != nil {
			return err
		}
		chanIndex, err := edges.CreateBucketIfNotExists(
			channelPointBucket,
		)
		if err != nil {
			return err
		}
		nodes := tx.ReadWriteBucket(nodeBucket)
		if nodes == nil {
			return ErrSourceNodeNotSet
		}
		zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
		if err != nil {
			return err
		}

		// For each of the outpoints that have been spent within the
		// block, we attempt to delete them from the graph as if that
		// outpoint was a channel, then it has now been closed.
		for _, chanPoint := range spentOutputs {
			// TODO(roasbeef): load channel bloom filter, continue
			// if NOT if filter

			var opBytes bytes.Buffer
			err := WriteOutpoint(&opBytes, chanPoint)
			if err != nil {
				return err
			}

			// First attempt to see if the channel exists within
			// the database, if not, then we can exit early.
			chanID := chanIndex.Get(opBytes.Bytes())
			if chanID == nil {
				continue
			}

			// Attempt to delete the channel, an ErrEdgeNotFound
			// will be returned if that outpoint isn't known to be
			// a channel. If no error is returned, then a channel
			// was successfully pruned.
			edgeInfo, err := c.delChannelEdgeUnsafe(
				edges, edgeIndex, chanIndex, zombieIndex,
				chanID, false, false,
			)
			if err != nil && !errors.Is(err, ErrEdgeNotFound) {
				return err
			}

			chansClosed = append(chansClosed, edgeInfo)
		}

		metaBucket, err := tx.CreateTopLevelBucket(graphMetaBucket)
		if err != nil {
			return err
		}

		pruneBucket, err := metaBucket.CreateBucketIfNotExists(
			pruneLogBucket,
		)
		if err != nil {
			return err
		}

		// With the graph pruned, add a new entry to the prune log,
		// which can be used to check if the graph is fully synced with
		// the current UTXO state. The entry is keyed by block height
		// and holds the block hash as its value.
		var blockHeightBytes [4]byte
		byteOrder.PutUint32(blockHeightBytes[:], blockHeight)

		var newTip [pruneTipBytes]byte
		copy(newTip[:], blockHash[:])

		err = pruneBucket.Put(blockHeightBytes[:], newTip[:])
		if err != nil {
			return err
		}

		// Now that the graph has been pruned, we'll also attempt to
		// prune any nodes that have had a channel closed within the
		// latest block.
		prunedNodes, err = c.pruneGraphNodes(nodes, edgeIndex)

		return err
	}, func() {
		// Reset the accumulators so a retried transaction starts from
		// a clean slate.
		chansClosed = nil
		prunedNodes = nil
	})
	if err != nil {
		return nil, nil, err
	}

	// Evict the closed channels from both caches now that the DB update
	// has committed.
	for _, channel := range chansClosed {
		c.rejectCache.remove(channel.ChannelID)
		c.chanCache.remove(channel.ChannelID)
	}

	return chansClosed, prunedNodes, nil
}
1548

1549
// PruneGraphNodes is a garbage collection method which attempts to prune out
1550
// any nodes from the channel graph that are currently unconnected. This ensure
1551
// that we only maintain a graph of reachable nodes. In the event that a pruned
1552
// node gains more channels, it will be re-added back to the graph.
1553
func (c *KVStore) PruneGraphNodes() ([]route.Vertex, error) {
26✔
1554
        var prunedNodes []route.Vertex
26✔
1555
        err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
52✔
1556
                nodes := tx.ReadWriteBucket(nodeBucket)
26✔
1557
                if nodes == nil {
26✔
1558
                        return ErrGraphNodesNotFound
×
1559
                }
×
1560
                edges := tx.ReadWriteBucket(edgeBucket)
26✔
1561
                if edges == nil {
26✔
1562
                        return ErrGraphNotFound
×
1563
                }
×
1564
                edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
26✔
1565
                if edgeIndex == nil {
26✔
1566
                        return ErrGraphNoEdgesFound
×
1567
                }
×
1568

1569
                var err error
26✔
1570
                prunedNodes, err = c.pruneGraphNodes(nodes, edgeIndex)
26✔
1571
                if err != nil {
26✔
1572
                        return err
×
1573
                }
×
1574

1575
                return nil
26✔
1576
        }, func() {
26✔
1577
                prunedNodes = nil
26✔
1578
        })
26✔
1579

1580
        return prunedNodes, err
26✔
1581
}
1582

1583
// pruneGraphNodes attempts to remove any nodes from the graph who have had a
1584
// channel closed within the current block. If the node still has existing
1585
// channels in the graph, this will act as a no-op.
1586
func (c *KVStore) pruneGraphNodes(nodes kvdb.RwBucket,
1587
        edgeIndex kvdb.RwBucket) ([]route.Vertex, error) {
256✔
1588

256✔
1589
        log.Trace("Pruning nodes from graph with no open channels")
256✔
1590

256✔
1591
        // We'll retrieve the graph's source node to ensure we don't remove it
256✔
1592
        // even if it no longer has any open channels.
256✔
1593
        sourceNode, err := sourceNodeWithTx(nodes)
256✔
1594
        if err != nil {
256✔
1595
                return nil, err
×
1596
        }
×
1597

1598
        // We'll use this map to keep count the number of references to a node
1599
        // in the graph. A node should only be removed once it has no more
1600
        // references in the graph.
1601
        nodeRefCounts := make(map[[33]byte]int)
256✔
1602
        err = nodes.ForEach(func(pubKey, nodeBytes []byte) error {
1,498✔
1603
                // If this is the source key, then we skip this
1,242✔
1604
                // iteration as the value for this key is a pubKey
1,242✔
1605
                // rather than raw node information.
1,242✔
1606
                if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 {
2,004✔
1607
                        return nil
762✔
1608
                }
762✔
1609

1610
                var nodePub [33]byte
483✔
1611
                copy(nodePub[:], pubKey)
483✔
1612
                nodeRefCounts[nodePub] = 0
483✔
1613

483✔
1614
                return nil
483✔
1615
        })
1616
        if err != nil {
256✔
1617
                return nil, err
×
1618
        }
×
1619

1620
        // To ensure we never delete the source node, we'll start off by
1621
        // bumping its ref count to 1.
1622
        nodeRefCounts[sourceNode.PubKeyBytes] = 1
256✔
1623

256✔
1624
        // Next, we'll run through the edgeIndex which maps a channel ID to the
256✔
1625
        // edge info. We'll use this scan to populate our reference count map
256✔
1626
        // above.
256✔
1627
        err = edgeIndex.ForEach(func(chanID, edgeInfoBytes []byte) error {
439✔
1628
                // The first 66 bytes of the edge info contain the pubkeys of
183✔
1629
                // the nodes that this edge attaches. We'll extract them, and
183✔
1630
                // add them to the ref count map.
183✔
1631
                var node1, node2 [33]byte
183✔
1632
                copy(node1[:], edgeInfoBytes[:33])
183✔
1633
                copy(node2[:], edgeInfoBytes[33:])
183✔
1634

183✔
1635
                // With the nodes extracted, we'll increase the ref count of
183✔
1636
                // each of the nodes.
183✔
1637
                nodeRefCounts[node1]++
183✔
1638
                nodeRefCounts[node2]++
183✔
1639

183✔
1640
                return nil
183✔
1641
        })
183✔
1642
        if err != nil {
256✔
1643
                return nil, err
×
1644
        }
×
1645

1646
        // Finally, we'll make a second pass over the set of nodes, and delete
1647
        // any nodes that have a ref count of zero.
1648
        var pruned []route.Vertex
256✔
1649
        for nodePubKey, refCount := range nodeRefCounts {
739✔
1650
                // If the ref count of the node isn't zero, then we can safely
483✔
1651
                // skip it as it still has edges to or from it within the
483✔
1652
                // graph.
483✔
1653
                if refCount != 0 {
904✔
1654
                        continue
421✔
1655
                }
1656

1657
                // If we reach this point, then there are no longer any edges
1658
                // that connect this node, so we can delete it.
1659
                err := c.deleteLightningNode(nodes, nodePubKey[:])
65✔
1660
                if err != nil {
65✔
1661
                        if errors.Is(err, ErrGraphNodeNotFound) ||
×
1662
                                errors.Is(err, ErrGraphNodesNotFound) {
×
1663

×
1664
                                log.Warnf("Unable to prune node %x from the "+
×
1665
                                        "graph: %v", nodePubKey, err)
×
1666
                                continue
×
1667
                        }
1668

1669
                        return nil, err
×
1670
                }
1671

1672
                log.Infof("Pruned unconnected node %x from channel graph",
65✔
1673
                        nodePubKey[:])
65✔
1674

65✔
1675
                pruned = append(pruned, nodePubKey)
65✔
1676
        }
1677

1678
        if len(pruned) > 0 {
305✔
1679
                log.Infof("Pruned %v unconnected nodes from the channel graph",
49✔
1680
                        len(pruned))
49✔
1681
        }
49✔
1682

1683
        return pruned, err
256✔
1684
}
1685

1686
// DisconnectBlockAtHeight is used to indicate that the block specified
// by the passed height has been disconnected from the main chain. This
// will "rewind" the graph back to the height below, deleting channels
// that are no longer confirmed from the graph. The prune log will be
// set to the last prune height valid for the remaining chain.
// Channels that were removed from the graph resulting from the
// disconnected block are returned.
func (c *KVStore) DisconnectBlockAtHeight(height uint32) (
	[]*models.ChannelEdgeInfo, error) {

	// Every channel having a ShortChannelID starting at 'height'
	// will no longer be confirmed.
	startShortChanID := lnwire.ShortChannelID{
		BlockHeight: height,
	}

	// Delete everything after this height from the db up until the
	// SCID alias range.
	endShortChanID := aliasmgr.StartingAlias

	// The block height will be the 3 first bytes of the channel IDs.
	var chanIDStart [8]byte
	byteOrder.PutUint64(chanIDStart[:], startShortChanID.ToUint64())
	var chanIDEnd [8]byte
	byteOrder.PutUint64(chanIDEnd[:], endShortChanID.ToUint64())

	// Hold the cache lock for the whole operation so the caches stay in
	// sync with the DB mutation.
	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()

	// Keep track of the channels that are removed from the graph.
	var removedChans []*models.ChannelEdgeInfo

	if err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
		edges, err := tx.CreateTopLevelBucket(edgeBucket)
		if err != nil {
			return err
		}
		edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket)
		if err != nil {
			return err
		}
		chanIndex, err := edges.CreateBucketIfNotExists(
			channelPointBucket,
		)
		if err != nil {
			return err
		}
		zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
		if err != nil {
			return err
		}

		// Scan from chanIDStart to chanIDEnd, deleting every
		// found edge.
		// NOTE: we must delete the edges after the cursor loop, since
		// modifying the bucket while traversing is not safe.
		// NOTE: We use a < comparison in bytes.Compare instead of <=
		// so that the StartingAlias itself isn't deleted.
		var keys [][]byte
		cursor := edgeIndex.ReadWriteCursor()

		//nolint:ll
		for k, _ := cursor.Seek(chanIDStart[:]); k != nil &&
			bytes.Compare(k, chanIDEnd[:]) < 0; k, _ = cursor.Next() {
			keys = append(keys, k)
		}

		for _, k := range keys {
			edgeInfo, err := c.delChannelEdgeUnsafe(
				edges, edgeIndex, chanIndex, zombieIndex,
				k, false, false,
			)
			if err != nil && !errors.Is(err, ErrEdgeNotFound) {
				return err
			}

			removedChans = append(removedChans, edgeInfo)
		}

		// Delete all the entries in the prune log having a height
		// greater or equal to the block disconnected.
		metaBucket, err := tx.CreateTopLevelBucket(graphMetaBucket)
		if err != nil {
			return err
		}

		pruneBucket, err := metaBucket.CreateBucketIfNotExists(
			pruneLogBucket,
		)
		if err != nil {
			return err
		}

		var pruneKeyStart [4]byte
		byteOrder.PutUint32(pruneKeyStart[:], height)

		var pruneKeyEnd [4]byte
		byteOrder.PutUint32(pruneKeyEnd[:], math.MaxUint32)

		// To avoid modifying the bucket while traversing, we delete
		// the keys in a second loop.
		var pruneKeys [][]byte
		pruneCursor := pruneBucket.ReadWriteCursor()
		//nolint:ll
		for k, _ := pruneCursor.Seek(pruneKeyStart[:]); k != nil &&
			bytes.Compare(k, pruneKeyEnd[:]) <= 0; k, _ = pruneCursor.Next() {
			pruneKeys = append(pruneKeys, k)
		}

		for _, k := range pruneKeys {
			if err := pruneBucket.Delete(k); err != nil {
				return err
			}
		}

		return nil
	}, func() {
		// Reset the accumulator so a retried transaction starts from
		// a clean slate.
		removedChans = nil
	}); err != nil {
		return nil, err
	}

	// Evict the removed channels from both caches now that the DB update
	// has committed.
	for _, channel := range removedChans {
		c.rejectCache.remove(channel.ChannelID)
		c.chanCache.remove(channel.ChannelID)
	}

	return removedChans, nil
}
1815

1816
// PruneTip returns the block height and hash of the latest block that has been
1817
// used to prune channels in the graph. Knowing the "prune tip" allows callers
1818
// to tell if the graph is currently in sync with the current best known UTXO
1819
// state.
1820
func (c *KVStore) PruneTip() (*chainhash.Hash, uint32, error) {
56✔
1821
        var (
56✔
1822
                tipHash   chainhash.Hash
56✔
1823
                tipHeight uint32
56✔
1824
        )
56✔
1825

56✔
1826
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
112✔
1827
                graphMeta := tx.ReadBucket(graphMetaBucket)
56✔
1828
                if graphMeta == nil {
56✔
1829
                        return ErrGraphNotFound
×
1830
                }
×
1831
                pruneBucket := graphMeta.NestedReadBucket(pruneLogBucket)
56✔
1832
                if pruneBucket == nil {
56✔
1833
                        return ErrGraphNeverPruned
×
1834
                }
×
1835

1836
                pruneCursor := pruneBucket.ReadCursor()
56✔
1837

56✔
1838
                // The prune key with the largest block height will be our
56✔
1839
                // prune tip.
56✔
1840
                k, v := pruneCursor.Last()
56✔
1841
                if k == nil {
77✔
1842
                        return ErrGraphNeverPruned
21✔
1843
                }
21✔
1844

1845
                // Once we have the prune tip, the value will be the block hash,
1846
                // and the key the block height.
1847
                copy(tipHash[:], v)
38✔
1848
                tipHeight = byteOrder.Uint32(k)
38✔
1849

38✔
1850
                return nil
38✔
1851
        }, func() {})
56✔
1852
        if err != nil {
77✔
1853
                return nil, 0, err
21✔
1854
        }
21✔
1855

1856
        return &tipHash, tipHeight, nil
38✔
1857
}
1858

1859
// DeleteChannelEdges removes edges with the given channel IDs from the
// database and marks them as zombies. This ensures that we're unable to re-add
// it to our database once again. If an edge does not exist within the
// database, then ErrEdgeNotFound will be returned. If strictZombiePruning is
// true, then when we mark these edges as zombies, we'll set up the keys such
// that we require the node that failed to send the fresh update to be the one
// that resurrects the channel from its zombie state. The markZombie bool
// denotes whether or not to mark the channel as a zombie.
func (c *KVStore) DeleteChannelEdges(strictZombiePruning, markZombie bool,
	chanIDs ...uint64) ([]*models.ChannelEdgeInfo, error) {

	// TODO(roasbeef): possibly delete from node bucket if node has no more
	// channels
	// TODO(roasbeef): don't delete both edges?

	// The cache lock is held across the DB transaction and the cache
	// eviction below so both stay consistent.
	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()

	var infos []*models.ChannelEdgeInfo
	err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
		edges := tx.ReadWriteBucket(edgeBucket)
		if edges == nil {
			return ErrEdgeNotFound
		}
		edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrEdgeNotFound
		}
		chanIndex := edges.NestedReadWriteBucket(channelPointBucket)
		if chanIndex == nil {
			return ErrEdgeNotFound
		}
		nodes := tx.ReadWriteBucket(nodeBucket)
		if nodes == nil {
			return ErrGraphNodeNotFound
		}
		zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
		if err != nil {
			return err
		}

		// Delete each requested channel in turn; unlike PruneGraph,
		// a missing edge here is a hard error.
		var rawChanID [8]byte
		for _, chanID := range chanIDs {
			byteOrder.PutUint64(rawChanID[:], chanID)
			edgeInfo, err := c.delChannelEdgeUnsafe(
				edges, edgeIndex, chanIndex, zombieIndex,
				rawChanID[:], markZombie, strictZombiePruning,
			)
			if err != nil {
				return err
			}

			infos = append(infos, edgeInfo)
		}

		return nil
	}, func() {
		// Reset the accumulator so a retried transaction starts from
		// a clean slate.
		infos = nil
	})
	if err != nil {
		return nil, err
	}

	// Evict the deleted channels from both caches.
	for _, chanID := range chanIDs {
		c.rejectCache.remove(chanID)
		c.chanCache.remove(chanID)
	}

	return infos, nil
}
1929

1930
// ChannelID attempt to lookup the 8-byte compact channel ID which maps to the
1931
// passed channel point (outpoint). If the passed channel doesn't exist within
1932
// the database, then ErrEdgeNotFound is returned.
1933
func (c *KVStore) ChannelID(chanPoint *wire.OutPoint) (uint64, error) {
4✔
1934
        var chanID uint64
4✔
1935
        if err := kvdb.View(c.db, func(tx kvdb.RTx) error {
8✔
1936
                var err error
4✔
1937
                chanID, err = getChanID(tx, chanPoint)
4✔
1938
                return err
4✔
1939
        }, func() {
8✔
1940
                chanID = 0
4✔
1941
        }); err != nil {
7✔
1942
                return 0, err
3✔
1943
        }
3✔
1944

1945
        return chanID, nil
4✔
1946
}
1947

1948
// getChanID returns the assigned channel ID for a given channel point.
1949
func getChanID(tx kvdb.RTx, chanPoint *wire.OutPoint) (uint64, error) {
4✔
1950
        var b bytes.Buffer
4✔
1951
        if err := WriteOutpoint(&b, chanPoint); err != nil {
4✔
1952
                return 0, err
×
1953
        }
×
1954

1955
        edges := tx.ReadBucket(edgeBucket)
4✔
1956
        if edges == nil {
4✔
1957
                return 0, ErrGraphNoEdgesFound
×
1958
        }
×
1959
        chanIndex := edges.NestedReadBucket(channelPointBucket)
4✔
1960
        if chanIndex == nil {
4✔
1961
                return 0, ErrGraphNoEdgesFound
×
1962
        }
×
1963

1964
        chanIDBytes := chanIndex.Get(b.Bytes())
4✔
1965
        if chanIDBytes == nil {
7✔
1966
                return 0, ErrEdgeNotFound
3✔
1967
        }
3✔
1968

1969
        chanID := byteOrder.Uint64(chanIDBytes)
4✔
1970

4✔
1971
        return chanID, nil
4✔
1972
}
1973

1974
// TODO(roasbeef): allow updates to use Batch?
1975

1976
// HighestChanID returns the "highest" known channel ID in the channel graph.
1977
// This represents the "newest" channel from the PoV of the chain. This method
1978
// can be used by peers to quickly determine if they're graphs are in sync.
1979
func (c *KVStore) HighestChanID(_ context.Context) (uint64, error) {
6✔
1980
        var cid uint64
6✔
1981

6✔
1982
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
12✔
1983
                edges := tx.ReadBucket(edgeBucket)
6✔
1984
                if edges == nil {
6✔
1985
                        return ErrGraphNoEdgesFound
×
1986
                }
×
1987
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
6✔
1988
                if edgeIndex == nil {
6✔
1989
                        return ErrGraphNoEdgesFound
×
1990
                }
×
1991

1992
                // In order to find the highest chan ID, we'll fetch a cursor
1993
                // and use that to seek to the "end" of our known rage.
1994
                cidCursor := edgeIndex.ReadCursor()
6✔
1995

6✔
1996
                lastChanID, _ := cidCursor.Last()
6✔
1997

6✔
1998
                // If there's no key, then this means that we don't actually
6✔
1999
                // know of any channels, so we'll return a predicable error.
6✔
2000
                if lastChanID == nil {
10✔
2001
                        return ErrGraphNoEdgesFound
4✔
2002
                }
4✔
2003

2004
                // Otherwise, we'll de serialize the channel ID and return it
2005
                // to the caller.
2006
                cid = byteOrder.Uint64(lastChanID)
5✔
2007

5✔
2008
                return nil
5✔
2009
        }, func() {
6✔
2010
                cid = 0
6✔
2011
        })
6✔
2012
        if err != nil && !errors.Is(err, ErrGraphNoEdgesFound) {
6✔
2013
                return 0, err
×
2014
        }
×
2015

2016
        return cid, nil
6✔
2017
}
2018

2019
// ChannelEdge represents the complete set of information for a channel edge
// in the known channel graph. This struct couples the core information of the
// edge as well as each of the known advertised edge policies.
type ChannelEdge struct {
	// Info contains all the static information describing the channel.
	Info *models.ChannelEdgeInfo

	// Policy1 points to the "first" edge policy of the channel containing
	// the dynamic information required to properly route through the edge.
	Policy1 *models.ChannelEdgePolicy

	// Policy2 points to the "second" edge policy of the channel containing
	// the dynamic information required to properly route through the edge.
	Policy2 *models.ChannelEdgePolicy

	// Node1 is "node 1" in the channel. This is the node that would have
	// produced Policy1 if it exists.
	Node1 *models.Node

	// Node2 is "node 2" in the channel. This is the node that would have
	// produced Policy2 if it exists.
	Node2 *models.Node
}
2042

2043
// updateChanCacheBatch updates the channel cache with multiple edges at once.
2044
// This method acquires the cache lock only once for the entire batch.
2045
func (c *KVStore) updateChanCacheBatch(edgesToCache map[uint64]ChannelEdge) {
189✔
2046
        if len(edgesToCache) == 0 {
331✔
2047
                return
142✔
2048
        }
142✔
2049

2050
        c.cacheMu.Lock()
50✔
2051
        defer c.cacheMu.Unlock()
50✔
2052

50✔
2053
        for cid, edge := range edgesToCache {
171✔
2054
                c.chanCache.insert(cid, edge)
121✔
2055
        }
121✔
2056
}
2057

2058
// isEmptyGraphError returns true if the error indicates the graph database
2059
// is empty (no edges or nodes exist). These errors are expected when the
2060
// graph is first created or has no data.
2061
func isEmptyGraphError(err error) bool {
×
2062
        return errors.Is(err, ErrGraphNoEdgesFound) ||
×
2063
                errors.Is(err, ErrGraphNodesNotFound)
×
2064
}
×
2065

2066
// chanUpdatesIterator holds the state for chunked channel update iteration.
type chanUpdatesIterator struct {
	// batchSize is the amount of channel updates to read at a single time.
	batchSize int

	// startTime is the start time of the iteration request.
	startTime time.Time

	// endTime is the end time of the iteration request.
	endTime time.Time

	// edgesSeen is used to dedup edges.
	edgesSeen map[uint64]struct{}

	// edgesToCache houses all the edges that we read from the disk which
	// aren't yet cached. This is used to update the cache after a batch
	// chunk.
	edgesToCache map[uint64]ChannelEdge

	// lastSeenKey is the last index key seen. This is used to resume
	// iteration.
	lastSeenKey []byte

	// hits is the number of cache hits.
	hits int

	// total is the total number of edges requested.
	total int
}
2095

2096
// newChanUpdatesIterator makes a new chan updates iterator.
2097
func newChanUpdatesIterator(batchSize int,
2098
        startTime, endTime time.Time) *chanUpdatesIterator {
151✔
2099

151✔
2100
        return &chanUpdatesIterator{
151✔
2101
                batchSize:    batchSize,
151✔
2102
                startTime:    startTime,
151✔
2103
                endTime:      endTime,
151✔
2104
                edgesSeen:    make(map[uint64]struct{}),
151✔
2105
                edgesToCache: make(map[uint64]ChannelEdge),
151✔
2106
                lastSeenKey:  nil,
151✔
2107
        }
151✔
2108
}
151✔
2109

2110
// fetchNextChanUpdateBatch retrieves the next batch of channel edges within the
// horizon. Returns the batch, whether there are more edges, and any error.
//
// The edgeUpdateIndex key layout is [8 bytes update timestamp][8 bytes channel
// ID], so seeking to the serialized start time positions the cursor at the
// first update inside the horizon. When state.lastSeenKey is set, iteration
// resumes from the entry after it instead. Edges read from disk (cache
// misses) are recorded in state.edgesToCache so the caller can populate the
// channel cache once the batch has been delivered.
func (c *KVStore) fetchNextChanUpdateBatch(
	state *chanUpdatesIterator) ([]ChannelEdge, bool, error) {

	var (
		batch   []ChannelEdge
		hasMore bool
	)
	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}

		edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrGraphNoEdgesFound
		}

		edgeUpdateIndex := edges.NestedReadBucket(edgeUpdateIndexBucket)
		if edgeUpdateIndex == nil {
			return ErrGraphNoEdgesFound
		}
		nodes := tx.ReadBucket(nodeBucket)
		if nodes == nil {
			return ErrGraphNodesNotFound
		}

		// With all the relevant buckets read, we'll now create a fresh
		// read cursor.
		updateCursor := edgeUpdateIndex.ReadCursor()

		// We'll now use the start and end time to create the keys that
		// we'll use to seek.
		var startTimeBytes, endTimeBytes [8 + 8]byte
		byteOrder.PutUint64(
			startTimeBytes[:8], uint64(state.startTime.Unix()),
		)
		byteOrder.PutUint64(
			endTimeBytes[:8], uint64(state.endTime.Unix()),
		)

		var indexKey []byte

		// If we left off earlier, then we'll use that key as the
		// starting point.
		switch {
		case state.lastSeenKey != nil:
			// Seek to the last seen key, moving to the key right
			// after it.
			indexKey, _ = updateCursor.Seek(state.lastSeenKey)

			if bytes.Equal(indexKey, state.lastSeenKey) {
				indexKey, _ = updateCursor.Next()
			}

		// Otherwise, we'll move to the very start of the time range.
		default:
			indexKey, _ = updateCursor.Seek(startTimeBytes[:])
		}

		// TODO(roasbeef): iterate the channel graph cache instead w/ a
		// treap ordering?

		// Now we'll read items up to the batch size, exiting early if
		// we exceed the ending time.
		for len(batch) < state.batchSize && indexKey != nil {
			// If we're at the end, then we'll break out now.
			if bytes.Compare(indexKey, endTimeBytes[:]) > 0 {
				break
			}

			// The channel ID is the trailing 8 bytes of the index
			// key (after the 8-byte timestamp prefix).
			chanID := indexKey[8:]
			chanIDInt := byteOrder.Uint64(chanID)

			// Allocate lastSeenKey once; every index key has the
			// same fixed 16-byte layout, so the copy below always
			// overwrites the full key.
			if state.lastSeenKey == nil {
				state.lastSeenKey = make([]byte, len(indexKey))
			}
			copy(state.lastSeenKey, indexKey)

			// If we've seen this channel ID already, then we'll
			// skip it.
			if _, ok := state.edgesSeen[chanIDInt]; ok {
				indexKey, _ = updateCursor.Next()
				continue
			}

			// Before we read the edge info, we'll see if this
			// element is already in the cache or not.
			c.cacheMu.RLock()
			if channel, ok := c.chanCache.get(chanIDInt); ok {
				state.edgesSeen[chanIDInt] = struct{}{}

				batch = append(batch, channel)

				state.hits++
				state.total++

				indexKey, _ = updateCursor.Next()

				c.cacheMu.RUnlock()

				continue
			}
			c.cacheMu.RUnlock()

			// The edge wasn't in the cache, so we'll fetch it along
			// w/ the edge policies and nodes.
			edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID)
			if err != nil {
				return fmt.Errorf("unable to fetch info "+
					"for edge with chan_id=%v: %v",
					chanIDInt, err)
			}
			edge1, edge2, err := fetchChanEdgePolicies(
				edgeIndex, edges, chanID,
			)
			if err != nil {
				return fmt.Errorf("unable to fetch "+
					"policies for edge with chan_id=%v: %v",
					chanIDInt, err)
			}
			node1, err := fetchLightningNode(
				nodes, edgeInfo.NodeKey1Bytes[:],
			)
			if err != nil {
				return err
			}
			node2, err := fetchLightningNode(
				nodes, edgeInfo.NodeKey2Bytes[:],
			)
			if err != nil {
				return err
			}

			// Now we have all the information we need to build the
			// channel edge.
			channel := ChannelEdge{
				Info:    &edgeInfo,
				Policy1: edge1,
				Policy2: edge2,
				Node1:   &node1,
				Node2:   &node2,
			}

			// Record the edge both as seen (for dedup) and as a
			// cache candidate for the caller to flush later.
			state.edgesSeen[chanIDInt] = struct{}{}
			state.edgesToCache[chanIDInt] = channel

			batch = append(batch, channel)

			state.total++

			// Advance the iterator to the next entry.
			indexKey, _ = updateCursor.Next()
		}

		// If we haven't yet crossed the endTimeBytes, then we still
		// have more entries to deliver.
		if indexKey != nil &&
			bytes.Compare(indexKey, endTimeBytes[:]) <= 0 {

			hasMore = true
		}

		return nil
	}, func() {
		// Reset closure: drop any partial state so a retried
		// transaction starts from a clean slate.
		batch = nil
		hasMore = false
	})
	if err != nil {
		return nil, false, err
	}

	return batch, hasMore, nil
}
2286

2287
// ChanUpdatesInHorizon returns all the known channel edges which have at least
// one edge that has an update timestamp within the specified horizon.
//
// The result is a lazy sequence: edges are read from disk in batches (sized
// via the iterator options) inside short-lived read transactions, and each
// batch is yielded before the next one is fetched. If the consumer stops the
// iteration early, edges already read in the current batch are not flushed to
// the channel cache (the cache update happens only after a fully yielded
// batch). On a non-"empty graph" database error, the error is yielded once
// and iteration ends.
func (c *KVStore) ChanUpdatesInHorizon(startTime, endTime time.Time,
	opts ...IteratorOption) iter.Seq2[ChannelEdge, error] {

	// Apply any caller-supplied iterator options on top of the defaults.
	cfg := defaultIteratorConfig()
	for _, opt := range opts {
		opt(cfg)
	}

	return func(yield func(ChannelEdge, error) bool) {
		iterState := newChanUpdatesIterator(
			cfg.chanUpdateIterBatchSize, startTime, endTime,
		)

		for {
			// At the top of the loop, we'll read the next batch
			// chunk from disk. We'll also determine if we have any
			// more entries after this or not.
			batch, hasMore, err := c.fetchNextChanUpdateBatch(
				iterState,
			)
			if err != nil {
				// These errors just mean the graph is empty,
				// which is OK.
				if !isEmptyGraphError(err) {
					log.Errorf("ChanUpdatesInHorizon "+
						"batch error: %v", err)

					yield(ChannelEdge{}, err)

					return
				}
				// Continue with empty batch.
			}

			// We'll now yield each edge that we just read. If yield
			// returns false, then that means that we'll exit early.
			for _, edge := range batch {
				if !yield(edge, nil) {
					return
				}
			}

			// Update cache after successful batch yield, then
			// start a fresh cache-candidate map for the next
			// batch.
			c.updateChanCacheBatch(iterState.edgesToCache)
			iterState.edgesToCache = make(map[uint64]ChannelEdge)

			// If we're done, then we can just break out here now.
			if !hasMore || len(batch) == 0 {
				break
			}
		}

		if iterState.total > 0 {
			log.Tracef("ChanUpdatesInHorizon hit percentage: "+
				"%.2f (%d/%d)", float64(iterState.hits)*100/
				float64(iterState.total), iterState.hits,
				iterState.total)
		} else {
			log.Tracef("ChanUpdatesInHorizon returned no edges "+
				"in horizon (%s, %s)", startTime, endTime)
		}
	}
}
2353

2354
// nodeUpdatesIterator maintains state for iterating through node updates.
//
// Iterator Lifecycle:
// 1. Initialize state with start/end time, batch size, and filtering options.
// 2. Fetch batch using pagination cursor (lastSeenKey).
// 3. Filter nodes if publicNodesOnly is set.
// 4. Update lastSeenKey to the last processed node's index key.
// 5. Repeat until we exceed endTime or no more nodes exist.
type nodeUpdatesIterator struct {
	// batchSize is the amount of node updates to read at a single time.
	batchSize int

	// startTime is the start time of the iteration request.
	startTime time.Time

	// endTime is the end time of the iteration request.
	endTime time.Time

	// lastSeenKey is the last index key seen. This is used to resume
	// iteration. The key layout is [8 bytes timestamp][33 bytes node
	// pubkey].
	lastSeenKey []byte

	// publicNodesOnly filters to only return public nodes if true.
	publicNodesOnly bool

	// total tracks total nodes processed.
	total int
}
2382

2383
// newNodeUpdatesIterator makes a new node updates iterator.
2384
func newNodeUpdatesIterator(batchSize int, startTime, endTime time.Time,
2385
        publicNodesOnly bool) *nodeUpdatesIterator {
54✔
2386

54✔
2387
        return &nodeUpdatesIterator{
54✔
2388
                batchSize:       batchSize,
54✔
2389
                startTime:       startTime,
54✔
2390
                endTime:         endTime,
54✔
2391
                lastSeenKey:     nil,
54✔
2392
                publicNodesOnly: publicNodesOnly,
54✔
2393
        }
54✔
2394
}
54✔
2395

2396
// fetchNextNodeBatch fetches the next batch of node announcements using the
2397
// iterator state.
2398
func (c *KVStore) fetchNextNodeBatch(
2399
        state *nodeUpdatesIterator) ([]models.Node, bool, error) {
113✔
2400

113✔
2401
        var (
113✔
2402
                nodeBatch []models.Node
113✔
2403
                hasMore   bool
113✔
2404
        )
113✔
2405

113✔
2406
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
226✔
2407
                nodes := tx.ReadBucket(nodeBucket)
113✔
2408
                if nodes == nil {
113✔
2409
                        return ErrGraphNodesNotFound
×
2410
                }
×
2411
                ourPubKey := nodes.Get(sourceKey)
113✔
2412
                if ourPubKey == nil && state.publicNodesOnly {
113✔
2413
                        // If we're filtering for public nodes only but don't
×
2414
                        // have a source node set, we can't determine if nodes
×
2415
                        // are public. A node is considered public if it has at
×
2416
                        // least one channel with our node (the source node).
×
2417
                        return ErrSourceNodeNotSet
×
2418
                }
×
2419
                nodeUpdateIndex := nodes.NestedReadBucket(nodeUpdateIndexBucket)
113✔
2420
                if nodeUpdateIndex == nil {
113✔
2421
                        return ErrGraphNodesNotFound
×
2422
                }
×
2423

2424
                // We'll now obtain a cursor to perform a range query within the
2425
                // index to find all node announcements within the horizon. The
2426
                // nodeUpdateIndex key format is: [8 bytes timestamp][33 bytes
2427
                // node pubkey] This allows efficient range queries by time
2428
                // while maintaining a stable sort order for nodes with the same
2429
                // timestamp.
2430
                updateCursor := nodeUpdateIndex.ReadCursor()
113✔
2431

113✔
2432
                var startTimeBytes, endTimeBytes [8 + 33]byte
113✔
2433
                byteOrder.PutUint64(
113✔
2434
                        startTimeBytes[:8], uint64(state.startTime.Unix()),
113✔
2435
                )
113✔
2436
                byteOrder.PutUint64(
113✔
2437
                        endTimeBytes[:8], uint64(state.endTime.Unix()),
113✔
2438
                )
113✔
2439

113✔
2440
                // If we have a last seen key (existing iteration), then that'll
113✔
2441
                // be our starting point. Otherwise, we'll seek to the start
113✔
2442
                // time.
113✔
2443
                var indexKey []byte
113✔
2444
                if state.lastSeenKey != nil {
172✔
2445
                        indexKey, _ = updateCursor.Seek(state.lastSeenKey)
59✔
2446

59✔
2447
                        if bytes.Equal(indexKey, state.lastSeenKey) {
118✔
2448
                                indexKey, _ = updateCursor.Next()
59✔
2449
                        }
59✔
2450
                } else {
54✔
2451
                        indexKey, _ = updateCursor.Seek(startTimeBytes[:])
54✔
2452
                }
54✔
2453

2454
                // Now we'll read items up to the batch size, exiting early if
2455
                // we exceed the ending time.
2456
                var lastProcessedKey []byte
113✔
2457
                for len(nodeBatch) < state.batchSize && indexKey != nil {
696✔
2458
                        // Extract the timestamp from the index key (first 8
583✔
2459
                        // bytes). Only compare timestamps, not the full key
583✔
2460
                        // with pubkey.
583✔
2461
                        keyTimestamp := byteOrder.Uint64(indexKey[:8])
583✔
2462
                        endTimestamp := uint64(state.endTime.Unix())
583✔
2463
                        if keyTimestamp > endTimestamp {
597✔
2464
                                break
14✔
2465
                        }
2466

2467
                        nodePub := indexKey[8:]
569✔
2468
                        node, err := fetchLightningNode(nodes, nodePub)
569✔
2469
                        if err != nil {
569✔
2470
                                return err
×
2471
                        }
×
2472

2473
                        if state.publicNodesOnly {
572✔
2474
                                nodeIsPublic, err := c.isPublic(
3✔
2475
                                        tx, node.PubKeyBytes, ourPubKey,
3✔
2476
                                )
3✔
2477
                                if err != nil {
3✔
2478
                                        return err
×
2479
                                }
×
2480
                                if !nodeIsPublic {
6✔
2481
                                        indexKey, _ = updateCursor.Next()
3✔
2482
                                        continue
3✔
2483
                                }
2484
                        }
2485

2486
                        nodeBatch = append(nodeBatch, node)
569✔
2487
                        state.total++
569✔
2488

569✔
2489
                        // Remember the last key we actually processed. We'll
569✔
2490
                        // use this to update the last seen key below.
569✔
2491
                        if lastProcessedKey == nil {
665✔
2492
                                lastProcessedKey = make([]byte, len(indexKey))
96✔
2493
                        }
96✔
2494
                        copy(lastProcessedKey, indexKey)
569✔
2495

569✔
2496
                        // Advance the iterator to the next entry.
569✔
2497
                        indexKey, _ = updateCursor.Next()
569✔
2498
                }
2499

2500
                // If we haven't yet crossed the endTime, then we still
2501
                // have more entries to deliver.
2502
                if indexKey != nil {
198✔
2503
                        keyTimestamp := byteOrder.Uint64(indexKey[:8])
85✔
2504
                        endTimestamp := uint64(state.endTime.Unix())
85✔
2505
                        if keyTimestamp <= endTimestamp {
150✔
2506
                                hasMore = true
65✔
2507
                        }
65✔
2508
                }
2509

2510
                // Update the cursor to the last key we actually processed.
2511
                if lastProcessedKey != nil {
209✔
2512
                        if state.lastSeenKey == nil {
133✔
2513
                                state.lastSeenKey = make(
37✔
2514
                                        []byte, len(lastProcessedKey),
37✔
2515
                                )
37✔
2516
                        }
37✔
2517
                        copy(state.lastSeenKey, lastProcessedKey)
96✔
2518
                }
2519

2520
                return nil
113✔
2521
        }, func() {
113✔
2522
                nodeBatch = nil
113✔
2523
        })
113✔
2524
        switch {
113✔
2525
        case errors.Is(err, ErrGraphNoEdgesFound):
×
2526
                fallthrough
×
2527
        case errors.Is(err, ErrGraphNodesNotFound):
×
2528
                break
×
2529

2530
        case err != nil:
×
2531
                return nil, false, err
×
2532
        }
2533

2534
        return nodeBatch, hasMore, nil
113✔
2535
}
2536

2537
// NodeUpdatesInHorizon returns all the known lightning node which have an
// update timestamp within the passed range.
//
// The result is a lazy sequence: nodes are read from disk in batches (sized
// via the iterator options) inside short-lived read transactions and yielded
// one at a time. Iteration stops early if the caller's yield function returns
// false. On a database error, the error is yielded once and iteration ends.
func (c *KVStore) NodeUpdatesInHorizon(startTime,
	endTime time.Time,
	opts ...IteratorOption) iter.Seq2[models.Node, error] {

	// Apply any caller-supplied iterator options on top of the defaults.
	cfg := defaultIteratorConfig()
	for _, opt := range opts {
		opt(cfg)
	}

	return func(yield func(models.Node, error) bool) {
		// Initialize iterator state.
		state := newNodeUpdatesIterator(
			cfg.nodeUpdateIterBatchSize,
			startTime, endTime,
			cfg.iterPublicNodes,
		)

		for {
			// Read the next batch of node announcements from disk,
			// resuming from the iterator's pagination cursor.
			nodeAnns, hasMore, err := c.fetchNextNodeBatch(state)
			if err != nil {
				log.Errorf("unable to read node updates in "+
					"horizon: %v", err)

				yield(models.Node{}, err)

				return
			}

			for _, node := range nodeAnns {
				if !yield(node, nil) {
					return
				}
			}

			// If we're done, then we can just break out here now.
			if !hasMore || len(nodeAnns) == 0 {
				break
			}
		}
	}
}
2581

2582
// FilterKnownChanIDs takes a set of channel IDs and return the subset of chan
// ID's that we don't know and are not known zombies of the passed set. In other
// words, we perform a set difference of our set of chan ID's and the ones
// passed in. This method can be used by callers to determine the set of
// channels another peer knows of that we don't. The ChannelUpdateInfos for the
// known zombies is also returned.
func (c *KVStore) FilterKnownChanIDs(chansInfo []ChannelUpdateInfo) ([]uint64,
	[]ChannelUpdateInfo, error) {

	var (
		newChanIDs   []uint64
		knownZombies []ChannelUpdateInfo
	)

	// NOTE(review): this takes the exclusive cache lock even though the
	// body below only reads — presumably to serialize with writers that
	// mutate the edge/zombie state under the same lock; confirm before
	// relaxing this to RLock.
	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()

	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrGraphNoEdgesFound
		}

		// Fetch the zombie index, it may not exist if no edges have
		// ever been marked as zombies. If the index has been
		// initialized, we will use it later to skip known zombie edges.
		zombieIndex := edges.NestedReadBucket(zombieBucket)

		// We'll run through the set of chanIDs and collate only the
		// set of channel that are unable to be found within our db.
		var cidBytes [8]byte
		for _, info := range chansInfo {
			scid := info.ShortChannelID.ToUint64()
			byteOrder.PutUint64(cidBytes[:], scid)

			// If the edge is already known, skip it.
			if v := edgeIndex.Get(cidBytes[:]); v != nil {
				continue
			}

			// If the edge is a known zombie, skip it.
			if zombieIndex != nil {
				isZombie, _, _ := isZombieEdge(
					zombieIndex, scid,
				)

				if isZombie {
					knownZombies = append(
						knownZombies, info,
					)

					continue
				}
			}

			// Neither known nor zombie: this is a channel the peer
			// knows about that we don't.
			newChanIDs = append(newChanIDs, scid)
		}

		return nil
	}, func() {
		// Reset closure: drop partial results accumulated by a
		// previous (e.g. retried) transaction attempt.
		newChanIDs = nil
		knownZombies = nil
	})
	switch {
	// If we don't know of any edges yet, then we'll return the entire set
	// of chan IDs specified.
	case errors.Is(err, ErrGraphNoEdgesFound):
		ogChanIDs := make([]uint64, len(chansInfo))
		for i, info := range chansInfo {
			ogChanIDs[i] = info.ShortChannelID.ToUint64()
		}

		return ogChanIDs, nil, nil

	case err != nil:
		return nil, nil, err
	}

	return newChanIDs, knownZombies, nil
}
2666

2667
// ChannelUpdateInfo couples the SCID of a channel with the timestamps of the
// latest received channel updates for the channel.
type ChannelUpdateInfo struct {
	// ShortChannelID is the SCID identifier of the channel.
	ShortChannelID lnwire.ShortChannelID

	// Node1UpdateTimestamp is the timestamp of the latest received update
	// from the node 1 channel peer. This will be set to zero time if no
	// update has yet been received from this node. When constructed via
	// NewChannelUpdateInfo, a Go zero time is normalized to the unix
	// epoch (time.Unix(0, 0)).
	Node1UpdateTimestamp time.Time

	// Node2UpdateTimestamp is the timestamp of the latest received update
	// from the node 2 channel peer. This will be set to zero time if no
	// update has yet been received from this node. When constructed via
	// NewChannelUpdateInfo, a Go zero time is normalized to the unix
	// epoch (time.Unix(0, 0)).
	Node2UpdateTimestamp time.Time
}
2683

2684
// NewChannelUpdateInfo is a constructor which makes sure we initialize the
2685
// timestamps with zero seconds unix timestamp which equals
2686
// `January 1, 1970, 00:00:00 UTC` in case the value is `time.Time{}`.
2687
func NewChannelUpdateInfo(scid lnwire.ShortChannelID, node1Timestamp,
2688
        node2Timestamp time.Time) ChannelUpdateInfo {
199✔
2689

199✔
2690
        chanInfo := ChannelUpdateInfo{
199✔
2691
                ShortChannelID:       scid,
199✔
2692
                Node1UpdateTimestamp: node1Timestamp,
199✔
2693
                Node2UpdateTimestamp: node2Timestamp,
199✔
2694
        }
199✔
2695

199✔
2696
        if node1Timestamp.IsZero() {
388✔
2697
                chanInfo.Node1UpdateTimestamp = time.Unix(0, 0)
189✔
2698
        }
189✔
2699

2700
        if node2Timestamp.IsZero() {
388✔
2701
                chanInfo.Node2UpdateTimestamp = time.Unix(0, 0)
189✔
2702
        }
189✔
2703

2704
        return chanInfo
199✔
2705
}
2706

2707
// BlockChannelRange represents a range of channels for a given block height.
type BlockChannelRange struct {
	// Height is the height of the block all of the channels below were
	// included in.
	Height uint32

	// Channels is the list of channels identified by their short ID
	// representation known to us that were included in the block height
	// above. The list may include channel update timestamp information if
	// requested (see FilterChannelRange's withTimestamps parameter).
	Channels []ChannelUpdateInfo
}
2719

2720
// FilterChannelRange returns the channel ID's of all known channels which were
// mined in a block height within the passed range. The channel IDs are grouped
// by their common block height. This method can be used to quickly share with a
// peer the set of channels we know of within a particular range to catch them
// up after a period of time offline. If withTimestamps is true then the
// timestamp info of the latest received channel update messages of the channel
// will be included in the response.
func (c *KVStore) FilterChannelRange(startHeight,
	endHeight uint32, withTimestamps bool) ([]BlockChannelRange, error) {

	// The smallest possible SCID within the start block: all transaction
	// index/position bits are left at zero.
	startChanID := &lnwire.ShortChannelID{
		BlockHeight: startHeight,
	}

	// The largest possible SCID within the end block: the tx index (24
	// bits on the wire) and position are saturated so the scan below is
	// inclusive of every channel confirmed at endHeight.
	endChanID := lnwire.ShortChannelID{
		BlockHeight: endHeight,
		TxIndex:     math.MaxUint32 & 0x00ffffff,
		TxPosition:  math.MaxUint16,
	}

	// As we need to perform a range scan, we'll convert the starting and
	// ending height to their corresponding values when encoded using short
	// channel ID's.
	var chanIDStart, chanIDEnd [8]byte
	byteOrder.PutUint64(chanIDStart[:], startChanID.ToUint64())
	byteOrder.PutUint64(chanIDEnd[:], endChanID.ToUint64())

	var channelsPerBlock map[uint32][]ChannelUpdateInfo
	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrGraphNoEdgesFound
		}

		cursor := edgeIndex.ReadCursor()

		// We'll now iterate through the database, and find each
		// channel ID that resides within the specified range.
		//
		//nolint:ll
		for k, v := cursor.Seek(chanIDStart[:]); k != nil &&
			bytes.Compare(k, chanIDEnd[:]) <= 0; k, v = cursor.Next() {
			// Don't send alias SCIDs during gossip sync.
			edgeReader := bytes.NewReader(v)
			edgeInfo, err := deserializeChanEdgeInfo(edgeReader)
			if err != nil {
				return err
			}

			// Channels without an announcement proof are
			// unannounced and must not be shared during gossip
			// sync.
			if edgeInfo.AuthProof == nil {
				continue
			}

			// This channel ID rests within the target range, so
			// we'll add it to our returned set.
			rawCid := byteOrder.Uint64(k)
			cid := lnwire.NewShortChanIDFromInt(rawCid)

			// Start with epoch timestamps for both directions;
			// they are only filled in below when requested.
			chanInfo := NewChannelUpdateInfo(
				cid, time.Time{}, time.Time{},
			)

			if !withTimestamps {
				channelsPerBlock[cid.BlockHeight] = append(
					channelsPerBlock[cid.BlockHeight],
					chanInfo,
				)

				continue
			}

			node1Key, node2Key := computeEdgePolicyKeys(&edgeInfo)

			// Attempt to decode each direction's policy so we can
			// report the latest update timestamps. Policies with
			// unknown optional fields or extra TLV bytes are
			// tolerated rather than treated as fatal.
			rawPolicy := edges.Get(node1Key)
			if len(rawPolicy) != 0 {
				r := bytes.NewReader(rawPolicy)

				edge, err := deserializeChanEdgePolicyRaw(r)
				if err != nil && !errors.Is(
					err, ErrEdgePolicyOptionalFieldNotFound,
				) && !errors.Is(err, ErrParsingExtraTLVBytes) {

					return err
				}

				chanInfo.Node1UpdateTimestamp = edge.LastUpdate
			}

			rawPolicy = edges.Get(node2Key)
			if len(rawPolicy) != 0 {
				r := bytes.NewReader(rawPolicy)

				edge, err := deserializeChanEdgePolicyRaw(r)
				if err != nil && !errors.Is(
					err, ErrEdgePolicyOptionalFieldNotFound,
				) && !errors.Is(err, ErrParsingExtraTLVBytes) {

					return err
				}

				chanInfo.Node2UpdateTimestamp = edge.LastUpdate
			}

			channelsPerBlock[cid.BlockHeight] = append(
				channelsPerBlock[cid.BlockHeight], chanInfo,
			)
		}

		return nil
	}, func() {
		channelsPerBlock = make(map[uint32][]ChannelUpdateInfo)
	})

	switch {
	// If we don't know of any channels yet, then there's nothing to
	// filter, so we'll return an empty slice.
	case errors.Is(err, ErrGraphNoEdgesFound) || len(channelsPerBlock) == 0:
		return nil, nil

	case err != nil:
		return nil, err
	}

	// Return the channel ranges in ascending block height order.
	blocks := make([]uint32, 0, len(channelsPerBlock))
	for block := range channelsPerBlock {
		blocks = append(blocks, block)
	}
	sort.Slice(blocks, func(i, j int) bool {
		return blocks[i] < blocks[j]
	})

	channelRanges := make([]BlockChannelRange, 0, len(channelsPerBlock))
	for _, block := range blocks {
		channelRanges = append(channelRanges, BlockChannelRange{
			Height:   block,
			Channels: channelsPerBlock[block],
		})
	}

	return channelRanges, nil
}
2866

2867
// FetchChanInfos returns the set of channel edges that correspond to the passed
// channel ID's. If an edge in the query is unknown to the database, it will be
// skipped and the result will contain only those edges that exist at the time
// of the query. This can be used to respond to peer queries that are seeking to
// fill in gaps in their view of the channel graph.
func (c *KVStore) FetchChanInfos(chanIDs []uint64) ([]ChannelEdge, error) {
	// Passing a nil transaction lets fetchChanInfos open its own read-only
	// transaction for the lookup.
	return c.fetchChanInfos(nil, chanIDs)
}
7✔
2875

2876
// fetchChanInfos returns the set of channel edges that correspond to the passed
// channel ID's. If an edge in the query is unknown to the database, it will be
// skipped and the result will contain only those edges that exist at the time
// of the query. This can be used to respond to peer queries that are seeking to
// fill in gaps in their view of the channel graph.
//
// NOTE: An optional transaction may be provided. If none is provided, then a
// new one will be created.
func (c *KVStore) fetchChanInfos(tx kvdb.RTx, chanIDs []uint64) (
	[]ChannelEdge, error) {
	// TODO(roasbeef): sort cids?

	var (
		chanEdges []ChannelEdge
		// cidBytes is a scratch buffer reused for each channel ID's
		// big-endian key encoding.
		cidBytes [8]byte
	)

	fetchChanInfos := func(tx kvdb.RTx) error {
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrGraphNoEdgesFound
		}
		nodes := tx.ReadBucket(nodeBucket)
		if nodes == nil {
			return ErrGraphNotFound
		}

		for _, cid := range chanIDs {
			byteOrder.PutUint64(cidBytes[:], cid)

			// First, we'll fetch the static edge information. If
			// the edge is unknown, we will skip the edge and
			// continue gathering all known edges.
			edgeInfo, err := fetchChanEdgeInfo(
				edgeIndex, cidBytes[:],
			)
			switch {
			case errors.Is(err, ErrEdgeNotFound):
				continue
			case err != nil:
				return err
			}

			// With the static information obtained, we'll now
			// fetch the dynamic policy info.
			edge1, edge2, err := fetchChanEdgePolicies(
				edgeIndex, edges, cidBytes[:],
			)
			if err != nil {
				return err
			}

			// Resolve both endpoint nodes so callers receive a
			// fully populated ChannelEdge.
			node1, err := fetchLightningNode(
				nodes, edgeInfo.NodeKey1Bytes[:],
			)
			if err != nil {
				return err
			}

			node2, err := fetchLightningNode(
				nodes, edgeInfo.NodeKey2Bytes[:],
			)
			if err != nil {
				return err
			}

			chanEdges = append(chanEdges, ChannelEdge{
				Info:    &edgeInfo,
				Policy1: edge1,
				Policy2: edge2,
				Node1:   &node1,
				Node2:   &node2,
			})
		}

		return nil
	}

	// No transaction was supplied, so run the lookup within a fresh
	// read-only transaction. The reset closure clears any partial results
	// should the transaction be retried.
	if tx == nil {
		err := kvdb.View(c.db, fetchChanInfos, func() {
			chanEdges = nil
		})
		if err != nil {
			return nil, err
		}

		return chanEdges, nil
	}

	err := fetchChanInfos(tx)
	if err != nil {
		return nil, err
	}

	return chanEdges, nil
}
2976

2977
func delEdgeUpdateIndexEntry(edgesBucket kvdb.RwBucket, chanID uint64,
2978
        edge1, edge2 *models.ChannelEdgePolicy) error {
139✔
2979

139✔
2980
        // First, we'll fetch the edge update index bucket which currently
139✔
2981
        // stores an entry for the channel we're about to delete.
139✔
2982
        updateIndex := edgesBucket.NestedReadWriteBucket(edgeUpdateIndexBucket)
139✔
2983
        if updateIndex == nil {
139✔
2984
                // No edges in bucket, return early.
×
2985
                return nil
×
2986
        }
×
2987

2988
        // Now that we have the bucket, we'll attempt to construct a template
2989
        // for the index key: updateTime || chanid.
2990
        var indexKey [8 + 8]byte
139✔
2991
        byteOrder.PutUint64(indexKey[8:], chanID)
139✔
2992

139✔
2993
        // With the template constructed, we'll attempt to delete an entry that
139✔
2994
        // would have been created by both edges: we'll alternate the update
139✔
2995
        // times, as one may had overridden the other.
139✔
2996
        if edge1 != nil {
152✔
2997
                byteOrder.PutUint64(
13✔
2998
                        indexKey[:8], uint64(edge1.LastUpdate.Unix()),
13✔
2999
                )
13✔
3000
                if err := updateIndex.Delete(indexKey[:]); err != nil {
13✔
3001
                        return err
×
3002
                }
×
3003
        }
3004

3005
        // We'll also attempt to delete the entry that may have been created by
3006
        // the second edge.
3007
        if edge2 != nil {
154✔
3008
                byteOrder.PutUint64(
15✔
3009
                        indexKey[:8], uint64(edge2.LastUpdate.Unix()),
15✔
3010
                )
15✔
3011
                if err := updateIndex.Delete(indexKey[:]); err != nil {
15✔
3012
                        return err
×
3013
                }
×
3014
        }
3015

3016
        return nil
139✔
3017
}
3018

3019
// delChannelEdgeUnsafe deletes the edge with the given chanID from the graph
// cache. It then goes on to delete any policy info and edge info for this
// channel from the DB and finally, if isZombie is true, it will add an entry
// for this channel in the zombie index.
//
// NOTE: this method MUST only be called if the cacheMu has already been
// acquired.
func (c *KVStore) delChannelEdgeUnsafe(edges, edgeIndex, chanIndex,
	zombieIndex kvdb.RwBucket, chanID []byte, isZombie,
	strictZombie bool) (*models.ChannelEdgeInfo, error) {

	edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID)
	if err != nil {
		return nil, err
	}

	// We'll also remove the entry in the edge update index bucket before
	// we delete the edges themselves so we can access their last update
	// times.
	cid := byteOrder.Uint64(chanID)
	edge1, edge2, err := fetchChanEdgePolicies(edgeIndex, edges, chanID)
	if err != nil {
		return nil, err
	}
	err = delEdgeUpdateIndexEntry(edges, cid, edge1, edge2)
	if err != nil {
		return nil, err
	}

	// The edge key is of the format pubKey || chanID. First we construct
	// the latter half, populating the channel ID.
	var edgeKey [33 + 8]byte
	copy(edgeKey[33:], chanID)

	// With the latter half constructed, copy over the first public key to
	// delete the edge in this direction, then the second to delete the
	// edge in the opposite direction. The Get check avoids deleting keys
	// that were never written.
	copy(edgeKey[:33], edgeInfo.NodeKey1Bytes[:])
	if edges.Get(edgeKey[:]) != nil {
		if err := edges.Delete(edgeKey[:]); err != nil {
			return nil, err
		}
	}
	copy(edgeKey[:33], edgeInfo.NodeKey2Bytes[:])
	if edges.Get(edgeKey[:]) != nil {
		if err := edges.Delete(edgeKey[:]); err != nil {
			return nil, err
		}
	}

	// As part of deleting the edge we also remove all disabled entries
	// from the edgePolicyDisabledIndex bucket. We do that for both
	// directions.
	err = updateEdgePolicyDisabledIndex(edges, cid, false, false)
	if err != nil {
		return nil, err
	}
	err = updateEdgePolicyDisabledIndex(edges, cid, true, false)
	if err != nil {
		return nil, err
	}

	// With the edge data deleted, we can purge the information from the two
	// edge indexes: the SCID-keyed edge index and the outpoint-keyed
	// channel index.
	if err := edgeIndex.Delete(chanID); err != nil {
		return nil, err
	}
	var b bytes.Buffer
	if err := WriteOutpoint(&b, &edgeInfo.ChannelPoint); err != nil {
		return nil, err
	}
	if err := chanIndex.Delete(b.Bytes()); err != nil {
		return nil, err
	}

	// Finally, we'll mark the edge as a zombie within our index if it's
	// being removed due to the channel becoming a zombie. We do this to
	// ensure we don't store unnecessary data for spent channels.
	if !isZombie {
		return &edgeInfo, nil
	}

	// In strict zombie mode, derive the resurrection keys from the
	// policies' last update times so that only the lagging side's fresh
	// update can revive the channel; otherwise either node's key may
	// resurrect it.
	nodeKey1, nodeKey2 := edgeInfo.NodeKey1Bytes, edgeInfo.NodeKey2Bytes
	if strictZombie {
		var e1UpdateTime, e2UpdateTime *time.Time
		if edge1 != nil {
			e1UpdateTime = &edge1.LastUpdate
		}
		if edge2 != nil {
			e2UpdateTime = &edge2.LastUpdate
		}

		nodeKey1, nodeKey2 = makeZombiePubkeys(
			edgeInfo.NodeKey1Bytes, edgeInfo.NodeKey2Bytes,
			e1UpdateTime, e2UpdateTime,
		)
	}

	return &edgeInfo, markEdgeZombie(
		zombieIndex, byteOrder.Uint64(chanID), nodeKey1, nodeKey2,
	)
}
3121

3122
// makeZombiePubkeys derives the node pubkeys to store in the zombie index for a
3123
// particular pair of channel policies. The return values are one of:
3124
//  1. (pubkey1, pubkey2)
3125
//  2. (pubkey1, blank)
3126
//  3. (blank, pubkey2)
3127
//
3128
// A blank pubkey means that corresponding node will be unable to resurrect a
3129
// channel on its own. For example, node1 may continue to publish recent
3130
// updates, but node2 has fallen way behind. After marking an edge as a zombie,
3131
// we don't want another fresh update from node1 to resurrect, as the edge can
3132
// only become live once node2 finally sends something recent.
3133
//
3134
// In the case where we have neither update, we allow either party to resurrect
3135
// the channel. If the channel were to be marked zombie again, it would be
3136
// marked with the correct lagging channel since we received an update from only
3137
// one side.
3138
func makeZombiePubkeys(node1, node2 [33]byte, e1, e2 *time.Time) ([33]byte,
3139
        [33]byte) {
3✔
3140

3✔
3141
        switch {
3✔
3142
        // If we don't have either edge policy, we'll return both pubkeys so
3143
        // that the channel can be resurrected by either party.
UNCOV
3144
        case e1 == nil && e2 == nil:
×
UNCOV
3145
                return node1, node2
×
3146

3147
        // If we're missing edge1, or if both edges are present but edge1 is
3148
        // older, we'll return edge1's pubkey and a blank pubkey for edge2. This
3149
        // means that only an update from edge1 will be able to resurrect the
3150
        // channel.
3151
        case e1 == nil || (e2 != nil && e1.Before(*e2)):
1✔
3152
                return node1, [33]byte{}
1✔
3153

3154
        // Otherwise, we're missing edge2 or edge2 is the older side, so we
3155
        // return a blank pubkey for edge1. In this case, only an update from
3156
        // edge2 can resurect the channel.
3157
        default:
2✔
3158
                return [33]byte{}, node1
2✔
3159
        }
3160
}
3161

3162
// UpdateEdgePolicy updates the edge routing policy for a single directed edge
3163
// within the database for the referenced channel. The `flags` attribute within
3164
// the ChannelEdgePolicy determines which of the directed edges are being
3165
// updated. If the flag is 1, then the first node's information is being
3166
// updated, otherwise it's the second node's information. The node ordering is
3167
// determined by the lexicographical ordering of the identity public keys of the
3168
// nodes on either side of the channel.
3169
func (c *KVStore) UpdateEdgePolicy(ctx context.Context,
3170
        edge *models.ChannelEdgePolicy,
3171
        opts ...batch.SchedulerOption) (route.Vertex, route.Vertex, error) {
2,875✔
3172

2,875✔
3173
        var (
2,875✔
3174
                isUpdate1    bool
2,875✔
3175
                edgeNotFound bool
2,875✔
3176
                from, to     route.Vertex
2,875✔
3177
        )
2,875✔
3178

2,875✔
3179
        r := &batch.Request[kvdb.RwTx]{
2,875✔
3180
                Opts: batch.NewSchedulerOptions(opts...),
2,875✔
3181
                Reset: func() {
5,751✔
3182
                        isUpdate1 = false
2,876✔
3183
                        edgeNotFound = false
2,876✔
3184
                },
2,876✔
3185
                Do: func(tx kvdb.RwTx) error {
2,876✔
3186
                        // Validate that the ExtraOpaqueData is in fact a valid
2,876✔
3187
                        // TLV stream. This is done here instead of within
2,876✔
3188
                        // updateEdgePolicy so that updateEdgePolicy can be used
2,876✔
3189
                        // by unit tests to recreate the case where we already
2,876✔
3190
                        // have nodes persisted with invalid TLV data.
2,876✔
3191
                        err := edge.ExtraOpaqueData.ValidateTLV()
2,876✔
3192
                        if err != nil {
2,878✔
3193
                                return fmt.Errorf("%w: %w",
2✔
3194
                                        ErrParsingExtraTLVBytes, err)
2✔
3195
                        }
2✔
3196

3197
                        from, to, isUpdate1, err = updateEdgePolicy(tx, edge)
2,874✔
3198
                        if err != nil {
2,878✔
3199
                                log.Errorf("UpdateEdgePolicy faild: %v", err)
4✔
3200
                        }
4✔
3201

3202
                        // Silence ErrEdgeNotFound so that the batch can
3203
                        // succeed, but propagate the error via local state.
3204
                        if errors.Is(err, ErrEdgeNotFound) {
2,878✔
3205
                                edgeNotFound = true
4✔
3206
                                return nil
4✔
3207
                        }
4✔
3208

3209
                        return err
2,870✔
3210
                },
3211
                OnCommit: func(err error) error {
2,875✔
3212
                        switch {
2,875✔
3213
                        case err != nil:
1✔
3214
                                return err
1✔
3215
                        case edgeNotFound:
4✔
3216
                                return ErrEdgeNotFound
4✔
3217
                        default:
2,870✔
3218
                                c.updateEdgeCache(edge, isUpdate1)
2,870✔
3219
                                return nil
2,870✔
3220
                        }
3221
                },
3222
        }
3223

3224
        err := c.chanScheduler.Execute(ctx, r)
2,875✔
3225

2,875✔
3226
        return from, to, err
2,875✔
3227
}
3228

3229
func (c *KVStore) updateEdgeCache(e *models.ChannelEdgePolicy,
3230
        isUpdate1 bool) {
2,870✔
3231

2,870✔
3232
        // If an entry for this channel is found in reject cache, we'll modify
2,870✔
3233
        // the entry with the updated timestamp for the direction that was just
2,870✔
3234
        // written. If the edge doesn't exist, we'll load the cache entry lazily
2,870✔
3235
        // during the next query for this edge.
2,870✔
3236
        if entry, ok := c.rejectCache.get(e.ChannelID); ok {
2,878✔
3237
                if isUpdate1 {
14✔
3238
                        entry.upd1Time = e.LastUpdate.Unix()
6✔
3239
                } else {
11✔
3240
                        entry.upd2Time = e.LastUpdate.Unix()
5✔
3241
                }
5✔
3242
                c.rejectCache.insert(e.ChannelID, entry)
8✔
3243
        }
3244

3245
        // If an entry for this channel is found in channel cache, we'll modify
3246
        // the entry with the updated policy for the direction that was just
3247
        // written. If the edge doesn't exist, we'll defer loading the info and
3248
        // policies and lazily read from disk during the next query.
3249
        if channel, ok := c.chanCache.get(e.ChannelID); ok {
2,873✔
3250
                if isUpdate1 {
6✔
3251
                        channel.Policy1 = e
3✔
3252
                } else {
6✔
3253
                        channel.Policy2 = e
3✔
3254
                }
3✔
3255
                c.chanCache.insert(e.ChannelID, channel)
3✔
3256
        }
3257
}
3258

3259
// updateEdgePolicy attempts to update an edge's policy within the relevant
// buckets using an existing database transaction. The returned boolean will be
// true if the updated policy belongs to node1, and false if the policy belonged
// to node2.
func updateEdgePolicy(tx kvdb.RwTx, edge *models.ChannelEdgePolicy) (
	route.Vertex, route.Vertex, bool, error) {

	var noVertex route.Vertex

	edges := tx.ReadWriteBucket(edgeBucket)
	if edges == nil {
		return noVertex, noVertex, false, ErrEdgeNotFound
	}
	edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
	if edgeIndex == nil {
		return noVertex, noVertex, false, ErrEdgeNotFound
	}

	// Create the channelID key by converting the channel ID
	// integer into a byte slice.
	var chanID [8]byte
	byteOrder.PutUint64(chanID[:], edge.ChannelID)

	// With the channel ID, we then fetch the value storing the two
	// nodes which connect this channel edge. The first 33 bytes hold
	// node1's pubkey and the next 33 hold node2's.
	nodeInfo := edgeIndex.Get(chanID[:])
	if nodeInfo == nil {
		return noVertex, noVertex, false, ErrEdgeNotFound
	}

	// Depending on the flags value passed above, either the first
	// or second edge policy is being updated.
	var fromNode, toNode []byte
	var isUpdate1 bool
	if edge.ChannelFlags&lnwire.ChanUpdateDirection == 0 {
		fromNode = nodeInfo[:33]
		toNode = nodeInfo[33:66]
		isUpdate1 = true
	} else {
		fromNode = nodeInfo[33:66]
		toNode = nodeInfo[:33]
		isUpdate1 = false
	}

	// Finally, with the direction of the edge being updated
	// identified, we update the on-disk edge representation.
	err := putChanEdgePolicy(edges, edge, fromNode, toNode)
	if err != nil {
		return noVertex, noVertex, false, err
	}

	var (
		fromNodePubKey route.Vertex
		toNodePubKey   route.Vertex
	)
	copy(fromNodePubKey[:], fromNode)
	copy(toNodePubKey[:], toNode)

	return fromNodePubKey, toNodePubKey, isUpdate1, nil
}
3319

3320
// isPublic determines whether the node is seen as public within the graph from
// the source node's point of view. An existing database transaction can also be
// specified.
func (c *KVStore) isPublic(tx kvdb.RTx, nodePub route.Vertex,
	sourcePubKey []byte) (bool, error) {

	// In order to determine whether this node is publicly advertised within
	// the graph, we'll need to look at all of its edges and check whether
	// they extend to any other node than the source node. errDone will be
	// used to terminate the check early.
	nodeIsPublic := false
	errDone := errors.New("done")
	err := c.forEachNodeChannelTx(tx, nodePub, func(tx kvdb.RTx,
		info *models.ChannelEdgeInfo, _ *models.ChannelEdgePolicy,
		_ *models.ChannelEdgePolicy) error {

		// If this edge doesn't extend to the source node, we'll
		// terminate our search as we can now conclude that the node is
		// publicly advertised within the graph due to the local node
		// knowing of the current edge.
		if !bytes.Equal(info.NodeKey1Bytes[:], sourcePubKey) &&
			!bytes.Equal(info.NodeKey2Bytes[:], sourcePubKey) {

			nodeIsPublic = true
			return errDone
		}

		// Since the edge _does_ extend to the source node, we'll also
		// need to ensure that this is a public edge. The presence of
		// an announcement proof marks the edge as public.
		if info.AuthProof != nil {
			nodeIsPublic = true
			return errDone
		}

		// Otherwise, we'll continue our search.
		return nil
	}, func() {
		nodeIsPublic = false
	})
	// errDone is our own early-exit sentinel, not a real failure, so
	// swallow it here.
	if err != nil && !errors.Is(err, errDone) {
		return false, err
	}

	return nodeIsPublic, nil
}
3365

3366
// fetchNodeTx attempts to look up a target node by its identity
// public key. If the node isn't found in the database, then
// ErrGraphNodeNotFound is returned. An optional transaction may be provided.
// If none is provided, then a new one will be created.
func (c *KVStore) fetchNodeTx(tx kvdb.RTx, nodePub route.Vertex) (*models.Node,
	error) {

	// Delegate directly; fetchLightningNode handles the nil-tx case by
	// opening its own read-only transaction.
	return c.fetchLightningNode(tx, nodePub)
}
3,654✔
3375

3376
// FetchNode attempts to look up a target node by its identity public
// key. If the node isn't found in the database, then ErrGraphNodeNotFound is
// returned.
func (c *KVStore) FetchNode(_ context.Context,
	nodePub route.Vertex) (*models.Node, error) {

	// A nil transaction instructs fetchLightningNode to create its own
	// read-only transaction for the lookup.
	return c.fetchLightningNode(nil, nodePub)
}
162✔
3384

3385
// fetchLightningNode attempts to look up a target node by its identity public
3386
// key. If the node isn't found in the database, then ErrGraphNodeNotFound is
3387
// returned. An optional transaction may be provided. If none is provided, then
3388
// a new one will be created.
3389
func (c *KVStore) fetchLightningNode(tx kvdb.RTx,
3390
        nodePub route.Vertex) (*models.Node, error) {
3,813✔
3391

3,813✔
3392
        var node *models.Node
3,813✔
3393
        fetch := func(tx kvdb.RTx) error {
7,626✔
3394
                // First grab the nodes bucket which stores the mapping from
3,813✔
3395
                // pubKey to node information.
3,813✔
3396
                nodes := tx.ReadBucket(nodeBucket)
3,813✔
3397
                if nodes == nil {
3,813✔
3398
                        return ErrGraphNotFound
×
3399
                }
×
3400

3401
                // If a key for this serialized public key isn't found, then
3402
                // the target node doesn't exist within the database.
3403
                nodeBytes := nodes.Get(nodePub[:])
3,813✔
3404
                if nodeBytes == nil {
3,831✔
3405
                        return ErrGraphNodeNotFound
18✔
3406
                }
18✔
3407

3408
                // If the node is found, then we can de deserialize the node
3409
                // information to return to the user.
3410
                nodeReader := bytes.NewReader(nodeBytes)
3,798✔
3411
                n, err := deserializeLightningNode(nodeReader)
3,798✔
3412
                if err != nil {
3,798✔
3413
                        return err
×
3414
                }
×
3415

3416
                node = &n
3,798✔
3417

3,798✔
3418
                return nil
3,798✔
3419
        }
3420

3421
        if tx == nil {
3,999✔
3422
                err := kvdb.View(
186✔
3423
                        c.db, fetch, func() {
372✔
3424
                                node = nil
186✔
3425
                        },
186✔
3426
                )
3427
                if err != nil {
193✔
3428
                        return nil, err
7✔
3429
                }
7✔
3430

3431
                return node, nil
182✔
3432
        }
3433

3434
        err := fetch(tx)
3,627✔
3435
        if err != nil {
3,638✔
3436
                return nil, err
11✔
3437
        }
11✔
3438

3439
        return node, nil
3,616✔
3440
}
3441

3442
// HasLightningNode determines if the graph has a vertex identified by the
3443
// target node identity public key. If the node exists in the database, a
3444
// timestamp of when the data for the node was lasted updated is returned along
3445
// with a true boolean. Otherwise, an empty time.Time is returned with a false
3446
// boolean.
3447
func (c *KVStore) HasNode(_ context.Context,
3448
        nodePub [33]byte) (time.Time, bool, error) {
20✔
3449

20✔
3450
        var (
20✔
3451
                updateTime time.Time
20✔
3452
                exists     bool
20✔
3453
        )
20✔
3454

20✔
3455
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
40✔
3456
                // First grab the nodes bucket which stores the mapping from
20✔
3457
                // pubKey to node information.
20✔
3458
                nodes := tx.ReadBucket(nodeBucket)
20✔
3459
                if nodes == nil {
20✔
3460
                        return ErrGraphNotFound
×
3461
                }
×
3462

3463
                // If a key for this serialized public key isn't found, we can
3464
                // exit early.
3465
                nodeBytes := nodes.Get(nodePub[:])
20✔
3466
                if nodeBytes == nil {
26✔
3467
                        exists = false
6✔
3468
                        return nil
6✔
3469
                }
6✔
3470

3471
                // Otherwise we continue on to obtain the time stamp
3472
                // representing the last time the data for this node was
3473
                // updated.
3474
                nodeReader := bytes.NewReader(nodeBytes)
17✔
3475
                node, err := deserializeLightningNode(nodeReader)
17✔
3476
                if err != nil {
17✔
3477
                        return err
×
3478
                }
×
3479

3480
                exists = true
17✔
3481
                updateTime = node.LastUpdate
17✔
3482

17✔
3483
                return nil
17✔
3484
        }, func() {
20✔
3485
                updateTime = time.Time{}
20✔
3486
                exists = false
20✔
3487
        })
20✔
3488
        if err != nil {
20✔
3489
                return time.Time{}, exists, err
×
3490
        }
×
3491

3492
        return updateTime, exists, nil
20✔
3493
}
3494

3495
// nodeTraversal is used to traverse all channels of a node given by its
// public key and passes channel information into the specified callback.
// For every channel the callback receives the edge info, the node's own
// (outgoing) policy and the counterparty's (incoming) policy; either policy
// may be nil if it is unknown.
//
// NOTE: the reset param is only meaningful if the tx param is nil. If it is
// not nil, the caller is expected to have passed in a reset to the parent
// function's View/Update call which will then apply to the whole transaction.
func nodeTraversal(tx kvdb.RTx, nodePub []byte, db kvdb.Backend,
	cb func(kvdb.RTx, *models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
		*models.ChannelEdgePolicy) error, reset func()) error {

	traversal := func(tx kvdb.RTx) error {
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNotFound
		}
		edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrGraphNoEdgesFound
		}

		// In order to reach all the edges for this node, we take
		// advantage of the construction of the key-space within the
		// edge bucket. The keys are stored in the form: pubKey ||
		// chanID. Therefore, starting from a chanID of zero, we can
		// scan forward in the bucket, grabbing all the edges for the
		// node. Once the prefix no longer matches, then we know we're
		// done.
		var nodeStart [33 + 8]byte
		copy(nodeStart[:], nodePub)
		copy(nodeStart[33:], chanStart[:])

		// Starting from the key pubKey || 0, we seek forward in the
		// bucket until the retrieved key no longer has the public key
		// as its prefix. This indicates that we've stepped over into
		// another node's edges, so we can terminate our scan.
		edgeCursor := edges.ReadCursor()
		for nodeEdge, _ := edgeCursor.Seek(nodeStart[:]); bytes.HasPrefix(nodeEdge, nodePub); nodeEdge, _ = edgeCursor.Next() { //nolint:ll
			// If the prefix still matches, the channel id is
			// returned in nodeEdge. Channel id is used to lookup
			// the node at the other end of the channel and both
			// edge policies.
			chanID := nodeEdge[33:]
			edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID)
			if err != nil {
				return err
			}

			// The policy keyed under our own pubkey is the one we
			// advertise, i.e. the outgoing direction.
			outgoingPolicy, err := fetchChanEdgePolicy(
				edges, chanID, nodePub,
			)
			if err != nil {
				return err
			}

			otherNode, err := edgeInfo.OtherNodeKeyBytes(nodePub)
			if err != nil {
				return err
			}

			// The counterparty's policy governs traffic flowing
			// towards us, i.e. the incoming direction.
			incomingPolicy, err := fetchChanEdgePolicy(
				edges, chanID, otherNode[:],
			)
			if err != nil {
				return err
			}

			// Finally, we execute the callback.
			err = cb(tx, &edgeInfo, outgoingPolicy, incomingPolicy)
			if err != nil {
				return err
			}
		}

		return nil
	}

	// If no transaction was provided, then we'll create a new transaction
	// to execute the transaction within.
	if tx == nil {
		return kvdb.View(db, traversal, reset)
	}

	// Otherwise, we re-use the existing transaction to execute the graph
	// traversal.
	return traversal(tx)
}
3581

3582
// ForEachNodeChannel iterates through all channels of the given node,
3583
// executing the passed callback with an edge info structure and the policies
3584
// of each end of the channel. The first edge policy is the outgoing edge *to*
3585
// the connecting node, while the second is the incoming edge *from* the
3586
// connecting node. If the callback returns an error, then the iteration is
3587
// halted with the error propagated back up to the caller.
3588
//
3589
// Unknown policies are passed into the callback as nil values.
3590
func (c *KVStore) ForEachNodeChannel(_ context.Context, nodePub route.Vertex,
3591
        cb func(*models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
3592
                *models.ChannelEdgePolicy) error, reset func()) error {
9✔
3593

9✔
3594
        return nodeTraversal(
9✔
3595
                nil, nodePub[:], c.db, func(_ kvdb.RTx,
9✔
3596
                        info *models.ChannelEdgeInfo, policy,
9✔
3597
                        policy2 *models.ChannelEdgePolicy) error {
22✔
3598

13✔
3599
                        return cb(info, policy, policy2)
13✔
3600
                }, reset,
13✔
3601
        )
3602
}
3603

3604
// ForEachSourceNodeChannel iterates through all channels of the source node,
// executing the passed callback on each. The callback is provided with the
// channel's outpoint, whether we have a policy for the channel and the channel
// peer's node information.
func (c *KVStore) ForEachSourceNodeChannel(_ context.Context,
	cb func(chanPoint wire.OutPoint, havePolicy bool,
		otherNode *models.Node) error, reset func()) error {

	return kvdb.View(c.db, func(tx kvdb.RTx) error {
		// Resolve the source node first; its pubkey seeds the channel
		// traversal below.
		nodes := tx.ReadBucket(nodeBucket)
		if nodes == nil {
			return ErrGraphNotFound
		}

		node, err := sourceNodeWithTx(nodes)
		if err != nil {
			return err
		}

		// Walk every channel of the source node within the same read
		// transaction, looking up the peer on the far end of each.
		return nodeTraversal(
			tx, node.PubKeyBytes[:], c.db, func(tx kvdb.RTx,
				info *models.ChannelEdgeInfo,
				policy, _ *models.ChannelEdgePolicy) error {

				peer, err := c.fetchOtherNode(
					tx, info, node.PubKeyBytes[:],
				)
				if err != nil {
					return err
				}

				// A non-nil outgoing policy means we have our
				// own advertised policy for this channel.
				return cb(
					info.ChannelPoint, policy != nil, peer,
				)
			}, reset,
		)
	}, reset)
}
3642

3643
// forEachNodeChannelTx iterates through all channels of the given node,
3644
// executing the passed callback with an edge info structure and the policies
3645
// of each end of the channel. The first edge policy is the outgoing edge *to*
3646
// the connecting node, while the second is the incoming edge *from* the
3647
// connecting node. If the callback returns an error, then the iteration is
3648
// halted with the error propagated back up to the caller.
3649
//
3650
// Unknown policies are passed into the callback as nil values.
3651
//
3652
// If the caller wishes to re-use an existing boltdb transaction, then it
3653
// should be passed as the first argument.  Otherwise, the first argument should
3654
// be nil and a fresh transaction will be created to execute the graph
3655
// traversal.
3656
//
3657
// NOTE: the reset function is only meaningful if the tx param is nil.
3658
func (c *KVStore) forEachNodeChannelTx(tx kvdb.RTx,
3659
        nodePub route.Vertex, cb func(kvdb.RTx, *models.ChannelEdgeInfo,
3660
                *models.ChannelEdgePolicy, *models.ChannelEdgePolicy) error,
3661
        reset func()) error {
1,002✔
3662

1,002✔
3663
        return nodeTraversal(tx, nodePub[:], c.db, cb, reset)
1,002✔
3664
}
1,002✔
3665

3666
// fetchOtherNode attempts to fetch the full Node that's opposite of
3667
// the target node in the channel. This is useful when one knows the pubkey of
3668
// one of the nodes, and wishes to obtain the full Node for the other
3669
// end of the channel.
3670
func (c *KVStore) fetchOtherNode(tx kvdb.RTx,
3671
        channel *models.ChannelEdgeInfo, thisNodeKey []byte) (
3672
        *models.Node, error) {
5✔
3673

5✔
3674
        // Ensure that the node passed in is actually a member of the channel.
5✔
3675
        var targetNodeBytes [33]byte
5✔
3676
        switch {
5✔
3677
        case bytes.Equal(channel.NodeKey1Bytes[:], thisNodeKey):
3✔
3678
                targetNodeBytes = channel.NodeKey2Bytes
3✔
3679
        case bytes.Equal(channel.NodeKey2Bytes[:], thisNodeKey):
5✔
3680
                targetNodeBytes = channel.NodeKey1Bytes
5✔
3681
        default:
×
3682
                return nil, fmt.Errorf("node not participating in this channel")
×
3683
        }
3684

3685
        var targetNode *models.Node
5✔
3686
        fetchNodeFunc := func(tx kvdb.RTx) error {
10✔
3687
                // First grab the nodes bucket which stores the mapping from
5✔
3688
                // pubKey to node information.
5✔
3689
                nodes := tx.ReadBucket(nodeBucket)
5✔
3690
                if nodes == nil {
5✔
3691
                        return ErrGraphNotFound
×
3692
                }
×
3693

3694
                node, err := fetchLightningNode(nodes, targetNodeBytes[:])
5✔
3695
                if err != nil {
5✔
3696
                        return err
×
3697
                }
×
3698

3699
                targetNode = &node
5✔
3700

5✔
3701
                return nil
5✔
3702
        }
3703

3704
        // If the transaction is nil, then we'll need to create a new one,
3705
        // otherwise we can use the existing db transaction.
3706
        var err error
5✔
3707
        if tx == nil {
5✔
3708
                err = kvdb.View(c.db, fetchNodeFunc, func() {
×
3709
                        targetNode = nil
×
3710
                })
×
3711
        } else {
5✔
3712
                err = fetchNodeFunc(tx)
5✔
3713
        }
5✔
3714

3715
        return targetNode, err
5✔
3716
}
3717

3718
// computeEdgePolicyKeys is a helper function that can be used to compute the
3719
// keys used to index the channel edge policy info for the two nodes of the
3720
// edge. The keys for node 1 and node 2 are returned respectively.
3721
func computeEdgePolicyKeys(info *models.ChannelEdgeInfo) ([]byte, []byte) {
25✔
3722
        var (
25✔
3723
                node1Key [33 + 8]byte
25✔
3724
                node2Key [33 + 8]byte
25✔
3725
        )
25✔
3726

25✔
3727
        copy(node1Key[:], info.NodeKey1Bytes[:])
25✔
3728
        copy(node2Key[:], info.NodeKey2Bytes[:])
25✔
3729

25✔
3730
        byteOrder.PutUint64(node1Key[33:], info.ChannelID)
25✔
3731
        byteOrder.PutUint64(node2Key[33:], info.ChannelID)
25✔
3732

25✔
3733
        return node1Key[:], node2Key[:]
25✔
3734
}
25✔
3735

3736
// FetchChannelEdgesByOutpoint attempts to lookup the two directed edges for
// the channel identified by the funding outpoint. If the channel can't be
// found, then ErrEdgeNotFound is returned. A struct which houses the general
// information for the channel itself is returned as well as two structs that
// contain the routing policies for the channel in either direction.
func (c *KVStore) FetchChannelEdgesByOutpoint(op *wire.OutPoint) (
	*models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
	*models.ChannelEdgePolicy, error) {

	var (
		edgeInfo *models.ChannelEdgeInfo
		policy1  *models.ChannelEdgePolicy
		policy2  *models.ChannelEdgePolicy
	)

	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		// First, grab the node bucket. This will be used to populate
		// the Node pointers in each edge read from disk.
		nodes := tx.ReadBucket(nodeBucket)
		if nodes == nil {
			return ErrGraphNotFound
		}

		// Next, grab the edge bucket which stores the edges, and also
		// the index itself so we can group the directed edges together
		// logically.
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrGraphNoEdgesFound
		}

		// If the channel's outpoint doesn't exist within the outpoint
		// index, then the edge does not exist.
		chanIndex := edges.NestedReadBucket(channelPointBucket)
		if chanIndex == nil {
			return ErrGraphNoEdgesFound
		}
		// The outpoint index is keyed by the serialized outpoint, so
		// serialize the requested one to perform the lookup.
		var b bytes.Buffer
		if err := WriteOutpoint(&b, op); err != nil {
			return err
		}
		chanID := chanIndex.Get(b.Bytes())
		if chanID == nil {
			return fmt.Errorf("%w: op=%v", ErrEdgeNotFound, op)
		}

		// If the channel is found to exists, then we'll first retrieve
		// the general information for the channel.
		edge, err := fetchChanEdgeInfo(edgeIndex, chanID)
		if err != nil {
			return fmt.Errorf("%w: chanID=%x", err, chanID)
		}
		edgeInfo = &edge

		// Once we have the information about the channels' parameters,
		// we'll fetch the routing policies for each for the directed
		// edges.
		e1, e2, err := fetchChanEdgePolicies(edgeIndex, edges, chanID)
		if err != nil {
			return fmt.Errorf("failed to find policy: %w", err)
		}

		policy1 = e1
		policy2 = e2

		return nil
	}, func() {
		// Clear all partial results if the transaction is retried.
		edgeInfo = nil
		policy1 = nil
		policy2 = nil
	})
	if err != nil {
		return nil, nil, nil, err
	}

	return edgeInfo, policy1, policy2, nil
}
3817

3818
// FetchChannelEdgesByID attempts to lookup the two directed edges for the
// channel identified by the channel ID. If the channel can't be found, then
// ErrEdgeNotFound is returned. A struct which houses the general information
// for the channel itself is returned as well as two structs that contain the
// routing policies for the channel in either direction.
//
// ErrZombieEdge can be returned if the edge is currently marked as a zombie
// within the database. In this case, the ChannelEdgePolicy's will be nil, and
// the ChannelEdgeInfo will only include the public keys of each node.
func (c *KVStore) FetchChannelEdgesByID(chanID uint64) (
	*models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
	*models.ChannelEdgePolicy, error) {

	var (
		edgeInfo  *models.ChannelEdgeInfo
		policy1   *models.ChannelEdgePolicy
		policy2   *models.ChannelEdgePolicy
		channelID [8]byte
	)

	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		// First, grab the node bucket. This will be used to populate
		// the Node pointers in each edge read from disk.
		nodes := tx.ReadBucket(nodeBucket)
		if nodes == nil {
			return ErrGraphNotFound
		}

		// Next, grab the edge bucket which stores the edges, and also
		// the index itself so we can group the directed edges together
		// logically.
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrGraphNoEdgesFound
		}

		// The edge index is keyed by the big-endian channel ID.
		byteOrder.PutUint64(channelID[:], chanID)

		// Now, attempt to fetch edge.
		edge, err := fetchChanEdgeInfo(edgeIndex, channelID[:])

		// If it doesn't exist, we'll quickly check our zombie index to
		// see if we've previously marked it as so.
		if errors.Is(err, ErrEdgeNotFound) {
			// If the zombie index doesn't exist, or the edge is not
			// marked as a zombie within it, then we'll return the
			// original ErrEdgeNotFound error.
			zombieIndex := edges.NestedReadBucket(zombieBucket)
			if zombieIndex == nil {
				return ErrEdgeNotFound
			}

			isZombie, pubKey1, pubKey2 := isZombieEdge(
				zombieIndex, chanID,
			)
			if !isZombie {
				return ErrEdgeNotFound
			}

			// Otherwise, the edge is marked as a zombie, so we'll
			// populate the edge info with the public keys of each
			// party as this is the only information we have about
			// it and return an error signaling so.
			edgeInfo = &models.ChannelEdgeInfo{
				NodeKey1Bytes: pubKey1,
				NodeKey2Bytes: pubKey2,
			}

			return ErrZombieEdge
		}

		// Otherwise, we'll just return the error if any.
		if err != nil {
			return err
		}

		edgeInfo = &edge

		// Then we'll attempt to fetch the accompanying policies of this
		// edge.
		e1, e2, err := fetchChanEdgePolicies(
			edgeIndex, edges, channelID[:],
		)
		if err != nil {
			return err
		}

		policy1 = e1
		policy2 = e2

		return nil
	}, func() {
		// Clear all partial results if the transaction is retried.
		edgeInfo = nil
		policy1 = nil
		policy2 = nil
	})
	// A zombie edge is reported alongside the partially-populated edge
	// info so the caller can still see who the channel parties were.
	if errors.Is(err, ErrZombieEdge) {
		return edgeInfo, nil, nil, err
	}
	if err != nil {
		return nil, nil, nil, err
	}

	return edgeInfo, policy1, policy2, nil
}
3927

3928
// IsPublicNode is a helper method that determines whether the node with the
// given public key is seen as a public node in the graph from the graph's
// source node's point of view.
func (c *KVStore) IsPublicNode(pubKey [33]byte) (bool, error) {
	var nodeIsPublic bool
	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		nodes := tx.ReadBucket(nodeBucket)
		if nodes == nil {
			return ErrGraphNodesNotFound
		}
		// The special "source" key stores our own pubkey; it must be
		// set before publicness can be judged from our point of view.
		ourPubKey := nodes.Get(sourceKey)
		if ourPubKey == nil {
			return ErrSourceNodeNotSet
		}
		// Ensure the target node actually exists in the graph before
		// checking its visibility.
		node, err := fetchLightningNode(nodes, pubKey[:])
		if err != nil {
			return err
		}

		nodeIsPublic, err = c.isPublic(tx, node.PubKeyBytes, ourPubKey)

		return err
	}, func() {
		nodeIsPublic = false
	})
	if err != nil {
		return false, err
	}

	return nodeIsPublic, nil
}
3959

3960
// genMultiSigP2WSH generates the p2wsh'd multisig script for 2 of 2 pubkeys.
3961
func genMultiSigP2WSH(aPub, bPub []byte) ([]byte, error) {
49✔
3962
        witnessScript, err := input.GenMultiSigScript(aPub, bPub)
49✔
3963
        if err != nil {
49✔
3964
                return nil, err
×
3965
        }
×
3966

3967
        // With the witness script generated, we'll now turn it into a p2wsh
3968
        // script:
3969
        //  * OP_0 <sha256(script)>
3970
        bldr := txscript.NewScriptBuilder(
49✔
3971
                txscript.WithScriptAllocSize(input.P2WSHSize),
49✔
3972
        )
49✔
3973
        bldr.AddOp(txscript.OP_0)
49✔
3974
        scriptHash := sha256.Sum256(witnessScript)
49✔
3975
        bldr.AddData(scriptHash[:])
49✔
3976

49✔
3977
        return bldr.Script()
49✔
3978
}
3979

3980
// EdgePoint couples the outpoint of a channel with the funding script that it
// creates. The FilteredChainView will use this to watch for spends of this
// edge point on chain. We require both of these values as depending on the
// concrete implementation, either the pkScript, or the out point will be used.
// Both fields refer to the same on-chain funding output.
type EdgePoint struct {
	// FundingPkScript is the p2wsh multi-sig script of the target channel.
	FundingPkScript []byte

	// OutPoint is the outpoint of the target channel.
	OutPoint wire.OutPoint
}
3991

3992
// String returns a human readable version of the target EdgePoint. We return
// the outpoint directly as it is enough to uniquely identify the edge point.
func (e *EdgePoint) String() string {
	// The funding script is deterministic given the outpoint, so the
	// outpoint alone suffices for display purposes.
	return e.OutPoint.String()
}
3997

3998
// ChannelView returns the verifiable edge information for each active channel
// within the known channel graph. The set of UTXO's (along with their scripts)
// returned are the ones that need to be watched on chain to detect channel
// closes on the resident blockchain.
func (c *KVStore) ChannelView() ([]EdgePoint, error) {
	var edgePoints []EdgePoint
	if err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		// We're going to iterate over the entire channel index, so
		// we'll need to fetch the edgeBucket to get to the index as
		// it's a sub-bucket.
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		chanIndex := edges.NestedReadBucket(channelPointBucket)
		if chanIndex == nil {
			return ErrGraphNoEdgesFound
		}
		edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrGraphNoEdgesFound
		}

		// Once we have the proper bucket, we'll range over each key
		// (which is the channel point for the channel) and decode it,
		// accumulating each entry.
		return chanIndex.ForEach(
			func(chanPointBytes, chanID []byte) error {
				chanPointReader := bytes.NewReader(
					chanPointBytes,
				)

				var chanPoint wire.OutPoint
				err := ReadOutpoint(chanPointReader, &chanPoint)
				if err != nil {
					return err
				}

				// Pull the full edge info so we can derive the
				// funding script from the two bitcoin keys.
				edgeInfo, err := fetchChanEdgeInfo(
					edgeIndex, chanID,
				)
				if err != nil {
					return err
				}

				pkScript, err := genMultiSigP2WSH(
					edgeInfo.BitcoinKey1Bytes[:],
					edgeInfo.BitcoinKey2Bytes[:],
				)
				if err != nil {
					return err
				}

				edgePoints = append(edgePoints, EdgePoint{
					FundingPkScript: pkScript,
					OutPoint:        chanPoint,
				})

				return nil
			},
		)
	}, func() {
		// Drop any partially-accumulated results on retry.
		edgePoints = nil
	}); err != nil {
		return nil, err
	}

	return edgePoints, nil
}
4067

4068
// MarkEdgeZombie attempts to mark a channel identified by its channel ID as a
4069
// zombie. This method is used on an ad-hoc basis, when channels need to be
4070
// marked as zombies outside the normal pruning cycle.
4071
func (c *KVStore) MarkEdgeZombie(chanID uint64,
4072
        pubKey1, pubKey2 [33]byte) error {
125✔
4073

125✔
4074
        c.cacheMu.Lock()
125✔
4075
        defer c.cacheMu.Unlock()
125✔
4076

125✔
4077
        err := kvdb.Batch(c.db, func(tx kvdb.RwTx) error {
250✔
4078
                edges := tx.ReadWriteBucket(edgeBucket)
125✔
4079
                if edges == nil {
125✔
4080
                        return ErrGraphNoEdgesFound
×
4081
                }
×
4082
                zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
125✔
4083
                if err != nil {
125✔
4084
                        return fmt.Errorf("unable to create zombie "+
×
4085
                                "bucket: %w", err)
×
4086
                }
×
4087

4088
                return markEdgeZombie(zombieIndex, chanID, pubKey1, pubKey2)
125✔
4089
        })
4090
        if err != nil {
125✔
4091
                return err
×
4092
        }
×
4093

4094
        c.rejectCache.remove(chanID)
125✔
4095
        c.chanCache.remove(chanID)
125✔
4096

125✔
4097
        return nil
125✔
4098
}
4099

4100
// markEdgeZombie marks an edge as a zombie within our zombie index. The public
4101
// keys should represent the node public keys of the two parties involved in the
4102
// edge.
4103
func markEdgeZombie(zombieIndex kvdb.RwBucket, chanID uint64, pubKey1,
4104
        pubKey2 [33]byte) error {
151✔
4105

151✔
4106
        var k [8]byte
151✔
4107
        byteOrder.PutUint64(k[:], chanID)
151✔
4108

151✔
4109
        var v [66]byte
151✔
4110
        copy(v[:33], pubKey1[:])
151✔
4111
        copy(v[33:], pubKey2[:])
151✔
4112

151✔
4113
        return zombieIndex.Put(k[:], v[:])
151✔
4114
}
151✔
4115

4116
// MarkEdgeLive clears an edge from our zombie index, deeming it as live.
//
// Returns ErrZombieEdgeNotFound if the channel is not present in the zombie
// index (propagated from markEdgeLiveUnsafe).
func (c *KVStore) MarkEdgeLive(chanID uint64) error {
	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()

	// Passing a nil transaction instructs markEdgeLiveUnsafe to create
	// its own write transaction for the removal.
	return c.markEdgeLiveUnsafe(nil, chanID)
}
21✔
4123

4124
// markEdgeLiveUnsafe clears an edge from the zombie index. This method can be
// called with an existing kvdb.RwTx or the argument can be set to nil in which
// case a new transaction will be created.
//
// Returns ErrZombieEdgeNotFound if no zombie entry exists for the given
// channel ID, and ErrGraphNoEdgesFound if the edge bucket is missing.
//
// NOTE: this method MUST only be called if the cacheMu has already been
// acquired.
func (c *KVStore) markEdgeLiveUnsafe(tx kvdb.RwTx, chanID uint64) error {
	dbFn := func(tx kvdb.RwTx) error {
		edges := tx.ReadWriteBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		zombieIndex := edges.NestedReadWriteBucket(zombieBucket)
		if zombieIndex == nil {
			// No zombie index yet means there is nothing to clear.
			return nil
		}

		// The zombie index is keyed by the big-endian channel ID.
		var k [8]byte
		byteOrder.PutUint64(k[:], chanID)

		if len(zombieIndex.Get(k[:])) == 0 {
			return ErrZombieEdgeNotFound
		}

		return zombieIndex.Delete(k[:])
	}

	// If the transaction is nil, we'll create a new one. Otherwise, we use
	// the existing transaction
	var err error
	if tx == nil {
		err = kvdb.Update(c.db, dbFn, func() {})
	} else {
		err = dbFn(tx)
	}
	if err != nil {
		return err
	}

	// Drop any cached entries for the channel so later reads observe the
	// edge as live again.
	c.rejectCache.remove(chanID)
	c.chanCache.remove(chanID)

	return nil
}
4168

4169
// IsZombieEdge returns whether the edge is considered zombie. If it is a
4170
// zombie, then the two node public keys corresponding to this edge are also
4171
// returned.
4172
func (c *KVStore) IsZombieEdge(chanID uint64) (bool, [33]byte, [33]byte,
4173
        error) {
14✔
4174

14✔
4175
        var (
14✔
4176
                isZombie         bool
14✔
4177
                pubKey1, pubKey2 [33]byte
14✔
4178
        )
14✔
4179

14✔
4180
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
28✔
4181
                edges := tx.ReadBucket(edgeBucket)
14✔
4182
                if edges == nil {
14✔
4183
                        return ErrGraphNoEdgesFound
×
4184
                }
×
4185
                zombieIndex := edges.NestedReadBucket(zombieBucket)
14✔
4186
                if zombieIndex == nil {
14✔
4187
                        return nil
×
4188
                }
×
4189

4190
                isZombie, pubKey1, pubKey2 = isZombieEdge(zombieIndex, chanID)
14✔
4191

14✔
4192
                return nil
14✔
4193
        }, func() {
14✔
4194
                isZombie = false
14✔
4195
                pubKey1 = [33]byte{}
14✔
4196
                pubKey2 = [33]byte{}
14✔
4197
        })
14✔
4198
        if err != nil {
14✔
4199
                return false, [33]byte{}, [33]byte{}, fmt.Errorf("%w: %w "+
×
4200
                        "(chanID=%d)", ErrCantCheckIfZombieEdgeStr, err, chanID)
×
4201
        }
×
4202

4203
        return isZombie, pubKey1, pubKey2, nil
14✔
4204
}
4205

4206
// isZombieEdge returns whether an entry exists for the given channel in the
4207
// zombie index. If an entry exists, then the two node public keys corresponding
4208
// to this edge are also returned.
4209
func isZombieEdge(zombieIndex kvdb.RBucket,
4210
        chanID uint64) (bool, [33]byte, [33]byte) {
198✔
4211

198✔
4212
        var k [8]byte
198✔
4213
        byteOrder.PutUint64(k[:], chanID)
198✔
4214

198✔
4215
        v := zombieIndex.Get(k[:])
198✔
4216
        if v == nil {
304✔
4217
                return false, [33]byte{}, [33]byte{}
106✔
4218
        }
106✔
4219

4220
        var pubKey1, pubKey2 [33]byte
95✔
4221
        copy(pubKey1[:], v[:33])
95✔
4222
        copy(pubKey2[:], v[33:])
95✔
4223

95✔
4224
        return true, pubKey1, pubKey2
95✔
4225
}
4226

4227
// NumZombies returns the current number of zombie channels in the graph.
4228
func (c *KVStore) NumZombies() (uint64, error) {
4✔
4229
        var numZombies uint64
4✔
4230
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
8✔
4231
                edges := tx.ReadBucket(edgeBucket)
4✔
4232
                if edges == nil {
4✔
4233
                        return nil
×
4234
                }
×
4235
                zombieIndex := edges.NestedReadBucket(zombieBucket)
4✔
4236
                if zombieIndex == nil {
4✔
4237
                        return nil
×
4238
                }
×
4239

4240
                return zombieIndex.ForEach(func(_, _ []byte) error {
6✔
4241
                        numZombies++
2✔
4242
                        return nil
2✔
4243
                })
2✔
4244
        }, func() {
4✔
4245
                numZombies = 0
4✔
4246
        })
4✔
4247
        if err != nil {
4✔
4248
                return 0, err
×
4249
        }
×
4250

4251
        return numZombies, nil
4✔
4252
}
4253

4254
// PutClosedScid stores a SCID for a closed channel in the database. This is so
4255
// that we can ignore channel announcements that we know to be closed without
4256
// having to validate them and fetch a block.
4257
func (c *KVStore) PutClosedScid(scid lnwire.ShortChannelID) error {
1✔
4258
        return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
2✔
4259
                closedScids, err := tx.CreateTopLevelBucket(closedScidBucket)
1✔
4260
                if err != nil {
1✔
4261
                        return err
×
4262
                }
×
4263

4264
                var k [8]byte
1✔
4265
                byteOrder.PutUint64(k[:], scid.ToUint64())
1✔
4266

1✔
4267
                return closedScids.Put(k[:], []byte{})
1✔
4268
        }, func() {})
1✔
4269
}
4270

4271
// IsClosedScid checks whether a channel identified by the passed in scid is
4272
// closed. This helps avoid having to perform expensive validation checks.
4273
// TODO: Add an LRU cache to cut down on disc reads.
4274
func (c *KVStore) IsClosedScid(scid lnwire.ShortChannelID) (bool, error) {
5✔
4275
        var isClosed bool
5✔
4276
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
10✔
4277
                closedScids := tx.ReadBucket(closedScidBucket)
5✔
4278
                if closedScids == nil {
5✔
4279
                        return ErrClosedScidsNotFound
×
4280
                }
×
4281

4282
                var k [8]byte
5✔
4283
                byteOrder.PutUint64(k[:], scid.ToUint64())
5✔
4284

5✔
4285
                if closedScids.Get(k[:]) != nil {
6✔
4286
                        isClosed = true
1✔
4287
                        return nil
1✔
4288
                }
1✔
4289

4290
                return nil
4✔
4291
        }, func() {
5✔
4292
                isClosed = false
5✔
4293
        })
5✔
4294
        if err != nil {
5✔
4295
                return false, err
×
4296
        }
×
4297

4298
        return isClosed, nil
5✔
4299
}
4300

4301
// GraphSession will provide the call-back with access to a NodeTraverser
// instance which can be used to perform queries against the channel graph.
//
// The entire session runs inside a single read transaction so that every
// query made through the NodeTraverser observes one consistent snapshot of
// the graph. The reset closure is invoked by the underlying db if the
// transaction needs to be retried.
func (c *KVStore) GraphSession(cb func(graph NodeTraverser) error,
	reset func()) error {

	return c.db.View(func(tx walletdb.ReadTx) error {
		return cb(&nodeTraverserSession{
			db: c,
			tx: tx,
		})
	}, reset)
}
4313

4314
// nodeTraverserSession implements the NodeTraverser interface but with a
// backing read only transaction for a consistent view of the graph.
type nodeTraverserSession struct {
	// tx is the read transaction that scopes every query made during the
	// session to a single consistent snapshot of the graph.
	tx kvdb.RTx

	// db is the backing KVStore used to execute the queries.
	db *KVStore
}
4320

4321
// ForEachNodeDirectedChannel calls the callback for every channel of the given
// node.
//
// The query is pinned to the session's read transaction. The caller-supplied
// reset closure is ignored and a no-op is passed instead, since the
// transaction lifetime is managed by GraphSession.
//
// NOTE: Part of the NodeTraverser interface.
func (c *nodeTraverserSession) ForEachNodeDirectedChannel(nodePub route.Vertex,
	cb func(channel *DirectedChannel) error, _ func()) error {

	return c.db.forEachNodeDirectedChannel(c.tx, nodePub, cb, func() {})
}
4330

4331
// FetchNodeFeatures returns the features of the given node. If the node is
// unknown, assume no additional features are supported.
//
// The lookup is performed within the session's read transaction.
//
// NOTE: Part of the NodeTraverser interface.
func (c *nodeTraverserSession) FetchNodeFeatures(nodePub route.Vertex) (
	*lnwire.FeatureVector, error) {

	return c.db.fetchNodeFeatures(c.tx, nodePub)
}
254✔
4340

4341
// putLightningNode serializes the given node and writes it to the node
// bucket, keyed by its compressed public key. It also maintains the alias
// bucket (pubkey -> alias) and the update-time index (update-time||pubkey),
// replacing any stale index entry from a prior version of the node.
//
// The serialized layout is: last-update (8 bytes) || pubkey (33 bytes) ||
// have-announcement flag (2 bytes), followed — only when the flag is 1 — by
// the RGB color, alias, feature vector, addresses, auth signature and any
// extra opaque bytes.
func putLightningNode(nodeBucket, aliasBucket, updateIndex kvdb.RwBucket,
	node *models.Node) error {

	var (
		scratch [16]byte
		b       bytes.Buffer
	)

	pub, err := node.PubKey()
	if err != nil {
		return err
	}
	nodePub := pub.SerializeCompressed()

	// If the node has the update time set, write it, else write 0.
	updateUnix := uint64(0)
	if node.LastUpdate.Unix() > 0 {
		updateUnix = uint64(node.LastUpdate.Unix())
	}

	byteOrder.PutUint64(scratch[:8], updateUnix)
	if _, err := b.Write(scratch[:8]); err != nil {
		return err
	}

	if _, err := b.Write(nodePub); err != nil {
		return err
	}

	// If we got a node announcement for this node, we will have the rest
	// of the data available. If not we don't have more data to write.
	if !node.HaveNodeAnnouncement {
		// Write HaveNodeAnnouncement=0.
		byteOrder.PutUint16(scratch[:2], 0)
		if _, err := b.Write(scratch[:2]); err != nil {
			return err
		}

		return nodeBucket.Put(nodePub, b.Bytes())
	}

	// Write HaveNodeAnnouncement=1.
	byteOrder.PutUint16(scratch[:2], 1)
	if _, err := b.Write(scratch[:2]); err != nil {
		return err
	}

	// Write the node's color as its three RGB components.
	if err := binary.Write(&b, byteOrder, node.Color.R); err != nil {
		return err
	}
	if err := binary.Write(&b, byteOrder, node.Color.G); err != nil {
		return err
	}
	if err := binary.Write(&b, byteOrder, node.Color.B); err != nil {
		return err
	}

	if err := wire.WriteVarString(&b, 0, node.Alias); err != nil {
		return err
	}

	if err := node.Features.Encode(&b); err != nil {
		return err
	}

	// Write the address count followed by each serialized address.
	numAddresses := uint16(len(node.Addresses))
	byteOrder.PutUint16(scratch[:2], numAddresses)
	if _, err := b.Write(scratch[:2]); err != nil {
		return err
	}

	for _, address := range node.Addresses {
		if err := SerializeAddr(&b, address); err != nil {
			return err
		}
	}

	sigLen := len(node.AuthSigBytes)
	if sigLen > 80 {
		return fmt.Errorf("max sig len allowed is 80, had %v",
			sigLen)
	}

	err = wire.WriteVarBytes(&b, 0, node.AuthSigBytes)
	if err != nil {
		return err
	}

	if len(node.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes {
		return ErrTooManyExtraOpaqueBytes(len(node.ExtraOpaqueData))
	}
	err = wire.WriteVarBytes(&b, 0, node.ExtraOpaqueData)
	if err != nil {
		return err
	}

	if err := aliasBucket.Put(nodePub, []byte(node.Alias)); err != nil {
		return err
	}

	// With the alias bucket updated, we'll now update the index that
	// tracks the time series of node updates.
	var indexKey [8 + 33]byte
	byteOrder.PutUint64(indexKey[:8], updateUnix)
	copy(indexKey[8:], nodePub)

	// If there was already an old index entry for this node, then we'll
	// delete the old one before we write the new entry.
	if nodeBytes := nodeBucket.Get(nodePub); nodeBytes != nil {
		// Extract out the old update time to we can reconstruct the
		// prior index key to delete it from the index.
		oldUpdateTime := nodeBytes[:8]

		var oldIndexKey [8 + 33]byte
		copy(oldIndexKey[:8], oldUpdateTime)
		copy(oldIndexKey[8:], nodePub)

		if err := updateIndex.Delete(oldIndexKey[:]); err != nil {
			return err
		}
	}

	if err := updateIndex.Put(indexKey[:], nil); err != nil {
		return err
	}

	return nodeBucket.Put(nodePub, b.Bytes())
}
4469

4470
func fetchLightningNode(nodeBucket kvdb.RBucket,
4471
        nodePub []byte) (models.Node, error) {
4,561✔
4472

4,561✔
4473
        nodeBytes := nodeBucket.Get(nodePub)
4,561✔
4474
        if nodeBytes == nil {
4,646✔
4475
                return models.Node{}, ErrGraphNodeNotFound
85✔
4476
        }
85✔
4477

4478
        nodeReader := bytes.NewReader(nodeBytes)
4,479✔
4479

4,479✔
4480
        return deserializeLightningNode(nodeReader)
4,479✔
4481
}
4482

4483
func deserializeLightningNodeCacheable(r io.Reader) (route.Vertex,
4484
        *lnwire.FeatureVector, error) {
123✔
4485

123✔
4486
        var (
123✔
4487
                pubKey      route.Vertex
123✔
4488
                features    = lnwire.EmptyFeatureVector()
123✔
4489
                nodeScratch [8]byte
123✔
4490
        )
123✔
4491

123✔
4492
        // Skip ahead:
123✔
4493
        // - LastUpdate (8 bytes)
123✔
4494
        if _, err := r.Read(nodeScratch[:]); err != nil {
123✔
4495
                return pubKey, nil, err
×
4496
        }
×
4497

4498
        if _, err := io.ReadFull(r, pubKey[:]); err != nil {
123✔
4499
                return pubKey, nil, err
×
4500
        }
×
4501

4502
        // Read the node announcement flag.
4503
        if _, err := r.Read(nodeScratch[:2]); err != nil {
123✔
4504
                return pubKey, nil, err
×
4505
        }
×
4506
        hasNodeAnn := byteOrder.Uint16(nodeScratch[:2])
123✔
4507

123✔
4508
        // The rest of the data is optional, and will only be there if we got a
123✔
4509
        // node announcement for this node.
123✔
4510
        if hasNodeAnn == 0 {
126✔
4511
                return pubKey, features, nil
3✔
4512
        }
3✔
4513

4514
        // We did get a node announcement for this node, so we'll have the rest
4515
        // of the data available.
4516
        var rgb uint8
123✔
4517
        if err := binary.Read(r, byteOrder, &rgb); err != nil {
123✔
4518
                return pubKey, nil, err
×
4519
        }
×
4520
        if err := binary.Read(r, byteOrder, &rgb); err != nil {
123✔
4521
                return pubKey, nil, err
×
4522
        }
×
4523
        if err := binary.Read(r, byteOrder, &rgb); err != nil {
123✔
4524
                return pubKey, nil, err
×
4525
        }
×
4526

4527
        if _, err := wire.ReadVarString(r, 0); err != nil {
123✔
4528
                return pubKey, nil, err
×
4529
        }
×
4530

4531
        if err := features.Decode(r); err != nil {
123✔
4532
                return pubKey, nil, err
×
4533
        }
×
4534

4535
        return pubKey, features, nil
123✔
4536
}
4537

4538
func deserializeLightningNode(r io.Reader) (models.Node, error) {
9,467✔
4539
        var (
9,467✔
4540
                node    models.Node
9,467✔
4541
                scratch [8]byte
9,467✔
4542
                err     error
9,467✔
4543
        )
9,467✔
4544

9,467✔
4545
        // Always populate a feature vector, even if we don't have a node
9,467✔
4546
        // announcement and short circuit below.
9,467✔
4547
        node.Features = lnwire.EmptyFeatureVector()
9,467✔
4548

9,467✔
4549
        if _, err := r.Read(scratch[:]); err != nil {
9,467✔
4550
                return models.Node{}, err
×
4551
        }
×
4552

4553
        unix := int64(byteOrder.Uint64(scratch[:]))
9,467✔
4554
        node.LastUpdate = time.Unix(unix, 0)
9,467✔
4555

9,467✔
4556
        if _, err := io.ReadFull(r, node.PubKeyBytes[:]); err != nil {
9,467✔
4557
                return models.Node{}, err
×
4558
        }
×
4559

4560
        if _, err := r.Read(scratch[:2]); err != nil {
9,467✔
4561
                return models.Node{}, err
×
4562
        }
×
4563

4564
        hasNodeAnn := byteOrder.Uint16(scratch[:2])
9,467✔
4565
        if hasNodeAnn == 1 {
18,790✔
4566
                node.HaveNodeAnnouncement = true
9,323✔
4567
        } else {
9,470✔
4568
                node.HaveNodeAnnouncement = false
147✔
4569
        }
147✔
4570

4571
        // The rest of the data is optional, and will only be there if we got a
4572
        // node announcement for this node.
4573
        if !node.HaveNodeAnnouncement {
9,614✔
4574
                return node, nil
147✔
4575
        }
147✔
4576

4577
        // We did get a node announcement for this node, so we'll have the rest
4578
        // of the data available.
4579
        if err := binary.Read(r, byteOrder, &node.Color.R); err != nil {
9,323✔
4580
                return models.Node{}, err
×
4581
        }
×
4582
        if err := binary.Read(r, byteOrder, &node.Color.G); err != nil {
9,323✔
4583
                return models.Node{}, err
×
4584
        }
×
4585
        if err := binary.Read(r, byteOrder, &node.Color.B); err != nil {
9,323✔
4586
                return models.Node{}, err
×
4587
        }
×
4588

4589
        node.Alias, err = wire.ReadVarString(r, 0)
9,323✔
4590
        if err != nil {
9,323✔
4591
                return models.Node{}, err
×
4592
        }
×
4593

4594
        err = node.Features.Decode(r)
9,323✔
4595
        if err != nil {
9,323✔
4596
                return models.Node{}, err
×
4597
        }
×
4598

4599
        if _, err := r.Read(scratch[:2]); err != nil {
9,323✔
4600
                return models.Node{}, err
×
4601
        }
×
4602
        numAddresses := int(byteOrder.Uint16(scratch[:2]))
9,323✔
4603

9,323✔
4604
        var addresses []net.Addr
9,323✔
4605
        for i := 0; i < numAddresses; i++ {
21,842✔
4606
                address, err := DeserializeAddr(r)
12,519✔
4607
                if err != nil {
12,519✔
4608
                        return models.Node{}, err
×
4609
                }
×
4610
                addresses = append(addresses, address)
12,519✔
4611
        }
4612
        node.Addresses = addresses
9,323✔
4613

9,323✔
4614
        node.AuthSigBytes, err = wire.ReadVarBytes(r, 0, 80, "sig")
9,323✔
4615
        if err != nil {
9,323✔
4616
                return models.Node{}, err
×
4617
        }
×
4618

4619
        // We'll try and see if there are any opaque bytes left, if not, then
4620
        // we'll ignore the EOF error and return the node as is.
4621
        extraBytes, err := wire.ReadVarBytes(
9,323✔
4622
                r, 0, MaxAllowedExtraOpaqueBytes, "blob",
9,323✔
4623
        )
9,323✔
4624
        switch {
9,323✔
4625
        case errors.Is(err, io.ErrUnexpectedEOF):
×
4626
        case errors.Is(err, io.EOF):
×
4627
        case err != nil:
×
4628
                return models.Node{}, err
×
4629
        }
4630

4631
        if len(extraBytes) > 0 {
9,334✔
4632
                node.ExtraOpaqueData = extraBytes
11✔
4633
        }
11✔
4634

4635
        return node, nil
9,323✔
4636
}
4637

4638
// putChanEdgeInfo serializes the given channel edge info and writes it to the
// edge index, keyed by the 8-byte channel ID.
//
// The serialized layout is: the four 33-byte keys (node1, node2, bitcoin1,
// bitcoin2), the var-bytes feature vector, the four var-bytes auth-proof
// signatures (empty when no proof is attached), the channel outpoint,
// capacity, channel ID, chain hash and finally any extra opaque bytes.
func putChanEdgeInfo(edgeIndex kvdb.RwBucket,
	edgeInfo *models.ChannelEdgeInfo, chanID [8]byte) error {

	var b bytes.Buffer

	// Write the four fixed-size public keys first.
	if _, err := b.Write(edgeInfo.NodeKey1Bytes[:]); err != nil {
		return err
	}
	if _, err := b.Write(edgeInfo.NodeKey2Bytes[:]); err != nil {
		return err
	}
	if _, err := b.Write(edgeInfo.BitcoinKey1Bytes[:]); err != nil {
		return err
	}
	if _, err := b.Write(edgeInfo.BitcoinKey2Bytes[:]); err != nil {
		return err
	}

	// Encode the features to a temporary buffer first so they can be
	// written as a single length-prefixed byte slice.
	var featureBuf bytes.Buffer
	if err := edgeInfo.Features.Encode(&featureBuf); err != nil {
		return fmt.Errorf("unable to encode features: %w", err)
	}

	if err := wire.WriteVarBytes(&b, 0, featureBuf.Bytes()); err != nil {
		return err
	}

	// When no auth proof is attached, all four signatures are written as
	// empty var-bytes.
	authProof := edgeInfo.AuthProof
	var nodeSig1, nodeSig2, bitcoinSig1, bitcoinSig2 []byte
	if authProof != nil {
		nodeSig1 = authProof.NodeSig1Bytes
		nodeSig2 = authProof.NodeSig2Bytes
		bitcoinSig1 = authProof.BitcoinSig1Bytes
		bitcoinSig2 = authProof.BitcoinSig2Bytes
	}

	if err := wire.WriteVarBytes(&b, 0, nodeSig1); err != nil {
		return err
	}
	if err := wire.WriteVarBytes(&b, 0, nodeSig2); err != nil {
		return err
	}
	if err := wire.WriteVarBytes(&b, 0, bitcoinSig1); err != nil {
		return err
	}
	if err := wire.WriteVarBytes(&b, 0, bitcoinSig2); err != nil {
		return err
	}

	if err := WriteOutpoint(&b, &edgeInfo.ChannelPoint); err != nil {
		return err
	}
	err := binary.Write(&b, byteOrder, uint64(edgeInfo.Capacity))
	if err != nil {
		return err
	}
	if _, err := b.Write(chanID[:]); err != nil {
		return err
	}
	if _, err := b.Write(edgeInfo.ChainHash[:]); err != nil {
		return err
	}

	if len(edgeInfo.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes {
		return ErrTooManyExtraOpaqueBytes(len(edgeInfo.ExtraOpaqueData))
	}
	err = wire.WriteVarBytes(&b, 0, edgeInfo.ExtraOpaqueData)
	if err != nil {
		return err
	}

	return edgeIndex.Put(chanID[:], b.Bytes())
}
4711

4712
func fetchChanEdgeInfo(edgeIndex kvdb.RBucket,
4713
        chanID []byte) (models.ChannelEdgeInfo, error) {
7,106✔
4714

7,106✔
4715
        edgeInfoBytes := edgeIndex.Get(chanID)
7,106✔
4716
        if edgeInfoBytes == nil {
7,175✔
4717
                return models.ChannelEdgeInfo{}, ErrEdgeNotFound
69✔
4718
        }
69✔
4719

4720
        edgeInfoReader := bytes.NewReader(edgeInfoBytes)
7,040✔
4721

7,040✔
4722
        return deserializeChanEdgeInfo(edgeInfoReader)
7,040✔
4723
}
4724

4725
// deserializeChanEdgeInfo decodes a models.ChannelEdgeInfo from the
// serialized format produced by putChanEdgeInfo: four 33-byte keys, the
// var-bytes feature vector, four var-bytes auth-proof signatures, the channel
// outpoint, capacity, channel ID, chain hash and any trailing opaque bytes.
func deserializeChanEdgeInfo(r io.Reader) (models.ChannelEdgeInfo, error) {
	var (
		err      error
		edgeInfo models.ChannelEdgeInfo
	)

	if _, err := io.ReadFull(r, edgeInfo.NodeKey1Bytes[:]); err != nil {
		return models.ChannelEdgeInfo{}, err
	}
	if _, err := io.ReadFull(r, edgeInfo.NodeKey2Bytes[:]); err != nil {
		return models.ChannelEdgeInfo{}, err
	}
	if _, err := io.ReadFull(r, edgeInfo.BitcoinKey1Bytes[:]); err != nil {
		return models.ChannelEdgeInfo{}, err
	}
	if _, err := io.ReadFull(r, edgeInfo.BitcoinKey2Bytes[:]); err != nil {
		return models.ChannelEdgeInfo{}, err
	}

	featureBytes, err := wire.ReadVarBytes(r, 0, 900, "features")
	if err != nil {
		return models.ChannelEdgeInfo{}, err
	}

	features := lnwire.NewRawFeatureVector()
	err = features.Decode(bytes.NewReader(featureBytes))
	if err != nil {
		return models.ChannelEdgeInfo{}, fmt.Errorf("unable to decode "+
			"features: %w", err)
	}
	edgeInfo.Features = lnwire.NewFeatureVector(features, lnwire.Features)

	// Read the four auth-proof signatures. They may all be empty when the
	// edge was stored without a proof.
	proof := &models.ChannelAuthProof{}

	proof.NodeSig1Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
	if err != nil {
		return models.ChannelEdgeInfo{}, err
	}
	proof.NodeSig2Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
	if err != nil {
		return models.ChannelEdgeInfo{}, err
	}
	proof.BitcoinSig1Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
	if err != nil {
		return models.ChannelEdgeInfo{}, err
	}
	proof.BitcoinSig2Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
	if err != nil {
		return models.ChannelEdgeInfo{}, err
	}

	// Only attach the proof when at least one signature was present.
	if !proof.IsEmpty() {
		edgeInfo.AuthProof = proof
	}

	edgeInfo.ChannelPoint = wire.OutPoint{}
	if err := ReadOutpoint(r, &edgeInfo.ChannelPoint); err != nil {
		return models.ChannelEdgeInfo{}, err
	}
	if err := binary.Read(r, byteOrder, &edgeInfo.Capacity); err != nil {
		return models.ChannelEdgeInfo{}, err
	}
	if err := binary.Read(r, byteOrder, &edgeInfo.ChannelID); err != nil {
		return models.ChannelEdgeInfo{}, err
	}

	if _, err := io.ReadFull(r, edgeInfo.ChainHash[:]); err != nil {
		return models.ChannelEdgeInfo{}, err
	}

	// We'll try and see if there are any opaque bytes left, if not, then
	// we'll ignore the EOF error and return the edge as is.
	edgeInfo.ExtraOpaqueData, err = wire.ReadVarBytes(
		r, 0, MaxAllowedExtraOpaqueBytes, "blob",
	)
	switch {
	case errors.Is(err, io.ErrUnexpectedEOF):
	case errors.Is(err, io.EOF):
	case err != nil:
		return models.ChannelEdgeInfo{}, err
	}

	return edgeInfo, nil
}
4809

4810
// putChanEdgePolicy writes the serialized channel edge policy under the
// key fromNodePub||channelID within the edges bucket. It also keeps the
// edge-update index (timestamp||channelID entries) and the disabled-edge
// policy index consistent with the new policy: any stale update-index
// entry for the prior policy is removed before the fresh one is written.
func putChanEdgePolicy(edges kvdb.RwBucket, edge *models.ChannelEdgePolicy,
	from, to []byte) error {

	// Policies are keyed by the advertising node's 33-byte pubkey
	// followed by the big-endian 8-byte channel ID.
	var edgeKey [33 + 8]byte
	copy(edgeKey[:], from)
	byteOrder.PutUint64(edgeKey[33:], edge.ChannelID)

	var b bytes.Buffer
	if err := serializeChanEdgePolicy(&b, edge, to); err != nil {
		return err
	}

	// Before we write out the new edge, we'll create a new entry in the
	// update index in order to keep it fresh. The index key is the
	// update timestamp followed by the channel ID.
	updateUnix := uint64(edge.LastUpdate.Unix())
	var indexKey [8 + 8]byte
	byteOrder.PutUint64(indexKey[:8], updateUnix)
	byteOrder.PutUint64(indexKey[8:], edge.ChannelID)

	updateIndex, err := edges.CreateBucketIfNotExists(edgeUpdateIndexBucket)
	if err != nil {
		return err
	}

	// If there was already an entry for this edge, then we'll need to
	// delete the old one to ensure we don't leave around any after-images.
	// An unknown policy value does not have a update time recorded, so
	// it also does not need to be removed.
	if edgeBytes := edges.Get(edgeKey[:]); edgeBytes != nil &&
		!bytes.Equal(edgeBytes, unknownPolicy) {

		// In order to delete the old entry, we'll need to obtain the
		// *prior* update time in order to delete it. To do this, we'll
		// need to deserialize the existing policy within the database
		// (now outdated by the new one), and delete its corresponding
		// entry within the update index. We'll ignore any
		// ErrEdgePolicyOptionalFieldNotFound or ErrParsingExtraTLVBytes
		// errors, as we only need the channel ID and update time to
		// delete the entry.
		//
		// TODO(halseth): get rid of these invalid policies in a
		// migration.
		//
		// NOTE: the above TODO was completed in the SQL migration and
		// so such edge cases no longer need to be handled there.
		oldEdgePolicy, err := deserializeChanEdgePolicy(
			bytes.NewReader(edgeBytes),
		)
		if err != nil &&
			!errors.Is(err, ErrEdgePolicyOptionalFieldNotFound) &&
			!errors.Is(err, ErrParsingExtraTLVBytes) {

			return err
		}

		oldUpdateTime := uint64(oldEdgePolicy.LastUpdate.Unix())

		var oldIndexKey [8 + 8]byte
		byteOrder.PutUint64(oldIndexKey[:8], oldUpdateTime)
		byteOrder.PutUint64(oldIndexKey[8:], edge.ChannelID)

		if err := updateIndex.Delete(oldIndexKey[:]); err != nil {
			return err
		}
	}

	if err := updateIndex.Put(indexKey[:], nil); err != nil {
		return err
	}

	// Keep the disabled-channel index in sync with this policy's
	// direction bit and disabled flag.
	err = updateEdgePolicyDisabledIndex(
		edges, edge.ChannelID,
		edge.ChannelFlags&lnwire.ChanUpdateDirection > 0,
		edge.IsDisabled(),
	)
	if err != nil {
		return err
	}

	return edges.Put(edgeKey[:], b.Bytes())
}
4891

4892
// updateEdgePolicyDisabledIndex is used to update the disabledEdgePolicyIndex
4893
// bucket by either add a new disabled ChannelEdgePolicy or remove an existing
4894
// one.
4895
// The direction represents the direction of the edge and disabled is used for
4896
// deciding whether to remove or add an entry to the bucket.
4897
// In general a channel is disabled if two entries for the same chanID exist
4898
// in this bucket.
4899
// Maintaining the bucket this way allows a fast retrieval of disabled
4900
// channels, for example when prune is needed.
4901
func updateEdgePolicyDisabledIndex(edges kvdb.RwBucket, chanID uint64,
4902
        direction bool, disabled bool) error {
3,142✔
4903

3,142✔
4904
        var disabledEdgeKey [8 + 1]byte
3,142✔
4905
        byteOrder.PutUint64(disabledEdgeKey[0:], chanID)
3,142✔
4906
        if direction {
4,711✔
4907
                disabledEdgeKey[8] = 1
1,569✔
4908
        }
1,569✔
4909

4910
        disabledEdgePolicyIndex, err := edges.CreateBucketIfNotExists(
3,142✔
4911
                disabledEdgePolicyBucket,
3,142✔
4912
        )
3,142✔
4913
        if err != nil {
3,142✔
4914
                return err
×
4915
        }
×
4916

4917
        if disabled {
3,171✔
4918
                return disabledEdgePolicyIndex.Put(disabledEdgeKey[:], []byte{})
29✔
4919
        }
29✔
4920

4921
        return disabledEdgePolicyIndex.Delete(disabledEdgeKey[:])
3,116✔
4922
}
4923

4924
// putChanEdgePolicyUnknown marks the edge policy as unknown
4925
// in the edges bucket.
4926
func putChanEdgePolicyUnknown(edges kvdb.RwBucket, channelID uint64,
4927
        from []byte) error {
3,171✔
4928

3,171✔
4929
        var edgeKey [33 + 8]byte
3,171✔
4930
        copy(edgeKey[:], from)
3,171✔
4931
        byteOrder.PutUint64(edgeKey[33:], channelID)
3,171✔
4932

3,171✔
4933
        if edges.Get(edgeKey[:]) != nil {
3,171✔
4934
                return fmt.Errorf("cannot write unknown policy for channel %v "+
×
4935
                        " when there is already a policy present", channelID)
×
4936
        }
×
4937

4938
        return edges.Put(edgeKey[:], unknownPolicy)
3,171✔
4939
}
4940

4941
func fetchChanEdgePolicy(edges kvdb.RBucket, chanID []byte,
4942
        nodePub []byte) (*models.ChannelEdgePolicy, error) {
14,085✔
4943

14,085✔
4944
        var edgeKey [33 + 8]byte
14,085✔
4945
        copy(edgeKey[:], nodePub)
14,085✔
4946
        copy(edgeKey[33:], chanID)
14,085✔
4947

14,085✔
4948
        edgeBytes := edges.Get(edgeKey[:])
14,085✔
4949
        if edgeBytes == nil {
14,085✔
4950
                return nil, ErrEdgeNotFound
×
4951
        }
×
4952

4953
        // No need to deserialize unknown policy.
4954
        if bytes.Equal(edgeBytes, unknownPolicy) {
15,664✔
4955
                return nil, nil
1,579✔
4956
        }
1,579✔
4957

4958
        edgeReader := bytes.NewReader(edgeBytes)
12,509✔
4959

12,509✔
4960
        ep, err := deserializeChanEdgePolicy(edgeReader)
12,509✔
4961
        switch {
12,509✔
4962
        // If the db policy was missing an expected optional field, we return
4963
        // nil as if the policy was unknown.
4964
        case errors.Is(err, ErrEdgePolicyOptionalFieldNotFound):
2✔
4965
                return nil, nil
2✔
4966

4967
        // If the policy contains invalid TLV bytes, we return nil as if
4968
        // the policy was unknown.
4969
        case errors.Is(err, ErrParsingExtraTLVBytes):
×
4970
                return nil, nil
×
4971

4972
        case err != nil:
×
4973
                return nil, err
×
4974
        }
4975

4976
        return ep, nil
12,507✔
4977
}
4978

4979
func fetchChanEdgePolicies(edgeIndex kvdb.RBucket, edges kvdb.RBucket,
4980
        chanID []byte) (*models.ChannelEdgePolicy, *models.ChannelEdgePolicy,
4981
        error) {
3,202✔
4982

3,202✔
4983
        edgeInfo := edgeIndex.Get(chanID)
3,202✔
4984
        if edgeInfo == nil {
3,202✔
4985
                return nil, nil, fmt.Errorf("%w: chanID=%x", ErrEdgeNotFound,
×
4986
                        chanID)
×
4987
        }
×
4988

4989
        // The first node is contained within the first half of the edge
4990
        // information. We only propagate the error here and below if it's
4991
        // something other than edge non-existence.
4992
        node1Pub := edgeInfo[:33]
3,202✔
4993
        edge1, err := fetchChanEdgePolicy(edges, chanID, node1Pub)
3,202✔
4994
        if err != nil {
3,202✔
4995
                return nil, nil, fmt.Errorf("%w: node1Pub=%x", ErrEdgeNotFound,
×
4996
                        node1Pub)
×
4997
        }
×
4998

4999
        // Similarly, the second node is contained within the latter
5000
        // half of the edge information.
5001
        node2Pub := edgeInfo[33:66]
3,202✔
5002
        edge2, err := fetchChanEdgePolicy(edges, chanID, node2Pub)
3,202✔
5003
        if err != nil {
3,202✔
5004
                return nil, nil, fmt.Errorf("%w: node2Pub=%x", ErrEdgeNotFound,
×
5005
                        node2Pub)
×
5006
        }
×
5007

5008
        return edge1, edge2, nil
3,202✔
5009
}
5010

5011
func serializeChanEdgePolicy(w io.Writer, edge *models.ChannelEdgePolicy,
5012
        to []byte) error {
2,872✔
5013

2,872✔
5014
        err := wire.WriteVarBytes(w, 0, edge.SigBytes)
2,872✔
5015
        if err != nil {
2,872✔
5016
                return err
×
5017
        }
×
5018

5019
        if err := binary.Write(w, byteOrder, edge.ChannelID); err != nil {
2,872✔
5020
                return err
×
5021
        }
×
5022

5023
        var scratch [8]byte
2,872✔
5024
        updateUnix := uint64(edge.LastUpdate.Unix())
2,872✔
5025
        byteOrder.PutUint64(scratch[:], updateUnix)
2,872✔
5026
        if _, err := w.Write(scratch[:]); err != nil {
2,872✔
5027
                return err
×
5028
        }
×
5029

5030
        if err := binary.Write(w, byteOrder, edge.MessageFlags); err != nil {
2,872✔
5031
                return err
×
5032
        }
×
5033
        if err := binary.Write(w, byteOrder, edge.ChannelFlags); err != nil {
2,872✔
5034
                return err
×
5035
        }
×
5036
        if err := binary.Write(w, byteOrder, edge.TimeLockDelta); err != nil {
2,872✔
5037
                return err
×
5038
        }
×
5039
        if err := binary.Write(w, byteOrder, uint64(edge.MinHTLC)); err != nil {
2,872✔
5040
                return err
×
5041
        }
×
5042
        err = binary.Write(w, byteOrder, uint64(edge.FeeBaseMSat))
2,872✔
5043
        if err != nil {
2,872✔
5044
                return err
×
5045
        }
×
5046
        err = binary.Write(
2,872✔
5047
                w, byteOrder, uint64(edge.FeeProportionalMillionths),
2,872✔
5048
        )
2,872✔
5049
        if err != nil {
2,872✔
5050
                return err
×
5051
        }
×
5052

5053
        if _, err := w.Write(to); err != nil {
2,872✔
5054
                return err
×
5055
        }
×
5056

5057
        // If the max_htlc field is present, we write it. To be compatible with
5058
        // older versions that wasn't aware of this field, we write it as part
5059
        // of the opaque data.
5060
        // TODO(halseth): clean up when moving to TLV.
5061
        var opaqueBuf bytes.Buffer
2,872✔
5062
        if edge.MessageFlags.HasMaxHtlc() {
5,360✔
5063
                err := binary.Write(&opaqueBuf, byteOrder, uint64(edge.MaxHTLC))
2,488✔
5064
                if err != nil {
2,488✔
5065
                        return err
×
5066
                }
×
5067
        }
5068

5069
        if len(edge.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes {
2,872✔
5070
                return ErrTooManyExtraOpaqueBytes(len(edge.ExtraOpaqueData))
×
5071
        }
×
5072
        if _, err := opaqueBuf.Write(edge.ExtraOpaqueData); err != nil {
2,872✔
5073
                return err
×
5074
        }
×
5075

5076
        if err := wire.WriteVarBytes(w, 0, opaqueBuf.Bytes()); err != nil {
2,872✔
5077
                return err
×
5078
        }
×
5079

5080
        return nil
2,872✔
5081
}
5082

5083
func deserializeChanEdgePolicy(r io.Reader) (*models.ChannelEdgePolicy, error) {
12,537✔
5084
        // Deserialize the policy. Note that in case an optional field is not
12,537✔
5085
        // found or if the edge has invalid TLV data, then both an error and a
12,537✔
5086
        // populated policy object are returned so that the caller can decide
12,537✔
5087
        // if it still wants to use the edge or not.
12,537✔
5088
        edge, err := deserializeChanEdgePolicyRaw(r)
12,537✔
5089
        if err != nil &&
12,537✔
5090
                !errors.Is(err, ErrEdgePolicyOptionalFieldNotFound) &&
12,537✔
5091
                !errors.Is(err, ErrParsingExtraTLVBytes) {
12,537✔
5092

×
5093
                return nil, err
×
5094
        }
×
5095

5096
        return edge, err
12,537✔
5097
}
5098

5099
func deserializeChanEdgePolicyRaw(r io.Reader) (*models.ChannelEdgePolicy,
5100
        error) {
13,550✔
5101

13,550✔
5102
        edge := &models.ChannelEdgePolicy{}
13,550✔
5103

13,550✔
5104
        var err error
13,550✔
5105
        edge.SigBytes, err = wire.ReadVarBytes(r, 0, 80, "sig")
13,550✔
5106
        if err != nil {
13,550✔
5107
                return nil, err
×
5108
        }
×
5109

5110
        if err := binary.Read(r, byteOrder, &edge.ChannelID); err != nil {
13,550✔
5111
                return nil, err
×
5112
        }
×
5113

5114
        var scratch [8]byte
13,550✔
5115
        if _, err := r.Read(scratch[:]); err != nil {
13,550✔
5116
                return nil, err
×
5117
        }
×
5118
        unix := int64(byteOrder.Uint64(scratch[:]))
13,550✔
5119
        edge.LastUpdate = time.Unix(unix, 0)
13,550✔
5120

13,550✔
5121
        if err := binary.Read(r, byteOrder, &edge.MessageFlags); err != nil {
13,550✔
5122
                return nil, err
×
5123
        }
×
5124
        if err := binary.Read(r, byteOrder, &edge.ChannelFlags); err != nil {
13,550✔
5125
                return nil, err
×
5126
        }
×
5127
        if err := binary.Read(r, byteOrder, &edge.TimeLockDelta); err != nil {
13,550✔
5128
                return nil, err
×
5129
        }
×
5130

5131
        var n uint64
13,550✔
5132
        if err := binary.Read(r, byteOrder, &n); err != nil {
13,550✔
5133
                return nil, err
×
5134
        }
×
5135
        edge.MinHTLC = lnwire.MilliSatoshi(n)
13,550✔
5136

13,550✔
5137
        if err := binary.Read(r, byteOrder, &n); err != nil {
13,550✔
5138
                return nil, err
×
5139
        }
×
5140
        edge.FeeBaseMSat = lnwire.MilliSatoshi(n)
13,550✔
5141

13,550✔
5142
        if err := binary.Read(r, byteOrder, &n); err != nil {
13,550✔
5143
                return nil, err
×
5144
        }
×
5145
        edge.FeeProportionalMillionths = lnwire.MilliSatoshi(n)
13,550✔
5146

13,550✔
5147
        if _, err := r.Read(edge.ToNode[:]); err != nil {
13,550✔
5148
                return nil, err
×
5149
        }
×
5150

5151
        // We'll try and see if there are any opaque bytes left, if not, then
5152
        // we'll ignore the EOF error and return the edge as is.
5153
        edge.ExtraOpaqueData, err = wire.ReadVarBytes(
13,550✔
5154
                r, 0, MaxAllowedExtraOpaqueBytes, "blob",
13,550✔
5155
        )
13,550✔
5156
        switch {
13,550✔
5157
        case errors.Is(err, io.ErrUnexpectedEOF):
×
5158
        case errors.Is(err, io.EOF):
4✔
5159
        case err != nil:
×
5160
                return nil, err
×
5161
        }
5162

5163
        // See if optional fields are present.
5164
        if edge.MessageFlags.HasMaxHtlc() {
26,143✔
5165
                // The max_htlc field should be at the beginning of the opaque
12,593✔
5166
                // bytes.
12,593✔
5167
                opq := edge.ExtraOpaqueData
12,593✔
5168

12,593✔
5169
                // If the max_htlc field is not present, it might be old data
12,593✔
5170
                // stored before this field was validated. We'll return the
12,593✔
5171
                // edge along with an error.
12,593✔
5172
                if len(opq) < 8 {
12,597✔
5173
                        return edge, ErrEdgePolicyOptionalFieldNotFound
4✔
5174
                }
4✔
5175

5176
                maxHtlc := byteOrder.Uint64(opq[:8])
12,589✔
5177
                edge.MaxHTLC = lnwire.MilliSatoshi(maxHtlc)
12,589✔
5178

12,589✔
5179
                // Exclude the parsed field from the rest of the opaque data.
12,589✔
5180
                edge.ExtraOpaqueData = opq[8:]
12,589✔
5181
        }
5182

5183
        // Attempt to extract the inbound fee from the opaque data. If we fail
5184
        // to parse the TLV here, we return an error we also return the edge
5185
        // so that the caller can still use it. This is for backwards
5186
        // compatibility in case we have already persisted some policies that
5187
        // have invalid TLV data.
5188
        var inboundFee lnwire.Fee
13,546✔
5189
        typeMap, err := edge.ExtraOpaqueData.ExtractRecords(&inboundFee)
13,546✔
5190
        if err != nil {
13,546✔
5191
                return edge, fmt.Errorf("%w: %w", ErrParsingExtraTLVBytes, err)
×
5192
        }
×
5193

5194
        val, ok := typeMap[lnwire.FeeRecordType]
13,546✔
5195
        if ok && val == nil {
15,238✔
5196
                edge.InboundFee = fn.Some(inboundFee)
1,692✔
5197
        }
1,692✔
5198

5199
        return edge, nil
13,546✔
5200
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc