
lightningnetwork / lnd / 15760555435

19 Jun 2025 02:43PM UTC coverage: 58.115% (-10.0%) from 68.141%
Merge e0d95ca83 into 40efefeb6
Pull Request #9945: Decayed log optional migration

114 of 163 new or added lines in 10 files covered. (69.94%)

28704 existing lines in 453 files now uncovered.

97921 of 168495 relevant lines covered (58.12%)

1.8 hits per line

Source File

/graph/db/kv_store.go (68.32%)
1
package graphdb
2

3
import (
4
        "bytes"
5
        "context"
6
        "crypto/sha256"
7
        "encoding/binary"
8
        "errors"
9
        "fmt"
10
        "io"
11
        "math"
12
        "net"
13
        "sort"
14
        "sync"
15
        "testing"
16
        "time"
17

18
        "github.com/btcsuite/btcd/btcec/v2"
19
        "github.com/btcsuite/btcd/chaincfg/chainhash"
20
        "github.com/btcsuite/btcd/txscript"
21
        "github.com/btcsuite/btcd/wire"
22
        "github.com/btcsuite/btcwallet/walletdb"
23
        "github.com/lightningnetwork/lnd/aliasmgr"
24
        "github.com/lightningnetwork/lnd/batch"
25
        "github.com/lightningnetwork/lnd/fn/v2"
26
        "github.com/lightningnetwork/lnd/graph/db/models"
27
        "github.com/lightningnetwork/lnd/input"
28
        "github.com/lightningnetwork/lnd/kvdb"
29
        "github.com/lightningnetwork/lnd/lnwire"
30
        "github.com/lightningnetwork/lnd/routing/route"
31
        "github.com/stretchr/testify/require"
32
)
33

34
var (
35
        // nodeBucket is a bucket which houses all the vertices or nodes within
36
        // the channel graph. This bucket has a single sub-bucket which adds an
37
        // additional index from pubkey -> alias. Within the top-level of this
38
        // bucket, the key space maps a node's compressed public key to the
39
        // serialized information for that node. Additionally, there's a
40
        // special key "source" which stores the pubkey of the source node. The
41
        // source node is used as the starting point for all graph queries and
42
        // traversals. The graph is formed as a star-graph with the source node
43
        // at the center.
44
        //
45
        // maps: pubKey -> nodeInfo
46
        // maps: source -> selfPubKey
47
        nodeBucket = []byte("graph-node")
48

49
        // nodeUpdateIndexBucket is a sub-bucket of the nodeBucket. This bucket
50
        // will be used to quickly look up the "freshness" of a node's last
51
        // update to the network. The bucket only contains keys, and no values,
52
        // it's mapping:
53
        //
54
        // maps: updateTime || nodeID -> nil
55
        nodeUpdateIndexBucket = []byte("graph-node-update-index")
56

57
        // sourceKey is a special key that resides within the nodeBucket. The
58
        // sourceKey maps a key to the public key of the "self node".
59
        sourceKey = []byte("source")
60

61
        // aliasIndexBucket is a sub-bucket that's nested within the main
62
        // nodeBucket. This bucket maps the public key of a node to its
63
        // current alias. This bucket is provided as it can be used within a
64
        // future UI layer to add an additional degree of confirmation.
65
        aliasIndexBucket = []byte("alias")
66

67
        // edgeBucket is a bucket which houses all of the edge or channel
68
        // information within the channel graph. This bucket essentially acts
69
        // as an adjacency list, which in conjunction with a range scan, can be
70
        // used to iterate over all the incoming and outgoing edges for a
71
        // particular node. Keys in the bucket use a prefix scheme which leads
72
        // with the node's public key and ends with the compact edge ID.
73
        // For each chanID, there will be two entries within the bucket, as the
74
        // graph is directed: nodes may have different policies w.r.t. fees
75
        // for their respective directions.
76
        //
77
        // maps: pubKey || chanID -> channel edge policy for node
78
        edgeBucket = []byte("graph-edge")
79

80
        // unknownPolicy is represented as an empty slice. It is
81
        // used as the value in edgeBucket for unknown channel edge policies.
82
        // Unknown policies are still stored in the database to enable efficient
83
        // lookup of incoming channel edges.
84
        unknownPolicy = []byte{}
85

86
        // chanStart is an array of all zero bytes which is used to perform
87
        // range scans within the edgeBucket to obtain all of the outgoing
88
        // edges for a particular node.
89
        chanStart [8]byte
90

91
        // edgeIndexBucket is an index which can be used to iterate all edges
92
        // in the bucket, grouping them according to their in/out nodes.
93
        // Additionally, the items in this bucket also contain the complete
94
        // edge information for a channel. The edge information includes the
95
        // capacity of the channel, the nodes that made the channel, etc. This
96
        // bucket resides within the edgeBucket above. Creation of an edge
97
        // proceeds in two phases: first the edge is added to the edge index,
98
        // afterwards the edgeBucket can be updated with the latest details of
99
        // the edge as they are announced on the network.
100
        //
101
        // maps: chanID -> pubKey1 || pubKey2 || restofEdgeInfo
102
        edgeIndexBucket = []byte("edge-index")
103

104
        // edgeUpdateIndexBucket is a sub-bucket of the main edgeBucket. This
105
        // bucket contains an index which allows us to gauge the "freshness" of
106
        // a channel's last updates.
107
        //
108
        // maps: updateTime || chanID -> nil
109
        edgeUpdateIndexBucket = []byte("edge-update-index")
110

111
        // channelPointBucket maps a channel's full outpoint (txid:index) to
112
        // its short 8-byte channel ID. This bucket resides within the
113
        // edgeBucket above, and can be used to quickly remove an edge due to
114
        // the outpoint being spent, or to query for existence of a channel.
115
        //
116
        // maps: outPoint -> chanID
117
        channelPointBucket = []byte("chan-index")
118

119
        // zombieBucket is a sub-bucket of the main edgeBucket bucket
120
        // responsible for maintaining an index of zombie channels. Each entry
121
        // exists within the bucket as follows:
122
        //
123
        // maps: chanID -> pubKey1 || pubKey2
124
        //
125
        // The chanID represents the channel ID of the edge that is marked as a
126
        // zombie and is used as the key, which maps to the public keys of the
127
        // edge's participants.
128
        zombieBucket = []byte("zombie-index")
129

130
        // disabledEdgePolicyBucket is a sub-bucket of the main edgeBucket
131
        // bucket responsible for maintaining an index of disabled edge
132
        // policies. Each entry exists within the bucket as follows:
133
        //
134
        // maps: <chanID><direction> -> []byte{}
135
        //
136
        // The chanID represents the channel ID of the edge and the direction is
137
        // one byte representing the direction of the edge. The main purpose of
138
        // this index is to allow pruning disabled channels in a fast way
139
        // without the need to iterate all over the graph.
140
        disabledEdgePolicyBucket = []byte("disabled-edge-policy-index")
141

142
        // graphMetaBucket is a top-level bucket which stores various metadata
143
        // related to the on-disk channel graph. Data stored in this bucket
144
        // includes the block to which the graph has been synced to, the total
145
        // number of channels, etc.
146
        graphMetaBucket = []byte("graph-meta")
147

148
        // pruneLogBucket is a bucket within the graphMetaBucket that stores
149
        // a mapping from the block height to the hash for the blocks used to
150
        // prune the graph.
151
        // Once a new block is discovered, any channels that have been closed
152
        // (by spending the outpoint) can safely be removed from the graph, and
153
        // the block is added to the prune log. We need to keep such a log for
154
        // the case where a reorg happens, and we must "rewind" the state of the
155
        // graph by removing channels that were previously confirmed. In such a
156
        // case we'll remove all entries from the prune log with a block height
157
        // that no longer exists.
158
        pruneLogBucket = []byte("prune-log")
159

160
        // closedScidBucket is a top-level bucket that stores scids for
161
        // channels that we know to be closed. This is used so that we don't
162
        // need to perform expensive validation checks if we receive a channel
163
        // announcement for the channel again.
164
        //
165
        // maps: scid -> []byte{}
166
        closedScidBucket = []byte("closed-scid")
167
)
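
// The key scheme described above can be made concrete with a short sketch.
// exampleEdgeKey is illustrative only and is not used elsewhere in this file:
// it shows how an edgeBucket key is assembled as pubKey || chanID, so that a
// cursor seeded with pubKey || chanStart range-scans all of a node's outgoing
// edges.
func exampleEdgeKey(nodePub [33]byte, chanID uint64) [33 + 8]byte {
        var key [33 + 8]byte
        copy(key[:33], nodePub[:])
        binary.BigEndian.PutUint64(key[33:], chanID)

        return key
}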
168

169
const (
170
        // MaxAllowedExtraOpaqueBytes is the largest amount of opaque bytes that
171
        // we'll permit to be written to disk. We limit this as otherwise, it
172
        // would be possible for a node to create a ton of updates and slowly
173
        // fill our disk, and also waste bandwidth due to relaying.
174
        MaxAllowedExtraOpaqueBytes = 10000
175
)
176

177
// KVStore is a persistent, on-disk graph representation of the Lightning
178
// Network. This struct can be used to implement path finding algorithms on top
179
// of, and also to update a node's view based on information received from the
180
// p2p network. Internally, the graph is stored using a modified adjacency list
181
// representation with some added object interaction possible with each
182
// serialized edge/node. The stored graph is directed, meaning that there are two
183
// edges stored for each channel: an inbound/outbound edge for each node pair.
184
// Nodes, edges, and edge information can all be added to the graph
185
// independently. Edge removal results in the deletion of all edge information
186
// for that edge.
187
type KVStore struct {
188
        db kvdb.Backend
189

190
        // cacheMu guards all caches (rejectCache and chanCache). If
191
        // this mutex is to be acquired at the same time as the DB mutex, then
192
        // the cacheMu MUST be acquired first to prevent deadlock.
193
        cacheMu     sync.RWMutex
194
        rejectCache *rejectCache
195
        chanCache   *channelCache
196

197
        chanScheduler batch.Scheduler[kvdb.RwTx]
198
        nodeScheduler batch.Scheduler[kvdb.RwTx]
199
}
200

201
// A compile-time assertion to ensure that the KVStore struct implements the
202
// V1Store interface.
203
var _ V1Store = (*KVStore)(nil)
204

205
// NewKVStore allocates a new KVStore backed by a DB instance. The
206
// returned instance has its own unique reject cache and channel cache.
207
func NewKVStore(db kvdb.Backend, options ...StoreOptionModifier) (*KVStore,
208
        error) {
3✔
209

3✔
210
        opts := DefaultOptions()
3✔
211
        for _, o := range options {
6✔
212
                o(opts)
3✔
213
        }
3✔
214

215
        if !opts.NoMigration {
6✔
216
                if err := initKVStore(db); err != nil {
3✔
217
                        return nil, err
×
218
                }
×
219
        }
220

221
        g := &KVStore{
3✔
222
                db:          db,
3✔
223
                rejectCache: newRejectCache(opts.RejectCacheSize),
3✔
224
                chanCache:   newChannelCache(opts.ChannelCacheSize),
3✔
225
        }
3✔
226
        g.chanScheduler = batch.NewTimeScheduler(
3✔
227
                batch.NewBoltBackend[kvdb.RwTx](db), &g.cacheMu,
3✔
228
                opts.BatchCommitInterval,
3✔
229
        )
3✔
230
        g.nodeScheduler = batch.NewTimeScheduler(
3✔
231
                batch.NewBoltBackend[kvdb.RwTx](db), nil,
3✔
232
                opts.BatchCommitInterval,
3✔
233
        )
3✔
234

3✔
235
        return g, nil
3✔
236
}
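
// Illustrative usage sketch for NewKVStore (hypothetical caller code, not
// part of this file). It assumes a kvdb.Backend has already been opened by
// the caller; with no functional options passed, DefaultOptions apply and
// initKVStore creates the required buckets:
//
//	var db kvdb.Backend // obtained from the caller's backend setup
//	store, err := NewKVStore(db)
//	if err != nil {
//		// Handle the initialization error.
//	}
//	_ = store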
237

238
// channelMapKey is the key structure used for storing channel edge policies.
239
type channelMapKey struct {
240
        nodeKey route.Vertex
241
        chanID  [8]byte
242
}
243

244
// String returns a human-readable representation of the key.
245
func (c channelMapKey) String() string {
×
246
        return fmt.Sprintf("node=%v, chanID=%x", c.nodeKey, c.chanID)
×
247
}
×
248

249
// getChannelMap loads all channel edge policies from the database and stores
250
// them in a map.
251
func (c *KVStore) getChannelMap(edges kvdb.RBucket) (
252
        map[channelMapKey]*models.ChannelEdgePolicy, error) {
3✔
253

3✔
254
        // Create a map to store all channel edge policies.
3✔
255
        channelMap := make(map[channelMapKey]*models.ChannelEdgePolicy)
3✔
256

3✔
257
        err := kvdb.ForAll(edges, func(k, edgeBytes []byte) error {
6✔
258
                // Skip embedded buckets.
3✔
259
                if bytes.Equal(k, edgeIndexBucket) ||
3✔
260
                        bytes.Equal(k, edgeUpdateIndexBucket) ||
3✔
261
                        bytes.Equal(k, zombieBucket) ||
3✔
262
                        bytes.Equal(k, disabledEdgePolicyBucket) ||
3✔
263
                        bytes.Equal(k, channelPointBucket) {
6✔
264

3✔
265
                        return nil
3✔
266
                }
3✔
267

268
                // Validate key length.
269
                if len(k) != 33+8 {
3✔
270
                        return fmt.Errorf("invalid edge key %x encountered", k)
×
271
                }
×
272

273
                var key channelMapKey
3✔
274
                copy(key.nodeKey[:], k[:33])
3✔
275
                copy(key.chanID[:], k[33:])
3✔
276

3✔
277
                // No need to deserialize unknown policy.
3✔
278
                if bytes.Equal(edgeBytes, unknownPolicy) {
3✔
279
                        return nil
×
280
                }
×
281

282
                edgeReader := bytes.NewReader(edgeBytes)
3✔
283
                edge, err := deserializeChanEdgePolicyRaw(
3✔
284
                        edgeReader,
3✔
285
                )
3✔
286

3✔
287
                switch {
3✔
288
                // If the db policy was missing an expected optional field, we
289
                // return nil as if the policy was unknown.
290
                case errors.Is(err, ErrEdgePolicyOptionalFieldNotFound):
×
291
                        return nil
×
292

293
                // We don't want a single policy with bad TLV data to stop us
294
                // from loading the rest of the data, so we just skip this
295
                // policy. This is for backwards compatibility since we did not
296
                // use to validate TLV data in the past before persisting it.
297
                case errors.Is(err, ErrParsingExtraTLVBytes):
×
298
                        return nil
×
299

300
                case err != nil:
×
301
                        return err
×
302
                }
303

304
                channelMap[key] = edge
3✔
305

3✔
306
                return nil
3✔
307
        })
308
        if err != nil {
3✔
309
                return nil, err
×
310
        }
×
311

312
        return channelMap, nil
3✔
313
}
314

315
var graphTopLevelBuckets = [][]byte{
316
        nodeBucket,
317
        edgeBucket,
318
        graphMetaBucket,
319
        closedScidBucket,
320
}
321

322
// initKVStore creates and initializes a fresh graph store. In
323
// the case that the target path has not yet been created or doesn't yet exist,
324
// then the path is created. Additionally, all required top-level buckets used
325
// within the database are created.
326
func initKVStore(db kvdb.Backend) error {
3✔
327
        err := kvdb.Update(db, func(tx kvdb.RwTx) error {
6✔
328
                for _, tlb := range graphTopLevelBuckets {
6✔
329
                        if _, err := tx.CreateTopLevelBucket(tlb); err != nil {
3✔
330
                                return err
×
331
                        }
×
332
                }
333

334
                nodes := tx.ReadWriteBucket(nodeBucket)
3✔
335
                _, err := nodes.CreateBucketIfNotExists(aliasIndexBucket)
3✔
336
                if err != nil {
3✔
337
                        return err
×
338
                }
×
339
                _, err = nodes.CreateBucketIfNotExists(nodeUpdateIndexBucket)
3✔
340
                if err != nil {
3✔
341
                        return err
×
342
                }
×
343

344
                edges := tx.ReadWriteBucket(edgeBucket)
3✔
345
                _, err = edges.CreateBucketIfNotExists(edgeIndexBucket)
3✔
346
                if err != nil {
3✔
347
                        return err
×
348
                }
×
349
                _, err = edges.CreateBucketIfNotExists(edgeUpdateIndexBucket)
3✔
350
                if err != nil {
3✔
351
                        return err
×
352
                }
×
353
                _, err = edges.CreateBucketIfNotExists(channelPointBucket)
3✔
354
                if err != nil {
3✔
355
                        return err
×
356
                }
×
357
                _, err = edges.CreateBucketIfNotExists(zombieBucket)
3✔
358
                if err != nil {
3✔
359
                        return err
×
360
                }
×
361

362
                graphMeta := tx.ReadWriteBucket(graphMetaBucket)
3✔
363
                _, err = graphMeta.CreateBucketIfNotExists(pruneLogBucket)
3✔
364

3✔
365
                return err
3✔
366
        }, func() {})
3✔
367
        if err != nil {
3✔
368
                return fmt.Errorf("unable to create new channel graph: %w", err)
×
369
        }
×
370

371
        return nil
3✔
372
}
373

374
// AddrsForNode returns all known addresses for the target node public key that
375
// the graph DB is aware of. The returned boolean indicates whether the given
376
// node is known to the graph DB.
377
//
378
// NOTE: this is part of the channeldb.AddrSource interface.
379
func (c *KVStore) AddrsForNode(ctx context.Context,
380
        nodePub *btcec.PublicKey) (bool, []net.Addr, error) {
3✔
381

3✔
382
        pubKey, err := route.NewVertexFromBytes(nodePub.SerializeCompressed())
3✔
383
        if err != nil {
3✔
384
                return false, nil, err
×
385
        }
×
386

387
        node, err := c.FetchLightningNode(ctx, pubKey)
3✔
388
        // We don't consider it an error if the graph is unaware of the node.
3✔
389
        switch {
3✔
390
        case err != nil && !errors.Is(err, ErrGraphNodeNotFound):
×
391
                return false, nil, err
×
392

393
        case errors.Is(err, ErrGraphNodeNotFound):
3✔
394
                return false, nil, nil
3✔
395
        }
396

397
        return true, node.Addresses, nil
3✔
398
}
399

400
// ForEachChannel iterates through all the channel edges stored within the
401
// graph and invokes the passed callback for each edge. The callback takes two
402
// edges since this is a directed graph: both the in/out edges are visited.
403
// If the callback returns an error, then the transaction is aborted and the
404
// iteration stops early.
405
//
406
// NOTE: If an edge can't be found, or wasn't advertised, then a nil pointer
407
// for that particular channel edge routing policy will be passed into the
408
// callback.
409
func (c *KVStore) ForEachChannel(cb func(*models.ChannelEdgeInfo,
410
        *models.ChannelEdgePolicy, *models.ChannelEdgePolicy) error) error {
3✔
411

3✔
412
        return c.db.View(func(tx kvdb.RTx) error {
6✔
413
                edges := tx.ReadBucket(edgeBucket)
3✔
414
                if edges == nil {
3✔
415
                        return ErrGraphNoEdgesFound
×
416
                }
×
417

418
                // First, load all edges in memory indexed by node and channel
419
                // id.
420
                channelMap, err := c.getChannelMap(edges)
3✔
421
                if err != nil {
3✔
422
                        return err
×
423
                }
×
424

425
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
3✔
426
                if edgeIndex == nil {
3✔
427
                        return ErrGraphNoEdgesFound
×
428
                }
×
429

430
                // Load edge index, recombine each channel with the policies
431
                // loaded above and invoke the callback.
432
                return kvdb.ForAll(
3✔
433
                        edgeIndex, func(k, edgeInfoBytes []byte) error {
6✔
434
                                var chanID [8]byte
3✔
435
                                copy(chanID[:], k)
3✔
436

3✔
437
                                edgeInfoReader := bytes.NewReader(edgeInfoBytes)
3✔
438
                                info, err := deserializeChanEdgeInfo(
3✔
439
                                        edgeInfoReader,
3✔
440
                                )
3✔
441
                                if err != nil {
3✔
442
                                        return err
×
443
                                }
×
444

445
                                policy1 := channelMap[channelMapKey{
3✔
446
                                        nodeKey: info.NodeKey1Bytes,
3✔
447
                                        chanID:  chanID,
3✔
448
                                }]
3✔
449

3✔
450
                                policy2 := channelMap[channelMapKey{
3✔
451
                                        nodeKey: info.NodeKey2Bytes,
3✔
452
                                        chanID:  chanID,
3✔
453
                                }]
3✔
454

3✔
455
                                return cb(&info, policy1, policy2)
3✔
456
                        },
457
                )
458
        }, func() {})
3✔
459
}
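
// Illustrative sketch of consuming ForEachChannel (hypothetical caller code,
// not part of this file). Per the NOTE above, either policy may be nil when
// that direction hasn't been advertised, so the callback guards against nil:
//
//	err := store.ForEachChannel(func(info *models.ChannelEdgeInfo,
//		p1, p2 *models.ChannelEdgePolicy) error {
//
//		if p1 == nil || p2 == nil {
//			// One direction is unknown; skip this channel.
//			return nil
//		}
//
//		// Both directed policies are known for info.ChannelID here.
//		return nil
//	})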
460

461
// ForEachChannelCacheable iterates through all the channel edges stored within
462
// the graph and invokes the passed callback for each edge. The callback takes
463
// two edges as since this is a directed graph, both the in/out edges are
464
// visited. If the callback returns an error, then the transaction is aborted
465
// and the iteration stops early.
466
//
467
// NOTE: If an edge can't be found, or wasn't advertised, then a nil pointer
468
// for that particular channel edge routing policy will be passed into the
469
// callback.
470
//
471
// NOTE: this method is like ForEachChannel but fetches only the data required
472
// for the graph cache.
473
func (c *KVStore) ForEachChannelCacheable(cb func(*models.CachedEdgeInfo,
474
        *models.CachedEdgePolicy, *models.CachedEdgePolicy) error) error {
3✔
475

3✔
476
        return c.db.View(func(tx kvdb.RTx) error {
6✔
477
                edges := tx.ReadBucket(edgeBucket)
3✔
478
                if edges == nil {
3✔
479
                        return ErrGraphNoEdgesFound
×
480
                }
×
481

482
                // First, load all edges in memory indexed by node and channel
483
                // id.
484
                channelMap, err := c.getChannelMap(edges)
3✔
485
                if err != nil {
3✔
486
                        return err
×
487
                }
×
488

489
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
3✔
490
                if edgeIndex == nil {
3✔
491
                        return ErrGraphNoEdgesFound
×
492
                }
×
493

494
                // Load edge index, recombine each channel with the policies
495
                // loaded above and invoke the callback.
496
                return kvdb.ForAll(
3✔
497
                        edgeIndex, func(k, edgeInfoBytes []byte) error {
6✔
498
                                var chanID [8]byte
3✔
499
                                copy(chanID[:], k)
3✔
500

3✔
501
                                edgeInfoReader := bytes.NewReader(edgeInfoBytes)
3✔
502
                                info, err := deserializeChanEdgeInfo(
3✔
503
                                        edgeInfoReader,
3✔
504
                                )
3✔
505
                                if err != nil {
3✔
506
                                        return err
×
507
                                }
×
508

509
                                key1 := channelMapKey{
3✔
510
                                        nodeKey: info.NodeKey1Bytes,
3✔
511
                                        chanID:  chanID,
3✔
512
                                }
3✔
513
                                policy1 := channelMap[key1]
3✔
514

3✔
515
                                key2 := channelMapKey{
3✔
516
                                        nodeKey: info.NodeKey2Bytes,
3✔
517
                                        chanID:  chanID,
3✔
518
                                }
3✔
519
                                policy2 := channelMap[key2]
3✔
520

3✔
521
                                // We now create the cached edge policies, but
3✔
522
                                // only when the above policies are found in the
3✔
523
                                // `channelMap`.
3✔
524
                                var (
3✔
525
                                        cachedPolicy1 *models.CachedEdgePolicy
3✔
526
                                        cachedPolicy2 *models.CachedEdgePolicy
3✔
527
                                )
3✔
528

3✔
529
                                if policy1 != nil {
6✔
530
                                        cachedPolicy1 = models.NewCachedPolicy(
3✔
531
                                                policy1,
3✔
532
                                        )
3✔
533
                                } else {
3✔
534
                                        log.Warnf("ChannelEdgePolicy not "+
×
535
                                                "found using %v", key1)
×
536
                                }
×
537

538
                                if policy2 != nil {
6✔
539
                                        cachedPolicy2 = models.NewCachedPolicy(
3✔
540
                                                policy2,
3✔
541
                                        )
3✔
542
                                } else {
3✔
543
                                        log.Warnf("ChannelEdgePolicy not "+
×
544
                                                "found using %v", key2)
×
545
                                }
×
546

547
                                return cb(
3✔
548
                                        models.NewCachedEdge(&info),
3✔
549
                                        cachedPolicy1, cachedPolicy2,
3✔
550
                                )
3✔
551
                        },
552
                )
553
        }, func() {})
3✔
554
}
555

556
// forEachNodeDirectedChannel iterates through all channels of a given node,
557
// executing the passed callback on the directed edge representing the channel
558
// and its incoming policy. If the callback returns an error, then the iteration
559
// is halted with the error propagated back up to the caller. An optional read
560
// transaction may be provided. If none is provided, a new one will be created.
561
//
562
// Unknown policies are passed into the callback as nil values.
563
func (c *KVStore) forEachNodeDirectedChannel(tx kvdb.RTx,
564
        node route.Vertex, cb func(channel *DirectedChannel) error) error {
3✔
565

3✔
566
        // Fallback that uses the database.
3✔
567
        toNodeCallback := func() route.Vertex {
6✔
568
                return node
3✔
569
        }
3✔
570
        toNodeFeatures, err := c.fetchNodeFeatures(tx, node)
3✔
571
        if err != nil {
3✔
572
                return err
×
573
        }
×
574

575
        dbCallback := func(tx kvdb.RTx, e *models.ChannelEdgeInfo, p1,
3✔
576
                p2 *models.ChannelEdgePolicy) error {
6✔
577

3✔
578
                var cachedInPolicy *models.CachedEdgePolicy
3✔
579
                if p2 != nil {
6✔
580
                        cachedInPolicy = models.NewCachedPolicy(p2)
3✔
581
                        cachedInPolicy.ToNodePubKey = toNodeCallback
3✔
582
                        cachedInPolicy.ToNodeFeatures = toNodeFeatures
3✔
583
                }
3✔
584

585
                directedChannel := &DirectedChannel{
3✔
586
                        ChannelID:    e.ChannelID,
3✔
587
                        IsNode1:      node == e.NodeKey1Bytes,
3✔
588
                        OtherNode:    e.NodeKey2Bytes,
3✔
589
                        Capacity:     e.Capacity,
3✔
590
                        OutPolicySet: p1 != nil,
3✔
591
                        InPolicy:     cachedInPolicy,
3✔
592
                }
3✔
593

3✔
594
                if p1 != nil {
6✔
595
                        p1.InboundFee.WhenSome(func(fee lnwire.Fee) {
3✔
UNCOV
596
                                directedChannel.InboundFee = fee
×
UNCOV
597
                        })
×
598
                }
599

600
                if node == e.NodeKey2Bytes {
6✔
601
                        directedChannel.OtherNode = e.NodeKey1Bytes
3✔
602
                }
3✔
603

604
                return cb(directedChannel)
3✔
605
        }
606

607
        return nodeTraversal(tx, node[:], c.db, dbCallback)
3✔
608
}
609

610
// fetchNodeFeatures returns the features of a given node. If no features are
611
// known for the node, an empty feature vector is returned. An optional read
612
// transaction may be provided. If none is provided, a new one will be created.
613
func (c *KVStore) fetchNodeFeatures(tx kvdb.RTx,
614
        node route.Vertex) (*lnwire.FeatureVector, error) {
3✔
615

3✔
616
        // Fallback that uses the database.
3✔
617
        targetNode, err := c.FetchLightningNodeTx(tx, node)
3✔
618
        switch {
3✔
619
        // If the node exists and has features, return them directly.
620
        case err == nil:
3✔
621
                return targetNode.Features, nil
3✔
622

623
        // If we couldn't find a node announcement, populate a blank feature
624
        // vector.
UNCOV
625
        case errors.Is(err, ErrGraphNodeNotFound):
×
UNCOV
626
                return lnwire.EmptyFeatureVector(), nil
×
627

628
        // Otherwise, bubble the error up.
629
        default:
×
630
                return nil, err
×
631
        }
632
}
633

634
// ForEachNodeDirectedChannel iterates through all channels of a given node,
635
// executing the passed callback on the directed edge representing the channel
636
// and its incoming policy. If the callback returns an error, then the iteration
637
// is halted with the error propagated back up to the caller.
638
//
639
// Unknown policies are passed into the callback as nil values.
640
//
641
// NOTE: this is part of the graphdb.NodeTraverser interface.
642
func (c *KVStore) ForEachNodeDirectedChannel(nodePub route.Vertex,
643
        cb func(channel *DirectedChannel) error) error {
3✔
644

3✔
645
        return c.forEachNodeDirectedChannel(nil, nodePub, cb)
3✔
646
}
3✔
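
// Illustrative sketch (hypothetical caller code, not part of this file):
// collect the IDs of all of nodePub's channels for which an outgoing policy
// is known, using the DirectedChannel fields populated above:
//
//	var withOutPolicy []uint64
//	err := store.ForEachNodeDirectedChannel(nodePub,
//		func(ch *DirectedChannel) error {
//			if ch.OutPolicySet {
//				withOutPolicy = append(withOutPolicy, ch.ChannelID)
//			}
//			return nil
//		},
//	)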
647

648
// FetchNodeFeatures returns the features of the given node. If no features are
649
// known for the node, an empty feature vector is returned.
650
//
651
// NOTE: this is part of the graphdb.NodeTraverser interface.
652
func (c *KVStore) FetchNodeFeatures(nodePub route.Vertex) (
653
        *lnwire.FeatureVector, error) {
3✔
654

3✔
655
        return c.fetchNodeFeatures(nil, nodePub)
3✔
656
}
3✔
657

658
// ForEachNodeCached is similar to forEachNode, but it returns DirectedChannel
659
// data to the call-back.
660
//
661
// NOTE: The callback contents MUST not be modified.
662
func (c *KVStore) ForEachNodeCached(cb func(node route.Vertex,
UNCOV
663
        chans map[uint64]*DirectedChannel) error) error {
×
UNCOV
664

×
UNCOV
665
        // Otherwise call back to a version that uses the database directly.
×
UNCOV
666
        // We'll iterate over each node, then the set of channels for each
×
UNCOV
667
        // node, and construct a similar callback function signature as the
×
UNCOV
668
        // main function expects.
×
UNCOV
669
        return c.forEachNode(func(tx kvdb.RTx,
×
UNCOV
670
                node *models.LightningNode) error {
×
UNCOV
671

×
UNCOV
672
                channels := make(map[uint64]*DirectedChannel)
×
UNCOV
673

×
UNCOV
674
                err := c.forEachNodeChannelTx(tx, node.PubKeyBytes,
×
UNCOV
675
                        func(tx kvdb.RTx, e *models.ChannelEdgeInfo,
×
UNCOV
676
                                p1 *models.ChannelEdgePolicy,
×
UNCOV
677
                                p2 *models.ChannelEdgePolicy) error {
×
UNCOV
678

×
UNCOV
679
                                toNodeCallback := func() route.Vertex {
×
680
                                        return node.PubKeyBytes
×
681
                                }
×
UNCOV
682
                                toNodeFeatures, err := c.fetchNodeFeatures(
×
UNCOV
683
                                        tx, node.PubKeyBytes,
×
UNCOV
684
                                )
×
UNCOV
685
                                if err != nil {
×
686
                                        return err
×
687
                                }
×
688

UNCOV
689
                                var cachedInPolicy *models.CachedEdgePolicy
×
UNCOV
690
                                if p2 != nil {
×
UNCOV
691
                                        cachedInPolicy =
×
UNCOV
692
                                                models.NewCachedPolicy(p2)
×
UNCOV
693
                                        cachedInPolicy.ToNodePubKey =
×
UNCOV
694
                                                toNodeCallback
×
UNCOV
695
                                        cachedInPolicy.ToNodeFeatures =
×
UNCOV
696
                                                toNodeFeatures
×
UNCOV
697
                                }
×
698

UNCOV
699
                                directedChannel := &DirectedChannel{
×
UNCOV
700
                                        ChannelID: e.ChannelID,
×
UNCOV
701
                                        IsNode1: node.PubKeyBytes ==
×
UNCOV
702
                                                e.NodeKey1Bytes,
×
UNCOV
703
                                        OtherNode:    e.NodeKey2Bytes,
×
UNCOV
704
                                        Capacity:     e.Capacity,
×
UNCOV
705
                                        OutPolicySet: p1 != nil,
×
UNCOV
706
                                        InPolicy:     cachedInPolicy,
×
UNCOV
707
                                }
×
UNCOV
708

×
UNCOV
709
                                if node.PubKeyBytes == e.NodeKey2Bytes {
×
UNCOV
710
                                        directedChannel.OtherNode =
×
UNCOV
711
                                                e.NodeKey1Bytes
×
UNCOV
712
                                }
×
713

UNCOV
714
                                channels[e.ChannelID] = directedChannel
×
UNCOV
715

×
UNCOV
716
                                return nil
×
717
                        })
UNCOV
718
                if err != nil {
×
719
                        return err
×
720
                }
×
721

UNCOV
722
                return cb(node.PubKeyBytes, channels)
×
723
        })
724
}
725

726
// DisabledChannelIDs returns the channel ids of disabled channels.
727
// A channel is disabled when two of the associated ChanelEdgePolicies
728
// have their disabled bit on.
UNCOV
729
func (c *KVStore) DisabledChannelIDs() ([]uint64, error) {
×
UNCOV
730
        var disabledChanIDs []uint64
×
UNCOV
731
        var chanEdgeFound map[uint64]struct{}
×
UNCOV
732

×
UNCOV
733
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
×
UNCOV
734
                edges := tx.ReadBucket(edgeBucket)
×
UNCOV
735
                if edges == nil {
×
736
                        return ErrGraphNoEdgesFound
×
737
                }
×
738

UNCOV
739
                disabledEdgePolicyIndex := edges.NestedReadBucket(
×
UNCOV
740
                        disabledEdgePolicyBucket,
×
UNCOV
741
                )
×
UNCOV
742
                if disabledEdgePolicyIndex == nil {
×
UNCOV
743
                        return nil
×
UNCOV
744
                }
×
745

746
                // We iterate over all disabled policies and we add each channel
747
                // that has more than one disabled policy to disabledChanIDs
748
                // array.
UNCOV
749
                return disabledEdgePolicyIndex.ForEach(
×
UNCOV
750
                        func(k, v []byte) error {
×
UNCOV
751
                                chanID := byteOrder.Uint64(k[:8])
×
UNCOV
752
                                _, edgeFound := chanEdgeFound[chanID]
×
UNCOV
753
                                if edgeFound {
×
UNCOV
754
                                        delete(chanEdgeFound, chanID)
×
UNCOV
755
                                        disabledChanIDs = append(
×
UNCOV
756
                                                disabledChanIDs, chanID,
×
UNCOV
757
                                        )
×
UNCOV
758

×
UNCOV
759
                                        return nil
×
UNCOV
760
                                }
×
761

UNCOV
762
                                chanEdgeFound[chanID] = struct{}{}
×
UNCOV
763

×
UNCOV
764
                                return nil
×
765
                        },
766
                )
UNCOV
767
        }, func() {
×
UNCOV
768
                disabledChanIDs = nil
×
UNCOV
769
                chanEdgeFound = make(map[uint64]struct{})
×
UNCOV
770
        })
×
UNCOV
771
        if err != nil {
×
772
                return nil, err
×
773
        }
×
774

UNCOV
775
        return disabledChanIDs, nil
×
776
}
777

778
// ForEachNode iterates through all the stored vertices/nodes in the graph,
779
// executing the passed callback with each node encountered. If the callback
780
// returns an error, then the transaction is aborted and the iteration stops
781
// early. Any operations performed on the NodeTx passed to the call-back are
782
// executed under the same read transaction and so, methods on the NodeTx object
783
// _MUST_ only be called from within the call-back.
784
func (c *KVStore) ForEachNode(cb func(tx NodeRTx) error) error {
3✔
785
        return c.forEachNode(func(tx kvdb.RTx,
3✔
786
                node *models.LightningNode) error {
6✔
787

3✔
788
                return cb(newChanGraphNodeTx(tx, c, node))
3✔
789
        })
3✔
790
}
791

792
// forEachNode iterates through all the stored vertices/nodes in the graph,
793
// executing the passed callback with each node encountered. If the callback
794
// returns an error, then the transaction is aborted and the iteration stops
795
// early.
796
//
797
// TODO(roasbeef): add iterator interface to allow for memory efficient graph
798
// traversal when graph gets mega.
799
func (c *KVStore) forEachNode(
800
        cb func(kvdb.RTx, *models.LightningNode) error) error {
3✔
801

3✔
802
        traversal := func(tx kvdb.RTx) error {
6✔
803
                // First grab the nodes bucket which stores the mapping from
3✔
804
                // pubKey to node information.
3✔
805
                nodes := tx.ReadBucket(nodeBucket)
3✔
806
                if nodes == nil {
3✔
807
                        return ErrGraphNotFound
×
808
                }
×
809

810
                return nodes.ForEach(func(pubKey, nodeBytes []byte) error {
6✔
811
                        // If this is the source key, then we skip this
3✔
812
                        // iteration as the value for this key is a pubKey
3✔
813
                        // rather than raw node information.
3✔
814
                        if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 {
6✔
815
                                return nil
3✔
816
                        }
3✔
817

818
                        nodeReader := bytes.NewReader(nodeBytes)
3✔
819
                        node, err := deserializeLightningNode(nodeReader)
3✔
820
                        if err != nil {
3✔
821
                                return err
×
822
                        }
×
823

824
                        // Execute the callback, the transaction will abort if
825
                        // this returns an error.
826
                        return cb(tx, &node)
3✔
827
                })
828
        }
829

830
        return kvdb.View(c.db, traversal, func() {})
6✔
831
}
832

833
// ForEachNodeCacheable iterates through all the stored vertices/nodes in the
834
// graph, executing the passed callback with each node encountered. If the
835
// callback returns an error, then the transaction is aborted and the iteration
836
// stops early.
837
func (c *KVStore) ForEachNodeCacheable(cb func(route.Vertex,
838
        *lnwire.FeatureVector) error) error {
3✔
839

3✔
840
        traversal := func(tx kvdb.RTx) error {
6✔
841
                // First grab the nodes bucket which stores the mapping from
3✔
842
                // pubKey to node information.
3✔
843
                nodes := tx.ReadBucket(nodeBucket)
3✔
844
                if nodes == nil {
3✔
845
                        return ErrGraphNotFound
×
846
                }
×
847

848
                return nodes.ForEach(func(pubKey, nodeBytes []byte) error {
6✔
849
                        // If this is the source key, then we skip this
3✔
850
                        // iteration as the value for this key is a pubKey
3✔
851
                        // rather than raw node information.
3✔
852
                        if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 {
6✔
853
                                return nil
3✔
854
                        }
3✔
855

856
                        nodeReader := bytes.NewReader(nodeBytes)
3✔
857
                        node, features, err := deserializeLightningNodeCacheable( //nolint:ll
3✔
858
                                nodeReader,
3✔
859
                        )
3✔
860
                        if err != nil {
3✔
861
                                return err
×
862
                        }
×
863

864
                        // Execute the callback, the transaction will abort if
865
                        // this returns an error.
866
                        return cb(node, features)
3✔
867
                })
868
        }
869

870
        return kvdb.View(c.db, traversal, func() {})
6✔
871
}
872

873
// SourceNode returns the source node of the graph. The source node is treated
874
// as the center node within a star-graph. This method may be used to kick off
875
// a path finding algorithm in order to explore the reachability of another
876
// node based off the source node.
877
func (c *KVStore) SourceNode(_ context.Context) (*models.LightningNode,
878
        error) {
3✔
879

3✔
880
        var source *models.LightningNode
3✔
881
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
6✔
882
                // First grab the nodes bucket which stores the mapping from
3✔
883
                // pubKey to node information.
3✔
884
                nodes := tx.ReadBucket(nodeBucket)
3✔
885
                if nodes == nil {
3✔
886
                        return ErrGraphNotFound
×
887
                }
×
888

889
                node, err := c.sourceNode(nodes)
3✔
890
                if err != nil {
3✔
UNCOV
891
                        return err
×
UNCOV
892
                }
×
893
                source = node
3✔
894

3✔
895
                return nil
3✔
896
        }, func() {
3✔
897
                source = nil
3✔
898
        })
3✔
899
        if err != nil {
3✔
UNCOV
900
                return nil, err
×
UNCOV
901
        }
×
902

903
        return source, nil
3✔
904
}
905

906
// sourceNode uses an existing database transaction and returns the source node
907
// of the graph. The source node is treated as the center node within a
908
// star-graph. This method may be used to kick off a path finding algorithm in
909
// order to explore the reachability of another node based off the source node.
910
func (c *KVStore) sourceNode(nodes kvdb.RBucket) (*models.LightningNode,
911
        error) {
3✔
912

3✔
913
        selfPub := nodes.Get(sourceKey)
3✔
914
        if selfPub == nil {
3✔
UNCOV
915
                return nil, ErrSourceNodeNotSet
×
UNCOV
916
        }
×
917

918
        // With the pubKey of the source node retrieved, we're able to
919
        // fetch the full node information.
920
        node, err := fetchLightningNode(nodes, selfPub)
3✔
921
        if err != nil {
3✔
922
                return nil, err
×
923
        }
×
924

925
        return &node, nil
3✔
926
}
927

928
// SetSourceNode sets the source node within the graph database. The source
929
// node is to be used as the center of a star-graph within path finding
930
// algorithms.
931
func (c *KVStore) SetSourceNode(_ context.Context,
932
        node *models.LightningNode) error {
3✔
933

3✔
934
        nodePubBytes := node.PubKeyBytes[:]
3✔
935

3✔
936
        return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
6✔
937
                // First grab the nodes bucket which stores the mapping from
3✔
938
                // pubKey to node information.
3✔
939
                nodes, err := tx.CreateTopLevelBucket(nodeBucket)
3✔
940
                if err != nil {
3✔
941
                        return err
×
942
                }
×
943

944
                // Next we create the mapping from source to the targeted
945
                // public key.
946
                if err := nodes.Put(sourceKey, nodePubBytes); err != nil {
3✔
947
                        return err
×
948
                }
×
949

950
                // Finally, we commit the information of the lightning node
951
                // itself.
952
                return addLightningNode(tx, node)
3✔
953
        }, func() {})
3✔
954
}
955

956
// AddLightningNode adds a vertex/node to the graph database. If the node is not
957
// in the database from before, this will add a new, unconnected one to the
958
// graph. If it is present from before, this will update that node's
959
// information. Note that this method is expected to only be called to update an
960
// already present node from a node announcement, or to insert a node found in a
961
// channel update.
962
//
963
// TODO(roasbeef): also need sig of announcement.
964
func (c *KVStore) AddLightningNode(ctx context.Context,
965
        node *models.LightningNode, opts ...batch.SchedulerOption) error {
3✔
966

3✔
967
        r := &batch.Request[kvdb.RwTx]{
3✔
968
                Opts: batch.NewSchedulerOptions(opts...),
3✔
969
                Do: func(tx kvdb.RwTx) error {
6✔
970
                        return addLightningNode(tx, node)
3✔
971
                },
3✔
972
        }
973

974
        return c.nodeScheduler.Execute(ctx, r)
3✔
975
}
976

977
func addLightningNode(tx kvdb.RwTx, node *models.LightningNode) error {
3✔
978
        nodes, err := tx.CreateTopLevelBucket(nodeBucket)
3✔
979
        if err != nil {
3✔
980
                return err
×
981
        }
×
982

983
        aliases, err := nodes.CreateBucketIfNotExists(aliasIndexBucket)
3✔
984
        if err != nil {
3✔
985
                return err
×
986
        }
×
987

988
        updateIndex, err := nodes.CreateBucketIfNotExists(
3✔
989
                nodeUpdateIndexBucket,
3✔
990
        )
3✔
991
        if err != nil {
3✔
992
                return err
×
993
        }
×
994

995
        return putLightningNode(nodes, aliases, updateIndex, node)
3✔
996
}
997

998
// LookupAlias attempts to return the alias as advertised by the target node.
999
// TODO(roasbeef): currently assumes that aliases are unique...
1000
func (c *KVStore) LookupAlias(_ context.Context,
1001
        pub *btcec.PublicKey) (string, error) {
3✔
1002

3✔
1003
        var alias string
3✔
1004

3✔
1005
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
6✔
1006
                nodes := tx.ReadBucket(nodeBucket)
3✔
1007
                if nodes == nil {
3✔
1008
                        return ErrGraphNodesNotFound
×
1009
                }
×
1010

1011
                aliases := nodes.NestedReadBucket(aliasIndexBucket)
3✔
1012
                if aliases == nil {
3✔
1013
                        return ErrGraphNodesNotFound
×
1014
                }
×
1015

1016
                nodePub := pub.SerializeCompressed()
3✔
1017
                a := aliases.Get(nodePub)
3✔
1018
                if a == nil {
3✔
UNCOV
1019
                        return ErrNodeAliasNotFound
×
UNCOV
1020
                }
×
1021

1022
                // TODO(roasbeef): should actually be using the utf-8
1023
                // package...
1024
                alias = string(a)
3✔
1025

3✔
1026
                return nil
3✔
1027
        }, func() {
3✔
1028
                alias = ""
3✔
1029
        })
3✔
1030
        if err != nil {
3✔
UNCOV
1031
                return "", err
×
UNCOV
1032
        }
×
1033

1034
        return alias, nil
3✔
1035
}
1036

1037
// DeleteLightningNode starts a new database transaction to remove a vertex/node
1038
// from the database according to the node's public key.
1039
func (c *KVStore) DeleteLightningNode(_ context.Context,
UNCOV
1040
        nodePub route.Vertex) error {
×
UNCOV
1041

×
UNCOV
1042
        // TODO(roasbeef): ensure dangling edges are removed...
×
UNCOV
1043
        return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
×
UNCOV
1044
                nodes := tx.ReadWriteBucket(nodeBucket)
×
UNCOV
1045
                if nodes == nil {
×
1046
                        return ErrGraphNodeNotFound
×
1047
                }
×
1048

UNCOV
1049
                return c.deleteLightningNode(nodes, nodePub[:])
×
UNCOV
1050
        }, func() {})
×
1051
}
1052

1053
// deleteLightningNode uses an existing database transaction to remove a
1054
// vertex/node from the database according to the node's public key.
1055
func (c *KVStore) deleteLightningNode(nodes kvdb.RwBucket,
1056
        compressedPubKey []byte) error {
3✔
1057

3✔
1058
        aliases := nodes.NestedReadWriteBucket(aliasIndexBucket)
3✔
1059
        if aliases == nil {
3✔
1060
                return ErrGraphNodesNotFound
×
1061
        }
×
1062

1063
        if err := aliases.Delete(compressedPubKey); err != nil {
3✔
1064
                return err
×
1065
        }
×
1066

1067
        // Before we delete the node, we'll fetch its current state so we can
1068
        // determine when its last update was to clear out the node update
1069
        // index.
1070
        node, err := fetchLightningNode(nodes, compressedPubKey)
3✔
1071
        if err != nil {
3✔
UNCOV
1072
                return err
×
UNCOV
1073
        }
×
1074

1075
        if err := nodes.Delete(compressedPubKey); err != nil {
3✔
1076
                return err
×
1077
        }
×
1078

1079
        // Finally, we'll delete the index entry for the node within the
1080
        // nodeUpdateIndexBucket as this node is no longer active, so we don't
1081
        // need to track its last update.
1082
        nodeUpdateIndex := nodes.NestedReadWriteBucket(nodeUpdateIndexBucket)
3✔
1083
        if nodeUpdateIndex == nil {
3✔
1084
                return ErrGraphNodesNotFound
×
1085
        }
×
1086

1087
        // In order to delete the entry, we'll need to reconstruct the key for
1088
        // its last update.
1089
        updateUnix := uint64(node.LastUpdate.Unix())
3✔
1090
        var indexKey [8 + 33]byte
3✔
1091
        byteOrder.PutUint64(indexKey[:8], updateUnix)
3✔
1092
        copy(indexKey[8:], compressedPubKey)
3✔
1093

3✔
1094
        return nodeUpdateIndex.Delete(indexKey[:])
3✔
1095
}
1096

1097
// AddChannelEdge adds a new (undirected, blank) edge to the graph database. An
1098
// undirected edge between the two target nodes is created. The information stored
1099
// denotes the static attributes of the channel, such as the channelID, the keys
1100
// involved in creation of the channel, and the set of features that the channel
1101
// supports. The chanPoint and chanID are used to uniquely identify the edge
1102
// globally within the database.
1103
func (c *KVStore) AddChannelEdge(ctx context.Context,
1104
        edge *models.ChannelEdgeInfo, opts ...batch.SchedulerOption) error {
3✔
1105

3✔
1106
        var alreadyExists bool
3✔
1107
        r := &batch.Request[kvdb.RwTx]{
3✔
1108
                Opts: batch.NewSchedulerOptions(opts...),
3✔
1109
                Reset: func() {
6✔
1110
                        alreadyExists = false
3✔
1111
                },
3✔
1112
                Do: func(tx kvdb.RwTx) error {
3✔
1113
                        err := c.addChannelEdge(tx, edge)
3✔
1114

3✔
1115
                        // Silence ErrEdgeAlreadyExist so that the batch can
3✔
1116
                        // succeed, but propagate the error via local state.
3✔
1117
                        if errors.Is(err, ErrEdgeAlreadyExist) {
3✔
UNCOV
1118
                                alreadyExists = true
×
UNCOV
1119
                                return nil
×
UNCOV
1120
                        }
×
1121

1122
                        return err
3✔
1123
                },
1124
                OnCommit: func(err error) error {
3✔
1125
                        switch {
3✔
1126
                        case err != nil:
×
1127
                                return err
×
UNCOV
1128
                        case alreadyExists:
×
UNCOV
1129
                                return ErrEdgeAlreadyExist
×
1130
                        default:
3✔
1131
                                c.rejectCache.remove(edge.ChannelID)
3✔
1132
                                c.chanCache.remove(edge.ChannelID)
3✔
1133
                                return nil
3✔
1134
                        }
1135
                },
1136
        }
1137

1138
        return c.chanScheduler.Execute(ctx, r)
3✔
1139
}
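
// Illustrative sketch (hypothetical caller code, not part of this file):
// because the batch silences ErrEdgeAlreadyExist inside the transaction and
// re-surfaces it from OnCommit, callers can treat it as a normal sentinel:
//
//	if err := store.AddChannelEdge(ctx, edgeInfo); err != nil {
//		if errors.Is(err, ErrEdgeAlreadyExist) {
//			// The edge is already known; nothing to do.
//		} else {
//			return err
//		}
//	}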
1140

1141
// addChannelEdge is the private form of AddChannelEdge that allows callers to
1142
// utilize an existing db transaction.
1143
func (c *KVStore) addChannelEdge(tx kvdb.RwTx,
1144
        edge *models.ChannelEdgeInfo) error {
3✔
1145

3✔
1146
        // Construct the channel's primary key which is the 8-byte channel ID.
3✔
1147
        var chanKey [8]byte
3✔
1148
        binary.BigEndian.PutUint64(chanKey[:], edge.ChannelID)
3✔
1149

3✔
1150
        nodes, err := tx.CreateTopLevelBucket(nodeBucket)
3✔
1151
        if err != nil {
3✔
1152
                return err
×
1153
        }
×
1154
        edges, err := tx.CreateTopLevelBucket(edgeBucket)
3✔
1155
        if err != nil {
3✔
1156
                return err
×
1157
        }
×
1158
        edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket)
3✔
1159
        if err != nil {
3✔
1160
                return err
×
1161
        }
×
1162
        chanIndex, err := edges.CreateBucketIfNotExists(channelPointBucket)
3✔
1163
        if err != nil {
3✔
1164
                return err
×
1165
        }
×
1166

1167
        // First, attempt to check if this edge has already been created. If
1168
        // so, then we can exit early as this method is meant to be idempotent.
1169
        if edgeInfo := edgeIndex.Get(chanKey[:]); edgeInfo != nil {
3✔
UNCOV
1170
                return ErrEdgeAlreadyExist
×
UNCOV
1171
        }
×
1172

1173
        // Before we insert the channel into the database, we'll ensure that
1174
        // both nodes already exist in the channel graph. If either node
1175
        // doesn't, then we'll insert a "shell" node that just includes its
1176
        // public key, so subsequent validation and queries can work properly.
1177
        _, node1Err := fetchLightningNode(nodes, edge.NodeKey1Bytes[:])
3✔
1178
        switch {
3✔
1179
        case errors.Is(node1Err, ErrGraphNodeNotFound):
3✔
1180
                node1Shell := models.LightningNode{
3✔
1181
                        PubKeyBytes:          edge.NodeKey1Bytes,
3✔
1182
                        HaveNodeAnnouncement: false,
3✔
1183
                }
3✔
1184
                err := addLightningNode(tx, &node1Shell)
3✔
1185
                if err != nil {
3✔
1186
                        return fmt.Errorf("unable to create shell node "+
×
1187
                                "for: %x: %w", edge.NodeKey1Bytes, err)
×
1188
                }
×
1189
        case node1Err != nil:
×
1190
                return node1Err
×
1191
        }
1192

1193
        _, node2Err := fetchLightningNode(nodes, edge.NodeKey2Bytes[:])
3✔
1194
        switch {
3✔
1195
        case errors.Is(node2Err, ErrGraphNodeNotFound):
3✔
1196
                node2Shell := models.LightningNode{
3✔
1197
                        PubKeyBytes:          edge.NodeKey2Bytes,
3✔
1198
                        HaveNodeAnnouncement: false,
3✔
1199
                }
3✔
1200
                err := addLightningNode(tx, &node2Shell)
3✔
1201
                if err != nil {
3✔
1202
                        return fmt.Errorf("unable to create shell node "+
×
1203
                                "for: %x: %w", edge.NodeKey2Bytes, err)
×
1204
                }
×
1205
        case node2Err != nil:
×
1206
                return node2Err
×
1207
        }
1208

1209
        // If the edge hasn't been created yet, then we'll first add it to the
1210
        // edge index in order to associate the edge between two nodes and also
1211
        // store the static components of the channel.
1212
        if err := putChanEdgeInfo(edgeIndex, edge, chanKey); err != nil {
3✔
1213
                return err
×
1214
        }
×
1215

1216
        // Mark edge policies for both sides as unknown. This is to enable
1217
        // efficient incoming channel lookup for a node.
1218
        keys := []*[33]byte{
3✔
1219
                &edge.NodeKey1Bytes,
3✔
1220
                &edge.NodeKey2Bytes,
3✔
1221
        }
3✔
1222
        for _, key := range keys {
6✔
1223
                err := putChanEdgePolicyUnknown(edges, edge.ChannelID, key[:])
3✔
1224
                if err != nil {
3✔
1225
                        return err
×
1226
                }
×
1227
        }
1228

1229
        // Finally we add it to the channel index which maps channel points
1230
        // (outpoints) to the shorter channel ID's.
1231
        var b bytes.Buffer
3✔
1232
        if err := WriteOutpoint(&b, &edge.ChannelPoint); err != nil {
3✔
1233
                return err
×
1234
        }
×
1235

1236
        return chanIndex.Put(b.Bytes(), chanKey[:])
3✔
1237
}
1238

1239
// HasChannelEdge returns true if the database knows of a channel edge with the
1240
// passed channel ID, and false otherwise. If an edge with that ID is found
1241
// within the graph, then two time stamps representing the last time the edge
1242
// was updated for both directed edges are returned along with the boolean. If
1243
// it is not found, then the zombie index is checked and its result is returned
1244
// as the second boolean.
1245
func (c *KVStore) HasChannelEdge(
1246
        chanID uint64) (time.Time, time.Time, bool, bool, error) {
3✔
1247

3✔
1248
        var (
3✔
1249
                upd1Time time.Time
3✔
1250
                upd2Time time.Time
3✔
1251
                exists   bool
3✔
1252
                isZombie bool
3✔
1253
        )
3✔
1254

3✔
1255
        // We'll query the cache with the shared lock held to allow multiple
3✔
1256
        // readers to access values in the cache concurrently if they exist.
3✔
1257
        c.cacheMu.RLock()
3✔
1258
        if entry, ok := c.rejectCache.get(chanID); ok {
6✔
1259
                c.cacheMu.RUnlock()
3✔
1260
                upd1Time = time.Unix(entry.upd1Time, 0)
3✔
1261
                upd2Time = time.Unix(entry.upd2Time, 0)
3✔
1262
                exists, isZombie = entry.flags.unpack()
3✔
1263

3✔
1264
                return upd1Time, upd2Time, exists, isZombie, nil
3✔
1265
        }
3✔
1266
        c.cacheMu.RUnlock()
3✔
1267

3✔
1268
        c.cacheMu.Lock()
3✔
1269
        defer c.cacheMu.Unlock()
3✔
1270

3✔
1271
        // The item was not found with the shared lock, so we'll acquire the
3✔
1272
        // exclusive lock and check the cache again in case another method added
3✔
1273
        // the entry to the cache while no lock was held.
3✔
1274
        if entry, ok := c.rejectCache.get(chanID); ok {
5✔
1275
                upd1Time = time.Unix(entry.upd1Time, 0)
2✔
1276
                upd2Time = time.Unix(entry.upd2Time, 0)
2✔
1277
                exists, isZombie = entry.flags.unpack()
2✔
1278

2✔
1279
                return upd1Time, upd2Time, exists, isZombie, nil
2✔
1280
        }
2✔
1281

1282
        if err := kvdb.View(c.db, func(tx kvdb.RTx) error {
6✔
1283
                edges := tx.ReadBucket(edgeBucket)
3✔
1284
                if edges == nil {
3✔
1285
                        return ErrGraphNoEdgesFound
×
1286
                }
×
1287
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
3✔
1288
                if edgeIndex == nil {
3✔
1289
                        return ErrGraphNoEdgesFound
×
1290
                }
×
1291

1292
                var channelID [8]byte
3✔
1293
                byteOrder.PutUint64(channelID[:], chanID)
3✔
1294

3✔
1295
                // If the edge doesn't exist, then we'll also check our zombie
3✔
1296
                // index.
3✔
1297
                if edgeIndex.Get(channelID[:]) == nil {
6✔
1298
                        exists = false
3✔
1299
                        zombieIndex := edges.NestedReadBucket(zombieBucket)
3✔
1300
                        if zombieIndex != nil {
6✔
1301
                                isZombie, _, _ = isZombieEdge(
3✔
1302
                                        zombieIndex, chanID,
3✔
1303
                                )
3✔
1304
                        }
3✔
1305

1306
                        return nil
3✔
1307
                }
1308

1309
                exists = true
3✔
1310
                isZombie = false
3✔
1311

3✔
1312
                // If the channel has been found in the graph, then retrieve
3✔
1313
                // the edges themselves so we can return the last updated
3✔
1314
                // timestamps.
3✔
1315
                nodes := tx.ReadBucket(nodeBucket)
3✔
1316
                if nodes == nil {
3✔
1317
                        return ErrGraphNodeNotFound
×
1318
                }
×
1319

1320
                e1, e2, err := fetchChanEdgePolicies(
3✔
1321
                        edgeIndex, edges, channelID[:],
3✔
1322
                )
3✔
1323
                if err != nil {
3✔
1324
                        return err
×
1325
                }
×
1326

1327
                // As we may have only one of the edges populated, only set the
1328
                // update time if the edge was found in the database.
1329
                if e1 != nil {
6✔
1330
                        upd1Time = e1.LastUpdate
3✔
1331
                }
3✔
1332
                if e2 != nil {
6✔
1333
                        upd2Time = e2.LastUpdate
3✔
1334
                }
3✔
1335

1336
                return nil
3✔
1337
        }, func() {}); err != nil {
3✔
1338
                return time.Time{}, time.Time{}, exists, isZombie, err
×
1339
        }
×
1340

1341
        c.rejectCache.insert(chanID, rejectCacheEntry{
3✔
1342
                upd1Time: upd1Time.Unix(),
3✔
1343
                upd2Time: upd2Time.Unix(),
3✔
1344
                flags:    packRejectFlags(exists, isZombie),
3✔
1345
        })
3✔
1346

3✔
1347
        return upd1Time, upd2Time, exists, isZombie, nil
3✔
1348
}
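
// exampleHasChannelEdge is a hypothetical usage sketch added for illustration;
// it is not part of the original kv_store.go. It shows how a caller (such as a
// gossip syncer) might consult HasChannelEdge to decide whether a received
// channel announcement still needs to be processed. The store and scid
// parameters are assumed to be supplied by the caller.
func exampleHasChannelEdge(store *KVStore, scid uint64) (bool, error) {
        upd1, upd2, exists, isZombie, err := store.HasChannelEdge(scid)
        if err != nil {
                return false, err
        }

        // An edge that is already known (and not a zombie) can typically be
        // skipped. The returned timestamps indicate how stale each directed
        // policy is.
        if exists && !isZombie {
                log.Debugf("chan_id=%v already known, last updates: %v / %v",
                        scid, upd1, upd2)

                return false, nil
        }

        return true, nil
}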
1349

1350
// AddEdgeProof sets the proof of an existing edge in the graph database.
1351
func (c *KVStore) AddEdgeProof(chanID lnwire.ShortChannelID,
1352
        proof *models.ChannelAuthProof) error {
3✔
1353

3✔
1354
        // Construct the channel's primary key which is the 8-byte channel ID.
3✔
1355
        var chanKey [8]byte
3✔
1356
        binary.BigEndian.PutUint64(chanKey[:], chanID.ToUint64())
3✔
1357

3✔
1358
        return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
6✔
1359
                edges := tx.ReadWriteBucket(edgeBucket)
3✔
1360
                if edges == nil {
3✔
1361
                        return ErrEdgeNotFound
×
1362
                }
×
1363

1364
                edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
3✔
1365
                if edgeIndex == nil {
3✔
1366
                        return ErrEdgeNotFound
×
1367
                }
×
1368

1369
                edge, err := fetchChanEdgeInfo(edgeIndex, chanKey[:])
3✔
1370
                if err != nil {
3✔
1371
                        return err
×
1372
                }
×
1373

1374
                edge.AuthProof = proof
3✔
1375

3✔
1376
                return putChanEdgeInfo(edgeIndex, &edge, chanKey)
3✔
1377
        }, func() {})
3✔
1378
}
1379

1380
const (
1381
        // pruneTipBytes is the total size of the value which stores a prune
1382
        // entry of the graph in the prune log. The "prune tip" is the last
1383
        // entry in the prune log, and indicates if the channel graph is in
1384
        // sync with the current UTXO state. The structure of the value
1385
        // is: blockHash, taking 32 bytes total.
1386
        pruneTipBytes = 32
1387
)
1388

1389
// PruneGraph prunes newly closed channels from the channel graph in response
1390
// to a new block being solved on the network. Any transactions which spend the
1391
// funding output of any known channels within the graph will be deleted.
1392
// Additionally, the "prune tip", or the last block which has been used to
1393
// prune the graph is stored so callers can ensure the graph is fully in sync
1394
// with the current UTXO state. A slice of channels that have been closed by
1395
// the target block along with any pruned nodes are returned if the function
1396
// succeeds without error.
1397
func (c *KVStore) PruneGraph(spentOutputs []*wire.OutPoint,
1398
        blockHash *chainhash.Hash, blockHeight uint32) (
1399
        []*models.ChannelEdgeInfo, []route.Vertex, error) {
3✔
1400

3✔
1401
        c.cacheMu.Lock()
3✔
1402
        defer c.cacheMu.Unlock()
3✔
1403

3✔
1404
        var (
3✔
1405
                chansClosed []*models.ChannelEdgeInfo
3✔
1406
                prunedNodes []route.Vertex
3✔
1407
        )
3✔
1408

3✔
1409
        err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
6✔
1410
                // First grab the edges bucket which houses the information
3✔
1411
                // we'd like to delete
3✔
1412
                edges, err := tx.CreateTopLevelBucket(edgeBucket)
3✔
1413
                if err != nil {
3✔
1414
                        return err
×
1415
                }
×
1416

1417
                // Next grab the two edge indexes which will also need to be
1418
                // updated.
1419
                edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket)
3✔
1420
                if err != nil {
3✔
1421
                        return err
×
1422
                }
×
1423
                chanIndex, err := edges.CreateBucketIfNotExists(
3✔
1424
                        channelPointBucket,
3✔
1425
                )
3✔
1426
                if err != nil {
3✔
1427
                        return err
×
1428
                }
×
1429
                nodes := tx.ReadWriteBucket(nodeBucket)
3✔
1430
                if nodes == nil {
3✔
1431
                        return ErrSourceNodeNotSet
×
1432
                }
×
1433
                zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
3✔
1434
                if err != nil {
3✔
1435
                        return err
×
1436
                }
×
1437

1438
                // For each of the outpoints that have been spent within the
1439
                // block, we attempt to delete them from the graph, since if that
1440
                // outpoint was a channel, it has now been closed.
1441
                for _, chanPoint := range spentOutputs {
6✔
1442
                        // TODO(roasbeef): load channel bloom filter, continue
3✔
1443
                        // if NOT in filter
3✔
1444

3✔
1445
                        var opBytes bytes.Buffer
3✔
1446
                        err := WriteOutpoint(&opBytes, chanPoint)
3✔
1447
                        if err != nil {
3✔
1448
                                return err
×
1449
                        }
×
1450

1451
                        // First attempt to see if the channel exists within
1452
                        // the database; if not, then we can exit early.
1453
                        chanID := chanIndex.Get(opBytes.Bytes())
3✔
1454
                        if chanID == nil {
3✔
UNCOV
1455
                                continue
×
1456
                        }
1457

1458
                        // Attempt to delete the channel; an ErrEdgeNotFound
1459
                        // will be returned if that outpoint isn't known to be
1460
                        // a channel. If no error is returned, then a channel
1461
                        // was successfully pruned.
1462
                        edgeInfo, err := c.delChannelEdgeUnsafe(
3✔
1463
                                edges, edgeIndex, chanIndex, zombieIndex,
3✔
1464
                                chanID, false, false,
3✔
1465
                        )
3✔
1466
                        if err != nil && !errors.Is(err, ErrEdgeNotFound) {
3✔
1467
                                return err
×
1468
                        }
×
1469

1470
                        chansClosed = append(chansClosed, edgeInfo)
3✔
1471
                }
1472

1473
                metaBucket, err := tx.CreateTopLevelBucket(graphMetaBucket)
3✔
1474
                if err != nil {
3✔
1475
                        return err
×
1476
                }
×
1477

1478
                pruneBucket, err := metaBucket.CreateBucketIfNotExists(
3✔
1479
                        pruneLogBucket,
3✔
1480
                )
3✔
1481
                if err != nil {
3✔
1482
                        return err
×
1483
                }
×
1484

1485
                // With the graph pruned, add a new entry to the prune log,
1486
                // which can be used to check if the graph is fully synced with
1487
                // the current UTXO state.
1488
                var blockHeightBytes [4]byte
3✔
1489
                byteOrder.PutUint32(blockHeightBytes[:], blockHeight)
3✔
1490

3✔
1491
                var newTip [pruneTipBytes]byte
3✔
1492
                copy(newTip[:], blockHash[:])
3✔
1493

3✔
1494
                err = pruneBucket.Put(blockHeightBytes[:], newTip[:])
3✔
1495
                if err != nil {
3✔
1496
                        return err
×
1497
                }
×
1498

1499
                // Now that the graph has been pruned, we'll also attempt to
1500
                // prune any nodes that have had a channel closed within the
1501
                // latest block.
1502
                prunedNodes, err = c.pruneGraphNodes(nodes, edgeIndex)
3✔
1503

3✔
1504
                return err
3✔
1505
        }, func() {
3✔
1506
                chansClosed = nil
3✔
1507
                prunedNodes = nil
3✔
1508
        })
3✔
1509
        if err != nil {
3✔
1510
                return nil, nil, err
×
1511
        }
×
1512

1513
        for _, channel := range chansClosed {
6✔
1514
                c.rejectCache.remove(channel.ChannelID)
3✔
1515
                c.chanCache.remove(channel.ChannelID)
3✔
1516
        }
3✔
1517

1518
        return chansClosed, prunedNodes, nil
3✔
1519
}
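
// examplePruneGraph is a hypothetical sketch, not part of the original file,
// showing how a chain-watching caller might feed a newly connected block's
// spent outpoints into PruneGraph. The spentOutputs, blockHash and height
// values are assumed to come from the caller's block notification.
func examplePruneGraph(store *KVStore, spentOutputs []*wire.OutPoint,
        blockHash *chainhash.Hash, height uint32) error {

        closedChans, prunedNodes, err := store.PruneGraph(
                spentOutputs, blockHash, height,
        )
        if err != nil {
                return err
        }

        log.Infof("Block %d pruned %d channels and %d unconnected nodes",
                height, len(closedChans), len(prunedNodes))

        return nil
}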
1520

1521
// PruneGraphNodes is a garbage collection method which attempts to prune out
1522
// any nodes from the channel graph that are currently unconnected. This ensures
1523
// that we only maintain a graph of reachable nodes. In the event that a pruned
1524
// node gains more channels, it will be re-added back to the graph.
1525
func (c *KVStore) PruneGraphNodes() ([]route.Vertex, error) {
3✔
1526
        var prunedNodes []route.Vertex
3✔
1527
        err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
6✔
1528
                nodes := tx.ReadWriteBucket(nodeBucket)
3✔
1529
                if nodes == nil {
3✔
1530
                        return ErrGraphNodesNotFound
×
1531
                }
×
1532
                edges := tx.ReadWriteBucket(edgeBucket)
3✔
1533
                if edges == nil {
3✔
1534
                        return ErrGraphNotFound
×
1535
                }
×
1536
                edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
3✔
1537
                if edgeIndex == nil {
3✔
1538
                        return ErrGraphNoEdgesFound
×
1539
                }
×
1540

1541
                var err error
3✔
1542
                prunedNodes, err = c.pruneGraphNodes(nodes, edgeIndex)
3✔
1543
                if err != nil {
3✔
1544
                        return err
×
1545
                }
×
1546

1547
                return nil
3✔
1548
        }, func() {
3✔
1549
                prunedNodes = nil
3✔
1550
        })
3✔
1551

1552
        return prunedNodes, err
3✔
1553
}
1554

1555
// pruneGraphNodes attempts to remove any nodes from the graph that have had a
1556
// channel closed within the current block. If the node still has existing
1557
// channels in the graph, this will act as a no-op.
1558
func (c *KVStore) pruneGraphNodes(nodes kvdb.RwBucket,
1559
        edgeIndex kvdb.RwBucket) ([]route.Vertex, error) {
3✔
1560

3✔
1561
        log.Trace("Pruning nodes from graph with no open channels")
3✔
1562

3✔
1563
        // We'll retrieve the graph's source node to ensure we don't remove it
3✔
1564
        // even if it no longer has any open channels.
3✔
1565
        sourceNode, err := c.sourceNode(nodes)
3✔
1566
        if err != nil {
3✔
1567
                return nil, err
×
1568
        }
×
1569

1570
        // We'll use this map to count the number of references to a node
1571
        // in the graph. A node should only be removed once it has no more
1572
        // references in the graph.
1573
        nodeRefCounts := make(map[[33]byte]int)
3✔
1574
        err = nodes.ForEach(func(pubKey, nodeBytes []byte) error {
6✔
1575
                // If this is the source key, then we skip this
3✔
1576
                // iteration as the value for this key is a pubKey
3✔
1577
                // rather than raw node information.
3✔
1578
                if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 {
6✔
1579
                        return nil
3✔
1580
                }
3✔
1581

1582
                var nodePub [33]byte
3✔
1583
                copy(nodePub[:], pubKey)
3✔
1584
                nodeRefCounts[nodePub] = 0
3✔
1585

3✔
1586
                return nil
3✔
1587
        })
1588
        if err != nil {
3✔
1589
                return nil, err
×
1590
        }
×
1591

1592
        // To ensure we never delete the source node, we'll start off by
1593
        // bumping its ref count to 1.
1594
        nodeRefCounts[sourceNode.PubKeyBytes] = 1
3✔
1595

3✔
1596
        // Next, we'll run through the edgeIndex which maps a channel ID to the
3✔
1597
        // edge info. We'll use this scan to populate our reference count map
3✔
1598
        // above.
3✔
1599
        err = edgeIndex.ForEach(func(chanID, edgeInfoBytes []byte) error {
6✔
1600
                // The first 66 bytes of the edge info contain the pubkeys of
3✔
1601
                // the nodes that this edge attaches. We'll extract them, and
3✔
1602
                // add them to the ref count map.
3✔
1603
                var node1, node2 [33]byte
3✔
1604
                copy(node1[:], edgeInfoBytes[:33])
3✔
1605
                copy(node2[:], edgeInfoBytes[33:])
3✔
1606

3✔
1607
                // With the nodes extracted, we'll increase the ref count of
3✔
1608
                // each of the nodes.
3✔
1609
                nodeRefCounts[node1]++
3✔
1610
                nodeRefCounts[node2]++
3✔
1611

3✔
1612
                return nil
3✔
1613
        })
3✔
1614
        if err != nil {
3✔
1615
                return nil, err
×
1616
        }
×
1617

1618
        // Finally, we'll make a second pass over the set of nodes, and delete
1619
        // any nodes that have a ref count of zero.
1620
        var pruned []route.Vertex
3✔
1621
        for nodePubKey, refCount := range nodeRefCounts {
6✔
1622
                // If the ref count of the node isn't zero, then we can safely
3✔
1623
                // skip it as it still has edges to or from it within the
3✔
1624
                // graph.
3✔
1625
                if refCount != 0 {
6✔
1626
                        continue
3✔
1627
                }
1628

1629
                // If we reach this point, then there are no longer any edges
1630
                // that connect this node, so we can delete it.
1631
                err := c.deleteLightningNode(nodes, nodePubKey[:])
3✔
1632
                if err != nil {
3✔
1633
                        if errors.Is(err, ErrGraphNodeNotFound) ||
×
1634
                                errors.Is(err, ErrGraphNodesNotFound) {
×
1635

×
1636
                                log.Warnf("Unable to prune node %x from the "+
×
1637
                                        "graph: %v", nodePubKey, err)
×
1638
                                continue
×
1639
                        }
1640

1641
                        return nil, err
×
1642
                }
1643

1644
                log.Infof("Pruned unconnected node %x from channel graph",
3✔
1645
                        nodePubKey[:])
3✔
1646

3✔
1647
                pruned = append(pruned, nodePubKey)
3✔
1648
        }
1649

1650
        if len(pruned) > 0 {
6✔
1651
                log.Infof("Pruned %v unconnected nodes from the channel graph",
3✔
1652
                        len(pruned))
3✔
1653
        }
3✔
1654

1655
        return pruned, err
3✔
1656
}
1657

1658
// DisconnectBlockAtHeight is used to indicate that the block specified
1659
// by the passed height has been disconnected from the main chain. This
1660
// will "rewind" the graph back to the height below, deleting channels
1661
// that are no longer confirmed from the graph. The prune log will be
1662
// set to the last prune height valid for the remaining chain.
1663
// Channels that were removed from the graph as a result of the
1664
// disconnected block are returned.
1665
func (c *KVStore) DisconnectBlockAtHeight(height uint32) (
1666
        []*models.ChannelEdgeInfo, error) {
2✔
1667

2✔
1668
        // Every channel having a ShortChannelID starting at 'height'
2✔
1669
        // will no longer be confirmed.
2✔
1670
        startShortChanID := lnwire.ShortChannelID{
2✔
1671
                BlockHeight: height,
2✔
1672
        }
2✔
1673

2✔
1674
        // Delete everything after this height from the db up until the
2✔
1675
        // SCID alias range.
2✔
1676
        endShortChanID := aliasmgr.StartingAlias
2✔
1677

2✔
1678
        // The block height will be the first 3 bytes of the channel IDs.
2✔
1679
        var chanIDStart [8]byte
2✔
1680
        byteOrder.PutUint64(chanIDStart[:], startShortChanID.ToUint64())
2✔
1681
        var chanIDEnd [8]byte
2✔
1682
        byteOrder.PutUint64(chanIDEnd[:], endShortChanID.ToUint64())
2✔
1683

2✔
1684
        c.cacheMu.Lock()
2✔
1685
        defer c.cacheMu.Unlock()
2✔
1686

2✔
1687
        // Keep track of the channels that are removed from the graph.
2✔
1688
        var removedChans []*models.ChannelEdgeInfo
2✔
1689

2✔
1690
        if err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
4✔
1691
                edges, err := tx.CreateTopLevelBucket(edgeBucket)
2✔
1692
                if err != nil {
2✔
1693
                        return err
×
1694
                }
×
1695
                edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket)
2✔
1696
                if err != nil {
2✔
1697
                        return err
×
1698
                }
×
1699
                chanIndex, err := edges.CreateBucketIfNotExists(
2✔
1700
                        channelPointBucket,
2✔
1701
                )
2✔
1702
                if err != nil {
2✔
1703
                        return err
×
1704
                }
×
1705
                zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
2✔
1706
                if err != nil {
2✔
1707
                        return err
×
1708
                }
×
1709

1710
                // Scan from chanIDStart to chanIDEnd, deleting every
1711
                // found edge.
1712
                // NOTE: we must delete the edges after the cursor loop, since
1713
                // modifying the bucket while traversing is not safe.
1714
                // NOTE: We use a < comparison in bytes.Compare instead of <=
1715
                // so that the StartingAlias itself isn't deleted.
1716
                var keys [][]byte
2✔
1717
                cursor := edgeIndex.ReadWriteCursor()
2✔
1718

2✔
1719
                //nolint:ll
2✔
1720
                for k, _ := cursor.Seek(chanIDStart[:]); k != nil &&
2✔
1721
                        bytes.Compare(k, chanIDEnd[:]) < 0; k, _ = cursor.Next() {
4✔
1722
                        keys = append(keys, k)
2✔
1723
                }
2✔
1724

1725
                for _, k := range keys {
4✔
1726
                        edgeInfo, err := c.delChannelEdgeUnsafe(
2✔
1727
                                edges, edgeIndex, chanIndex, zombieIndex,
2✔
1728
                                k, false, false,
2✔
1729
                        )
2✔
1730
                        if err != nil && !errors.Is(err, ErrEdgeNotFound) {
2✔
1731
                                return err
×
1732
                        }
×
1733

1734
                        removedChans = append(removedChans, edgeInfo)
2✔
1735
                }
1736

1737
                // Delete all the entries in the prune log having a height
1738
                // greater than or equal to that of the disconnected block.
1739
                metaBucket, err := tx.CreateTopLevelBucket(graphMetaBucket)
2✔
1740
                if err != nil {
2✔
1741
                        return err
×
1742
                }
×
1743

1744
                pruneBucket, err := metaBucket.CreateBucketIfNotExists(
2✔
1745
                        pruneLogBucket,
2✔
1746
                )
2✔
1747
                if err != nil {
2✔
1748
                        return err
×
1749
                }
×
1750

1751
                var pruneKeyStart [4]byte
2✔
1752
                byteOrder.PutUint32(pruneKeyStart[:], height)
2✔
1753

2✔
1754
                var pruneKeyEnd [4]byte
2✔
1755
                byteOrder.PutUint32(pruneKeyEnd[:], math.MaxUint32)
2✔
1756

2✔
1757
                // To avoid modifying the bucket while traversing, we delete
2✔
1758
                // the keys in a second loop.
2✔
1759
                var pruneKeys [][]byte
2✔
1760
                pruneCursor := pruneBucket.ReadWriteCursor()
2✔
1761
                //nolint:ll
2✔
1762
                for k, _ := pruneCursor.Seek(pruneKeyStart[:]); k != nil &&
2✔
1763
                        bytes.Compare(k, pruneKeyEnd[:]) <= 0; k, _ = pruneCursor.Next() {
4✔
1764
                        pruneKeys = append(pruneKeys, k)
2✔
1765
                }
2✔
1766

1767
                for _, k := range pruneKeys {
4✔
1768
                        if err := pruneBucket.Delete(k); err != nil {
2✔
1769
                                return err
×
1770
                        }
×
1771
                }
1772

1773
                return nil
2✔
1774
        }, func() {
2✔
1775
                removedChans = nil
2✔
1776
        }); err != nil {
2✔
1777
                return nil, err
×
1778
        }
×
1779

1780
        for _, channel := range removedChans {
4✔
1781
                c.rejectCache.remove(channel.ChannelID)
2✔
1782
                c.chanCache.remove(channel.ChannelID)
2✔
1783
        }
2✔
1784

1785
        return removedChans, nil
2✔
1786
}
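
// exampleDisconnectBlock is a hypothetical sketch, not part of the original
// file, of how a reorg handler might rewind the graph when a block is
// disconnected: every channel confirmed at or above the stale height is
// removed and can be re-added if it confirms again later.
func exampleDisconnectBlock(store *KVStore, staleHeight uint32) error {
        removed, err := store.DisconnectBlockAtHeight(staleHeight)
        if err != nil {
                return err
        }

        for _, edge := range removed {
                if edge == nil {
                        continue
                }

                log.Debugf("Removed channel %v due to reorg at height %d",
                        edge.ChannelID, staleHeight)
        }

        return nil
}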
1787

1788
// PruneTip returns the block height and hash of the latest block that has been
1789
// used to prune channels in the graph. Knowing the "prune tip" allows callers
1790
// to tell if the graph is currently in sync with the current best known UTXO
1791
// state.
1792
func (c *KVStore) PruneTip() (*chainhash.Hash, uint32, error) {
3✔
1793
        var (
3✔
1794
                tipHash   chainhash.Hash
3✔
1795
                tipHeight uint32
3✔
1796
        )
3✔
1797

3✔
1798
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
6✔
1799
                graphMeta := tx.ReadBucket(graphMetaBucket)
3✔
1800
                if graphMeta == nil {
3✔
1801
                        return ErrGraphNotFound
×
1802
                }
×
1803
                pruneBucket := graphMeta.NestedReadBucket(pruneLogBucket)
3✔
1804
                if pruneBucket == nil {
3✔
1805
                        return ErrGraphNeverPruned
×
1806
                }
×
1807

1808
                pruneCursor := pruneBucket.ReadCursor()
3✔
1809

3✔
1810
                // The prune key with the largest block height will be our
3✔
1811
                // prune tip.
3✔
1812
                k, v := pruneCursor.Last()
3✔
1813
                if k == nil {
6✔
1814
                        return ErrGraphNeverPruned
3✔
1815
                }
3✔
1816

1817
                // Once we have the prune tip, the value will be the block hash,
1818
                // and the key the block height.
1819
                copy(tipHash[:], v)
3✔
1820
                tipHeight = byteOrder.Uint32(k)
3✔
1821

3✔
1822
                return nil
3✔
1823
        }, func() {})
3✔
1824
        if err != nil {
6✔
1825
                return nil, 0, err
3✔
1826
        }
3✔
1827

1828
        return &tipHash, tipHeight, nil
3✔
1829
}
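
// examplePruneTip is a hypothetical sketch, not part of the original file,
// showing how a caller could check how far the graph has been pruned.
// ErrGraphNeverPruned is returned when the prune log has no entries yet.
func examplePruneTip(store *KVStore) error {
        tipHash, tipHeight, err := store.PruneTip()
        switch {
        case errors.Is(err, ErrGraphNeverPruned):
                log.Infof("Graph has never been pruned")
                return nil

        case err != nil:
                return err
        }

        log.Infof("Graph pruned up to height %d (block %v)", tipHeight,
                tipHash)

        return nil
}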
1830

1831
// DeleteChannelEdges removes edges with the given channel IDs from the
1832
// database and marks them as zombies. This ensures that we're unable to re-add
1833
// them to our database again. If an edge does not exist within the
1834
// database, then ErrEdgeNotFound will be returned. If strictZombiePruning is
1835
// true, then when we mark these edges as zombies, we'll set up the keys such
1836
// that we require the node that failed to send the fresh update to be the one
1837
// that resurrects the channel from its zombie state. The markZombie bool
1838
// denotes whether or not to mark the channel as a zombie.
1839
func (c *KVStore) DeleteChannelEdges(strictZombiePruning, markZombie bool,
1840
        chanIDs ...uint64) ([]*models.ChannelEdgeInfo, error) {
3✔
1841

3✔
1842
        // TODO(roasbeef): possibly delete from node bucket if node has no more
3✔
1843
        // channels
3✔
1844
        // TODO(roasbeef): don't delete both edges?
3✔
1845

3✔
1846
        c.cacheMu.Lock()
3✔
1847
        defer c.cacheMu.Unlock()
3✔
1848

3✔
1849
        var infos []*models.ChannelEdgeInfo
3✔
1850
        err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
6✔
1851
                edges := tx.ReadWriteBucket(edgeBucket)
3✔
1852
                if edges == nil {
3✔
1853
                        return ErrEdgeNotFound
×
1854
                }
×
1855
                edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
3✔
1856
                if edgeIndex == nil {
3✔
1857
                        return ErrEdgeNotFound
×
1858
                }
×
1859
                chanIndex := edges.NestedReadWriteBucket(channelPointBucket)
3✔
1860
                if chanIndex == nil {
3✔
1861
                        return ErrEdgeNotFound
×
1862
                }
×
1863
                nodes := tx.ReadWriteBucket(nodeBucket)
3✔
1864
                if nodes == nil {
3✔
1865
                        return ErrGraphNodeNotFound
×
1866
                }
×
1867
                zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
3✔
1868
                if err != nil {
3✔
1869
                        return err
×
1870
                }
×
1871

1872
                var rawChanID [8]byte
3✔
1873
                for _, chanID := range chanIDs {
6✔
1874
                        byteOrder.PutUint64(rawChanID[:], chanID)
3✔
1875
                        edgeInfo, err := c.delChannelEdgeUnsafe(
3✔
1876
                                edges, edgeIndex, chanIndex, zombieIndex,
3✔
1877
                                rawChanID[:], markZombie, strictZombiePruning,
3✔
1878
                        )
3✔
1879
                        if err != nil {
3✔
UNCOV
1880
                                return err
×
UNCOV
1881
                        }
×
1882

1883
                        infos = append(infos, edgeInfo)
3✔
1884
                }
1885

1886
                return nil
3✔
1887
        }, func() {
3✔
1888
                infos = nil
3✔
1889
        })
3✔
1890
        if err != nil {
3✔
UNCOV
1891
                return nil, err
×
UNCOV
1892
        }
×
1893

1894
        for _, chanID := range chanIDs {
6✔
1895
                c.rejectCache.remove(chanID)
3✔
1896
                c.chanCache.remove(chanID)
3✔
1897
        }
3✔
1898

1899
        return infos, nil
3✔
1900
}
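
// exampleDeleteChannelEdges is a hypothetical sketch, not part of the original
// file, of removing a batch of channels and marking them as zombies so that
// stale copies of their announcements are not re-added later. The chanIDs are
// assumed to be supplied by the caller.
func exampleDeleteChannelEdges(store *KVStore, chanIDs ...uint64) error {
        // markZombie=true records the channels in the zombie index, while
        // strictZombiePruning=false does not restrict which node's update may
        // later resurrect a channel (see the DeleteChannelEdges doc above).
        removed, err := store.DeleteChannelEdges(false, true, chanIDs...)
        if err != nil {
                return err
        }

        log.Debugf("Deleted %d channel edges", len(removed))

        return nil
}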
1901

1902
// ChannelID attempts to look up the 8-byte compact channel ID which maps to the
1903
// passed channel point (outpoint). If the passed channel doesn't exist within
1904
// the database, then ErrEdgeNotFound is returned.
1905
func (c *KVStore) ChannelID(chanPoint *wire.OutPoint) (uint64, error) {
3✔
1906
        var chanID uint64
3✔
1907
        if err := kvdb.View(c.db, func(tx kvdb.RTx) error {
6✔
1908
                var err error
3✔
1909
                chanID, err = getChanID(tx, chanPoint)
3✔
1910
                return err
3✔
1911
        }, func() {
6✔
1912
                chanID = 0
3✔
1913
        }); err != nil {
6✔
1914
                return 0, err
3✔
1915
        }
3✔
1916

1917
        return chanID, nil
3✔
1918
}
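
// exampleChannelID is a hypothetical sketch, not part of the original file,
// showing how to translate a funding outpoint into its compact channel ID,
// handling the documented ErrEdgeNotFound case. The chanPoint is assumed to be
// supplied by the caller.
func exampleChannelID(store *KVStore, chanPoint *wire.OutPoint) (uint64,
        error) {

        chanID, err := store.ChannelID(chanPoint)
        switch {
        case errors.Is(err, ErrEdgeNotFound):
                log.Debugf("No channel known for outpoint %v", chanPoint)
                return 0, err

        case err != nil:
                return 0, err
        }

        return chanID, nil
}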
1919

1920
// getChanID returns the assigned channel ID for a given channel point.
1921
func getChanID(tx kvdb.RTx, chanPoint *wire.OutPoint) (uint64, error) {
3✔
1922
        var b bytes.Buffer
3✔
1923
        if err := WriteOutpoint(&b, chanPoint); err != nil {
3✔
1924
                return 0, err
×
1925
        }
×
1926

1927
        edges := tx.ReadBucket(edgeBucket)
3✔
1928
        if edges == nil {
3✔
1929
                return 0, ErrGraphNoEdgesFound
×
1930
        }
×
1931
        chanIndex := edges.NestedReadBucket(channelPointBucket)
3✔
1932
        if chanIndex == nil {
3✔
1933
                return 0, ErrGraphNoEdgesFound
×
1934
        }
×
1935

1936
        chanIDBytes := chanIndex.Get(b.Bytes())
3✔
1937
        if chanIDBytes == nil {
6✔
1938
                return 0, ErrEdgeNotFound
3✔
1939
        }
3✔
1940

1941
        chanID := byteOrder.Uint64(chanIDBytes)
3✔
1942

3✔
1943
        return chanID, nil
3✔
1944
}
1945

1946
// TODO(roasbeef): allow updates to use Batch?
1947

1948
// HighestChanID returns the "highest" known channel ID in the channel graph.
1949
// This represents the "newest" channel from the PoV of the chain. This method
1950
// can be used by peers to quickly determine if their graphs are in sync.
1951
func (c *KVStore) HighestChanID(_ context.Context) (uint64, error) {
3✔
1952
        var cid uint64
3✔
1953

3✔
1954
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
6✔
1955
                edges := tx.ReadBucket(edgeBucket)
3✔
1956
                if edges == nil {
3✔
1957
                        return ErrGraphNoEdgesFound
×
1958
                }
×
1959
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
3✔
1960
                if edgeIndex == nil {
3✔
1961
                        return ErrGraphNoEdgesFound
×
1962
                }
×
1963

1964
                // In order to find the highest chan ID, we'll fetch a cursor
1965
                // and use that to seek to the "end" of our known range.
1966
                cidCursor := edgeIndex.ReadCursor()
3✔
1967

3✔
1968
                lastChanID, _ := cidCursor.Last()
3✔
1969

3✔
1970
                // If there's no key, then this means that we don't actually
3✔
1971
                // know of any channels, so we'll return a predictable error.
3✔
1972
                if lastChanID == nil {
6✔
1973
                        return ErrGraphNoEdgesFound
3✔
1974
                }
3✔
1975

1976
                // Otherwise, we'll deserialize the channel ID and return it
1977
                // to the caller.
1978
                cid = byteOrder.Uint64(lastChanID)
3✔
1979

3✔
1980
                return nil
3✔
1981
        }, func() {
3✔
1982
                cid = 0
3✔
1983
        })
3✔
1984
        if err != nil && !errors.Is(err, ErrGraphNoEdgesFound) {
3✔
1985
                return 0, err
×
1986
        }
×
1987

1988
        return cid, nil
3✔
1989
}
1990

1991
// ChannelEdge represents the complete set of information for a channel edge in
1992
// the known channel graph. This struct couples the core information of the
1993
// edge as well as each of the known advertised edge policies.
1994
type ChannelEdge struct {
1995
        // Info contains all the static information describing the channel.
1996
        Info *models.ChannelEdgeInfo
1997

1998
        // Policy1 points to the "first" edge policy of the channel containing
1999
        // the dynamic information required to properly route through the edge.
2000
        Policy1 *models.ChannelEdgePolicy
2001

2002
        // Policy2 points to the "second" edge policy of the channel containing
2003
        // the dynamic information required to properly route through the edge.
2004
        Policy2 *models.ChannelEdgePolicy
2005

2006
        // Node1 is "node 1" in the channel. This is the node that would have
2007
        // produced Policy1 if it exists.
2008
        Node1 *models.LightningNode
2009

2010
        // Node2 is "node 2" in the channel. This is the node that would have
2011
        // produced Policy2 if it exists.
2012
        Node2 *models.LightningNode
2013
}
2014

2015
// ChanUpdatesInHorizon returns all the known channel edges which have at least
2016
// one edge that has an update timestamp within the specified horizon.
2017
func (c *KVStore) ChanUpdatesInHorizon(startTime,
2018
        endTime time.Time) ([]ChannelEdge, error) {
3✔
2019

3✔
2020
        // To ensure we don't return duplicate ChannelEdges, we'll use an
3✔
2021
        // additional map to keep track of the edges already seen to prevent
3✔
2022
        // re-adding them.
3✔
2023
        var edgesSeen map[uint64]struct{}
3✔
2024
        var edgesToCache map[uint64]ChannelEdge
3✔
2025
        var edgesInHorizon []ChannelEdge
3✔
2026

3✔
2027
        c.cacheMu.Lock()
3✔
2028
        defer c.cacheMu.Unlock()
3✔
2029

3✔
2030
        var hits int
3✔
2031
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
6✔
2032
                edges := tx.ReadBucket(edgeBucket)
3✔
2033
                if edges == nil {
3✔
2034
                        return ErrGraphNoEdgesFound
×
2035
                }
×
2036
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
3✔
2037
                if edgeIndex == nil {
3✔
2038
                        return ErrGraphNoEdgesFound
×
2039
                }
×
2040
                edgeUpdateIndex := edges.NestedReadBucket(edgeUpdateIndexBucket)
3✔
2041
                if edgeUpdateIndex == nil {
3✔
2042
                        return ErrGraphNoEdgesFound
×
2043
                }
×
2044

2045
                nodes := tx.ReadBucket(nodeBucket)
3✔
2046
                if nodes == nil {
3✔
2047
                        return ErrGraphNodesNotFound
×
2048
                }
×
2049

2050
                // We'll now obtain a cursor to perform a range query within
2051
                // the index to find all channels within the horizon.
2052
                updateCursor := edgeUpdateIndex.ReadCursor()
3✔
2053

3✔
2054
                var startTimeBytes, endTimeBytes [8 + 8]byte
3✔
2055
                byteOrder.PutUint64(
3✔
2056
                        startTimeBytes[:8], uint64(startTime.Unix()),
3✔
2057
                )
3✔
2058
                byteOrder.PutUint64(
3✔
2059
                        endTimeBytes[:8], uint64(endTime.Unix()),
3✔
2060
                )
3✔
2061

3✔
2062
                // With our start and end times constructed, we'll step through
3✔
2063
                // the index collecting the info and policy of each update of
3✔
2064
                // each channel that has a last update within the time range.
3✔
2065
                //
3✔
2066
                //nolint:ll
3✔
2067
                for indexKey, _ := updateCursor.Seek(startTimeBytes[:]); indexKey != nil &&
3✔
2068
                        bytes.Compare(indexKey, endTimeBytes[:]) <= 0; indexKey, _ = updateCursor.Next() {
6✔
2069
                        // We have a new eligible entry, so we'll slice off the
3✔
2070
                        // chan ID so we can query it in the DB.
3✔
2071
                        chanID := indexKey[8:]
3✔
2072

3✔
2073
                        // If we've already retrieved the info and policies for
3✔
2074
                        // this edge, then we can skip it as we don't need to do
3✔
2075
                        // so again.
3✔
2076
                        chanIDInt := byteOrder.Uint64(chanID)
3✔
2077
                        if _, ok := edgesSeen[chanIDInt]; ok {
3✔
UNCOV
2078
                                continue
×
2079
                        }
2080

2081
                        if channel, ok := c.chanCache.get(chanIDInt); ok {
6✔
2082
                                hits++
3✔
2083
                                edgesSeen[chanIDInt] = struct{}{}
3✔
2084
                                edgesInHorizon = append(edgesInHorizon, channel)
3✔
2085

3✔
2086
                                continue
3✔
2087
                        }
2088

2089
                        // First, we'll fetch the static edge information.
2090
                        edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID)
3✔
2091
                        if err != nil {
3✔
2092
                                chanID := byteOrder.Uint64(chanID)
×
2093
                                return fmt.Errorf("unable to fetch info for "+
×
2094
                                        "edge with chan_id=%v: %v", chanID, err)
×
2095
                        }
×
2096

2097
                        // With the static information obtained, we'll now
2098
                        // fetch the dynamic policy info.
2099
                        edge1, edge2, err := fetchChanEdgePolicies(
3✔
2100
                                edgeIndex, edges, chanID,
3✔
2101
                        )
3✔
2102
                        if err != nil {
3✔
2103
                                chanID := byteOrder.Uint64(chanID)
×
2104
                                return fmt.Errorf("unable to fetch policies "+
×
2105
                                        "for edge with chan_id=%v: %v", chanID,
×
2106
                                        err)
×
2107
                        }
×
2108

2109
                        node1, err := fetchLightningNode(
3✔
2110
                                nodes, edgeInfo.NodeKey1Bytes[:],
3✔
2111
                        )
3✔
2112
                        if err != nil {
3✔
2113
                                return err
×
2114
                        }
×
2115

2116
                        node2, err := fetchLightningNode(
3✔
2117
                                nodes, edgeInfo.NodeKey2Bytes[:],
3✔
2118
                        )
3✔
2119
                        if err != nil {
3✔
2120
                                return err
×
2121
                        }
×
2122

2123
                        // Finally, we'll collate this edge with the rest of the
2124
                        // edges to be returned.
2125
                        edgesSeen[chanIDInt] = struct{}{}
3✔
2126
                        channel := ChannelEdge{
3✔
2127
                                Info:    &edgeInfo,
3✔
2128
                                Policy1: edge1,
3✔
2129
                                Policy2: edge2,
3✔
2130
                                Node1:   &node1,
3✔
2131
                                Node2:   &node2,
3✔
2132
                        }
3✔
2133
                        edgesInHorizon = append(edgesInHorizon, channel)
3✔
2134
                        edgesToCache[chanIDInt] = channel
3✔
2135
                }
2136

2137
                return nil
3✔
2138
        }, func() {
3✔
2139
                edgesSeen = make(map[uint64]struct{})
3✔
2140
                edgesToCache = make(map[uint64]ChannelEdge)
3✔
2141
                edgesInHorizon = nil
3✔
2142
        })
3✔
2143
        switch {
3✔
2144
        case errors.Is(err, ErrGraphNoEdgesFound):
×
2145
                fallthrough
×
2146
        case errors.Is(err, ErrGraphNodesNotFound):
×
2147
                break
×
2148

2149
        case err != nil:
×
2150
                return nil, err
×
2151
        }
2152

2153
        // Insert any edges loaded from disk into the cache.
2154
        for chanid, channel := range edgesToCache {
6✔
2155
                c.chanCache.insert(chanid, channel)
3✔
2156
        }
3✔
2157

2158
        log.Debugf("ChanUpdatesInHorizon hit percentage: %f (%d/%d)",
3✔
2159
                float64(hits)/float64(len(edgesInHorizon)), hits,
3✔
2160
                len(edgesInHorizon))
3✔
2161

3✔
2162
        return edgesInHorizon, nil
3✔
2163
}
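
// exampleChanUpdatesInHorizon is a hypothetical sketch, not part of the
// original file, showing how a gossip syncer might fetch every channel that
// saw an update within a recent look-back window. The lookBack duration is an
// assumed parameter.
func exampleChanUpdatesInHorizon(store *KVStore,
        lookBack time.Duration) error {

        endTime := time.Now()
        startTime := endTime.Add(-lookBack)

        edges, err := store.ChanUpdatesInHorizon(startTime, endTime)
        if err != nil {
                return err
        }

        for _, edge := range edges {
                // Either policy may be nil if we have only heard one side's
                // update.
                log.Debugf("chan_id=%v updated (policy1=%v, policy2=%v)",
                        edge.Info.ChannelID, edge.Policy1 != nil,
                        edge.Policy2 != nil)
        }

        return nil
}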
2164

2165
// NodeUpdatesInHorizon returns all the known lightning nodes which have an
2166
// update timestamp within the passed range. This method can be used by two
2167
// nodes to quickly determine if they have the same set of up to date node
2168
// announcements.
2169
func (c *KVStore) NodeUpdatesInHorizon(startTime,
2170
        endTime time.Time) ([]models.LightningNode, error) {
3✔
2171

3✔
2172
        var nodesInHorizon []models.LightningNode
3✔
2173

3✔
2174
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
6✔
2175
                nodes := tx.ReadBucket(nodeBucket)
3✔
2176
                if nodes == nil {
3✔
2177
                        return ErrGraphNodesNotFound
×
2178
                }
×
2179

2180
                nodeUpdateIndex := nodes.NestedReadBucket(nodeUpdateIndexBucket)
3✔
2181
                if nodeUpdateIndex == nil {
3✔
2182
                        return ErrGraphNodesNotFound
×
2183
                }
×
2184

2185
                // We'll now obtain a cursor to perform a range query within
2186
                // the index to find all node announcements within the horizon.
2187
                updateCursor := nodeUpdateIndex.ReadCursor()
3✔
2188

3✔
2189
                var startTimeBytes, endTimeBytes [8 + 33]byte
3✔
2190
                byteOrder.PutUint64(
3✔
2191
                        startTimeBytes[:8], uint64(startTime.Unix()),
3✔
2192
                )
3✔
2193
                byteOrder.PutUint64(
3✔
2194
                        endTimeBytes[:8], uint64(endTime.Unix()),
3✔
2195
                )
3✔
2196

3✔
2197
                // With our start and end times constructed, we'll step through
3✔
2198
                // the index collecting info for each node within the time
3✔
2199
                // range.
3✔
2200
                //
3✔
2201
                //nolint:ll
3✔
2202
                for indexKey, _ := updateCursor.Seek(startTimeBytes[:]); indexKey != nil &&
3✔
2203
                        bytes.Compare(indexKey, endTimeBytes[:]) <= 0; indexKey, _ = updateCursor.Next() {
6✔
2204
                        nodePub := indexKey[8:]
3✔
2205
                        node, err := fetchLightningNode(nodes, nodePub)
3✔
2206
                        if err != nil {
3✔
2207
                                return err
×
2208
                        }
×
2209

2210
                        nodesInHorizon = append(nodesInHorizon, node)
3✔
2211
                }
2212

2213
                return nil
3✔
2214
        }, func() {
3✔
2215
                nodesInHorizon = nil
3✔
2216
        })
3✔
2217
        switch {
3✔
2218
        case errors.Is(err, ErrGraphNoEdgesFound):
×
2219
                fallthrough
×
2220
        case errors.Is(err, ErrGraphNodesNotFound):
×
2221
                break
×
2222

2223
        case err != nil:
×
2224
                return nil, err
×
2225
        }
2226

2227
        return nodesInHorizon, nil
3✔
2228
}
2229

2230
// FilterKnownChanIDs takes a set of channel IDs and return the subset of chan
2231
// ID's that we don't know and are not known zombies of the passed set. In other
2232
// words, we perform a set difference of our set of chan ID's and the ones
2233
// passed in. This method can be used by callers to determine the set of
2234
// channels another peer knows of that we don't. The ChannelUpdateInfos for the
2235
// known zombies is also returned.
2236
func (c *KVStore) FilterKnownChanIDs(chansInfo []ChannelUpdateInfo) ([]uint64,
2237
        []ChannelUpdateInfo, error) {
3✔
2238

3✔
2239
        var (
3✔
2240
                newChanIDs   []uint64
3✔
2241
                knownZombies []ChannelUpdateInfo
3✔
2242
        )
3✔
2243

3✔
2244
        c.cacheMu.Lock()
3✔
2245
        defer c.cacheMu.Unlock()
3✔
2246

3✔
2247
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
6✔
2248
                edges := tx.ReadBucket(edgeBucket)
3✔
2249
                if edges == nil {
3✔
2250
                        return ErrGraphNoEdgesFound
×
2251
                }
×
2252
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
3✔
2253
                if edgeIndex == nil {
3✔
2254
                        return ErrGraphNoEdgesFound
×
2255
                }
×
2256

2257
                // Fetch the zombie index; it may not exist if no edges have
2258
                // ever been marked as zombies. If the index has been
2259
                // initialized, we will use it later to skip known zombie edges.
2260
                zombieIndex := edges.NestedReadBucket(zombieBucket)
3✔
2261

3✔
2262
                // We'll run through the set of chanIDs and collate only the
3✔
2263
                // set of channel that are unable to be found within our db.
3✔
2264
                var cidBytes [8]byte
3✔
2265
                for _, info := range chansInfo {
6✔
2266
                        scid := info.ShortChannelID.ToUint64()
3✔
2267
                        byteOrder.PutUint64(cidBytes[:], scid)
3✔
2268

3✔
2269
                        // If the edge is already known, skip it.
3✔
2270
                        if v := edgeIndex.Get(cidBytes[:]); v != nil {
6✔
2271
                                continue
3✔
2272
                        }
2273

2274
                        // If the edge is a known zombie, skip it.
2275
                        if zombieIndex != nil {
6✔
2276
                                isZombie, _, _ := isZombieEdge(
3✔
2277
                                        zombieIndex, scid,
3✔
2278
                                )
3✔
2279

3✔
2280
                                if isZombie {
3✔
UNCOV
2281
                                        knownZombies = append(
×
UNCOV
2282
                                                knownZombies, info,
×
UNCOV
2283
                                        )
×
UNCOV
2284

×
UNCOV
2285
                                        continue
×
2286
                                }
2287
                        }
2288

2289
                        newChanIDs = append(newChanIDs, scid)
3✔
2290
                }
2291

2292
                return nil
3✔
2293
        }, func() {
3✔
2294
                newChanIDs = nil
3✔
2295
                knownZombies = nil
3✔
2296
        })
3✔
2297
        switch {
3✔
2298
        // If we don't know of any edges yet, then we'll return the entire set
2299
        // of chan IDs specified.
2300
        case errors.Is(err, ErrGraphNoEdgesFound):
×
2301
                ogChanIDs := make([]uint64, len(chansInfo))
×
2302
                for i, info := range chansInfo {
×
2303
                        ogChanIDs[i] = info.ShortChannelID.ToUint64()
×
2304
                }
×
2305

2306
                return ogChanIDs, nil, nil
×
2307

2308
        case err != nil:
×
2309
                return nil, nil, err
×
2310
        }
2311

2312
        return newChanIDs, knownZombies, nil
3✔
2313
}
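
// exampleFilterKnownChanIDs is a hypothetical sketch, not part of the original
// file, showing how SCIDs received from a remote peer could be reduced to the
// subset we still need to query, using NewChannelUpdateInfo (defined below) to
// normalize the timestamps. The remoteSCIDs are assumed to come from a gossip
// reply.
func exampleFilterKnownChanIDs(store *KVStore,
        remoteSCIDs []lnwire.ShortChannelID) ([]uint64, error) {

        chansInfo := make([]ChannelUpdateInfo, 0, len(remoteSCIDs))
        for _, scid := range remoteSCIDs {
                // No timestamps are known yet, so let the constructor fill in
                // the zero unix timestamps.
                chansInfo = append(
                        chansInfo,
                        NewChannelUpdateInfo(scid, time.Time{}, time.Time{}),
                )
        }

        newChanIDs, knownZombies, err := store.FilterKnownChanIDs(chansInfo)
        if err != nil {
                return nil, err
        }

        log.Debugf("%d new channels, %d known zombies", len(newChanIDs),
                len(knownZombies))

        return newChanIDs, nil
}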
2314

2315
// ChannelUpdateInfo couples the SCID of a channel with the timestamps of the
2316
// latest received channel updates for the channel.
2317
type ChannelUpdateInfo struct {
2318
        // ShortChannelID is the SCID identifier of the channel.
2319
        ShortChannelID lnwire.ShortChannelID
2320

2321
        // Node1UpdateTimestamp is the timestamp of the latest received update
2322
        // from the node 1 channel peer. This will be set to zero time if no
2323
        // update has yet been received from this node.
2324
        Node1UpdateTimestamp time.Time
2325

2326
        // Node2UpdateTimestamp is the timestamp of the latest received update
2327
        // from the node 2 channel peer. This will be set to zero time if no
2328
        // update has yet been received from this node.
2329
        Node2UpdateTimestamp time.Time
2330
}
2331

2332
// NewChannelUpdateInfo is a constructor which makes sure we initialize the
2333
// timestamps with the zero-seconds unix timestamp, which equals
2334
// `January 1, 1970, 00:00:00 UTC` in case the value is `time.Time{}`.
2335
func NewChannelUpdateInfo(scid lnwire.ShortChannelID, node1Timestamp,
2336
        node2Timestamp time.Time) ChannelUpdateInfo {
3✔
2337

3✔
2338
        chanInfo := ChannelUpdateInfo{
3✔
2339
                ShortChannelID:       scid,
3✔
2340
                Node1UpdateTimestamp: node1Timestamp,
3✔
2341
                Node2UpdateTimestamp: node2Timestamp,
3✔
2342
        }
3✔
2343

3✔
2344
        if node1Timestamp.IsZero() {
6✔
2345
                chanInfo.Node1UpdateTimestamp = time.Unix(0, 0)
3✔
2346
        }
3✔
2347

2348
        if node2Timestamp.IsZero() {
6✔
2349
                chanInfo.Node2UpdateTimestamp = time.Unix(0, 0)
3✔
2350
        }
3✔
2351

2352
        return chanInfo
3✔
2353
}
2354

2355
// BlockChannelRange represents a range of channels for a given block height.
2356
type BlockChannelRange struct {
2357
        // Height is the height of the block all of the channels below were
2358
        // included in.
2359
        Height uint32
2360

2361
        // Channels is the list of channels identified by their short ID
2362
        // representation known to us that were included in the block height
2363
        // above. The list may include channel update timestamp information if
2364
        // requested.
2365
        Channels []ChannelUpdateInfo
2366
}
2367

2368
// FilterChannelRange returns the channel ID's of all known channels which were
2369
// mined in a block height within the passed range. The channel IDs are grouped
2370
// by their common block height. This method can be used to quickly share with a
2371
// peer the set of channels we know of within a particular range to catch them
2372
// up after a period of time offline. If withTimestamps is true then the
2373
// timestamp info of the latest received channel update messages of the channel
2374
// will be included in the response.
2375
func (c *KVStore) FilterChannelRange(startHeight,
2376
        endHeight uint32, withTimestamps bool) ([]BlockChannelRange, error) {
3✔
2377

3✔
2378
        startChanID := &lnwire.ShortChannelID{
3✔
2379
                BlockHeight: startHeight,
3✔
2380
        }
3✔
2381

3✔
2382
        endChanID := lnwire.ShortChannelID{
3✔
2383
                BlockHeight: endHeight,
3✔
2384
                TxIndex:     math.MaxUint32 & 0x00ffffff,
3✔
2385
                TxPosition:  math.MaxUint16,
3✔
2386
        }
3✔
2387

3✔
2388
        // As we need to perform a range scan, we'll convert the starting and
3✔
2389
        // ending height to their corresponding values when encoded using short
3✔
2390
        // channel ID's.
3✔
2391
        var chanIDStart, chanIDEnd [8]byte
3✔
2392
        byteOrder.PutUint64(chanIDStart[:], startChanID.ToUint64())
3✔
2393
        byteOrder.PutUint64(chanIDEnd[:], endChanID.ToUint64())
3✔
2394

3✔
2395
        var channelsPerBlock map[uint32][]ChannelUpdateInfo
3✔
2396
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
6✔
2397
                edges := tx.ReadBucket(edgeBucket)
3✔
2398
                if edges == nil {
3✔
2399
                        return ErrGraphNoEdgesFound
×
2400
                }
×
2401
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
3✔
2402
                if edgeIndex == nil {
3✔
2403
                        return ErrGraphNoEdgesFound
×
2404
                }
×
2405

2406
                cursor := edgeIndex.ReadCursor()
3✔
2407

3✔
2408
                // We'll now iterate through the database, and find each
3✔
2409
                // channel ID that resides within the specified range.
3✔
2410
                //
3✔
2411
                //nolint:ll
3✔
2412
                for k, v := cursor.Seek(chanIDStart[:]); k != nil &&
3✔
2413
                        bytes.Compare(k, chanIDEnd[:]) <= 0; k, v = cursor.Next() {
6✔
2414
                        // Don't send alias SCIDs during gossip sync.
3✔
2415
                        edgeReader := bytes.NewReader(v)
3✔
2416
                        edgeInfo, err := deserializeChanEdgeInfo(edgeReader)
3✔
2417
                        if err != nil {
3✔
2418
                                return err
×
2419
                        }
×
2420

2421
                        if edgeInfo.AuthProof == nil {
6✔
2422
                                continue
3✔
2423
                        }
2424

2425
                        // This channel ID rests within the target range, so
2426
                        // we'll add it to our returned set.
2427
                        rawCid := byteOrder.Uint64(k)
3✔
2428
                        cid := lnwire.NewShortChanIDFromInt(rawCid)
3✔
2429

3✔
2430
                        chanInfo := NewChannelUpdateInfo(
3✔
2431
                                cid, time.Time{}, time.Time{},
3✔
2432
                        )
3✔
2433

3✔
2434
                        if !withTimestamps {
3✔
UNCOV
2435
                                channelsPerBlock[cid.BlockHeight] = append(
×
UNCOV
2436
                                        channelsPerBlock[cid.BlockHeight],
×
UNCOV
2437
                                        chanInfo,
×
UNCOV
2438
                                )
×
UNCOV
2439

×
UNCOV
2440
                                continue
×
2441
                        }
2442

2443
                        node1Key, node2Key := computeEdgePolicyKeys(&edgeInfo)
3✔
2444

3✔
2445
                        rawPolicy := edges.Get(node1Key)
3✔
2446
                        if len(rawPolicy) != 0 {
6✔
2447
                                r := bytes.NewReader(rawPolicy)
3✔
2448

3✔
2449
                                edge, err := deserializeChanEdgePolicyRaw(r)
3✔
2450
                                if err != nil && !errors.Is(
3✔
2451
                                        err, ErrEdgePolicyOptionalFieldNotFound,
3✔
2452
                                ) && !errors.Is(err, ErrParsingExtraTLVBytes) {
3✔
2453

×
2454
                                        return err
×
2455
                                }
×
2456

2457
                                chanInfo.Node1UpdateTimestamp = edge.LastUpdate
3✔
2458
                        }
2459

2460
                        rawPolicy = edges.Get(node2Key)
3✔
2461
                        if len(rawPolicy) != 0 {
6✔
2462
                                r := bytes.NewReader(rawPolicy)
3✔
2463

3✔
2464
                                edge, err := deserializeChanEdgePolicyRaw(r)
3✔
2465
                                if err != nil && !errors.Is(
3✔
2466
                                        err, ErrEdgePolicyOptionalFieldNotFound,
3✔
2467
                                ) && !errors.Is(err, ErrParsingExtraTLVBytes) {
3✔
2468

×
2469
                                        return err
×
2470
                                }
×
2471

2472
                                chanInfo.Node2UpdateTimestamp = edge.LastUpdate
3✔
2473
                        }
2474

2475
                        channelsPerBlock[cid.BlockHeight] = append(
3✔
2476
                                channelsPerBlock[cid.BlockHeight], chanInfo,
3✔
2477
                        )
3✔
2478
                }
2479

2480
                return nil
3✔
2481
        }, func() {
3✔
2482
                channelsPerBlock = make(map[uint32][]ChannelUpdateInfo)
3✔
2483
        })
3✔
2484

2485
        switch {
3✔
2486
        // If we don't know of any channels yet, then there's nothing to
2487
        // filter, so we'll return an empty slice.
2488
        case errors.Is(err, ErrGraphNoEdgesFound) || len(channelsPerBlock) == 0:
3✔
2489
                return nil, nil
3✔
2490

2491
        case err != nil:
×
2492
                return nil, err
×
2493
        }
2494

2495
        // Return the channel ranges in ascending block height order.
2496
        blocks := make([]uint32, 0, len(channelsPerBlock))
3✔
2497
        for block := range channelsPerBlock {
6✔
2498
                blocks = append(blocks, block)
3✔
2499
        }
3✔
2500
        sort.Slice(blocks, func(i, j int) bool {
6✔
2501
                return blocks[i] < blocks[j]
3✔
2502
        })
3✔
2503

2504
        channelRanges := make([]BlockChannelRange, 0, len(channelsPerBlock))
3✔
2505
        for _, block := range blocks {
6✔
2506
                channelRanges = append(channelRanges, BlockChannelRange{
3✔
2507
                        Height:   block,
3✔
2508
                        Channels: channelsPerBlock[block],
3✔
2509
                })
3✔
2510
        }
3✔
2511

2512
        return channelRanges, nil
3✔
2513
}
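
// The following is a minimal, hypothetical usage sketch (the helper and its
// parameters are illustrative, not part of this package's API): it shows how a
// gossip syncer might consume FilterChannelRange, assuming an initialized
// *KVStore.
func exampleFilterChannelRange(store *KVStore, start, end uint32) error {
        ranges, err := store.FilterChannelRange(start, end, true)
        if err != nil {
                return err
        }

        for _, blockRange := range ranges {
                // Each BlockChannelRange groups the channels confirmed at a
                // single height; update timestamps are populated because
                // withTimestamps was passed as true above.
                fmt.Printf("height=%d channels=%d\n",
                        blockRange.Height, len(blockRange.Channels))
        }

        return nil
}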
2514

2515
// FetchChanInfos returns the set of channel edges that correspond to the passed
2516
// channel ID's. If an edge in the query is unknown to the database, it will be
2517
// skipped and the result will contain only those edges that exist at the time
2518
// of the query. This can be used to respond to peer queries that are seeking to
2519
// fill in gaps in their view of the channel graph.
2520
func (c *KVStore) FetchChanInfos(chanIDs []uint64) ([]ChannelEdge, error) {
3✔
2521
        return c.fetchChanInfos(nil, chanIDs)
3✔
2522
}
3✔
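
// A minimal, hypothetical sketch (not part of this package's API) of replying
// to a peer's query for specific short channel IDs: channels unknown to the
// database are silently skipped, so the result may be shorter than the query.
func exampleFetchChanInfos(store *KVStore, chanIDs []uint64) error {
        edges, err := store.FetchChanInfos(chanIDs)
        if err != nil {
                return err
        }

        for _, edge := range edges {
                // Policy1 and Policy2 may be nil if we know the channel
                // announcement but have not yet received an update for that
                // direction.
                fmt.Printf("chan=%d policy1=%v policy2=%v\n",
                        edge.Info.ChannelID, edge.Policy1 != nil,
                        edge.Policy2 != nil)
        }

        return nil
}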
2523

2524
// fetchChanInfos returns the set of channel edges that correspond to the passed
2525
// channel ID's. If an edge in the query is unknown to the database, it will be
2526
// skipped and the result will contain only those edges that exist at the time
2527
// of the query. This can be used to respond to peer queries that are seeking to
2528
// fill in gaps in their view of the channel graph.
2529
//
2530
// NOTE: An optional transaction may be provided. If none is provided, then a
2531
// new one will be created.
2532
func (c *KVStore) fetchChanInfos(tx kvdb.RTx, chanIDs []uint64) (
2533
        []ChannelEdge, error) {
3✔
2534
        // TODO(roasbeef): sort cids?
3✔
2535

3✔
2536
        var (
3✔
2537
                chanEdges []ChannelEdge
3✔
2538
                cidBytes  [8]byte
3✔
2539
        )
3✔
2540

3✔
2541
        fetchChanInfos := func(tx kvdb.RTx) error {
6✔
2542
                edges := tx.ReadBucket(edgeBucket)
3✔
2543
                if edges == nil {
3✔
2544
                        return ErrGraphNoEdgesFound
×
2545
                }
×
2546
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
3✔
2547
                if edgeIndex == nil {
3✔
2548
                        return ErrGraphNoEdgesFound
×
2549
                }
×
2550
                nodes := tx.ReadBucket(nodeBucket)
3✔
2551
                if nodes == nil {
3✔
2552
                        return ErrGraphNotFound
×
2553
                }
×
2554

2555
                for _, cid := range chanIDs {
6✔
2556
                        byteOrder.PutUint64(cidBytes[:], cid)
3✔
2557

3✔
2558
                        // First, we'll fetch the static edge information. If
3✔
2559
                        // the edge is unknown, we will skip the edge and
3✔
2560
                        // continue gathering all known edges.
3✔
2561
                        edgeInfo, err := fetchChanEdgeInfo(
3✔
2562
                                edgeIndex, cidBytes[:],
3✔
2563
                        )
3✔
2564
                        switch {
3✔
UNCOV
2565
                        case errors.Is(err, ErrEdgeNotFound):
×
UNCOV
2566
                                continue
×
2567
                        case err != nil:
×
2568
                                return err
×
2569
                        }
2570

2571
                        // With the static information obtained, we'll now
2572
                        // fetch the dynamic policy info.
2573
                        edge1, edge2, err := fetchChanEdgePolicies(
3✔
2574
                                edgeIndex, edges, cidBytes[:],
3✔
2575
                        )
3✔
2576
                        if err != nil {
3✔
2577
                                return err
×
2578
                        }
×
2579

2580
                        node1, err := fetchLightningNode(
3✔
2581
                                nodes, edgeInfo.NodeKey1Bytes[:],
3✔
2582
                        )
3✔
2583
                        if err != nil {
3✔
2584
                                return err
×
2585
                        }
×
2586

2587
                        node2, err := fetchLightningNode(
3✔
2588
                                nodes, edgeInfo.NodeKey2Bytes[:],
3✔
2589
                        )
3✔
2590
                        if err != nil {
3✔
2591
                                return err
×
2592
                        }
×
2593

2594
                        chanEdges = append(chanEdges, ChannelEdge{
3✔
2595
                                Info:    &edgeInfo,
3✔
2596
                                Policy1: edge1,
3✔
2597
                                Policy2: edge2,
3✔
2598
                                Node1:   &node1,
3✔
2599
                                Node2:   &node2,
3✔
2600
                        })
3✔
2601
                }
2602

2603
                return nil
3✔
2604
        }
2605

2606
        if tx == nil {
6✔
2607
                err := kvdb.View(c.db, fetchChanInfos, func() {
6✔
2608
                        chanEdges = nil
3✔
2609
                })
3✔
2610
                if err != nil {
3✔
2611
                        return nil, err
×
2612
                }
×
2613

2614
                return chanEdges, nil
3✔
2615
        }
2616

2617
        err := fetchChanInfos(tx)
×
2618
        if err != nil {
×
2619
                return nil, err
×
2620
        }
×
2621

2622
        return chanEdges, nil
×
2623
}
2624

2625
func delEdgeUpdateIndexEntry(edgesBucket kvdb.RwBucket, chanID uint64,
2626
        edge1, edge2 *models.ChannelEdgePolicy) error {
3✔
2627

3✔
2628
        // First, we'll fetch the edge update index bucket which currently
3✔
2629
        // stores an entry for the channel we're about to delete.
3✔
2630
        updateIndex := edgesBucket.NestedReadWriteBucket(edgeUpdateIndexBucket)
3✔
2631
        if updateIndex == nil {
3✔
2632
                // No edges in bucket, return early.
×
2633
                return nil
×
2634
        }
×
2635

2636
        // Now that we have the bucket, we'll attempt to construct a template
2637
        // for the index key: updateTime || chanid.
2638
        var indexKey [8 + 8]byte
3✔
2639
        byteOrder.PutUint64(indexKey[8:], chanID)
3✔
2640

3✔
2641
        // With the template constructed, we'll attempt to delete an entry that
3✔
2642
        // would have been created by both edges: we'll alternate the update
3✔
2643
        // times, as one may have overridden the other.
3✔
2644
        if edge1 != nil {
6✔
2645
                byteOrder.PutUint64(
3✔
2646
                        indexKey[:8], uint64(edge1.LastUpdate.Unix()),
3✔
2647
                )
3✔
2648
                if err := updateIndex.Delete(indexKey[:]); err != nil {
3✔
2649
                        return err
×
2650
                }
×
2651
        }
2652

2653
        // We'll also attempt to delete the entry that may have been created by
2654
        // the second edge.
2655
        if edge2 != nil {
6✔
2656
                byteOrder.PutUint64(
3✔
2657
                        indexKey[:8], uint64(edge2.LastUpdate.Unix()),
3✔
2658
                )
3✔
2659
                if err := updateIndex.Delete(indexKey[:]); err != nil {
3✔
2660
                        return err
×
2661
                }
×
2662
        }
2663

2664
        return nil
3✔
2665
}
2666

2667
// delChannelEdgeUnsafe deletes the edge with the given chanID from the graph
2668
// cache. It then goes on to delete any policy info and edge info for this
2669
// channel from the DB and finally, if isZombie is true, it will add an entry
2670
// for this channel in the zombie index.
2671
//
2672
// NOTE: this method MUST only be called if the cacheMu has already been
2673
// acquired.
2674
func (c *KVStore) delChannelEdgeUnsafe(edges, edgeIndex, chanIndex,
2675
        zombieIndex kvdb.RwBucket, chanID []byte, isZombie,
2676
        strictZombie bool) (*models.ChannelEdgeInfo, error) {
3✔
2677

3✔
2678
        edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID)
3✔
2679
        if err != nil {
3✔
UNCOV
2680
                return nil, err
×
UNCOV
2681
        }
×
2682

2683
        // We'll also remove the entry in the edge update index bucket before
2684
        // we delete the edges themselves so we can access their last update
2685
        // times.
2686
        cid := byteOrder.Uint64(chanID)
3✔
2687
        edge1, edge2, err := fetchChanEdgePolicies(edgeIndex, edges, chanID)
3✔
2688
        if err != nil {
3✔
2689
                return nil, err
×
2690
        }
×
2691
        err = delEdgeUpdateIndexEntry(edges, cid, edge1, edge2)
3✔
2692
        if err != nil {
3✔
2693
                return nil, err
×
2694
        }
×
2695

2696
        // The edge key is of the format pubKey || chanID. First we construct
2697
        // the latter half, populating the channel ID.
2698
        var edgeKey [33 + 8]byte
3✔
2699
        copy(edgeKey[33:], chanID)
3✔
2700

3✔
2701
        // With the latter half constructed, copy over the first public key to
3✔
2702
        // delete the edge in this direction, then the second to delete the
3✔
2703
        // edge in the opposite direction.
3✔
2704
        copy(edgeKey[:33], edgeInfo.NodeKey1Bytes[:])
3✔
2705
        if edges.Get(edgeKey[:]) != nil {
6✔
2706
                if err := edges.Delete(edgeKey[:]); err != nil {
3✔
2707
                        return nil, err
×
2708
                }
×
2709
        }
2710
        copy(edgeKey[:33], edgeInfo.NodeKey2Bytes[:])
3✔
2711
        if edges.Get(edgeKey[:]) != nil {
6✔
2712
                if err := edges.Delete(edgeKey[:]); err != nil {
3✔
2713
                        return nil, err
×
2714
                }
×
2715
        }
2716

2717
        // As part of deleting the edge we also remove all disabled entries
2718
        // from the edgePolicyDisabledIndex bucket. We do that for both
2719
        // directions.
2720
        err = updateEdgePolicyDisabledIndex(edges, cid, false, false)
3✔
2721
        if err != nil {
3✔
2722
                return nil, err
×
2723
        }
×
2724
        err = updateEdgePolicyDisabledIndex(edges, cid, true, false)
3✔
2725
        if err != nil {
3✔
2726
                return nil, err
×
2727
        }
×
2728

2729
        // With the edge data deleted, we can purge the information from the two
2730
        // edge indexes.
2731
        if err := edgeIndex.Delete(chanID); err != nil {
3✔
2732
                return nil, err
×
2733
        }
×
2734
        var b bytes.Buffer
3✔
2735
        if err := WriteOutpoint(&b, &edgeInfo.ChannelPoint); err != nil {
3✔
2736
                return nil, err
×
2737
        }
×
2738
        if err := chanIndex.Delete(b.Bytes()); err != nil {
3✔
2739
                return nil, err
×
2740
        }
×
2741

2742
        // Finally, we'll mark the edge as a zombie within our index if it's
2743
        // being removed due to the channel becoming a zombie. We do this to
2744
        // ensure we don't store unnecessary data for spent channels.
2745
        if !isZombie {
6✔
2746
                return &edgeInfo, nil
3✔
2747
        }
3✔
2748

2749
        nodeKey1, nodeKey2 := edgeInfo.NodeKey1Bytes, edgeInfo.NodeKey2Bytes
3✔
2750
        if strictZombie {
3✔
UNCOV
2751
                nodeKey1, nodeKey2 = makeZombiePubkeys(&edgeInfo, edge1, edge2)
×
UNCOV
2752
        }
×
2753

2754
        return &edgeInfo, markEdgeZombie(
3✔
2755
                zombieIndex, byteOrder.Uint64(chanID), nodeKey1, nodeKey2,
3✔
2756
        )
3✔
2757
}
2758

2759
// makeZombiePubkeys derives the node pubkeys to store in the zombie index for a
2760
// particular pair of channel policies. The return values are one of:
2761
//  1. (pubkey1, pubkey2)
2762
//  2. (pubkey1, blank)
2763
//  3. (blank, pubkey2)
2764
//
2765
// A blank pubkey means the corresponding node will be unable to resurrect a
2766
// channel on its own. For example, node1 may continue to publish recent
2767
// updates, but node2 has fallen way behind. After marking an edge as a zombie,
2768
// we don't want another fresh update from node1 to resurrect, as the edge can
2769
// only become live once node2 finally sends something recent.
2770
//
2771
// In the case where we have neither update, we allow either party to resurrect
2772
// the channel. If the channel were to be marked zombie again, it would be
2773
// marked with the correct lagging channel since we received an update from only
2774
// one side.
2775
func makeZombiePubkeys(info *models.ChannelEdgeInfo,
UNCOV
2776
        e1, e2 *models.ChannelEdgePolicy) ([33]byte, [33]byte) {
×
UNCOV
2777

×
UNCOV
2778
        switch {
×
2779
        // If we don't have either edge policy, we'll return both pubkeys so
2780
        // that the channel can be resurrected by either party.
2781
        case e1 == nil && e2 == nil:
×
2782
                return info.NodeKey1Bytes, info.NodeKey2Bytes
×
2783

2784
        // If we're missing edge1, or if both edges are present but edge1 is
2785
        // older, we'll return edge1's pubkey and a blank pubkey for edge2. This
2786
        // means that only an update from edge1 will be able to resurrect the
2787
        // channel.
UNCOV
2788
        case e1 == nil || (e2 != nil && e1.LastUpdate.Before(e2.LastUpdate)):
×
UNCOV
2789
                return info.NodeKey1Bytes, [33]byte{}
×
2790

2791
        // Otherwise, we're missing edge2 or edge2 is the older side, so we
2792
        // return a blank pubkey for edge1. In this case, only an update from
2793
        // edge2 can resurrect the channel.
UNCOV
2794
        default:
×
UNCOV
2795
                return [33]byte{}, info.NodeKey2Bytes
×
2796
        }
2797
}
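
// A short, hypothetical illustration (not part of this package's API) of how
// the keys returned above gate resurrection: the lagging node's key is kept,
// so only a fresh update from that node can revive the channel, while a blank
// key blocks the other side entirely.
func exampleZombieResurrection(info *models.ChannelEdgeInfo,
        e1, e2 *models.ChannelEdgePolicy) {

        var blank [33]byte

        key1, key2 := makeZombiePubkeys(info, e1, e2)
        switch {
        case key1 != blank && key2 != blank:
                fmt.Println("no updates known: either node may resurrect")
        case key2 == blank:
                fmt.Println("node1 is lagging: only node1 may resurrect")
        default:
                fmt.Println("node2 is lagging: only node2 may resurrect")
        }
}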
2798

2799
// UpdateEdgePolicy updates the edge routing policy for a single directed edge
2800
// within the database for the referenced channel. The `flags` attribute within
2801
// the ChannelEdgePolicy determines which of the directed edges are being
2802
// updated. If the flag is 1, then the first node's information is being
2803
// updated, otherwise it's the second node's information. The node ordering is
2804
// determined by the lexicographical ordering of the identity public keys of the
2805
// nodes on either side of the channel.
2806
func (c *KVStore) UpdateEdgePolicy(ctx context.Context,
2807
        edge *models.ChannelEdgePolicy,
2808
        opts ...batch.SchedulerOption) (route.Vertex, route.Vertex, error) {
3✔
2809

3✔
2810
        var (
3✔
2811
                isUpdate1    bool
3✔
2812
                edgeNotFound bool
3✔
2813
                from, to     route.Vertex
3✔
2814
        )
3✔
2815

3✔
2816
        r := &batch.Request[kvdb.RwTx]{
3✔
2817
                Opts: batch.NewSchedulerOptions(opts...),
3✔
2818
                Reset: func() {
6✔
2819
                        isUpdate1 = false
3✔
2820
                        edgeNotFound = false
3✔
2821
                },
3✔
2822
                Do: func(tx kvdb.RwTx) error {
3✔
2823
                        var err error
3✔
2824
                        from, to, isUpdate1, err = updateEdgePolicy(tx, edge)
3✔
2825
                        if err != nil {
3✔
UNCOV
2826
                                log.Errorf("UpdateEdgePolicy faild: %v", err)
×
UNCOV
2827
                        }
×
2828

2829
                        // Silence ErrEdgeNotFound so that the batch can
2830
                        // succeed, but propagate the error via local state.
2831
                        if errors.Is(err, ErrEdgeNotFound) {
3✔
UNCOV
2832
                                edgeNotFound = true
×
UNCOV
2833
                                return nil
×
UNCOV
2834
                        }
×
2835

2836
                        return err
3✔
2837
                },
2838
                OnCommit: func(err error) error {
3✔
2839
                        switch {
3✔
UNCOV
2840
                        case err != nil:
×
UNCOV
2841
                                return err
×
UNCOV
2842
                        case edgeNotFound:
×
UNCOV
2843
                                return ErrEdgeNotFound
×
2844
                        default:
3✔
2845
                                c.updateEdgeCache(edge, isUpdate1)
3✔
2846
                                return nil
3✔
2847
                        }
2848
                },
2849
        }
2850

2851
        err := c.chanScheduler.Execute(ctx, r)
3✔
2852

3✔
2853
        return from, to, err
3✔
2854
}
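
// A minimal, hypothetical sketch (not part of this package's API) of applying
// a freshly validated channel update: the direction is derived from the
// ChannelFlags bit as described above, and ErrEdgeNotFound is surfaced via the
// batch's OnCommit hook.
func exampleUpdateEdgePolicy(ctx context.Context, store *KVStore,
        policy *models.ChannelEdgePolicy) error {

        from, to, err := store.UpdateEdgePolicy(ctx, policy)
        switch {
        case errors.Is(err, ErrEdgeNotFound):
                // We received an update for a channel whose announcement we
                // have not processed yet; a caller would typically cache it.
                return err

        case err != nil:
                return err
        }

        fmt.Printf("updated policy from %x towards %x\n", from[:], to[:])

        return nil
}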
2855

2856
func (c *KVStore) updateEdgeCache(e *models.ChannelEdgePolicy,
2857
        isUpdate1 bool) {
3✔
2858

3✔
2859
        // If an entry for this channel is found in reject cache, we'll modify
3✔
2860
        // the entry with the updated timestamp for the direction that was just
3✔
2861
        // written. If the edge doesn't exist, we'll load the cache entry lazily
3✔
2862
        // during the next query for this edge.
3✔
2863
        if entry, ok := c.rejectCache.get(e.ChannelID); ok {
6✔
2864
                if isUpdate1 {
6✔
2865
                        entry.upd1Time = e.LastUpdate.Unix()
3✔
2866
                } else {
6✔
2867
                        entry.upd2Time = e.LastUpdate.Unix()
3✔
2868
                }
3✔
2869
                c.rejectCache.insert(e.ChannelID, entry)
3✔
2870
        }
2871

2872
        // If an entry for this channel is found in channel cache, we'll modify
2873
        // the entry with the updated policy for the direction that was just
2874
        // written. If the edge doesn't exist, we'll defer loading the info and
2875
        // policies and lazily read from disk during the next query.
2876
        if channel, ok := c.chanCache.get(e.ChannelID); ok {
6✔
2877
                if isUpdate1 {
6✔
2878
                        channel.Policy1 = e
3✔
2879
                } else {
6✔
2880
                        channel.Policy2 = e
3✔
2881
                }
3✔
2882
                c.chanCache.insert(e.ChannelID, channel)
3✔
2883
        }
2884
}
2885

2886
// updateEdgePolicy attempts to update an edge's policy within the relevant
2887
// buckets using an existing database transaction. The returned boolean will be
2888
// true if the updated policy belongs to node1, and false if it belongs
2889
// to node2.
2890
func updateEdgePolicy(tx kvdb.RwTx, edge *models.ChannelEdgePolicy) (
2891
        route.Vertex, route.Vertex, bool, error) {
3✔
2892

3✔
2893
        var noVertex route.Vertex
3✔
2894

3✔
2895
        edges := tx.ReadWriteBucket(edgeBucket)
3✔
2896
        if edges == nil {
3✔
2897
                return noVertex, noVertex, false, ErrEdgeNotFound
×
2898
        }
×
2899
        edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
3✔
2900
        if edgeIndex == nil {
3✔
2901
                return noVertex, noVertex, false, ErrEdgeNotFound
×
2902
        }
×
2903

2904
        // Create the channelID key by converting the channel ID
2905
        // integer into a byte slice.
2906
        var chanID [8]byte
3✔
2907
        byteOrder.PutUint64(chanID[:], edge.ChannelID)
3✔
2908

3✔
2909
        // With the channel ID, we then fetch the value storing the two
3✔
2910
        // nodes which connect this channel edge.
3✔
2911
        nodeInfo := edgeIndex.Get(chanID[:])
3✔
2912
        if nodeInfo == nil {
3✔
UNCOV
2913
                return noVertex, noVertex, false, ErrEdgeNotFound
×
UNCOV
2914
        }
×
2915

2916
        // Depending on the flags value passed above, either the first
2917
        // or second edge policy is being updated.
2918
        var fromNode, toNode []byte
3✔
2919
        var isUpdate1 bool
3✔
2920
        if edge.ChannelFlags&lnwire.ChanUpdateDirection == 0 {
6✔
2921
                fromNode = nodeInfo[:33]
3✔
2922
                toNode = nodeInfo[33:66]
3✔
2923
                isUpdate1 = true
3✔
2924
        } else {
6✔
2925
                fromNode = nodeInfo[33:66]
3✔
2926
                toNode = nodeInfo[:33]
3✔
2927
                isUpdate1 = false
3✔
2928
        }
3✔
2929

2930
        // Finally, with the direction of the edge being updated
2931
        // identified, we update the on-disk edge representation.
2932
        err := putChanEdgePolicy(edges, edge, fromNode, toNode)
3✔
2933
        if err != nil {
3✔
UNCOV
2934
                return noVertex, noVertex, false, err
×
UNCOV
2935
        }
×
2936

2937
        var (
3✔
2938
                fromNodePubKey route.Vertex
3✔
2939
                toNodePubKey   route.Vertex
3✔
2940
        )
3✔
2941
        copy(fromNodePubKey[:], fromNode)
3✔
2942
        copy(toNodePubKey[:], toNode)
3✔
2943

3✔
2944
        return fromNodePubKey, toNodePubKey, isUpdate1, nil
3✔
2945
}
2946

2947
// isPublic determines whether the node is seen as public within the graph from
2948
// the source node's point of view. An existing database transaction can also be
2949
// specified.
2950
func (c *KVStore) isPublic(tx kvdb.RTx, nodePub route.Vertex,
2951
        sourcePubKey []byte) (bool, error) {
3✔
2952

3✔
2953
        // In order to determine whether this node is publicly advertised within
3✔
2954
        // the graph, we'll need to look at all of its edges and check whether
3✔
2955
        // they extend to any other node than the source node. errDone will be
3✔
2956
        // used to terminate the check early.
3✔
2957
        nodeIsPublic := false
3✔
2958
        errDone := errors.New("done")
3✔
2959
        err := c.forEachNodeChannelTx(tx, nodePub, func(tx kvdb.RTx,
3✔
2960
                info *models.ChannelEdgeInfo, _ *models.ChannelEdgePolicy,
3✔
2961
                _ *models.ChannelEdgePolicy) error {
6✔
2962

3✔
2963
                // If this edge doesn't extend to the source node, we'll
3✔
2964
                // terminate our search as we can now conclude that the node is
3✔
2965
                // publicly advertised within the graph due to the local node
3✔
2966
                // knowing of the current edge.
3✔
2967
                if !bytes.Equal(info.NodeKey1Bytes[:], sourcePubKey) &&
3✔
2968
                        !bytes.Equal(info.NodeKey2Bytes[:], sourcePubKey) {
6✔
2969

3✔
2970
                        nodeIsPublic = true
3✔
2971
                        return errDone
3✔
2972
                }
3✔
2973

2974
                // Since the edge _does_ extend to the source node, we'll also
2975
                // need to ensure that this is a public edge.
2976
                if info.AuthProof != nil {
6✔
2977
                        nodeIsPublic = true
3✔
2978
                        return errDone
3✔
2979
                }
3✔
2980

2981
                // Otherwise, we'll continue our search.
2982
                return nil
3✔
2983
        })
2984
        if err != nil && !errors.Is(err, errDone) {
3✔
2985
                return false, err
×
2986
        }
×
2987

2988
        return nodeIsPublic, nil
3✔
2989
}
2990

2991
// FetchLightningNodeTx attempts to look up a target node by its identity
2992
// public key. If the node isn't found in the database, then
2993
// ErrGraphNodeNotFound is returned. An optional transaction may be provided.
2994
// If none is provided, then a new one will be created.
2995
func (c *KVStore) FetchLightningNodeTx(tx kvdb.RTx, nodePub route.Vertex) (
2996
        *models.LightningNode, error) {
3✔
2997

3✔
2998
        return c.fetchLightningNode(tx, nodePub)
3✔
2999
}
3✔
3000

3001
// FetchLightningNode attempts to look up a target node by its identity public
3002
// key. If the node isn't found in the database, then ErrGraphNodeNotFound is
3003
// returned.
3004
func (c *KVStore) FetchLightningNode(_ context.Context,
3005
        nodePub route.Vertex) (*models.LightningNode, error) {
3✔
3006

3✔
3007
        return c.fetchLightningNode(nil, nodePub)
3✔
3008
}
3✔
3009

3010
// fetchLightningNode attempts to look up a target node by its identity public
3011
// key. If the node isn't found in the database, then ErrGraphNodeNotFound is
3012
// returned. An optional transaction may be provided. If none is provided, then
3013
// a new one will be created.
3014
func (c *KVStore) fetchLightningNode(tx kvdb.RTx,
3015
        nodePub route.Vertex) (*models.LightningNode, error) {
3✔
3016

3✔
3017
        var node *models.LightningNode
3✔
3018
        fetch := func(tx kvdb.RTx) error {
6✔
3019
                // First grab the nodes bucket which stores the mapping from
3✔
3020
                // pubKey to node information.
3✔
3021
                nodes := tx.ReadBucket(nodeBucket)
3✔
3022
                if nodes == nil {
3✔
3023
                        return ErrGraphNotFound
×
3024
                }
×
3025

3026
                // If a key for this serialized public key isn't found, then
3027
                // the target node doesn't exist within the database.
3028
                nodeBytes := nodes.Get(nodePub[:])
3✔
3029
                if nodeBytes == nil {
6✔
3030
                        return ErrGraphNodeNotFound
3✔
3031
                }
3✔
3032

3033
                // If the node is found, then we can deserialize the node
3034
                // information to return to the user.
3035
                nodeReader := bytes.NewReader(nodeBytes)
3✔
3036
                n, err := deserializeLightningNode(nodeReader)
3✔
3037
                if err != nil {
3✔
3038
                        return err
×
3039
                }
×
3040

3041
                node = &n
3✔
3042

3✔
3043
                return nil
3✔
3044
        }
3045

3046
        if tx == nil {
6✔
3047
                err := kvdb.View(
3✔
3048
                        c.db, fetch, func() {
6✔
3049
                                node = nil
3✔
3050
                        },
3✔
3051
                )
3052
                if err != nil {
6✔
3053
                        return nil, err
3✔
3054
                }
3✔
3055

3056
                return node, nil
3✔
3057
        }
3058

UNCOV
3059
        err := fetch(tx)
×
UNCOV
3060
        if err != nil {
×
UNCOV
3061
                return nil, err
×
UNCOV
3062
        }
×
3063

UNCOV
3064
        return node, nil
×
3065
}
3066

3067
// HasLightningNode determines if the graph has a vertex identified by the
3068
// target node identity public key. If the node exists in the database, a
3069
// timestamp of when the data for the node was last updated is returned along
3070
// with a true boolean. Otherwise, an empty time.Time is returned with a false
3071
// boolean.
3072
func (c *KVStore) HasLightningNode(_ context.Context,
3073
        nodePub [33]byte) (time.Time, bool, error) {
3✔
3074

3✔
3075
        var (
3✔
3076
                updateTime time.Time
3✔
3077
                exists     bool
3✔
3078
        )
3✔
3079

3✔
3080
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
6✔
3081
                // First grab the nodes bucket which stores the mapping from
3✔
3082
                // pubKey to node information.
3✔
3083
                nodes := tx.ReadBucket(nodeBucket)
3✔
3084
                if nodes == nil {
3✔
3085
                        return ErrGraphNotFound
×
3086
                }
×
3087

3088
                // If a key for this serialized public key isn't found, we can
3089
                // exit early.
3090
                nodeBytes := nodes.Get(nodePub[:])
3✔
3091
                if nodeBytes == nil {
6✔
3092
                        exists = false
3✔
3093
                        return nil
3✔
3094
                }
3✔
3095

3096
                // Otherwise we continue on to obtain the time stamp
3097
                // representing the last time the data for this node was
3098
                // updated.
3099
                nodeReader := bytes.NewReader(nodeBytes)
3✔
3100
                node, err := deserializeLightningNode(nodeReader)
3✔
3101
                if err != nil {
3✔
3102
                        return err
×
3103
                }
×
3104

3105
                exists = true
3✔
3106
                updateTime = node.LastUpdate
3✔
3107

3✔
3108
                return nil
3✔
3109
        }, func() {
3✔
3110
                updateTime = time.Time{}
3✔
3111
                exists = false
3✔
3112
        })
3✔
3113
        if err != nil {
3✔
3114
                return time.Time{}, exists, err
×
3115
        }
×
3116

3117
        return updateTime, exists, nil
3✔
3118
}
3119

3120
// nodeTraversal is used to traverse all channels of a node given by its
3121
// public key and passes channel information into the specified callback.
3122
func nodeTraversal(tx kvdb.RTx, nodePub []byte, db kvdb.Backend,
3123
        cb func(kvdb.RTx, *models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
3124
                *models.ChannelEdgePolicy) error) error {
3✔
3125

3✔
3126
        traversal := func(tx kvdb.RTx) error {
6✔
3127
                edges := tx.ReadBucket(edgeBucket)
3✔
3128
                if edges == nil {
3✔
3129
                        return ErrGraphNotFound
×
3130
                }
×
3131
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
3✔
3132
                if edgeIndex == nil {
3✔
3133
                        return ErrGraphNoEdgesFound
×
3134
                }
×
3135

3136
                // In order to reach all the edges for this node, we take
3137
                // advantage of the construction of the key-space within the
3138
                // edge bucket. The keys are stored in the form: pubKey ||
3139
                // chanID. Therefore, starting from a chanID of zero, we can
3140
                // scan forward in the bucket, grabbing all the edges for the
3141
                // node. Once the prefix no longer matches, then we know we're
3142
                // done.
3143
                var nodeStart [33 + 8]byte
3✔
3144
                copy(nodeStart[:], nodePub)
3✔
3145
                copy(nodeStart[33:], chanStart[:])
3✔
3146

3✔
3147
                // Starting from the key pubKey || 0, we seek forward in the
3✔
3148
                // bucket until the retrieved key no longer has the public key
3✔
3149
                // as its prefix. This indicates that we've stepped over into
3✔
3150
                // another node's edges, so we can terminate our scan.
3✔
3151
                edgeCursor := edges.ReadCursor()
3✔
3152
                for nodeEdge, _ := edgeCursor.Seek(nodeStart[:]); bytes.HasPrefix(nodeEdge, nodePub); nodeEdge, _ = edgeCursor.Next() { //nolint:ll
6✔
3153
                        // If the prefix still matches, the channel id is
3✔
3154
                        // returned in nodeEdge. Channel id is used to lookup
3✔
3155
                        // the node at the other end of the channel and both
3✔
3156
                        // edge policies.
3✔
3157
                        chanID := nodeEdge[33:]
3✔
3158
                        edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID)
3✔
3159
                        if err != nil {
3✔
3160
                                return err
×
3161
                        }
×
3162

3163
                        outgoingPolicy, err := fetchChanEdgePolicy(
3✔
3164
                                edges, chanID, nodePub,
3✔
3165
                        )
3✔
3166
                        if err != nil {
3✔
3167
                                return err
×
3168
                        }
×
3169

3170
                        otherNode, err := edgeInfo.OtherNodeKeyBytes(nodePub)
3✔
3171
                        if err != nil {
3✔
3172
                                return err
×
3173
                        }
×
3174

3175
                        incomingPolicy, err := fetchChanEdgePolicy(
3✔
3176
                                edges, chanID, otherNode[:],
3✔
3177
                        )
3✔
3178
                        if err != nil {
3✔
3179
                                return err
×
3180
                        }
×
3181

3182
                        // Finally, we execute the callback.
3183
                        err = cb(tx, &edgeInfo, outgoingPolicy, incomingPolicy)
3✔
3184
                        if err != nil {
6✔
3185
                                return err
3✔
3186
                        }
3✔
3187
                }
3188

3189
                return nil
3✔
3190
        }
3191

3192
        // If no transaction was provided, then we'll create a new transaction
3193
        // to execute the transaction within.
3194
        if tx == nil {
6✔
3195
                return kvdb.View(db, traversal, func() {})
6✔
3196
        }
3197

3198
        // Otherwise, we re-use the existing transaction to execute the graph
3199
        // traversal.
3200
        return traversal(tx)
3✔
3201
}
3202

3203
// ForEachNodeChannel iterates through all channels of the given node,
3204
// executing the passed callback with an edge info structure and the policies
3205
// of each end of the channel. The first edge policy is the outgoing edge *to*
3206
// the connecting node, while the second is the incoming edge *from* the
3207
// connecting node. If the callback returns an error, then the iteration is
3208
// halted with the error propagated back up to the caller.
3209
//
3210
// Unknown policies are passed into the callback as nil values.
3211
func (c *KVStore) ForEachNodeChannel(nodePub route.Vertex,
3212
        cb func(*models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
3213
                *models.ChannelEdgePolicy) error) error {
3✔
3214

3✔
3215
        return nodeTraversal(nil, nodePub[:], c.db, func(_ kvdb.RTx,
3✔
3216
                info *models.ChannelEdgeInfo, policy,
3✔
3217
                policy2 *models.ChannelEdgePolicy) error {
6✔
3218

3✔
3219
                return cb(info, policy, policy2)
3✔
3220
        })
3✔
3221
}
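
// A brief, hypothetical sketch (not part of this package's API): counting a
// node's channels that are missing the incoming policy, relying on unknown
// policies being passed to the callback as nil.
func exampleCountMissingIncoming(store *KVStore,
        node route.Vertex) (int, error) {

        var missing int
        err := store.ForEachNodeChannel(node,
                func(_ *models.ChannelEdgeInfo, outgoing,
                        incoming *models.ChannelEdgePolicy) error {

                        if incoming == nil {
                                missing++
                        }

                        return nil
                },
        )
        if err != nil {
                return 0, err
        }

        return missing, nil
}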
3222

3223
// ForEachSourceNodeChannel iterates through all channels of the source node,
3224
// executing the passed callback on each. The callback is provided with the
3225
// channel's outpoint, whether we have a policy for the channel and the channel
3226
// peer's node information.
3227
func (c *KVStore) ForEachSourceNodeChannel(cb func(chanPoint wire.OutPoint,
3228
        havePolicy bool, otherNode *models.LightningNode) error) error {
3✔
3229

3✔
3230
        return kvdb.View(c.db, func(tx kvdb.RTx) error {
6✔
3231
                nodes := tx.ReadBucket(nodeBucket)
3✔
3232
                if nodes == nil {
3✔
3233
                        return ErrGraphNotFound
×
3234
                }
×
3235

3236
                node, err := c.sourceNode(nodes)
3✔
3237
                if err != nil {
3✔
3238
                        return err
×
3239
                }
×
3240

3241
                return nodeTraversal(
3✔
3242
                        tx, node.PubKeyBytes[:], c.db, func(tx kvdb.RTx,
3✔
3243
                                info *models.ChannelEdgeInfo,
3✔
3244
                                policy, _ *models.ChannelEdgePolicy) error {
6✔
3245

3✔
3246
                                peer, err := c.fetchOtherNode(
3✔
3247
                                        tx, info, node.PubKeyBytes[:],
3✔
3248
                                )
3✔
3249
                                if err != nil {
3✔
3250
                                        return err
×
3251
                                }
×
3252

3253
                                return cb(
3✔
3254
                                        info.ChannelPoint, policy != nil, peer,
3✔
3255
                                )
3✔
3256
                        },
3257
                )
3258
        }, func() {})
3✔
3259
}
3260

3261
// forEachNodeChannelTx iterates through all channels of the given node,
3262
// executing the passed callback with an edge info structure and the policies
3263
// of each end of the channel. The first edge policy is the outgoing edge *to*
3264
// the connecting node, while the second is the incoming edge *from* the
3265
// connecting node. If the callback returns an error, then the iteration is
3266
// halted with the error propagated back up to the caller.
3267
//
3268
// Unknown policies are passed into the callback as nil values.
3269
//
3270
// If the caller wishes to re-use an existing boltdb transaction, then it
3271
// should be passed as the first argument. Otherwise, the first argument should
3272
// be nil and a fresh transaction will be created to execute the graph
3273
// traversal.
3274
func (c *KVStore) forEachNodeChannelTx(tx kvdb.RTx,
3275
        nodePub route.Vertex, cb func(kvdb.RTx, *models.ChannelEdgeInfo,
3276
                *models.ChannelEdgePolicy,
3277
                *models.ChannelEdgePolicy) error) error {
3✔
3278

3✔
3279
        return nodeTraversal(tx, nodePub[:], c.db, cb)
3✔
3280
}
3✔
3281

3282
// fetchOtherNode attempts to fetch the full LightningNode that's opposite of
3283
// the target node in the channel. This is useful when one knows the pubkey of
3284
// one of the nodes, and wishes to obtain the full LightningNode for the other
3285
// end of the channel.
3286
func (c *KVStore) fetchOtherNode(tx kvdb.RTx,
3287
        channel *models.ChannelEdgeInfo, thisNodeKey []byte) (
3288
        *models.LightningNode, error) {
3✔
3289

3✔
3290
        // Ensure that the node passed in is actually a member of the channel.
3✔
3291
        var targetNodeBytes [33]byte
3✔
3292
        switch {
3✔
3293
        case bytes.Equal(channel.NodeKey1Bytes[:], thisNodeKey):
3✔
3294
                targetNodeBytes = channel.NodeKey2Bytes
3✔
3295
        case bytes.Equal(channel.NodeKey2Bytes[:], thisNodeKey):
3✔
3296
                targetNodeBytes = channel.NodeKey1Bytes
3✔
3297
        default:
×
3298
                return nil, fmt.Errorf("node not participating in this channel")
×
3299
        }
3300

3301
        var targetNode *models.LightningNode
3✔
3302
        fetchNodeFunc := func(tx kvdb.RTx) error {
6✔
3303
                // First grab the nodes bucket which stores the mapping from
3✔
3304
                // pubKey to node information.
3✔
3305
                nodes := tx.ReadBucket(nodeBucket)
3✔
3306
                if nodes == nil {
3✔
3307
                        return ErrGraphNotFound
×
3308
                }
×
3309

3310
                node, err := fetchLightningNode(nodes, targetNodeBytes[:])
3✔
3311
                if err != nil {
3✔
3312
                        return err
×
3313
                }
×
3314

3315
                targetNode = &node
3✔
3316

3✔
3317
                return nil
3✔
3318
        }
3319

3320
        // If the transaction is nil, then we'll need to create a new one,
3321
        // otherwise we can use the existing db transaction.
3322
        var err error
3✔
3323
        if tx == nil {
3✔
3324
                err = kvdb.View(c.db, fetchNodeFunc, func() {
×
3325
                        targetNode = nil
×
3326
                })
×
3327
        } else {
3✔
3328
                err = fetchNodeFunc(tx)
3✔
3329
        }
3✔
3330

3331
        return targetNode, err
3✔
3332
}
3333

3334
// computeEdgePolicyKeys is a helper function that can be used to compute the
3335
// keys used to index the channel edge policy info for the two nodes of the
3336
// edge. The keys for node 1 and node 2 are returned respectively.
3337
func computeEdgePolicyKeys(info *models.ChannelEdgeInfo) ([]byte, []byte) {
3✔
3338
        var (
3✔
3339
                node1Key [33 + 8]byte
3✔
3340
                node2Key [33 + 8]byte
3✔
3341
        )
3✔
3342

3✔
3343
        copy(node1Key[:], info.NodeKey1Bytes[:])
3✔
3344
        copy(node2Key[:], info.NodeKey2Bytes[:])
3✔
3345

3✔
3346
        byteOrder.PutUint64(node1Key[33:], info.ChannelID)
3✔
3347
        byteOrder.PutUint64(node2Key[33:], info.ChannelID)
3✔
3348

3✔
3349
        return node1Key[:], node2Key[:]
3✔
3350
}
3✔
3351

3352
// FetchChannelEdgesByOutpoint attempts to lookup the two directed edges for
3353
// the channel identified by the funding outpoint. If the channel can't be
3354
// found, then ErrEdgeNotFound is returned. A struct which houses the general
3355
// information for the channel itself is returned as well as two structs that
3356
// contain the routing policies for the channel in either direction.
3357
func (c *KVStore) FetchChannelEdgesByOutpoint(op *wire.OutPoint) (
3358
        *models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
3359
        *models.ChannelEdgePolicy, error) {
3✔
3360

3✔
3361
        var (
3✔
3362
                edgeInfo *models.ChannelEdgeInfo
3✔
3363
                policy1  *models.ChannelEdgePolicy
3✔
3364
                policy2  *models.ChannelEdgePolicy
3✔
3365
        )
3✔
3366

3✔
3367
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
6✔
3368
                // First, grab the node bucket. This will be used to populate
3✔
3369
                // the Node pointers in each edge read from disk.
3✔
3370
                nodes := tx.ReadBucket(nodeBucket)
3✔
3371
                if nodes == nil {
3✔
3372
                        return ErrGraphNotFound
×
3373
                }
×
3374

3375
                // Next, grab the edge bucket which stores the edges, and also
3376
                // the index itself so we can group the directed edges together
3377
                // logically.
3378
                edges := tx.ReadBucket(edgeBucket)
3✔
3379
                if edges == nil {
3✔
3380
                        return ErrGraphNoEdgesFound
×
3381
                }
×
3382
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
3✔
3383
                if edgeIndex == nil {
3✔
3384
                        return ErrGraphNoEdgesFound
×
3385
                }
×
3386

3387
                // If the channel's outpoint doesn't exist within the outpoint
3388
                // index, then the edge does not exist.
3389
                chanIndex := edges.NestedReadBucket(channelPointBucket)
3✔
3390
                if chanIndex == nil {
3✔
3391
                        return ErrGraphNoEdgesFound
×
3392
                }
×
3393
                var b bytes.Buffer
3✔
3394
                if err := WriteOutpoint(&b, op); err != nil {
3✔
3395
                        return err
×
3396
                }
×
3397
                chanID := chanIndex.Get(b.Bytes())
3✔
3398
                if chanID == nil {
6✔
3399
                        return fmt.Errorf("%w: op=%v", ErrEdgeNotFound, op)
3✔
3400
                }
3✔
3401

3402
                // If the channel is found to exist, then we'll first retrieve
3403
                // the general information for the channel.
3404
                edge, err := fetchChanEdgeInfo(edgeIndex, chanID)
3✔
3405
                if err != nil {
3✔
3406
                        return fmt.Errorf("%w: chanID=%x", err, chanID)
×
3407
                }
×
3408
                edgeInfo = &edge
3✔
3409

3✔
3410
                // Once we have the information about the channels' parameters,
3✔
3411
                // we'll fetch the routing policies for each of the directed
3✔
3412
                // edges.
3✔
3413
                e1, e2, err := fetchChanEdgePolicies(edgeIndex, edges, chanID)
3✔
3414
                if err != nil {
3✔
3415
                        return fmt.Errorf("failed to find policy: %w", err)
×
3416
                }
×
3417

3418
                policy1 = e1
3✔
3419
                policy2 = e2
3✔
3420

3✔
3421
                return nil
3✔
3422
        }, func() {
3✔
3423
                edgeInfo = nil
3✔
3424
                policy1 = nil
3✔
3425
                policy2 = nil
3✔
3426
        })
3✔
3427
        if err != nil {
6✔
3428
                return nil, nil, nil, err
3✔
3429
        }
3✔
3430

3431
        return edgeInfo, policy1, policy2, nil
3✔
3432
}
3433

3434
// FetchChannelEdgesByID attempts to lookup the two directed edges for the
3435
// channel identified by the channel ID. If the channel can't be found, then
3436
// ErrEdgeNotFound is returned. A struct which houses the general information
3437
// for the channel itself is returned as well as two structs that contain the
3438
// routing policies for the channel in either direction.
3439
//
3440
// ErrZombieEdge can be returned if the edge is currently marked as a zombie
3441
// within the database. In this case, the ChannelEdgePolicy's will be nil, and
3442
// the ChannelEdgeInfo will only include the public keys of each node.
3443
func (c *KVStore) FetchChannelEdgesByID(chanID uint64) (
3444
        *models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
3445
        *models.ChannelEdgePolicy, error) {
3✔
3446

3✔
3447
        var (
3✔
3448
                edgeInfo  *models.ChannelEdgeInfo
3✔
3449
                policy1   *models.ChannelEdgePolicy
3✔
3450
                policy2   *models.ChannelEdgePolicy
3✔
3451
                channelID [8]byte
3✔
3452
        )
3✔
3453

3✔
3454
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
6✔
3455
                // First, grab the node bucket. This will be used to populate
3✔
3456
                // the Node pointers in each edge read from disk.
3✔
3457
                nodes := tx.ReadBucket(nodeBucket)
3✔
3458
                if nodes == nil {
3✔
3459
                        return ErrGraphNotFound
×
3460
                }
×
3461

3462
                // Next, grab the edge bucket which stores the edges, and also
3463
                // the index itself so we can group the directed edges together
3464
                // logically.
3465
                edges := tx.ReadBucket(edgeBucket)
3✔
3466
                if edges == nil {
3✔
3467
                        return ErrGraphNoEdgesFound
×
3468
                }
×
3469
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
3✔
3470
                if edgeIndex == nil {
3✔
3471
                        return ErrGraphNoEdgesFound
×
3472
                }
×
3473

3474
                byteOrder.PutUint64(channelID[:], chanID)
3✔
3475

3✔
3476
                // Now, attempt to fetch edge.
3✔
3477
                edge, err := fetchChanEdgeInfo(edgeIndex, channelID[:])
3✔
3478

3✔
3479
                // If it doesn't exist, we'll quickly check our zombie index to
3✔
3480
                // see if we've previously marked it as so.
3✔
3481
                if errors.Is(err, ErrEdgeNotFound) {
6✔
3482
                        // If the zombie index doesn't exist, or the edge is not
3✔
3483
                        // marked as a zombie within it, then we'll return the
3✔
3484
                        // original ErrEdgeNotFound error.
3✔
3485
                        zombieIndex := edges.NestedReadBucket(zombieBucket)
3✔
3486
                        if zombieIndex == nil {
3✔
3487
                                return ErrEdgeNotFound
×
3488
                        }
×
3489

3490
                        isZombie, pubKey1, pubKey2 := isZombieEdge(
3✔
3491
                                zombieIndex, chanID,
3✔
3492
                        )
3✔
3493
                        if !isZombie {
6✔
3494
                                return ErrEdgeNotFound
3✔
3495
                        }
3✔
3496

3497
                        // Otherwise, the edge is marked as a zombie, so we'll
3498
                        // populate the edge info with the public keys of each
3499
                        // party as this is the only information we have about
3500
                        // it and return an error signaling so.
3501
                        edgeInfo = &models.ChannelEdgeInfo{
3✔
3502
                                NodeKey1Bytes: pubKey1,
3✔
3503
                                NodeKey2Bytes: pubKey2,
3✔
3504
                        }
3✔
3505

3✔
3506
                        return ErrZombieEdge
3✔
3507
                }
3508

3509
                // Otherwise, we'll just return the error if any.
3510
                if err != nil {
3✔
3511
                        return err
×
3512
                }
×
3513

3514
                edgeInfo = &edge
3✔
3515

3✔
3516
                // Then we'll attempt to fetch the accompanying policies of this
3✔
3517
                // edge.
3✔
3518
                e1, e2, err := fetchChanEdgePolicies(
3✔
3519
                        edgeIndex, edges, channelID[:],
3✔
3520
                )
3✔
3521
                if err != nil {
3✔
3522
                        return err
×
3523
                }
×
3524

3525
                policy1 = e1
3✔
3526
                policy2 = e2
3✔
3527

3✔
3528
                return nil
3✔
3529
        }, func() {
3✔
3530
                edgeInfo = nil
3✔
3531
                policy1 = nil
3✔
3532
                policy2 = nil
3✔
3533
        })
3✔
3534
        if errors.Is(err, ErrZombieEdge) {
6✔
3535
                return edgeInfo, nil, nil, err
3✔
3536
        }
3✔
3537
        if err != nil {
6✔
3538
                return nil, nil, nil, err
3✔
3539
        }
3✔
3540

3541
        return edgeInfo, policy1, policy2, nil
3✔
3542
}
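
// A minimal, hypothetical sketch (not part of this package's API) of the
// zombie-aware lookup described above: on ErrZombieEdge the returned edge info
// still carries both node keys, which is all we retain for zombie channels.
func exampleLookupChannel(store *KVStore, chanID uint64) error {
        info, policy1, policy2, err := store.FetchChannelEdgesByID(chanID)
        switch {
        case errors.Is(err, ErrZombieEdge):
                fmt.Printf("channel %d is a zombie held by %x and %x\n",
                        chanID, info.NodeKey1Bytes[:], info.NodeKey2Bytes[:])
                return nil

        case errors.Is(err, ErrEdgeNotFound):
                fmt.Printf("channel %d is unknown\n", chanID)
                return nil

        case err != nil:
                return err
        }

        fmt.Printf("channel %d known, have policies: %v/%v\n",
                chanID, policy1 != nil, policy2 != nil)

        return nil
}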
3543

3544
// IsPublicNode is a helper method that determines whether the node with the
3545
// given public key is seen as a public node in the graph from the graph's
3546
// source node's point of view.
3547
func (c *KVStore) IsPublicNode(pubKey [33]byte) (bool, error) {
3✔
3548
        var nodeIsPublic bool
3✔
3549
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
6✔
3550
                nodes := tx.ReadBucket(nodeBucket)
3✔
3551
                if nodes == nil {
3✔
3552
                        return ErrGraphNodesNotFound
×
3553
                }
×
3554
                ourPubKey := nodes.Get(sourceKey)
3✔
3555
                if ourPubKey == nil {
3✔
3556
                        return ErrSourceNodeNotSet
×
3557
                }
×
3558
                node, err := fetchLightningNode(nodes, pubKey[:])
3✔
3559
                if err != nil {
3✔
3560
                        return err
×
3561
                }
×
3562

3563
                nodeIsPublic, err = c.isPublic(tx, node.PubKeyBytes, ourPubKey)
3✔
3564

3✔
3565
                return err
3✔
3566
        }, func() {
3✔
3567
                nodeIsPublic = false
3✔
3568
        })
3✔
3569
        if err != nil {
3✔
3570
                return false, err
×
3571
        }
×
3572

3573
        return nodeIsPublic, nil
3✔
3574
}
3575

3576
// genMultiSigP2WSH generates the p2wsh'd multisig script for 2 of 2 pubkeys.
3577
func genMultiSigP2WSH(aPub, bPub []byte) ([]byte, error) {
3✔
3578
        witnessScript, err := input.GenMultiSigScript(aPub, bPub)
3✔
3579
        if err != nil {
3✔
3580
                return nil, err
×
3581
        }
×
3582

3583
        // With the witness script generated, we'll now turn it into a p2wsh
3584
        // script:
3585
        //  * OP_0 <sha256(script)>
3586
        bldr := txscript.NewScriptBuilder(
3✔
3587
                txscript.WithScriptAllocSize(input.P2WSHSize),
3✔
3588
        )
3✔
3589
        bldr.AddOp(txscript.OP_0)
3✔
3590
        scriptHash := sha256.Sum256(witnessScript)
3✔
3591
        bldr.AddData(scriptHash[:])
3✔
3592

3✔
3593
        return bldr.Script()
3✔
3594
}
3595

3596
// EdgePoint couples the outpoint of a channel with the funding script that it
3597
// creates. The FilteredChainView will use this to watch for spends of this
3598
// edge point on chain. We require both of these values as depending on the
3599
// concrete implementation, either the pkScript, or the out point will be used.
3600
type EdgePoint struct {
3601
        // FundingPkScript is the p2wsh multi-sig script of the target channel.
3602
        FundingPkScript []byte
3603

3604
        // OutPoint is the outpoint of the target channel.
3605
        OutPoint wire.OutPoint
3606
}
3607

3608
// String returns a human readable version of the target EdgePoint. We return
3609
// the outpoint directly as it is enough to uniquely identify the edge point.
3610
func (e *EdgePoint) String() string {
×
3611
        return e.OutPoint.String()
×
3612
}
×
3613

3614
// ChannelView returns the verifiable edge information for each active channel
3615
// within the known channel graph. The set of UTXO's (along with their scripts)
3616
// returned are the ones that need to be watched on chain to detect channel
3617
// closes on the resident blockchain.
3618
func (c *KVStore) ChannelView() ([]EdgePoint, error) {
3✔
3619
        var edgePoints []EdgePoint
3✔
3620
        if err := kvdb.View(c.db, func(tx kvdb.RTx) error {
6✔
3621
                // We're going to iterate over the entire channel index, so
3✔
3622
                // we'll need to fetch the edgeBucket to get to the index as
3✔
3623
                // it's a sub-bucket.
3✔
3624
                edges := tx.ReadBucket(edgeBucket)
3✔
3625
                if edges == nil {
3✔
3626
                        return ErrGraphNoEdgesFound
×
3627
                }
×
3628
                chanIndex := edges.NestedReadBucket(channelPointBucket)
3✔
3629
                if chanIndex == nil {
3✔
3630
                        return ErrGraphNoEdgesFound
×
3631
                }
×
3632
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
3✔
3633
                if edgeIndex == nil {
3✔
3634
                        return ErrGraphNoEdgesFound
×
3635
                }
×
3636

3637
                // Once we have the proper bucket, we'll range over each key
3638
                // (which is the channel point for the channel) and decode it,
3639
                // accumulating each entry.
3640
                return chanIndex.ForEach(
3✔
3641
                        func(chanPointBytes, chanID []byte) error {
6✔
3642
                                chanPointReader := bytes.NewReader(
3✔
3643
                                        chanPointBytes,
3✔
3644
                                )
3✔
3645

3✔
3646
                                var chanPoint wire.OutPoint
3✔
3647
                                err := ReadOutpoint(chanPointReader, &chanPoint)
3✔
3648
                                if err != nil {
3✔
3649
                                        return err
×
3650
                                }
×
3651

3652
                                edgeInfo, err := fetchChanEdgeInfo(
3✔
3653
                                        edgeIndex, chanID,
3✔
3654
                                )
3✔
3655
                                if err != nil {
3✔
3656
                                        return err
×
3657
                                }
×
3658

3659
                                pkScript, err := genMultiSigP2WSH(
3✔
3660
                                        edgeInfo.BitcoinKey1Bytes[:],
3✔
3661
                                        edgeInfo.BitcoinKey2Bytes[:],
3✔
3662
                                )
3✔
3663
                                if err != nil {
3✔
3664
                                        return err
×
3665
                                }
×
3666

3667
                                edgePoints = append(edgePoints, EdgePoint{
3✔
3668
                                        FundingPkScript: pkScript,
3✔
3669
                                        OutPoint:        chanPoint,
3✔
3670
                                })
3✔
3671

3✔
3672
                                return nil
3✔
3673
                        },
3674
                )
3675
        }, func() {
3✔
3676
                edgePoints = nil
3✔
3677
        }); err != nil {
3✔
3678
                return nil, err
×
3679
        }
×
3680

3681
        return edgePoints, nil
3✔
3682
}
3683

3684
// MarkEdgeZombie attempts to mark a channel identified by its channel ID as a
3685
// zombie. This method is used on an ad-hoc basis, when channels need to be
3686
// marked as zombies outside the normal pruning cycle.
3687
func (c *KVStore) MarkEdgeZombie(chanID uint64,
UNCOV
3688
        pubKey1, pubKey2 [33]byte) error {
×
UNCOV
3689

×
UNCOV
3690
        c.cacheMu.Lock()
×
UNCOV
3691
        defer c.cacheMu.Unlock()
×
UNCOV
3692

×
UNCOV
3693
        err := kvdb.Batch(c.db, func(tx kvdb.RwTx) error {
×
UNCOV
3694
                edges := tx.ReadWriteBucket(edgeBucket)
×
UNCOV
3695
                if edges == nil {
×
3696
                        return ErrGraphNoEdgesFound
×
3697
                }
×
UNCOV
3698
                zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
×
UNCOV
3699
                if err != nil {
×
3700
                        return fmt.Errorf("unable to create zombie "+
×
3701
                                "bucket: %w", err)
×
3702
                }
×
3703

UNCOV
3704
                return markEdgeZombie(zombieIndex, chanID, pubKey1, pubKey2)
×
3705
        })
UNCOV
3706
        if err != nil {
×
3707
                return err
×
3708
        }
×
3709

UNCOV
3710
        c.rejectCache.remove(chanID)
×
UNCOV
3711
        c.chanCache.remove(chanID)
×
UNCOV
3712

×
UNCOV
3713
        return nil
×
3714
}
3715

3716
// markEdgeZombie marks an edge as a zombie within our zombie index. The public
3717
// keys should represent the node public keys of the two parties involved in the
3718
// edge.
3719
func markEdgeZombie(zombieIndex kvdb.RwBucket, chanID uint64, pubKey1,
3720
        pubKey2 [33]byte) error {
3✔
3721

3✔
3722
        var k [8]byte
3✔
3723
        byteOrder.PutUint64(k[:], chanID)
3✔
3724

3✔
3725
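        // The zombie entry value is the concatenation of both node public
        // keys (33 bytes each), allowing them to be recovered later via
        // isZombieEdge.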
        var v [66]byte
3✔
3726
        copy(v[:33], pubKey1[:])
3✔
3727
        copy(v[33:], pubKey2[:])
3✔
3728

3✔
3729
        return zombieIndex.Put(k[:], v[:])
3✔
3730
}
3✔
3731

3732
// MarkEdgeLive clears an edge from our zombie index, deeming it as live.
UNCOV
3733
func (c *KVStore) MarkEdgeLive(chanID uint64) error {
×
UNCOV
3734
        c.cacheMu.Lock()
×
UNCOV
3735
        defer c.cacheMu.Unlock()
×
UNCOV
3736

×
UNCOV
3737
        return c.markEdgeLiveUnsafe(nil, chanID)
×
UNCOV
3738
}
×
3739

3740
// markEdgeLiveUnsafe clears an edge from the zombie index. This method can be
3741
// called with an existing kvdb.RwTx or the argument can be set to nil in which
3742
// case a new transaction will be created.
3743
//
3744
// NOTE: this method MUST only be called if the cacheMu has already been
3745
// acquired.
UNCOV
3746
func (c *KVStore) markEdgeLiveUnsafe(tx kvdb.RwTx, chanID uint64) error {
×
UNCOV
3747
        dbFn := func(tx kvdb.RwTx) error {
×
UNCOV
3748
                edges := tx.ReadWriteBucket(edgeBucket)
×
UNCOV
3749
                if edges == nil {
×
3750
                        return ErrGraphNoEdgesFound
×
3751
                }
×
UNCOV
3752
                zombieIndex := edges.NestedReadWriteBucket(zombieBucket)
×
UNCOV
3753
                if zombieIndex == nil {
×
3754
                        return nil
×
3755
                }
×
3756

UNCOV
3757
                var k [8]byte
×
UNCOV
3758
                byteOrder.PutUint64(k[:], chanID)
×
UNCOV
3759

×
UNCOV
3760
                if len(zombieIndex.Get(k[:])) == 0 {
×
UNCOV
3761
                        return ErrZombieEdgeNotFound
×
UNCOV
3762
                }
×
3763

UNCOV
3764
                return zombieIndex.Delete(k[:])
×
3765
        }
3766

3767
        // If the transaction is nil, we'll create a new one. Otherwise, we use
3768
        // the existing transaction
UNCOV
3769
        var err error
×
UNCOV
3770
        if tx == nil {
×
UNCOV
3771
                err = kvdb.Update(c.db, dbFn, func() {})
×
3772
        } else {
×
3773
                err = dbFn(tx)
×
3774
        }
×
UNCOV
3775
        if err != nil {
×
UNCOV
3776
                return err
×
UNCOV
3777
        }
×
3778

UNCOV
3779
        c.rejectCache.remove(chanID)
×
UNCOV
3780
        c.chanCache.remove(chanID)
×
UNCOV
3781

×
UNCOV
3782
        return nil
×
3783
}
3784

3785
// IsZombieEdge returns whether the edge is considered zombie. If it is a
3786
// zombie, then the two node public keys corresponding to this edge are also
3787
// returned.
UNCOV
3788
func (c *KVStore) IsZombieEdge(chanID uint64) (bool, [33]byte, [33]byte) {
×
UNCOV
3789
        var (
×
UNCOV
3790
                isZombie         bool
×
UNCOV
3791
                pubKey1, pubKey2 [33]byte
×
UNCOV
3792
        )
×
UNCOV
3793

×
UNCOV
3794
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
×
UNCOV
3795
                edges := tx.ReadBucket(edgeBucket)
×
UNCOV
3796
                if edges == nil {
×
3797
                        return ErrGraphNoEdgesFound
×
3798
                }
×
UNCOV
3799
                zombieIndex := edges.NestedReadBucket(zombieBucket)
×
UNCOV
3800
                if zombieIndex == nil {
×
3801
                        return nil
×
3802
                }
×
3803

UNCOV
3804
                isZombie, pubKey1, pubKey2 = isZombieEdge(zombieIndex, chanID)
×
UNCOV
3805

×
UNCOV
3806
                return nil
×
UNCOV
3807
        }, func() {
×
UNCOV
3808
                isZombie = false
×
UNCOV
3809
                pubKey1 = [33]byte{}
×
UNCOV
3810
                pubKey2 = [33]byte{}
×
UNCOV
3811
        })
×
UNCOV
3812
        if err != nil {
×
3813
                return false, [33]byte{}, [33]byte{}
×
3814
        }
×
3815

UNCOV
3816
        return isZombie, pubKey1, pubKey2
×
3817
}
3818

3819
// isZombieEdge returns whether an entry exists for the given channel in the
3820
// zombie index. If an entry exists, then the two node public keys corresponding
3821
// to this edge are also returned.
3822
func isZombieEdge(zombieIndex kvdb.RBucket,
3823
        chanID uint64) (bool, [33]byte, [33]byte) {
3✔
3824

3✔
3825
        var k [8]byte
3✔
3826
        byteOrder.PutUint64(k[:], chanID)
3✔
3827

3✔
3828
        v := zombieIndex.Get(k[:])
3✔
3829
        if v == nil {
6✔
3830
                return false, [33]byte{}, [33]byte{}
3✔
3831
        }
3✔
3832

3833
        var pubKey1, pubKey2 [33]byte
3✔
3834
        copy(pubKey1[:], v[:33])
3✔
3835
        copy(pubKey2[:], v[33:])
3✔
3836

3✔
3837
        return true, pubKey1, pubKey2
3✔
3838
}
3839

3840
// NumZombies returns the current number of zombie channels in the graph.
UNCOV
3841
func (c *KVStore) NumZombies() (uint64, error) {
×
UNCOV
3842
        var numZombies uint64
×
UNCOV
3843
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
×
UNCOV
3844
                edges := tx.ReadBucket(edgeBucket)
×
UNCOV
3845
                if edges == nil {
×
3846
                        return nil
×
3847
                }
×
UNCOV
3848
                zombieIndex := edges.NestedReadBucket(zombieBucket)
×
UNCOV
3849
                if zombieIndex == nil {
×
3850
                        return nil
×
3851
                }
×
3852

UNCOV
3853
                return zombieIndex.ForEach(func(_, _ []byte) error {
×
UNCOV
3854
                        numZombies++
×
UNCOV
3855
                        return nil
×
UNCOV
3856
                })
×
UNCOV
3857
        }, func() {
×
UNCOV
3858
                numZombies = 0
×
UNCOV
3859
        })
×
UNCOV
3860
        if err != nil {
×
3861
                return 0, err
×
3862
        }
×
3863

UNCOV
3864
        return numZombies, nil
×
3865
}
3866

3867
// PutClosedScid stores a SCID for a closed channel in the database. This is so
3868
// that we can ignore channel announcements that we know to be closed without
3869
// having to validate them and fetch a block.
UNCOV
3870
func (c *KVStore) PutClosedScid(scid lnwire.ShortChannelID) error {
×
UNCOV
3871
        return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
×
UNCOV
3872
                closedScids, err := tx.CreateTopLevelBucket(closedScidBucket)
×
UNCOV
3873
                if err != nil {
×
3874
                        return err
×
3875
                }
×
3876

UNCOV
3877
                var k [8]byte
×
UNCOV
3878
                byteOrder.PutUint64(k[:], scid.ToUint64())
×
UNCOV
3879

×
UNCOV
3880
                return closedScids.Put(k[:], []byte{})
×
UNCOV
3881
        }, func() {})
×
3882
}
3883

3884
// IsClosedScid checks whether a channel identified by the passed in scid is
3885
// closed. This helps avoid having to perform expensive validation checks.
3886
// TODO: Add an LRU cache to cut down on disk reads.
3887
func (c *KVStore) IsClosedScid(scid lnwire.ShortChannelID) (bool, error) {
3✔
3888
        var isClosed bool
3✔
3889
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
6✔
3890
                closedScids := tx.ReadBucket(closedScidBucket)
3✔
3891
                if closedScids == nil {
3✔
3892
                        return ErrClosedScidsNotFound
×
3893
                }
×
3894

3895
                var k [8]byte
3✔
3896
                byteOrder.PutUint64(k[:], scid.ToUint64())
3✔
3897

3✔
3898
                if closedScids.Get(k[:]) != nil {
3✔
UNCOV
3899
                        isClosed = true
×
UNCOV
3900
                        return nil
×
UNCOV
3901
                }
×
3902

3903
                return nil
3✔
3904
        }, func() {
3✔
3905
                isClosed = false
3✔
3906
        })
3✔
3907
        if err != nil {
3✔
3908
                return false, err
×
3909
        }
×
3910

3911
        return isClosed, nil
3✔
3912
}
3913

3914
// GraphSession will provide the call-back with access to a NodeTraverser
3915
// instance which can be used to perform queries against the channel graph.
UNCOV
3916
func (c *KVStore) GraphSession(cb func(graph NodeTraverser) error) error {
×
UNCOV
3917
        return c.db.View(func(tx walletdb.ReadTx) error {
×
UNCOV
3918
                return cb(&nodeTraverserSession{
×
UNCOV
3919
                        db: c,
×
UNCOV
3920
                        tx: tx,
×
UNCOV
3921
                })
×
UNCOV
3922
        }, func() {})
×
3923
}
3924

3925
// nodeTraverserSession implements the NodeTraverser interface but with a
3926
// backing read only transaction for a consistent view of the graph.
3927
type nodeTraverserSession struct {
3928
        tx kvdb.RTx
3929
        db *KVStore
3930
}
3931

3932
// ForEachNodeDirectedChannel calls the callback for every channel of the given
3933
// node.
3934
//
3935
// NOTE: Part of the NodeTraverser interface.
3936
func (c *nodeTraverserSession) ForEachNodeDirectedChannel(nodePub route.Vertex,
UNCOV
3937
        cb func(channel *DirectedChannel) error) error {
×
UNCOV
3938

×
UNCOV
3939
        return c.db.forEachNodeDirectedChannel(c.tx, nodePub, cb)
×
UNCOV
3940
}
×
3941

3942
// FetchNodeFeatures returns the features of the given node. If the node is
3943
// unknown, assume no additional features are supported.
3944
//
3945
// NOTE: Part of the NodeTraverser interface.
3946
func (c *nodeTraverserSession) FetchNodeFeatures(nodePub route.Vertex) (
UNCOV
3947
        *lnwire.FeatureVector, error) {
×
UNCOV
3948

×
UNCOV
3949
        return c.db.fetchNodeFeatures(c.tx, nodePub)
×
UNCOV
3950
}
×
3951

3952
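// putLightningNode serializes the given node and writes it to the node
// bucket, keyed by its compressed public key. It also updates the alias
// index and the node update index so the node's last update time can be
// queried efficiently.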
func putLightningNode(nodeBucket, aliasBucket, updateIndex kvdb.RwBucket,
3953
        node *models.LightningNode) error {
3✔
3954

3✔
3955
        var (
3✔
3956
                scratch [16]byte
3✔
3957
                b       bytes.Buffer
3✔
3958
        )
3✔
3959

3✔
3960
        pub, err := node.PubKey()
3✔
3961
        if err != nil {
3✔
3962
                return err
×
3963
        }
×
3964
        nodePub := pub.SerializeCompressed()
3✔
3965

3✔
3966
        // If the node has the update time set, write it, else write 0.
3✔
3967
        updateUnix := uint64(0)
3✔
3968
        if node.LastUpdate.Unix() > 0 {
6✔
3969
                updateUnix = uint64(node.LastUpdate.Unix())
3✔
3970
        }
3✔
3971

3972
        byteOrder.PutUint64(scratch[:8], updateUnix)
3✔
3973
        if _, err := b.Write(scratch[:8]); err != nil {
3✔
3974
                return err
×
3975
        }
×
3976

3977
        if _, err := b.Write(nodePub); err != nil {
3✔
3978
                return err
×
3979
        }
×
3980

3981
        // If we got a node announcement for this node, we will have the rest
3982
        // of the data available. If not, we don't have more data to write.
3983
        if !node.HaveNodeAnnouncement {
6✔
3984
                // Write HaveNodeAnnouncement=0.
3✔
3985
                byteOrder.PutUint16(scratch[:2], 0)
3✔
3986
                if _, err := b.Write(scratch[:2]); err != nil {
3✔
3987
                        return err
×
3988
                }
×
3989

3990
                return nodeBucket.Put(nodePub, b.Bytes())
3✔
3991
        }
3992

3993
        // Write HaveNodeAnnouncement=1.
3994
        byteOrder.PutUint16(scratch[:2], 1)
3✔
3995
        if _, err := b.Write(scratch[:2]); err != nil {
3✔
3996
                return err
×
3997
        }
×
3998

3999
        if err := binary.Write(&b, byteOrder, node.Color.R); err != nil {
3✔
4000
                return err
×
4001
        }
×
4002
        if err := binary.Write(&b, byteOrder, node.Color.G); err != nil {
3✔
4003
                return err
×
4004
        }
×
4005
        if err := binary.Write(&b, byteOrder, node.Color.B); err != nil {
3✔
4006
                return err
×
4007
        }
×
4008

4009
        if err := wire.WriteVarString(&b, 0, node.Alias); err != nil {
3✔
4010
                return err
×
4011
        }
×
4012

4013
        if err := node.Features.Encode(&b); err != nil {
3✔
4014
                return err
×
4015
        }
×
4016

4017
        numAddresses := uint16(len(node.Addresses))
3✔
4018
        byteOrder.PutUint16(scratch[:2], numAddresses)
3✔
4019
        if _, err := b.Write(scratch[:2]); err != nil {
3✔
4020
                return err
×
4021
        }
×
4022

4023
        for _, address := range node.Addresses {
6✔
4024
                if err := SerializeAddr(&b, address); err != nil {
3✔
4025
                        return err
×
4026
                }
×
4027
        }
4028

4029
        sigLen := len(node.AuthSigBytes)
3✔
4030
        if sigLen > 80 {
3✔
4031
                return fmt.Errorf("max sig len allowed is 80, had %v",
×
4032
                        sigLen)
×
4033
        }
×
4034

4035
        err = wire.WriteVarBytes(&b, 0, node.AuthSigBytes)
3✔
4036
        if err != nil {
3✔
4037
                return err
×
4038
        }
×
4039

4040
        if len(node.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes {
3✔
4041
                return ErrTooManyExtraOpaqueBytes(len(node.ExtraOpaqueData))
×
4042
        }
×
4043
        err = wire.WriteVarBytes(&b, 0, node.ExtraOpaqueData)
3✔
4044
        if err != nil {
3✔
4045
                return err
×
4046
        }
×
4047

4048
        if err := aliasBucket.Put(nodePub, []byte(node.Alias)); err != nil {
3✔
4049
                return err
×
4050
        }
×
4051

4052
        // With the alias bucket updated, we'll now update the index that
4053
        // tracks the time series of node updates.
4054
        var indexKey [8 + 33]byte
3✔
4055
        byteOrder.PutUint64(indexKey[:8], updateUnix)
3✔
4056
        copy(indexKey[8:], nodePub)
3✔
4057

3✔
4058
        // If there was already an old index entry for this node, then we'll
3✔
4059
        // delete the old one before we write the new entry.
3✔
4060
        if nodeBytes := nodeBucket.Get(nodePub); nodeBytes != nil {
6✔
4061
                // Extract the old update time so we can reconstruct the
3✔
4062
                // prior index key to delete it from the index.
3✔
4063
                oldUpdateTime := nodeBytes[:8]
3✔
4064

3✔
4065
                var oldIndexKey [8 + 33]byte
3✔
4066
                copy(oldIndexKey[:8], oldUpdateTime)
3✔
4067
                copy(oldIndexKey[8:], nodePub)
3✔
4068

3✔
4069
                if err := updateIndex.Delete(oldIndexKey[:]); err != nil {
3✔
4070
                        return err
×
4071
                }
×
4072
        }
4073

4074
        if err := updateIndex.Put(indexKey[:], nil); err != nil {
3✔
4075
                return err
×
4076
        }
×
4077

4078
        return nodeBucket.Put(nodePub, b.Bytes())
3✔
4079
}
4080

4081
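// fetchLightningNode looks up a node by its compressed public key within the
// given node bucket, returning ErrGraphNodeNotFound if no entry exists.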
func fetchLightningNode(nodeBucket kvdb.RBucket,
4082
        nodePub []byte) (models.LightningNode, error) {
3✔
4083

3✔
4084
        nodeBytes := nodeBucket.Get(nodePub)
3✔
4085
        if nodeBytes == nil {
6✔
4086
                return models.LightningNode{}, ErrGraphNodeNotFound
3✔
4087
        }
3✔
4088

4089
        nodeReader := bytes.NewReader(nodeBytes)
3✔
4090

3✔
4091
        return deserializeLightningNode(nodeReader)
3✔
4092
}
4093

4094
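// deserializeLightningNodeCacheable decodes only the public key and feature
// vector of a serialized node, skipping the remaining on-disk fields.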
func deserializeLightningNodeCacheable(r io.Reader) (route.Vertex,
4095
        *lnwire.FeatureVector, error) {
3✔
4096

3✔
4097
        var (
3✔
4098
                pubKey      route.Vertex
3✔
4099
                features    = lnwire.EmptyFeatureVector()
3✔
4100
                nodeScratch [8]byte
3✔
4101
        )
3✔
4102

3✔
4103
        // Skip ahead:
3✔
4104
        // - LastUpdate (8 bytes)
3✔
4105
        if _, err := r.Read(nodeScratch[:]); err != nil {
3✔
4106
                return pubKey, nil, err
×
4107
        }
×
4108

4109
        if _, err := io.ReadFull(r, pubKey[:]); err != nil {
3✔
4110
                return pubKey, nil, err
×
4111
        }
×
4112

4113
        // Read the node announcement flag.
4114
        if _, err := r.Read(nodeScratch[:2]); err != nil {
3✔
4115
                return pubKey, nil, err
×
4116
        }
×
4117
        hasNodeAnn := byteOrder.Uint16(nodeScratch[:2])
3✔
4118

3✔
4119
        // The rest of the data is optional, and will only be there if we got a
3✔
4120
        // node announcement for this node.
3✔
4121
        if hasNodeAnn == 0 {
6✔
4122
                return pubKey, features, nil
3✔
4123
        }
3✔
4124

4125
        // We did get a node announcement for this node, so we'll have the rest
4126
        // of the data available.
4127
        var rgb uint8
3✔
4128
        if err := binary.Read(r, byteOrder, &rgb); err != nil {
3✔
4129
                return pubKey, nil, err
×
4130
        }
×
4131
        if err := binary.Read(r, byteOrder, &rgb); err != nil {
3✔
4132
                return pubKey, nil, err
×
4133
        }
×
4134
        if err := binary.Read(r, byteOrder, &rgb); err != nil {
3✔
4135
                return pubKey, nil, err
×
4136
        }
×
4137

4138
        if _, err := wire.ReadVarString(r, 0); err != nil {
3✔
4139
                return pubKey, nil, err
×
4140
        }
×
4141

4142
        if err := features.Decode(r); err != nil {
3✔
4143
                return pubKey, nil, err
×
4144
        }
×
4145

4146
        return pubKey, features, nil
3✔
4147
}
4148

4149
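// deserializeLightningNode decodes a full LightningNode from the given
// reader, in the same format produced by putLightningNode.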
func deserializeLightningNode(r io.Reader) (models.LightningNode, error) {
3✔
4150
        var (
3✔
4151
                node    models.LightningNode
3✔
4152
                scratch [8]byte
3✔
4153
                err     error
3✔
4154
        )
3✔
4155

3✔
4156
        // Always populate a feature vector, even if we don't have a node
3✔
4157
        // announcement and short circuit below.
3✔
4158
        node.Features = lnwire.EmptyFeatureVector()
3✔
4159

3✔
4160
        if _, err := r.Read(scratch[:]); err != nil {
3✔
4161
                return models.LightningNode{}, err
×
4162
        }
×
4163

4164
        unix := int64(byteOrder.Uint64(scratch[:]))
3✔
4165
        node.LastUpdate = time.Unix(unix, 0)
3✔
4166

3✔
4167
        if _, err := io.ReadFull(r, node.PubKeyBytes[:]); err != nil {
3✔
4168
                return models.LightningNode{}, err
×
4169
        }
×
4170

4171
        if _, err := r.Read(scratch[:2]); err != nil {
3✔
4172
                return models.LightningNode{}, err
×
4173
        }
×
4174

4175
        hasNodeAnn := byteOrder.Uint16(scratch[:2])
3✔
4176
        if hasNodeAnn == 1 {
6✔
4177
                node.HaveNodeAnnouncement = true
3✔
4178
        } else {
6✔
4179
                node.HaveNodeAnnouncement = false
3✔
4180
        }
3✔
4181

4182
        // The rest of the data is optional, and will only be there if we got a
4183
        // node announcement for this node.
4184
        if !node.HaveNodeAnnouncement {
6✔
4185
                return node, nil
3✔
4186
        }
3✔
4187

4188
        // We did get a node announcement for this node, so we'll have the rest
4189
        // of the data available.
4190
        if err := binary.Read(r, byteOrder, &node.Color.R); err != nil {
3✔
4191
                return models.LightningNode{}, err
×
4192
        }
×
4193
        if err := binary.Read(r, byteOrder, &node.Color.G); err != nil {
3✔
4194
                return models.LightningNode{}, err
×
4195
        }
×
4196
        if err := binary.Read(r, byteOrder, &node.Color.B); err != nil {
3✔
4197
                return models.LightningNode{}, err
×
4198
        }
×
4199

4200
        node.Alias, err = wire.ReadVarString(r, 0)
3✔
4201
        if err != nil {
3✔
4202
                return models.LightningNode{}, err
×
4203
        }
×
4204

4205
        err = node.Features.Decode(r)
3✔
4206
        if err != nil {
3✔
4207
                return models.LightningNode{}, err
×
4208
        }
×
4209

4210
        if _, err := r.Read(scratch[:2]); err != nil {
3✔
4211
                return models.LightningNode{}, err
×
4212
        }
×
4213
        numAddresses := int(byteOrder.Uint16(scratch[:2]))
3✔
4214

3✔
4215
        var addresses []net.Addr
3✔
4216
        for i := 0; i < numAddresses; i++ {
6✔
4217
                address, err := DeserializeAddr(r)
3✔
4218
                if err != nil {
3✔
4219
                        return models.LightningNode{}, err
×
4220
                }
×
4221
                addresses = append(addresses, address)
3✔
4222
        }
4223
        node.Addresses = addresses
3✔
4224

3✔
4225
        node.AuthSigBytes, err = wire.ReadVarBytes(r, 0, 80, "sig")
3✔
4226
        if err != nil {
3✔
4227
                return models.LightningNode{}, err
×
4228
        }
×
4229

4230
        // We'll try and see if there are any opaque bytes left; if not, then
4231
        // we'll ignore the EOF error and return the node as is.
4232
        extraBytes, err := wire.ReadVarBytes(
3✔
4233
                r, 0, MaxAllowedExtraOpaqueBytes, "blob",
3✔
4234
        )
3✔
4235
        switch {
3✔
4236
        case errors.Is(err, io.ErrUnexpectedEOF):
×
4237
        case errors.Is(err, io.EOF):
×
4238
        case err != nil:
×
4239
                return models.LightningNode{}, err
×
4240
        }
4241

4242
        if len(extraBytes) > 0 {
3✔
UNCOV
4243
                node.ExtraOpaqueData = extraBytes
×
UNCOV
4244
        }
×
4245

4246
        return node, nil
3✔
4247
}
4248

4249
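// putChanEdgeInfo serializes the given channel edge info and writes it to the
// edge index bucket, keyed by the channel ID.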
func putChanEdgeInfo(edgeIndex kvdb.RwBucket,
4250
        edgeInfo *models.ChannelEdgeInfo, chanID [8]byte) error {
3✔
4251

3✔
4252
        var b bytes.Buffer
3✔
4253

3✔
4254
        if _, err := b.Write(edgeInfo.NodeKey1Bytes[:]); err != nil {
3✔
4255
                return err
×
4256
        }
×
4257
        if _, err := b.Write(edgeInfo.NodeKey2Bytes[:]); err != nil {
3✔
4258
                return err
×
4259
        }
×
4260
        if _, err := b.Write(edgeInfo.BitcoinKey1Bytes[:]); err != nil {
3✔
4261
                return err
×
4262
        }
×
4263
        if _, err := b.Write(edgeInfo.BitcoinKey2Bytes[:]); err != nil {
3✔
4264
                return err
×
4265
        }
×
4266

4267
        if err := wire.WriteVarBytes(&b, 0, edgeInfo.Features); err != nil {
3✔
4268
                return err
×
4269
        }
×
4270

4271
        authProof := edgeInfo.AuthProof
3✔
4272
        var nodeSig1, nodeSig2, bitcoinSig1, bitcoinSig2 []byte
3✔
4273
        if authProof != nil {
6✔
4274
                nodeSig1 = authProof.NodeSig1Bytes
3✔
4275
                nodeSig2 = authProof.NodeSig2Bytes
3✔
4276
                bitcoinSig1 = authProof.BitcoinSig1Bytes
3✔
4277
                bitcoinSig2 = authProof.BitcoinSig2Bytes
3✔
4278
        }
3✔
4279

4280
        if err := wire.WriteVarBytes(&b, 0, nodeSig1); err != nil {
3✔
4281
                return err
×
4282
        }
×
4283
        if err := wire.WriteVarBytes(&b, 0, nodeSig2); err != nil {
3✔
4284
                return err
×
4285
        }
×
4286
        if err := wire.WriteVarBytes(&b, 0, bitcoinSig1); err != nil {
3✔
4287
                return err
×
4288
        }
×
4289
        if err := wire.WriteVarBytes(&b, 0, bitcoinSig2); err != nil {
3✔
4290
                return err
×
4291
        }
×
4292

4293
        if err := WriteOutpoint(&b, &edgeInfo.ChannelPoint); err != nil {
3✔
4294
                return err
×
4295
        }
×
4296
        err := binary.Write(&b, byteOrder, uint64(edgeInfo.Capacity))
3✔
4297
        if err != nil {
3✔
4298
                return err
×
4299
        }
×
4300
        if _, err := b.Write(chanID[:]); err != nil {
3✔
4301
                return err
×
4302
        }
×
4303
        if _, err := b.Write(edgeInfo.ChainHash[:]); err != nil {
3✔
4304
                return err
×
4305
        }
×
4306

4307
        if len(edgeInfo.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes {
3✔
4308
                return ErrTooManyExtraOpaqueBytes(len(edgeInfo.ExtraOpaqueData))
×
4309
        }
×
4310
        err = wire.WriteVarBytes(&b, 0, edgeInfo.ExtraOpaqueData)
3✔
4311
        if err != nil {
3✔
4312
                return err
×
4313
        }
×
4314

4315
        return edgeIndex.Put(chanID[:], b.Bytes())
3✔
4316
}
4317

4318
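// fetchChanEdgeInfo looks up the static channel edge info for the given
// channel ID within the edge index, returning ErrEdgeNotFound if no entry
// exists.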
func fetchChanEdgeInfo(edgeIndex kvdb.RBucket,
4319
        chanID []byte) (models.ChannelEdgeInfo, error) {
3✔
4320

3✔
4321
        edgeInfoBytes := edgeIndex.Get(chanID)
3✔
4322
        if edgeInfoBytes == nil {
6✔
4323
                return models.ChannelEdgeInfo{}, ErrEdgeNotFound
3✔
4324
        }
3✔
4325

4326
        edgeInfoReader := bytes.NewReader(edgeInfoBytes)
3✔
4327

3✔
4328
        return deserializeChanEdgeInfo(edgeInfoReader)
3✔
4329
}
4330

4331
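// deserializeChanEdgeInfo decodes a ChannelEdgeInfo from the given reader, in
// the same format produced by putChanEdgeInfo.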
func deserializeChanEdgeInfo(r io.Reader) (models.ChannelEdgeInfo, error) {
3✔
4332
        var (
3✔
4333
                err      error
3✔
4334
                edgeInfo models.ChannelEdgeInfo
3✔
4335
        )
3✔
4336

3✔
4337
        if _, err := io.ReadFull(r, edgeInfo.NodeKey1Bytes[:]); err != nil {
3✔
4338
                return models.ChannelEdgeInfo{}, err
×
4339
        }
×
4340
        if _, err := io.ReadFull(r, edgeInfo.NodeKey2Bytes[:]); err != nil {
3✔
4341
                return models.ChannelEdgeInfo{}, err
×
4342
        }
×
4343
        if _, err := io.ReadFull(r, edgeInfo.BitcoinKey1Bytes[:]); err != nil {
3✔
4344
                return models.ChannelEdgeInfo{}, err
×
4345
        }
×
4346
        if _, err := io.ReadFull(r, edgeInfo.BitcoinKey2Bytes[:]); err != nil {
3✔
4347
                return models.ChannelEdgeInfo{}, err
×
4348
        }
×
4349

4350
        edgeInfo.Features, err = wire.ReadVarBytes(r, 0, 900, "features")
3✔
4351
        if err != nil {
3✔
4352
                return models.ChannelEdgeInfo{}, err
×
4353
        }
×
4354

4355
        proof := &models.ChannelAuthProof{}
3✔
4356

3✔
4357
        proof.NodeSig1Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
3✔
4358
        if err != nil {
3✔
4359
                return models.ChannelEdgeInfo{}, err
×
4360
        }
×
4361
        proof.NodeSig2Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
3✔
4362
        if err != nil {
3✔
4363
                return models.ChannelEdgeInfo{}, err
×
4364
        }
×
4365
        proof.BitcoinSig1Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
3✔
4366
        if err != nil {
3✔
4367
                return models.ChannelEdgeInfo{}, err
×
4368
        }
×
4369
        proof.BitcoinSig2Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
3✔
4370
        if err != nil {
3✔
4371
                return models.ChannelEdgeInfo{}, err
×
4372
        }
×
4373

4374
        if !proof.IsEmpty() {
6✔
4375
                edgeInfo.AuthProof = proof
3✔
4376
        }
3✔
4377

4378
        edgeInfo.ChannelPoint = wire.OutPoint{}
3✔
4379
        if err := ReadOutpoint(r, &edgeInfo.ChannelPoint); err != nil {
3✔
4380
                return models.ChannelEdgeInfo{}, err
×
4381
        }
×
4382
        if err := binary.Read(r, byteOrder, &edgeInfo.Capacity); err != nil {
3✔
4383
                return models.ChannelEdgeInfo{}, err
×
4384
        }
×
4385
        if err := binary.Read(r, byteOrder, &edgeInfo.ChannelID); err != nil {
3✔
4386
                return models.ChannelEdgeInfo{}, err
×
4387
        }
×
4388

4389
        if _, err := io.ReadFull(r, edgeInfo.ChainHash[:]); err != nil {
3✔
4390
                return models.ChannelEdgeInfo{}, err
×
4391
        }
×
4392

4393
        // We'll try and see if there are any opaque bytes left; if not, then
4394
        // we'll ignore the EOF error and return the edge as is.
4395
        edgeInfo.ExtraOpaqueData, err = wire.ReadVarBytes(
3✔
4396
                r, 0, MaxAllowedExtraOpaqueBytes, "blob",
3✔
4397
        )
3✔
4398
        switch {
3✔
4399
        case errors.Is(err, io.ErrUnexpectedEOF):
×
4400
        case errors.Is(err, io.EOF):
×
4401
        case err != nil:
×
4402
                return models.ChannelEdgeInfo{}, err
×
4403
        }
4404

4405
        return edgeInfo, nil
3✔
4406
}
4407

4408
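// putChanEdgePolicy serializes the given channel edge policy and writes it to
// the edges bucket under a key made up of the advertising node's public key
// followed by the channel ID. It also maintains the edge update index and the
// disabled edge policy index.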
func putChanEdgePolicy(edges kvdb.RwBucket, edge *models.ChannelEdgePolicy,
4409
        from, to []byte) error {
3✔
4410

3✔
4411
        var edgeKey [33 + 8]byte
3✔
4412
        copy(edgeKey[:], from)
3✔
4413
        byteOrder.PutUint64(edgeKey[33:], edge.ChannelID)
3✔
4414

3✔
4415
        var b bytes.Buffer
3✔
4416
        if err := serializeChanEdgePolicy(&b, edge, to); err != nil {
3✔
UNCOV
4417
                return err
×
UNCOV
4418
        }
×
4419

4420
        // Before we write out the new edge, we'll create a new entry in the
4421
        // update index in order to keep it fresh.
4422
        updateUnix := uint64(edge.LastUpdate.Unix())
3✔
4423
        var indexKey [8 + 8]byte
3✔
4424
        byteOrder.PutUint64(indexKey[:8], updateUnix)
3✔
4425
        byteOrder.PutUint64(indexKey[8:], edge.ChannelID)
3✔
4426

3✔
4427
        updateIndex, err := edges.CreateBucketIfNotExists(edgeUpdateIndexBucket)
3✔
4428
        if err != nil {
3✔
4429
                return err
×
4430
        }
×
4431

4432
        // If there was already an entry for this edge, then we'll need to
4433
        // delete the old one to ensure we don't leave around any after-images.
4434
        // An unknown policy value does not have an update time recorded, so
4435
        // it also does not need to be removed.
4436
        if edgeBytes := edges.Get(edgeKey[:]); edgeBytes != nil &&
3✔
4437
                !bytes.Equal(edgeBytes, unknownPolicy) {
6✔
4438

3✔
4439
                // In order to delete the old entry, we'll need to obtain the
3✔
4440
                // *prior* update time in order to delete it. To do this, we'll
3✔
4441
                // need to deserialize the existing policy within the database
3✔
4442
                // (now outdated by the new one), and delete its corresponding
3✔
4443
                // entry within the update index. We'll ignore any
3✔
4444
                // ErrEdgePolicyOptionalFieldNotFound or ErrParsingExtraTLVBytes
3✔
4445
                // errors, as we only need the channel ID and update time to
3✔
4446
                // delete the entry.
3✔
4447
                //
3✔
4448
                // TODO(halseth): get rid of these invalid policies in a
3✔
4449
                // migration.
3✔
4450
                // TODO(elle): complete the above TODO in migration from kvdb
3✔
4451
                // to SQL.
3✔
4452
                oldEdgePolicy, err := deserializeChanEdgePolicy(
3✔
4453
                        bytes.NewReader(edgeBytes),
3✔
4454
                )
3✔
4455
                if err != nil &&
3✔
4456
                        !errors.Is(err, ErrEdgePolicyOptionalFieldNotFound) &&
3✔
4457
                        !errors.Is(err, ErrParsingExtraTLVBytes) {
3✔
4458

×
4459
                        return err
×
4460
                }
×
4461

4462
                oldUpdateTime := uint64(oldEdgePolicy.LastUpdate.Unix())
3✔
4463

3✔
4464
                var oldIndexKey [8 + 8]byte
3✔
4465
                byteOrder.PutUint64(oldIndexKey[:8], oldUpdateTime)
3✔
4466
                byteOrder.PutUint64(oldIndexKey[8:], edge.ChannelID)
3✔
4467

3✔
4468
                if err := updateIndex.Delete(oldIndexKey[:]); err != nil {
3✔
4469
                        return err
×
4470
                }
×
4471
        }
4472

4473
        if err := updateIndex.Put(indexKey[:], nil); err != nil {
3✔
4474
                return err
×
4475
        }
×
4476

4477
        err = updateEdgePolicyDisabledIndex(
3✔
4478
                edges, edge.ChannelID,
3✔
4479
                edge.ChannelFlags&lnwire.ChanUpdateDirection > 0,
3✔
4480
                edge.IsDisabled(),
3✔
4481
        )
3✔
4482
        if err != nil {
3✔
4483
                return err
×
4484
        }
×
4485

4486
        return edges.Put(edgeKey[:], b.Bytes())
3✔
4487
}
4488

4489
// updateEdgePolicyDisabledIndex is used to update the disabledEdgePolicyIndex
4490
// bucket by either adding a new disabled ChannelEdgePolicy or removing an
4491
// existing one.
4492
// The direction represents the direction of the edge and disabled is used for
4493
// deciding whether to remove or add an entry to the bucket.
4494
// In general a channel is disabled if two entries for the same chanID exist
4495
// in this bucket.
4496
// Maintaining the bucket this way allows fast retrieval of disabled
4497
// channels, for example when pruning is needed.
4498
func updateEdgePolicyDisabledIndex(edges kvdb.RwBucket, chanID uint64,
4499
        direction bool, disabled bool) error {
3✔
4500

3✔
4501
        var disabledEdgeKey [8 + 1]byte
3✔
4502
        byteOrder.PutUint64(disabledEdgeKey[0:], chanID)
3✔
4503
        if direction {
6✔
4504
                disabledEdgeKey[8] = 1
3✔
4505
        }
3✔
4506

4507
        disabledEdgePolicyIndex, err := edges.CreateBucketIfNotExists(
3✔
4508
                disabledEdgePolicyBucket,
3✔
4509
        )
3✔
4510
        if err != nil {
3✔
4511
                return err
×
4512
        }
×
4513

4514
        if disabled {
6✔
4515
                return disabledEdgePolicyIndex.Put(disabledEdgeKey[:], []byte{})
3✔
4516
        }
3✔
4517

4518
        return disabledEdgePolicyIndex.Delete(disabledEdgeKey[:])
3✔
4519
}
4520

4521
// putChanEdgePolicyUnknown marks the edge policy as unknown
4522
// in the edges bucket.
4523
func putChanEdgePolicyUnknown(edges kvdb.RwBucket, channelID uint64,
4524
        from []byte) error {
3✔
4525

3✔
4526
        var edgeKey [33 + 8]byte
3✔
4527
        copy(edgeKey[:], from)
3✔
4528
        byteOrder.PutUint64(edgeKey[33:], channelID)
3✔
4529

3✔
4530
        if edges.Get(edgeKey[:]) != nil {
3✔
4531
                return fmt.Errorf("cannot write unknown policy for channel %v "+
×
4532
                        " when there is already a policy present", channelID)
×
4533
        }
×
4534

4535
        return edges.Put(edgeKey[:], unknownPolicy)
3✔
4536
}
4537

4538
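// fetchChanEdgePolicy looks up the policy that the given node has advertised
// for the given channel. A nil policy (with a nil error) is returned if the
// policy is marked as unknown or if it cannot be fully deserialized.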
func fetchChanEdgePolicy(edges kvdb.RBucket, chanID []byte,
4539
        nodePub []byte) (*models.ChannelEdgePolicy, error) {
3✔
4540

3✔
4541
        var edgeKey [33 + 8]byte
3✔
4542
        copy(edgeKey[:], nodePub)
3✔
4543
        copy(edgeKey[33:], chanID)
3✔
4544

3✔
4545
        edgeBytes := edges.Get(edgeKey[:])
3✔
4546
        if edgeBytes == nil {
3✔
4547
                return nil, ErrEdgeNotFound
×
4548
        }
×
4549

4550
        // No need to deserialize unknown policy.
4551
        if bytes.Equal(edgeBytes, unknownPolicy) {
6✔
4552
                return nil, nil
3✔
4553
        }
3✔
4554

4555
        edgeReader := bytes.NewReader(edgeBytes)
3✔
4556

3✔
4557
        ep, err := deserializeChanEdgePolicy(edgeReader)
3✔
4558
        switch {
3✔
4559
        // If the db policy was missing an expected optional field, we return
4560
        // nil as if the policy was unknown.
UNCOV
4561
        case errors.Is(err, ErrEdgePolicyOptionalFieldNotFound):
×
UNCOV
4562
                return nil, nil
×
4563

4564
        // If the policy contains invalid TLV bytes, we return nil as if
4565
        // the policy was unknown.
4566
        case errors.Is(err, ErrParsingExtraTLVBytes):
×
4567
                return nil, nil
×
4568

4569
        case err != nil:
×
4570
                return nil, err
×
4571
        }
4572

4573
        return ep, nil
3✔
4574
}
4575

4576
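// fetchChanEdgePolicies fetches both directed policies for the given channel
// by first reading the two node public keys from the edge index and then
// looking up each node's policy.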
func fetchChanEdgePolicies(edgeIndex kvdb.RBucket, edges kvdb.RBucket,
4577
        chanID []byte) (*models.ChannelEdgePolicy, *models.ChannelEdgePolicy,
4578
        error) {
3✔
4579

3✔
4580
        edgeInfo := edgeIndex.Get(chanID)
3✔
4581
        if edgeInfo == nil {
3✔
4582
                return nil, nil, fmt.Errorf("%w: chanID=%x", ErrEdgeNotFound,
×
4583
                        chanID)
×
4584
        }
×
4585

4586
        // The first node is contained within the first half of the edge
4587
        // information. We only propagate the error here and below if it's
4588
        // something other than edge non-existence.
4589
        node1Pub := edgeInfo[:33]
3✔
4590
        edge1, err := fetchChanEdgePolicy(edges, chanID, node1Pub)
3✔
4591
        if err != nil {
3✔
4592
                return nil, nil, fmt.Errorf("%w: node1Pub=%x", ErrEdgeNotFound,
×
4593
                        node1Pub)
×
4594
        }
×
4595

4596
        // Similarly, the second node is contained within the latter
4597
        // half of the edge information.
4598
        node2Pub := edgeInfo[33:66]
3✔
4599
        edge2, err := fetchChanEdgePolicy(edges, chanID, node2Pub)
3✔
4600
        if err != nil {
3✔
4601
                return nil, nil, fmt.Errorf("%w: node2Pub=%x", ErrEdgeNotFound,
×
4602
                        node2Pub)
×
4603
        }
×
4604

4605
        return edge1, edge2, nil
3✔
4606
}
4607

4608
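// serializeChanEdgePolicy writes the given channel edge policy to the writer
// along with the public key of the node that the policy points to.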
func serializeChanEdgePolicy(w io.Writer, edge *models.ChannelEdgePolicy,
4609
        to []byte) error {
3✔
4610

3✔
4611
        err := wire.WriteVarBytes(w, 0, edge.SigBytes)
3✔
4612
        if err != nil {
3✔
4613
                return err
×
4614
        }
×
4615

4616
        if err := binary.Write(w, byteOrder, edge.ChannelID); err != nil {
3✔
4617
                return err
×
4618
        }
×
4619

4620
        var scratch [8]byte
3✔
4621
        updateUnix := uint64(edge.LastUpdate.Unix())
3✔
4622
        byteOrder.PutUint64(scratch[:], updateUnix)
3✔
4623
        if _, err := w.Write(scratch[:]); err != nil {
3✔
4624
                return err
×
4625
        }
×
4626

4627
        if err := binary.Write(w, byteOrder, edge.MessageFlags); err != nil {
3✔
4628
                return err
×
4629
        }
×
4630
        if err := binary.Write(w, byteOrder, edge.ChannelFlags); err != nil {
3✔
4631
                return err
×
4632
        }
×
4633
        if err := binary.Write(w, byteOrder, edge.TimeLockDelta); err != nil {
3✔
4634
                return err
×
4635
        }
×
4636
        if err := binary.Write(w, byteOrder, uint64(edge.MinHTLC)); err != nil {
3✔
4637
                return err
×
4638
        }
×
4639
        err = binary.Write(w, byteOrder, uint64(edge.FeeBaseMSat))
3✔
4640
        if err != nil {
3✔
4641
                return err
×
4642
        }
×
4643
        err = binary.Write(
3✔
4644
                w, byteOrder, uint64(edge.FeeProportionalMillionths),
3✔
4645
        )
3✔
4646
        if err != nil {
3✔
4647
                return err
×
4648
        }
×
4649

4650
        if _, err := w.Write(to); err != nil {
3✔
4651
                return err
×
4652
        }
×
4653

4654
        // If the max_htlc field is present, we write it. To be compatible with
4655
        // older versions that weren't aware of this field, we write it as part
4656
        // of the opaque data.
4657
        // TODO(halseth): clean up when moving to TLV.
4658
        var opaqueBuf bytes.Buffer
3✔
4659
        if edge.MessageFlags.HasMaxHtlc() {
6✔
4660
                err := binary.Write(&opaqueBuf, byteOrder, uint64(edge.MaxHTLC))
3✔
4661
                if err != nil {
3✔
4662
                        return err
×
4663
                }
×
4664
        }
4665

4666
        // Validate that the ExtraOpaqueData is in fact a valid TLV stream.
4667
        err = edge.ExtraOpaqueData.ValidateTLV()
3✔
4668
        if err != nil {
3✔
UNCOV
4669
                return fmt.Errorf("%w: %w", ErrParsingExtraTLVBytes, err)
×
UNCOV
4670
        }
×
4671

4672
        if len(edge.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes {
3✔
4673
                return ErrTooManyExtraOpaqueBytes(len(edge.ExtraOpaqueData))
×
4674
        }
×
4675
        if _, err := opaqueBuf.Write(edge.ExtraOpaqueData); err != nil {
3✔
4676
                return err
×
4677
        }
×
4678

4679
        if err := wire.WriteVarBytes(w, 0, opaqueBuf.Bytes()); err != nil {
3✔
4680
                return err
×
4681
        }
×
4682

4683
        return nil
3✔
4684
}
4685

4686
func deserializeChanEdgePolicy(r io.Reader) (*models.ChannelEdgePolicy, error) {
3✔
4687
        // Deserialize the policy. Note that in case an optional field is not
3✔
4688
        // found or if the edge has invalid TLV data, then both an error and a
3✔
4689
        // populated policy object are returned so that the caller can decide
3✔
4690
        // if it still wants to use the edge or not.
3✔
4691
        edge, err := deserializeChanEdgePolicyRaw(r)
3✔
4692
        if err != nil &&
3✔
4693
                !errors.Is(err, ErrEdgePolicyOptionalFieldNotFound) &&
3✔
4694
                !errors.Is(err, ErrParsingExtraTLVBytes) {
3✔
4695

×
4696
                return nil, err
×
4697
        }
×
4698

4699
        return edge, err
3✔
4700
}
4701

4702
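// deserializeChanEdgePolicyRaw decodes a channel edge policy without
// enforcing that its optional fields and extra TLV data are well formed. The
// partially populated policy is returned along with any such error so that
// the caller can decide whether to keep it.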
func deserializeChanEdgePolicyRaw(r io.Reader) (*models.ChannelEdgePolicy,
4703
        error) {
3✔
4704

3✔
4705
        edge := &models.ChannelEdgePolicy{}
3✔
4706

3✔
4707
        var err error
3✔
4708
        edge.SigBytes, err = wire.ReadVarBytes(r, 0, 80, "sig")
3✔
4709
        if err != nil {
3✔
4710
                return nil, err
×
4711
        }
×
4712

4713
        if err := binary.Read(r, byteOrder, &edge.ChannelID); err != nil {
3✔
4714
                return nil, err
×
4715
        }
×
4716

4717
        var scratch [8]byte
3✔
4718
        if _, err := r.Read(scratch[:]); err != nil {
3✔
4719
                return nil, err
×
4720
        }
×
4721
        unix := int64(byteOrder.Uint64(scratch[:]))
3✔
4722
        edge.LastUpdate = time.Unix(unix, 0)
3✔
4723

3✔
4724
        if err := binary.Read(r, byteOrder, &edge.MessageFlags); err != nil {
3✔
4725
                return nil, err
×
4726
        }
×
4727
        if err := binary.Read(r, byteOrder, &edge.ChannelFlags); err != nil {
3✔
4728
                return nil, err
×
4729
        }
×
4730
        if err := binary.Read(r, byteOrder, &edge.TimeLockDelta); err != nil {
3✔
4731
                return nil, err
×
4732
        }
×
4733

4734
        var n uint64
3✔
4735
        if err := binary.Read(r, byteOrder, &n); err != nil {
3✔
4736
                return nil, err
×
4737
        }
×
4738
        edge.MinHTLC = lnwire.MilliSatoshi(n)
3✔
4739

3✔
4740
        if err := binary.Read(r, byteOrder, &n); err != nil {
3✔
4741
                return nil, err
×
4742
        }
×
4743
        edge.FeeBaseMSat = lnwire.MilliSatoshi(n)
3✔
4744

3✔
4745
        if err := binary.Read(r, byteOrder, &n); err != nil {
3✔
4746
                return nil, err
×
4747
        }
×
4748
        edge.FeeProportionalMillionths = lnwire.MilliSatoshi(n)
3✔
4749

3✔
4750
        if _, err := r.Read(edge.ToNode[:]); err != nil {
3✔
4751
                return nil, err
×
4752
        }
×
4753

4754
        // We'll try and see if there are any opaque bytes left; if not, then
4755
        // we'll ignore the EOF error and return the edge as is.
4756
        edge.ExtraOpaqueData, err = wire.ReadVarBytes(
3✔
4757
                r, 0, MaxAllowedExtraOpaqueBytes, "blob",
3✔
4758
        )
3✔
4759
        switch {
3✔
4760
        case errors.Is(err, io.ErrUnexpectedEOF):
×
UNCOV
4761
        case errors.Is(err, io.EOF):
×
4762
        case err != nil:
×
4763
                return nil, err
×
4764
        }
4765

4766
        // See if optional fields are present.
4767
        if edge.MessageFlags.HasMaxHtlc() {
6✔
4768
                // The max_htlc field should be at the beginning of the opaque
3✔
4769
                // bytes.
3✔
4770
                opq := edge.ExtraOpaqueData
3✔
4771

3✔
4772
                // If the max_htlc field is not present, it might be old data
3✔
4773
                // stored before this field was validated. We'll return the
3✔
4774
                // edge along with an error.
3✔
4775
                if len(opq) < 8 {
3✔
UNCOV
4776
                        return edge, ErrEdgePolicyOptionalFieldNotFound
×
UNCOV
4777
                }
×
4778

4779
                maxHtlc := byteOrder.Uint64(opq[:8])
3✔
4780
                edge.MaxHTLC = lnwire.MilliSatoshi(maxHtlc)
3✔
4781

3✔
4782
                // Exclude the parsed field from the rest of the opaque data.
3✔
4783
                edge.ExtraOpaqueData = opq[8:]
3✔
4784
        }
4785

4786
        // Attempt to extract the inbound fee from the opaque data. If we fail
4787
        // to parse the TLV here, we return an error but also return the edge
4788
        // so that the caller can still use it. This is for backwards
4789
        // compatibility in case we have already persisted some policies that
4790
        // have invalid TLV data.
4791
        var inboundFee lnwire.Fee
3✔
4792
        typeMap, err := edge.ExtraOpaqueData.ExtractRecords(&inboundFee)
3✔
4793
        if err != nil {
3✔
4794
                return edge, fmt.Errorf("%w: %w", ErrParsingExtraTLVBytes, err)
×
4795
        }
×
4796

4797
        val, ok := typeMap[lnwire.FeeRecordType]
3✔
4798
        if ok && val == nil {
6✔
4799
                edge.InboundFee = fn.Some(inboundFee)
3✔
4800
        }
3✔
4801

4802
        return edge, nil
3✔
4803
}
4804

4805
// chanGraphNodeTx is an implementation of the NodeRTx interface backed by the
4806
// KVStore and a kvdb.RTx.
4807
type chanGraphNodeTx struct {
4808
        tx   kvdb.RTx
4809
        db   *KVStore
4810
        node *models.LightningNode
4811
}
4812

4813
// A compile-time constraint to ensure chanGraphNodeTx implements the NodeRTx
4814
// interface.
4815
var _ NodeRTx = (*chanGraphNodeTx)(nil)
4816

4817
func newChanGraphNodeTx(tx kvdb.RTx, db *KVStore,
4818
        node *models.LightningNode) *chanGraphNodeTx {
3✔
4819

3✔
4820
        return &chanGraphNodeTx{
3✔
4821
                tx:   tx,
3✔
4822
                db:   db,
3✔
4823
                node: node,
3✔
4824
        }
3✔
4825
}
3✔
4826

4827
// Node returns the raw information of the node.
4828
//
4829
// NOTE: This is a part of the NodeRTx interface.
4830
func (c *chanGraphNodeTx) Node() *models.LightningNode {
3✔
4831
        return c.node
3✔
4832
}
3✔
4833

4834
// FetchNode fetches the node with the given pub key under the same transaction
4835
// used to fetch the current node. The returned node is also a NodeRTx and any
4836
// operations on that NodeRTx will also be done under the same transaction.
4837
//
4838
// NOTE: This is a part of the NodeRTx interface.
UNCOV
4839
func (c *chanGraphNodeTx) FetchNode(nodePub route.Vertex) (NodeRTx, error) {
×
UNCOV
4840
        node, err := c.db.FetchLightningNodeTx(c.tx, nodePub)
×
UNCOV
4841
        if err != nil {
×
4842
                return nil, err
×
4843
        }
×
4844

UNCOV
4845
        return newChanGraphNodeTx(c.tx, c.db, node), nil
×
4846
}
4847

4848
// ForEachChannel can be used to iterate over the node's channels under
4849
// the same transaction used to fetch the node.
4850
//
4851
// NOTE: This is a part of the NodeRTx interface.
4852
func (c *chanGraphNodeTx) ForEachChannel(f func(*models.ChannelEdgeInfo,
UNCOV
4853
        *models.ChannelEdgePolicy, *models.ChannelEdgePolicy) error) error {
×
UNCOV
4854

×
UNCOV
4855
        return c.db.forEachNodeChannelTx(c.tx, c.node.PubKeyBytes,
×
UNCOV
4856
                func(_ kvdb.RTx, info *models.ChannelEdgeInfo, policy1,
×
UNCOV
4857
                        policy2 *models.ChannelEdgePolicy) error {
×
UNCOV
4858

×
UNCOV
4859
                        return f(info, policy1, policy2)
×
UNCOV
4860
                },
×
4861
        )
4862
}
4863

4864
// MakeTestGraph creates a new instance of the ChannelGraph for testing
4865
// purposes.
4866
//
4867
// NOTE: this helper currently creates a ChannelGraph that is only ever backed
4868
// by the `KVStore` implementation of the `V1Store` interface.
UNCOV
4869
func MakeTestGraph(t testing.TB, opts ...ChanGraphOption) *ChannelGraph {
×
UNCOV
4870
        t.Helper()
×
UNCOV
4871

×
UNCOV
4872
        // Next, create KVStore for the first time.
×
UNCOV
4873
        backend, backendCleanup, err := kvdb.GetTestBackend(t.TempDir(), "cgr")
×
UNCOV
4874
        t.Cleanup(backendCleanup)
×
UNCOV
4875
        require.NoError(t, err)
×
UNCOV
4876
        t.Cleanup(func() {
×
UNCOV
4877
                require.NoError(t, backend.Close())
×
UNCOV
4878
        })
×
4879

UNCOV
4880
        graphStore, err := NewKVStore(backend)
×
UNCOV
4881
        require.NoError(t, err)
×
UNCOV
4882

×
UNCOV
4883
        graph, err := NewChannelGraph(graphStore, opts...)
×
UNCOV
4884
        require.NoError(t, err)
×
UNCOV
4885
        require.NoError(t, graph.Start())
×
UNCOV
4886
        t.Cleanup(func() {
×
UNCOV
4887
                require.NoError(t, graph.Stop())
×
UNCOV
4888
        })
×
4889

UNCOV
4890
        return graph
×
4891
}