lightningnetwork / lnd, build 15978799235
30 Jun 2025 04:47PM UTC coverage: 57.813% (-9.8%) from 67.608%

Pull Request #10011: refactor+graph/db: refactor preparations required for incoming SQL migration code
Merge d0538fdbe into e54206f8c

18 of 69 new or added lines in 2 files covered (26.09%)
28400 existing lines in 458 files now uncovered
98467 of 170321 relevant lines covered (57.81%)
1.79 hits per line

Source file: /graph/db/kv_store.go (68.93% covered)
1
package graphdb
2

3
import (
4
        "bytes"
5
        "context"
6
        "crypto/sha256"
7
        "encoding/binary"
8
        "errors"
9
        "fmt"
10
        "io"
11
        "math"
12
        "net"
13
        "sort"
14
        "sync"
15
        "time"
16

17
        "github.com/btcsuite/btcd/btcec/v2"
18
        "github.com/btcsuite/btcd/chaincfg/chainhash"
19
        "github.com/btcsuite/btcd/txscript"
20
        "github.com/btcsuite/btcd/wire"
21
        "github.com/btcsuite/btcwallet/walletdb"
22
        "github.com/lightningnetwork/lnd/aliasmgr"
23
        "github.com/lightningnetwork/lnd/batch"
24
        "github.com/lightningnetwork/lnd/fn/v2"
25
        "github.com/lightningnetwork/lnd/graph/db/models"
26
        "github.com/lightningnetwork/lnd/input"
27
        "github.com/lightningnetwork/lnd/kvdb"
28
        "github.com/lightningnetwork/lnd/lnwire"
29
        "github.com/lightningnetwork/lnd/routing/route"
30
)
31

32
var (
33
        // nodeBucket is a bucket which houses all the vertices or nodes within
34
        // the channel graph. This bucket has a single sub-bucket which adds an
35
        // additional index from pubkey -> alias. Within the top-level of this
36
        // bucket, the key space maps a node's compressed public key to the
37
        // serialized information for that node. Additionally, there's a
38
        // special key "source" which stores the pubkey of the source node. The
39
        // source node is used as the starting point for all graph queries and
40
        // traversals. The graph is formed as a star-graph with the source node
41
        // at the center.
42
        //
43
        // maps: pubKey -> nodeInfo
44
        // maps: source -> selfPubKey
45
        nodeBucket = []byte("graph-node")
46

47
        // nodeUpdateIndexBucket is a sub-bucket of the nodeBucket. This bucket
48
        // will be used to quickly look up the "freshness" of a node's last
49
        // update to the network. The bucket only contains keys and no values;
50
        // its mapping is:
51
        //
52
        // maps: updateTime || nodeID -> nil
53
        nodeUpdateIndexBucket = []byte("graph-node-update-index")
54

55
        // sourceKey is a special key that resides within the nodeBucket. The
56
        // sourceKey maps a key to the public key of the "self node".
57
        sourceKey = []byte("source")
58

59
        // aliasIndexBucket is a sub-bucket that's nested within the main
60
        // nodeBucket. This bucket maps the public key of a node to its
61
        // current alias. This bucket is provided as it can be used within a
62
        // future UI layer to add an additional degree of confirmation.
63
        aliasIndexBucket = []byte("alias")
64

65
        // edgeBucket is a bucket which houses all of the edge or channel
66
        // information within the channel graph. This bucket essentially acts
67
        // as an adjacency list, which in conjunction with a range scan, can be
68
        // used to iterate over all the incoming and outgoing edges for a
69
        // particular node. Keys in the bucket use a prefix scheme which leads
70
        // with the node's public key and ends with the compact edge ID.
71
        // For each chanID, there will be two entries within the bucket, as the
72
        // graph is directed: nodes may have different policies w.r.t. fees
73
        // for their respective directions.
74
        //
75
        // maps: pubKey || chanID -> channel edge policy for node
76
        edgeBucket = []byte("graph-edge")
77

78
        // unknownPolicy is represented as an empty slice. It is
79
        // used as the value in edgeBucket for unknown channel edge policies.
80
        // Unknown policies are still stored in the database to enable efficient
81
        // lookup of incoming channel edges.
82
        unknownPolicy = []byte{}
83

84
        // chanStart is an array of all zero bytes which is used to perform
85
        // range scans within the edgeBucket to obtain all of the outgoing
86
        // edges for a particular node.
87
        chanStart [8]byte
88

89
        // edgeIndexBucket is an index which can be used to iterate all edges
90
        // in the bucket, grouping them according to their in/out nodes.
91
        // Additionally, the items in this bucket also contain the complete
92
        // edge information for a channel. The edge information includes the
93
        // capacity of the channel, the nodes that made the channel, etc. This
94
        // bucket resides within the edgeBucket above. Creation of an edge
95
        // proceeds in two phases: first the edge is added to the edge index,
96
        // afterwards the edgeBucket can be updated with the latest details of
97
        // the edge as they are announced on the network.
98
        //
99
        // maps: chanID -> pubKey1 || pubKey2 || restofEdgeInfo
100
        edgeIndexBucket = []byte("edge-index")
101

102
        // edgeUpdateIndexBucket is a sub-bucket of the main edgeBucket. This
103
        // bucket contains an index which allows us to gauge the "freshness" of
104
        // a channel's last updates.
105
        //
106
        // maps: updateTime || chanID -> nil
107
        edgeUpdateIndexBucket = []byte("edge-update-index")
108

109
        // channelPointBucket maps a channel's full outpoint (txid:index) to
110
        // its short 8-byte channel ID. This bucket resides within the
111
        // edgeBucket above, and can be used to quickly remove an edge due to
112
        // the outpoint being spent, or to query for existence of a channel.
113
        //
114
        // maps: outPoint -> chanID
115
        channelPointBucket = []byte("chan-index")
116

117
        // zombieBucket is a sub-bucket of the main edgeBucket bucket
118
        // responsible for maintaining an index of zombie channels. Each entry
119
        // exists within the bucket as follows:
120
        //
121
        // maps: chanID -> pubKey1 || pubKey2
122
        //
123
        // The chanID represents the channel ID of the edge that is marked as a
124
        // zombie and is used as the key, which maps to the public keys of the
125
        // edge's participants.
126
        zombieBucket = []byte("zombie-index")
127

128
        // disabledEdgePolicyBucket is a sub-bucket of the main edgeBucket
129
        // bucket responsible for maintaining an index of disabled edge
130
        // policies. Each entry exists within the bucket as follows:
131
        //
132
        // maps: <chanID><direction> -> []byte{}
133
        //
134
        // The chanID represents the channel ID of the edge and the direction is
135
        // one byte representing the direction of the edge. The main purpose of
136
        // this index is to allow pruning disabled channels in a fast way
137
        // without the need to iterate all over the graph.
138
        disabledEdgePolicyBucket = []byte("disabled-edge-policy-index")
139

140
        // graphMetaBucket is a top-level bucket which stores various metadata
141
        // related to the on-disk channel graph. Data stored in this bucket
142
        // includes the block up to which the graph has been synced, the total
143
        // number of channels, etc.
144
        graphMetaBucket = []byte("graph-meta")
145

146
        // pruneLogBucket is a bucket within the graphMetaBucket that stores
147
        // a mapping from the block height to the hash for the blocks used to
148
        // prune the graph.
149
        // Once a new block is discovered, any channels that have been closed
150
        // (by spending the outpoint) can safely be removed from the graph, and
151
        // the block is added to the prune log. We need to keep such a log for
152
        // the case where a reorg happens, and we must "rewind" the state of the
153
        // graph by removing channels that were previously confirmed. In such a
154
        // case we'll remove all entries from the prune log with a block height
155
        // that no longer exists.
156
        pruneLogBucket = []byte("prune-log")
157

158
        // closedScidBucket is a top-level bucket that stores scids for
159
        // channels that we know to be closed. This is used so that we don't
160
        // need to perform expensive validation checks if we receive a channel
161
        // announcement for the channel again.
162
        //
163
        // maps: scid -> []byte{}
164
        closedScidBucket = []byte("closed-scid")
165
)
166

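
The following is a small illustrative sketch, not part of kv_store.go, showing how a composite edgeBucket key described above can be assembled: the 33-byte compressed node public key followed by the 8-byte channel ID (big-endian, matching the binary.BigEndian encoding used for channel IDs elsewhere in this file and the 33+8 length check in getChannelMap below). The helper name edgeKey is hypothetical.

// edgeKey builds a pubKey || chanID key for edgeBucket (illustration only).
func edgeKey(nodePub [33]byte, chanID uint64) []byte {
        var key [33 + 8]byte
        copy(key[:33], nodePub[:])
        binary.BigEndian.PutUint64(key[33:], chanID)
        return key[:]
}
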
167
const (
168
        // MaxAllowedExtraOpaqueBytes is the largest amount of opaque bytes that
169
        // we'll permit to be written to disk. We limit this as otherwise, it
170
        // would be possible for a node to create a ton of updates and slowly
171
        // fill our disk, and also waste bandwidth due to relaying.
172
        MaxAllowedExtraOpaqueBytes = 10000
173
)
174

175
// KVStore is a persistent, on-disk graph representation of the Lightning
176
// Network. This struct can be used to implement path finding algorithms on top
177
// of, and also to update a node's view based on information received from the
178
// p2p network. Internally, the graph is stored using a modified adjacency list
179
// representation with some added object interaction possible with each
180
// serialized edge/node. The stored graph is directed, meaning that there are two
181
// edges stored for each channel: an inbound/outbound edge for each node pair.
182
// Nodes, edges, and edge information can all be added to the graph
183
// independently. Edge removal results in the deletion of all edge information
184
// for that edge.
185
type KVStore struct {
186
        db kvdb.Backend
187

188
        // cacheMu guards all caches (rejectCache and chanCache). If
189
        // this mutex is to be acquired at the same time as the DB mutex, then
190
        // the cacheMu MUST be acquired first to prevent deadlock.
191
        cacheMu     sync.RWMutex
192
        rejectCache *rejectCache
193
        chanCache   *channelCache
194

195
        chanScheduler batch.Scheduler[kvdb.RwTx]
196
        nodeScheduler batch.Scheduler[kvdb.RwTx]
197
}
198

199
// A compile-time assertion to ensure that the KVStore struct implements the
200
// V1Store interface.
201
var _ V1Store = (*KVStore)(nil)
202

203
// NewKVStore allocates a new KVStore backed by a DB instance. The
204
// returned instance has its own unique reject cache and channel cache.
205
func NewKVStore(db kvdb.Backend, options ...StoreOptionModifier) (*KVStore,
206
        error) {
3✔
207

3✔
208
        opts := DefaultOptions()
3✔
209
        for _, o := range options {
6✔
210
                o(opts)
3✔
211
        }
3✔
212

213
        if !opts.NoMigration {
6✔
214
                if err := initKVStore(db); err != nil {
3✔
215
                        return nil, err
×
216
                }
×
217
        }
218

219
        g := &KVStore{
3✔
220
                db:          db,
3✔
221
                rejectCache: newRejectCache(opts.RejectCacheSize),
3✔
222
                chanCache:   newChannelCache(opts.ChannelCacheSize),
3✔
223
        }
3✔
224
        g.chanScheduler = batch.NewTimeScheduler(
3✔
225
                batch.NewBoltBackend[kvdb.RwTx](db), &g.cacheMu,
3✔
226
                opts.BatchCommitInterval,
3✔
227
        )
3✔
228
        g.nodeScheduler = batch.NewTimeScheduler(
3✔
229
                batch.NewBoltBackend[kvdb.RwTx](db), nil,
3✔
230
                opts.BatchCommitInterval,
3✔
231
        )
3✔
232

3✔
233
        return g, nil
3✔
234
}
235

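
A brief caller-side sketch of NewKVStore, assuming an already-opened kvdb.Backend named db (opening the backend is outside the scope of this file); the surrounding function is hypothetical and error handling is abbreviated.

// Hypothetical usage: build a KVStore on top of an existing backend.
func openGraphStore(db kvdb.Backend) (*KVStore, error) {
        store, err := NewKVStore(db)
        if err != nil {
                return nil, err
        }
        // store is now ready to serve graph queries backed by db.
        return store, nil
}
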
236
// channelMapKey is the key structure used for storing channel edge policies.
237
type channelMapKey struct {
238
        nodeKey route.Vertex
239
        chanID  [8]byte
240
}
241

242
// String returns a human-readable representation of the key.
243
func (c channelMapKey) String() string {
×
244
        return fmt.Sprintf("node=%v, chanID=%x", c.nodeKey, c.chanID)
×
245
}
×
246

247
// getChannelMap loads all channel edge policies from the database and stores
248
// them in a map.
249
func getChannelMap(edges kvdb.RBucket) (
250
        map[channelMapKey]*models.ChannelEdgePolicy, error) {
3✔
251

3✔
252
        // Create a map to store all channel edge policies.
3✔
253
        channelMap := make(map[channelMapKey]*models.ChannelEdgePolicy)
3✔
254

3✔
255
        err := kvdb.ForAll(edges, func(k, edgeBytes []byte) error {
6✔
256
                // Skip embedded buckets.
3✔
257
                if bytes.Equal(k, edgeIndexBucket) ||
3✔
258
                        bytes.Equal(k, edgeUpdateIndexBucket) ||
3✔
259
                        bytes.Equal(k, zombieBucket) ||
3✔
260
                        bytes.Equal(k, disabledEdgePolicyBucket) ||
3✔
261
                        bytes.Equal(k, channelPointBucket) {
6✔
262

3✔
263
                        return nil
3✔
264
                }
3✔
265

266
                // Validate key length.
267
                if len(k) != 33+8 {
3✔
268
                        return fmt.Errorf("invalid edge key %x encountered", k)
×
269
                }
×
270

271
                var key channelMapKey
3✔
272
                copy(key.nodeKey[:], k[:33])
3✔
273
                copy(key.chanID[:], k[33:])
3✔
274

3✔
275
                // No need to deserialize unknown policy.
3✔
276
                if bytes.Equal(edgeBytes, unknownPolicy) {
3✔
277
                        return nil
×
278
                }
×
279

280
                edgeReader := bytes.NewReader(edgeBytes)
3✔
281
                edge, err := deserializeChanEdgePolicyRaw(
3✔
282
                        edgeReader,
3✔
283
                )
3✔
284

3✔
285
                switch {
3✔
286
                // If the db policy was missing an expected optional field, we
287
                // return nil as if the policy was unknown.
288
                case errors.Is(err, ErrEdgePolicyOptionalFieldNotFound):
×
289
                        return nil
×
290

291
                // We don't want a single policy with bad TLV data to stop us
292
                // from loading the rest of the data, so we just skip this
293
                // policy. This is for backwards compatibility since we did not
294
                // use to validate TLV data in the past before persisting it.
295
                case errors.Is(err, ErrParsingExtraTLVBytes):
×
296
                        return nil
×
297

298
                case err != nil:
×
299
                        return err
×
300
                }
301

302
                channelMap[key] = edge
3✔
303

3✔
304
                return nil
3✔
305
        })
306
        if err != nil {
3✔
307
                return nil, err
×
308
        }
×
309

310
        return channelMap, nil
3✔
311
}
312

313
var graphTopLevelBuckets = [][]byte{
314
        nodeBucket,
315
        edgeBucket,
316
        graphMetaBucket,
317
        closedScidBucket,
318
}
319

320
// initKVStore creates and initializes a fresh version of the graph database. In
321
// the case that the target path has not yet been created or doesn't yet exist,
322
// then the path is created. Additionally, all required top-level buckets used
323
// within the database are created.
324
func initKVStore(db kvdb.Backend) error {
3✔
325
        err := kvdb.Update(db, func(tx kvdb.RwTx) error {
6✔
326
                for _, tlb := range graphTopLevelBuckets {
6✔
327
                        if _, err := tx.CreateTopLevelBucket(tlb); err != nil {
3✔
328
                                return err
×
329
                        }
×
330
                }
331

332
                nodes := tx.ReadWriteBucket(nodeBucket)
3✔
333
                _, err := nodes.CreateBucketIfNotExists(aliasIndexBucket)
3✔
334
                if err != nil {
3✔
335
                        return err
×
336
                }
×
337
                _, err = nodes.CreateBucketIfNotExists(nodeUpdateIndexBucket)
3✔
338
                if err != nil {
3✔
339
                        return err
×
340
                }
×
341

342
                edges := tx.ReadWriteBucket(edgeBucket)
3✔
343
                _, err = edges.CreateBucketIfNotExists(edgeIndexBucket)
3✔
344
                if err != nil {
3✔
345
                        return err
×
346
                }
×
347
                _, err = edges.CreateBucketIfNotExists(edgeUpdateIndexBucket)
3✔
348
                if err != nil {
3✔
349
                        return err
×
350
                }
×
351
                _, err = edges.CreateBucketIfNotExists(channelPointBucket)
3✔
352
                if err != nil {
3✔
353
                        return err
×
354
                }
×
355
                _, err = edges.CreateBucketIfNotExists(zombieBucket)
3✔
356
                if err != nil {
3✔
357
                        return err
×
358
                }
×
359

360
                graphMeta := tx.ReadWriteBucket(graphMetaBucket)
3✔
361
                _, err = graphMeta.CreateBucketIfNotExists(pruneLogBucket)
3✔
362

3✔
363
                return err
3✔
364
        }, func() {})
3✔
365
        if err != nil {
3✔
366
                return fmt.Errorf("unable to create new channel graph: %w", err)
×
367
        }
×
368

369
        return nil
3✔
370
}
371

372
// AddrsForNode returns all known addresses for the target node public key that
373
// the graph DB is aware of. The returned boolean indicates whether the given
374
// node is known to the graph DB.
375
//
376
// NOTE: this is part of the channeldb.AddrSource interface.
377
func (c *KVStore) AddrsForNode(ctx context.Context,
378
        nodePub *btcec.PublicKey) (bool, []net.Addr, error) {
3✔
379

3✔
380
        pubKey, err := route.NewVertexFromBytes(nodePub.SerializeCompressed())
3✔
381
        if err != nil {
3✔
382
                return false, nil, err
×
383
        }
×
384

385
        node, err := c.FetchLightningNode(ctx, pubKey)
3✔
386
        // We don't consider it an error if the graph is unaware of the node.
3✔
387
        switch {
3✔
388
        case err != nil && !errors.Is(err, ErrGraphNodeNotFound):
×
389
                return false, nil, err
×
390

391
        case errors.Is(err, ErrGraphNodeNotFound):
3✔
392
                return false, nil, nil
3✔
393
        }
394

395
        return true, node.Addresses, nil
3✔
396
}
397

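
A short usage sketch of AddrsForNode; store, ctx and pubKey are assumed to exist in the caller. The boolean reports whether the node is known to the graph DB at all.

// Hypothetical caller-side handling of AddrsForNode results.
known, addrs, err := store.AddrsForNode(ctx, pubKey)
if err != nil {
        return err
}
if !known {
        // The node is not present in the graph DB; addrs is nil.
        return nil
}
for _, addr := range addrs {
        // Use addr, e.g. to attempt an outbound connection.
        _ = addr
}
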
398
// ForEachChannel iterates through all the channel edges stored within the
399
// graph and invokes the passed callback for each edge. The callback takes two
400
// edges since this is a directed graph: both the in/out edges are visited.
401
// If the callback returns an error, then the transaction is aborted and the
402
// iteration stops early.
403
//
404
// NOTE: If an edge can't be found, or wasn't advertised, then a nil pointer
405
// for that particular channel edge routing policy will be passed into the
406
// callback.
407
func (c *KVStore) ForEachChannel(cb func(*models.ChannelEdgeInfo,
408
        *models.ChannelEdgePolicy, *models.ChannelEdgePolicy) error) error {
3✔
409

3✔
410
        return forEachChannel(c.db, cb)
3✔
411
}
3✔
412

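
A caller-side sketch of ForEachChannel (store is an assumed *KVStore). As noted above, either policy may be nil when it has not been advertised, so callbacks should guard against that.

// Hypothetical iteration over every channel edge in the graph.
err := store.ForEachChannel(func(info *models.ChannelEdgeInfo,
        p1, p2 *models.ChannelEdgePolicy) error {

        // p1/p2 are nil for policies that have not been advertised yet.
        if p1 == nil || p2 == nil {
                return nil
        }

        // Inspect info.ChannelID, info.Capacity and both policies here.
        return nil
})
if err != nil {
        return err
}
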
413
// forEachChannel iterates through all the channel edges stored within the
414
// graph and invokes the passed callback for each edge. The callback takes two
415
// edges since this is a directed graph: both the in/out edges are visited.
416
// If the callback returns an error, then the transaction is aborted and the
417
// iteration stops early.
418
//
419
// NOTE: If an edge can't be found, or wasn't advertised, then a nil pointer
420
// for that particular channel edge routing policy will be passed into the
421
// callback.
422
func forEachChannel(db kvdb.Backend, cb func(*models.ChannelEdgeInfo,
423
        *models.ChannelEdgePolicy, *models.ChannelEdgePolicy) error) error {
3✔
424

3✔
425
        return db.View(func(tx kvdb.RTx) error {
6✔
426
                edges := tx.ReadBucket(edgeBucket)
3✔
427
                if edges == nil {
3✔
428
                        return ErrGraphNoEdgesFound
×
429
                }
×
430

431
                // First, load all edges in memory indexed by node and channel
432
                // id.
433
                channelMap, err := getChannelMap(edges)
3✔
434
                if err != nil {
3✔
435
                        return err
×
436
                }
×
437

438
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
3✔
439
                if edgeIndex == nil {
3✔
440
                        return ErrGraphNoEdgesFound
×
441
                }
×
442

443
                // Load edge index, recombine each channel with the policies
444
                // loaded above and invoke the callback.
445
                return kvdb.ForAll(
3✔
446
                        edgeIndex, func(k, edgeInfoBytes []byte) error {
6✔
447
                                var chanID [8]byte
3✔
448
                                copy(chanID[:], k)
3✔
449

3✔
450
                                edgeInfoReader := bytes.NewReader(edgeInfoBytes)
3✔
451
                                info, err := deserializeChanEdgeInfo(
3✔
452
                                        edgeInfoReader,
3✔
453
                                )
3✔
454
                                if err != nil {
3✔
455
                                        return err
×
456
                                }
×
457

458
                                policy1 := channelMap[channelMapKey{
3✔
459
                                        nodeKey: info.NodeKey1Bytes,
3✔
460
                                        chanID:  chanID,
3✔
461
                                }]
3✔
462

3✔
463
                                policy2 := channelMap[channelMapKey{
3✔
464
                                        nodeKey: info.NodeKey2Bytes,
3✔
465
                                        chanID:  chanID,
3✔
466
                                }]
3✔
467

3✔
468
                                return cb(&info, policy1, policy2)
3✔
469
                        },
470
                )
471
        }, func() {})
3✔
472
}
473

474
// ForEachChannelCacheable iterates through all the channel edges stored within
475
// the graph and invokes the passed callback for each edge. The callback takes
476
// two edges as since this is a directed graph, both the in/out edges are
477
// visited. If the callback returns an error, then the transaction is aborted
478
// and the iteration stops early.
479
//
480
// NOTE: If an edge can't be found, or wasn't advertised, then a nil pointer
481
// for that particular channel edge routing policy will be passed into the
482
// callback.
483
//
484
// NOTE: this method is like ForEachChannel but fetches only the data required
485
// for the graph cache.
486
func (c *KVStore) ForEachChannelCacheable(cb func(*models.CachedEdgeInfo,
487
        *models.CachedEdgePolicy, *models.CachedEdgePolicy) error) error {
3✔
488

3✔
489
        return c.db.View(func(tx kvdb.RTx) error {
6✔
490
                edges := tx.ReadBucket(edgeBucket)
3✔
491
                if edges == nil {
3✔
492
                        return ErrGraphNoEdgesFound
×
493
                }
×
494

495
                // First, load all edges in memory indexed by node and channel
496
                // id.
497
                channelMap, err := getChannelMap(edges)
3✔
498
                if err != nil {
3✔
499
                        return err
×
500
                }
×
501

502
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
3✔
503
                if edgeIndex == nil {
3✔
504
                        return ErrGraphNoEdgesFound
×
505
                }
×
506

507
                // Load edge index, recombine each channel with the policies
508
                // loaded above and invoke the callback.
509
                return kvdb.ForAll(
3✔
510
                        edgeIndex, func(k, edgeInfoBytes []byte) error {
6✔
511
                                var chanID [8]byte
3✔
512
                                copy(chanID[:], k)
3✔
513

3✔
514
                                edgeInfoReader := bytes.NewReader(edgeInfoBytes)
3✔
515
                                info, err := deserializeChanEdgeInfo(
3✔
516
                                        edgeInfoReader,
3✔
517
                                )
3✔
518
                                if err != nil {
3✔
519
                                        return err
×
520
                                }
×
521

522
                                key1 := channelMapKey{
3✔
523
                                        nodeKey: info.NodeKey1Bytes,
3✔
524
                                        chanID:  chanID,
3✔
525
                                }
3✔
526
                                policy1 := channelMap[key1]
3✔
527

3✔
528
                                key2 := channelMapKey{
3✔
529
                                        nodeKey: info.NodeKey2Bytes,
3✔
530
                                        chanID:  chanID,
3✔
531
                                }
3✔
532
                                policy2 := channelMap[key2]
3✔
533

3✔
534
                                // We now create the cached edge policies, but
3✔
535
                                // only when the above policies are found in the
3✔
536
                                // `channelMap`.
3✔
537
                                var (
3✔
538
                                        cachedPolicy1 *models.CachedEdgePolicy
3✔
539
                                        cachedPolicy2 *models.CachedEdgePolicy
3✔
540
                                )
3✔
541

3✔
542
                                if policy1 != nil {
6✔
543
                                        cachedPolicy1 = models.NewCachedPolicy(
3✔
544
                                                policy1,
3✔
545
                                        )
3✔
546
                                }
3✔
547

548
                                if policy2 != nil {
6✔
549
                                        cachedPolicy2 = models.NewCachedPolicy(
3✔
550
                                                policy2,
3✔
551
                                        )
3✔
552
                                }
3✔
553

554
                                return cb(
3✔
555
                                        models.NewCachedEdge(&info),
3✔
556
                                        cachedPolicy1, cachedPolicy2,
3✔
557
                                )
3✔
558
                        },
559
                )
560
        }, func() {})
3✔
561
}
562

563
// forEachNodeDirectedChannel iterates through all channels of a given node,
564
// executing the passed callback on the directed edge representing the channel
565
// and its incoming policy. If the callback returns an error, then the iteration
566
// is halted with the error propagated back up to the caller. An optional read
567
// transaction may be provided. If none is provided, a new one will be created.
568
//
569
// Unknown policies are passed into the callback as nil values.
570
func (c *KVStore) forEachNodeDirectedChannel(tx kvdb.RTx,
571
        node route.Vertex, cb func(channel *DirectedChannel) error) error {
3✔
572

3✔
573
        // Fallback that uses the database.
3✔
574
        toNodeCallback := func() route.Vertex {
6✔
575
                return node
3✔
576
        }
3✔
577
        toNodeFeatures, err := c.fetchNodeFeatures(tx, node)
3✔
578
        if err != nil {
3✔
579
                return err
×
580
        }
×
581

582
        dbCallback := func(tx kvdb.RTx, e *models.ChannelEdgeInfo, p1,
3✔
583
                p2 *models.ChannelEdgePolicy) error {
6✔
584

3✔
585
                var cachedInPolicy *models.CachedEdgePolicy
3✔
586
                if p2 != nil {
6✔
587
                        cachedInPolicy = models.NewCachedPolicy(p2)
3✔
588
                        cachedInPolicy.ToNodePubKey = toNodeCallback
3✔
589
                        cachedInPolicy.ToNodeFeatures = toNodeFeatures
3✔
590
                }
3✔
591

592
                directedChannel := &DirectedChannel{
3✔
593
                        ChannelID:    e.ChannelID,
3✔
594
                        IsNode1:      node == e.NodeKey1Bytes,
3✔
595
                        OtherNode:    e.NodeKey2Bytes,
3✔
596
                        Capacity:     e.Capacity,
3✔
597
                        OutPolicySet: p1 != nil,
3✔
598
                        InPolicy:     cachedInPolicy,
3✔
599
                }
3✔
600

3✔
601
                if p1 != nil {
6✔
602
                        p1.InboundFee.WhenSome(func(fee lnwire.Fee) {
3✔
UNCOV
603
                                directedChannel.InboundFee = fee
×
UNCOV
604
                        })
×
605
                }
606

607
                if node == e.NodeKey2Bytes {
6✔
608
                        directedChannel.OtherNode = e.NodeKey1Bytes
3✔
609
                }
3✔
610

611
                return cb(directedChannel)
3✔
612
        }
613

614
        return nodeTraversal(tx, node[:], c.db, dbCallback)
3✔
615
}
616

617
// fetchNodeFeatures returns the features of a given node. If no features are
618
// known for the node, an empty feature vector is returned. An optional read
619
// transaction may be provided. If none is provided, a new one will be created.
620
func (c *KVStore) fetchNodeFeatures(tx kvdb.RTx,
621
        node route.Vertex) (*lnwire.FeatureVector, error) {
3✔
622

3✔
623
        // Fallback that uses the database.
3✔
624
        targetNode, err := c.FetchLightningNodeTx(tx, node)
3✔
625
        switch {
3✔
626
        // If the node exists and has features, return them directly.
627
        case err == nil:
3✔
628
                return targetNode.Features, nil
3✔
629

630
        // If we couldn't find a node announcement, populate a blank feature
631
        // vector.
UNCOV
632
        case errors.Is(err, ErrGraphNodeNotFound):
×
UNCOV
633
                return lnwire.EmptyFeatureVector(), nil
×
634

635
        // Otherwise, bubble the error up.
636
        default:
×
637
                return nil, err
×
638
        }
639
}
640

641
// ForEachNodeDirectedChannel iterates through all channels of a given node,
642
// executing the passed callback on the directed edge representing the channel
643
// and its incoming policy. If the callback returns an error, then the iteration
644
// is halted with the error propagated back up to the caller.
645
//
646
// Unknown policies are passed into the callback as nil values.
647
//
648
// NOTE: this is part of the graphdb.NodeTraverser interface.
649
func (c *KVStore) ForEachNodeDirectedChannel(nodePub route.Vertex,
650
        cb func(channel *DirectedChannel) error) error {
3✔
651

3✔
652
        return c.forEachNodeDirectedChannel(nil, nodePub, cb)
3✔
653
}
3✔
654

655
// FetchNodeFeatures returns the features of the given node. If no features are
656
// known for the node, an empty feature vector is returned.
657
//
658
// NOTE: this is part of the graphdb.NodeTraverser interface.
659
func (c *KVStore) FetchNodeFeatures(nodePub route.Vertex) (
660
        *lnwire.FeatureVector, error) {
3✔
661

3✔
662
        return c.fetchNodeFeatures(nil, nodePub)
3✔
663
}
3✔
664

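
A sketch of the two NodeTraverser-style methods defined above; store and nodePub are assumed to exist in the caller.

// Hypothetical per-node traversal using the methods above.
feats, err := store.FetchNodeFeatures(nodePub)
if err != nil {
        return err
}
_ = feats // Empty (not nil) when no features are known for the node.

err = store.ForEachNodeDirectedChannel(nodePub, func(ch *DirectedChannel) error {
        // ch.InPolicy may be nil when the incoming policy is unknown.
        _ = ch.ChannelID
        return nil
})
if err != nil {
        return err
}
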
665
// ForEachNodeCached is similar to forEachNode, but it returns DirectedChannel
666
// data to the call-back.
667
//
668
// NOTE: The callback contents MUST not be modified.
669
func (c *KVStore) ForEachNodeCached(cb func(node route.Vertex,
UNCOV
670
        chans map[uint64]*DirectedChannel) error) error {
×
UNCOV
671

×
UNCOV
672
        // Otherwise call back to a version that uses the database directly.
×
UNCOV
673
        // We'll iterate over each node, then the set of channels for each
×
UNCOV
674
        // node, and construct a similar callback function signature as the
×
UNCOV
675
        // main function expects.
×
NEW
676
        return forEachNode(c.db, func(tx kvdb.RTx,
×
UNCOV
677
                node *models.LightningNode) error {
×
UNCOV
678

×
UNCOV
679
                channels := make(map[uint64]*DirectedChannel)
×
UNCOV
680

×
UNCOV
681
                err := c.forEachNodeChannelTx(tx, node.PubKeyBytes,
×
UNCOV
682
                        func(tx kvdb.RTx, e *models.ChannelEdgeInfo,
×
UNCOV
683
                                p1 *models.ChannelEdgePolicy,
×
UNCOV
684
                                p2 *models.ChannelEdgePolicy) error {
×
UNCOV
685

×
UNCOV
686
                                toNodeCallback := func() route.Vertex {
×
687
                                        return node.PubKeyBytes
×
688
                                }
×
UNCOV
689
                                toNodeFeatures, err := c.fetchNodeFeatures(
×
UNCOV
690
                                        tx, node.PubKeyBytes,
×
UNCOV
691
                                )
×
UNCOV
692
                                if err != nil {
×
693
                                        return err
×
694
                                }
×
695

UNCOV
696
                                var cachedInPolicy *models.CachedEdgePolicy
×
UNCOV
697
                                if p2 != nil {
×
UNCOV
698
                                        cachedInPolicy =
×
UNCOV
699
                                                models.NewCachedPolicy(p2)
×
UNCOV
700
                                        cachedInPolicy.ToNodePubKey =
×
UNCOV
701
                                                toNodeCallback
×
UNCOV
702
                                        cachedInPolicy.ToNodeFeatures =
×
UNCOV
703
                                                toNodeFeatures
×
UNCOV
704
                                }
×
705

UNCOV
706
                                directedChannel := &DirectedChannel{
×
UNCOV
707
                                        ChannelID: e.ChannelID,
×
UNCOV
708
                                        IsNode1: node.PubKeyBytes ==
×
UNCOV
709
                                                e.NodeKey1Bytes,
×
UNCOV
710
                                        OtherNode:    e.NodeKey2Bytes,
×
UNCOV
711
                                        Capacity:     e.Capacity,
×
UNCOV
712
                                        OutPolicySet: p1 != nil,
×
UNCOV
713
                                        InPolicy:     cachedInPolicy,
×
UNCOV
714
                                }
×
UNCOV
715

×
UNCOV
716
                                if node.PubKeyBytes == e.NodeKey2Bytes {
×
UNCOV
717
                                        directedChannel.OtherNode =
×
UNCOV
718
                                                e.NodeKey1Bytes
×
UNCOV
719
                                }
×
720

UNCOV
721
                                channels[e.ChannelID] = directedChannel
×
UNCOV
722

×
UNCOV
723
                                return nil
×
724
                        })
UNCOV
725
                if err != nil {
×
726
                        return err
×
727
                }
×
728

UNCOV
729
                return cb(node.PubKeyBytes, channels)
×
730
        })
731
}
732

733
// DisabledChannelIDs returns the channel ids of disabled channels.
734
// A channel is disabled when both of the associated ChannelEdgePolicies
735
// have their disabled bit on.
UNCOV
736
func (c *KVStore) DisabledChannelIDs() ([]uint64, error) {
×
UNCOV
737
        var disabledChanIDs []uint64
×
UNCOV
738
        var chanEdgeFound map[uint64]struct{}
×
UNCOV
739

×
UNCOV
740
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
×
UNCOV
741
                edges := tx.ReadBucket(edgeBucket)
×
UNCOV
742
                if edges == nil {
×
743
                        return ErrGraphNoEdgesFound
×
744
                }
×
745

UNCOV
746
                disabledEdgePolicyIndex := edges.NestedReadBucket(
×
UNCOV
747
                        disabledEdgePolicyBucket,
×
UNCOV
748
                )
×
UNCOV
749
                if disabledEdgePolicyIndex == nil {
×
UNCOV
750
                        return nil
×
UNCOV
751
                }
×
752

753
                // We iterate over all disabled policies and we add each channel
754
                // that has more than one disabled policy to disabledChanIDs
755
                // array.
UNCOV
756
                return disabledEdgePolicyIndex.ForEach(
×
UNCOV
757
                        func(k, v []byte) error {
×
UNCOV
758
                                chanID := byteOrder.Uint64(k[:8])
×
UNCOV
759
                                _, edgeFound := chanEdgeFound[chanID]
×
UNCOV
760
                                if edgeFound {
×
UNCOV
761
                                        delete(chanEdgeFound, chanID)
×
UNCOV
762
                                        disabledChanIDs = append(
×
UNCOV
763
                                                disabledChanIDs, chanID,
×
UNCOV
764
                                        )
×
UNCOV
765

×
UNCOV
766
                                        return nil
×
UNCOV
767
                                }
×
768

UNCOV
769
                                chanEdgeFound[chanID] = struct{}{}
×
UNCOV
770

×
UNCOV
771
                                return nil
×
772
                        },
773
                )
UNCOV
774
        }, func() {
×
UNCOV
775
                disabledChanIDs = nil
×
UNCOV
776
                chanEdgeFound = make(map[uint64]struct{})
×
UNCOV
777
        })
×
UNCOV
778
        if err != nil {
×
779
                return nil, err
×
780
        }
×
781

UNCOV
782
        return disabledChanIDs, nil
×
783
}
784

785
// ForEachNode iterates through all the stored vertices/nodes in the graph,
786
// executing the passed callback with each node encountered. If the callback
787
// returns an error, then the transaction is aborted and the iteration stops
788
// early. Any operations performed on the NodeTx passed to the call-back are
789
// executed under the same read transaction and so, methods on the NodeTx object
790
// _MUST_ only be called from within the call-back.
791
func (c *KVStore) ForEachNode(cb func(tx NodeRTx) error) error {
3✔
792
        return forEachNode(c.db, func(tx kvdb.RTx,
3✔
793
                node *models.LightningNode) error {
6✔
794

3✔
795
                return cb(newChanGraphNodeTx(tx, c, node))
3✔
796
        })
3✔
797
}
798

799
// forEachNode iterates through all the stored vertices/nodes in the graph,
800
// executing the passed callback with each node encountered. If the callback
801
// returns an error, then the transaction is aborted and the iteration stops
802
// early.
803
//
804
// TODO(roasbeef): add iterator interface to allow for memory efficient graph
805
// traversal when graph gets mega.
806
func forEachNode(db kvdb.Backend,
807
        cb func(kvdb.RTx, *models.LightningNode) error) error {
3✔
808

3✔
809
        traversal := func(tx kvdb.RTx) error {
6✔
810
                // First grab the nodes bucket which stores the mapping from
3✔
811
                // pubKey to node information.
3✔
812
                nodes := tx.ReadBucket(nodeBucket)
3✔
813
                if nodes == nil {
3✔
814
                        return ErrGraphNotFound
×
815
                }
×
816

817
                return nodes.ForEach(func(pubKey, nodeBytes []byte) error {
6✔
818
                        // If this is the source key, then we skip this
3✔
819
                        // iteration as the value for this key is a pubKey
3✔
820
                        // rather than raw node information.
3✔
821
                        if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 {
6✔
822
                                return nil
3✔
823
                        }
3✔
824

825
                        nodeReader := bytes.NewReader(nodeBytes)
3✔
826
                        node, err := deserializeLightningNode(nodeReader)
3✔
827
                        if err != nil {
3✔
828
                                return err
×
829
                        }
×
830

831
                        // Execute the callback, the transaction will abort if
832
                        // this returns an error.
833
                        return cb(tx, &node)
3✔
834
                })
835
        }
836

837
        return kvdb.View(db, traversal, func() {})
6✔
838
}
839

840
// ForEachNodeCacheable iterates through all the stored vertices/nodes in the
841
// graph, executing the passed callback with each node encountered. If the
842
// callback returns an error, then the transaction is aborted and the iteration
843
// stops early.
844
func (c *KVStore) ForEachNodeCacheable(cb func(route.Vertex,
845
        *lnwire.FeatureVector) error) error {
3✔
846

3✔
847
        traversal := func(tx kvdb.RTx) error {
6✔
848
                // First grab the nodes bucket which stores the mapping from
3✔
849
                // pubKey to node information.
3✔
850
                nodes := tx.ReadBucket(nodeBucket)
3✔
851
                if nodes == nil {
3✔
852
                        return ErrGraphNotFound
×
853
                }
×
854

855
                return nodes.ForEach(func(pubKey, nodeBytes []byte) error {
6✔
856
                        // If this is the source key, then we skip this
3✔
857
                        // iteration as the value for this key is a pubKey
3✔
858
                        // rather than raw node information.
3✔
859
                        if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 {
6✔
860
                                return nil
3✔
861
                        }
3✔
862

863
                        nodeReader := bytes.NewReader(nodeBytes)
3✔
864
                        node, features, err := deserializeLightningNodeCacheable( //nolint:ll
3✔
865
                                nodeReader,
3✔
866
                        )
3✔
867
                        if err != nil {
3✔
868
                                return err
×
869
                        }
×
870

871
                        // Execute the callback, the transaction will abort if
872
                        // this returns an error.
873
                        return cb(node, features)
3✔
874
                })
875
        }
876

877
        return kvdb.View(c.db, traversal, func() {})
6✔
878
}
879

880
// SourceNode returns the source node of the graph. The source node is treated
881
// as the center node within a star-graph. This method may be used to kick off
882
// a path finding algorithm in order to explore the reachability of another
883
// node based off the source node.
884
func (c *KVStore) SourceNode(_ context.Context) (*models.LightningNode, error) {
3✔
885
        return getSourceNode(c.db)
3✔
886
}
3✔
887

888
func getSourceNode(db kvdb.Backend) (*models.LightningNode, error) {
3✔
889
        var source *models.LightningNode
3✔
890
        err := kvdb.View(db, func(tx kvdb.RTx) error {
6✔
891
                // First grab the nodes bucket which stores the mapping from
3✔
892
                // pubKey to node information.
3✔
893
                nodes := tx.ReadBucket(nodeBucket)
3✔
894
                if nodes == nil {
3✔
895
                        return ErrGraphNotFound
×
896
                }
×
897

898
                node, err := sourceNode(nodes)
3✔
899
                if err != nil {
6✔
900
                        return err
3✔
901
                }
3✔
902
                source = node
3✔
903

3✔
904
                return nil
3✔
905
        }, func() {
3✔
906
                source = nil
3✔
907
        })
3✔
908
        if err != nil {
6✔
909
                return nil, err
3✔
910
        }
3✔
911

912
        return source, nil
3✔
913
}
914

915
// sourceNode uses an existing database transaction and returns the source node
916
// of the graph. The source node is treated as the center node within a
917
// star-graph. This method may be used to kick off a path finding algorithm in
918
// order to explore the reachability of another node based off the source node.
919
func sourceNode(nodes kvdb.RBucket) (*models.LightningNode, error) {
3✔
920
        selfPub := nodes.Get(sourceKey)
3✔
921
        if selfPub == nil {
6✔
922
                return nil, ErrSourceNodeNotSet
3✔
923
        }
3✔
924

925
        // With the pubKey of the source node retrieved, we're able to
926
        // fetch the full node information.
927
        node, err := fetchLightningNode(nodes, selfPub)
3✔
928
        if err != nil {
3✔
929
                return nil, err
×
930
        }
×
931

932
        return &node, nil
3✔
933
}
934

935
// SetSourceNode sets the source node within the graph database. The source
936
// node is to be used as the center of a star-graph within path finding
937
// algorithms.
938
func (c *KVStore) SetSourceNode(_ context.Context,
939
        node *models.LightningNode) error {
3✔
940

3✔
941
        nodePubBytes := node.PubKeyBytes[:]
3✔
942

3✔
943
        return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
6✔
944
                // First grab the nodes bucket which stores the mapping from
3✔
945
                // pubKey to node information.
3✔
946
                nodes, err := tx.CreateTopLevelBucket(nodeBucket)
3✔
947
                if err != nil {
3✔
948
                        return err
×
949
                }
×
950

951
                // Next we create the mapping from source to the targeted
952
                // public key.
953
                if err := nodes.Put(sourceKey, nodePubBytes); err != nil {
3✔
954
                        return err
×
955
                }
×
956

957
                // Finally, we commit the information of the lightning node
958
                // itself.
959
                return addLightningNode(tx, node)
3✔
960
        }, func() {})
3✔
961
}
962

963
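
A small sketch tying SetSourceNode and SourceNode together; store, ctx and selfNode (a *models.LightningNode describing our own node) are assumed.

// Hypothetical bootstrap of the source node used as the path-finding center.
if err := store.SetSourceNode(ctx, selfNode); err != nil {
        return err
}
self, err := store.SourceNode(ctx)
if err != nil {
        return err
}
_ = self
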
// AddLightningNode adds a vertex/node to the graph database. If the node is not
964
// in the database from before, this will add a new, unconnected one to the
965
// graph. If it is present from before, this will update that node's
966
// information. Note that this method is expected to only be called to update an
967
// already present node from a node announcement, or to insert a node found in a
968
// channel update.
969
//
970
// TODO(roasbeef): also need sig of announcement.
971
func (c *KVStore) AddLightningNode(ctx context.Context,
972
        node *models.LightningNode, opts ...batch.SchedulerOption) error {
3✔
973

3✔
974
        r := &batch.Request[kvdb.RwTx]{
3✔
975
                Opts: batch.NewSchedulerOptions(opts...),
3✔
976
                Do: func(tx kvdb.RwTx) error {
6✔
977
                        return addLightningNode(tx, node)
3✔
978
                },
3✔
979
        }
980

981
        return c.nodeScheduler.Execute(ctx, r)
3✔
982
}
983

984
func addLightningNode(tx kvdb.RwTx, node *models.LightningNode) error {
3✔
985
        nodes, err := tx.CreateTopLevelBucket(nodeBucket)
3✔
986
        if err != nil {
3✔
987
                return err
×
988
        }
×
989

990
        aliases, err := nodes.CreateBucketIfNotExists(aliasIndexBucket)
3✔
991
        if err != nil {
3✔
992
                return err
×
993
        }
×
994

995
        updateIndex, err := nodes.CreateBucketIfNotExists(
3✔
996
                nodeUpdateIndexBucket,
3✔
997
        )
3✔
998
        if err != nil {
3✔
999
                return err
×
1000
        }
×
1001

1002
        return putLightningNode(nodes, aliases, updateIndex, node)
3✔
1003
}
1004

1005
// LookupAlias attempts to return the alias as advertised by the target node.
1006
// TODO(roasbeef): currently assumes that aliases are unique...
1007
func (c *KVStore) LookupAlias(_ context.Context,
1008
        pub *btcec.PublicKey) (string, error) {
3✔
1009

3✔
1010
        var alias string
3✔
1011

3✔
1012
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
6✔
1013
                nodes := tx.ReadBucket(nodeBucket)
3✔
1014
                if nodes == nil {
3✔
1015
                        return ErrGraphNodesNotFound
×
1016
                }
×
1017

1018
                aliases := nodes.NestedReadBucket(aliasIndexBucket)
3✔
1019
                if aliases == nil {
3✔
1020
                        return ErrGraphNodesNotFound
×
1021
                }
×
1022

1023
                nodePub := pub.SerializeCompressed()
3✔
1024
                a := aliases.Get(nodePub)
3✔
1025
                if a == nil {
3✔
UNCOV
1026
                        return ErrNodeAliasNotFound
×
UNCOV
1027
                }
×
1028

1029
                // TODO(roasbeef): should actually be using the utf-8
1030
                // package...
1031
                alias = string(a)
3✔
1032

3✔
1033
                return nil
3✔
1034
        }, func() {
3✔
1035
                alias = ""
3✔
1036
        })
3✔
1037
        if err != nil {
3✔
UNCOV
1038
                return "", err
×
UNCOV
1039
        }
×
1040

1041
        return alias, nil
3✔
1042
}
1043

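
A brief sketch of LookupAlias; store, ctx and pub (a *btcec.PublicKey) are assumed, and ErrNodeAliasNotFound is the error returned above when no alias is stored.

// Hypothetical alias lookup for a node's compressed public key.
alias, err := store.LookupAlias(ctx, pub)
switch {
case errors.Is(err, ErrNodeAliasNotFound):
        // No alias recorded for this node.
case err != nil:
        return err
default:
        _ = alias
}
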
1044
// DeleteLightningNode starts a new database transaction to remove a vertex/node
1045
// from the database according to the node's public key.
1046
func (c *KVStore) DeleteLightningNode(_ context.Context,
UNCOV
1047
        nodePub route.Vertex) error {
×
UNCOV
1048

×
UNCOV
1049
        // TODO(roasbeef): ensure dangling edges are removed...
×
UNCOV
1050
        return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
×
UNCOV
1051
                nodes := tx.ReadWriteBucket(nodeBucket)
×
UNCOV
1052
                if nodes == nil {
×
1053
                        return ErrGraphNodeNotFound
×
1054
                }
×
1055

UNCOV
1056
                return c.deleteLightningNode(nodes, nodePub[:])
×
UNCOV
1057
        }, func() {})
×
1058
}
1059

1060
// deleteLightningNode uses an existing database transaction to remove a
1061
// vertex/node from the database according to the node's public key.
1062
func (c *KVStore) deleteLightningNode(nodes kvdb.RwBucket,
1063
        compressedPubKey []byte) error {
3✔
1064

3✔
1065
        aliases := nodes.NestedReadWriteBucket(aliasIndexBucket)
3✔
1066
        if aliases == nil {
3✔
1067
                return ErrGraphNodesNotFound
×
1068
        }
×
1069

1070
        if err := aliases.Delete(compressedPubKey); err != nil {
3✔
1071
                return err
×
1072
        }
×
1073

1074
        // Before we delete the node, we'll fetch its current state so we can
1075
        // determine when its last update was to clear out the node update
1076
        // index.
1077
        node, err := fetchLightningNode(nodes, compressedPubKey)
3✔
1078
        if err != nil {
3✔
UNCOV
1079
                return err
×
UNCOV
1080
        }
×
1081

1082
        if err := nodes.Delete(compressedPubKey); err != nil {
3✔
1083
                return err
×
1084
        }
×
1085

1086
        // Finally, we'll delete the index entry for the node within the
1087
        // nodeUpdateIndexBucket as this node is no longer active, so we don't
1088
        // need to track its last update.
1089
        nodeUpdateIndex := nodes.NestedReadWriteBucket(nodeUpdateIndexBucket)
3✔
1090
        if nodeUpdateIndex == nil {
3✔
1091
                return ErrGraphNodesNotFound
×
1092
        }
×
1093

1094
        // In order to delete the entry, we'll need to reconstruct the key for
1095
        // its last update.
1096
        updateUnix := uint64(node.LastUpdate.Unix())
3✔
1097
        var indexKey [8 + 33]byte
3✔
1098
        byteOrder.PutUint64(indexKey[:8], updateUnix)
3✔
1099
        copy(indexKey[8:], compressedPubKey)
3✔
1100

3✔
1101
        return nodeUpdateIndex.Delete(indexKey[:])
3✔
1102
}
1103

1104
// AddChannelEdge adds a new (undirected, blank) edge to the graph database. An
1105
// undirected edge from the two target nodes is created. The information stored
1106
// denotes the static attributes of the channel, such as the channelID, the keys
1107
// involved in creation of the channel, and the set of features that the channel
1108
// supports. The chanPoint and chanID are used to uniquely identify the edge
1109
// globally within the database.
1110
func (c *KVStore) AddChannelEdge(ctx context.Context,
1111
        edge *models.ChannelEdgeInfo, opts ...batch.SchedulerOption) error {
3✔
1112

3✔
1113
        var alreadyExists bool
3✔
1114
        r := &batch.Request[kvdb.RwTx]{
3✔
1115
                Opts: batch.NewSchedulerOptions(opts...),
3✔
1116
                Reset: func() {
6✔
1117
                        alreadyExists = false
3✔
1118
                },
3✔
1119
                Do: func(tx kvdb.RwTx) error {
3✔
1120
                        err := c.addChannelEdge(tx, edge)
3✔
1121

3✔
1122
                        // Silence ErrEdgeAlreadyExist so that the batch can
3✔
1123
                        // succeed, but propagate the error via local state.
3✔
1124
                        if errors.Is(err, ErrEdgeAlreadyExist) {
3✔
UNCOV
1125
                                alreadyExists = true
×
UNCOV
1126
                                return nil
×
UNCOV
1127
                        }
×
1128

1129
                        return err
3✔
1130
                },
1131
                OnCommit: func(err error) error {
3✔
1132
                        switch {
3✔
1133
                        case err != nil:
×
1134
                                return err
×
UNCOV
1135
                        case alreadyExists:
×
UNCOV
1136
                                return ErrEdgeAlreadyExist
×
1137
                        default:
3✔
1138
                                c.rejectCache.remove(edge.ChannelID)
3✔
1139
                                c.chanCache.remove(edge.ChannelID)
3✔
1140
                                return nil
3✔
1141
                        }
1142
                },
1143
        }
1144

1145
        return c.chanScheduler.Execute(ctx, r)
3✔
1146
}
1147

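
A caller-side sketch of AddChannelEdge; store, ctx and edgeInfo (a *models.ChannelEdgeInfo) are assumed. ErrEdgeAlreadyExist is surfaced via OnCommit above, so idempotent callers can treat it as a non-fatal outcome.

// Hypothetical insertion of a new channel edge.
err := store.AddChannelEdge(ctx, edgeInfo)
switch {
case errors.Is(err, ErrEdgeAlreadyExist):
        // The edge is already present; nothing further to do.
case err != nil:
        return err
}
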
1148
// addChannelEdge is the private form of AddChannelEdge that allows callers to
1149
// utilize an existing db transaction.
1150
func (c *KVStore) addChannelEdge(tx kvdb.RwTx,
1151
        edge *models.ChannelEdgeInfo) error {
3✔
1152

3✔
1153
        // Construct the channel's primary key which is the 8-byte channel ID.
3✔
1154
        var chanKey [8]byte
3✔
1155
        binary.BigEndian.PutUint64(chanKey[:], edge.ChannelID)
3✔
1156

3✔
1157
        nodes, err := tx.CreateTopLevelBucket(nodeBucket)
3✔
1158
        if err != nil {
3✔
1159
                return err
×
1160
        }
×
1161
        edges, err := tx.CreateTopLevelBucket(edgeBucket)
3✔
1162
        if err != nil {
3✔
1163
                return err
×
1164
        }
×
1165
        edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket)
3✔
1166
        if err != nil {
3✔
1167
                return err
×
1168
        }
×
1169
        chanIndex, err := edges.CreateBucketIfNotExists(channelPointBucket)
3✔
1170
        if err != nil {
3✔
1171
                return err
×
1172
        }
×
1173

1174
        // First, attempt to check if this edge has already been created. If
1175
        // so, then we can exit early as this method is meant to be idempotent.
1176
        if edgeInfo := edgeIndex.Get(chanKey[:]); edgeInfo != nil {
3✔
UNCOV
1177
                return ErrEdgeAlreadyExist
×
UNCOV
1178
        }
×
1179

1180
        // Before we insert the channel into the database, we'll ensure that
1181
        // both nodes already exist in the channel graph. If either node
1182
        // doesn't, then we'll insert a "shell" node that just includes its
1183
        // public key, so subsequent validation and queries can work properly.
1184
        _, node1Err := fetchLightningNode(nodes, edge.NodeKey1Bytes[:])
3✔
1185
        switch {
3✔
1186
        case errors.Is(node1Err, ErrGraphNodeNotFound):
3✔
1187
                node1Shell := models.LightningNode{
3✔
1188
                        PubKeyBytes:          edge.NodeKey1Bytes,
3✔
1189
                        HaveNodeAnnouncement: false,
3✔
1190
                }
3✔
1191
                err := addLightningNode(tx, &node1Shell)
3✔
1192
                if err != nil {
3✔
1193
                        return fmt.Errorf("unable to create shell node "+
×
1194
                                "for: %x: %w", edge.NodeKey1Bytes, err)
×
1195
                }
×
1196
        case node1Err != nil:
×
1197
                return node1Err
×
1198
        }
1199

1200
        _, node2Err := fetchLightningNode(nodes, edge.NodeKey2Bytes[:])
3✔
1201
        switch {
3✔
1202
        case errors.Is(node2Err, ErrGraphNodeNotFound):
3✔
1203
                node2Shell := models.LightningNode{
3✔
1204
                        PubKeyBytes:          edge.NodeKey2Bytes,
3✔
1205
                        HaveNodeAnnouncement: false,
3✔
1206
                }
3✔
1207
                err := addLightningNode(tx, &node2Shell)
3✔
1208
                if err != nil {
3✔
1209
                        return fmt.Errorf("unable to create shell node "+
×
1210
                                "for: %x: %w", edge.NodeKey2Bytes, err)
×
1211
                }
×
1212
        case node2Err != nil:
×
1213
                return node2Err
×
1214
        }
1215

1216
        // If the edge hasn't been created yet, then we'll first add it to the
1217
        // edge index in order to associate the edge between two nodes and also
1218
        // store the static components of the channel.
1219
        if err := putChanEdgeInfo(edgeIndex, edge, chanKey); err != nil {
3✔
1220
                return err
×
1221
        }
×
1222

1223
        // Mark edge policies for both sides as unknown. This is to enable
1224
        // efficient incoming channel lookup for a node.
1225
        keys := []*[33]byte{
3✔
1226
                &edge.NodeKey1Bytes,
3✔
1227
                &edge.NodeKey2Bytes,
3✔
1228
        }
3✔
1229
        for _, key := range keys {
6✔
1230
                err := putChanEdgePolicyUnknown(edges, edge.ChannelID, key[:])
3✔
1231
                if err != nil {
3✔
1232
                        return err
×
1233
                }
×
1234
        }
1235

1236
        // Finally we add it to the channel index which maps channel points
1237
        // (outpoints) to the shorter channel ID's.
1238
        var b bytes.Buffer
3✔
1239
        if err := WriteOutpoint(&b, &edge.ChannelPoint); err != nil {
3✔
1240
                return err
×
1241
        }
×
1242

1243
        return chanIndex.Put(b.Bytes(), chanKey[:])
3✔
1244
}

// HasChannelEdge returns true if the database knows of a channel edge with the
// passed channel ID, and false otherwise. If an edge with that ID is found
// within the graph, then two time stamps representing the last time the edge
// was updated for both directed edges are returned along with the boolean. If
// it is not found, then the zombie index is checked and its result is returned
// as the second boolean.
func (c *KVStore) HasChannelEdge(
	chanID uint64) (time.Time, time.Time, bool, bool, error) {

	var (
		upd1Time time.Time
		upd2Time time.Time
		exists   bool
		isZombie bool
	)

	// We'll query the cache with the shared lock held to allow multiple
	// readers to access values in the cache concurrently if they exist.
	c.cacheMu.RLock()
	if entry, ok := c.rejectCache.get(chanID); ok {
		c.cacheMu.RUnlock()
		upd1Time = time.Unix(entry.upd1Time, 0)
		upd2Time = time.Unix(entry.upd2Time, 0)
		exists, isZombie = entry.flags.unpack()

		return upd1Time, upd2Time, exists, isZombie, nil
	}
	c.cacheMu.RUnlock()

	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()

	// The item was not found with the shared lock, so we'll acquire the
	// exclusive lock and check the cache again in case another method added
	// the entry to the cache while no lock was held.
	if entry, ok := c.rejectCache.get(chanID); ok {
		upd1Time = time.Unix(entry.upd1Time, 0)
		upd2Time = time.Unix(entry.upd2Time, 0)
		exists, isZombie = entry.flags.unpack()

		return upd1Time, upd2Time, exists, isZombie, nil
	}

	if err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrGraphNoEdgesFound
		}

		var channelID [8]byte
		byteOrder.PutUint64(channelID[:], chanID)

		// If the edge doesn't exist, then we'll also check our zombie
		// index.
		if edgeIndex.Get(channelID[:]) == nil {
			exists = false
			zombieIndex := edges.NestedReadBucket(zombieBucket)
			if zombieIndex != nil {
				isZombie, _, _ = isZombieEdge(
					zombieIndex, chanID,
				)
			}

			return nil
		}

		exists = true
		isZombie = false

		// If the channel has been found in the graph, then retrieve
		// the edges themselves so we can return the last updated
		// timestamps.
		nodes := tx.ReadBucket(nodeBucket)
		if nodes == nil {
			return ErrGraphNodeNotFound
		}

		e1, e2, err := fetchChanEdgePolicies(
			edgeIndex, edges, channelID[:],
		)
		if err != nil {
			return err
		}

		// As we may have only one of the edges populated, only set the
		// update time if the edge was found in the database.
		if e1 != nil {
			upd1Time = e1.LastUpdate
		}
		if e2 != nil {
			upd2Time = e2.LastUpdate
		}

		return nil
	}, func() {}); err != nil {
		return time.Time{}, time.Time{}, exists, isZombie, err
	}

	c.rejectCache.insert(chanID, rejectCacheEntry{
		upd1Time: upd1Time.Unix(),
		upd2Time: upd2Time.Unix(),
		flags:    packRejectFlags(exists, isZombie),
	})

	return upd1Time, upd2Time, exists, isZombie, nil
}
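
// NOTE: The following example is an illustrative sketch and is not part of
// the original file. It shows how a caller might interpret the return values
// of HasChannelEdge, assuming it already holds a *KVStore instance obtained
// elsewhere.
func exampleHasChannelEdge(store *KVStore, chanID uint64) {
	upd1, upd2, exists, isZombie, err := store.HasChannelEdge(chanID)
	if err != nil {
		log.Warnf("unable to query channel %d: %v", chanID, err)
		return
	}

	switch {
	case isZombie:
		// The channel is not in the graph, but is tracked in the
		// zombie index.
		log.Debugf("channel %d is a known zombie", chanID)

	case exists:
		// The channel is known; the timestamps report the last update
		// seen for each direction.
		log.Debugf("channel %d last updated at %v / %v", chanID,
			upd1, upd2)

	default:
		log.Debugf("channel %d is unknown", chanID)
	}
}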

// AddEdgeProof sets the proof of an existing edge in the graph database.
func (c *KVStore) AddEdgeProof(chanID lnwire.ShortChannelID,
	proof *models.ChannelAuthProof) error {

	// Construct the channel's primary key which is the 8-byte channel ID.
	var chanKey [8]byte
	binary.BigEndian.PutUint64(chanKey[:], chanID.ToUint64())

	return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
		edges := tx.ReadWriteBucket(edgeBucket)
		if edges == nil {
			return ErrEdgeNotFound
		}

		edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrEdgeNotFound
		}

		edge, err := fetchChanEdgeInfo(edgeIndex, chanKey[:])
		if err != nil {
			return err
		}

		edge.AuthProof = proof

		return putChanEdgeInfo(edgeIndex, &edge, chanKey)
	}, func() {})
}

const (
	// pruneTipBytes is the total size of the value which stores a prune
	// entry of the graph in the prune log. The "prune tip" is the last
	// entry in the prune log, and indicates if the channel graph is in
	// sync with the current UTXO state. The structure of the value
	// is: blockHash, taking 32 bytes total.
	pruneTipBytes = 32
)

// PruneGraph prunes newly closed channels from the channel graph in response
// to a new block being solved on the network. Any transactions which spend the
// funding output of any known channels within the graph will be deleted.
// Additionally, the "prune tip", or the last block which has been used to
// prune the graph is stored so callers can ensure the graph is fully in sync
// with the current UTXO state. A slice of channels that have been closed by
// the target block along with any pruned nodes are returned if the function
// succeeds without error.
func (c *KVStore) PruneGraph(spentOutputs []*wire.OutPoint,
	blockHash *chainhash.Hash, blockHeight uint32) (
	[]*models.ChannelEdgeInfo, []route.Vertex, error) {

	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()

	var (
		chansClosed []*models.ChannelEdgeInfo
		prunedNodes []route.Vertex
	)

	err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
		// First grab the edges bucket which houses the information
		// we'd like to delete.
		edges, err := tx.CreateTopLevelBucket(edgeBucket)
		if err != nil {
			return err
		}

		// Next grab the two edge indexes which will also need to be
		// updated.
		edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket)
		if err != nil {
			return err
		}
		chanIndex, err := edges.CreateBucketIfNotExists(
			channelPointBucket,
		)
		if err != nil {
			return err
		}
		nodes := tx.ReadWriteBucket(nodeBucket)
		if nodes == nil {
			return ErrSourceNodeNotSet
		}
		zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
		if err != nil {
			return err
		}

		// For each of the outpoints that have been spent within the
		// block, we attempt to delete them from the graph: if that
		// outpoint was a channel, then it has now been closed.
		for _, chanPoint := range spentOutputs {
			// TODO(roasbeef): load channel bloom filter, continue
			// if NOT if filter

			var opBytes bytes.Buffer
			err := WriteOutpoint(&opBytes, chanPoint)
			if err != nil {
				return err
			}

			// First attempt to see if the channel exists within
			// the database, if not, then we can exit early.
			chanID := chanIndex.Get(opBytes.Bytes())
			if chanID == nil {
				continue
			}

			// Attempt to delete the channel, an ErrEdgeNotFound
			// will be returned if that outpoint isn't known to be
			// a channel. If no error is returned, then a channel
			// was successfully pruned.
			edgeInfo, err := c.delChannelEdgeUnsafe(
				edges, edgeIndex, chanIndex, zombieIndex,
				chanID, false, false,
			)
			if err != nil && !errors.Is(err, ErrEdgeNotFound) {
				return err
			}

			chansClosed = append(chansClosed, edgeInfo)
		}

		metaBucket, err := tx.CreateTopLevelBucket(graphMetaBucket)
		if err != nil {
			return err
		}

		pruneBucket, err := metaBucket.CreateBucketIfNotExists(
			pruneLogBucket,
		)
		if err != nil {
			return err
		}

		// With the graph pruned, add a new entry to the prune log,
		// which can be used to check if the graph is fully synced with
		// the current UTXO state.
		var blockHeightBytes [4]byte
		byteOrder.PutUint32(blockHeightBytes[:], blockHeight)

		var newTip [pruneTipBytes]byte
		copy(newTip[:], blockHash[:])

		err = pruneBucket.Put(blockHeightBytes[:], newTip[:])
		if err != nil {
			return err
		}

		// Now that the graph has been pruned, we'll also attempt to
		// prune any nodes that have had a channel closed within the
		// latest block.
		prunedNodes, err = c.pruneGraphNodes(nodes, edgeIndex)

		return err
	}, func() {
		chansClosed = nil
		prunedNodes = nil
	})
	if err != nil {
		return nil, nil, err
	}

	for _, channel := range chansClosed {
		c.rejectCache.remove(channel.ChannelID)
		c.chanCache.remove(channel.ChannelID)
	}

	return chansClosed, prunedNodes, nil
}
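
// NOTE: Illustrative sketch only, not part of the original file. It shows how
// a block-processing caller might feed a newly connected block into
// PruneGraph, assuming it has already extracted the block's spent outpoints,
// hash, and height from its chain backend.
func examplePruneOnBlock(store *KVStore, spent []*wire.OutPoint,
	hash *chainhash.Hash, height uint32) error {

	closed, pruned, err := store.PruneGraph(spent, hash, height)
	if err != nil {
		return err
	}

	log.Infof("Block %d closed %d channels and pruned %d nodes",
		height, len(closed), len(pruned))

	return nil
}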

// PruneGraphNodes is a garbage collection method which attempts to prune out
// any nodes from the channel graph that are currently unconnected. This
// ensures that we only maintain a graph of reachable nodes. In the event that
// a pruned node gains more channels, it will be re-added to the graph.
func (c *KVStore) PruneGraphNodes() ([]route.Vertex, error) {
	var prunedNodes []route.Vertex
	err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
		nodes := tx.ReadWriteBucket(nodeBucket)
		if nodes == nil {
			return ErrGraphNodesNotFound
		}
		edges := tx.ReadWriteBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNotFound
		}
		edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrGraphNoEdgesFound
		}

		var err error
		prunedNodes, err = c.pruneGraphNodes(nodes, edgeIndex)
		if err != nil {
			return err
		}

		return nil
	}, func() {
		prunedNodes = nil
	})

	return prunedNodes, err
}
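
// NOTE: Illustrative sketch only, not part of the original file. Periodic
// garbage collection of unconnected nodes could be driven like this; how
// often to run it is an assumption of this example, not something the store
// prescribes.
func exampleNodeGC(store *KVStore) {
	pruned, err := store.PruneGraphNodes()
	if err != nil {
		log.Warnf("node GC failed: %v", err)
		return
	}

	for _, vertex := range pruned {
		log.Debugf("removed unconnected node %x", vertex[:])
	}
}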

// pruneGraphNodes attempts to remove any nodes from the graph who have had a
// channel closed within the current block. If the node still has existing
// channels in the graph, this will act as a no-op.
func (c *KVStore) pruneGraphNodes(nodes kvdb.RwBucket,
	edgeIndex kvdb.RwBucket) ([]route.Vertex, error) {

	log.Trace("Pruning nodes from graph with no open channels")

	// We'll retrieve the graph's source node to ensure we don't remove it
	// even if it no longer has any open channels.
	sourceNode, err := sourceNode(nodes)
	if err != nil {
		return nil, err
	}

	// We'll use this map to keep count of the number of references to a
	// node in the graph. A node should only be removed once it has no more
	// references in the graph.
	nodeRefCounts := make(map[[33]byte]int)
	err = nodes.ForEach(func(pubKey, nodeBytes []byte) error {
		// If this is the source key, then we skip this
		// iteration as the value for this key is a pubKey
		// rather than raw node information.
		if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 {
			return nil
		}

		var nodePub [33]byte
		copy(nodePub[:], pubKey)
		nodeRefCounts[nodePub] = 0

		return nil
	})
	if err != nil {
		return nil, err
	}

	// To ensure we never delete the source node, we'll start off by
	// bumping its ref count to 1.
	nodeRefCounts[sourceNode.PubKeyBytes] = 1

	// Next, we'll run through the edgeIndex which maps a channel ID to the
	// edge info. We'll use this scan to populate our reference count map
	// above.
	err = edgeIndex.ForEach(func(chanID, edgeInfoBytes []byte) error {
		// The first 66 bytes of the edge info contain the pubkeys of
		// the nodes that this edge attaches. We'll extract them, and
		// add them to the ref count map.
		var node1, node2 [33]byte
		copy(node1[:], edgeInfoBytes[:33])
		copy(node2[:], edgeInfoBytes[33:])

		// With the nodes extracted, we'll increase the ref count of
		// each of the nodes.
		nodeRefCounts[node1]++
		nodeRefCounts[node2]++

		return nil
	})
	if err != nil {
		return nil, err
	}

	// Finally, we'll make a second pass over the set of nodes, and delete
	// any nodes that have a ref count of zero.
	var pruned []route.Vertex
	for nodePubKey, refCount := range nodeRefCounts {
		// If the ref count of the node isn't zero, then we can safely
		// skip it as it still has edges to or from it within the
		// graph.
		if refCount != 0 {
			continue
		}

		// If we reach this point, then there are no longer any edges
		// that connect this node, so we can delete it.
		err := c.deleteLightningNode(nodes, nodePubKey[:])
		if err != nil {
			if errors.Is(err, ErrGraphNodeNotFound) ||
				errors.Is(err, ErrGraphNodesNotFound) {

				log.Warnf("Unable to prune node %x from the "+
					"graph: %v", nodePubKey, err)
				continue
			}

			return nil, err
		}

		log.Infof("Pruned unconnected node %x from channel graph",
			nodePubKey[:])

		pruned = append(pruned, nodePubKey)
	}

	if len(pruned) > 0 {
		log.Infof("Pruned %v unconnected nodes from the channel graph",
			len(pruned))
	}

	return pruned, err
}

// DisconnectBlockAtHeight is used to indicate that the block specified
// by the passed height has been disconnected from the main chain. This
// will "rewind" the graph back to the height below, deleting channels
// that are no longer confirmed from the graph. The prune log will be
// set to the last prune height valid for the remaining chain.
// Channels that were removed from the graph resulting from the
// disconnected block are returned.
func (c *KVStore) DisconnectBlockAtHeight(height uint32) (
	[]*models.ChannelEdgeInfo, error) {

	// Every channel having a ShortChannelID starting at 'height'
	// will no longer be confirmed.
	startShortChanID := lnwire.ShortChannelID{
		BlockHeight: height,
	}

	// Delete everything after this height from the db up until the
	// SCID alias range.
	endShortChanID := aliasmgr.StartingAlias

	// The block height will be the first 3 bytes of the channel IDs.
	var chanIDStart [8]byte
	byteOrder.PutUint64(chanIDStart[:], startShortChanID.ToUint64())
	var chanIDEnd [8]byte
	byteOrder.PutUint64(chanIDEnd[:], endShortChanID.ToUint64())

	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()

	// Keep track of the channels that are removed from the graph.
	var removedChans []*models.ChannelEdgeInfo

	if err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
		edges, err := tx.CreateTopLevelBucket(edgeBucket)
		if err != nil {
			return err
		}
		edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket)
		if err != nil {
			return err
		}
		chanIndex, err := edges.CreateBucketIfNotExists(
			channelPointBucket,
		)
		if err != nil {
			return err
		}
		zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
		if err != nil {
			return err
		}

		// Scan from chanIDStart to chanIDEnd, deleting every
		// found edge.
		// NOTE: we must delete the edges after the cursor loop, since
		// modifying the bucket while traversing is not safe.
		// NOTE: We use a < comparison in bytes.Compare instead of <=
		// so that the StartingAlias itself isn't deleted.
		var keys [][]byte
		cursor := edgeIndex.ReadWriteCursor()

		//nolint:ll
		for k, _ := cursor.Seek(chanIDStart[:]); k != nil &&
			bytes.Compare(k, chanIDEnd[:]) < 0; k, _ = cursor.Next() {
			keys = append(keys, k)
		}

		for _, k := range keys {
			edgeInfo, err := c.delChannelEdgeUnsafe(
				edges, edgeIndex, chanIndex, zombieIndex,
				k, false, false,
			)
			if err != nil && !errors.Is(err, ErrEdgeNotFound) {
				return err
			}

			removedChans = append(removedChans, edgeInfo)
		}

		// Delete all the entries in the prune log having a height
		// greater than or equal to the block disconnected.
		metaBucket, err := tx.CreateTopLevelBucket(graphMetaBucket)
		if err != nil {
			return err
		}

		pruneBucket, err := metaBucket.CreateBucketIfNotExists(
			pruneLogBucket,
		)
		if err != nil {
			return err
		}

		var pruneKeyStart [4]byte
		byteOrder.PutUint32(pruneKeyStart[:], height)

		var pruneKeyEnd [4]byte
		byteOrder.PutUint32(pruneKeyEnd[:], math.MaxUint32)

		// To avoid modifying the bucket while traversing, we delete
		// the keys in a second loop.
		var pruneKeys [][]byte
		pruneCursor := pruneBucket.ReadWriteCursor()
		//nolint:ll
		for k, _ := pruneCursor.Seek(pruneKeyStart[:]); k != nil &&
			bytes.Compare(k, pruneKeyEnd[:]) <= 0; k, _ = pruneCursor.Next() {
			pruneKeys = append(pruneKeys, k)
		}

		for _, k := range pruneKeys {
			if err := pruneBucket.Delete(k); err != nil {
				return err
			}
		}

		return nil
	}, func() {
		removedChans = nil
	}); err != nil {
		return nil, err
	}

	for _, channel := range removedChans {
		c.rejectCache.remove(channel.ChannelID)
		c.chanCache.remove(channel.ChannelID)
	}

	return removedChans, nil
}
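
// NOTE: Illustrative sketch only, not part of the original file. During a
// reorg, a caller could rewind the graph at the first stale height; the
// surrounding reorg-detection logic is assumed to live elsewhere.
func exampleHandleReorg(store *KVStore, staleHeight uint32) error {
	removed, err := store.DisconnectBlockAtHeight(staleHeight)
	if err != nil {
		return err
	}

	log.Infof("Reorg at height %d removed %d channels", staleHeight,
		len(removed))

	return nil
}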

// PruneTip returns the block height and hash of the latest block that has been
// used to prune channels in the graph. Knowing the "prune tip" allows callers
// to tell if the graph is currently in sync with the current best known UTXO
// state.
func (c *KVStore) PruneTip() (*chainhash.Hash, uint32, error) {
	var (
		tipHash   chainhash.Hash
		tipHeight uint32
	)

	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		graphMeta := tx.ReadBucket(graphMetaBucket)
		if graphMeta == nil {
			return ErrGraphNotFound
		}
		pruneBucket := graphMeta.NestedReadBucket(pruneLogBucket)
		if pruneBucket == nil {
			return ErrGraphNeverPruned
		}

		pruneCursor := pruneBucket.ReadCursor()

		// The prune key with the largest block height will be our
		// prune tip.
		k, v := pruneCursor.Last()
		if k == nil {
			return ErrGraphNeverPruned
		}

		// Once we have the prune tip, the value will be the block hash,
		// and the key the block height.
		copy(tipHash[:], v)
		tipHeight = byteOrder.Uint32(k)

		return nil
	}, func() {})
	if err != nil {
		return nil, 0, err
	}

	return &tipHash, tipHeight, nil
}
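
// NOTE: Illustrative sketch only, not part of the original file. It compares
// the prune tip against the caller's best block to decide whether the graph
// still needs to catch up; bestHash and bestHeight are assumed to come from
// the caller's chain backend.
func exampleIsGraphSynced(store *KVStore, bestHash *chainhash.Hash,
	bestHeight uint32) (bool, error) {

	tipHash, tipHeight, err := store.PruneTip()
	switch {
	case errors.Is(err, ErrGraphNeverPruned):
		// The graph has never been pruned, so it cannot be in sync
		// with the chain yet.
		return false, nil

	case err != nil:
		return false, err
	}

	return tipHeight == bestHeight && *tipHash == *bestHash, nil
}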

// DeleteChannelEdges removes edges with the given channel IDs from the
// database and marks them as zombies. This ensures that we're unable to re-add
// them to our database once again. If an edge does not exist within the
// database, then ErrEdgeNotFound will be returned. If strictZombiePruning is
// true, then when we mark these edges as zombies, we'll set up the keys such
// that we require the node that failed to send the fresh update to be the one
// that resurrects the channel from its zombie state. The markZombie bool
// denotes whether or not to mark the channel as a zombie.
func (c *KVStore) DeleteChannelEdges(strictZombiePruning, markZombie bool,
	chanIDs ...uint64) ([]*models.ChannelEdgeInfo, error) {

	// TODO(roasbeef): possibly delete from node bucket if node has no more
	// channels
	// TODO(roasbeef): don't delete both edges?

	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()

	var infos []*models.ChannelEdgeInfo
	err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
		edges := tx.ReadWriteBucket(edgeBucket)
		if edges == nil {
			return ErrEdgeNotFound
		}
		edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrEdgeNotFound
		}
		chanIndex := edges.NestedReadWriteBucket(channelPointBucket)
		if chanIndex == nil {
			return ErrEdgeNotFound
		}
		nodes := tx.ReadWriteBucket(nodeBucket)
		if nodes == nil {
			return ErrGraphNodeNotFound
		}
		zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
		if err != nil {
			return err
		}

		var rawChanID [8]byte
		for _, chanID := range chanIDs {
			byteOrder.PutUint64(rawChanID[:], chanID)
			edgeInfo, err := c.delChannelEdgeUnsafe(
				edges, edgeIndex, chanIndex, zombieIndex,
				rawChanID[:], markZombie, strictZombiePruning,
			)
			if err != nil {
				return err
			}

			infos = append(infos, edgeInfo)
		}

		return nil
	}, func() {
		infos = nil
	})
	if err != nil {
		return nil, err
	}

	for _, chanID := range chanIDs {
		c.rejectCache.remove(chanID)
		c.chanCache.remove(chanID)
	}

	return infos, nil
}

// ChannelID attempts to look up the 8-byte compact channel ID which maps to
// the passed channel point (outpoint). If the passed channel doesn't exist
// within the database, then ErrEdgeNotFound is returned.
func (c *KVStore) ChannelID(chanPoint *wire.OutPoint) (uint64, error) {
	var chanID uint64
	if err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		var err error
		chanID, err = getChanID(tx, chanPoint)
		return err
	}, func() {
		chanID = 0
	}); err != nil {
		return 0, err
	}

	return chanID, nil
}

// getChanID returns the assigned channel ID for a given channel point.
func getChanID(tx kvdb.RTx, chanPoint *wire.OutPoint) (uint64, error) {
	var b bytes.Buffer
	if err := WriteOutpoint(&b, chanPoint); err != nil {
		return 0, err
	}

	edges := tx.ReadBucket(edgeBucket)
	if edges == nil {
		return 0, ErrGraphNoEdgesFound
	}
	chanIndex := edges.NestedReadBucket(channelPointBucket)
	if chanIndex == nil {
		return 0, ErrGraphNoEdgesFound
	}

	chanIDBytes := chanIndex.Get(b.Bytes())
	if chanIDBytes == nil {
		return 0, ErrEdgeNotFound
	}

	chanID := byteOrder.Uint64(chanIDBytes)

	return chanID, nil
}
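
// NOTE: Illustrative sketch only, not part of the original file. It looks up
// the compact channel ID for a funding outpoint and treats a missing edge as
// a non-fatal condition.
func exampleLookupChanID(store *KVStore, chanPoint *wire.OutPoint) {
	chanID, err := store.ChannelID(chanPoint)
	switch {
	case errors.Is(err, ErrEdgeNotFound):
		log.Debugf("no channel found for outpoint %v", chanPoint)

	case err != nil:
		log.Warnf("channel ID lookup failed: %v", err)

	default:
		log.Debugf("outpoint %v maps to channel %d", chanPoint, chanID)
	}
}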

// TODO(roasbeef): allow updates to use Batch?

// HighestChanID returns the "highest" known channel ID in the channel graph.
// This represents the "newest" channel from the PoV of the chain. This method
// can be used by peers to quickly determine if their graphs are in sync.
func (c *KVStore) HighestChanID(_ context.Context) (uint64, error) {
	var cid uint64

	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrGraphNoEdgesFound
		}

		// In order to find the highest chan ID, we'll fetch a cursor
		// and use that to seek to the "end" of our known range.
		cidCursor := edgeIndex.ReadCursor()

		lastChanID, _ := cidCursor.Last()

		// If there's no key, then this means that we don't actually
		// know of any channels, so we'll return a predictable error.
		if lastChanID == nil {
			return ErrGraphNoEdgesFound
		}

		// Otherwise, we'll deserialize the channel ID and return it
		// to the caller.
		cid = byteOrder.Uint64(lastChanID)

		return nil
	}, func() {
		cid = 0
	})
	if err != nil && !errors.Is(err, ErrGraphNoEdgesFound) {
		return 0, err
	}

	return cid, nil
}
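
// NOTE: Illustrative sketch only, not part of the original file. A gossip
// syncer could compare its highest known channel ID against the one reported
// by a peer (remoteCID is an assumed input) to get a rough sense of whether it
// is behind.
func exampleCompareTips(ctx context.Context, store *KVStore,
	remoteCID uint64) (bool, error) {

	localCID, err := store.HighestChanID(ctx)
	if err != nil {
		return false, err
	}

	// A remote tip beyond ours suggests we are missing newer channels.
	return remoteCID > localCID, nil
}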

// ChannelEdge represents the complete set of information for a channel edge in
// the known channel graph. This struct couples the core information of the
// edge as well as each of the known advertised edge policies.
type ChannelEdge struct {
	// Info contains all the static information describing the channel.
	Info *models.ChannelEdgeInfo

	// Policy1 points to the "first" edge policy of the channel containing
	// the dynamic information required to properly route through the edge.
	Policy1 *models.ChannelEdgePolicy

	// Policy2 points to the "second" edge policy of the channel containing
	// the dynamic information required to properly route through the edge.
	Policy2 *models.ChannelEdgePolicy

	// Node1 is "node 1" in the channel. This is the node that would have
	// produced Policy1 if it exists.
	Node1 *models.LightningNode

	// Node2 is "node 2" in the channel. This is the node that would have
	// produced Policy2 if it exists.
	Node2 *models.LightningNode
}

// ChanUpdatesInHorizon returns all the known channel edges which have at least
// one edge that has an update timestamp within the specified horizon.
func (c *KVStore) ChanUpdatesInHorizon(startTime,
	endTime time.Time) ([]ChannelEdge, error) {

	// To ensure we don't return duplicate ChannelEdges, we'll use an
	// additional map to keep track of the edges already seen to prevent
	// re-adding them.
	var edgesSeen map[uint64]struct{}
	var edgesToCache map[uint64]ChannelEdge
	var edgesInHorizon []ChannelEdge

	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()

	var hits int
	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrGraphNoEdgesFound
		}
		edgeUpdateIndex := edges.NestedReadBucket(edgeUpdateIndexBucket)
		if edgeUpdateIndex == nil {
			return ErrGraphNoEdgesFound
		}

		nodes := tx.ReadBucket(nodeBucket)
		if nodes == nil {
			return ErrGraphNodesNotFound
		}

		// We'll now obtain a cursor to perform a range query within
		// the index to find all channels within the horizon.
		updateCursor := edgeUpdateIndex.ReadCursor()

		var startTimeBytes, endTimeBytes [8 + 8]byte
		byteOrder.PutUint64(
			startTimeBytes[:8], uint64(startTime.Unix()),
		)
		byteOrder.PutUint64(
			endTimeBytes[:8], uint64(endTime.Unix()),
		)

		// With our start and end times constructed, we'll step through
		// the index collecting the info and policy of each update of
		// each channel that has a last update within the time range.
		//
		//nolint:ll
		for indexKey, _ := updateCursor.Seek(startTimeBytes[:]); indexKey != nil &&
			bytes.Compare(indexKey, endTimeBytes[:]) <= 0; indexKey, _ = updateCursor.Next() {
			// We have a new eligible entry, so we'll slice off the
			// chan ID so we can query it in the DB.
			chanID := indexKey[8:]

			// If we've already retrieved the info and policies for
			// this edge, then we can skip it as we don't need to do
			// so again.
			chanIDInt := byteOrder.Uint64(chanID)
			if _, ok := edgesSeen[chanIDInt]; ok {
				continue
			}

			if channel, ok := c.chanCache.get(chanIDInt); ok {
				hits++
				edgesSeen[chanIDInt] = struct{}{}
				edgesInHorizon = append(edgesInHorizon, channel)

				continue
			}

			// First, we'll fetch the static edge information.
			edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID)
			if err != nil {
				chanID := byteOrder.Uint64(chanID)
				return fmt.Errorf("unable to fetch info for "+
					"edge with chan_id=%v: %v", chanID, err)
			}

			// With the static information obtained, we'll now
			// fetch the dynamic policy info.
			edge1, edge2, err := fetchChanEdgePolicies(
				edgeIndex, edges, chanID,
			)
			if err != nil {
				chanID := byteOrder.Uint64(chanID)
				return fmt.Errorf("unable to fetch policies "+
					"for edge with chan_id=%v: %v", chanID,
					err)
			}

			node1, err := fetchLightningNode(
				nodes, edgeInfo.NodeKey1Bytes[:],
			)
			if err != nil {
				return err
			}

			node2, err := fetchLightningNode(
				nodes, edgeInfo.NodeKey2Bytes[:],
			)
			if err != nil {
				return err
			}

			// Finally, we'll collate this edge with the rest of
			// edges to be returned.
			edgesSeen[chanIDInt] = struct{}{}
			channel := ChannelEdge{
				Info:    &edgeInfo,
				Policy1: edge1,
				Policy2: edge2,
				Node1:   &node1,
				Node2:   &node2,
			}
			edgesInHorizon = append(edgesInHorizon, channel)
			edgesToCache[chanIDInt] = channel
		}

		return nil
	}, func() {
		edgesSeen = make(map[uint64]struct{})
		edgesToCache = make(map[uint64]ChannelEdge)
		edgesInHorizon = nil
	})
	switch {
	case errors.Is(err, ErrGraphNoEdgesFound):
		fallthrough
	case errors.Is(err, ErrGraphNodesNotFound):
		break

	case err != nil:
		return nil, err
	}

	// Insert any edges loaded from disk into the cache.
	for chanid, channel := range edgesToCache {
		c.chanCache.insert(chanid, channel)
	}

	if len(edgesInHorizon) > 0 {
		log.Debugf("ChanUpdatesInHorizon hit percentage: %.2f (%d/%d)",
			float64(hits)*100/float64(len(edgesInHorizon)), hits,
			len(edgesInHorizon))
	} else {
		log.Debugf("ChanUpdatesInHorizon returned no edges in "+
			"horizon (%s, %s)", startTime, endTime)
	}

	return edgesInHorizon, nil
}
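
// NOTE: Illustrative sketch only, not part of the original file. It queries
// the channels updated within the last day, mirroring how a gossip timestamp
// filter could be served; the 24 hour horizon is an assumption of this
// example.
func exampleRecentChannelUpdates(store *KVStore) ([]ChannelEdge, error) {
	endTime := time.Now()
	startTime := endTime.Add(-24 * time.Hour)

	return store.ChanUpdatesInHorizon(startTime, endTime)
}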

// NodeUpdatesInHorizon returns all the known lightning nodes which have an
// update timestamp within the passed range. This method can be used by two
// nodes to quickly determine if they have the same set of up-to-date node
// announcements.
func (c *KVStore) NodeUpdatesInHorizon(startTime,
	endTime time.Time) ([]models.LightningNode, error) {

	var nodesInHorizon []models.LightningNode

	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		nodes := tx.ReadBucket(nodeBucket)
		if nodes == nil {
			return ErrGraphNodesNotFound
		}

		nodeUpdateIndex := nodes.NestedReadBucket(nodeUpdateIndexBucket)
		if nodeUpdateIndex == nil {
			return ErrGraphNodesNotFound
		}

		// We'll now obtain a cursor to perform a range query within
		// the index to find all node announcements within the horizon.
		updateCursor := nodeUpdateIndex.ReadCursor()

		var startTimeBytes, endTimeBytes [8 + 33]byte
		byteOrder.PutUint64(
			startTimeBytes[:8], uint64(startTime.Unix()),
		)
		byteOrder.PutUint64(
			endTimeBytes[:8], uint64(endTime.Unix()),
		)

		// With our start and end times constructed, we'll step through
		// the index collecting info for each node within the time
		// range.
		//
		//nolint:ll
		for indexKey, _ := updateCursor.Seek(startTimeBytes[:]); indexKey != nil &&
			bytes.Compare(indexKey, endTimeBytes[:]) <= 0; indexKey, _ = updateCursor.Next() {
			nodePub := indexKey[8:]
			node, err := fetchLightningNode(nodes, nodePub)
			if err != nil {
				return err
			}

			nodesInHorizon = append(nodesInHorizon, node)
		}

		return nil
	}, func() {
		nodesInHorizon = nil
	})
	switch {
	case errors.Is(err, ErrGraphNoEdgesFound):
		fallthrough
	case errors.Is(err, ErrGraphNodesNotFound):
		break

	case err != nil:
		return nil, err
	}

	return nodesInHorizon, nil
}

// FilterKnownChanIDs takes a set of channel IDs and returns the subset of chan
// IDs that we don't know and are not known zombies of the passed set. In other
// words, we perform a set difference of our set of chan IDs and the ones
// passed in. This method can be used by callers to determine the set of
// channels another peer knows of that we don't. The ChannelUpdateInfos for the
// known zombies is also returned.
func (c *KVStore) FilterKnownChanIDs(chansInfo []ChannelUpdateInfo) ([]uint64,
	[]ChannelUpdateInfo, error) {

	var (
		newChanIDs   []uint64
		knownZombies []ChannelUpdateInfo
	)

	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()

	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrGraphNoEdgesFound
		}

		// Fetch the zombie index, it may not exist if no edges have
		// ever been marked as zombies. If the index has been
		// initialized, we will use it later to skip known zombie edges.
		zombieIndex := edges.NestedReadBucket(zombieBucket)

		// We'll run through the set of chanIDs and collate only the
		// set of channels that cannot be found within our db.
		var cidBytes [8]byte
		for _, info := range chansInfo {
			scid := info.ShortChannelID.ToUint64()
			byteOrder.PutUint64(cidBytes[:], scid)

			// If the edge is already known, skip it.
			if v := edgeIndex.Get(cidBytes[:]); v != nil {
				continue
			}

			// If the edge is a known zombie, skip it.
			if zombieIndex != nil {
				isZombie, _, _ := isZombieEdge(
					zombieIndex, scid,
				)

				if isZombie {
					knownZombies = append(
						knownZombies, info,
					)

					continue
				}
			}

			newChanIDs = append(newChanIDs, scid)
		}

		return nil
	}, func() {
		newChanIDs = nil
		knownZombies = nil
	})
	switch {
	// If we don't know of any edges yet, then we'll return the entire set
	// of chan IDs specified.
	case errors.Is(err, ErrGraphNoEdgesFound):
		ogChanIDs := make([]uint64, len(chansInfo))
		for i, info := range chansInfo {
			ogChanIDs[i] = info.ShortChannelID.ToUint64()
		}

		return ogChanIDs, nil, nil

	case err != nil:
		return nil, nil, err
	}

	return newChanIDs, knownZombies, nil
}
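
// NOTE: Illustrative sketch only, not part of the original file. Given SCIDs
// advertised by a peer, it builds the ChannelUpdateInfo slice expected by
// FilterKnownChanIDs and returns only the channels we still need to query.
func exampleFilterRemoteSCIDs(store *KVStore,
	remoteSCIDs []lnwire.ShortChannelID) ([]uint64, error) {

	chansInfo := make([]ChannelUpdateInfo, 0, len(remoteSCIDs))
	for _, scid := range remoteSCIDs {
		chansInfo = append(chansInfo, NewChannelUpdateInfo(
			scid, time.Time{}, time.Time{},
		))
	}

	newChanIDs, zombies, err := store.FilterKnownChanIDs(chansInfo)
	if err != nil {
		return nil, err
	}

	log.Debugf("peer advertised %d channels: %d new, %d known zombies",
		len(chansInfo), len(newChanIDs), len(zombies))

	return newChanIDs, nil
}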

// ChannelUpdateInfo couples the SCID of a channel with the timestamps of the
// latest received channel updates for the channel.
type ChannelUpdateInfo struct {
	// ShortChannelID is the SCID identifier of the channel.
	ShortChannelID lnwire.ShortChannelID

	// Node1UpdateTimestamp is the timestamp of the latest received update
	// from the node 1 channel peer. This will be set to zero time if no
	// update has yet been received from this node.
	Node1UpdateTimestamp time.Time

	// Node2UpdateTimestamp is the timestamp of the latest received update
	// from the node 2 channel peer. This will be set to zero time if no
	// update has yet been received from this node.
	Node2UpdateTimestamp time.Time
}

// NewChannelUpdateInfo is a constructor which makes sure we initialize the
// timestamps with a zero-seconds unix timestamp, which equals
// `January 1, 1970, 00:00:00 UTC`, in case the value is `time.Time{}`.
func NewChannelUpdateInfo(scid lnwire.ShortChannelID, node1Timestamp,
	node2Timestamp time.Time) ChannelUpdateInfo {

	chanInfo := ChannelUpdateInfo{
		ShortChannelID:       scid,
		Node1UpdateTimestamp: node1Timestamp,
		Node2UpdateTimestamp: node2Timestamp,
	}

	if node1Timestamp.IsZero() {
		chanInfo.Node1UpdateTimestamp = time.Unix(0, 0)
	}

	if node2Timestamp.IsZero() {
		chanInfo.Node2UpdateTimestamp = time.Unix(0, 0)
	}

	return chanInfo
}
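
// NOTE: Illustrative sketch only, not part of the original file. It simply
// demonstrates the zero-time normalization performed by NewChannelUpdateInfo.
func exampleNewChannelUpdateInfo(scid lnwire.ShortChannelID) ChannelUpdateInfo {
	// Passing time.Time{} for both sides yields timestamps equal to
	// time.Unix(0, 0) rather than the zero time.Time value.
	return NewChannelUpdateInfo(scid, time.Time{}, time.Time{})
}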
2366

2367
// BlockChannelRange represents a range of channels for a given block height.
2368
type BlockChannelRange struct {
2369
        // Height is the height of the block all of the channels below were
2370
        // included in.
2371
        Height uint32
2372

2373
        // Channels is the list of channels identified by their short ID
2374
        // representation known to us that were included in the block height
2375
        // above. The list may include channel update timestamp information if
2376
        // requested.
2377
        Channels []ChannelUpdateInfo
2378
}
2379

2380
// FilterChannelRange returns the channel ID's of all known channels which were
2381
// mined in a block height within the passed range. The channel IDs are grouped
2382
// by their common block height. This method can be used to quickly share with a
2383
// peer the set of channels we know of within a particular range to catch them
2384
// up after a period of time offline. If withTimestamps is true then the
2385
// timestamp info of the latest received channel update messages of the channel
2386
// will be included in the response.
2387
func (c *KVStore) FilterChannelRange(startHeight,
2388
        endHeight uint32, withTimestamps bool) ([]BlockChannelRange, error) {
3✔
2389

3✔
2390
        startChanID := &lnwire.ShortChannelID{
3✔
2391
                BlockHeight: startHeight,
3✔
2392
        }
3✔
2393

3✔
2394
        endChanID := lnwire.ShortChannelID{
3✔
2395
                BlockHeight: endHeight,
3✔
2396
                TxIndex:     math.MaxUint32 & 0x00ffffff,
3✔
2397
                TxPosition:  math.MaxUint16,
3✔
2398
        }
3✔
2399

3✔
2400
        // As we need to perform a range scan, we'll convert the starting and
3✔
2401
        // ending height to their corresponding values when encoded using short
3✔
2402
        // channel ID's.
3✔
2403
        var chanIDStart, chanIDEnd [8]byte
3✔
2404
        byteOrder.PutUint64(chanIDStart[:], startChanID.ToUint64())
3✔
2405
        byteOrder.PutUint64(chanIDEnd[:], endChanID.ToUint64())
3✔
2406

3✔
2407
        var channelsPerBlock map[uint32][]ChannelUpdateInfo
3✔
2408
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
6✔
2409
                edges := tx.ReadBucket(edgeBucket)
3✔
2410
                if edges == nil {
3✔
2411
                        return ErrGraphNoEdgesFound
×
2412
                }
×
2413
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
3✔
2414
                if edgeIndex == nil {
3✔
2415
                        return ErrGraphNoEdgesFound
×
2416
                }
×
2417

2418
                cursor := edgeIndex.ReadCursor()
3✔
2419

3✔
2420
                // We'll now iterate through the database, and find each
3✔
2421
                // channel ID that resides within the specified range.
3✔
2422
                //
3✔
2423
                //nolint:ll
3✔
2424
                for k, v := cursor.Seek(chanIDStart[:]); k != nil &&
3✔
2425
                        bytes.Compare(k, chanIDEnd[:]) <= 0; k, v = cursor.Next() {
6✔
2426
                        // Don't send alias SCIDs during gossip sync.
3✔
2427
                        edgeReader := bytes.NewReader(v)
3✔
2428
                        edgeInfo, err := deserializeChanEdgeInfo(edgeReader)
3✔
2429
                        if err != nil {
3✔
2430
                                return err
×
2431
                        }
×
2432

2433
                        if edgeInfo.AuthProof == nil {
6✔
2434
                                continue
3✔
2435
                        }
2436

2437
                        // This channel ID rests within the target range, so
2438
                        // we'll add it to our returned set.
2439
                        rawCid := byteOrder.Uint64(k)
3✔
2440
                        cid := lnwire.NewShortChanIDFromInt(rawCid)
3✔
2441

3✔
2442
                        chanInfo := NewChannelUpdateInfo(
3✔
2443
                                cid, time.Time{}, time.Time{},
3✔
2444
                        )
3✔
2445

3✔
2446
                        if !withTimestamps {
3✔
UNCOV
2447
                                channelsPerBlock[cid.BlockHeight] = append(
×
UNCOV
2448
                                        channelsPerBlock[cid.BlockHeight],
×
UNCOV
2449
                                        chanInfo,
×
UNCOV
2450
                                )
×
UNCOV
2451

×
UNCOV
2452
                                continue
×
2453
                        }
2454

2455
                        node1Key, node2Key := computeEdgePolicyKeys(&edgeInfo)
3✔
2456

3✔
2457
                        rawPolicy := edges.Get(node1Key)
3✔
2458
                        if len(rawPolicy) != 0 {
6✔
2459
                                r := bytes.NewReader(rawPolicy)
3✔
2460

3✔
2461
                                edge, err := deserializeChanEdgePolicyRaw(r)
3✔
2462
                                if err != nil && !errors.Is(
3✔
2463
                                        err, ErrEdgePolicyOptionalFieldNotFound,
3✔
2464
                                ) && !errors.Is(err, ErrParsingExtraTLVBytes) {
3✔
2465

×
2466
                                        return err
×
2467
                                }
×
2468

2469
                                chanInfo.Node1UpdateTimestamp = edge.LastUpdate
3✔
2470
                        }
2471

2472
                        rawPolicy = edges.Get(node2Key)
3✔
2473
                        if len(rawPolicy) != 0 {
6✔
2474
                                r := bytes.NewReader(rawPolicy)
3✔
2475

3✔
2476
                                edge, err := deserializeChanEdgePolicyRaw(r)
3✔
2477
                                if err != nil && !errors.Is(
3✔
2478
                                        err, ErrEdgePolicyOptionalFieldNotFound,
3✔
2479
                                ) && !errors.Is(err, ErrParsingExtraTLVBytes) {
3✔
2480

×
2481
                                        return err
×
2482
                                }
×
2483

2484
                                chanInfo.Node2UpdateTimestamp = edge.LastUpdate
3✔
2485
                        }
2486

2487
                        channelsPerBlock[cid.BlockHeight] = append(
3✔
2488
                                channelsPerBlock[cid.BlockHeight], chanInfo,
3✔
2489
                        )
3✔
2490
                }
2491

2492
                return nil
3✔
2493
        }, func() {
3✔
2494
                channelsPerBlock = make(map[uint32][]ChannelUpdateInfo)
3✔
2495
        })
3✔
2496

2497
        switch {
3✔
2498
        // If we don't know of any channels yet, then there's nothing to
2499
        // filter, so we'll return an empty slice.
2500
        case errors.Is(err, ErrGraphNoEdgesFound) || len(channelsPerBlock) == 0:
3✔
2501
                return nil, nil
3✔
2502

2503
        case err != nil:
×
2504
                return nil, err
×
2505
        }
2506

2507
        // Return the channel ranges in ascending block height order.
2508
        blocks := make([]uint32, 0, len(channelsPerBlock))
3✔
2509
        for block := range channelsPerBlock {
6✔
2510
                blocks = append(blocks, block)
3✔
2511
        }
3✔
2512
        sort.Slice(blocks, func(i, j int) bool {
6✔
2513
                return blocks[i] < blocks[j]
3✔
2514
        })
3✔
2515

2516
        channelRanges := make([]BlockChannelRange, 0, len(channelsPerBlock))
3✔
2517
        for _, block := range blocks {
6✔
2518
                channelRanges = append(channelRanges, BlockChannelRange{
3✔
2519
                        Height:   block,
3✔
2520
                        Channels: channelsPerBlock[block],
3✔
2521
                })
3✔
2522
        }
3✔
2523

2524
        return channelRanges, nil
3✔
2525
}
2526

2527
// FetchChanInfos returns the set of channel edges that correspond to the passed
2528
// channel ID's. If an edge in the query is unknown to the database, it will be
2529
// skipped and the result will contain only those edges that exist at the time
2530
// of the query. This can be used to respond to peer queries that are seeking to
2531
// fill in gaps in their view of the channel graph.
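//
// Illustrative usage sketch (assumed store *KVStore and placeholder short
// channel IDs scid1/scid2; unknown IDs are simply absent from the result):
//
//	edges, err := store.FetchChanInfos([]uint64{scid1, scid2})
//	if err != nil {
//		return err
//	}
//	for _, e := range edges {
//		fmt.Printf("chan=%d policy1=%v policy2=%v\n",
//			e.Info.ChannelID, e.Policy1 != nil, e.Policy2 != nil)
//	}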
2532
func (c *KVStore) FetchChanInfos(chanIDs []uint64) ([]ChannelEdge, error) {
3✔
2533
        return c.fetchChanInfos(nil, chanIDs)
3✔
2534
}
3✔
2535

2536
// fetchChanInfos returns the set of channel edges that correspond to the passed
2537
// channel ID's. If an edge in the query is unknown to the database, it will be
2538
// skipped and the result will contain only those edges that exist at the time
2539
// of the query. This can be used to respond to peer queries that are seeking to
2540
// fill in gaps in their view of the channel graph.
2541
//
2542
// NOTE: An optional transaction may be provided. If none is provided, then a
2543
// new one will be created.
2544
func (c *KVStore) fetchChanInfos(tx kvdb.RTx, chanIDs []uint64) (
2545
        []ChannelEdge, error) {
3✔
2546
        // TODO(roasbeef): sort cids?
3✔
2547

3✔
2548
        var (
3✔
2549
                chanEdges []ChannelEdge
3✔
2550
                cidBytes  [8]byte
3✔
2551
        )
3✔
2552

3✔
2553
        fetchChanInfos := func(tx kvdb.RTx) error {
6✔
2554
                edges := tx.ReadBucket(edgeBucket)
3✔
2555
                if edges == nil {
3✔
2556
                        return ErrGraphNoEdgesFound
×
2557
                }
×
2558
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
3✔
2559
                if edgeIndex == nil {
3✔
2560
                        return ErrGraphNoEdgesFound
×
2561
                }
×
2562
                nodes := tx.ReadBucket(nodeBucket)
3✔
2563
                if nodes == nil {
3✔
2564
                        return ErrGraphNotFound
×
2565
                }
×
2566

2567
                for _, cid := range chanIDs {
6✔
2568
                        byteOrder.PutUint64(cidBytes[:], cid)
3✔
2569

3✔
2570
                        // First, we'll fetch the static edge information. If
3✔
2571
                        // the edge is unknown, we will skip the edge and
3✔
2572
                        // continue gathering all known edges.
3✔
2573
                        edgeInfo, err := fetchChanEdgeInfo(
3✔
2574
                                edgeIndex, cidBytes[:],
3✔
2575
                        )
3✔
2576
                        switch {
3✔
UNCOV
2577
                        case errors.Is(err, ErrEdgeNotFound):
×
UNCOV
2578
                                continue
×
2579
                        case err != nil:
×
2580
                                return err
×
2581
                        }
2582

2583
                        // With the static information obtained, we'll now
2584
                        // fetch the dynamic policy info.
2585
                        edge1, edge2, err := fetchChanEdgePolicies(
3✔
2586
                                edgeIndex, edges, cidBytes[:],
3✔
2587
                        )
3✔
2588
                        if err != nil {
3✔
2589
                                return err
×
2590
                        }
×
2591

2592
                        node1, err := fetchLightningNode(
3✔
2593
                                nodes, edgeInfo.NodeKey1Bytes[:],
3✔
2594
                        )
3✔
2595
                        if err != nil {
3✔
2596
                                return err
×
2597
                        }
×
2598

2599
                        node2, err := fetchLightningNode(
3✔
2600
                                nodes, edgeInfo.NodeKey2Bytes[:],
3✔
2601
                        )
3✔
2602
                        if err != nil {
3✔
2603
                                return err
×
2604
                        }
×
2605

2606
                        chanEdges = append(chanEdges, ChannelEdge{
3✔
2607
                                Info:    &edgeInfo,
3✔
2608
                                Policy1: edge1,
3✔
2609
                                Policy2: edge2,
3✔
2610
                                Node1:   &node1,
3✔
2611
                                Node2:   &node2,
3✔
2612
                        })
3✔
2613
                }
2614

2615
                return nil
3✔
2616
        }
2617

2618
        if tx == nil {
6✔
2619
                err := kvdb.View(c.db, fetchChanInfos, func() {
6✔
2620
                        chanEdges = nil
3✔
2621
                })
3✔
2622
                if err != nil {
3✔
2623
                        return nil, err
×
2624
                }
×
2625

2626
                return chanEdges, nil
3✔
2627
        }
2628

2629
        err := fetchChanInfos(tx)
×
2630
        if err != nil {
×
2631
                return nil, err
×
2632
        }
×
2633

2634
        return chanEdges, nil
×
2635
}
2636

2637
func delEdgeUpdateIndexEntry(edgesBucket kvdb.RwBucket, chanID uint64,
2638
        edge1, edge2 *models.ChannelEdgePolicy) error {
3✔
2639

3✔
2640
        // First, we'll fetch the edge update index bucket which currently
3✔
2641
        // stores an entry for the channel we're about to delete.
3✔
2642
        updateIndex := edgesBucket.NestedReadWriteBucket(edgeUpdateIndexBucket)
3✔
2643
        if updateIndex == nil {
3✔
2644
                // No edges in bucket, return early.
×
2645
                return nil
×
2646
        }
×
2647

2648
        // Now that we have the bucket, we'll attempt to construct a template
2649
        // for the index key: updateTime || chanid.
2650
        var indexKey [8 + 8]byte
3✔
2651
        byteOrder.PutUint64(indexKey[8:], chanID)
3✔
2652

3✔
2653
        // With the template constructed, we'll attempt to delete an entry that
3✔
2654
        // would have been created by both edges: we'll alternate the update
3✔
2655
        // times, as one may have overridden the other.
3✔
2656
        if edge1 != nil {
6✔
2657
                byteOrder.PutUint64(
3✔
2658
                        indexKey[:8], uint64(edge1.LastUpdate.Unix()),
3✔
2659
                )
3✔
2660
                if err := updateIndex.Delete(indexKey[:]); err != nil {
3✔
2661
                        return err
×
2662
                }
×
2663
        }
2664

2665
        // We'll also attempt to delete the entry that may have been created by
2666
        // the second edge.
2667
        if edge2 != nil {
6✔
2668
                byteOrder.PutUint64(
3✔
2669
                        indexKey[:8], uint64(edge2.LastUpdate.Unix()),
3✔
2670
                )
3✔
2671
                if err := updateIndex.Delete(indexKey[:]); err != nil {
3✔
2672
                        return err
×
2673
                }
×
2674
        }
2675

2676
        return nil
3✔
2677
}
2678

2679
// delChannelEdgeUnsafe deletes the edge with the given chanID from the graph
2680
// cache. It then goes on to delete any policy info and edge info for this
2681
// channel from the DB and finally, if isZombie is true, it will add an entry
2682
// for this channel in the zombie index.
2683
//
2684
// NOTE: this method MUST only be called if the cacheMu has already been
2685
// acquired.
2686
func (c *KVStore) delChannelEdgeUnsafe(edges, edgeIndex, chanIndex,
2687
        zombieIndex kvdb.RwBucket, chanID []byte, isZombie,
2688
        strictZombie bool) (*models.ChannelEdgeInfo, error) {
3✔
2689

3✔
2690
        edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID)
3✔
2691
        if err != nil {
3✔
UNCOV
2692
                return nil, err
×
UNCOV
2693
        }
×
2694

2695
        // We'll also remove the entry in the edge update index bucket before
2696
        // we delete the edges themselves so we can access their last update
2697
        // times.
2698
        cid := byteOrder.Uint64(chanID)
3✔
2699
        edge1, edge2, err := fetchChanEdgePolicies(edgeIndex, edges, chanID)
3✔
2700
        if err != nil {
3✔
2701
                return nil, err
×
2702
        }
×
2703
        err = delEdgeUpdateIndexEntry(edges, cid, edge1, edge2)
3✔
2704
        if err != nil {
3✔
2705
                return nil, err
×
2706
        }
×
2707

2708
        // The edge key is of the format pubKey || chanID. First we construct
2709
        // the latter half, populating the channel ID.
2710
        var edgeKey [33 + 8]byte
3✔
2711
        copy(edgeKey[33:], chanID)
3✔
2712

3✔
2713
        // With the latter half constructed, copy over the first public key to
3✔
2714
        // delete the edge in this direction, then the second to delete the
3✔
2715
        // edge in the opposite direction.
3✔
2716
        copy(edgeKey[:33], edgeInfo.NodeKey1Bytes[:])
3✔
2717
        if edges.Get(edgeKey[:]) != nil {
6✔
2718
                if err := edges.Delete(edgeKey[:]); err != nil {
3✔
2719
                        return nil, err
×
2720
                }
×
2721
        }
2722
        copy(edgeKey[:33], edgeInfo.NodeKey2Bytes[:])
3✔
2723
        if edges.Get(edgeKey[:]) != nil {
6✔
2724
                if err := edges.Delete(edgeKey[:]); err != nil {
3✔
2725
                        return nil, err
×
2726
                }
×
2727
        }
2728

2729
        // As part of deleting the edge we also remove all disabled entries
2730
        // from the edgePolicyDisabledIndex bucket. We do that for both
2731
        // directions.
2732
        err = updateEdgePolicyDisabledIndex(edges, cid, false, false)
3✔
2733
        if err != nil {
3✔
2734
                return nil, err
×
2735
        }
×
2736
        err = updateEdgePolicyDisabledIndex(edges, cid, true, false)
3✔
2737
        if err != nil {
3✔
2738
                return nil, err
×
2739
        }
×
2740

2741
        // With the edge data deleted, we can purge the information from the two
2742
        // edge indexes.
2743
        if err := edgeIndex.Delete(chanID); err != nil {
3✔
2744
                return nil, err
×
2745
        }
×
2746
        var b bytes.Buffer
3✔
2747
        if err := WriteOutpoint(&b, &edgeInfo.ChannelPoint); err != nil {
3✔
2748
                return nil, err
×
2749
        }
×
2750
        if err := chanIndex.Delete(b.Bytes()); err != nil {
3✔
2751
                return nil, err
×
2752
        }
×
2753

2754
        // Finally, we'll mark the edge as a zombie within our index if it's
2755
        // being removed due to the channel becoming a zombie. We do this to
2756
        // ensure we don't store unnecessary data for spent channels.
2757
        if !isZombie {
6✔
2758
                return &edgeInfo, nil
3✔
2759
        }
3✔
2760

2761
        nodeKey1, nodeKey2 := edgeInfo.NodeKey1Bytes, edgeInfo.NodeKey2Bytes
3✔
2762
        if strictZombie {
3✔
UNCOV
2763
                var e1UpdateTime, e2UpdateTime *time.Time
×
UNCOV
2764
                if edge1 != nil {
×
UNCOV
2765
                        e1UpdateTime = &edge1.LastUpdate
×
UNCOV
2766
                }
×
UNCOV
2767
                if edge2 != nil {
×
UNCOV
2768
                        e2UpdateTime = &edge2.LastUpdate
×
UNCOV
2769
                }
×
2770

UNCOV
2771
                nodeKey1, nodeKey2 = makeZombiePubkeys(
×
UNCOV
2772
                        &edgeInfo, e1UpdateTime, e2UpdateTime,
×
UNCOV
2773
                )
×
2774
        }
2775

2776
        return &edgeInfo, markEdgeZombie(
3✔
2777
                zombieIndex, byteOrder.Uint64(chanID), nodeKey1, nodeKey2,
3✔
2778
        )
3✔
2779
}
2780

2781
// makeZombiePubkeys derives the node pubkeys to store in the zombie index for a
2782
// particular pair of channel policies. The return values are one of:
2783
//  1. (pubkey1, pubkey2)
2784
//  2. (pubkey1, blank)
2785
//  3. (blank, pubkey2)
2786
//
2787
// A blank pubkey means that corresponding node will be unable to resurrect a
2788
// channel on its own. For example, node1 may continue to publish recent
2789
// updates, but node2 has fallen way behind. After marking an edge as a zombie,
2790
// we don't want another fresh update from node1 to resurrect it, as the edge
2791
// only become live once node2 finally sends something recent.
2792
//
2793
// In the case where we have neither update, we allow either party to resurrect
2794
// the channel. If the channel were to be marked zombie again, it would be
2795
// marked with the correct lagging channel since we received an update from only
2796
// one side.
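//
// For instance (a sketch under the rules above, with placeholder timestamps
// and an assumed, populated *models.ChannelEdgeInfo named info):
//
//	t1 := time.Unix(1_000, 0) // node1's last update (older)
//	t2 := time.Unix(2_000, 0) // node2's last update (newer)
//	pk1, pk2 := makeZombiePubkeys(info, &t1, &t2)
//	// pk1 == info.NodeKey1Bytes, pk2 is blank: only the lagging node1 can
//	// resurrect the channel with a fresh update.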
2797
func makeZombiePubkeys(info *models.ChannelEdgeInfo,
UNCOV
2798
        e1, e2 *time.Time) ([33]byte, [33]byte) {
×
UNCOV
2799

×
UNCOV
2800
        switch {
×
2801
        // If we don't have either edge policy, we'll return both pubkeys so
2802
        // that the channel can be resurrected by either party.
UNCOV
2803
        case e1 == nil && e2 == nil:
×
UNCOV
2804
                return info.NodeKey1Bytes, info.NodeKey2Bytes
×
2805

2806
        // If we're missing edge1, or if both edges are present but edge1 is
2807
        // older, we'll return edge1's pubkey and a blank pubkey for edge2. This
2808
        // means that only an update from edge1 will be able to resurrect the
2809
        // channel.
UNCOV
2810
        case e1 == nil || (e2 != nil && e1.Before(*e2)):
×
UNCOV
2811
                return info.NodeKey1Bytes, [33]byte{}
×
2812

2813
        // Otherwise, we're missing edge2 or edge2 is the older side, so we
2814
        // return a blank pubkey for edge1. In this case, only an update from
2815
        // edge2 can resurrect the channel.
UNCOV
2816
        default:
×
UNCOV
2817
                return [33]byte{}, info.NodeKey2Bytes
×
2818
        }
2819
}
2820

2821
// UpdateEdgePolicy updates the edge routing policy for a single directed edge
2822
// within the database for the referenced channel. The `flags` attribute within
2823
// the ChannelEdgePolicy determines which of the directed edges are being
2824
// updated. If the flag is 1, then the first node's information is being
2825
// updated, otherwise it's the second node's information. The node ordering is
2826
// determined by the lexicographical ordering of the identity public keys of the
2827
// nodes on either side of the channel.
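//
// Illustrative usage sketch (assumed store *KVStore, context ctx, and an
// already-populated *models.ChannelEdgePolicy named policy):
//
//	from, to, err := store.UpdateEdgePolicy(ctx, policy)
//	if errors.Is(err, ErrEdgeNotFound) {
//		// The referenced channel is not yet in the graph.
//	} else if err != nil {
//		return err
//	}
//	_, _ = from, to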
2828
func (c *KVStore) UpdateEdgePolicy(ctx context.Context,
2829
        edge *models.ChannelEdgePolicy,
2830
        opts ...batch.SchedulerOption) (route.Vertex, route.Vertex, error) {
3✔
2831

3✔
2832
        var (
3✔
2833
                isUpdate1    bool
3✔
2834
                edgeNotFound bool
3✔
2835
                from, to     route.Vertex
3✔
2836
        )
3✔
2837

3✔
2838
        r := &batch.Request[kvdb.RwTx]{
3✔
2839
                Opts: batch.NewSchedulerOptions(opts...),
3✔
2840
                Reset: func() {
6✔
2841
                        isUpdate1 = false
3✔
2842
                        edgeNotFound = false
3✔
2843
                },
3✔
2844
                Do: func(tx kvdb.RwTx) error {
3✔
2845
                        var err error
3✔
2846
                        from, to, isUpdate1, err = updateEdgePolicy(tx, edge)
3✔
2847
                        if err != nil {
3✔
UNCOV
2848
                                log.Errorf("UpdateEdgePolicy failed: %v", err)
×
UNCOV
2849
                        }
×
2850

2851
                        // Silence ErrEdgeNotFound so that the batch can
2852
                        // succeed, but propagate the error via local state.
2853
                        if errors.Is(err, ErrEdgeNotFound) {
3✔
UNCOV
2854
                                edgeNotFound = true
×
UNCOV
2855
                                return nil
×
UNCOV
2856
                        }
×
2857

2858
                        return err
3✔
2859
                },
2860
                OnCommit: func(err error) error {
3✔
2861
                        switch {
3✔
UNCOV
2862
                        case err != nil:
×
UNCOV
2863
                                return err
×
UNCOV
2864
                        case edgeNotFound:
×
UNCOV
2865
                                return ErrEdgeNotFound
×
2866
                        default:
3✔
2867
                                c.updateEdgeCache(edge, isUpdate1)
3✔
2868
                                return nil
3✔
2869
                        }
2870
                },
2871
        }
2872

2873
        err := c.chanScheduler.Execute(ctx, r)
3✔
2874

3✔
2875
        return from, to, err
3✔
2876
}
2877

2878
func (c *KVStore) updateEdgeCache(e *models.ChannelEdgePolicy,
2879
        isUpdate1 bool) {
3✔
2880

3✔
2881
        // If an entry for this channel is found in reject cache, we'll modify
3✔
2882
        // the entry with the updated timestamp for the direction that was just
3✔
2883
        // written. If the edge doesn't exist, we'll load the cache entry lazily
3✔
2884
        // during the next query for this edge.
3✔
2885
        if entry, ok := c.rejectCache.get(e.ChannelID); ok {
6✔
2886
                if isUpdate1 {
6✔
2887
                        entry.upd1Time = e.LastUpdate.Unix()
3✔
2888
                } else {
6✔
2889
                        entry.upd2Time = e.LastUpdate.Unix()
3✔
2890
                }
3✔
2891
                c.rejectCache.insert(e.ChannelID, entry)
3✔
2892
        }
2893

2894
        // If an entry for this channel is found in channel cache, we'll modify
2895
        // the entry with the updated policy for the direction that was just
2896
        // written. If the edge doesn't exist, we'll defer loading the info and
2897
        // policies and lazily read from disk during the next query.
2898
        if channel, ok := c.chanCache.get(e.ChannelID); ok {
6✔
2899
                if isUpdate1 {
6✔
2900
                        channel.Policy1 = e
3✔
2901
                } else {
6✔
2902
                        channel.Policy2 = e
3✔
2903
                }
3✔
2904
                c.chanCache.insert(e.ChannelID, channel)
3✔
2905
        }
2906
}
2907

2908
// updateEdgePolicy attempts to update an edge's policy within the relevant
2909
// buckets using an existing database transaction. The returned boolean will be
2910
// true if the updated policy belongs to node1, and false if the policy belonged
2911
// to node2.
2912
func updateEdgePolicy(tx kvdb.RwTx, edge *models.ChannelEdgePolicy) (
2913
        route.Vertex, route.Vertex, bool, error) {
3✔
2914

3✔
2915
        var noVertex route.Vertex
3✔
2916

3✔
2917
        edges := tx.ReadWriteBucket(edgeBucket)
3✔
2918
        if edges == nil {
3✔
2919
                return noVertex, noVertex, false, ErrEdgeNotFound
×
2920
        }
×
2921
        edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
3✔
2922
        if edgeIndex == nil {
3✔
2923
                return noVertex, noVertex, false, ErrEdgeNotFound
×
2924
        }
×
2925

2926
        // Create the channelID key by converting the channel ID
2927
        // integer into a byte slice.
2928
        var chanID [8]byte
3✔
2929
        byteOrder.PutUint64(chanID[:], edge.ChannelID)
3✔
2930

3✔
2931
        // With the channel ID, we then fetch the value storing the two
3✔
2932
        // nodes which connect this channel edge.
3✔
2933
        nodeInfo := edgeIndex.Get(chanID[:])
3✔
2934
        if nodeInfo == nil {
3✔
UNCOV
2935
                return noVertex, noVertex, false, ErrEdgeNotFound
×
UNCOV
2936
        }
×
2937

2938
        // Depending on the flags value passed above, either the first
2939
        // or second edge policy is being updated.
2940
        var fromNode, toNode []byte
3✔
2941
        var isUpdate1 bool
3✔
2942
        if edge.ChannelFlags&lnwire.ChanUpdateDirection == 0 {
6✔
2943
                fromNode = nodeInfo[:33]
3✔
2944
                toNode = nodeInfo[33:66]
3✔
2945
                isUpdate1 = true
3✔
2946
        } else {
6✔
2947
                fromNode = nodeInfo[33:66]
3✔
2948
                toNode = nodeInfo[:33]
3✔
2949
                isUpdate1 = false
3✔
2950
        }
3✔
2951

2952
        // Finally, with the direction of the edge being updated
2953
        // identified, we update the on-disk edge representation.
2954
        err := putChanEdgePolicy(edges, edge, fromNode, toNode)
3✔
2955
        if err != nil {
3✔
UNCOV
2956
                return noVertex, noVertex, false, err
×
UNCOV
2957
        }
×
2958

2959
        var (
3✔
2960
                fromNodePubKey route.Vertex
3✔
2961
                toNodePubKey   route.Vertex
3✔
2962
        )
3✔
2963
        copy(fromNodePubKey[:], fromNode)
3✔
2964
        copy(toNodePubKey[:], toNode)
3✔
2965

3✔
2966
        return fromNodePubKey, toNodePubKey, isUpdate1, nil
3✔
2967
}
2968

2969
// isPublic determines whether the node is seen as public within the graph from
2970
// the source node's point of view. An existing database transaction can also be
2971
// specified.
2972
func (c *KVStore) isPublic(tx kvdb.RTx, nodePub route.Vertex,
2973
        sourcePubKey []byte) (bool, error) {
3✔
2974

3✔
2975
        // In order to determine whether this node is publicly advertised within
3✔
2976
        // the graph, we'll need to look at all of its edges and check whether
3✔
2977
        // they extend to any other node than the source node. errDone will be
3✔
2978
        // used to terminate the check early.
3✔
2979
        nodeIsPublic := false
3✔
2980
        errDone := errors.New("done")
3✔
2981
        err := c.forEachNodeChannelTx(tx, nodePub, func(tx kvdb.RTx,
3✔
2982
                info *models.ChannelEdgeInfo, _ *models.ChannelEdgePolicy,
3✔
2983
                _ *models.ChannelEdgePolicy) error {
6✔
2984

3✔
2985
                // If this edge doesn't extend to the source node, we'll
3✔
2986
                // terminate our search as we can now conclude that the node is
3✔
2987
                // publicly advertised within the graph due to the local node
3✔
2988
                // knowing of the current edge.
3✔
2989
                if !bytes.Equal(info.NodeKey1Bytes[:], sourcePubKey) &&
3✔
2990
                        !bytes.Equal(info.NodeKey2Bytes[:], sourcePubKey) {
6✔
2991

3✔
2992
                        nodeIsPublic = true
3✔
2993
                        return errDone
3✔
2994
                }
3✔
2995

2996
                // Since the edge _does_ extend to the source node, we'll also
2997
                // need to ensure that this is a public edge.
2998
                if info.AuthProof != nil {
6✔
2999
                        nodeIsPublic = true
3✔
3000
                        return errDone
3✔
3001
                }
3✔
3002

3003
                // Otherwise, we'll continue our search.
3004
                return nil
3✔
3005
        })
3006
        if err != nil && !errors.Is(err, errDone) {
3✔
3007
                return false, err
×
3008
        }
×
3009

3010
        return nodeIsPublic, nil
3✔
3011
}
3012

3013
// FetchLightningNodeTx attempts to look up a target node by its identity
3014
// public key. If the node isn't found in the database, then
3015
// ErrGraphNodeNotFound is returned. An optional transaction may be provided.
3016
// If none is provided, then a new one will be created.
3017
func (c *KVStore) FetchLightningNodeTx(tx kvdb.RTx, nodePub route.Vertex) (
3018
        *models.LightningNode, error) {
3✔
3019

3✔
3020
        return c.fetchLightningNode(tx, nodePub)
3✔
3021
}
3✔
3022

3023
// FetchLightningNode attempts to look up a target node by its identity public
3024
// key. If the node isn't found in the database, then ErrGraphNodeNotFound is
3025
// returned.
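//
// Illustrative usage sketch (assumed store *KVStore, context ctx and a
// placeholder route.Vertex named nodePub):
//
//	node, err := store.FetchLightningNode(ctx, nodePub)
//	switch {
//	case errors.Is(err, ErrGraphNodeNotFound):
//		// The node has not been seen in the graph yet.
//	case err != nil:
//		return err
//	default:
//		fmt.Println(node.Alias)
//	}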
3026
func (c *KVStore) FetchLightningNode(_ context.Context,
3027
        nodePub route.Vertex) (*models.LightningNode, error) {
3✔
3028

3✔
3029
        return c.fetchLightningNode(nil, nodePub)
3✔
3030
}
3✔
3031

3032
// fetchLightningNode attempts to look up a target node by its identity public
3033
// key. If the node isn't found in the database, then ErrGraphNodeNotFound is
3034
// returned. An optional transaction may be provided. If none is provided, then
3035
// a new one will be created.
3036
func (c *KVStore) fetchLightningNode(tx kvdb.RTx,
3037
        nodePub route.Vertex) (*models.LightningNode, error) {
3✔
3038

3✔
3039
        var node *models.LightningNode
3✔
3040
        fetch := func(tx kvdb.RTx) error {
6✔
3041
                // First grab the nodes bucket which stores the mapping from
3✔
3042
                // pubKey to node information.
3✔
3043
                nodes := tx.ReadBucket(nodeBucket)
3✔
3044
                if nodes == nil {
3✔
3045
                        return ErrGraphNotFound
×
3046
                }
×
3047

3048
                // If a key for this serialized public key isn't found, then
3049
                // the target node doesn't exist within the database.
3050
                nodeBytes := nodes.Get(nodePub[:])
3✔
3051
                if nodeBytes == nil {
6✔
3052
                        return ErrGraphNodeNotFound
3✔
3053
                }
3✔
3054

3055
                // If the node is found, then we can deserialize the node
3056
                // information to return to the user.
3057
                nodeReader := bytes.NewReader(nodeBytes)
3✔
3058
                n, err := deserializeLightningNode(nodeReader)
3✔
3059
                if err != nil {
3✔
3060
                        return err
×
3061
                }
×
3062

3063
                node = &n
3✔
3064

3✔
3065
                return nil
3✔
3066
        }
3067

3068
        if tx == nil {
6✔
3069
                err := kvdb.View(
3✔
3070
                        c.db, fetch, func() {
6✔
3071
                                node = nil
3✔
3072
                        },
3✔
3073
                )
3074
                if err != nil {
6✔
3075
                        return nil, err
3✔
3076
                }
3✔
3077

3078
                return node, nil
3✔
3079
        }
3080

UNCOV
3081
        err := fetch(tx)
×
UNCOV
3082
        if err != nil {
×
UNCOV
3083
                return nil, err
×
UNCOV
3084
        }
×
3085

UNCOV
3086
        return node, nil
×
3087
}
3088

3089
// HasLightningNode determines if the graph has a vertex identified by the
3090
// target node identity public key. If the node exists in the database, a
3091
// timestamp of when the data for the node was last updated is returned along
3092
// with a true boolean. Otherwise, an empty time.Time is returned with a false
3093
// boolean.
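//
// Illustrative usage sketch (assumed store *KVStore, context ctx and a
// placeholder 33-byte public key nodePub):
//
//	lastUpdate, exists, err := store.HasLightningNode(ctx, nodePub)
//	if err != nil {
//		return err
//	}
//	if exists {
//		fmt.Printf("node last updated at %v\n", lastUpdate)
//	}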
3094
func (c *KVStore) HasLightningNode(_ context.Context,
3095
        nodePub [33]byte) (time.Time, bool, error) {
3✔
3096

3✔
3097
        var (
3✔
3098
                updateTime time.Time
3✔
3099
                exists     bool
3✔
3100
        )
3✔
3101

3✔
3102
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
6✔
3103
                // First grab the nodes bucket which stores the mapping from
3✔
3104
                // pubKey to node information.
3✔
3105
                nodes := tx.ReadBucket(nodeBucket)
3✔
3106
                if nodes == nil {
3✔
3107
                        return ErrGraphNotFound
×
3108
                }
×
3109

3110
                // If a key for this serialized public key isn't found, we can
3111
                // exit early.
3112
                nodeBytes := nodes.Get(nodePub[:])
3✔
3113
                if nodeBytes == nil {
6✔
3114
                        exists = false
3✔
3115
                        return nil
3✔
3116
                }
3✔
3117

3118
                // Otherwise we continue on to obtain the time stamp
3119
                // representing the last time the data for this node was
3120
                // updated.
3121
                nodeReader := bytes.NewReader(nodeBytes)
3✔
3122
                node, err := deserializeLightningNode(nodeReader)
3✔
3123
                if err != nil {
3✔
3124
                        return err
×
3125
                }
×
3126

3127
                exists = true
3✔
3128
                updateTime = node.LastUpdate
3✔
3129

3✔
3130
                return nil
3✔
3131
        }, func() {
3✔
3132
                updateTime = time.Time{}
3✔
3133
                exists = false
3✔
3134
        })
3✔
3135
        if err != nil {
3✔
3136
                return time.Time{}, exists, err
×
3137
        }
×
3138

3139
        return updateTime, exists, nil
3✔
3140
}
3141

3142
// nodeTraversal is used to traverse all channels of a node given by its
3143
// public key and passes channel information into the specified callback.
3144
func nodeTraversal(tx kvdb.RTx, nodePub []byte, db kvdb.Backend,
3145
        cb func(kvdb.RTx, *models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
3146
                *models.ChannelEdgePolicy) error) error {
3✔
3147

3✔
3148
        traversal := func(tx kvdb.RTx) error {
6✔
3149
                edges := tx.ReadBucket(edgeBucket)
3✔
3150
                if edges == nil {
3✔
3151
                        return ErrGraphNotFound
×
3152
                }
×
3153
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
3✔
3154
                if edgeIndex == nil {
3✔
3155
                        return ErrGraphNoEdgesFound
×
3156
                }
×
3157

3158
                // In order to reach all the edges for this node, we take
3159
                // advantage of the construction of the key-space within the
3160
                // edge bucket. The keys are stored in the form: pubKey ||
3161
                // chanID. Therefore, starting from a chanID of zero, we can
3162
                // scan forward in the bucket, grabbing all the edges for the
3163
                // node. Once the prefix no longer matches, then we know we're
3164
                // done.
3165
                var nodeStart [33 + 8]byte
3✔
3166
                copy(nodeStart[:], nodePub)
3✔
3167
                copy(nodeStart[33:], chanStart[:])
3✔
3168

3✔
3169
                // Starting from the key pubKey || 0, we seek forward in the
3✔
3170
                // bucket until the retrieved key no longer has the public key
3✔
3171
                // as its prefix. This indicates that we've stepped over into
3✔
3172
                // another node's edges, so we can terminate our scan.
3✔
3173
                edgeCursor := edges.ReadCursor()
3✔
3174
                for nodeEdge, _ := edgeCursor.Seek(nodeStart[:]); bytes.HasPrefix(nodeEdge, nodePub); nodeEdge, _ = edgeCursor.Next() { //nolint:ll
6✔
3175
                        // If the prefix still matches, the channel id is
3✔
3176
                        // returned in nodeEdge. The channel id is used to look
3✔
3177
                        // up the node at the other end of the channel and both
3✔
3178
                        // edge policies.
3✔
3179
                        chanID := nodeEdge[33:]
3✔
3180
                        edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID)
3✔
3181
                        if err != nil {
3✔
3182
                                return err
×
3183
                        }
×
3184

3185
                        outgoingPolicy, err := fetchChanEdgePolicy(
3✔
3186
                                edges, chanID, nodePub,
3✔
3187
                        )
3✔
3188
                        if err != nil {
3✔
3189
                                return err
×
3190
                        }
×
3191

3192
                        otherNode, err := edgeInfo.OtherNodeKeyBytes(nodePub)
3✔
3193
                        if err != nil {
3✔
3194
                                return err
×
3195
                        }
×
3196

3197
                        incomingPolicy, err := fetchChanEdgePolicy(
3✔
3198
                                edges, chanID, otherNode[:],
3✔
3199
                        )
3✔
3200
                        if err != nil {
3✔
3201
                                return err
×
3202
                        }
×
3203

3204
                        // Finally, we execute the callback.
3205
                        err = cb(tx, &edgeInfo, outgoingPolicy, incomingPolicy)
3✔
3206
                        if err != nil {
6✔
3207
                                return err
3✔
3208
                        }
3✔
3209
                }
3210

3211
                return nil
3✔
3212
        }
3213

3214
        // If no transaction was provided, then we'll create a new transaction
3215
        // to execute the transaction within.
3216
        if tx == nil {
6✔
3217
                return kvdb.View(db, traversal, func() {})
6✔
3218
        }
3219

3220
        // Otherwise, we re-use the existing transaction to execute the graph
3221
        // traversal.
3222
        return traversal(tx)
3✔
3223
}
3224

3225
// ForEachNodeChannel iterates through all channels of the given node,
3226
// executing the passed callback with an edge info structure and the policies
3227
// of each end of the channel. The first edge policy is the outgoing edge *to*
3228
// the connecting node, while the second is the incoming edge *from* the
3229
// connecting node. If the callback returns an error, then the iteration is
3230
// halted with the error propagated back up to the caller.
3231
//
3232
// Unknown policies are passed into the callback as nil values.
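//
// Illustrative usage sketch (assumed store *KVStore and a placeholder
// route.Vertex named nodePub):
//
//	err := store.ForEachNodeChannel(nodePub, func(info *models.ChannelEdgeInfo,
//		outPolicy, inPolicy *models.ChannelEdgePolicy) error {
//
//		if outPolicy == nil || inPolicy == nil {
//			fmt.Printf("channel %d is missing a policy\n", info.ChannelID)
//		}
//		return nil
//	})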
3233
func (c *KVStore) ForEachNodeChannel(nodePub route.Vertex,
3234
        cb func(*models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
3235
                *models.ChannelEdgePolicy) error) error {
3✔
3236

3✔
3237
        return nodeTraversal(nil, nodePub[:], c.db, func(_ kvdb.RTx,
3✔
3238
                info *models.ChannelEdgeInfo, policy,
3✔
3239
                policy2 *models.ChannelEdgePolicy) error {
6✔
3240

3✔
3241
                return cb(info, policy, policy2)
3✔
3242
        })
3✔
3243
}
3244

3245
// ForEachSourceNodeChannel iterates through all channels of the source node,
3246
// executing the passed callback on each. The callback is provided with the
3247
// channel's outpoint, whether we have a policy for the channel and the channel
3248
// peer's node information.
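//
// Illustrative usage sketch (assumed store *KVStore):
//
//	err := store.ForEachSourceNodeChannel(func(chanPoint wire.OutPoint,
//		havePolicy bool, peer *models.LightningNode) error {
//
//		fmt.Printf("channel %v with %x (policy known: %v)\n",
//			chanPoint, peer.PubKeyBytes, havePolicy)
//		return nil
//	})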
3249
func (c *KVStore) ForEachSourceNodeChannel(cb func(chanPoint wire.OutPoint,
3250
        havePolicy bool, otherNode *models.LightningNode) error) error {
3✔
3251

3✔
3252
        return kvdb.View(c.db, func(tx kvdb.RTx) error {
6✔
3253
                nodes := tx.ReadBucket(nodeBucket)
3✔
3254
                if nodes == nil {
3✔
3255
                        return ErrGraphNotFound
×
3256
                }
×
3257

3258
                node, err := sourceNode(nodes)
3✔
3259
                if err != nil {
3✔
3260
                        return err
×
3261
                }
×
3262

3263
                return nodeTraversal(
3✔
3264
                        tx, node.PubKeyBytes[:], c.db, func(tx kvdb.RTx,
3✔
3265
                                info *models.ChannelEdgeInfo,
3✔
3266
                                policy, _ *models.ChannelEdgePolicy) error {
6✔
3267

3✔
3268
                                peer, err := c.fetchOtherNode(
3✔
3269
                                        tx, info, node.PubKeyBytes[:],
3✔
3270
                                )
3✔
3271
                                if err != nil {
3✔
3272
                                        return err
×
3273
                                }
×
3274

3275
                                return cb(
3✔
3276
                                        info.ChannelPoint, policy != nil, peer,
3✔
3277
                                )
3✔
3278
                        },
3279
                )
3280
        }, func() {})
3✔
3281
}
3282

3283
// forEachNodeChannelTx iterates through all channels of the given node,
3284
// executing the passed callback with an edge info structure and the policies
3285
// of each end of the channel. The first edge policy is the outgoing edge *to*
3286
// the connecting node, while the second is the incoming edge *from* the
3287
// connecting node. If the callback returns an error, then the iteration is
3288
// halted with the error propagated back up to the caller.
3289
//
3290
// Unknown policies are passed into the callback as nil values.
3291
//
3292
// If the caller wishes to re-use an existing boltdb transaction, then it
3293
// should be passed as the first argument.  Otherwise, the first argument should
3294
// be nil and a fresh transaction will be created to execute the graph
3295
// traversal.
3296
func (c *KVStore) forEachNodeChannelTx(tx kvdb.RTx,
3297
        nodePub route.Vertex, cb func(kvdb.RTx, *models.ChannelEdgeInfo,
3298
                *models.ChannelEdgePolicy,
3299
                *models.ChannelEdgePolicy) error) error {
3✔
3300

3✔
3301
        return nodeTraversal(tx, nodePub[:], c.db, cb)
3✔
3302
}
3✔
3303

3304
// fetchOtherNode attempts to fetch the full LightningNode that's opposite of
3305
// the target node in the channel. This is useful when one knows the pubkey of
3306
// one of the nodes, and wishes to obtain the full LightningNode for the other
3307
// end of the channel.
3308
func (c *KVStore) fetchOtherNode(tx kvdb.RTx,
3309
        channel *models.ChannelEdgeInfo, thisNodeKey []byte) (
3310
        *models.LightningNode, error) {
3✔
3311

3✔
3312
        // Ensure that the node passed in is actually a member of the channel.
3✔
3313
        var targetNodeBytes [33]byte
3✔
3314
        switch {
3✔
3315
        case bytes.Equal(channel.NodeKey1Bytes[:], thisNodeKey):
3✔
3316
                targetNodeBytes = channel.NodeKey2Bytes
3✔
3317
        case bytes.Equal(channel.NodeKey2Bytes[:], thisNodeKey):
3✔
3318
                targetNodeBytes = channel.NodeKey1Bytes
3✔
3319
        default:
×
3320
                return nil, fmt.Errorf("node not participating in this channel")
×
3321
        }
3322

3323
        var targetNode *models.LightningNode
3✔
3324
        fetchNodeFunc := func(tx kvdb.RTx) error {
6✔
3325
                // First grab the nodes bucket which stores the mapping from
3✔
3326
                // pubKey to node information.
3✔
3327
                nodes := tx.ReadBucket(nodeBucket)
3✔
3328
                if nodes == nil {
3✔
3329
                        return ErrGraphNotFound
×
3330
                }
×
3331

3332
                node, err := fetchLightningNode(nodes, targetNodeBytes[:])
3✔
3333
                if err != nil {
3✔
3334
                        return err
×
3335
                }
×
3336

3337
                targetNode = &node
3✔
3338

3✔
3339
                return nil
3✔
3340
        }
3341

3342
        // If the transaction is nil, then we'll need to create a new one,
3343
        // otherwise we can use the existing db transaction.
3344
        var err error
3✔
3345
        if tx == nil {
3✔
3346
                err = kvdb.View(c.db, fetchNodeFunc, func() {
×
3347
                        targetNode = nil
×
3348
                })
×
3349
        } else {
3✔
3350
                err = fetchNodeFunc(tx)
3✔
3351
        }
3✔
3352

3353
        return targetNode, err
3✔
3354
}
3355

3356
// computeEdgePolicyKeys is a helper function that can be used to compute the
3357
// keys used to index the channel edge policy info for the two nodes of the
3358
// edge. The keys for node 1 and node 2 are returned respectively.
3359
func computeEdgePolicyKeys(info *models.ChannelEdgeInfo) ([]byte, []byte) {
3✔
3360
        var (
3✔
3361
                node1Key [33 + 8]byte
3✔
3362
                node2Key [33 + 8]byte
3✔
3363
        )
3✔
3364

3✔
3365
        copy(node1Key[:], info.NodeKey1Bytes[:])
3✔
3366
        copy(node2Key[:], info.NodeKey2Bytes[:])
3✔
3367

3✔
3368
        byteOrder.PutUint64(node1Key[33:], info.ChannelID)
3✔
3369
        byteOrder.PutUint64(node2Key[33:], info.ChannelID)
3✔
3370

3✔
3371
        return node1Key[:], node2Key[:]
3✔
3372
}
3✔
3373

3374
// FetchChannelEdgesByOutpoint attempts to look up the two directed edges for
3375
// the channel identified by the funding outpoint. If the channel can't be
3376
// found, then ErrEdgeNotFound is returned. A struct which houses the general
3377
// information for the channel itself is returned as well as two structs that
3378
// contain the routing policies for the channel in either direction.
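//
// Illustrative usage sketch (assumed store *KVStore and a placeholder
// wire.OutPoint named op for the funding output):
//
//	info, p1, p2, err := store.FetchChannelEdgesByOutpoint(&op)
//	if errors.Is(err, ErrEdgeNotFound) {
//		// No channel with this funding outpoint is known.
//	} else if err != nil {
//		return err
//	}
//	_, _, _ = info, p1, p2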
3379
func (c *KVStore) FetchChannelEdgesByOutpoint(op *wire.OutPoint) (
3380
        *models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
3381
        *models.ChannelEdgePolicy, error) {
3✔
3382

3✔
3383
        var (
3✔
3384
                edgeInfo *models.ChannelEdgeInfo
3✔
3385
                policy1  *models.ChannelEdgePolicy
3✔
3386
                policy2  *models.ChannelEdgePolicy
3✔
3387
        )
3✔
3388

3✔
3389
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
6✔
3390
                // First, grab the node bucket. This will be used to populate
3✔
3391
                // the Node pointers in each edge read from disk.
3✔
3392
                nodes := tx.ReadBucket(nodeBucket)
3✔
3393
                if nodes == nil {
3✔
3394
                        return ErrGraphNotFound
×
3395
                }
×
3396

3397
                // Next, grab the edge bucket which stores the edges, and also
3398
                // the index itself so we can group the directed edges together
3399
                // logically.
3400
                edges := tx.ReadBucket(edgeBucket)
3✔
3401
                if edges == nil {
3✔
3402
                        return ErrGraphNoEdgesFound
×
3403
                }
×
3404
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
3✔
3405
                if edgeIndex == nil {
3✔
3406
                        return ErrGraphNoEdgesFound
×
3407
                }
×
3408

3409
                // If the channel's outpoint doesn't exist within the outpoint
3410
                // index, then the edge does not exist.
3411
                chanIndex := edges.NestedReadBucket(channelPointBucket)
3✔
3412
                if chanIndex == nil {
3✔
3413
                        return ErrGraphNoEdgesFound
×
3414
                }
×
3415
                var b bytes.Buffer
3✔
3416
                if err := WriteOutpoint(&b, op); err != nil {
3✔
3417
                        return err
×
3418
                }
×
3419
                chanID := chanIndex.Get(b.Bytes())
3✔
3420
                if chanID == nil {
6✔
3421
                        return fmt.Errorf("%w: op=%v", ErrEdgeNotFound, op)
3✔
3422
                }
3✔
3423

3424
                // If the channel is found to exist, then we'll first retrieve
3425
                // the general information for the channel.
3426
                edge, err := fetchChanEdgeInfo(edgeIndex, chanID)
3✔
3427
                if err != nil {
3✔
3428
                        return fmt.Errorf("%w: chanID=%x", err, chanID)
×
3429
                }
×
3430
                edgeInfo = &edge
3✔
3431

3✔
3432
                // Once we have the information about the channels' parameters,
3✔
3433
                // we'll fetch the routing policies for each for the directed
3✔
3434
                // edges.
3✔
3435
                e1, e2, err := fetchChanEdgePolicies(edgeIndex, edges, chanID)
3✔
3436
                if err != nil {
3✔
3437
                        return fmt.Errorf("failed to find policy: %w", err)
×
3438
                }
×
3439

3440
                policy1 = e1
3✔
3441
                policy2 = e2
3✔
3442

3✔
3443
                return nil
3✔
3444
        }, func() {
3✔
3445
                edgeInfo = nil
3✔
3446
                policy1 = nil
3✔
3447
                policy2 = nil
3✔
3448
        })
3✔
3449
        if err != nil {
6✔
3450
                return nil, nil, nil, err
3✔
3451
        }
3✔
3452

3453
        return edgeInfo, policy1, policy2, nil
3✔
3454
}
3455

3456
// FetchChannelEdgesByID attempts to look up the two directed edges for the
3457
// channel identified by the channel ID. If the channel can't be found, then
3458
// ErrEdgeNotFound is returned. A struct which houses the general information
3459
// for the channel itself is returned as well as two structs that contain the
3460
// routing policies for the channel in either direction.
3461
//
3462
// ErrZombieEdge can be returned if the edge is currently marked as a zombie
3463
// within the database. In this case, the ChannelEdgePolicy's will be nil, and
3464
// the ChannelEdgeInfo will only include the public keys of each node.
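//
// Illustrative usage sketch (assumed store *KVStore and a placeholder short
// channel ID chanID):
//
//	info, p1, p2, err := store.FetchChannelEdgesByID(chanID)
//	switch {
//	case errors.Is(err, ErrZombieEdge):
//		// Only info.NodeKey1Bytes and info.NodeKey2Bytes are populated.
//	case err != nil:
//		return err
//	default:
//		_, _, _ = info, p1, p2
//	}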
3465
func (c *KVStore) FetchChannelEdgesByID(chanID uint64) (
3466
        *models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
3467
        *models.ChannelEdgePolicy, error) {
3✔
3468

3✔
3469
        var (
3✔
3470
                edgeInfo  *models.ChannelEdgeInfo
3✔
3471
                policy1   *models.ChannelEdgePolicy
3✔
3472
                policy2   *models.ChannelEdgePolicy
3✔
3473
                channelID [8]byte
3✔
3474
        )
3✔
3475

3✔
3476
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
6✔
3477
                // First, grab the node bucket. This will be used to populate
3✔
3478
                // the Node pointers in each edge read from disk.
3✔
3479
                nodes := tx.ReadBucket(nodeBucket)
3✔
3480
                if nodes == nil {
3✔
3481
                        return ErrGraphNotFound
×
3482
                }
×
3483

3484
                // Next, grab the edge bucket which stores the edges, and also
3485
                // the index itself so we can group the directed edges together
3486
                // logically.
3487
                edges := tx.ReadBucket(edgeBucket)
3✔
3488
                if edges == nil {
3✔
3489
                        return ErrGraphNoEdgesFound
×
3490
                }
×
3491
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
3✔
3492
                if edgeIndex == nil {
3✔
3493
                        return ErrGraphNoEdgesFound
×
3494
                }
×
3495

3496
                byteOrder.PutUint64(channelID[:], chanID)
3✔
3497

3✔
3498
                // Now, attempt to fetch edge.
3✔
3499
                edge, err := fetchChanEdgeInfo(edgeIndex, channelID[:])
3✔
3500

3✔
3501
                // If it doesn't exist, we'll quickly check our zombie index to
3✔
3502
                // see if we've previously marked it as so.
3✔
3503
                if errors.Is(err, ErrEdgeNotFound) {
6✔
3504
                        // If the zombie index doesn't exist, or the edge is not
3✔
3505
                        // marked as a zombie within it, then we'll return the
3✔
3506
                        // original ErrEdgeNotFound error.
3✔
3507
                        zombieIndex := edges.NestedReadBucket(zombieBucket)
3✔
3508
                        if zombieIndex == nil {
3✔
3509
                                return ErrEdgeNotFound
×
3510
                        }
×
3511

3512
                        isZombie, pubKey1, pubKey2 := isZombieEdge(
3✔
3513
                                zombieIndex, chanID,
3✔
3514
                        )
3✔
3515
                        if !isZombie {
6✔
3516
                                return ErrEdgeNotFound
3✔
3517
                        }
3✔
3518

3519
                        // Otherwise, the edge is marked as a zombie, so we'll
3520
                        // populate the edge info with the public keys of each
3521
                        // party as this is the only information we have about
3522
                        // it and return an error signaling so.
3523
                        edgeInfo = &models.ChannelEdgeInfo{
3✔
3524
                                NodeKey1Bytes: pubKey1,
3✔
3525
                                NodeKey2Bytes: pubKey2,
3✔
3526
                        }
3✔
3527

3✔
3528
                        return ErrZombieEdge
3✔
3529
                }
3530

3531
                // Otherwise, we'll just return the error if any.
3532
                if err != nil {
3✔
3533
                        return err
×
3534
                }
×
3535

3536
                edgeInfo = &edge
3✔
3537

3✔
3538
                // Then we'll attempt to fetch the accompanying policies of this
3✔
3539
                // edge.
3✔
3540
                e1, e2, err := fetchChanEdgePolicies(
3✔
3541
                        edgeIndex, edges, channelID[:],
3✔
3542
                )
3✔
3543
                if err != nil {
3✔
3544
                        return err
×
3545
                }
×
3546

3547
                policy1 = e1
3✔
3548
                policy2 = e2
3✔
3549

3✔
3550
                return nil
3✔
3551
        }, func() {
3✔
3552
                edgeInfo = nil
3✔
3553
                policy1 = nil
3✔
3554
                policy2 = nil
3✔
3555
        })
3✔
3556
        if errors.Is(err, ErrZombieEdge) {
6✔
3557
                return edgeInfo, nil, nil, err
3✔
3558
        }
3✔
3559
        if err != nil {
6✔
3560
                return nil, nil, nil, err
3✔
3561
        }
3✔
3562

3563
        return edgeInfo, policy1, policy2, nil
3✔
3564
}
3565

3566
// IsPublicNode is a helper method that determines whether the node with the
3567
// given public key is seen as a public node in the graph from the graph's
3568
// source node's point of view.
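//
// Illustrative usage sketch (assumed store *KVStore and a placeholder
// compressed public key pubKey):
//
//	public, err := store.IsPublicNode(pubKey)
//	if err != nil {
//		return err
//	}
//	fmt.Printf("node is public: %v\n", public)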
3569
func (c *KVStore) IsPublicNode(pubKey [33]byte) (bool, error) {
3✔
3570
        var nodeIsPublic bool
3✔
3571
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
6✔
3572
                nodes := tx.ReadBucket(nodeBucket)
3✔
3573
                if nodes == nil {
3✔
3574
                        return ErrGraphNodesNotFound
×
3575
                }
×
3576
                ourPubKey := nodes.Get(sourceKey)
3✔
3577
                if ourPubKey == nil {
3✔
3578
                        return ErrSourceNodeNotSet
×
3579
                }
×
3580
                node, err := fetchLightningNode(nodes, pubKey[:])
3✔
3581
                if err != nil {
3✔
3582
                        return err
×
3583
                }
×
3584

3585
                nodeIsPublic, err = c.isPublic(tx, node.PubKeyBytes, ourPubKey)
3✔
3586

3✔
3587
                return err
3✔
3588
        }, func() {
3✔
3589
                nodeIsPublic = false
3✔
3590
        })
3✔
3591
        if err != nil {
3✔
3592
                return false, err
×
3593
        }
×
3594

3595
        return nodeIsPublic, nil
3✔
3596
}
3597

3598
// genMultiSigP2WSH generates the p2wsh'd multisig script for 2 of 2 pubkeys.
3599
func genMultiSigP2WSH(aPub, bPub []byte) ([]byte, error) {
3✔
3600
        witnessScript, err := input.GenMultiSigScript(aPub, bPub)
3✔
3601
        if err != nil {
3✔
3602
                return nil, err
×
3603
        }
×
3604

3605
        // With the witness script generated, we'll now turn it into a p2wsh
3606
        // script:
3607
        //  * OP_0 <sha256(script)>
3608
        bldr := txscript.NewScriptBuilder(
3✔
3609
                txscript.WithScriptAllocSize(input.P2WSHSize),
3✔
3610
        )
3✔
3611
        bldr.AddOp(txscript.OP_0)
3✔
3612
        scriptHash := sha256.Sum256(witnessScript)
3✔
3613
        bldr.AddData(scriptHash[:])
3✔
3614

3✔
3615
        return bldr.Script()
3✔
3616
}
3617

3618
// EdgePoint couples the outpoint of a channel with the funding script that it
3619
// creates. The FilteredChainView will use this to watch for spends of this
3620
// edge point on chain. We require both of these values as depending on the
3621
// concrete implementation, either the pkScript, or the out point will be used.
3622
type EdgePoint struct {
3623
        // FundingPkScript is the p2wsh multi-sig script of the target channel.
3624
        FundingPkScript []byte
3625

3626
        // OutPoint is the outpoint of the target channel.
3627
        OutPoint wire.OutPoint
3628
}
3629

3630
// String returns a human-readable version of the target EdgePoint. We return
3631
// the outpoint directly as it is enough to uniquely identify the edge point.
3632
func (e *EdgePoint) String() string {
×
3633
        return e.OutPoint.String()
×
3634
}
×
3635

3636
// ChannelView returns the verifiable edge information for each active channel
3637
// within the known channel graph. The set of UTXO's (along with their scripts)
3638
// returned are the ones that need to be watched on chain to detect channel
3639
// closes on the resident blockchain.
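//
// Illustrative usage sketch (assumed store *KVStore):
//
//	points, err := store.ChannelView()
//	if err != nil {
//		return err
//	}
//	for _, ep := range points {
//		fmt.Printf("watch %v (script %x)\n", ep.OutPoint, ep.FundingPkScript)
//	}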
3640
func (c *KVStore) ChannelView() ([]EdgePoint, error) {
3✔
3641
        var edgePoints []EdgePoint
3✔
3642
        if err := kvdb.View(c.db, func(tx kvdb.RTx) error {
6✔
3643
                // We're going to iterate over the entire channel index, so
3✔
3644
                // we'll need to fetch the edgeBucket to get to the index as
3✔
3645
                // it's a sub-bucket.
3✔
3646
                edges := tx.ReadBucket(edgeBucket)
3✔
3647
                if edges == nil {
3✔
3648
                        return ErrGraphNoEdgesFound
×
3649
                }
×
3650
                chanIndex := edges.NestedReadBucket(channelPointBucket)
3✔
3651
                if chanIndex == nil {
3✔
3652
                        return ErrGraphNoEdgesFound
×
3653
                }
×
3654
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
3✔
3655
                if edgeIndex == nil {
3✔
3656
                        return ErrGraphNoEdgesFound
×
3657
                }
×
3658

3659
                // Once we have the proper bucket, we'll range over each key
3660
                // (which is the channel point for the channel) and decode it,
3661
                // accumulating each entry.
3662
                return chanIndex.ForEach(
3✔
3663
                        func(chanPointBytes, chanID []byte) error {
6✔
3664
                                chanPointReader := bytes.NewReader(
3✔
3665
                                        chanPointBytes,
3✔
3666
                                )
3✔
3667

3✔
3668
                                var chanPoint wire.OutPoint
3✔
3669
                                err := ReadOutpoint(chanPointReader, &chanPoint)
3✔
3670
                                if err != nil {
3✔
3671
                                        return err
×
3672
                                }
×
3673

3674
                                edgeInfo, err := fetchChanEdgeInfo(
3✔
3675
                                        edgeIndex, chanID,
3✔
3676
                                )
3✔
3677
                                if err != nil {
3✔
3678
                                        return err
×
3679
                                }
×
3680

3681
                                pkScript, err := genMultiSigP2WSH(
3✔
3682
                                        edgeInfo.BitcoinKey1Bytes[:],
3✔
3683
                                        edgeInfo.BitcoinKey2Bytes[:],
3✔
3684
                                )
3✔
3685
                                if err != nil {
3✔
3686
                                        return err
×
3687
                                }
×
3688

3689
                                edgePoints = append(edgePoints, EdgePoint{
3✔
3690
                                        FundingPkScript: pkScript,
3✔
3691
                                        OutPoint:        chanPoint,
3✔
3692
                                })
3✔
3693

3✔
3694
                                return nil
3✔
3695
                        },
3696
                )
3697
        }, func() {
3✔
3698
                edgePoints = nil
3✔
3699
        }); err != nil {
3✔
3700
                return nil, err
×
3701
        }
×
3702

3703
        return edgePoints, nil
3✔
3704
}
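
// exampleLogChannelView is an illustrative sketch, not part of the original
// file: it shows how a caller might consume ChannelView, where every
// EdgePoint pairs a channel outpoint with its funding pkScript, which is
// exactly what a chain watcher needs in order to detect channel closes. The
// function name is hypothetical.
func exampleLogChannelView(store *KVStore) error {
	edgePoints, err := store.ChannelView()
	if err != nil {
		return err
	}

	for _, edgePoint := range edgePoints {
		fmt.Printf("watching %v with pkScript=%x\n",
			edgePoint.OutPoint, edgePoint.FundingPkScript)
	}

	return nil
}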

// MarkEdgeZombie attempts to mark a channel identified by its channel ID as a
// zombie. This method is used on an ad-hoc basis, when channels need to be
// marked as zombies outside the normal pruning cycle.
func (c *KVStore) MarkEdgeZombie(chanID uint64,
	pubKey1, pubKey2 [33]byte) error {

	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()

	err := kvdb.Batch(c.db, func(tx kvdb.RwTx) error {
		edges := tx.ReadWriteBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
		if err != nil {
			return fmt.Errorf("unable to create zombie "+
				"bucket: %w", err)
		}

		return markEdgeZombie(zombieIndex, chanID, pubKey1, pubKey2)
	})
	if err != nil {
		return err
	}

	c.rejectCache.remove(chanID)
	c.chanCache.remove(chanID)

	return nil
}

// markEdgeZombie marks an edge as a zombie within our zombie index. The public
// keys should represent the node public keys of the two parties involved in
// the edge.
func markEdgeZombie(zombieIndex kvdb.RwBucket, chanID uint64, pubKey1,
	pubKey2 [33]byte) error {

	var k [8]byte
	byteOrder.PutUint64(k[:], chanID)

	var v [66]byte
	copy(v[:33], pubKey1[:])
	copy(v[33:], pubKey2[:])

	return zombieIndex.Put(k[:], v[:])
}

// MarkEdgeLive clears an edge from our zombie index, deeming it as live.
func (c *KVStore) MarkEdgeLive(chanID uint64) error {
	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()

	return c.markEdgeLiveUnsafe(nil, chanID)
}

// markEdgeLiveUnsafe clears an edge from the zombie index. This method can be
// called with an existing kvdb.RwTx or the argument can be set to nil in which
// case a new transaction will be created.
//
// NOTE: this method MUST only be called if the cacheMu has already been
// acquired.
func (c *KVStore) markEdgeLiveUnsafe(tx kvdb.RwTx, chanID uint64) error {
	dbFn := func(tx kvdb.RwTx) error {
		edges := tx.ReadWriteBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		zombieIndex := edges.NestedReadWriteBucket(zombieBucket)
		if zombieIndex == nil {
			return nil
		}

		var k [8]byte
		byteOrder.PutUint64(k[:], chanID)

		if len(zombieIndex.Get(k[:])) == 0 {
			return ErrZombieEdgeNotFound
		}

		return zombieIndex.Delete(k[:])
	}

	// If the transaction is nil, we'll create a new one. Otherwise, we use
	// the existing transaction.
	var err error
	if tx == nil {
		err = kvdb.Update(c.db, dbFn, func() {})
	} else {
		err = dbFn(tx)
	}
	if err != nil {
		return err
	}

	c.rejectCache.remove(chanID)
	c.chanCache.remove(chanID)

	return nil
}

// IsZombieEdge returns whether the edge is considered zombie. If it is a
// zombie, then the two node public keys corresponding to this edge are also
// returned.
func (c *KVStore) IsZombieEdge(chanID uint64) (bool, [33]byte, [33]byte,
	error) {

	var (
		isZombie         bool
		pubKey1, pubKey2 [33]byte
	)

	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		zombieIndex := edges.NestedReadBucket(zombieBucket)
		if zombieIndex == nil {
			return nil
		}

		isZombie, pubKey1, pubKey2 = isZombieEdge(zombieIndex, chanID)

		return nil
	}, func() {
		isZombie = false
		pubKey1 = [33]byte{}
		pubKey2 = [33]byte{}
	})
	if err != nil {
		return false, [33]byte{}, [33]byte{}, fmt.Errorf("%w: %w "+
			"(chanID=%d)", ErrCantCheckIfZombieEdgeStr, err, chanID)
	}

	return isZombie, pubKey1, pubKey2, nil
}
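
// exampleZombieRoundTrip is an illustrative sketch, not part of the original
// file: it walks the zombie index life cycle shown above by marking a channel
// as a zombie, reading the entry back together with the two node keys, and
// then resurrecting the channel with MarkEdgeLive. The function name and the
// chanID/pub1/pub2 parameters are hypothetical.
func exampleZombieRoundTrip(store *KVStore, chanID uint64,
	pub1, pub2 [33]byte) error {

	if err := store.MarkEdgeZombie(chanID, pub1, pub2); err != nil {
		return err
	}

	isZombie, key1, key2, err := store.IsZombieEdge(chanID)
	if err != nil {
		return err
	}
	if isZombie {
		fmt.Printf("chan %d is a zombie (keys %x, %x)\n",
			chanID, key1, key2)
	}

	// Clearing the zombie entry makes the channel live again.
	return store.MarkEdgeLive(chanID)
}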

// isZombieEdge returns whether an entry exists for the given channel in the
// zombie index. If an entry exists, then the two node public keys
// corresponding to this edge are also returned.
func isZombieEdge(zombieIndex kvdb.RBucket,
	chanID uint64) (bool, [33]byte, [33]byte) {

	var k [8]byte
	byteOrder.PutUint64(k[:], chanID)

	v := zombieIndex.Get(k[:])
	if v == nil {
		return false, [33]byte{}, [33]byte{}
	}

	var pubKey1, pubKey2 [33]byte
	copy(pubKey1[:], v[:33])
	copy(pubKey2[:], v[33:])

	return true, pubKey1, pubKey2
}

// NumZombies returns the current number of zombie channels in the graph.
func (c *KVStore) NumZombies() (uint64, error) {
	var numZombies uint64
	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return nil
		}
		zombieIndex := edges.NestedReadBucket(zombieBucket)
		if zombieIndex == nil {
			return nil
		}

		return zombieIndex.ForEach(func(_, _ []byte) error {
			numZombies++
			return nil
		})
	}, func() {
		numZombies = 0
	})
	if err != nil {
		return 0, err
	}

	return numZombies, nil
}
}
3891

3892
// PutClosedScid stores a SCID for a closed channel in the database. This is so
3893
// that we can ignore channel announcements that we know to be closed without
3894
// having to validate them and fetch a block.
UNCOV
3895
func (c *KVStore) PutClosedScid(scid lnwire.ShortChannelID) error {
×
UNCOV
3896
        return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
×
UNCOV
3897
                closedScids, err := tx.CreateTopLevelBucket(closedScidBucket)
×
UNCOV
3898
                if err != nil {
×
3899
                        return err
×
3900
                }
×
3901

UNCOV
3902
                var k [8]byte
×
UNCOV
3903
                byteOrder.PutUint64(k[:], scid.ToUint64())
×
UNCOV
3904

×
UNCOV
3905
                return closedScids.Put(k[:], []byte{})
×
UNCOV
3906
        }, func() {})
×
3907
}
3908

3909
// IsClosedScid checks whether a channel identified by the passed in scid is
3910
// closed. This helps avoid having to perform expensive validation checks.
3911
// TODO: Add an LRU cache to cut down on disc reads.
3912
func (c *KVStore) IsClosedScid(scid lnwire.ShortChannelID) (bool, error) {
3✔
3913
        var isClosed bool
3✔
3914
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
6✔
3915
                closedScids := tx.ReadBucket(closedScidBucket)
3✔
3916
                if closedScids == nil {
3✔
3917
                        return ErrClosedScidsNotFound
×
3918
                }
×
3919

3920
                var k [8]byte
3✔
3921
                byteOrder.PutUint64(k[:], scid.ToUint64())
3✔
3922

3✔
3923
                if closedScids.Get(k[:]) != nil {
3✔
UNCOV
3924
                        isClosed = true
×
UNCOV
3925
                        return nil
×
UNCOV
3926
                }
×
3927

3928
                return nil
3✔
3929
        }, func() {
3✔
3930
                isClosed = false
3✔
3931
        })
3✔
3932
        if err != nil {
3✔
3933
                return false, err
×
3934
        }
×
3935

3936
        return isClosed, nil
3✔
3937
}
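
// exampleRecordClosedChannel is an illustrative sketch, not part of the
// original file: it shows the intended pairing of PutClosedScid and
// IsClosedScid, where recording a closed SCID lets later announcements for it
// be skipped without validation or a block fetch. The function name is
// hypothetical.
func exampleRecordClosedChannel(store *KVStore,
	scid lnwire.ShortChannelID) (bool, error) {

	if err := store.PutClosedScid(scid); err != nil {
		return false, err
	}

	// Subsequent gossip for this SCID can now be filtered cheaply.
	return store.IsClosedScid(scid)
}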

// GraphSession will provide the call-back with access to a NodeTraverser
// instance which can be used to perform queries against the channel graph.
func (c *KVStore) GraphSession(cb func(graph NodeTraverser) error) error {
	return c.db.View(func(tx walletdb.ReadTx) error {
		return cb(&nodeTraverserSession{
			db: c,
			tx: tx,
		})
	}, func() {})
}
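
// exampleGraphSession is an illustrative sketch, not part of the original
// file: it shows how GraphSession pins every query made through the provided
// NodeTraverser to one read transaction, so the feature lookup and the
// channel iteration below observe the same snapshot of the graph. The
// function name and the nodePub parameter are hypothetical.
func exampleGraphSession(store *KVStore, nodePub route.Vertex) error {
	return store.GraphSession(func(graph NodeTraverser) error {
		if _, err := graph.FetchNodeFeatures(nodePub); err != nil {
			return err
		}

		var numChannels int
		err := graph.ForEachNodeDirectedChannel(nodePub,
			func(_ *DirectedChannel) error {
				numChannels++
				return nil
			},
		)
		if err != nil {
			return err
		}

		fmt.Printf("node %x has %d channels\n", nodePub, numChannels)

		return nil
	})
}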

// nodeTraverserSession implements the NodeTraverser interface but with a
// backing read only transaction for a consistent view of the graph.
type nodeTraverserSession struct {
	tx kvdb.RTx
	db *KVStore
}

// ForEachNodeDirectedChannel calls the callback for every channel of the given
// node.
//
// NOTE: Part of the NodeTraverser interface.
func (c *nodeTraverserSession) ForEachNodeDirectedChannel(nodePub route.Vertex,
	cb func(channel *DirectedChannel) error) error {

	return c.db.forEachNodeDirectedChannel(c.tx, nodePub, cb)
}

// FetchNodeFeatures returns the features of the given node. If the node is
// unknown, assume no additional features are supported.
//
// NOTE: Part of the NodeTraverser interface.
func (c *nodeTraverserSession) FetchNodeFeatures(nodePub route.Vertex) (
	*lnwire.FeatureVector, error) {

	return c.db.fetchNodeFeatures(c.tx, nodePub)
}

func putLightningNode(nodeBucket, aliasBucket, updateIndex kvdb.RwBucket,
	node *models.LightningNode) error {

	var (
		scratch [16]byte
		b       bytes.Buffer
	)

	pub, err := node.PubKey()
	if err != nil {
		return err
	}
	nodePub := pub.SerializeCompressed()

	// If the node has the update time set, write it, else write 0.
	updateUnix := uint64(0)
	if node.LastUpdate.Unix() > 0 {
		updateUnix = uint64(node.LastUpdate.Unix())
	}

	byteOrder.PutUint64(scratch[:8], updateUnix)
	if _, err := b.Write(scratch[:8]); err != nil {
		return err
	}

	if _, err := b.Write(nodePub); err != nil {
		return err
	}

	// If we got a node announcement for this node, we will have the rest
	// of the data available. If not we don't have more data to write.
	if !node.HaveNodeAnnouncement {
		// Write HaveNodeAnnouncement=0.
		byteOrder.PutUint16(scratch[:2], 0)
		if _, err := b.Write(scratch[:2]); err != nil {
			return err
		}

		return nodeBucket.Put(nodePub, b.Bytes())
	}

	// Write HaveNodeAnnouncement=1.
	byteOrder.PutUint16(scratch[:2], 1)
	if _, err := b.Write(scratch[:2]); err != nil {
		return err
	}

	if err := binary.Write(&b, byteOrder, node.Color.R); err != nil {
		return err
	}
	if err := binary.Write(&b, byteOrder, node.Color.G); err != nil {
		return err
	}
	if err := binary.Write(&b, byteOrder, node.Color.B); err != nil {
		return err
	}

	if err := wire.WriteVarString(&b, 0, node.Alias); err != nil {
		return err
	}

	if err := node.Features.Encode(&b); err != nil {
		return err
	}

	numAddresses := uint16(len(node.Addresses))
	byteOrder.PutUint16(scratch[:2], numAddresses)
	if _, err := b.Write(scratch[:2]); err != nil {
		return err
	}

	for _, address := range node.Addresses {
		if err := SerializeAddr(&b, address); err != nil {
			return err
		}
	}

	sigLen := len(node.AuthSigBytes)
	if sigLen > 80 {
		return fmt.Errorf("max sig len allowed is 80, had %v",
			sigLen)
	}

	err = wire.WriteVarBytes(&b, 0, node.AuthSigBytes)
	if err != nil {
		return err
	}

	if len(node.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes {
		return ErrTooManyExtraOpaqueBytes(len(node.ExtraOpaqueData))
	}
	err = wire.WriteVarBytes(&b, 0, node.ExtraOpaqueData)
	if err != nil {
		return err
	}

	if err := aliasBucket.Put(nodePub, []byte(node.Alias)); err != nil {
		return err
	}

	// With the alias bucket updated, we'll now update the index that
	// tracks the time series of node updates.
	var indexKey [8 + 33]byte
	byteOrder.PutUint64(indexKey[:8], updateUnix)
	copy(indexKey[8:], nodePub)

	// If there was already an old index entry for this node, then we'll
	// delete the old one before we write the new entry.
	if nodeBytes := nodeBucket.Get(nodePub); nodeBytes != nil {
		// Extract out the old update time so we can reconstruct the
		// prior index key to delete it from the index.
		oldUpdateTime := nodeBytes[:8]

		var oldIndexKey [8 + 33]byte
		copy(oldIndexKey[:8], oldUpdateTime)
		copy(oldIndexKey[8:], nodePub)

		if err := updateIndex.Delete(oldIndexKey[:]); err != nil {
			return err
		}
	}

	if err := updateIndex.Put(indexKey[:], nil); err != nil {
		return err
	}

	return nodeBucket.Put(nodePub, b.Bytes())
}
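
// For reference (summarized from putLightningNode above, not a comment from
// the original file): the value stored under a node's pubkey is laid out as
//
//	last update (8 bytes) || compressed pubkey (33 bytes) ||
//	have-announcement flag (2 bytes)
//
// and, only when the flag is 1, followed by the RGB color (3 bytes), the
// alias (varstring), the feature vector, the address count (2 bytes) and
// addresses, the signature (varbytes) and any extra opaque data (varbytes),
// with all fixed-width integers written in byteOrder.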

func fetchLightningNode(nodeBucket kvdb.RBucket,
	nodePub []byte) (models.LightningNode, error) {

	nodeBytes := nodeBucket.Get(nodePub)
	if nodeBytes == nil {
		return models.LightningNode{}, ErrGraphNodeNotFound
	}

	nodeReader := bytes.NewReader(nodeBytes)

	return deserializeLightningNode(nodeReader)
}

func deserializeLightningNodeCacheable(r io.Reader) (route.Vertex,
	*lnwire.FeatureVector, error) {

	var (
		pubKey      route.Vertex
		features    = lnwire.EmptyFeatureVector()
		nodeScratch [8]byte
	)

	// Skip ahead:
	// - LastUpdate (8 bytes)
	if _, err := r.Read(nodeScratch[:]); err != nil {
		return pubKey, nil, err
	}

	if _, err := io.ReadFull(r, pubKey[:]); err != nil {
		return pubKey, nil, err
	}

	// Read the node announcement flag.
	if _, err := r.Read(nodeScratch[:2]); err != nil {
		return pubKey, nil, err
	}
	hasNodeAnn := byteOrder.Uint16(nodeScratch[:2])

	// The rest of the data is optional, and will only be there if we got a
	// node announcement for this node.
	if hasNodeAnn == 0 {
		return pubKey, features, nil
	}

	// We did get a node announcement for this node, so we'll have the rest
	// of the data available.
	var rgb uint8
	if err := binary.Read(r, byteOrder, &rgb); err != nil {
		return pubKey, nil, err
	}
	if err := binary.Read(r, byteOrder, &rgb); err != nil {
		return pubKey, nil, err
	}
	if err := binary.Read(r, byteOrder, &rgb); err != nil {
		return pubKey, nil, err
	}

	if _, err := wire.ReadVarString(r, 0); err != nil {
		return pubKey, nil, err
	}

	if err := features.Decode(r); err != nil {
		return pubKey, nil, err
	}

	return pubKey, features, nil
}

func deserializeLightningNode(r io.Reader) (models.LightningNode, error) {
	var (
		node    models.LightningNode
		scratch [8]byte
		err     error
	)

	// Always populate a feature vector, even if we don't have a node
	// announcement and short circuit below.
	node.Features = lnwire.EmptyFeatureVector()

	if _, err := r.Read(scratch[:]); err != nil {
		return models.LightningNode{}, err
	}

	unix := int64(byteOrder.Uint64(scratch[:]))
	node.LastUpdate = time.Unix(unix, 0)

	if _, err := io.ReadFull(r, node.PubKeyBytes[:]); err != nil {
		return models.LightningNode{}, err
	}

	if _, err := r.Read(scratch[:2]); err != nil {
		return models.LightningNode{}, err
	}

	hasNodeAnn := byteOrder.Uint16(scratch[:2])
	if hasNodeAnn == 1 {
		node.HaveNodeAnnouncement = true
	} else {
		node.HaveNodeAnnouncement = false
	}

	// The rest of the data is optional, and will only be there if we got a
	// node announcement for this node.
	if !node.HaveNodeAnnouncement {
		return node, nil
	}

	// We did get a node announcement for this node, so we'll have the rest
	// of the data available.
	if err := binary.Read(r, byteOrder, &node.Color.R); err != nil {
		return models.LightningNode{}, err
	}
	if err := binary.Read(r, byteOrder, &node.Color.G); err != nil {
		return models.LightningNode{}, err
	}
	if err := binary.Read(r, byteOrder, &node.Color.B); err != nil {
		return models.LightningNode{}, err
	}

	node.Alias, err = wire.ReadVarString(r, 0)
	if err != nil {
		return models.LightningNode{}, err
	}

	err = node.Features.Decode(r)
	if err != nil {
		return models.LightningNode{}, err
	}

	if _, err := r.Read(scratch[:2]); err != nil {
		return models.LightningNode{}, err
	}
	numAddresses := int(byteOrder.Uint16(scratch[:2]))

	var addresses []net.Addr
	for i := 0; i < numAddresses; i++ {
		address, err := DeserializeAddr(r)
		if err != nil {
			return models.LightningNode{}, err
		}
		addresses = append(addresses, address)
	}
	node.Addresses = addresses

	node.AuthSigBytes, err = wire.ReadVarBytes(r, 0, 80, "sig")
	if err != nil {
		return models.LightningNode{}, err
	}

	// We'll try and see if there are any opaque bytes left, if not, then
	// we'll ignore the EOF error and return the node as is.
	extraBytes, err := wire.ReadVarBytes(
		r, 0, MaxAllowedExtraOpaqueBytes, "blob",
	)
	switch {
	case errors.Is(err, io.ErrUnexpectedEOF):
	case errors.Is(err, io.EOF):
	case err != nil:
		return models.LightningNode{}, err
	}

	if len(extraBytes) > 0 {
		node.ExtraOpaqueData = extraBytes
	}

	return node, nil
}

func putChanEdgeInfo(edgeIndex kvdb.RwBucket,
	edgeInfo *models.ChannelEdgeInfo, chanID [8]byte) error {

	var b bytes.Buffer

	if _, err := b.Write(edgeInfo.NodeKey1Bytes[:]); err != nil {
		return err
	}
	if _, err := b.Write(edgeInfo.NodeKey2Bytes[:]); err != nil {
		return err
	}
	if _, err := b.Write(edgeInfo.BitcoinKey1Bytes[:]); err != nil {
		return err
	}
	if _, err := b.Write(edgeInfo.BitcoinKey2Bytes[:]); err != nil {
		return err
	}

	if err := wire.WriteVarBytes(&b, 0, edgeInfo.Features); err != nil {
		return err
	}

	authProof := edgeInfo.AuthProof
	var nodeSig1, nodeSig2, bitcoinSig1, bitcoinSig2 []byte
	if authProof != nil {
		nodeSig1 = authProof.NodeSig1Bytes
		nodeSig2 = authProof.NodeSig2Bytes
		bitcoinSig1 = authProof.BitcoinSig1Bytes
		bitcoinSig2 = authProof.BitcoinSig2Bytes
	}

	if err := wire.WriteVarBytes(&b, 0, nodeSig1); err != nil {
		return err
	}
	if err := wire.WriteVarBytes(&b, 0, nodeSig2); err != nil {
		return err
	}
	if err := wire.WriteVarBytes(&b, 0, bitcoinSig1); err != nil {
		return err
	}
	if err := wire.WriteVarBytes(&b, 0, bitcoinSig2); err != nil {
		return err
	}

	if err := WriteOutpoint(&b, &edgeInfo.ChannelPoint); err != nil {
		return err
	}
	err := binary.Write(&b, byteOrder, uint64(edgeInfo.Capacity))
	if err != nil {
		return err
	}
	if _, err := b.Write(chanID[:]); err != nil {
		return err
	}
	if _, err := b.Write(edgeInfo.ChainHash[:]); err != nil {
		return err
	}

	if len(edgeInfo.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes {
		return ErrTooManyExtraOpaqueBytes(len(edgeInfo.ExtraOpaqueData))
	}
	err = wire.WriteVarBytes(&b, 0, edgeInfo.ExtraOpaqueData)
	if err != nil {
		return err
	}

	return edgeIndex.Put(chanID[:], b.Bytes())
}
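
// For reference (summarized from putChanEdgeInfo above, not a comment from
// the original file): the edge info value stored under the 8-byte channel ID
// is laid out as
//
//	node key 1 (33) || node key 2 (33) || bitcoin key 1 (33) ||
//	bitcoin key 2 (33) || features (varbytes) || node sig 1 || node sig 2 ||
//	bitcoin sig 1 || bitcoin sig 2 (each varbytes) ||
//	channel point (as written by WriteOutpoint) || capacity (8 bytes) ||
//	channel ID (8 bytes) || chain hash (32 bytes) ||
//	extra opaque data (varbytes)
//
// which deserializeChanEdgeInfo below reads back in the same order.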

func fetchChanEdgeInfo(edgeIndex kvdb.RBucket,
	chanID []byte) (models.ChannelEdgeInfo, error) {

	edgeInfoBytes := edgeIndex.Get(chanID)
	if edgeInfoBytes == nil {
		return models.ChannelEdgeInfo{}, ErrEdgeNotFound
	}

	edgeInfoReader := bytes.NewReader(edgeInfoBytes)

	return deserializeChanEdgeInfo(edgeInfoReader)
}

func deserializeChanEdgeInfo(r io.Reader) (models.ChannelEdgeInfo, error) {
	var (
		err      error
		edgeInfo models.ChannelEdgeInfo
	)

	if _, err := io.ReadFull(r, edgeInfo.NodeKey1Bytes[:]); err != nil {
		return models.ChannelEdgeInfo{}, err
	}
	if _, err := io.ReadFull(r, edgeInfo.NodeKey2Bytes[:]); err != nil {
		return models.ChannelEdgeInfo{}, err
	}
	if _, err := io.ReadFull(r, edgeInfo.BitcoinKey1Bytes[:]); err != nil {
		return models.ChannelEdgeInfo{}, err
	}
	if _, err := io.ReadFull(r, edgeInfo.BitcoinKey2Bytes[:]); err != nil {
		return models.ChannelEdgeInfo{}, err
	}

	edgeInfo.Features, err = wire.ReadVarBytes(r, 0, 900, "features")
	if err != nil {
		return models.ChannelEdgeInfo{}, err
	}

	proof := &models.ChannelAuthProof{}

	proof.NodeSig1Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
	if err != nil {
		return models.ChannelEdgeInfo{}, err
	}
	proof.NodeSig2Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
	if err != nil {
		return models.ChannelEdgeInfo{}, err
	}
	proof.BitcoinSig1Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
	if err != nil {
		return models.ChannelEdgeInfo{}, err
	}
	proof.BitcoinSig2Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
	if err != nil {
		return models.ChannelEdgeInfo{}, err
	}

	if !proof.IsEmpty() {
		edgeInfo.AuthProof = proof
	}

	edgeInfo.ChannelPoint = wire.OutPoint{}
	if err := ReadOutpoint(r, &edgeInfo.ChannelPoint); err != nil {
		return models.ChannelEdgeInfo{}, err
	}
	if err := binary.Read(r, byteOrder, &edgeInfo.Capacity); err != nil {
		return models.ChannelEdgeInfo{}, err
	}
	if err := binary.Read(r, byteOrder, &edgeInfo.ChannelID); err != nil {
		return models.ChannelEdgeInfo{}, err
	}

	if _, err := io.ReadFull(r, edgeInfo.ChainHash[:]); err != nil {
		return models.ChannelEdgeInfo{}, err
	}

	// We'll try and see if there are any opaque bytes left, if not, then
	// we'll ignore the EOF error and return the edge as is.
	edgeInfo.ExtraOpaqueData, err = wire.ReadVarBytes(
		r, 0, MaxAllowedExtraOpaqueBytes, "blob",
	)
	switch {
	case errors.Is(err, io.ErrUnexpectedEOF):
	case errors.Is(err, io.EOF):
	case err != nil:
		return models.ChannelEdgeInfo{}, err
	}

	return edgeInfo, nil
}

func putChanEdgePolicy(edges kvdb.RwBucket, edge *models.ChannelEdgePolicy,
	from, to []byte) error {

	var edgeKey [33 + 8]byte
	copy(edgeKey[:], from)
	byteOrder.PutUint64(edgeKey[33:], edge.ChannelID)

	var b bytes.Buffer
	if err := serializeChanEdgePolicy(&b, edge, to); err != nil {
		return err
	}

	// Before we write out the new edge, we'll create a new entry in the
	// update index in order to keep it fresh.
	updateUnix := uint64(edge.LastUpdate.Unix())
	var indexKey [8 + 8]byte
	byteOrder.PutUint64(indexKey[:8], updateUnix)
	byteOrder.PutUint64(indexKey[8:], edge.ChannelID)

	updateIndex, err := edges.CreateBucketIfNotExists(edgeUpdateIndexBucket)
	if err != nil {
		return err
	}

	// If there was already an entry for this edge, then we'll need to
	// delete the old one to ensure we don't leave around any after-images.
	// An unknown policy value does not have an update time recorded, so
	// it also does not need to be removed.
	if edgeBytes := edges.Get(edgeKey[:]); edgeBytes != nil &&
		!bytes.Equal(edgeBytes, unknownPolicy) {

		// In order to delete the old entry, we'll need to obtain the
		// *prior* update time in order to delete it. To do this, we'll
		// need to deserialize the existing policy within the database
		// (now outdated by the new one), and delete its corresponding
		// entry within the update index. We'll ignore any
		// ErrEdgePolicyOptionalFieldNotFound or ErrParsingExtraTLVBytes
		// errors, as we only need the channel ID and update time to
		// delete the entry.
		//
		// TODO(halseth): get rid of these invalid policies in a
		// migration.
		// TODO(elle): complete the above TODO in migration from kvdb
		// to SQL.
		oldEdgePolicy, err := deserializeChanEdgePolicy(
			bytes.NewReader(edgeBytes),
		)
		if err != nil &&
			!errors.Is(err, ErrEdgePolicyOptionalFieldNotFound) &&
			!errors.Is(err, ErrParsingExtraTLVBytes) {

			return err
		}

		oldUpdateTime := uint64(oldEdgePolicy.LastUpdate.Unix())

		var oldIndexKey [8 + 8]byte
		byteOrder.PutUint64(oldIndexKey[:8], oldUpdateTime)
		byteOrder.PutUint64(oldIndexKey[8:], edge.ChannelID)

		if err := updateIndex.Delete(oldIndexKey[:]); err != nil {
			return err
		}
	}

	if err := updateIndex.Put(indexKey[:], nil); err != nil {
		return err
	}

	err = updateEdgePolicyDisabledIndex(
		edges, edge.ChannelID,
		edge.ChannelFlags&lnwire.ChanUpdateDirection > 0,
		edge.IsDisabled(),
	)
	if err != nil {
		return err
	}

	return edges.Put(edgeKey[:], b.Bytes())
}

// updateEdgePolicyDisabledIndex is used to update the disabledEdgePolicyIndex
// bucket by either adding a new disabled ChannelEdgePolicy or removing an
// existing one.
// The direction represents the direction of the edge and disabled is used for
// deciding whether to remove or add an entry to the bucket.
// In general a channel is disabled if two entries for the same chanID exist
// in this bucket.
// Maintaining the bucket this way allows a fast retrieval of disabled
// channels, for example when prune is needed.
func updateEdgePolicyDisabledIndex(edges kvdb.RwBucket, chanID uint64,
	direction bool, disabled bool) error {

	var disabledEdgeKey [8 + 1]byte
	byteOrder.PutUint64(disabledEdgeKey[0:], chanID)
	if direction {
		disabledEdgeKey[8] = 1
	}

	disabledEdgePolicyIndex, err := edges.CreateBucketIfNotExists(
		disabledEdgePolicyBucket,
	)
	if err != nil {
		return err
	}

	if disabled {
		return disabledEdgePolicyIndex.Put(disabledEdgeKey[:], []byte{})
	}

	return disabledEdgePolicyIndex.Delete(disabledEdgeKey[:])
}
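
// For reference (summarized from updateEdgePolicyDisabledIndex above, not a
// comment from the original file): each disabled index key encodes both the
// channel and the edge direction as chanID (8 bytes, byteOrder) || direction
// (1 byte, 0 or 1), so a channel is disabled in both directions exactly when
// both keys are present in the bucket.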

// putChanEdgePolicyUnknown marks the edge policy as unknown
// in the edges bucket.
func putChanEdgePolicyUnknown(edges kvdb.RwBucket, channelID uint64,
	from []byte) error {

	var edgeKey [33 + 8]byte
	copy(edgeKey[:], from)
	byteOrder.PutUint64(edgeKey[33:], channelID)

	if edges.Get(edgeKey[:]) != nil {
		return fmt.Errorf("cannot write unknown policy for channel %v "+
			"when there is already a policy present", channelID)
	}

	return edges.Put(edgeKey[:], unknownPolicy)
}

func fetchChanEdgePolicy(edges kvdb.RBucket, chanID []byte,
	nodePub []byte) (*models.ChannelEdgePolicy, error) {

	var edgeKey [33 + 8]byte
	copy(edgeKey[:], nodePub)
	copy(edgeKey[33:], chanID)

	edgeBytes := edges.Get(edgeKey[:])
	if edgeBytes == nil {
		return nil, ErrEdgeNotFound
	}

	// No need to deserialize unknown policy.
	if bytes.Equal(edgeBytes, unknownPolicy) {
		return nil, nil
	}

	edgeReader := bytes.NewReader(edgeBytes)

	ep, err := deserializeChanEdgePolicy(edgeReader)
	switch {
	// If the db policy was missing an expected optional field, we return
	// nil as if the policy was unknown.
	case errors.Is(err, ErrEdgePolicyOptionalFieldNotFound):
		return nil, nil

	// If the policy contains invalid TLV bytes, we return nil as if
	// the policy was unknown.
	case errors.Is(err, ErrParsingExtraTLVBytes):
		return nil, nil

	case err != nil:
		return nil, err
	}

	return ep, nil
}

func fetchChanEdgePolicies(edgeIndex kvdb.RBucket, edges kvdb.RBucket,
	chanID []byte) (*models.ChannelEdgePolicy, *models.ChannelEdgePolicy,
	error) {

	edgeInfo := edgeIndex.Get(chanID)
	if edgeInfo == nil {
		return nil, nil, fmt.Errorf("%w: chanID=%x", ErrEdgeNotFound,
			chanID)
	}

	// The first node is contained within the first half of the edge
	// information. We only propagate the error here and below if it's
	// something other than edge non-existence.
	node1Pub := edgeInfo[:33]
	edge1, err := fetchChanEdgePolicy(edges, chanID, node1Pub)
	if err != nil {
		return nil, nil, fmt.Errorf("%w: node1Pub=%x", ErrEdgeNotFound,
			node1Pub)
	}

	// Similarly, the second node is contained within the latter
	// half of the edge information.
	node2Pub := edgeInfo[33:66]
	edge2, err := fetchChanEdgePolicy(edges, chanID, node2Pub)
	if err != nil {
		return nil, nil, fmt.Errorf("%w: node2Pub=%x", ErrEdgeNotFound,
			node2Pub)
	}

	return edge1, edge2, nil
}

func serializeChanEdgePolicy(w io.Writer, edge *models.ChannelEdgePolicy,
	to []byte) error {

	err := wire.WriteVarBytes(w, 0, edge.SigBytes)
	if err != nil {
		return err
	}

	if err := binary.Write(w, byteOrder, edge.ChannelID); err != nil {
		return err
	}

	var scratch [8]byte
	updateUnix := uint64(edge.LastUpdate.Unix())
	byteOrder.PutUint64(scratch[:], updateUnix)
	if _, err := w.Write(scratch[:]); err != nil {
		return err
	}

	if err := binary.Write(w, byteOrder, edge.MessageFlags); err != nil {
		return err
	}
	if err := binary.Write(w, byteOrder, edge.ChannelFlags); err != nil {
		return err
	}
	if err := binary.Write(w, byteOrder, edge.TimeLockDelta); err != nil {
		return err
	}
	if err := binary.Write(w, byteOrder, uint64(edge.MinHTLC)); err != nil {
		return err
	}
	err = binary.Write(w, byteOrder, uint64(edge.FeeBaseMSat))
	if err != nil {
		return err
	}
	err = binary.Write(
		w, byteOrder, uint64(edge.FeeProportionalMillionths),
	)
	if err != nil {
		return err
	}

	if _, err := w.Write(to); err != nil {
		return err
	}

	// If the max_htlc field is present, we write it. To be compatible with
	// older versions that weren't aware of this field, we write it as part
	// of the opaque data.
	// TODO(halseth): clean up when moving to TLV.
	var opaqueBuf bytes.Buffer
	if edge.MessageFlags.HasMaxHtlc() {
		err := binary.Write(&opaqueBuf, byteOrder, uint64(edge.MaxHTLC))
		if err != nil {
			return err
		}
	}

	// Validate that the ExtraOpaqueData is in fact a valid TLV stream.
	err = edge.ExtraOpaqueData.ValidateTLV()
	if err != nil {
		return fmt.Errorf("%w: %w", ErrParsingExtraTLVBytes, err)
	}

	if len(edge.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes {
		return ErrTooManyExtraOpaqueBytes(len(edge.ExtraOpaqueData))
	}
	if _, err := opaqueBuf.Write(edge.ExtraOpaqueData); err != nil {
		return err
	}

	if err := wire.WriteVarBytes(w, 0, opaqueBuf.Bytes()); err != nil {
		return err
	}

	return nil
}

func deserializeChanEdgePolicy(r io.Reader) (*models.ChannelEdgePolicy, error) {
	// Deserialize the policy. Note that in case an optional field is not
	// found or if the edge has invalid TLV data, then both an error and a
	// populated policy object are returned so that the caller can decide
	// if it still wants to use the edge or not.
	edge, err := deserializeChanEdgePolicyRaw(r)
	if err != nil &&
		!errors.Is(err, ErrEdgePolicyOptionalFieldNotFound) &&
		!errors.Is(err, ErrParsingExtraTLVBytes) {

		return nil, err
	}

	return edge, err
}

func deserializeChanEdgePolicyRaw(r io.Reader) (*models.ChannelEdgePolicy,
	error) {

	edge := &models.ChannelEdgePolicy{}

	var err error
	edge.SigBytes, err = wire.ReadVarBytes(r, 0, 80, "sig")
	if err != nil {
		return nil, err
	}

	if err := binary.Read(r, byteOrder, &edge.ChannelID); err != nil {
		return nil, err
	}

	var scratch [8]byte
	if _, err := r.Read(scratch[:]); err != nil {
		return nil, err
	}
	unix := int64(byteOrder.Uint64(scratch[:]))
	edge.LastUpdate = time.Unix(unix, 0)

	if err := binary.Read(r, byteOrder, &edge.MessageFlags); err != nil {
		return nil, err
	}
	if err := binary.Read(r, byteOrder, &edge.ChannelFlags); err != nil {
		return nil, err
	}
	if err := binary.Read(r, byteOrder, &edge.TimeLockDelta); err != nil {
		return nil, err
	}

	var n uint64
	if err := binary.Read(r, byteOrder, &n); err != nil {
		return nil, err
	}
	edge.MinHTLC = lnwire.MilliSatoshi(n)

	if err := binary.Read(r, byteOrder, &n); err != nil {
		return nil, err
	}
	edge.FeeBaseMSat = lnwire.MilliSatoshi(n)

	if err := binary.Read(r, byteOrder, &n); err != nil {
		return nil, err
	}
	edge.FeeProportionalMillionths = lnwire.MilliSatoshi(n)

	if _, err := r.Read(edge.ToNode[:]); err != nil {
		return nil, err
	}

	// We'll try and see if there are any opaque bytes left, if not, then
	// we'll ignore the EOF error and return the edge as is.
	edge.ExtraOpaqueData, err = wire.ReadVarBytes(
		r, 0, MaxAllowedExtraOpaqueBytes, "blob",
	)
	switch {
	case errors.Is(err, io.ErrUnexpectedEOF):
	case errors.Is(err, io.EOF):
	case err != nil:
		return nil, err
	}

	// See if optional fields are present.
	if edge.MessageFlags.HasMaxHtlc() {
		// The max_htlc field should be at the beginning of the opaque
		// bytes.
		opq := edge.ExtraOpaqueData

		// If the max_htlc field is not present, it might be old data
		// stored before this field was validated. We'll return the
		// edge along with an error.
		if len(opq) < 8 {
			return edge, ErrEdgePolicyOptionalFieldNotFound
		}

		maxHtlc := byteOrder.Uint64(opq[:8])
		edge.MaxHTLC = lnwire.MilliSatoshi(maxHtlc)

		// Exclude the parsed field from the rest of the opaque data.
		edge.ExtraOpaqueData = opq[8:]
	}

	// Attempt to extract the inbound fee from the opaque data. If we fail
	// to parse the TLV here, we return an error but also return the edge
	// so that the caller can still use it. This is for backwards
	// compatibility in case we have already persisted some policies that
	// have invalid TLV data.
	var inboundFee lnwire.Fee
	typeMap, err := edge.ExtraOpaqueData.ExtractRecords(&inboundFee)
	if err != nil {
		return edge, fmt.Errorf("%w: %w", ErrParsingExtraTLVBytes, err)
	}

	val, ok := typeMap[lnwire.FeeRecordType]
	if ok && val == nil {
		edge.InboundFee = fn.Some(inboundFee)
	}

	return edge, nil
}
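
// examplePolicyRoundTrip is an illustrative sketch, not part of the original
// file: it shows that serializeChanEdgePolicy and deserializeChanEdgePolicy
// are inverses for a well-formed policy, with the max_htlc value prepended to
// the opaque data on write and peeled off again on read. The function name
// and the toNode parameter are hypothetical.
func examplePolicyRoundTrip(edge *models.ChannelEdgePolicy,
	toNode [33]byte) (*models.ChannelEdgePolicy, error) {

	var b bytes.Buffer
	if err := serializeChanEdgePolicy(&b, edge, toNode[:]); err != nil {
		return nil, err
	}

	// Reading the bytes back yields a policy with the same channel ID,
	// flags, fee schedule and MaxHTLC as the one we serialized.
	return deserializeChanEdgePolicy(bytes.NewReader(b.Bytes()))
}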

// chanGraphNodeTx is an implementation of the NodeRTx interface backed by the
// KVStore and a kvdb.RTx.
type chanGraphNodeTx struct {
	tx   kvdb.RTx
	db   *KVStore
	node *models.LightningNode
}

// A compile-time constraint to ensure chanGraphNodeTx implements the NodeRTx
// interface.
var _ NodeRTx = (*chanGraphNodeTx)(nil)

func newChanGraphNodeTx(tx kvdb.RTx, db *KVStore,
	node *models.LightningNode) *chanGraphNodeTx {

	return &chanGraphNodeTx{
		tx:   tx,
		db:   db,
		node: node,
	}
}

// Node returns the raw information of the node.
//
// NOTE: This is a part of the NodeRTx interface.
func (c *chanGraphNodeTx) Node() *models.LightningNode {
	return c.node
}

// FetchNode fetches the node with the given pub key under the same transaction
// used to fetch the current node. The returned node is also a NodeRTx and any
// operations on that NodeRTx will also be done under the same transaction.
//
// NOTE: This is a part of the NodeRTx interface.
func (c *chanGraphNodeTx) FetchNode(nodePub route.Vertex) (NodeRTx, error) {
	node, err := c.db.FetchLightningNodeTx(c.tx, nodePub)
	if err != nil {
		return nil, err
	}

	return newChanGraphNodeTx(c.tx, c.db, node), nil
}

// ForEachChannel can be used to iterate over the node's channels under
// the same transaction used to fetch the node.
//
// NOTE: This is a part of the NodeRTx interface.
func (c *chanGraphNodeTx) ForEachChannel(f func(*models.ChannelEdgeInfo,
	*models.ChannelEdgePolicy, *models.ChannelEdgePolicy) error) error {

	return c.db.forEachNodeChannelTx(c.tx, c.node.PubKeyBytes,
		func(_ kvdb.RTx, info *models.ChannelEdgeInfo, policy1,
			policy2 *models.ChannelEdgePolicy) error {

			return f(info, policy1, policy2)
		},
	)
}