
lightningnetwork / lnd / 15646853642

14 Jun 2025 01:16AM UTC coverage: 58.354% (-10.2%) from 68.521%

Merge 5e0003c0b into fe405426c
Pull Request #9950: lnrpc: add auth_proof to graph APIs

6 of 20 new or added lines in 1 file covered. (30.0%)

28413 existing lines in 461 files now uncovered.

97795 of 167589 relevant lines covered (58.35%)

1.81 hits per line

Source File: /graph/db/kv_store.go (68.36%)
1
package graphdb
2

3
import (
4
        "bytes"
5
        "context"
6
        "crypto/sha256"
7
        "encoding/binary"
8
        "errors"
9
        "fmt"
10
        "io"
11
        "math"
12
        "net"
13
        "sort"
14
        "sync"
15
        "testing"
16
        "time"
17

18
        "github.com/btcsuite/btcd/btcec/v2"
19
        "github.com/btcsuite/btcd/chaincfg/chainhash"
20
        "github.com/btcsuite/btcd/txscript"
21
        "github.com/btcsuite/btcd/wire"
22
        "github.com/btcsuite/btcwallet/walletdb"
23
        "github.com/lightningnetwork/lnd/aliasmgr"
24
        "github.com/lightningnetwork/lnd/batch"
25
        "github.com/lightningnetwork/lnd/fn/v2"
26
        "github.com/lightningnetwork/lnd/graph/db/models"
27
        "github.com/lightningnetwork/lnd/input"
28
        "github.com/lightningnetwork/lnd/kvdb"
29
        "github.com/lightningnetwork/lnd/lnwire"
30
        "github.com/lightningnetwork/lnd/routing/route"
31
        "github.com/stretchr/testify/require"
32
)
33

34
var (
35
        // nodeBucket is a bucket which houses all the vertices or nodes within
36
        // the channel graph. This bucket has a single sub-bucket which adds an
37
        // additional index from pubkey -> alias. Within the top-level of this
38
        // bucket, the key space maps a node's compressed public key to the
39
        // serialized information for that node. Additionally, there's a
40
        // special key "source" which stores the pubkey of the source node. The
41
        // source node is used as the starting point for all graph queries and
42
        // traversals. The graph is formed as a star-graph with the source node
43
        // at the center.
44
        //
45
        // maps: pubKey -> nodeInfo
46
        // maps: source -> selfPubKey
47
        nodeBucket = []byte("graph-node")
48

49
        // nodeUpdateIndexBucket is a sub-bucket of the nodeBucket. This bucket
50
        // will be used to quickly look up the "freshness" of a node's last
51
        // update to the network. The bucket only contains keys, and no values,
52
        // it's mapping:
53
        //
54
        // maps: updateTime || nodeID -> nil
55
        nodeUpdateIndexBucket = []byte("graph-node-update-index")
56

57
        // sourceKey is a special key that resides within the nodeBucket. The
58
        // sourceKey maps a key to the public key of the "self node".
59
        sourceKey = []byte("source")
60

61
        // aliasIndexBucket is a sub-bucket that's nested within the main
62
        // nodeBucket. This bucket maps the public key of a node to its
63
        // current alias. This bucket is provided as it can be used within a
64
        // future UI layer to add an additional degree of confirmation.
65
        aliasIndexBucket = []byte("alias")
66

67
        // edgeBucket is a bucket which houses all of the edge or channel
68
        // information within the channel graph. This bucket essentially acts
69
        // as an adjacency list, which in conjunction with a range scan, can be
70
        // used to iterate over all the incoming and outgoing edges for a
71
        // particular node. Keys in this bucket use a prefix scheme which leads
72
        // with the node's public key and ends with the compact edge ID.
73
        // For each chanID, there will be two entries within the bucket, as the
74
        // graph is directed: nodes may have different policies w.r.t. fees
75
        // for their respective directions.
76
        //
77
        // maps: pubKey || chanID -> channel edge policy for node
78
        edgeBucket = []byte("graph-edge")
79

80
        // unknownPolicy is represented as an empty slice. It is
81
        // used as the value in edgeBucket for unknown channel edge policies.
82
        // Unknown policies are still stored in the database to enable efficient
83
        // lookup of incoming channel edges.
84
        unknownPolicy = []byte{}
85

86
        // chanStart is an array of all zero bytes which is used to perform
87
        // range scans within the edgeBucket to obtain all of the outgoing
88
        // edges for a particular node.
89
        chanStart [8]byte
90

91
        // edgeIndexBucket is an index which can be used to iterate all edges
92
        // in the bucket, grouping them according to their in/out nodes.
93
        // Additionally, the items in this bucket also contain the complete
94
        // edge information for a channel. The edge information includes the
95
        // capacity of the channel, the nodes that made the channel, etc. This
96
        // bucket resides within the edgeBucket above. Creation of an edge
97
        // proceeds in two phases: first the edge is added to the edge index,
98
        // afterwards the edgeBucket can be updated with the latest details of
99
        // the edge as they are announced on the network.
100
        //
101
        // maps: chanID -> pubKey1 || pubKey2 || restofEdgeInfo
102
        edgeIndexBucket = []byte("edge-index")
103

104
        // edgeUpdateIndexBucket is a sub-bucket of the main edgeBucket. This
105
        // bucket contains an index which allows us to gauge the "freshness" of
106
        // a channel's last updates.
107
        //
108
        // maps: updateTime || chanID -> nil
109
        edgeUpdateIndexBucket = []byte("edge-update-index")
110

111
        // channelPointBucket maps a channel's full outpoint (txid:index) to
112
        // its short 8-byte channel ID. This bucket resides within the
113
        // edgeBucket above, and can be used to quickly remove an edge due to
114
        // the outpoint being spent, or to query for existence of a channel.
115
        //
116
        // maps: outPoint -> chanID
117
        channelPointBucket = []byte("chan-index")
118

119
        // zombieBucket is a sub-bucket of the main edgeBucket bucket
120
        // responsible for maintaining an index of zombie channels. Each entry
121
        // exists within the bucket as follows:
122
        //
123
        // maps: chanID -> pubKey1 || pubKey2
124
        //
125
        // The chanID represents the channel ID of the edge that is marked as a
126
        // zombie and is used as the key, which maps to the public keys of the
127
        // edge's participants.
128
        zombieBucket = []byte("zombie-index")
129

130
        // disabledEdgePolicyBucket is a sub-bucket of the main edgeBucket
131
        // bucket responsible for maintaining an index of disabled edge
132
        // policies. Each entry exists within the bucket as follows:
133
        //
134
        // maps: <chanID><direction> -> []byte{}
135
        //
136
        // The chanID represents the channel ID of the edge and the direction is
137
        // one byte representing the direction of the edge. The main purpose of
138
        // this index is to allow pruning disabled channels in a fast way
139
        // without the need to iterate all over the graph.
140
        disabledEdgePolicyBucket = []byte("disabled-edge-policy-index")
141

142
        // graphMetaBucket is a top-level bucket which stores various metadata
143
        // related to the on-disk channel graph. Data stored in this bucket
144
        // includes the block to which the graph has been synced, the total
145
        // number of channels, etc.
146
        graphMetaBucket = []byte("graph-meta")
147

148
        // pruneLogBucket is a bucket within the graphMetaBucket that stores
149
        // a mapping from the block height to the hash for the blocks used to
150
        // prune the graph.
151
        // Once a new block is discovered, any channels that have been closed
152
        // (by spending the outpoint) can safely be removed from the graph, and
153
        // the block is added to the prune log. We need to keep such a log for
154
        // the case where a reorg happens, and we must "rewind" the state of the
155
        // graph by removing channels that were previously confirmed. In such a
156
        // case we'll remove all entries from the prune log with a block height
157
        // that no longer exists.
158
        pruneLogBucket = []byte("prune-log")
159

160
        // closedScidBucket is a top-level bucket that stores scids for
161
        // channels that we know to be closed. This is used so that we don't
162
        // need to perform expensive validation checks if we receive a channel
163
        // announcement for the channel again.
164
        //
165
        // maps: scid -> []byte{}
166
        closedScidBucket = []byte("closed-scid")
167
)
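As an illustrative sketch (not part of kv_store.go), the composite keys described above can be built as follows; both helper names are hypothetical and are shown only to make the documented key layout concrete.

// makeEdgeKey shows the edgeBucket key layout: the 33-byte compressed
// node pubkey followed by the 8-byte big-endian channel ID, so a range
// scan prefixed by the pubkey visits all of that node's channels.
func makeEdgeKey(nodePub [33]byte, chanID uint64) []byte {
        key := make([]byte, 33+8)
        copy(key[:33], nodePub[:])
        binary.BigEndian.PutUint64(key[33:], chanID)
        return key
}

// makeEdgeIndexKey shows the edgeIndexBucket key: just the 8-byte
// big-endian channel ID.
func makeEdgeIndexKey(chanID uint64) [8]byte {
        var key [8]byte
        binary.BigEndian.PutUint64(key[:], chanID)
        return key
}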
168

169
const (
170
        // MaxAllowedExtraOpaqueBytes is the largest amount of opaque bytes that
171
        // we'll permit to be written to disk. We limit this as otherwise, it
172
        // would be possible for a node to create a ton of updates and slowly
173
        // fill our disk, and also waste bandwidth due to relaying.
174
        MaxAllowedExtraOpaqueBytes = 10000
175
)
176

177
// KVStore is a persistent, on-disk graph representation of the Lightning
178
// Network. This struct can be used to implement path finding algorithms on top
179
// of, and also to update a node's view based on information received from the
180
// p2p network. Internally, the graph is stored using a modified adjacency list
181
// representation with some added object interaction possible with each
182
// serialized edge/node. The graph stored is directed, meaning there are two
183
// edges stored for each channel: an inbound/outbound edge for each node pair.
184
// Nodes, edges, and edge information can all be added to the graph
185
// independently. Edge removal results in the deletion of all edge information
186
// for that edge.
187
type KVStore struct {
188
        db kvdb.Backend
189

190
        // cacheMu guards all caches (rejectCache and chanCache). If
191
        // this mutex is to be acquired at the same time as the DB mutex, then
192
        // cacheMu MUST be acquired first to prevent deadlock.
193
        cacheMu     sync.RWMutex
194
        rejectCache *rejectCache
195
        chanCache   *channelCache
196

197
        chanScheduler batch.Scheduler[kvdb.RwTx]
198
        nodeScheduler batch.Scheduler[kvdb.RwTx]
199
}
200

201
// A compile-time assertion to ensure that the KVStore struct implements the
202
// V1Store interface.
203
var _ V1Store = (*KVStore)(nil)
204

205
// NewKVStore allocates a new KVStore backed by a DB instance. The
206
// returned instance has its own unique reject cache and channel cache.
207
func NewKVStore(db kvdb.Backend, options ...StoreOptionModifier) (*KVStore,
208
        error) {
3✔
209

3✔
210
        opts := DefaultOptions()
3✔
211
        for _, o := range options {
6✔
212
                o(opts)
3✔
213
        }
3✔
214

215
        if !opts.NoMigration {
6✔
216
                if err := initKVStore(db); err != nil {
3✔
217
                        return nil, err
×
218
                }
×
219
        }
220

221
        g := &KVStore{
3✔
222
                db:          db,
3✔
223
                rejectCache: newRejectCache(opts.RejectCacheSize),
3✔
224
                chanCache:   newChannelCache(opts.ChannelCacheSize),
3✔
225
        }
3✔
226
        g.chanScheduler = batch.NewTimeScheduler(
3✔
227
                batch.NewBoltBackend[kvdb.RwTx](db), &g.cacheMu,
3✔
228
                opts.BatchCommitInterval,
3✔
229
        )
3✔
230
        g.nodeScheduler = batch.NewTimeScheduler(
3✔
231
                batch.NewBoltBackend[kvdb.RwTx](db), nil,
3✔
232
                opts.BatchCommitInterval,
3✔
233
        )
3✔
234

3✔
235
        return g, nil
3✔
236
}
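A minimal usage sketch (illustrative, not part of the file): construct the store from an already-open kvdb.Backend; how that backend is opened is assumed to happen elsewhere.

// openGraphStore is a hypothetical helper. NewKVStore runs the
// bucket-creation step (unless the NoMigration option is set) and wires
// up the reject/channel caches and batch schedulers with defaults.
func openGraphStore(backend kvdb.Backend) (*KVStore, error) {
        return NewKVStore(backend)
}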
237

238
// channelMapKey is the key structure used for storing channel edge policies.
239
type channelMapKey struct {
240
        nodeKey route.Vertex
241
        chanID  [8]byte
242
}
243

244
// getChannelMap loads all channel edge policies from the database and stores
245
// them in a map.
246
func (c *KVStore) getChannelMap(edges kvdb.RBucket) (
247
        map[channelMapKey]*models.ChannelEdgePolicy, error) {
3✔
248

3✔
249
        // Create a map to store all channel edge policies.
3✔
250
        channelMap := make(map[channelMapKey]*models.ChannelEdgePolicy)
3✔
251

3✔
252
        err := kvdb.ForAll(edges, func(k, edgeBytes []byte) error {
6✔
253
                // Skip embedded buckets.
3✔
254
                if bytes.Equal(k, edgeIndexBucket) ||
3✔
255
                        bytes.Equal(k, edgeUpdateIndexBucket) ||
3✔
256
                        bytes.Equal(k, zombieBucket) ||
3✔
257
                        bytes.Equal(k, disabledEdgePolicyBucket) ||
3✔
258
                        bytes.Equal(k, channelPointBucket) {
6✔
259

3✔
260
                        return nil
3✔
261
                }
3✔
262

263
                // Validate key length.
264
                if len(k) != 33+8 {
3✔
265
                        return fmt.Errorf("invalid edge key %x encountered", k)
×
266
                }
×
267

268
                var key channelMapKey
3✔
269
                copy(key.nodeKey[:], k[:33])
3✔
270
                copy(key.chanID[:], k[33:])
3✔
271

3✔
272
                // No need to deserialize unknown policy.
3✔
273
                if bytes.Equal(edgeBytes, unknownPolicy) {
3✔
274
                        return nil
×
275
                }
×
276

277
                edgeReader := bytes.NewReader(edgeBytes)
3✔
278
                edge, err := deserializeChanEdgePolicyRaw(
3✔
279
                        edgeReader,
3✔
280
                )
3✔
281

3✔
282
                switch {
3✔
283
                // If the db policy was missing an expected optional field, we
284
                // return nil as if the policy was unknown.
285
                case errors.Is(err, ErrEdgePolicyOptionalFieldNotFound):
×
286
                        return nil
×
287

288
                // We don't want a single policy with bad TLV data to stop us
289
                // from loading the rest of the data, so we just skip this
290
                // policy. This is for backwards compatibility since we did not
291
                // use to validate TLV data in the past before persisting it.
292
                case errors.Is(err, ErrParsingExtraTLVBytes):
×
293
                        return nil
×
294

295
                case err != nil:
×
296
                        return err
×
297
                }
298

299
                channelMap[key] = edge
3✔
300

3✔
301
                return nil
3✔
302
        })
303
        if err != nil {
3✔
304
                return nil, err
×
305
        }
×
306

307
        return channelMap, nil
3✔
308
}
309

310
var graphTopLevelBuckets = [][]byte{
311
        nodeBucket,
312
        edgeBucket,
313
        graphMetaBucket,
314
        closedScidBucket,
315
}
316

317
// initKVStore creates and initializes a fresh channel graph store. In
318
// the case that the target path has not yet been created or doesn't yet exist,
319
// then the path is created. Additionally, all required top-level buckets used
320
// within the database are created.
321
func initKVStore(db kvdb.Backend) error {
3✔
322
        err := kvdb.Update(db, func(tx kvdb.RwTx) error {
6✔
323
                for _, tlb := range graphTopLevelBuckets {
6✔
324
                        if _, err := tx.CreateTopLevelBucket(tlb); err != nil {
3✔
325
                                return err
×
326
                        }
×
327
                }
328

329
                nodes := tx.ReadWriteBucket(nodeBucket)
3✔
330
                _, err := nodes.CreateBucketIfNotExists(aliasIndexBucket)
3✔
331
                if err != nil {
3✔
332
                        return err
×
333
                }
×
334
                _, err = nodes.CreateBucketIfNotExists(nodeUpdateIndexBucket)
3✔
335
                if err != nil {
3✔
336
                        return err
×
337
                }
×
338

339
                edges := tx.ReadWriteBucket(edgeBucket)
3✔
340
                _, err = edges.CreateBucketIfNotExists(edgeIndexBucket)
3✔
341
                if err != nil {
3✔
342
                        return err
×
343
                }
×
344
                _, err = edges.CreateBucketIfNotExists(edgeUpdateIndexBucket)
3✔
345
                if err != nil {
3✔
346
                        return err
×
347
                }
×
348
                _, err = edges.CreateBucketIfNotExists(channelPointBucket)
3✔
349
                if err != nil {
3✔
350
                        return err
×
351
                }
×
352
                _, err = edges.CreateBucketIfNotExists(zombieBucket)
3✔
353
                if err != nil {
3✔
354
                        return err
×
355
                }
×
356

357
                graphMeta := tx.ReadWriteBucket(graphMetaBucket)
3✔
358
                _, err = graphMeta.CreateBucketIfNotExists(pruneLogBucket)
3✔
359

3✔
360
                return err
3✔
361
        }, func() {})
3✔
362
        if err != nil {
3✔
363
                return fmt.Errorf("unable to create new channel graph: %w", err)
×
364
        }
×
365

366
        return nil
3✔
367
}
368

369
// AddrsForNode returns all known addresses for the target node public key that
370
// the graph DB is aware of. The returned boolean indicates if the given node is
371
// unknown to the graph DB or not.
372
//
373
// NOTE: this is part of the channeldb.AddrSource interface.
374
func (c *KVStore) AddrsForNode(nodePub *btcec.PublicKey) (bool, []net.Addr,
375
        error) {
3✔
376

3✔
377
        pubKey, err := route.NewVertexFromBytes(nodePub.SerializeCompressed())
3✔
378
        if err != nil {
3✔
379
                return false, nil, err
×
380
        }
×
381

382
        node, err := c.FetchLightningNode(pubKey)
3✔
383
        // We don't consider it an error if the graph is unaware of the node.
3✔
384
        switch {
3✔
385
        case err != nil && !errors.Is(err, ErrGraphNodeNotFound):
×
386
                return false, nil, err
×
387

388
        case errors.Is(err, ErrGraphNodeNotFound):
3✔
389
                return false, nil, nil
3✔
390
        }
391

392
        return true, node.Addresses, nil
3✔
393
}
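A short illustrative caller (not part of the file), showing how the returned boolean distinguishes an unknown node from a real failure:

// knownAddrs is a hypothetical wrapper around AddrsForNode.
func knownAddrs(store *KVStore, pub *btcec.PublicKey) ([]net.Addr, error) {
        known, addrs, err := store.AddrsForNode(pub)
        if err != nil {
                return nil, err
        }
        if !known {
                // The node isn't in the graph; treat as empty, not an error.
                return nil, nil
        }

        return addrs, nil
}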
394

395
// ForEachChannel iterates through all the channel edges stored within the
396
// graph and invokes the passed callback for each edge. The callback takes two
397
// edges as since this is a directed graph, both the in/out edges are visited.
398
// If the callback returns an error, then the transaction is aborted and the
399
// iteration stops early.
400
//
401
// NOTE: If an edge can't be found, or wasn't advertised, then a nil pointer
402
// for that particular channel edge routing policy will be passed into the
403
// callback.
404
func (c *KVStore) ForEachChannel(cb func(*models.ChannelEdgeInfo,
405
        *models.ChannelEdgePolicy, *models.ChannelEdgePolicy) error) error {
3✔
406

3✔
407
        return c.db.View(func(tx kvdb.RTx) error {
6✔
408
                edges := tx.ReadBucket(edgeBucket)
3✔
409
                if edges == nil {
3✔
410
                        return ErrGraphNoEdgesFound
×
411
                }
×
412

413
                // First, load all edges in memory indexed by node and channel
414
                // id.
415
                channelMap, err := c.getChannelMap(edges)
3✔
416
                if err != nil {
3✔
417
                        return err
×
418
                }
×
419

420
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
3✔
421
                if edgeIndex == nil {
3✔
422
                        return ErrGraphNoEdgesFound
×
423
                }
×
424

425
                // Load edge index, recombine each channel with the policies
426
                // loaded above and invoke the callback.
427
                return kvdb.ForAll(
3✔
428
                        edgeIndex, func(k, edgeInfoBytes []byte) error {
6✔
429
                                var chanID [8]byte
3✔
430
                                copy(chanID[:], k)
3✔
431

3✔
432
                                edgeInfoReader := bytes.NewReader(edgeInfoBytes)
3✔
433
                                info, err := deserializeChanEdgeInfo(
3✔
434
                                        edgeInfoReader,
3✔
435
                                )
3✔
436
                                if err != nil {
3✔
437
                                        return err
×
438
                                }
×
439

440
                                policy1 := channelMap[channelMapKey{
3✔
441
                                        nodeKey: info.NodeKey1Bytes,
3✔
442
                                        chanID:  chanID,
3✔
443
                                }]
3✔
444

3✔
445
                                policy2 := channelMap[channelMapKey{
3✔
446
                                        nodeKey: info.NodeKey2Bytes,
3✔
447
                                        chanID:  chanID,
3✔
448
                                }]
3✔
449

3✔
450
                                return cb(&info, policy1, policy2)
3✔
451
                        },
452
                )
453
        }, func() {})
3✔
454
}
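An illustrative caller (not part of the file) that exercises the nil-policy convention noted above by counting channels with both directed policies known:

// countFullyAdvertised is a hypothetical wrapper around ForEachChannel.
func countFullyAdvertised(store *KVStore) (int, error) {
        var n int
        err := store.ForEachChannel(func(info *models.ChannelEdgeInfo,
                p1, p2 *models.ChannelEdgePolicy) error {

                // Unknown or unadvertised policies arrive as nil pointers.
                if p1 != nil && p2 != nil {
                        n++
                }

                return nil
        })

        return n, err
}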
455

456
// ForEachChannelCacheable iterates through all the channel edges stored within
457
// the graph and invokes the passed callback for each edge. The callback takes
458
// two edges since, as this is a directed graph, both the in/out edges are
459
// visited. If the callback returns an error, then the transaction is aborted
460
// and the iteration stops early.
461
//
462
// NOTE: If an edge can't be found, or wasn't advertised, then a nil pointer
463
// for that particular channel edge routing policy will be passed into the
464
// callback.
465
//
466
// NOTE: this method is like ForEachChannel but fetches only the data required
467
// for the graph cache.
468
func (c *KVStore) ForEachChannelCacheable(cb func(*models.CachedEdgeInfo,
469
        *models.CachedEdgePolicy, *models.CachedEdgePolicy) error) error {
3✔
470

3✔
471
        return c.db.View(func(tx kvdb.RTx) error {
6✔
472
                edges := tx.ReadBucket(edgeBucket)
3✔
473
                if edges == nil {
3✔
474
                        return ErrGraphNoEdgesFound
×
475
                }
×
476

477
                // First, load all edges in memory indexed by node and channel
478
                // id.
479
                channelMap, err := c.getChannelMap(edges)
3✔
480
                if err != nil {
3✔
481
                        return err
×
482
                }
×
483

484
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
3✔
485
                if edgeIndex == nil {
3✔
486
                        return ErrGraphNoEdgesFound
×
487
                }
×
488

489
                // Load edge index, recombine each channel with the policies
490
                // loaded above and invoke the callback.
491
                return kvdb.ForAll(
3✔
492
                        edgeIndex, func(k, edgeInfoBytes []byte) error {
6✔
493
                                var chanID [8]byte
3✔
494
                                copy(chanID[:], k)
3✔
495

3✔
496
                                edgeInfoReader := bytes.NewReader(edgeInfoBytes)
3✔
497
                                info, err := deserializeChanEdgeInfo(
3✔
498
                                        edgeInfoReader,
3✔
499
                                )
3✔
500
                                if err != nil {
3✔
501
                                        return err
×
502
                                }
×
503

504
                                policy1 := channelMap[channelMapKey{
3✔
505
                                        nodeKey: info.NodeKey1Bytes,
3✔
506
                                        chanID:  chanID,
3✔
507
                                }]
3✔
508

3✔
509
                                policy2 := channelMap[channelMapKey{
3✔
510
                                        nodeKey: info.NodeKey2Bytes,
3✔
511
                                        chanID:  chanID,
3✔
512
                                }]
3✔
513

3✔
514
                                return cb(
3✔
515
                                        models.NewCachedEdge(&info),
3✔
516
                                        models.NewCachedPolicy(policy1),
3✔
517
                                        models.NewCachedPolicy(policy2),
3✔
518
                                )
3✔
519
                        },
520
                )
521
        }, func() {})
3✔
522
}
523

524
// forEachNodeDirectedChannel iterates through all channels of a given node,
525
// executing the passed callback on the directed edge representing the channel
526
// and its incoming policy. If the callback returns an error, then the iteration
527
// is halted with the error propagated back up to the caller. An optional read
528
// transaction may be provided. If none is provided, a new one will be created.
529
//
530
// Unknown policies are passed into the callback as nil values.
531
func (c *KVStore) forEachNodeDirectedChannel(tx kvdb.RTx,
532
        node route.Vertex, cb func(channel *DirectedChannel) error) error {
3✔
533

3✔
534
        // Fallback that uses the database.
3✔
535
        toNodeCallback := func() route.Vertex {
6✔
536
                return node
3✔
537
        }
3✔
538
        toNodeFeatures, err := c.fetchNodeFeatures(tx, node)
3✔
539
        if err != nil {
3✔
540
                return err
×
541
        }
×
542

543
        dbCallback := func(tx kvdb.RTx, e *models.ChannelEdgeInfo, p1,
3✔
544
                p2 *models.ChannelEdgePolicy) error {
6✔
545

3✔
546
                var cachedInPolicy *models.CachedEdgePolicy
3✔
547
                if p2 != nil {
6✔
548
                        cachedInPolicy = models.NewCachedPolicy(p2)
3✔
549
                        cachedInPolicy.ToNodePubKey = toNodeCallback
3✔
550
                        cachedInPolicy.ToNodeFeatures = toNodeFeatures
3✔
551
                }
3✔
552

553
                directedChannel := &DirectedChannel{
3✔
554
                        ChannelID:    e.ChannelID,
3✔
555
                        IsNode1:      node == e.NodeKey1Bytes,
3✔
556
                        OtherNode:    e.NodeKey2Bytes,
3✔
557
                        Capacity:     e.Capacity,
3✔
558
                        OutPolicySet: p1 != nil,
3✔
559
                        InPolicy:     cachedInPolicy,
3✔
560
                }
3✔
561

3✔
562
                if p1 != nil {
6✔
563
                        p1.InboundFee.WhenSome(func(fee lnwire.Fee) {
3✔
UNCOV
564
                                directedChannel.InboundFee = fee
×
UNCOV
565
                        })
×
566
                }
567

568
                if node == e.NodeKey2Bytes {
6✔
569
                        directedChannel.OtherNode = e.NodeKey1Bytes
3✔
570
                }
3✔
571

572
                return cb(directedChannel)
3✔
573
        }
574

575
        return nodeTraversal(tx, node[:], c.db, dbCallback)
3✔
576
}
577

578
// fetchNodeFeatures returns the features of a given node. If no features are
579
// known for the node, an empty feature vector is returned. An optional read
580
// transaction may be provided. If none is provided, a new one will be created.
581
func (c *KVStore) fetchNodeFeatures(tx kvdb.RTx,
582
        node route.Vertex) (*lnwire.FeatureVector, error) {
3✔
583

3✔
584
        // Fallback that uses the database.
3✔
585
        targetNode, err := c.FetchLightningNodeTx(tx, node)
3✔
586
        switch {
3✔
587
        // If the node exists and has features, return them directly.
588
        case err == nil:
3✔
589
                return targetNode.Features, nil
3✔
590

591
        // If we couldn't find a node announcement, populate a blank feature
592
        // vector.
UNCOV
593
        case errors.Is(err, ErrGraphNodeNotFound):
×
UNCOV
594
                return lnwire.EmptyFeatureVector(), nil
×
595

596
        // Otherwise, bubble the error up.
597
        default:
×
598
                return nil, err
×
599
        }
600
}
601

602
// ForEachNodeDirectedChannel iterates through all channels of a given node,
603
// executing the passed callback on the directed edge representing the channel
604
// and its incoming policy. If the callback returns an error, then the iteration
605
// is halted with the error propagated back up to the caller.
606
//
607
// Unknown policies are passed into the callback as nil values.
608
//
609
// NOTE: this is part of the graphdb.NodeTraverser interface.
610
func (c *KVStore) ForEachNodeDirectedChannel(nodePub route.Vertex,
611
        cb func(channel *DirectedChannel) error) error {
3✔
612

3✔
613
        return c.forEachNodeDirectedChannel(nil, nodePub, cb)
3✔
614
}
3✔
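An illustrative caller (not part of the file): channels whose incoming policy is unknown still appear in the directed view, just with a nil InPolicy, so a plain count visits every channel of the node.

// channelCount is a hypothetical wrapper around ForEachNodeDirectedChannel.
func channelCount(store *KVStore, node route.Vertex) (int, error) {
        var n int
        err := store.ForEachNodeDirectedChannel(node,
                func(ch *DirectedChannel) error {
                        n++
                        return nil
                },
        )

        return n, err
}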
615

616
// FetchNodeFeatures returns the features of the given node. If no features are
617
// known for the node, an empty feature vector is returned.
618
//
619
// NOTE: this is part of the graphdb.NodeTraverser interface.
620
func (c *KVStore) FetchNodeFeatures(nodePub route.Vertex) (
621
        *lnwire.FeatureVector, error) {
3✔
622

3✔
623
        return c.fetchNodeFeatures(nil, nodePub)
3✔
624
}
3✔
625

626
// ForEachNodeCached is similar to forEachNode, but it returns DirectedChannel
627
// data to the call-back.
628
//
629
// NOTE: The callback contents MUST not be modified.
630
func (c *KVStore) ForEachNodeCached(cb func(node route.Vertex,
UNCOV
631
        chans map[uint64]*DirectedChannel) error) error {
×
UNCOV
632

×
UNCOV
633
        // Otherwise fall back to a version that uses the database directly.
×
UNCOV
634
        // We'll iterate over each node, then the set of channels for each
×
UNCOV
635
        // node, and construct a similar callback function signature as the
×
UNCOV
636
        // main function expects.
×
UNCOV
637
        return c.forEachNode(func(tx kvdb.RTx,
×
UNCOV
638
                node *models.LightningNode) error {
×
UNCOV
639

×
UNCOV
640
                channels := make(map[uint64]*DirectedChannel)
×
UNCOV
641

×
UNCOV
642
                err := c.forEachNodeChannelTx(tx, node.PubKeyBytes,
×
UNCOV
643
                        func(tx kvdb.RTx, e *models.ChannelEdgeInfo,
×
UNCOV
644
                                p1 *models.ChannelEdgePolicy,
×
UNCOV
645
                                p2 *models.ChannelEdgePolicy) error {
×
UNCOV
646

×
UNCOV
647
                                toNodeCallback := func() route.Vertex {
×
648
                                        return node.PubKeyBytes
×
649
                                }
×
UNCOV
650
                                toNodeFeatures, err := c.fetchNodeFeatures(
×
UNCOV
651
                                        tx, node.PubKeyBytes,
×
UNCOV
652
                                )
×
UNCOV
653
                                if err != nil {
×
654
                                        return err
×
655
                                }
×
656

UNCOV
657
                                var cachedInPolicy *models.CachedEdgePolicy
×
UNCOV
658
                                if p2 != nil {
×
UNCOV
659
                                        cachedInPolicy =
×
UNCOV
660
                                                models.NewCachedPolicy(p2)
×
UNCOV
661
                                        cachedInPolicy.ToNodePubKey =
×
UNCOV
662
                                                toNodeCallback
×
UNCOV
663
                                        cachedInPolicy.ToNodeFeatures =
×
UNCOV
664
                                                toNodeFeatures
×
UNCOV
665
                                }
×
666

UNCOV
667
                                directedChannel := &DirectedChannel{
×
UNCOV
668
                                        ChannelID: e.ChannelID,
×
UNCOV
669
                                        IsNode1: node.PubKeyBytes ==
×
UNCOV
670
                                                e.NodeKey1Bytes,
×
UNCOV
671
                                        OtherNode:    e.NodeKey2Bytes,
×
UNCOV
672
                                        Capacity:     e.Capacity,
×
UNCOV
673
                                        OutPolicySet: p1 != nil,
×
UNCOV
674
                                        InPolicy:     cachedInPolicy,
×
UNCOV
675
                                }
×
UNCOV
676

×
UNCOV
677
                                if node.PubKeyBytes == e.NodeKey2Bytes {
×
UNCOV
678
                                        directedChannel.OtherNode =
×
UNCOV
679
                                                e.NodeKey1Bytes
×
UNCOV
680
                                }
×
681

UNCOV
682
                                channels[e.ChannelID] = directedChannel
×
UNCOV
683

×
UNCOV
684
                                return nil
×
685
                        })
UNCOV
686
                if err != nil {
×
687
                        return err
×
688
                }
×
689

UNCOV
690
                return cb(node.PubKeyBytes, channels)
×
691
        })
692
}
693

694
// DisabledChannelIDs returns the channel ids of disabled channels.
695
// A channel is disabled when both of the associated ChannelEdgePolicies
696
// have their disabled bit on.
UNCOV
697
func (c *KVStore) DisabledChannelIDs() ([]uint64, error) {
×
UNCOV
698
        var disabledChanIDs []uint64
×
UNCOV
699
        var chanEdgeFound map[uint64]struct{}
×
UNCOV
700

×
UNCOV
701
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
×
UNCOV
702
                edges := tx.ReadBucket(edgeBucket)
×
UNCOV
703
                if edges == nil {
×
704
                        return ErrGraphNoEdgesFound
×
705
                }
×
706

UNCOV
707
                disabledEdgePolicyIndex := edges.NestedReadBucket(
×
UNCOV
708
                        disabledEdgePolicyBucket,
×
UNCOV
709
                )
×
UNCOV
710
                if disabledEdgePolicyIndex == nil {
×
UNCOV
711
                        return nil
×
UNCOV
712
                }
×
713

714
                // We iterate over all disabled policies and we add each channel
715
                // that has more than one disabled policy to disabledChanIDs
716
                // array.
UNCOV
717
                return disabledEdgePolicyIndex.ForEach(
×
UNCOV
718
                        func(k, v []byte) error {
×
UNCOV
719
                                chanID := byteOrder.Uint64(k[:8])
×
UNCOV
720
                                _, edgeFound := chanEdgeFound[chanID]
×
UNCOV
721
                                if edgeFound {
×
UNCOV
722
                                        delete(chanEdgeFound, chanID)
×
UNCOV
723
                                        disabledChanIDs = append(
×
UNCOV
724
                                                disabledChanIDs, chanID,
×
UNCOV
725
                                        )
×
UNCOV
726

×
UNCOV
727
                                        return nil
×
UNCOV
728
                                }
×
729

UNCOV
730
                                chanEdgeFound[chanID] = struct{}{}
×
UNCOV
731

×
UNCOV
732
                                return nil
×
733
                        },
734
                )
UNCOV
735
        }, func() {
×
UNCOV
736
                disabledChanIDs = nil
×
UNCOV
737
                chanEdgeFound = make(map[uint64]struct{})
×
UNCOV
738
        })
×
UNCOV
739
        if err != nil {
×
740
                return nil, err
×
741
        }
×
742

UNCOV
743
        return disabledChanIDs, nil
×
744
}
745

746
// ForEachNode iterates through all the stored vertices/nodes in the graph,
747
// executing the passed callback with each node encountered. If the callback
748
// returns an error, then the transaction is aborted and the iteration stops
749
// early. Any operations performed on the NodeTx passed to the call-back are
750
// executed under the same read transaction and so, methods on the NodeTx object
751
// _MUST_ only be called from within the call-back.
752
func (c *KVStore) ForEachNode(cb func(tx NodeRTx) error) error {
3✔
753
        return c.forEachNode(func(tx kvdb.RTx,
3✔
754
                node *models.LightningNode) error {
6✔
755

3✔
756
                return cb(newChanGraphNodeTx(tx, c, node))
3✔
757
        })
3✔
758
}
759

760
// forEachNode iterates through all the stored vertices/nodes in the graph,
761
// executing the passed callback with each node encountered. If the callback
762
// returns an error, then the transaction is aborted and the iteration stops
763
// early.
764
//
765
// TODO(roasbeef): add iterator interface to allow for memory efficient graph
766
// traversal when graph gets mega.
767
func (c *KVStore) forEachNode(
768
        cb func(kvdb.RTx, *models.LightningNode) error) error {
3✔
769

3✔
770
        traversal := func(tx kvdb.RTx) error {
6✔
771
                // First grab the nodes bucket which stores the mapping from
3✔
772
                // pubKey to node information.
3✔
773
                nodes := tx.ReadBucket(nodeBucket)
3✔
774
                if nodes == nil {
3✔
775
                        return ErrGraphNotFound
×
776
                }
×
777

778
                return nodes.ForEach(func(pubKey, nodeBytes []byte) error {
6✔
779
                        // If this is the source key, then we skip this
3✔
780
                        // iteration as the value for this key is a pubKey
3✔
781
                        // rather than raw node information.
3✔
782
                        if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 {
6✔
783
                                return nil
3✔
784
                        }
3✔
785

786
                        nodeReader := bytes.NewReader(nodeBytes)
3✔
787
                        node, err := deserializeLightningNode(nodeReader)
3✔
788
                        if err != nil {
3✔
789
                                return err
×
790
                        }
×
791

792
                        // Execute the callback, the transaction will abort if
793
                        // this returns an error.
794
                        return cb(tx, &node)
3✔
795
                })
796
        }
797

798
        return kvdb.View(c.db, traversal, func() {})
6✔
799
}
800

801
// ForEachNodeCacheable iterates through all the stored vertices/nodes in the
802
// graph, executing the passed callback with each node encountered. If the
803
// callback returns an error, then the transaction is aborted and the iteration
804
// stops early.
805
func (c *KVStore) ForEachNodeCacheable(cb func(route.Vertex,
806
        *lnwire.FeatureVector) error) error {
3✔
807

3✔
808
        traversal := func(tx kvdb.RTx) error {
6✔
809
                // First grab the nodes bucket which stores the mapping from
3✔
810
                // pubKey to node information.
3✔
811
                nodes := tx.ReadBucket(nodeBucket)
3✔
812
                if nodes == nil {
3✔
813
                        return ErrGraphNotFound
×
814
                }
×
815

816
                return nodes.ForEach(func(pubKey, nodeBytes []byte) error {
6✔
817
                        // If this is the source key, then we skip this
3✔
818
                        // iteration as the value for this key is a pubKey
3✔
819
                        // rather than raw node information.
3✔
820
                        if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 {
6✔
821
                                return nil
3✔
822
                        }
3✔
823

824
                        nodeReader := bytes.NewReader(nodeBytes)
3✔
825
                        node, features, err := deserializeLightningNodeCacheable( //nolint:ll
3✔
826
                                nodeReader,
3✔
827
                        )
3✔
828
                        if err != nil {
3✔
829
                                return err
×
830
                        }
×
831

832
                        // Execute the callback, the transaction will abort if
833
                        // this returns an error.
834
                        return cb(node, features)
3✔
835
                })
836
        }
837

838
        return kvdb.View(c.db, traversal, func() {})
6✔
839
}
840

841
// SourceNode returns the source node of the graph. The source node is treated
842
// as the center node within a star-graph. This method may be used to kick off
843
// a path finding algorithm in order to explore the reachability of another
844
// node based off the source node.
845
func (c *KVStore) SourceNode() (*models.LightningNode, error) {
3✔
846
        var source *models.LightningNode
3✔
847
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
6✔
848
                // First grab the nodes bucket which stores the mapping from
3✔
849
                // pubKey to node information.
3✔
850
                nodes := tx.ReadBucket(nodeBucket)
3✔
851
                if nodes == nil {
3✔
852
                        return ErrGraphNotFound
×
853
                }
×
854

855
                node, err := c.sourceNode(nodes)
3✔
856
                if err != nil {
3✔
UNCOV
857
                        return err
×
UNCOV
858
                }
×
859
                source = node
3✔
860

3✔
861
                return nil
3✔
862
        }, func() {
3✔
863
                source = nil
3✔
864
        })
3✔
865
        if err != nil {
3✔
UNCOV
866
                return nil, err
×
UNCOV
867
        }
×
868

869
        return source, nil
3✔
870
}
871

872
// sourceNode uses an existing database transaction and returns the source node
873
// of the graph. The source node is treated as the center node within a
874
// star-graph. This method may be used to kick off a path finding algorithm in
875
// order to explore the reachability of another node based off the source node.
876
func (c *KVStore) sourceNode(nodes kvdb.RBucket) (*models.LightningNode,
877
        error) {
3✔
878

3✔
879
        selfPub := nodes.Get(sourceKey)
3✔
880
        if selfPub == nil {
3✔
UNCOV
881
                return nil, ErrSourceNodeNotSet
×
UNCOV
882
        }
×
883

884
        // With the pubKey of the source node retrieved, we're able to
885
        // fetch the full node information.
886
        node, err := fetchLightningNode(nodes, selfPub)
3✔
887
        if err != nil {
3✔
888
                return nil, err
×
889
        }
×
890

891
        return &node, nil
3✔
892
}
893

894
// SetSourceNode sets the source node within the graph database. The source
895
// node is to be used as the center of a star-graph within path finding
896
// algorithms.
897
func (c *KVStore) SetSourceNode(node *models.LightningNode) error {
3✔
898
        nodePubBytes := node.PubKeyBytes[:]
3✔
899

3✔
900
        return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
6✔
901
                // First grab the nodes bucket which stores the mapping from
3✔
902
                // pubKey to node information.
3✔
903
                nodes, err := tx.CreateTopLevelBucket(nodeBucket)
3✔
904
                if err != nil {
3✔
905
                        return err
×
906
                }
×
907

908
                // Next we create the mapping from source to the targeted
909
                // public key.
910
                if err := nodes.Put(sourceKey, nodePubBytes); err != nil {
3✔
911
                        return err
×
912
                }
×
913

914
                // Finally, we commit the information of the lightning node
915
                // itself.
916
                return addLightningNode(tx, node)
3✔
917
        }, func() {})
3✔
918
}
919

920
// AddLightningNode adds a vertex/node to the graph database. If the node is not
921
// in the database from before, this will add a new, unconnected one to the
922
// graph. If it is present from before, this will update that node's
923
// information. Note that this method is expected to only be called to update an
924
// already present node from a node announcement, or to insert a node found in a
925
// channel update.
926
//
927
// TODO(roasbeef): also need sig of announcement.
928
func (c *KVStore) AddLightningNode(node *models.LightningNode,
929
        opts ...batch.SchedulerOption) error {
3✔
930

3✔
931
        ctx := context.TODO()
3✔
932

3✔
933
        r := &batch.Request[kvdb.RwTx]{
3✔
934
                Opts: batch.NewSchedulerOptions(opts...),
3✔
935
                Do: func(tx kvdb.RwTx) error {
6✔
936
                        return addLightningNode(tx, node)
3✔
937
                },
3✔
938
        }
939

940
        return c.nodeScheduler.Execute(ctx, r)
3✔
941
}
942

943
func addLightningNode(tx kvdb.RwTx, node *models.LightningNode) error {
3✔
944
        nodes, err := tx.CreateTopLevelBucket(nodeBucket)
3✔
945
        if err != nil {
3✔
946
                return err
×
947
        }
×
948

949
        aliases, err := nodes.CreateBucketIfNotExists(aliasIndexBucket)
3✔
950
        if err != nil {
3✔
951
                return err
×
952
        }
×
953

954
        updateIndex, err := nodes.CreateBucketIfNotExists(
3✔
955
                nodeUpdateIndexBucket,
3✔
956
        )
3✔
957
        if err != nil {
3✔
958
                return err
×
959
        }
×
960

961
        return putLightningNode(nodes, aliases, updateIndex, node)
3✔
962
}
963

964
// LookupAlias attempts to return the alias as advertised by the target node.
965
// TODO(roasbeef): currently assumes that aliases are unique...
966
func (c *KVStore) LookupAlias(pub *btcec.PublicKey) (string, error) {
3✔
967
        var alias string
3✔
968

3✔
969
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
6✔
970
                nodes := tx.ReadBucket(nodeBucket)
3✔
971
                if nodes == nil {
3✔
972
                        return ErrGraphNodesNotFound
×
973
                }
×
974

975
                aliases := nodes.NestedReadBucket(aliasIndexBucket)
3✔
976
                if aliases == nil {
3✔
977
                        return ErrGraphNodesNotFound
×
978
                }
×
979

980
                nodePub := pub.SerializeCompressed()
3✔
981
                a := aliases.Get(nodePub)
3✔
982
                if a == nil {
3✔
UNCOV
983
                        return ErrNodeAliasNotFound
×
UNCOV
984
                }
×
985

986
                // TODO(roasbeef): should actually be using the utf-8
987
                // package...
988
                alias = string(a)
3✔
989

3✔
990
                return nil
3✔
991
        }, func() {
3✔
992
                alias = ""
3✔
993
        })
3✔
994
        if err != nil {
3✔
UNCOV
995
                return "", err
×
UNCOV
996
        }
×
997

998
        return alias, nil
3✔
999
}
1000

1001
// DeleteLightningNode starts a new database transaction to remove a vertex/node
1002
// from the database according to the node's public key.
UNCOV
1003
func (c *KVStore) DeleteLightningNode(nodePub route.Vertex) error {
×
UNCOV
1004
        // TODO(roasbeef): ensure dangling edges are removed...
×
UNCOV
1005
        return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
×
UNCOV
1006
                nodes := tx.ReadWriteBucket(nodeBucket)
×
UNCOV
1007
                if nodes == nil {
×
1008
                        return ErrGraphNodeNotFound
×
1009
                }
×
1010

UNCOV
1011
                return c.deleteLightningNode(nodes, nodePub[:])
×
UNCOV
1012
        }, func() {})
×
1013
}
1014

1015
// deleteLightningNode uses an existing database transaction to remove a
1016
// vertex/node from the database according to the node's public key.
1017
func (c *KVStore) deleteLightningNode(nodes kvdb.RwBucket,
1018
        compressedPubKey []byte) error {
3✔
1019

3✔
1020
        aliases := nodes.NestedReadWriteBucket(aliasIndexBucket)
3✔
1021
        if aliases == nil {
3✔
1022
                return ErrGraphNodesNotFound
×
1023
        }
×
1024

1025
        if err := aliases.Delete(compressedPubKey); err != nil {
3✔
1026
                return err
×
1027
        }
×
1028

1029
        // Before we delete the node, we'll fetch its current state so we can
1030
        // determine when its last update was to clear out the node update
1031
        // index.
1032
        node, err := fetchLightningNode(nodes, compressedPubKey)
3✔
1033
        if err != nil {
3✔
UNCOV
1034
                return err
×
UNCOV
1035
        }
×
1036

1037
        if err := nodes.Delete(compressedPubKey); err != nil {
3✔
1038
                return err
×
1039
        }
×
1040

1041
        // Finally, we'll delete the index entry for the node within the
1042
        // nodeUpdateIndexBucket as this node is no longer active, so we don't
1043
        // need to track its last update.
1044
        nodeUpdateIndex := nodes.NestedReadWriteBucket(nodeUpdateIndexBucket)
3✔
1045
        if nodeUpdateIndex == nil {
3✔
1046
                return ErrGraphNodesNotFound
×
1047
        }
×
1048

1049
        // In order to delete the entry, we'll need to reconstruct the key for
1050
        // its last update.
1051
        updateUnix := uint64(node.LastUpdate.Unix())
3✔
1052
        var indexKey [8 + 33]byte
3✔
1053
        byteOrder.PutUint64(indexKey[:8], updateUnix)
3✔
1054
        copy(indexKey[8:], compressedPubKey)
3✔
1055

3✔
1056
        return nodeUpdateIndex.Delete(indexKey[:])
3✔
1057
}
1058

1059
// AddChannelEdge adds a new (undirected, blank) edge to the graph database. An
1060
// undirected edge between the two target nodes is created. The information stored
1061
// denotes the static attributes of the channel, such as the channelID, the keys
1062
// involved in creation of the channel, and the set of features that the channel
1063
// supports. The chanPoint and chanID are used to uniquely identify the edge
1064
// globally within the database.
1065
func (c *KVStore) AddChannelEdge(edge *models.ChannelEdgeInfo,
1066
        opts ...batch.SchedulerOption) error {
3✔
1067

3✔
1068
        ctx := context.TODO()
3✔
1069

3✔
1070
        var alreadyExists bool
3✔
1071
        r := &batch.Request[kvdb.RwTx]{
3✔
1072
                Opts: batch.NewSchedulerOptions(opts...),
3✔
1073
                Reset: func() {
6✔
1074
                        alreadyExists = false
3✔
1075
                },
3✔
1076
                Do: func(tx kvdb.RwTx) error {
3✔
1077
                        err := c.addChannelEdge(tx, edge)
3✔
1078

3✔
1079
                        // Silence ErrEdgeAlreadyExist so that the batch can
3✔
1080
                        // succeed, but propagate the error via local state.
3✔
1081
                        if errors.Is(err, ErrEdgeAlreadyExist) {
3✔
UNCOV
1082
                                alreadyExists = true
×
UNCOV
1083
                                return nil
×
UNCOV
1084
                        }
×
1085

1086
                        return err
3✔
1087
                },
1088
                OnCommit: func(err error) error {
3✔
1089
                        switch {
3✔
1090
                        case err != nil:
×
1091
                                return err
×
UNCOV
1092
                        case alreadyExists:
×
UNCOV
1093
                                return ErrEdgeAlreadyExist
×
1094
                        default:
3✔
1095
                                c.rejectCache.remove(edge.ChannelID)
3✔
1096
                                c.chanCache.remove(edge.ChannelID)
3✔
1097
                                return nil
3✔
1098
                        }
1099
                },
1100
        }
1101

1102
        return c.chanScheduler.Execute(ctx, r)
3✔
1103
}
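An illustrative caller (not part of the file): per the OnCommit handling above, a duplicate insert surfaces as ErrEdgeAlreadyExist once the batched transaction commits, which callers can treat as a no-op.

// addEdgeIgnoreDuplicate is a hypothetical wrapper around AddChannelEdge.
func addEdgeIgnoreDuplicate(store *KVStore,
        edge *models.ChannelEdgeInfo) error {

        err := store.AddChannelEdge(edge)
        if errors.Is(err, ErrEdgeAlreadyExist) {
                // The edge was already present; nothing to do.
                return nil
        }

        return err
}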
1104

1105
// addChannelEdge is the private form of AddChannelEdge that allows callers to
1106
// utilize an existing db transaction.
1107
func (c *KVStore) addChannelEdge(tx kvdb.RwTx,
1108
        edge *models.ChannelEdgeInfo) error {
3✔
1109

3✔
1110
        // Construct the channel's primary key which is the 8-byte channel ID.
3✔
1111
        var chanKey [8]byte
3✔
1112
        binary.BigEndian.PutUint64(chanKey[:], edge.ChannelID)
3✔
1113

3✔
1114
        nodes, err := tx.CreateTopLevelBucket(nodeBucket)
3✔
1115
        if err != nil {
3✔
1116
                return err
×
1117
        }
×
1118
        edges, err := tx.CreateTopLevelBucket(edgeBucket)
3✔
1119
        if err != nil {
3✔
1120
                return err
×
1121
        }
×
1122
        edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket)
3✔
1123
        if err != nil {
3✔
1124
                return err
×
1125
        }
×
1126
        chanIndex, err := edges.CreateBucketIfNotExists(channelPointBucket)
3✔
1127
        if err != nil {
3✔
1128
                return err
×
1129
        }
×
1130

1131
        // First, attempt to check if this edge has already been created. If
1132
        // so, then we can exit early as this method is meant to be idempotent.
1133
        if edgeInfo := edgeIndex.Get(chanKey[:]); edgeInfo != nil {
3✔
UNCOV
1134
                return ErrEdgeAlreadyExist
×
UNCOV
1135
        }
×
1136

1137
        // Before we insert the channel into the database, we'll ensure that
1138
        // both nodes already exist in the channel graph. If either node
1139
        // doesn't, then we'll insert a "shell" node that just includes its
1140
        // public key, so subsequent validation and queries can work properly.
1141
        _, node1Err := fetchLightningNode(nodes, edge.NodeKey1Bytes[:])
3✔
1142
        switch {
3✔
1143
        case errors.Is(node1Err, ErrGraphNodeNotFound):
3✔
1144
                node1Shell := models.LightningNode{
3✔
1145
                        PubKeyBytes:          edge.NodeKey1Bytes,
3✔
1146
                        HaveNodeAnnouncement: false,
3✔
1147
                }
3✔
1148
                err := addLightningNode(tx, &node1Shell)
3✔
1149
                if err != nil {
3✔
1150
                        return fmt.Errorf("unable to create shell node "+
×
1151
                                "for: %x: %w", edge.NodeKey1Bytes, err)
×
1152
                }
×
1153
        case node1Err != nil:
×
1154
                return node1Err
×
1155
        }
1156

1157
        _, node2Err := fetchLightningNode(nodes, edge.NodeKey2Bytes[:])
3✔
1158
        switch {
3✔
1159
        case errors.Is(node2Err, ErrGraphNodeNotFound):
3✔
1160
                node2Shell := models.LightningNode{
3✔
1161
                        PubKeyBytes:          edge.NodeKey2Bytes,
3✔
1162
                        HaveNodeAnnouncement: false,
3✔
1163
                }
3✔
1164
                err := addLightningNode(tx, &node2Shell)
3✔
1165
                if err != nil {
3✔
1166
                        return fmt.Errorf("unable to create shell node "+
×
1167
                                "for: %x: %w", edge.NodeKey2Bytes, err)
×
1168
                }
×
1169
        case node2Err != nil:
×
1170
                return node2Err
×
1171
        }
1172

1173
        // If the edge hasn't been created yet, then we'll first add it to the
1174
        // edge index in order to associate the edge between two nodes and also
1175
        // store the static components of the channel.
1176
        if err := putChanEdgeInfo(edgeIndex, edge, chanKey); err != nil {
3✔
1177
                return err
×
1178
        }
×
1179

1180
        // Mark edge policies for both sides as unknown. This is to enable
1181
        // efficient incoming channel lookup for a node.
1182
        keys := []*[33]byte{
3✔
1183
                &edge.NodeKey1Bytes,
3✔
1184
                &edge.NodeKey2Bytes,
3✔
1185
        }
3✔
1186
        for _, key := range keys {
6✔
1187
                err := putChanEdgePolicyUnknown(edges, edge.ChannelID, key[:])
3✔
1188
                if err != nil {
3✔
1189
                        return err
×
1190
                }
×
1191
        }
1192

1193
        // Finally, we add it to the channel index which maps channel points
1194
        // (outpoints) to the shorter channel IDs.
1195
        var b bytes.Buffer
3✔
1196
        if err := WriteOutpoint(&b, &edge.ChannelPoint); err != nil {
3✔
1197
                return err
×
1198
        }
×
1199

1200
        return chanIndex.Put(b.Bytes(), chanKey[:])
3✔
1201
}
1202

1203
// HasChannelEdge returns true if the database knows of a channel edge with the
// passed channel ID, and false otherwise. If an edge with that ID is found
// within the graph, then two time stamps representing the last time the edge
// was updated for both directed edges are returned along with the boolean. If
// it is not found, then the zombie index is checked and its result is returned
// as the second boolean.
func (c *KVStore) HasChannelEdge(
        chanID uint64) (time.Time, time.Time, bool, bool, error) {

        var (
                upd1Time time.Time
                upd2Time time.Time
                exists   bool
                isZombie bool
        )

        // We'll query the cache with the shared lock held to allow multiple
        // readers to access values in the cache concurrently if they exist.
        c.cacheMu.RLock()
        if entry, ok := c.rejectCache.get(chanID); ok {
                c.cacheMu.RUnlock()
                upd1Time = time.Unix(entry.upd1Time, 0)
                upd2Time = time.Unix(entry.upd2Time, 0)
                exists, isZombie = entry.flags.unpack()

                return upd1Time, upd2Time, exists, isZombie, nil
        }
        c.cacheMu.RUnlock()

        c.cacheMu.Lock()
        defer c.cacheMu.Unlock()

        // The item was not found with the shared lock, so we'll acquire the
        // exclusive lock and check the cache again in case another method added
        // the entry to the cache while no lock was held.
        if entry, ok := c.rejectCache.get(chanID); ok {
                upd1Time = time.Unix(entry.upd1Time, 0)
                upd2Time = time.Unix(entry.upd2Time, 0)
                exists, isZombie = entry.flags.unpack()

                return upd1Time, upd2Time, exists, isZombie, nil
        }

        if err := kvdb.View(c.db, func(tx kvdb.RTx) error {
                edges := tx.ReadBucket(edgeBucket)
                if edges == nil {
                        return ErrGraphNoEdgesFound
                }
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
                if edgeIndex == nil {
                        return ErrGraphNoEdgesFound
                }

                var channelID [8]byte
                byteOrder.PutUint64(channelID[:], chanID)

                // If the edge doesn't exist, then we'll also check our zombie
                // index.
                if edgeIndex.Get(channelID[:]) == nil {
                        exists = false
                        zombieIndex := edges.NestedReadBucket(zombieBucket)
                        if zombieIndex != nil {
                                isZombie, _, _ = isZombieEdge(
                                        zombieIndex, chanID,
                                )
                        }

                        return nil
                }

                exists = true
                isZombie = false

                // If the channel has been found in the graph, then retrieve
                // the edges themselves so we can return the last updated
                // timestamps.
                nodes := tx.ReadBucket(nodeBucket)
                if nodes == nil {
                        return ErrGraphNodeNotFound
                }

                e1, e2, err := fetchChanEdgePolicies(
                        edgeIndex, edges, channelID[:],
                )
                if err != nil {
                        return err
                }

                // As we may have only one of the edges populated, only set the
                // update time if the edge was found in the database.
                if e1 != nil {
                        upd1Time = e1.LastUpdate
                }
                if e2 != nil {
                        upd2Time = e2.LastUpdate
                }

                return nil
        }, func() {}); err != nil {
                return time.Time{}, time.Time{}, exists, isZombie, err
        }

        c.rejectCache.insert(chanID, rejectCacheEntry{
                upd1Time: upd1Time.Unix(),
                upd2Time: upd2Time.Unix(),
                flags:    packRejectFlags(exists, isZombie),
        })

        return upd1Time, upd2Time, exists, isZombie, nil
}

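// The helper below is an illustrative sketch only (it is not part of the
// KVStore API): one way a caller might use HasChannelEdge to distinguish an
// unknown channel from a known zombie. The name describeChanStatus is
// hypothetical.
func describeChanStatus(store *KVStore, chanID uint64) (string, error) {
        upd1, upd2, exists, isZombie, err := store.HasChannelEdge(chanID)
        if err != nil {
                return "", err
        }

        switch {
        case isZombie:
                return "zombie", nil
        case exists:
                return fmt.Sprintf("known (updates: %v / %v)", upd1, upd2), nil
        default:
                return "unknown", nil
        }
}
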
// AddEdgeProof sets the proof of an existing edge in the graph database.
func (c *KVStore) AddEdgeProof(chanID lnwire.ShortChannelID,
        proof *models.ChannelAuthProof) error {

        // Construct the channel's primary key which is the 8-byte channel ID.
        var chanKey [8]byte
        binary.BigEndian.PutUint64(chanKey[:], chanID.ToUint64())

        return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
                edges := tx.ReadWriteBucket(edgeBucket)
                if edges == nil {
                        return ErrEdgeNotFound
                }

                edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
                if edgeIndex == nil {
                        return ErrEdgeNotFound
                }

                edge, err := fetchChanEdgeInfo(edgeIndex, chanKey[:])
                if err != nil {
                        return err
                }

                edge.AuthProof = proof

                return putChanEdgeInfo(edgeIndex, &edge, chanKey)
        }, func() {})
}

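// Illustrative sketch only: attaching an announcement proof to a previously
// added edge once the full set of signatures is available. The helper name
// attachProof is hypothetical and simply demonstrates the call.
func attachProof(store *KVStore, scid lnwire.ShortChannelID,
        proof *models.ChannelAuthProof) error {

        // AddEdgeProof rewrites the stored edge info with the proof attached,
        // so the edge must already exist in the graph.
        return store.AddEdgeProof(scid, proof)
}
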
const (
        // pruneTipBytes is the total size of the value which stores a prune
        // entry of the graph in the prune log. The "prune tip" is the last
        // entry in the prune log, and indicates if the channel graph is in
        // sync with the current UTXO state. The structure of the value
        // is: blockHash, taking 32 bytes total.
        pruneTipBytes = 32
)

// PruneGraph prunes newly closed channels from the channel graph in response
// to a new block being solved on the network. Any transactions which spend the
// funding output of any known channels within the graph will be deleted.
// Additionally, the "prune tip", or the last block which has been used to
// prune the graph, is stored so callers can ensure the graph is fully in sync
// with the current UTXO state. A slice of channels that have been closed by
// the target block along with any pruned nodes are returned if the function
// succeeds without error.
func (c *KVStore) PruneGraph(spentOutputs []*wire.OutPoint,
        blockHash *chainhash.Hash, blockHeight uint32) (
        []*models.ChannelEdgeInfo, []route.Vertex, error) {

        c.cacheMu.Lock()
        defer c.cacheMu.Unlock()

        var (
                chansClosed []*models.ChannelEdgeInfo
                prunedNodes []route.Vertex
        )

        err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
                // First grab the edges bucket which houses the information
                // we'd like to delete.
                edges, err := tx.CreateTopLevelBucket(edgeBucket)
                if err != nil {
                        return err
                }

                // Next grab the two edge indexes which will also need to be
                // updated.
                edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket)
                if err != nil {
                        return err
                }
                chanIndex, err := edges.CreateBucketIfNotExists(
                        channelPointBucket,
                )
                if err != nil {
                        return err
                }
                nodes := tx.ReadWriteBucket(nodeBucket)
                if nodes == nil {
                        return ErrSourceNodeNotSet
                }
                zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
                if err != nil {
                        return err
                }

                // For each of the outpoints that have been spent within the
                // block, we attempt to delete them from the graph: if that
                // outpoint was a channel, then it has now been closed.
                for _, chanPoint := range spentOutputs {
                        // TODO(roasbeef): load channel bloom filter, continue
                        // if NOT in filter

                        var opBytes bytes.Buffer
                        err := WriteOutpoint(&opBytes, chanPoint)
                        if err != nil {
                                return err
                        }

                        // First attempt to see if the channel exists within
                        // the database, if not, then we can exit early.
                        chanID := chanIndex.Get(opBytes.Bytes())
                        if chanID == nil {
                                continue
                        }

                        // Attempt to delete the channel, an ErrEdgeNotFound
                        // will be returned if that outpoint isn't known to be
                        // a channel. If no error is returned, then a channel
                        // was successfully pruned.
                        edgeInfo, err := c.delChannelEdgeUnsafe(
                                edges, edgeIndex, chanIndex, zombieIndex,
                                chanID, false, false,
                        )
                        if err != nil && !errors.Is(err, ErrEdgeNotFound) {
                                return err
                        }

                        chansClosed = append(chansClosed, edgeInfo)
                }

                metaBucket, err := tx.CreateTopLevelBucket(graphMetaBucket)
                if err != nil {
                        return err
                }

                pruneBucket, err := metaBucket.CreateBucketIfNotExists(
                        pruneLogBucket,
                )
                if err != nil {
                        return err
                }

                // With the graph pruned, add a new entry to the prune log,
                // which can be used to check if the graph is fully synced with
                // the current UTXO state.
                var blockHeightBytes [4]byte
                byteOrder.PutUint32(blockHeightBytes[:], blockHeight)

                var newTip [pruneTipBytes]byte
                copy(newTip[:], blockHash[:])

                err = pruneBucket.Put(blockHeightBytes[:], newTip[:])
                if err != nil {
                        return err
                }

                // Now that the graph has been pruned, we'll also attempt to
                // prune any nodes that have had a channel closed within the
                // latest block.
                prunedNodes, err = c.pruneGraphNodes(nodes, edgeIndex)

                return err
        }, func() {
                chansClosed = nil
                prunedNodes = nil
        })
        if err != nil {
                return nil, nil, err
        }

        for _, channel := range chansClosed {
                c.rejectCache.remove(channel.ChannelID)
                c.chanCache.remove(channel.ChannelID)
        }

        return chansClosed, prunedNodes, nil
}

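// Illustrative sketch only: feeding the spent outpoints of a newly connected
// block into PruneGraph. The helper name pruneOnBlock and the blockSpends
// parameter are hypothetical.
func pruneOnBlock(store *KVStore, blockHash *chainhash.Hash, height uint32,
        blockSpends []*wire.OutPoint) error {

        closed, prunedNodes, err := store.PruneGraph(
                blockSpends, blockHash, height,
        )
        if err != nil {
                return err
        }

        log.Debugf("Block %d pruned %d channels and %d nodes", height,
                len(closed), len(prunedNodes))

        return nil
}
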
// PruneGraphNodes is a garbage collection method which attempts to prune out
// any nodes from the channel graph that are currently unconnected. This
// ensures that we only maintain a graph of reachable nodes. In the event that
// a pruned node gains more channels, it will be re-added to the graph.
func (c *KVStore) PruneGraphNodes() ([]route.Vertex, error) {
        var prunedNodes []route.Vertex
        err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
                nodes := tx.ReadWriteBucket(nodeBucket)
                if nodes == nil {
                        return ErrGraphNodesNotFound
                }
                edges := tx.ReadWriteBucket(edgeBucket)
                if edges == nil {
                        return ErrGraphNotFound
                }
                edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
                if edgeIndex == nil {
                        return ErrGraphNoEdgesFound
                }

                var err error
                prunedNodes, err = c.pruneGraphNodes(nodes, edgeIndex)
                if err != nil {
                        return err
                }

                return nil
        }, func() {
                prunedNodes = nil
        })

        return prunedNodes, err
}

// pruneGraphNodes attempts to remove any nodes from the graph who have had a
// channel closed within the current block. If the node still has existing
// channels in the graph, this will act as a no-op.
func (c *KVStore) pruneGraphNodes(nodes kvdb.RwBucket,
        edgeIndex kvdb.RwBucket) ([]route.Vertex, error) {

        log.Trace("Pruning nodes from graph with no open channels")

        // We'll retrieve the graph's source node to ensure we don't remove it
        // even if it no longer has any open channels.
        sourceNode, err := c.sourceNode(nodes)
        if err != nil {
                return nil, err
        }

        // We'll use this map to keep count of the number of references to a
        // node in the graph. A node should only be removed once it has no more
        // references in the graph.
        nodeRefCounts := make(map[[33]byte]int)
        err = nodes.ForEach(func(pubKey, nodeBytes []byte) error {
                // If this is the source key, then we skip this
                // iteration as the value for this key is a pubKey
                // rather than raw node information.
                if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 {
                        return nil
                }

                var nodePub [33]byte
                copy(nodePub[:], pubKey)
                nodeRefCounts[nodePub] = 0

                return nil
        })
        if err != nil {
                return nil, err
        }

        // To ensure we never delete the source node, we'll start off by
        // bumping its ref count to 1.
        nodeRefCounts[sourceNode.PubKeyBytes] = 1

        // Next, we'll run through the edgeIndex which maps a channel ID to the
        // edge info. We'll use this scan to populate our reference count map
        // above.
        err = edgeIndex.ForEach(func(chanID, edgeInfoBytes []byte) error {
                // The first 66 bytes of the edge info contain the pubkeys of
                // the nodes that this edge attaches. We'll extract them, and
                // add them to the ref count map.
                var node1, node2 [33]byte
                copy(node1[:], edgeInfoBytes[:33])
                copy(node2[:], edgeInfoBytes[33:])

                // With the nodes extracted, we'll increase the ref count of
                // each of the nodes.
                nodeRefCounts[node1]++
                nodeRefCounts[node2]++

                return nil
        })
        if err != nil {
                return nil, err
        }

        // Finally, we'll make a second pass over the set of nodes, and delete
        // any nodes that have a ref count of zero.
        var pruned []route.Vertex
        for nodePubKey, refCount := range nodeRefCounts {
                // If the ref count of the node isn't zero, then we can safely
                // skip it as it still has edges to or from it within the
                // graph.
                if refCount != 0 {
                        continue
                }

                // If we reach this point, then there are no longer any edges
                // that connect this node, so we can delete it.
                err := c.deleteLightningNode(nodes, nodePubKey[:])
                if err != nil {
                        if errors.Is(err, ErrGraphNodeNotFound) ||
                                errors.Is(err, ErrGraphNodesNotFound) {

                                log.Warnf("Unable to prune node %x from the "+
                                        "graph: %v", nodePubKey, err)
                                continue
                        }

                        return nil, err
                }

                log.Infof("Pruned unconnected node %x from channel graph",
                        nodePubKey[:])

                pruned = append(pruned, nodePubKey)
        }

        if len(pruned) > 0 {
                log.Infof("Pruned %v unconnected nodes from the channel graph",
                        len(pruned))
        }

        return pruned, err
}

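// Illustrative sketch only: the reference-count pass above relies on the
// serialized edge info beginning with the two 33-byte node public keys. A
// standalone version of that extraction might look like the following
// hypothetical helper.
func edgeEndpoints(edgeInfoBytes []byte) (node1, node2 [33]byte, ok bool) {
        if len(edgeInfoBytes) < 66 {
                return node1, node2, false
        }

        copy(node1[:], edgeInfoBytes[:33])
        copy(node2[:], edgeInfoBytes[33:66])

        return node1, node2, true
}
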
// DisconnectBlockAtHeight is used to indicate that the block specified
// by the passed height has been disconnected from the main chain. This
// will "rewind" the graph back to the height below, deleting channels
// that are no longer confirmed from the graph. The prune log will be
// set to the last prune height valid for the remaining chain.
// Channels that were removed from the graph as a result of the
// disconnected block are returned.
func (c *KVStore) DisconnectBlockAtHeight(height uint32) (
        []*models.ChannelEdgeInfo, error) {

        // Every channel having a ShortChannelID starting at 'height'
        // will no longer be confirmed.
        startShortChanID := lnwire.ShortChannelID{
                BlockHeight: height,
        }

        // Delete everything after this height from the db up until the
        // SCID alias range.
        endShortChanID := aliasmgr.StartingAlias

        // The block height will be the first 3 bytes of the channel IDs.
        var chanIDStart [8]byte
        byteOrder.PutUint64(chanIDStart[:], startShortChanID.ToUint64())
        var chanIDEnd [8]byte
        byteOrder.PutUint64(chanIDEnd[:], endShortChanID.ToUint64())

        c.cacheMu.Lock()
        defer c.cacheMu.Unlock()

        // Keep track of the channels that are removed from the graph.
        var removedChans []*models.ChannelEdgeInfo

        if err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
                edges, err := tx.CreateTopLevelBucket(edgeBucket)
                if err != nil {
                        return err
                }
                edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket)
                if err != nil {
                        return err
                }
                chanIndex, err := edges.CreateBucketIfNotExists(
                        channelPointBucket,
                )
                if err != nil {
                        return err
                }
                zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
                if err != nil {
                        return err
                }

                // Scan from chanIDStart to chanIDEnd, deleting every
                // found edge.
                // NOTE: we must delete the edges after the cursor loop, since
                // modifying the bucket while traversing is not safe.
                // NOTE: We use a < comparison in bytes.Compare instead of <=
                // so that the StartingAlias itself isn't deleted.
                var keys [][]byte
                cursor := edgeIndex.ReadWriteCursor()

                //nolint:ll
                for k, _ := cursor.Seek(chanIDStart[:]); k != nil &&
                        bytes.Compare(k, chanIDEnd[:]) < 0; k, _ = cursor.Next() {
                        keys = append(keys, k)
                }

                for _, k := range keys {
                        edgeInfo, err := c.delChannelEdgeUnsafe(
                                edges, edgeIndex, chanIndex, zombieIndex,
                                k, false, false,
                        )
                        if err != nil && !errors.Is(err, ErrEdgeNotFound) {
                                return err
                        }

                        removedChans = append(removedChans, edgeInfo)
                }

                // Delete all the entries in the prune log having a height
                // greater than or equal to the disconnected block.
                metaBucket, err := tx.CreateTopLevelBucket(graphMetaBucket)
                if err != nil {
                        return err
                }

                pruneBucket, err := metaBucket.CreateBucketIfNotExists(
                        pruneLogBucket,
                )
                if err != nil {
                        return err
                }

                var pruneKeyStart [4]byte
                byteOrder.PutUint32(pruneKeyStart[:], height)

                var pruneKeyEnd [4]byte
                byteOrder.PutUint32(pruneKeyEnd[:], math.MaxUint32)

                // To avoid modifying the bucket while traversing, we delete
                // the keys in a second loop.
                var pruneKeys [][]byte
                pruneCursor := pruneBucket.ReadWriteCursor()
                //nolint:ll
                for k, _ := pruneCursor.Seek(pruneKeyStart[:]); k != nil &&
                        bytes.Compare(k, pruneKeyEnd[:]) <= 0; k, _ = pruneCursor.Next() {
                        pruneKeys = append(pruneKeys, k)
                }

                for _, k := range pruneKeys {
                        if err := pruneBucket.Delete(k); err != nil {
                                return err
                        }
                }

                return nil
        }, func() {
                removedChans = nil
        }); err != nil {
                return nil, err
        }

        for _, channel := range removedChans {
                c.rejectCache.remove(channel.ChannelID)
                c.chanCache.remove(channel.ChannelID)
        }

        return removedChans, nil
}

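// Illustrative sketch only: the rewind above keys off the fact that the block
// height occupies the most significant bytes of a ShortChannelID, so every
// channel confirmed at or after `height` sorts after this prefix in the edge
// index. The helper name scidRangeStart is hypothetical.
func scidRangeStart(height uint32) [8]byte {
        start := lnwire.ShortChannelID{BlockHeight: height}

        var key [8]byte
        byteOrder.PutUint64(key[:], start.ToUint64())

        return key
}
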
// PruneTip returns the block height and hash of the latest block that has been
// used to prune channels in the graph. Knowing the "prune tip" allows callers
// to tell if the graph is currently in sync with the current best known UTXO
// state.
func (c *KVStore) PruneTip() (*chainhash.Hash, uint32, error) {
        var (
                tipHash   chainhash.Hash
                tipHeight uint32
        )

        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
                graphMeta := tx.ReadBucket(graphMetaBucket)
                if graphMeta == nil {
                        return ErrGraphNotFound
                }
                pruneBucket := graphMeta.NestedReadBucket(pruneLogBucket)
                if pruneBucket == nil {
                        return ErrGraphNeverPruned
                }

                pruneCursor := pruneBucket.ReadCursor()

                // The prune key with the largest block height will be our
                // prune tip.
                k, v := pruneCursor.Last()
                if k == nil {
                        return ErrGraphNeverPruned
                }

                // Once we have the prune tip, the value will be the block hash,
                // and the key the block height.
                copy(tipHash[:], v)
                tipHeight = byteOrder.Uint32(k)

                return nil
        }, func() {})
        if err != nil {
                return nil, 0, err
        }

        return &tipHash, tipHeight, nil
}

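// Illustrative sketch only: deciding whether the graph needs to catch up with
// the chain by comparing the prune tip against a best-known height. The
// helper name graphInSync and the bestHeight parameter are hypothetical.
func graphInSync(store *KVStore, bestHeight uint32) (bool, error) {
        _, tipHeight, err := store.PruneTip()
        switch {
        case errors.Is(err, ErrGraphNeverPruned):
                return false, nil
        case err != nil:
                return false, err
        }

        return tipHeight >= bestHeight, nil
}
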
// DeleteChannelEdges removes edges with the given channel IDs from the
// database and marks them as zombies. This ensures that we're unable to re-add
// them to our database once again. If an edge does not exist within the
// database, then ErrEdgeNotFound will be returned. If strictZombiePruning is
// true, then when we mark these edges as zombies, we'll set up the keys such
// that we require the node that failed to send the fresh update to be the one
// that resurrects the channel from its zombie state. The markZombie bool
// denotes whether or not to mark the channel as a zombie.
func (c *KVStore) DeleteChannelEdges(strictZombiePruning, markZombie bool,
        chanIDs ...uint64) ([]*models.ChannelEdgeInfo, error) {

        // TODO(roasbeef): possibly delete from node bucket if node has no more
        // channels
        // TODO(roasbeef): don't delete both edges?

        c.cacheMu.Lock()
        defer c.cacheMu.Unlock()

        var infos []*models.ChannelEdgeInfo
        err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
                edges := tx.ReadWriteBucket(edgeBucket)
                if edges == nil {
                        return ErrEdgeNotFound
                }
                edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
                if edgeIndex == nil {
                        return ErrEdgeNotFound
                }
                chanIndex := edges.NestedReadWriteBucket(channelPointBucket)
                if chanIndex == nil {
                        return ErrEdgeNotFound
                }
                nodes := tx.ReadWriteBucket(nodeBucket)
                if nodes == nil {
                        return ErrGraphNodeNotFound
                }
                zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
                if err != nil {
                        return err
                }

                var rawChanID [8]byte
                for _, chanID := range chanIDs {
                        byteOrder.PutUint64(rawChanID[:], chanID)
                        edgeInfo, err := c.delChannelEdgeUnsafe(
                                edges, edgeIndex, chanIndex, zombieIndex,
                                rawChanID[:], markZombie, strictZombiePruning,
                        )
                        if err != nil {
                                return err
                        }

                        infos = append(infos, edgeInfo)
                }

                return nil
        }, func() {
                infos = nil
        })
        if err != nil {
                return nil, err
        }

        for _, chanID := range chanIDs {
                c.rejectCache.remove(chanID)
                c.chanCache.remove(chanID)
        }

        return infos, nil
}

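// Illustrative sketch only: removing a batch of stale channels while marking
// them as zombies so they cannot be re-added until a fresh update arrives.
// The helper name removeStaleChannels and the staleIDs parameter are
// hypothetical.
func removeStaleChannels(store *KVStore, staleIDs []uint64) error {
        // markZombie=true records the channels in the zombie index;
        // strictZombiePruning=false lets either node resurrect them.
        _, err := store.DeleteChannelEdges(false, true, staleIDs...)
        if err != nil && !errors.Is(err, ErrEdgeNotFound) {
                return err
        }

        return nil
}
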
// ChannelID attempts to look up the 8-byte compact channel ID which maps to
// the passed channel point (outpoint). If the passed channel doesn't exist
// within the database, then ErrEdgeNotFound is returned.
func (c *KVStore) ChannelID(chanPoint *wire.OutPoint) (uint64, error) {
        var chanID uint64
        if err := kvdb.View(c.db, func(tx kvdb.RTx) error {
                var err error
                chanID, err = getChanID(tx, chanPoint)
                return err
        }, func() {
                chanID = 0
        }); err != nil {
                return 0, err
        }

        return chanID, nil
}

// getChanID returns the assigned channel ID for a given channel point.
func getChanID(tx kvdb.RTx, chanPoint *wire.OutPoint) (uint64, error) {
        var b bytes.Buffer
        if err := WriteOutpoint(&b, chanPoint); err != nil {
                return 0, err
        }

        edges := tx.ReadBucket(edgeBucket)
        if edges == nil {
                return 0, ErrGraphNoEdgesFound
        }
        chanIndex := edges.NestedReadBucket(channelPointBucket)
        if chanIndex == nil {
                return 0, ErrGraphNoEdgesFound
        }

        chanIDBytes := chanIndex.Get(b.Bytes())
        if chanIDBytes == nil {
                return 0, ErrEdgeNotFound
        }

        chanID := byteOrder.Uint64(chanIDBytes)

        return chanID, nil
}

// TODO(roasbeef): allow updates to use Batch?

// HighestChanID returns the "highest" known channel ID in the channel graph.
// This represents the "newest" channel from the PoV of the chain. This method
// can be used by peers to quickly determine if their graphs are in sync.
func (c *KVStore) HighestChanID() (uint64, error) {
        var cid uint64

        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
                edges := tx.ReadBucket(edgeBucket)
                if edges == nil {
                        return ErrGraphNoEdgesFound
                }
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
                if edgeIndex == nil {
                        return ErrGraphNoEdgesFound
                }

                // In order to find the highest chan ID, we'll fetch a cursor
                // and use that to seek to the "end" of our known range.
                cidCursor := edgeIndex.ReadCursor()

                lastChanID, _ := cidCursor.Last()

                // If there's no key, then this means that we don't actually
                // know of any channels, so we'll return a predictable error.
                if lastChanID == nil {
                        return ErrGraphNoEdgesFound
                }

                // Otherwise, we'll deserialize the channel ID and return it
                // to the caller.
                cid = byteOrder.Uint64(lastChanID)

                return nil
        }, func() {
                cid = 0
        })
        if err != nil && !errors.Is(err, ErrGraphNoEdgesFound) {
                return 0, err
        }

        return cid, nil
}

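// Illustrative sketch only: converting the raw channel ID returned by
// HighestChanID back into an SCID for logging. The helper name
// logNewestChannel is hypothetical.
func logNewestChannel(store *KVStore) error {
        cid, err := store.HighestChanID()
        if err != nil {
                return err
        }

        scid := lnwire.NewShortChanIDFromInt(cid)
        log.Debugf("Newest known channel: %v (height %d)", scid,
                scid.BlockHeight)

        return nil
}
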
// ChannelEdge represents the complete set of information for a channel edge in
// the known channel graph. This struct couples the core information of the
// edge with each of the known advertised edge policies.
type ChannelEdge struct {
        // Info contains all the static information describing the channel.
        Info *models.ChannelEdgeInfo

        // Policy1 points to the "first" edge policy of the channel containing
        // the dynamic information required to properly route through the edge.
        Policy1 *models.ChannelEdgePolicy

        // Policy2 points to the "second" edge policy of the channel containing
        // the dynamic information required to properly route through the edge.
        Policy2 *models.ChannelEdgePolicy

        // Node1 is "node 1" in the channel. This is the node that would have
        // produced Policy1 if it exists.
        Node1 *models.LightningNode

        // Node2 is "node 2" in the channel. This is the node that would have
        // produced Policy2 if it exists.
        Node2 *models.LightningNode
}

// ChanUpdatesInHorizon returns all the known channel edges which have at least
// one edge that has an update timestamp within the specified horizon.
func (c *KVStore) ChanUpdatesInHorizon(startTime,
        endTime time.Time) ([]ChannelEdge, error) {

        // To ensure we don't return duplicate ChannelEdges, we'll use an
        // additional map to keep track of the edges already seen to prevent
        // re-adding them.
        var edgesSeen map[uint64]struct{}
        var edgesToCache map[uint64]ChannelEdge
        var edgesInHorizon []ChannelEdge

        c.cacheMu.Lock()
        defer c.cacheMu.Unlock()

        var hits int
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
                edges := tx.ReadBucket(edgeBucket)
                if edges == nil {
                        return ErrGraphNoEdgesFound
                }
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
                if edgeIndex == nil {
                        return ErrGraphNoEdgesFound
                }
                edgeUpdateIndex := edges.NestedReadBucket(edgeUpdateIndexBucket)
                if edgeUpdateIndex == nil {
                        return ErrGraphNoEdgesFound
                }

                nodes := tx.ReadBucket(nodeBucket)
                if nodes == nil {
                        return ErrGraphNodesNotFound
                }

                // We'll now obtain a cursor to perform a range query within
                // the index to find all channels within the horizon.
                updateCursor := edgeUpdateIndex.ReadCursor()

                var startTimeBytes, endTimeBytes [8 + 8]byte
                byteOrder.PutUint64(
                        startTimeBytes[:8], uint64(startTime.Unix()),
                )
                byteOrder.PutUint64(
                        endTimeBytes[:8], uint64(endTime.Unix()),
                )

                // With our start and end times constructed, we'll step through
                // the index collecting the info and policy of each update of
                // each channel that has a last update within the time range.
                //
                //nolint:ll
                for indexKey, _ := updateCursor.Seek(startTimeBytes[:]); indexKey != nil &&
                        bytes.Compare(indexKey, endTimeBytes[:]) <= 0; indexKey, _ = updateCursor.Next() {
                        // We have a new eligible entry, so we'll slice off the
                        // chan ID so we can query it in the DB.
                        chanID := indexKey[8:]

                        // If we've already retrieved the info and policies for
                        // this edge, then we can skip it as we don't need to do
                        // so again.
                        chanIDInt := byteOrder.Uint64(chanID)
                        if _, ok := edgesSeen[chanIDInt]; ok {
                                continue
                        }

                        if channel, ok := c.chanCache.get(chanIDInt); ok {
                                hits++
                                edgesSeen[chanIDInt] = struct{}{}
                                edgesInHorizon = append(edgesInHorizon, channel)

                                continue
                        }

                        // First, we'll fetch the static edge information.
                        edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID)
                        if err != nil {
                                chanID := byteOrder.Uint64(chanID)
                                return fmt.Errorf("unable to fetch info for "+
                                        "edge with chan_id=%v: %v", chanID, err)
                        }

                        // With the static information obtained, we'll now
                        // fetch the dynamic policy info.
                        edge1, edge2, err := fetchChanEdgePolicies(
                                edgeIndex, edges, chanID,
                        )
                        if err != nil {
                                chanID := byteOrder.Uint64(chanID)
                                return fmt.Errorf("unable to fetch policies "+
                                        "for edge with chan_id=%v: %v", chanID,
                                        err)
                        }

                        node1, err := fetchLightningNode(
                                nodes, edgeInfo.NodeKey1Bytes[:],
                        )
                        if err != nil {
                                return err
                        }

                        node2, err := fetchLightningNode(
                                nodes, edgeInfo.NodeKey2Bytes[:],
                        )
                        if err != nil {
                                return err
                        }

                        // Finally, we'll collate this edge with the rest of
                        // the edges to be returned.
                        edgesSeen[chanIDInt] = struct{}{}
                        channel := ChannelEdge{
                                Info:    &edgeInfo,
                                Policy1: edge1,
                                Policy2: edge2,
                                Node1:   &node1,
                                Node2:   &node2,
                        }
                        edgesInHorizon = append(edgesInHorizon, channel)
                        edgesToCache[chanIDInt] = channel
                }

                return nil
        }, func() {
                edgesSeen = make(map[uint64]struct{})
                edgesToCache = make(map[uint64]ChannelEdge)
                edgesInHorizon = nil
        })
        switch {
        case errors.Is(err, ErrGraphNoEdgesFound):
                fallthrough
        case errors.Is(err, ErrGraphNodesNotFound):
                break

        case err != nil:
                return nil, err
        }

        // Insert any edges loaded from disk into the cache.
        for chanid, channel := range edgesToCache {
                c.chanCache.insert(chanid, channel)
        }

        log.Debugf("ChanUpdatesInHorizon hit percentage: %f (%d/%d)",
                float64(hits)/float64(len(edgesInHorizon)), hits,
                len(edgesInHorizon))

        return edgesInHorizon, nil
}

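// Illustrative sketch only: answering a gossip timestamp-range style query by
// collecting every edge updated within the last day. The helper name
// recentChannelUpdates is hypothetical.
func recentChannelUpdates(store *KVStore) ([]ChannelEdge, error) {
        end := time.Now()
        start := end.Add(-24 * time.Hour)

        return store.ChanUpdatesInHorizon(start, end)
}
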
// NodeUpdatesInHorizon returns all the known lightning nodes which have an
// update timestamp within the passed range. This method can be used by two
// nodes to quickly determine if they have the same set of up-to-date node
// announcements.
func (c *KVStore) NodeUpdatesInHorizon(startTime,
        endTime time.Time) ([]models.LightningNode, error) {

        var nodesInHorizon []models.LightningNode

        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
                nodes := tx.ReadBucket(nodeBucket)
                if nodes == nil {
                        return ErrGraphNodesNotFound
                }

                nodeUpdateIndex := nodes.NestedReadBucket(nodeUpdateIndexBucket)
                if nodeUpdateIndex == nil {
                        return ErrGraphNodesNotFound
                }

                // We'll now obtain a cursor to perform a range query within
                // the index to find all node announcements within the horizon.
                updateCursor := nodeUpdateIndex.ReadCursor()

                var startTimeBytes, endTimeBytes [8 + 33]byte
                byteOrder.PutUint64(
                        startTimeBytes[:8], uint64(startTime.Unix()),
                )
                byteOrder.PutUint64(
                        endTimeBytes[:8], uint64(endTime.Unix()),
                )

                // With our start and end times constructed, we'll step through
                // the index collecting info for each node within the time
                // range.
                //
                //nolint:ll
                for indexKey, _ := updateCursor.Seek(startTimeBytes[:]); indexKey != nil &&
                        bytes.Compare(indexKey, endTimeBytes[:]) <= 0; indexKey, _ = updateCursor.Next() {
                        nodePub := indexKey[8:]
                        node, err := fetchLightningNode(nodes, nodePub)
                        if err != nil {
                                return err
                        }

                        nodesInHorizon = append(nodesInHorizon, node)
                }

                return nil
        }, func() {
                nodesInHorizon = nil
        })
        switch {
        case errors.Is(err, ErrGraphNoEdgesFound):
                fallthrough
        case errors.Is(err, ErrGraphNodesNotFound):
                break

        case err != nil:
                return nil, err
        }

        return nodesInHorizon, nil
}

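// Illustrative sketch only: the node-announcement variant of the horizon
// query, used in the same way when serving gossip queries. The helper name
// recentNodeAnnouncements and the horizon parameter are hypothetical.
func recentNodeAnnouncements(store *KVStore,
        horizon time.Duration) ([]models.LightningNode, error) {

        end := time.Now()

        return store.NodeUpdatesInHorizon(end.Add(-horizon), end)
}
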
// FilterKnownChanIDs takes a set of channel IDs and returns the subset of chan
// ID's that we don't know and are not known zombies of the passed set. In other
// words, we perform a set difference of our set of chan ID's and the ones
// passed in. This method can be used by callers to determine the set of
// channels another peer knows of that we don't. The ChannelUpdateInfos for the
// known zombies are also returned.
func (c *KVStore) FilterKnownChanIDs(chansInfo []ChannelUpdateInfo) ([]uint64,
        []ChannelUpdateInfo, error) {

        var (
                newChanIDs   []uint64
                knownZombies []ChannelUpdateInfo
        )

        c.cacheMu.Lock()
        defer c.cacheMu.Unlock()

        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
                edges := tx.ReadBucket(edgeBucket)
                if edges == nil {
                        return ErrGraphNoEdgesFound
                }
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
                if edgeIndex == nil {
                        return ErrGraphNoEdgesFound
                }

                // Fetch the zombie index, it may not exist if no edges have
                // ever been marked as zombies. If the index has been
                // initialized, we will use it later to skip known zombie edges.
                zombieIndex := edges.NestedReadBucket(zombieBucket)

                // We'll run through the set of chanIDs and collate only the
                // set of channels that are unable to be found within our db.
                var cidBytes [8]byte
                for _, info := range chansInfo {
                        scid := info.ShortChannelID.ToUint64()
                        byteOrder.PutUint64(cidBytes[:], scid)

                        // If the edge is already known, skip it.
                        if v := edgeIndex.Get(cidBytes[:]); v != nil {
                                continue
                        }

                        // If the edge is a known zombie, skip it.
                        if zombieIndex != nil {
                                isZombie, _, _ := isZombieEdge(
                                        zombieIndex, scid,
                                )

                                if isZombie {
                                        knownZombies = append(
                                                knownZombies, info,
                                        )

                                        continue
                                }
                        }

                        newChanIDs = append(newChanIDs, scid)
                }

                return nil
        }, func() {
                newChanIDs = nil
                knownZombies = nil
        })
        switch {
        // If we don't know of any edges yet, then we'll return the entire set
        // of chan IDs specified.
        case errors.Is(err, ErrGraphNoEdgesFound):
                ogChanIDs := make([]uint64, len(chansInfo))
                for i, info := range chansInfo {
                        ogChanIDs[i] = info.ShortChannelID.ToUint64()
                }

                return ogChanIDs, nil, nil

        case err != nil:
                return nil, nil, err
        }

        return newChanIDs, knownZombies, nil
}

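// Illustrative sketch only: filtering a peer's advertised SCIDs down to the
// ones worth querying for. The helper name scidsToQuery and the peerSCIDs
// parameter are hypothetical.
func scidsToQuery(store *KVStore,
        peerSCIDs []lnwire.ShortChannelID) ([]uint64, error) {

        chansInfo := make([]ChannelUpdateInfo, 0, len(peerSCIDs))
        for _, scid := range peerSCIDs {
                chansInfo = append(chansInfo, NewChannelUpdateInfo(
                        scid, time.Time{}, time.Time{},
                ))
        }

        newIDs, _, err := store.FilterKnownChanIDs(chansInfo)

        return newIDs, err
}
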
// ChannelUpdateInfo couples the SCID of a channel with the timestamps of the
// latest received channel updates for the channel.
type ChannelUpdateInfo struct {
        // ShortChannelID is the SCID identifier of the channel.
        ShortChannelID lnwire.ShortChannelID

        // Node1UpdateTimestamp is the timestamp of the latest received update
        // from the node 1 channel peer. This will be set to zero time if no
        // update has yet been received from this node.
        Node1UpdateTimestamp time.Time

        // Node2UpdateTimestamp is the timestamp of the latest received update
        // from the node 2 channel peer. This will be set to zero time if no
        // update has yet been received from this node.
        Node2UpdateTimestamp time.Time
}

// NewChannelUpdateInfo is a constructor which makes sure the timestamps are
// initialized to the zero-seconds unix timestamp (which equals
// `January 1, 1970, 00:00:00 UTC`) in case the passed value is `time.Time{}`.
func NewChannelUpdateInfo(scid lnwire.ShortChannelID, node1Timestamp,
        node2Timestamp time.Time) ChannelUpdateInfo {

        chanInfo := ChannelUpdateInfo{
                ShortChannelID:       scid,
                Node1UpdateTimestamp: node1Timestamp,
                Node2UpdateTimestamp: node2Timestamp,
        }

        if node1Timestamp.IsZero() {
                chanInfo.Node1UpdateTimestamp = time.Unix(0, 0)
        }

        if node2Timestamp.IsZero() {
                chanInfo.Node2UpdateTimestamp = time.Unix(0, 0)
        }

        return chanInfo
}

// BlockChannelRange represents a range of channels for a given block height.
type BlockChannelRange struct {
        // Height is the height of the block all of the channels below were
        // included in.
        Height uint32

        // Channels is the list of channels identified by their short ID
        // representation known to us that were included in the block height
        // above. The list may include channel update timestamp information if
        // requested.
        Channels []ChannelUpdateInfo
}

// FilterChannelRange returns the channel ID's of all known channels which were
2333
// mined in a block height within the passed range. The channel IDs are grouped
2334
// by their common block height. This method can be used to quickly share with a
2335
// peer the set of channels we know of within a particular range to catch them
2336
// up after a period of time offline. If withTimestamps is true then the
2337
// timestamp info of the latest received channel update messages of the channel
2338
// will be included in the response.
2339
func (c *KVStore) FilterChannelRange(startHeight,
2340
        endHeight uint32, withTimestamps bool) ([]BlockChannelRange, error) {
3✔
2341

3✔
2342
        startChanID := &lnwire.ShortChannelID{
3✔
2343
                BlockHeight: startHeight,
3✔
2344
        }
3✔
2345

3✔
2346
        endChanID := lnwire.ShortChannelID{
3✔
2347
                BlockHeight: endHeight,
3✔
2348
                TxIndex:     math.MaxUint32 & 0x00ffffff,
3✔
2349
                TxPosition:  math.MaxUint16,
3✔
2350
        }
3✔
2351

3✔
2352
        // As we need to perform a range scan, we'll convert the starting and
3✔
2353
        // ending height to their corresponding values when encoded using short
3✔
2354
        // channel ID's.
3✔
2355
        var chanIDStart, chanIDEnd [8]byte
3✔
2356
        byteOrder.PutUint64(chanIDStart[:], startChanID.ToUint64())
3✔
2357
        byteOrder.PutUint64(chanIDEnd[:], endChanID.ToUint64())
3✔
2358

3✔
2359
        var channelsPerBlock map[uint32][]ChannelUpdateInfo
3✔
2360
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
6✔
2361
                edges := tx.ReadBucket(edgeBucket)
3✔
2362
                if edges == nil {
3✔
2363
                        return ErrGraphNoEdgesFound
×
2364
                }
×
2365
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
3✔
2366
                if edgeIndex == nil {
3✔
2367
                        return ErrGraphNoEdgesFound
×
2368
                }
×
2369

2370
                cursor := edgeIndex.ReadCursor()
3✔
2371

3✔
2372
                // We'll now iterate through the database, and find each
3✔
2373
                // channel ID that resides within the specified range.
3✔
2374
                //
3✔
2375
                //nolint:ll
3✔
2376
                for k, v := cursor.Seek(chanIDStart[:]); k != nil &&
3✔
2377
                        bytes.Compare(k, chanIDEnd[:]) <= 0; k, v = cursor.Next() {
6✔
2378
                        // Don't send alias SCIDs during gossip sync.
3✔
2379
                        edgeReader := bytes.NewReader(v)
3✔
2380
                        edgeInfo, err := deserializeChanEdgeInfo(edgeReader)
3✔
2381
                        if err != nil {
3✔
2382
                                return err
×
2383
                        }
×
2384

2385
                        if edgeInfo.AuthProof == nil {
6✔
2386
                                continue
3✔
2387
                        }
2388

2389
                        // This channel ID rests within the target range, so
2390
                        // we'll add it to our returned set.
2391
                        rawCid := byteOrder.Uint64(k)
3✔
2392
                        cid := lnwire.NewShortChanIDFromInt(rawCid)
3✔
2393

3✔
2394
                        chanInfo := NewChannelUpdateInfo(
3✔
2395
                                cid, time.Time{}, time.Time{},
3✔
2396
                        )
3✔
2397

3✔
2398
                        if !withTimestamps {
3✔
UNCOV
2399
                                channelsPerBlock[cid.BlockHeight] = append(
×
UNCOV
2400
                                        channelsPerBlock[cid.BlockHeight],
×
UNCOV
2401
                                        chanInfo,
×
UNCOV
2402
                                )
×
UNCOV
2403

×
UNCOV
2404
                                continue
×
2405
                        }
2406

2407
                        node1Key, node2Key := computeEdgePolicyKeys(&edgeInfo)
3✔
2408

3✔
2409
                        rawPolicy := edges.Get(node1Key)
3✔
2410
                        if len(rawPolicy) != 0 {
6✔
2411
                                r := bytes.NewReader(rawPolicy)
3✔
2412

3✔
2413
                                edge, err := deserializeChanEdgePolicyRaw(r)
3✔
2414
                                if err != nil && !errors.Is(
3✔
2415
                                        err, ErrEdgePolicyOptionalFieldNotFound,
3✔
2416
                                ) && !errors.Is(err, ErrParsingExtraTLVBytes) {
3✔
2417

×
2418
                                        return err
×
2419
                                }
×
2420

2421
                                chanInfo.Node1UpdateTimestamp = edge.LastUpdate
3✔
2422
                        }
2423

2424
                        rawPolicy = edges.Get(node2Key)
3✔
2425
                        if len(rawPolicy) != 0 {
6✔
2426
                                r := bytes.NewReader(rawPolicy)
3✔
2427

3✔
2428
                                edge, err := deserializeChanEdgePolicyRaw(r)
3✔
2429
                                if err != nil && !errors.Is(
3✔
2430
                                        err, ErrEdgePolicyOptionalFieldNotFound,
3✔
2431
                                ) && !errors.Is(err, ErrParsingExtraTLVBytes) {
3✔
2432

×
2433
                                        return err
×
2434
                                }
×
2435

2436
                                chanInfo.Node2UpdateTimestamp = edge.LastUpdate
3✔
2437
                        }
2438

2439
                        channelsPerBlock[cid.BlockHeight] = append(
3✔
2440
                                channelsPerBlock[cid.BlockHeight], chanInfo,
3✔
2441
                        )
3✔
2442
                }
2443

2444
                return nil
3✔
2445
        }, func() {
3✔
2446
                channelsPerBlock = make(map[uint32][]ChannelUpdateInfo)
3✔
2447
        })
3✔
2448

2449
        switch {
3✔
2450
        // If we don't know of any channels yet, then there's nothing to
2451
        // filter, so we'll return an empty slice.
2452
        case errors.Is(err, ErrGraphNoEdgesFound) || len(channelsPerBlock) == 0:
3✔
2453
                return nil, nil
3✔
2454

2455
        case err != nil:
×
2456
                return nil, err
×
2457
        }
2458

2459
        // Return the channel ranges in ascending block height order.
2460
        blocks := make([]uint32, 0, len(channelsPerBlock))
3✔
2461
        for block := range channelsPerBlock {
6✔
2462
                blocks = append(blocks, block)
3✔
2463
        }
3✔
2464
        sort.Slice(blocks, func(i, j int) bool {
6✔
2465
                return blocks[i] < blocks[j]
3✔
2466
        })
3✔
2467

2468
        channelRanges := make([]BlockChannelRange, 0, len(channelsPerBlock))
3✔
2469
        for _, block := range blocks {
6✔
2470
                channelRanges = append(channelRanges, BlockChannelRange{
3✔
2471
                        Height:   block,
3✔
2472
                        Channels: channelsPerBlock[block],
3✔
2473
                })
3✔
2474
        }
3✔
2475

2476
        return channelRanges, nil
3✔
2477
}
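
// Illustrative sketch (not part of the original file): the loop above turns
// each raw 8-byte edge index key into a short channel ID so results can be
// grouped by block height. The constant below is a hypothetical block height.
func exampleChanIDToBlockHeight() uint32 {
        // A short channel ID packs the block height, transaction index and
        // output index into a single uint64, exactly like the keys stored in
        // the edge index bucket.
        rawCid := uint64(824544) << 40
        cid := lnwire.NewShortChanIDFromInt(rawCid)

        // BlockHeight recovers the upper three bytes, which is what the
        // channelsPerBlock map above is keyed on.
        return cid.BlockHeight
}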
2478

2479
// FetchChanInfos returns the set of channel edges that correspond to the passed
2480
// channel IDs. If an edge in the query is unknown to the database, it will be
2481
// skipped and the result will contain only those edges that exist at the time
2482
// of the query. This can be used to respond to peer queries that are seeking to
2483
// fill in gaps in their view of the channel graph.
2484
func (c *KVStore) FetchChanInfos(chanIDs []uint64) ([]ChannelEdge, error) {
3✔
2485
        return c.fetchChanInfos(nil, chanIDs)
3✔
2486
}
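
// Illustrative sketch (not part of the original file): responding to a gossip
// query by looking up a batch of channel IDs. Unknown IDs are simply absent
// from the result, so the returned slice may be shorter than the request. The
// parameter names are hypothetical.
func exampleFetchChanInfos(graph *KVStore, queried []uint64) error {
        edges, err := graph.FetchChanInfos(queried)
        if err != nil {
                return err
        }

        for _, edge := range edges {
                // Info is always populated, while Policy1/Policy2 may be nil
                // if no channel update has been received for that direction.
                fmt.Printf("chan %v: have policy1=%v policy2=%v\n",
                        edge.Info.ChannelID, edge.Policy1 != nil,
                        edge.Policy2 != nil)
        }

        return nil
}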
3✔
2487

2488
// fetchChanInfos returns the set of channel edges that correspond to the passed
2489
// channel IDs. If an edge in the query is unknown to the database, it will be
2490
// skipped and the result will contain only those edges that exist at the time
2491
// of the query. This can be used to respond to peer queries that are seeking to
2492
// fill in gaps in their view of the channel graph.
2493
//
2494
// NOTE: An optional transaction may be provided. If none is provided, then a
2495
// new one will be created.
2496
func (c *KVStore) fetchChanInfos(tx kvdb.RTx, chanIDs []uint64) (
2497
        []ChannelEdge, error) {
3✔
2498
        // TODO(roasbeef): sort cids?
3✔
2499

3✔
2500
        var (
3✔
2501
                chanEdges []ChannelEdge
3✔
2502
                cidBytes  [8]byte
3✔
2503
        )
3✔
2504

3✔
2505
        fetchChanInfos := func(tx kvdb.RTx) error {
6✔
2506
                edges := tx.ReadBucket(edgeBucket)
3✔
2507
                if edges == nil {
3✔
2508
                        return ErrGraphNoEdgesFound
×
2509
                }
×
2510
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
3✔
2511
                if edgeIndex == nil {
3✔
2512
                        return ErrGraphNoEdgesFound
×
2513
                }
×
2514
                nodes := tx.ReadBucket(nodeBucket)
3✔
2515
                if nodes == nil {
3✔
2516
                        return ErrGraphNotFound
×
2517
                }
×
2518

2519
                for _, cid := range chanIDs {
6✔
2520
                        byteOrder.PutUint64(cidBytes[:], cid)
3✔
2521

3✔
2522
                        // First, we'll fetch the static edge information. If
3✔
2523
                        // the edge is unknown, we will skip the edge and
3✔
2524
                        // continue gathering all known edges.
3✔
2525
                        edgeInfo, err := fetchChanEdgeInfo(
3✔
2526
                                edgeIndex, cidBytes[:],
3✔
2527
                        )
3✔
2528
                        switch {
3✔
UNCOV
2529
                        case errors.Is(err, ErrEdgeNotFound):
×
UNCOV
2530
                                continue
×
2531
                        case err != nil:
×
2532
                                return err
×
2533
                        }
2534

2535
                        // With the static information obtained, we'll now
2536
                        // fetch the dynamic policy info.
2537
                        edge1, edge2, err := fetchChanEdgePolicies(
3✔
2538
                                edgeIndex, edges, cidBytes[:],
3✔
2539
                        )
3✔
2540
                        if err != nil {
3✔
2541
                                return err
×
2542
                        }
×
2543

2544
                        node1, err := fetchLightningNode(
3✔
2545
                                nodes, edgeInfo.NodeKey1Bytes[:],
3✔
2546
                        )
3✔
2547
                        if err != nil {
3✔
2548
                                return err
×
2549
                        }
×
2550

2551
                        node2, err := fetchLightningNode(
3✔
2552
                                nodes, edgeInfo.NodeKey2Bytes[:],
3✔
2553
                        )
3✔
2554
                        if err != nil {
3✔
2555
                                return err
×
2556
                        }
×
2557

2558
                        chanEdges = append(chanEdges, ChannelEdge{
3✔
2559
                                Info:    &edgeInfo,
3✔
2560
                                Policy1: edge1,
3✔
2561
                                Policy2: edge2,
3✔
2562
                                Node1:   &node1,
3✔
2563
                                Node2:   &node2,
3✔
2564
                        })
3✔
2565
                }
2566

2567
                return nil
3✔
2568
        }
2569

2570
        if tx == nil {
6✔
2571
                err := kvdb.View(c.db, fetchChanInfos, func() {
6✔
2572
                        chanEdges = nil
3✔
2573
                })
3✔
2574
                if err != nil {
3✔
2575
                        return nil, err
×
2576
                }
×
2577

2578
                return chanEdges, nil
3✔
2579
        }
2580

2581
        err := fetchChanInfos(tx)
×
2582
        if err != nil {
×
2583
                return nil, err
×
2584
        }
×
2585

2586
        return chanEdges, nil
×
2587
}
2588

2589
func delEdgeUpdateIndexEntry(edgesBucket kvdb.RwBucket, chanID uint64,
2590
        edge1, edge2 *models.ChannelEdgePolicy) error {
3✔
2591

3✔
2592
        // First, we'll fetch the edge update index bucket which currently
3✔
2593
        // stores an entry for the channel we're about to delete.
3✔
2594
        updateIndex := edgesBucket.NestedReadWriteBucket(edgeUpdateIndexBucket)
3✔
2595
        if updateIndex == nil {
3✔
2596
                // No edges in bucket, return early.
×
2597
                return nil
×
2598
        }
×
2599

2600
        // Now that we have the bucket, we'll attempt to construct a template
2601
        // for the index key: updateTime || chanid.
2602
        var indexKey [8 + 8]byte
3✔
2603
        byteOrder.PutUint64(indexKey[8:], chanID)
3✔
2604

3✔
2605
        // With the template constructed, we'll attempt to delete an entry that
3✔
2606
        // would have been created by both edges: we'll alternate the update
3✔
2607
        // times, as one may have overridden the other.
3✔
2608
        if edge1 != nil {
6✔
2609
                byteOrder.PutUint64(
3✔
2610
                        indexKey[:8], uint64(edge1.LastUpdate.Unix()),
3✔
2611
                )
3✔
2612
                if err := updateIndex.Delete(indexKey[:]); err != nil {
3✔
2613
                        return err
×
2614
                }
×
2615
        }
2616

2617
        // We'll also attempt to delete the entry that may have been created by
2618
        // the second edge.
2619
        if edge2 != nil {
6✔
2620
                byteOrder.PutUint64(
3✔
2621
                        indexKey[:8], uint64(edge2.LastUpdate.Unix()),
3✔
2622
                )
3✔
2623
                if err := updateIndex.Delete(indexKey[:]); err != nil {
3✔
2624
                        return err
×
2625
                }
×
2626
        }
2627

2628
        return nil
3✔
2629
}
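
// Illustrative sketch (not part of the original file): how the 16-byte edge
// update index key deleted above is laid out. The first 8 bytes hold the
// policy's last-update timestamp and the last 8 bytes hold the channel ID,
// both big-endian, so entries sort chronologically. Names are hypothetical.
func exampleEdgeUpdateIndexKey(lastUpdate time.Time, chanID uint64) [16]byte {
        var indexKey [8 + 8]byte
        byteOrder.PutUint64(indexKey[:8], uint64(lastUpdate.Unix()))
        byteOrder.PutUint64(indexKey[8:], chanID)

        return indexKey
}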
2630

2631
// delChannelEdgeUnsafe deletes the edge with the given chanID from the graph
2632
// cache. It then goes on to delete any policy info and edge info for this
2633
// channel from the DB and finally, if isZombie is true, it will add an entry
2634
// for this channel in the zombie index.
2635
//
2636
// NOTE: this method MUST only be called if the cacheMu has already been
2637
// acquired.
2638
func (c *KVStore) delChannelEdgeUnsafe(edges, edgeIndex, chanIndex,
2639
        zombieIndex kvdb.RwBucket, chanID []byte, isZombie,
2640
        strictZombie bool) (*models.ChannelEdgeInfo, error) {
3✔
2641

3✔
2642
        edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID)
3✔
2643
        if err != nil {
3✔
UNCOV
2644
                return nil, err
×
UNCOV
2645
        }
×
2646

2647
        // We'll also remove the entry in the edge update index bucket before
2648
        // we delete the edges themselves so we can access their last update
2649
        // times.
2650
        cid := byteOrder.Uint64(chanID)
3✔
2651
        edge1, edge2, err := fetchChanEdgePolicies(edgeIndex, edges, chanID)
3✔
2652
        if err != nil {
3✔
2653
                return nil, err
×
2654
        }
×
2655
        err = delEdgeUpdateIndexEntry(edges, cid, edge1, edge2)
3✔
2656
        if err != nil {
3✔
2657
                return nil, err
×
2658
        }
×
2659

2660
        // The edge key is of the format pubKey || chanID. First we construct
2661
        // the latter half, populating the channel ID.
2662
        var edgeKey [33 + 8]byte
3✔
2663
        copy(edgeKey[33:], chanID)
3✔
2664

3✔
2665
        // With the latter half constructed, copy over the first public key to
3✔
2666
        // delete the edge in this direction, then the second to delete the
3✔
2667
        // edge in the opposite direction.
3✔
2668
        copy(edgeKey[:33], edgeInfo.NodeKey1Bytes[:])
3✔
2669
        if edges.Get(edgeKey[:]) != nil {
6✔
2670
                if err := edges.Delete(edgeKey[:]); err != nil {
3✔
2671
                        return nil, err
×
2672
                }
×
2673
        }
2674
        copy(edgeKey[:33], edgeInfo.NodeKey2Bytes[:])
3✔
2675
        if edges.Get(edgeKey[:]) != nil {
6✔
2676
                if err := edges.Delete(edgeKey[:]); err != nil {
3✔
2677
                        return nil, err
×
2678
                }
×
2679
        }
2680

2681
        // As part of deleting the edge we also remove all disabled entries
2682
        // from the edgePolicyDisabledIndex bucket. We do that for both
2683
        // directions.
2684
        err = updateEdgePolicyDisabledIndex(edges, cid, false, false)
3✔
2685
        if err != nil {
3✔
2686
                return nil, err
×
2687
        }
×
2688
        err = updateEdgePolicyDisabledIndex(edges, cid, true, false)
3✔
2689
        if err != nil {
3✔
2690
                return nil, err
×
2691
        }
×
2692

2693
        // With the edge data deleted, we can purge the information from the two
2694
        // edge indexes.
2695
        if err := edgeIndex.Delete(chanID); err != nil {
3✔
2696
                return nil, err
×
2697
        }
×
2698
        var b bytes.Buffer
3✔
2699
        if err := WriteOutpoint(&b, &edgeInfo.ChannelPoint); err != nil {
3✔
2700
                return nil, err
×
2701
        }
×
2702
        if err := chanIndex.Delete(b.Bytes()); err != nil {
3✔
2703
                return nil, err
×
2704
        }
×
2705

2706
        // Finally, we'll mark the edge as a zombie within our index if it's
2707
        // being removed due to the channel becoming a zombie. We do this to
2708
        // ensure we don't store unnecessary data for spent channels.
2709
        if !isZombie {
6✔
2710
                return &edgeInfo, nil
3✔
2711
        }
3✔
2712

2713
        nodeKey1, nodeKey2 := edgeInfo.NodeKey1Bytes, edgeInfo.NodeKey2Bytes
3✔
2714
        if strictZombie {
3✔
UNCOV
2715
                nodeKey1, nodeKey2 = makeZombiePubkeys(&edgeInfo, edge1, edge2)
×
UNCOV
2716
        }
×
2717

2718
        return &edgeInfo, markEdgeZombie(
3✔
2719
                zombieIndex, byteOrder.Uint64(chanID), nodeKey1, nodeKey2,
3✔
2720
        )
3✔
2721
}
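
// Illustrative sketch (not part of the original file): the 41-byte edge bucket
// key deleted above, one per direction. It is the node's compressed public
// key followed by the big-endian channel ID, which is also the layout the
// prefix scan in nodeTraversal below relies on. Names are hypothetical.
func exampleEdgeKey(nodePub [33]byte, chanID []byte) [41]byte {
        var edgeKey [33 + 8]byte
        copy(edgeKey[:33], nodePub[:])
        copy(edgeKey[33:], chanID)

        return edgeKey
}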
2722

2723
// makeZombiePubkeys derives the node pubkeys to store in the zombie index for a
2724
// particular pair of channel policies. The return values are one of:
2725
//  1. (pubkey1, pubkey2)
2726
//  2. (pubkey1, blank)
2727
//  3. (blank, pubkey2)
2728
//
2729
// A blank pubkey means that the corresponding node will be unable to resurrect a
2730
// channel on its own. For example, node1 may continue to publish recent
2731
// updates, but node2 has fallen way behind. After marking an edge as a zombie,
2732
// we don't want another fresh update from node1 to resurrect, as the edge can
2733
// only become live once node2 finally sends something recent.
2734
//
2735
// In the case where we have neither update, we allow either party to resurrect
2736
// the channel. If the channel were to be marked zombie again, it would be
2737
// marked with the correct lagging channel since we received an update from only
2738
// one side.
2739
func makeZombiePubkeys(info *models.ChannelEdgeInfo,
UNCOV
2740
        e1, e2 *models.ChannelEdgePolicy) ([33]byte, [33]byte) {
×
UNCOV
2741

×
UNCOV
2742
        switch {
×
2743
        // If we don't have either edge policy, we'll return both pubkeys so
2744
        // that the channel can be resurrected by either party.
UNCOV
2745
        case e1 == nil && e2 == nil:
×
UNCOV
2746
                return info.NodeKey1Bytes, info.NodeKey2Bytes
×
2747

2748
        // If we're missing edge1, or if both edges are present but edge1 is
2749
        // older, we'll return edge1's pubkey and a blank pubkey for edge2. This
2750
        // means that only an update from edge1 will be able to resurrect the
2751
        // channel.
UNCOV
2752
        case e1 == nil || (e2 != nil && e1.LastUpdate.Before(e2.LastUpdate)):
×
UNCOV
2753
                return info.NodeKey1Bytes, [33]byte{}
×
2754

2755
        // Otherwise, we're missing edge2 or edge2 is the older side, so we
2756
        // return a blank pubkey for edge1. In this case, only an update from
2757
        // edge2 can resurrect the channel.
UNCOV
2758
        default:
×
UNCOV
2759
                return [33]byte{}, info.NodeKey2Bytes
×
2760
        }
2761
}
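
// Illustrative sketch (not part of the original file): exercising the three
// cases above directly. Only the fields that makeZombiePubkeys actually reads
// are populated; the timestamps are hypothetical.
func exampleZombieResurrection(info *models.ChannelEdgeInfo) {
        older := &models.ChannelEdgePolicy{LastUpdate: time.Unix(1000, 0)}
        newer := &models.ChannelEdgePolicy{LastUpdate: time.Unix(2000, 0)}

        // Neither policy is known, so either node may resurrect the channel.
        k1, k2 := makeZombiePubkeys(info, nil, nil)
        fmt.Println(k1 == info.NodeKey1Bytes, k2 == info.NodeKey2Bytes)

        // node1 is the lagging side, so only an update from node1 counts.
        k1, k2 = makeZombiePubkeys(info, older, newer)
        fmt.Println(k1 == info.NodeKey1Bytes, k2 == [33]byte{})

        // node2 is the lagging side, so only an update from node2 counts.
        k1, k2 = makeZombiePubkeys(info, newer, older)
        fmt.Println(k1 == [33]byte{}, k2 == info.NodeKey2Bytes)
}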
2762

2763
// UpdateEdgePolicy updates the edge routing policy for a single directed edge
2764
// within the database for the referenced channel. The `flags` attribute within
2765
// the ChannelEdgePolicy determines which of the directed edges are being
2766
// updated. If the flag is 0, then the first node's information is being
2767
// updated, otherwise it's the second node's information. The node ordering is
2768
// determined by the lexicographical ordering of the identity public keys of the
2769
// nodes on either side of the channel.
2770
func (c *KVStore) UpdateEdgePolicy(edge *models.ChannelEdgePolicy,
2771
        opts ...batch.SchedulerOption) (route.Vertex, route.Vertex, error) {
3✔
2772

3✔
2773
        var (
3✔
2774
                ctx          = context.TODO()
3✔
2775
                isUpdate1    bool
3✔
2776
                edgeNotFound bool
3✔
2777
                from, to     route.Vertex
3✔
2778
        )
3✔
2779

3✔
2780
        r := &batch.Request[kvdb.RwTx]{
3✔
2781
                Opts: batch.NewSchedulerOptions(opts...),
3✔
2782
                Reset: func() {
6✔
2783
                        isUpdate1 = false
3✔
2784
                        edgeNotFound = false
3✔
2785
                },
3✔
2786
                Do: func(tx kvdb.RwTx) error {
3✔
2787
                        var err error
3✔
2788
                        from, to, isUpdate1, err = updateEdgePolicy(tx, edge)
3✔
2789
                        if err != nil {
3✔
UNCOV
2790
                                log.Errorf("UpdateEdgePolicy faild: %v", err)
×
UNCOV
2791
                        }
×
2792

2793
                        // Silence ErrEdgeNotFound so that the batch can
2794
                        // succeed, but propagate the error via local state.
2795
                        if errors.Is(err, ErrEdgeNotFound) {
3✔
UNCOV
2796
                                edgeNotFound = true
×
UNCOV
2797
                                return nil
×
UNCOV
2798
                        }
×
2799

2800
                        return err
3✔
2801
                },
2802
                OnCommit: func(err error) error {
3✔
2803
                        switch {
3✔
UNCOV
2804
                        case err != nil:
×
UNCOV
2805
                                return err
×
UNCOV
2806
                        case edgeNotFound:
×
UNCOV
2807
                                return ErrEdgeNotFound
×
2808
                        default:
3✔
2809
                                c.updateEdgeCache(edge, isUpdate1)
3✔
2810
                                return nil
3✔
2811
                        }
2812
                },
2813
        }
2814

2815
        err := c.chanScheduler.Execute(ctx, r)
3✔
2816

3✔
2817
        return from, to, err
3✔
2818
}
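
// Illustrative sketch (not part of the original file): applying a validated
// policy update and translating the scheduler result. The returned vertices
// identify the advertising and receiving nodes of the updated direction.
// Parameter names are hypothetical.
func exampleApplyPolicyUpdate(graph *KVStore,
        policy *models.ChannelEdgePolicy) error {

        from, to, err := graph.UpdateEdgePolicy(policy)
        switch {
        // The channel itself is unknown, so the caller may want to request
        // the announcement before retrying.
        case errors.Is(err, ErrEdgeNotFound):
                return fmt.Errorf("unknown channel %v: %w",
                        policy.ChannelID, err)

        case err != nil:
                return err
        }

        fmt.Printf("updated policy %v -> %v\n", from, to)

        return nil
}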
2819

2820
func (c *KVStore) updateEdgeCache(e *models.ChannelEdgePolicy,
2821
        isUpdate1 bool) {
3✔
2822

3✔
2823
        // If an entry for this channel is found in reject cache, we'll modify
3✔
2824
        // the entry with the updated timestamp for the direction that was just
3✔
2825
        // written. If the edge doesn't exist, we'll load the cache entry lazily
3✔
2826
        // during the next query for this edge.
3✔
2827
        if entry, ok := c.rejectCache.get(e.ChannelID); ok {
6✔
2828
                if isUpdate1 {
6✔
2829
                        entry.upd1Time = e.LastUpdate.Unix()
3✔
2830
                } else {
6✔
2831
                        entry.upd2Time = e.LastUpdate.Unix()
3✔
2832
                }
3✔
2833
                c.rejectCache.insert(e.ChannelID, entry)
3✔
2834
        }
2835

2836
        // If an entry for this channel is found in channel cache, we'll modify
2837
        // the entry with the updated policy for the direction that was just
2838
        // written. If the edge doesn't exist, we'll defer loading the info and
2839
        // policies and lazily read from disk during the next query.
2840
        if channel, ok := c.chanCache.get(e.ChannelID); ok {
6✔
2841
                if isUpdate1 {
6✔
2842
                        channel.Policy1 = e
3✔
2843
                } else {
6✔
2844
                        channel.Policy2 = e
3✔
2845
                }
3✔
2846
                c.chanCache.insert(e.ChannelID, channel)
3✔
2847
        }
2848
}
2849

2850
// updateEdgePolicy attempts to update an edge's policy within the relevant
2851
// buckets using an existing database transaction. The returned boolean will be
2852
// true if the updated policy belongs to node1, and false if the policy belongs
2853
// to node2.
2854
func updateEdgePolicy(tx kvdb.RwTx, edge *models.ChannelEdgePolicy) (
2855
        route.Vertex, route.Vertex, bool, error) {
3✔
2856

3✔
2857
        var noVertex route.Vertex
3✔
2858

3✔
2859
        edges := tx.ReadWriteBucket(edgeBucket)
3✔
2860
        if edges == nil {
3✔
2861
                return noVertex, noVertex, false, ErrEdgeNotFound
×
2862
        }
×
2863
        edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
3✔
2864
        if edgeIndex == nil {
3✔
2865
                return noVertex, noVertex, false, ErrEdgeNotFound
×
2866
        }
×
2867

2868
        // Create the channelID key by converting the channel ID
2869
        // integer into a byte slice.
2870
        var chanID [8]byte
3✔
2871
        byteOrder.PutUint64(chanID[:], edge.ChannelID)
3✔
2872

3✔
2873
        // With the channel ID, we then fetch the value storing the two
3✔
2874
        // nodes which connect this channel edge.
3✔
2875
        nodeInfo := edgeIndex.Get(chanID[:])
3✔
2876
        if nodeInfo == nil {
3✔
UNCOV
2877
                return noVertex, noVertex, false, ErrEdgeNotFound
×
UNCOV
2878
        }
×
2879

2880
        // Depending on the flags value passed above, either the first
2881
        // or second edge policy is being updated.
2882
        var fromNode, toNode []byte
3✔
2883
        var isUpdate1 bool
3✔
2884
        if edge.ChannelFlags&lnwire.ChanUpdateDirection == 0 {
6✔
2885
                fromNode = nodeInfo[:33]
3✔
2886
                toNode = nodeInfo[33:66]
3✔
2887
                isUpdate1 = true
3✔
2888
        } else {
6✔
2889
                fromNode = nodeInfo[33:66]
3✔
2890
                toNode = nodeInfo[:33]
3✔
2891
                isUpdate1 = false
3✔
2892
        }
3✔
2893

2894
        // Finally, with the direction of the edge being updated
2895
        // identified, we update the on-disk edge representation.
2896
        err := putChanEdgePolicy(edges, edge, fromNode, toNode)
3✔
2897
        if err != nil {
3✔
UNCOV
2898
                return noVertex, noVertex, false, err
×
UNCOV
2899
        }
×
2900

2901
        var (
3✔
2902
                fromNodePubKey route.Vertex
3✔
2903
                toNodePubKey   route.Vertex
3✔
2904
        )
3✔
2905
        copy(fromNodePubKey[:], fromNode)
3✔
2906
        copy(toNodePubKey[:], toNode)
3✔
2907

3✔
2908
        return fromNodePubKey, toNodePubKey, isUpdate1, nil
3✔
2909
}
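
// Illustrative sketch (not part of the original file): the direction test used
// above. Bit 0 of ChannelFlags (lnwire.ChanUpdateDirection) selects which of
// the two lexicographically ordered node keys the policy belongs to.
func exampleIsNode1Update(edge *models.ChannelEdgePolicy) bool {
        // A cleared direction bit means the update was produced by node1,
        // a set bit means it was produced by node2.
        return edge.ChannelFlags&lnwire.ChanUpdateDirection == 0
}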
2910

2911
// isPublic determines whether the node is seen as public within the graph from
2912
// the source node's point of view. An existing database transaction can also be
2913
// specified.
2914
func (c *KVStore) isPublic(tx kvdb.RTx, nodePub route.Vertex,
2915
        sourcePubKey []byte) (bool, error) {
3✔
2916

3✔
2917
        // In order to determine whether this node is publicly advertised within
3✔
2918
        // the graph, we'll need to look at all of its edges and check whether
3✔
2919
        // they extend to any other node than the source node. errDone will be
3✔
2920
        // used to terminate the check early.
3✔
2921
        nodeIsPublic := false
3✔
2922
        errDone := errors.New("done")
3✔
2923
        err := c.forEachNodeChannelTx(tx, nodePub, func(tx kvdb.RTx,
3✔
2924
                info *models.ChannelEdgeInfo, _ *models.ChannelEdgePolicy,
3✔
2925
                _ *models.ChannelEdgePolicy) error {
6✔
2926

3✔
2927
                // If this edge doesn't extend to the source node, we'll
3✔
2928
                // terminate our search as we can now conclude that the node is
3✔
2929
                // publicly advertised within the graph due to the local node
3✔
2930
                // knowing of the current edge.
3✔
2931
                if !bytes.Equal(info.NodeKey1Bytes[:], sourcePubKey) &&
3✔
2932
                        !bytes.Equal(info.NodeKey2Bytes[:], sourcePubKey) {
6✔
2933

3✔
2934
                        nodeIsPublic = true
3✔
2935
                        return errDone
3✔
2936
                }
3✔
2937

2938
                // Since the edge _does_ extend to the source node, we'll also
2939
                // need to ensure that this is a public edge.
2940
                if info.AuthProof != nil {
6✔
2941
                        nodeIsPublic = true
3✔
2942
                        return errDone
3✔
2943
                }
3✔
2944

2945
                // Otherwise, we'll continue our search.
2946
                return nil
3✔
2947
        })
2948
        if err != nil && !errors.Is(err, errDone) {
3✔
2949
                return false, err
×
2950
        }
×
2951

2952
        return nodeIsPublic, nil
3✔
2953
}
2954

2955
// FetchLightningNodeTx attempts to look up a target node by its identity
2956
// public key. If the node isn't found in the database, then
2957
// ErrGraphNodeNotFound is returned. An optional transaction may be provided.
2958
// If none is provided, then a new one will be created.
2959
func (c *KVStore) FetchLightningNodeTx(tx kvdb.RTx, nodePub route.Vertex) (
2960
        *models.LightningNode, error) {
3✔
2961

3✔
2962
        return c.fetchLightningNode(tx, nodePub)
3✔
2963
}
3✔
2964

2965
// FetchLightningNode attempts to look up a target node by its identity public
2966
// key. If the node isn't found in the database, then ErrGraphNodeNotFound is
2967
// returned.
2968
func (c *KVStore) FetchLightningNode(nodePub route.Vertex) (
2969
        *models.LightningNode, error) {
3✔
2970

3✔
2971
        return c.fetchLightningNode(nil, nodePub)
3✔
2972
}
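
// Illustrative sketch (not part of the original file): distinguishing an
// unknown node from a lookup failure. The parameter names are hypothetical.
func exampleLookupNode(graph *KVStore, pub route.Vertex) error {
        node, err := graph.FetchLightningNode(pub)
        switch {
        // The pubkey has never been seen in a node announcement.
        case errors.Is(err, ErrGraphNodeNotFound):
                fmt.Printf("node %x is unknown\n", pub[:])
                return nil

        case err != nil:
                return err
        }

        fmt.Printf("node %x last updated at %v\n", pub[:], node.LastUpdate)

        return nil
}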
3✔
2973

2974
// fetchLightningNode attempts to look up a target node by its identity public
2975
// key. If the node isn't found in the database, then ErrGraphNodeNotFound is
2976
// returned. An optional transaction may be provided. If none is provided, then
2977
// a new one will be created.
2978
func (c *KVStore) fetchLightningNode(tx kvdb.RTx,
2979
        nodePub route.Vertex) (*models.LightningNode, error) {
3✔
2980

3✔
2981
        var node *models.LightningNode
3✔
2982
        fetch := func(tx kvdb.RTx) error {
6✔
2983
                // First grab the nodes bucket which stores the mapping from
3✔
2984
                // pubKey to node information.
3✔
2985
                nodes := tx.ReadBucket(nodeBucket)
3✔
2986
                if nodes == nil {
3✔
2987
                        return ErrGraphNotFound
×
2988
                }
×
2989

2990
                // If a key for this serialized public key isn't found, then
2991
                // the target node doesn't exist within the database.
2992
                nodeBytes := nodes.Get(nodePub[:])
3✔
2993
                if nodeBytes == nil {
6✔
2994
                        return ErrGraphNodeNotFound
3✔
2995
                }
3✔
2996

2997
                // If the node is found, then we can deserialize the node
2998
                // information to return to the user.
2999
                nodeReader := bytes.NewReader(nodeBytes)
3✔
3000
                n, err := deserializeLightningNode(nodeReader)
3✔
3001
                if err != nil {
3✔
3002
                        return err
×
3003
                }
×
3004

3005
                node = &n
3✔
3006

3✔
3007
                return nil
3✔
3008
        }
3009

3010
        if tx == nil {
6✔
3011
                err := kvdb.View(
3✔
3012
                        c.db, fetch, func() {
6✔
3013
                                node = nil
3✔
3014
                        },
3✔
3015
                )
3016
                if err != nil {
6✔
3017
                        return nil, err
3✔
3018
                }
3✔
3019

3020
                return node, nil
3✔
3021
        }
3022

UNCOV
3023
        err := fetch(tx)
×
UNCOV
3024
        if err != nil {
×
UNCOV
3025
                return nil, err
×
UNCOV
3026
        }
×
3027

UNCOV
3028
        return node, nil
×
3029
}
3030

3031
// HasLightningNode determines if the graph has a vertex identified by the
3032
// target node identity public key. If the node exists in the database, a
3033
// timestamp of when the data for the node was last updated is returned along
3034
// with a true boolean. Otherwise, an empty time.Time is returned with a false
3035
// boolean.
3036
func (c *KVStore) HasLightningNode(nodePub [33]byte) (time.Time, bool,
3037
        error) {
3✔
3038

3✔
3039
        var (
3✔
3040
                updateTime time.Time
3✔
3041
                exists     bool
3✔
3042
        )
3✔
3043

3✔
3044
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
6✔
3045
                // First grab the nodes bucket which stores the mapping from
3✔
3046
                // pubKey to node information.
3✔
3047
                nodes := tx.ReadBucket(nodeBucket)
3✔
3048
                if nodes == nil {
3✔
3049
                        return ErrGraphNotFound
×
3050
                }
×
3051

3052
                // If a key for this serialized public key isn't found, we can
3053
                // exit early.
3054
                nodeBytes := nodes.Get(nodePub[:])
3✔
3055
                if nodeBytes == nil {
6✔
3056
                        exists = false
3✔
3057
                        return nil
3✔
3058
                }
3✔
3059

3060
                // Otherwise we continue on to obtain the time stamp
3061
                // representing the last time the data for this node was
3062
                // updated.
3063
                nodeReader := bytes.NewReader(nodeBytes)
3✔
3064
                node, err := deserializeLightningNode(nodeReader)
3✔
3065
                if err != nil {
3✔
3066
                        return err
×
3067
                }
×
3068

3069
                exists = true
3✔
3070
                updateTime = node.LastUpdate
3✔
3071

3✔
3072
                return nil
3✔
3073
        }, func() {
3✔
3074
                updateTime = time.Time{}
3✔
3075
                exists = false
3✔
3076
        })
3✔
3077
        if err != nil {
3✔
3078
                return time.Time{}, exists, err
×
3079
        }
×
3080

3081
        return updateTime, exists, nil
3✔
3082
}
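
// Illustrative sketch (not part of the original file): deciding whether a
// fresh node announcement is actually newer than what is already stored,
// using the timestamp and existence flag returned above. Names are
// hypothetical.
func exampleShouldStoreAnnouncement(graph *KVStore, pub [33]byte,
        announceTime time.Time) (bool, error) {

        lastUpdate, exists, err := graph.HasLightningNode(pub)
        if err != nil {
                return false, err
        }

        // Unknown nodes, or nodes with an older timestamp, should be updated.
        return !exists || announceTime.After(lastUpdate), nil
}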
3083

3084
// nodeTraversal is used to traverse all channels of a node given by its
3085
// public key and passes channel information into the specified callback.
3086
func nodeTraversal(tx kvdb.RTx, nodePub []byte, db kvdb.Backend,
3087
        cb func(kvdb.RTx, *models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
3088
                *models.ChannelEdgePolicy) error) error {
3✔
3089

3✔
3090
        traversal := func(tx kvdb.RTx) error {
6✔
3091
                edges := tx.ReadBucket(edgeBucket)
3✔
3092
                if edges == nil {
3✔
3093
                        return ErrGraphNotFound
×
3094
                }
×
3095
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
3✔
3096
                if edgeIndex == nil {
3✔
3097
                        return ErrGraphNoEdgesFound
×
3098
                }
×
3099

3100
                // In order to reach all the edges for this node, we take
3101
                // advantage of the construction of the key-space within the
3102
                // edge bucket. The keys are stored in the form: pubKey ||
3103
                // chanID. Therefore, starting from a chanID of zero, we can
3104
                // scan forward in the bucket, grabbing all the edges for the
3105
                // node. Once the prefix no longer matches, then we know we're
3106
                // done.
3107
                var nodeStart [33 + 8]byte
3✔
3108
                copy(nodeStart[:], nodePub)
3✔
3109
                copy(nodeStart[33:], chanStart[:])
3✔
3110

3✔
3111
                // Starting from the key pubKey || 0, we seek forward in the
3✔
3112
                // bucket until the retrieved key no longer has the public key
3✔
3113
                // as its prefix. This indicates that we've stepped over into
3✔
3114
                // another node's edges, so we can terminate our scan.
3✔
3115
                edgeCursor := edges.ReadCursor()
3✔
3116
                for nodeEdge, _ := edgeCursor.Seek(nodeStart[:]); bytes.HasPrefix(nodeEdge, nodePub); nodeEdge, _ = edgeCursor.Next() { //nolint:ll
6✔
3117
                        // If the prefix still matches, the channel id is
3✔
3118
                        // returned in nodeEdge. Channel id is used to lookup
3✔
3119
                        // the node at the other end of the channel and both
3✔
3120
                        // edge policies.
3✔
3121
                        chanID := nodeEdge[33:]
3✔
3122
                        edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID)
3✔
3123
                        if err != nil {
3✔
3124
                                return err
×
3125
                        }
×
3126

3127
                        outgoingPolicy, err := fetchChanEdgePolicy(
3✔
3128
                                edges, chanID, nodePub,
3✔
3129
                        )
3✔
3130
                        if err != nil {
3✔
3131
                                return err
×
3132
                        }
×
3133

3134
                        otherNode, err := edgeInfo.OtherNodeKeyBytes(nodePub)
3✔
3135
                        if err != nil {
3✔
3136
                                return err
×
3137
                        }
×
3138

3139
                        incomingPolicy, err := fetchChanEdgePolicy(
3✔
3140
                                edges, chanID, otherNode[:],
3✔
3141
                        )
3✔
3142
                        if err != nil {
3✔
3143
                                return err
×
3144
                        }
×
3145

3146
                        // Finally, we execute the callback.
3147
                        err = cb(tx, &edgeInfo, outgoingPolicy, incomingPolicy)
3✔
3148
                        if err != nil {
6✔
3149
                                return err
3✔
3150
                        }
3✔
3151
                }
3152

3153
                return nil
3✔
3154
        }
3155

3156
        // If no transaction was provided, then we'll create a new transaction
3157
        // to execute the transaction within.
3158
        if tx == nil {
6✔
3159
                return kvdb.View(db, traversal, func() {})
6✔
3160
        }
3161

3162
        // Otherwise, we re-use the existing transaction to execute the graph
3163
        // traversal.
3164
        return traversal(tx)
3✔
3165
}
3166

3167
// ForEachNodeChannel iterates through all channels of the given node,
3168
// executing the passed callback with an edge info structure and the policies
3169
// of each end of the channel. The first edge policy is the outgoing edge *to*
3170
// the connecting node, while the second is the incoming edge *from* the
3171
// connecting node. If the callback returns an error, then the iteration is
3172
// halted with the error propagated back up to the caller.
3173
//
3174
// Unknown policies are passed into the callback as nil values.
3175
func (c *KVStore) ForEachNodeChannel(nodePub route.Vertex,
3176
        cb func(*models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
3177
                *models.ChannelEdgePolicy) error) error {
3✔
3178

3✔
3179
        return nodeTraversal(nil, nodePub[:], c.db, func(_ kvdb.RTx,
3✔
3180
                info *models.ChannelEdgeInfo, policy,
3✔
3181
                policy2 *models.ChannelEdgePolicy) error {
6✔
3182

3✔
3183
                return cb(info, policy, policy2)
3✔
3184
        })
3✔
3185
}
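
// Illustrative sketch (not part of the original file): tallying a node's
// channels and how many are still missing a policy for one direction. The
// first policy passed to the callback is the outgoing edge, the second the
// incoming edge.
func exampleCountNodeChannels(graph *KVStore,
        pub route.Vertex) (int, int, error) {

        var total, missingPolicy int
        err := graph.ForEachNodeChannel(pub,
                func(_ *models.ChannelEdgeInfo, outPolicy,
                        inPolicy *models.ChannelEdgePolicy) error {

                        total++
                        if outPolicy == nil || inPolicy == nil {
                                missingPolicy++
                        }

                        return nil
                },
        )

        return total, missingPolicy, err
}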
3186

3187
// ForEachSourceNodeChannel iterates through all channels of the source node,
3188
// executing the passed callback on each. The callback is provided with the
3189
// channel's outpoint, whether we have a policy for the channel and the channel
3190
// peer's node information.
3191
func (c *KVStore) ForEachSourceNodeChannel(cb func(chanPoint wire.OutPoint,
3192
        havePolicy bool, otherNode *models.LightningNode) error) error {
3✔
3193

3✔
3194
        return kvdb.View(c.db, func(tx kvdb.RTx) error {
6✔
3195
                nodes := tx.ReadBucket(nodeBucket)
3✔
3196
                if nodes == nil {
3✔
3197
                        return ErrGraphNotFound
×
3198
                }
×
3199

3200
                node, err := c.sourceNode(nodes)
3✔
3201
                if err != nil {
3✔
3202
                        return err
×
3203
                }
×
3204

3205
                return nodeTraversal(
3✔
3206
                        tx, node.PubKeyBytes[:], c.db, func(tx kvdb.RTx,
3✔
3207
                                info *models.ChannelEdgeInfo,
3✔
3208
                                policy, _ *models.ChannelEdgePolicy) error {
6✔
3209

3✔
3210
                                peer, err := c.fetchOtherNode(
3✔
3211
                                        tx, info, node.PubKeyBytes[:],
3✔
3212
                                )
3✔
3213
                                if err != nil {
3✔
3214
                                        return err
×
3215
                                }
×
3216

3217
                                return cb(
3✔
3218
                                        info.ChannelPoint, policy != nil, peer,
3✔
3219
                                )
3✔
3220
                        },
3221
                )
3222
        }, func() {})
3✔
3223
}
3224

3225
// forEachNodeChannelTx iterates through all channels of the given node,
3226
// executing the passed callback with an edge info structure and the policies
3227
// of each end of the channel. The first edge policy is the outgoing edge *to*
3228
// the connecting node, while the second is the incoming edge *from* the
3229
// connecting node. If the callback returns an error, then the iteration is
3230
// halted with the error propagated back up to the caller.
3231
//
3232
// Unknown policies are passed into the callback as nil values.
3233
//
3234
// If the caller wishes to re-use an existing boltdb transaction, then it
3235
// should be passed as the first argument.  Otherwise, the first argument should
3236
// be nil and a fresh transaction will be created to execute the graph
3237
// traversal.
3238
func (c *KVStore) forEachNodeChannelTx(tx kvdb.RTx,
3239
        nodePub route.Vertex, cb func(kvdb.RTx, *models.ChannelEdgeInfo,
3240
                *models.ChannelEdgePolicy,
3241
                *models.ChannelEdgePolicy) error) error {
3✔
3242

3✔
3243
        return nodeTraversal(tx, nodePub[:], c.db, cb)
3✔
3244
}
3✔
3245

3246
// fetchOtherNode attempts to fetch the full LightningNode that's opposite of
3247
// the target node in the channel. This is useful when one knows the pubkey of
3248
// one of the nodes, and wishes to obtain the full LightningNode for the other
3249
// end of the channel.
3250
func (c *KVStore) fetchOtherNode(tx kvdb.RTx,
3251
        channel *models.ChannelEdgeInfo, thisNodeKey []byte) (
3252
        *models.LightningNode, error) {
3✔
3253

3✔
3254
        // Ensure that the node passed in is actually a member of the channel.
3✔
3255
        var targetNodeBytes [33]byte
3✔
3256
        switch {
3✔
3257
        case bytes.Equal(channel.NodeKey1Bytes[:], thisNodeKey):
3✔
3258
                targetNodeBytes = channel.NodeKey2Bytes
3✔
3259
        case bytes.Equal(channel.NodeKey2Bytes[:], thisNodeKey):
3✔
3260
                targetNodeBytes = channel.NodeKey1Bytes
3✔
3261
        default:
×
3262
                return nil, fmt.Errorf("node not participating in this channel")
×
3263
        }
3264

3265
        var targetNode *models.LightningNode
3✔
3266
        fetchNodeFunc := func(tx kvdb.RTx) error {
6✔
3267
                // First grab the nodes bucket which stores the mapping from
3✔
3268
                // pubKey to node information.
3✔
3269
                nodes := tx.ReadBucket(nodeBucket)
3✔
3270
                if nodes == nil {
3✔
3271
                        return ErrGraphNotFound
×
3272
                }
×
3273

3274
                node, err := fetchLightningNode(nodes, targetNodeBytes[:])
3✔
3275
                if err != nil {
3✔
3276
                        return err
×
3277
                }
×
3278

3279
                targetNode = &node
3✔
3280

3✔
3281
                return nil
3✔
3282
        }
3283

3284
        // If the transaction is nil, then we'll need to create a new one,
3285
        // otherwise we can use the existing db transaction.
3286
        var err error
3✔
3287
        if tx == nil {
3✔
3288
                err = kvdb.View(c.db, fetchNodeFunc, func() {
×
3289
                        targetNode = nil
×
3290
                })
×
3291
        } else {
3✔
3292
                err = fetchNodeFunc(tx)
3✔
3293
        }
3✔
3294

3295
        return targetNode, err
3✔
3296
}
3297

3298
// computeEdgePolicyKeys is a helper function that can be used to compute the
3299
// keys used to index the channel edge policy info for the two nodes of the
3300
// edge. The keys for node 1 and node 2 are returned respectively.
3301
func computeEdgePolicyKeys(info *models.ChannelEdgeInfo) ([]byte, []byte) {
3✔
3302
        var (
3✔
3303
                node1Key [33 + 8]byte
3✔
3304
                node2Key [33 + 8]byte
3✔
3305
        )
3✔
3306

3✔
3307
        copy(node1Key[:], info.NodeKey1Bytes[:])
3✔
3308
        copy(node2Key[:], info.NodeKey2Bytes[:])
3✔
3309

3✔
3310
        byteOrder.PutUint64(node1Key[33:], info.ChannelID)
3✔
3311
        byteOrder.PutUint64(node2Key[33:], info.ChannelID)
3✔
3312

3✔
3313
        return node1Key[:], node2Key[:]
3✔
3314
}
3✔
3315

3316
// FetchChannelEdgesByOutpoint attempts to lookup the two directed edges for
3317
// the channel identified by the funding outpoint. If the channel can't be
3318
// found, then ErrEdgeNotFound is returned. A struct which houses the general
3319
// information for the channel itself is returned as well as two structs that
3320
// contain the routing policies for the channel in either direction.
3321
func (c *KVStore) FetchChannelEdgesByOutpoint(op *wire.OutPoint) (
3322
        *models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
3323
        *models.ChannelEdgePolicy, error) {
3✔
3324

3✔
3325
        var (
3✔
3326
                edgeInfo *models.ChannelEdgeInfo
3✔
3327
                policy1  *models.ChannelEdgePolicy
3✔
3328
                policy2  *models.ChannelEdgePolicy
3✔
3329
        )
3✔
3330

3✔
3331
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
6✔
3332
                // First, grab the node bucket. This will be used to populate
3✔
3333
                // the Node pointers in each edge read from disk.
3✔
3334
                nodes := tx.ReadBucket(nodeBucket)
3✔
3335
                if nodes == nil {
3✔
3336
                        return ErrGraphNotFound
×
3337
                }
×
3338

3339
                // Next, grab the edge bucket which stores the edges, and also
3340
                // the index itself so we can group the directed edges together
3341
                // logically.
3342
                edges := tx.ReadBucket(edgeBucket)
3✔
3343
                if edges == nil {
3✔
3344
                        return ErrGraphNoEdgesFound
×
3345
                }
×
3346
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
3✔
3347
                if edgeIndex == nil {
3✔
3348
                        return ErrGraphNoEdgesFound
×
3349
                }
×
3350

3351
                // If the channel's outpoint doesn't exist within the outpoint
3352
                // index, then the edge does not exist.
3353
                chanIndex := edges.NestedReadBucket(channelPointBucket)
3✔
3354
                if chanIndex == nil {
3✔
3355
                        return ErrGraphNoEdgesFound
×
3356
                }
×
3357
                var b bytes.Buffer
3✔
3358
                if err := WriteOutpoint(&b, op); err != nil {
3✔
3359
                        return err
×
3360
                }
×
3361
                chanID := chanIndex.Get(b.Bytes())
3✔
3362
                if chanID == nil {
6✔
3363
                        return fmt.Errorf("%w: op=%v", ErrEdgeNotFound, op)
3✔
3364
                }
3✔
3365

3366
                // If the channel is found to exist, then we'll first retrieve
3367
                // the general information for the channel.
3368
                edge, err := fetchChanEdgeInfo(edgeIndex, chanID)
3✔
3369
                if err != nil {
3✔
3370
                        return fmt.Errorf("%w: chanID=%x", err, chanID)
×
3371
                }
×
3372
                edgeInfo = &edge
3✔
3373

3✔
3374
                // Once we have the information about the channels' parameters,
3✔
3375
                // we'll fetch the routing policies for each for the directed
3✔
3376
                // edges.
3✔
3377
                e1, e2, err := fetchChanEdgePolicies(edgeIndex, edges, chanID)
3✔
3378
                if err != nil {
3✔
3379
                        return fmt.Errorf("failed to find policy: %w", err)
×
3380
                }
×
3381

3382
                policy1 = e1
3✔
3383
                policy2 = e2
3✔
3384

3✔
3385
                return nil
3✔
3386
        }, func() {
3✔
3387
                edgeInfo = nil
3✔
3388
                policy1 = nil
3✔
3389
                policy2 = nil
3✔
3390
        })
3✔
3391
        if err != nil {
6✔
3392
                return nil, nil, nil, err
3✔
3393
        }
3✔
3394

3395
        return edgeInfo, policy1, policy2, nil
3✔
3396
}
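
// Illustrative sketch (not part of the original file): resolving a funding
// outpoint seen on chain back to its graph data. The parameter names are
// hypothetical.
func exampleLookupByOutpoint(graph *KVStore, op wire.OutPoint) error {
        info, policy1, policy2, err := graph.FetchChannelEdgesByOutpoint(&op)
        if errors.Is(err, ErrEdgeNotFound) {
                // The outpoint does not fund any channel we know of.
                return nil
        }
        if err != nil {
                return err
        }

        fmt.Printf("chan %v has policy1=%v policy2=%v\n",
                info.ChannelID, policy1 != nil, policy2 != nil)

        return nil
}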
3397

3398
// FetchChannelEdgesByID attempts to lookup the two directed edges for the
3399
// channel identified by the channel ID. If the channel can't be found, then
3400
// ErrEdgeNotFound is returned. A struct which houses the general information
3401
// for the channel itself is returned as well as two structs that contain the
3402
// routing policies for the channel in either direction.
3403
//
3404
// ErrZombieEdge can be returned if the edge is currently marked as a zombie
3405
// within the database. In this case, the ChannelEdgePolicies will be nil, and
3406
// the ChannelEdgeInfo will only include the public keys of each node.
3407
func (c *KVStore) FetchChannelEdgesByID(chanID uint64) (
3408
        *models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
3409
        *models.ChannelEdgePolicy, error) {
3✔
3410

3✔
3411
        var (
3✔
3412
                edgeInfo  *models.ChannelEdgeInfo
3✔
3413
                policy1   *models.ChannelEdgePolicy
3✔
3414
                policy2   *models.ChannelEdgePolicy
3✔
3415
                channelID [8]byte
3✔
3416
        )
3✔
3417

3✔
3418
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
6✔
3419
                // First, grab the node bucket. This will be used to populate
3✔
3420
                // the Node pointers in each edge read from disk.
3✔
3421
                nodes := tx.ReadBucket(nodeBucket)
3✔
3422
                if nodes == nil {
3✔
3423
                        return ErrGraphNotFound
×
3424
                }
×
3425

3426
                // Next, grab the edge bucket which stores the edges, and also
3427
                // the index itself so we can group the directed edges together
3428
                // logically.
3429
                edges := tx.ReadBucket(edgeBucket)
3✔
3430
                if edges == nil {
3✔
3431
                        return ErrGraphNoEdgesFound
×
3432
                }
×
3433
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
3✔
3434
                if edgeIndex == nil {
3✔
3435
                        return ErrGraphNoEdgesFound
×
3436
                }
×
3437

3438
                byteOrder.PutUint64(channelID[:], chanID)
3✔
3439

3✔
3440
                // Now, attempt to fetch edge.
3✔
3441
                edge, err := fetchChanEdgeInfo(edgeIndex, channelID[:])
3✔
3442

3✔
3443
                // If it doesn't exist, we'll quickly check our zombie index to
3✔
3444
                // see if we've previously marked it as so.
3✔
3445
                if errors.Is(err, ErrEdgeNotFound) {
6✔
3446
                        // If the zombie index doesn't exist, or the edge is not
3✔
3447
                        // marked as a zombie within it, then we'll return the
3✔
3448
                        // original ErrEdgeNotFound error.
3✔
3449
                        zombieIndex := edges.NestedReadBucket(zombieBucket)
3✔
3450
                        if zombieIndex == nil {
3✔
3451
                                return ErrEdgeNotFound
×
3452
                        }
×
3453

3454
                        isZombie, pubKey1, pubKey2 := isZombieEdge(
3✔
3455
                                zombieIndex, chanID,
3✔
3456
                        )
3✔
3457
                        if !isZombie {
6✔
3458
                                return ErrEdgeNotFound
3✔
3459
                        }
3✔
3460

3461
                        // Otherwise, the edge is marked as a zombie, so we'll
3462
                        // populate the edge info with the public keys of each
3463
                        // party as this is the only information we have about
3464
                        // it and return an error signaling so.
3465
                        edgeInfo = &models.ChannelEdgeInfo{
3✔
3466
                                NodeKey1Bytes: pubKey1,
3✔
3467
                                NodeKey2Bytes: pubKey2,
3✔
3468
                        }
3✔
3469

3✔
3470
                        return ErrZombieEdge
3✔
3471
                }
3472

3473
                // Otherwise, we'll just return the error if any.
3474
                if err != nil {
3✔
3475
                        return err
×
3476
                }
×
3477

3478
                edgeInfo = &edge
3✔
3479

3✔
3480
                // Then we'll attempt to fetch the accompanying policies of this
3✔
3481
                // edge.
3✔
3482
                e1, e2, err := fetchChanEdgePolicies(
3✔
3483
                        edgeIndex, edges, channelID[:],
3✔
3484
                )
3✔
3485
                if err != nil {
3✔
3486
                        return err
×
3487
                }
×
3488

3489
                policy1 = e1
3✔
3490
                policy2 = e2
3✔
3491

3✔
3492
                return nil
3✔
3493
        }, func() {
3✔
3494
                edgeInfo = nil
3✔
3495
                policy1 = nil
3✔
3496
                policy2 = nil
3✔
3497
        })
3✔
3498
        if errors.Is(err, ErrZombieEdge) {
6✔
3499
                return edgeInfo, nil, nil, err
3✔
3500
        }
3✔
3501
        if err != nil {
6✔
3502
                return nil, nil, nil, err
3✔
3503
        }
3✔
3504

3505
        return edgeInfo, policy1, policy2, nil
3✔
3506
}
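
// Illustrative sketch (not part of the original file): the ErrZombieEdge case
// above still yields a partially populated edge info, so callers can keep
// using the node keys even though the policies are gone.
func exampleLookupPossiblyZombie(graph *KVStore, chanID uint64) error {
        info, policy1, policy2, err := graph.FetchChannelEdgesByID(chanID)
        switch {
        // Only the two node keys survive for zombie channels.
        case errors.Is(err, ErrZombieEdge):
                fmt.Printf("zombie channel between %x and %x\n",
                        info.NodeKey1Bytes[:], info.NodeKey2Bytes[:])
                return nil

        case err != nil:
                return err
        }

        fmt.Printf("live channel %v: policy1=%v policy2=%v\n",
                info.ChannelID, policy1 != nil, policy2 != nil)

        return nil
}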
3507

3508
// IsPublicNode is a helper method that determines whether the node with the
3509
// given public key is seen as a public node in the graph from the graph's
3510
// source node's point of view.
3511
func (c *KVStore) IsPublicNode(pubKey [33]byte) (bool, error) {
3✔
3512
        var nodeIsPublic bool
3✔
3513
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
6✔
3514
                nodes := tx.ReadBucket(nodeBucket)
3✔
3515
                if nodes == nil {
3✔
3516
                        return ErrGraphNodesNotFound
×
3517
                }
×
3518
                ourPubKey := nodes.Get(sourceKey)
3✔
3519
                if ourPubKey == nil {
3✔
3520
                        return ErrSourceNodeNotSet
×
3521
                }
×
3522
                node, err := fetchLightningNode(nodes, pubKey[:])
3✔
3523
                if err != nil {
3✔
3524
                        return err
×
3525
                }
×
3526

3527
                nodeIsPublic, err = c.isPublic(tx, node.PubKeyBytes, ourPubKey)
3✔
3528

3✔
3529
                return err
3✔
3530
        }, func() {
3✔
3531
                nodeIsPublic = false
3✔
3532
        })
3✔
3533
        if err != nil {
3✔
3534
                return false, err
×
3535
        }
×
3536

3537
        return nodeIsPublic, nil
3✔
3538
}
3539

3540
// genMultiSigP2WSH generates the p2wsh'd multisig script for 2 of 2 pubkeys.
3541
func genMultiSigP2WSH(aPub, bPub []byte) ([]byte, error) {
3✔
3542
        witnessScript, err := input.GenMultiSigScript(aPub, bPub)
3✔
3543
        if err != nil {
3✔
3544
                return nil, err
×
3545
        }
×
3546

3547
        // With the witness script generated, we'll now turn it into a p2wsh
3548
        // script:
3549
        //  * OP_0 <sha256(script)>
3550
        bldr := txscript.NewScriptBuilder(
3✔
3551
                txscript.WithScriptAllocSize(input.P2WSHSize),
3✔
3552
        )
3✔
3553
        bldr.AddOp(txscript.OP_0)
3✔
3554
        scriptHash := sha256.Sum256(witnessScript)
3✔
3555
        bldr.AddData(scriptHash[:])
3✔
3556

3✔
3557
        return bldr.Script()
3✔
3558
}
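
// Illustrative sketch (not part of the original file): the resulting script is
// 34 bytes, OP_0 followed by the 32-byte SHA-256 of the 2-of-2 witness script,
// matching the funding outputs watched via ChannelView below. The keys are
// freshly generated here purely for demonstration.
func exampleFundingScript() ([]byte, error) {
        privA, err := btcec.NewPrivateKey()
        if err != nil {
                return nil, err
        }
        privB, err := btcec.NewPrivateKey()
        if err != nil {
                return nil, err
        }

        return genMultiSigP2WSH(
                privA.PubKey().SerializeCompressed(),
                privB.PubKey().SerializeCompressed(),
        )
}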
3559

3560
// EdgePoint couples the outpoint of a channel with the funding script that it
3561
// creates. The FilteredChainView will use this to watch for spends of this
3562
// edge point on chain. We require both of these values as depending on the
3563
// concrete implementation, either the pkScript, or the out point will be used.
3564
type EdgePoint struct {
3565
        // FundingPkScript is the p2wsh multi-sig script of the target channel.
3566
        FundingPkScript []byte
3567

3568
        // OutPoint is the outpoint of the target channel.
3569
        OutPoint wire.OutPoint
3570
}
3571

3572
// String returns a human readable version of the target EdgePoint. We return
3573
// the outpoint directly as it is enough to uniquely identify the edge point.
3574
func (e *EdgePoint) String() string {
×
3575
        return e.OutPoint.String()
×
3576
}
×
3577

3578
// ChannelView returns the verifiable edge information for each active channel
3579
// within the known channel graph. The set of UTXOs (along with their scripts)
3580
// returned are the ones that need to be watched on chain to detect channel
3581
// closes on the resident blockchain.
3582
func (c *KVStore) ChannelView() ([]EdgePoint, error) {
3✔
3583
        var edgePoints []EdgePoint
3✔
3584
        if err := kvdb.View(c.db, func(tx kvdb.RTx) error {
6✔
3585
                // We're going to iterate over the entire channel index, so
3✔
3586
                // we'll need to fetch the edgeBucket to get to the index as
3✔
3587
                // it's a sub-bucket.
3✔
3588
                edges := tx.ReadBucket(edgeBucket)
3✔
3589
                if edges == nil {
3✔
3590
                        return ErrGraphNoEdgesFound
×
3591
                }
×
3592
                chanIndex := edges.NestedReadBucket(channelPointBucket)
3✔
3593
                if chanIndex == nil {
3✔
3594
                        return ErrGraphNoEdgesFound
×
3595
                }
×
3596
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
3✔
3597
                if edgeIndex == nil {
3✔
3598
                        return ErrGraphNoEdgesFound
×
3599
                }
×
3600

3601
                // Once we have the proper bucket, we'll range over each key
3602
                // (which is the channel point for the channel) and decode it,
3603
                // accumulating each entry.
3604
                return chanIndex.ForEach(
3✔
3605
                        func(chanPointBytes, chanID []byte) error {
6✔
3606
                                chanPointReader := bytes.NewReader(
3✔
3607
                                        chanPointBytes,
3✔
3608
                                )
3✔
3609

3✔
3610
                                var chanPoint wire.OutPoint
3✔
3611
                                err := ReadOutpoint(chanPointReader, &chanPoint)
3✔
3612
                                if err != nil {
3✔
3613
                                        return err
×
3614
                                }
×
3615

3616
                                edgeInfo, err := fetchChanEdgeInfo(
3✔
3617
                                        edgeIndex, chanID,
3✔
3618
                                )
3✔
3619
                                if err != nil {
3✔
3620
                                        return err
×
3621
                                }
×
3622

3623
                                pkScript, err := genMultiSigP2WSH(
3✔
3624
                                        edgeInfo.BitcoinKey1Bytes[:],
3✔
3625
                                        edgeInfo.BitcoinKey2Bytes[:],
3✔
3626
                                )
3✔
3627
                                if err != nil {
3✔
3628
                                        return err
×
3629
                                }
×
3630

3631
                                edgePoints = append(edgePoints, EdgePoint{
3✔
3632
                                        FundingPkScript: pkScript,
3✔
3633
                                        OutPoint:        chanPoint,
3✔
3634
                                })
3✔
3635

3✔
3636
                                return nil
3✔
3637
                        },
3638
                )
3639
        }, func() {
3✔
3640
                edgePoints = nil
3✔
3641
        }); err != nil {
3✔
3642
                return nil, err
×
3643
        }
×
3644

3645
        return edgePoints, nil
3✔
3646
}
3647

3648
// MarkEdgeZombie attempts to mark a channel identified by its channel ID as a
3649
// zombie. This method is used on an ad-hoc basis, when channels need to be
3650
// marked as zombies outside the normal pruning cycle.
3651
func (c *KVStore) MarkEdgeZombie(chanID uint64,
UNCOV
3652
        pubKey1, pubKey2 [33]byte) error {
×
UNCOV
3653

×
UNCOV
3654
        c.cacheMu.Lock()
×
UNCOV
3655
        defer c.cacheMu.Unlock()
×
UNCOV
3656

×
UNCOV
3657
        err := kvdb.Batch(c.db, func(tx kvdb.RwTx) error {
×
UNCOV
3658
                edges := tx.ReadWriteBucket(edgeBucket)
×
UNCOV
3659
                if edges == nil {
×
3660
                        return ErrGraphNoEdgesFound
×
3661
                }
×
UNCOV
3662
                zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
×
UNCOV
3663
                if err != nil {
×
3664
                        return fmt.Errorf("unable to create zombie "+
×
3665
                                "bucket: %w", err)
×
3666
                }
×
3667

UNCOV
3668
                return markEdgeZombie(zombieIndex, chanID, pubKey1, pubKey2)
×
3669
        })
UNCOV
3670
        if err != nil {
×
3671
                return err
×
3672
        }
×
3673

UNCOV
3674
        c.rejectCache.remove(chanID)
×
UNCOV
3675
        c.chanCache.remove(chanID)
×
UNCOV
3676

×
UNCOV
3677
        return nil
×
3678
}
3679

3680
// markEdgeZombie marks an edge as a zombie within our zombie index. The public
3681
// keys should represent the node public keys of the two parties involved in the
3682
// edge.
3683
func markEdgeZombie(zombieIndex kvdb.RwBucket, chanID uint64, pubKey1,
3684
        pubKey2 [33]byte) error {
3✔
3685

3✔
3686
        var k [8]byte
3✔
3687
        byteOrder.PutUint64(k[:], chanID)
3✔
3688

3✔
3689
        var v [66]byte
3✔
3690
        copy(v[:33], pubKey1[:])
3✔
3691
        copy(v[33:], pubKey2[:])
3✔
3692

3✔
3693
        return zombieIndex.Put(k[:], v[:])
3✔
3694
}
3✔
3695

3696
// MarkEdgeLive clears an edge from our zombie index, deeming it as live.
UNCOV
3697
func (c *KVStore) MarkEdgeLive(chanID uint64) error {
×
UNCOV
3698
        c.cacheMu.Lock()
×
UNCOV
3699
        defer c.cacheMu.Unlock()
×
UNCOV
3700

×
UNCOV
3701
        return c.markEdgeLiveUnsafe(nil, chanID)
×
UNCOV
3702
}
×
3703

3704
// markEdgeLiveUnsafe clears an edge from the zombie index. This method can be
3705
// called with an existing kvdb.RwTx or the argument can be set to nil in which
3706
// case a new transaction will be created.
3707
//
3708
// NOTE: this method MUST only be called if the cacheMu has already been
3709
// acquired.
UNCOV
3710
func (c *KVStore) markEdgeLiveUnsafe(tx kvdb.RwTx, chanID uint64) error {
×
UNCOV
3711
        dbFn := func(tx kvdb.RwTx) error {
×
UNCOV
3712
                edges := tx.ReadWriteBucket(edgeBucket)
×
UNCOV
3713
                if edges == nil {
×
3714
                        return ErrGraphNoEdgesFound
×
3715
                }
×
UNCOV
3716
                zombieIndex := edges.NestedReadWriteBucket(zombieBucket)
×
UNCOV
3717
                if zombieIndex == nil {
×
3718
                        return nil
×
3719
                }
×
3720

UNCOV
3721
                var k [8]byte
×
UNCOV
3722
                byteOrder.PutUint64(k[:], chanID)
×
UNCOV
3723

×
UNCOV
3724
                if len(zombieIndex.Get(k[:])) == 0 {
×
UNCOV
3725
                        return ErrZombieEdgeNotFound
×
UNCOV
3726
                }
×
3727

UNCOV
3728
                return zombieIndex.Delete(k[:])
×
3729
        }
3730

3731
        // If the transaction is nil, we'll create a new one. Otherwise, we use
3732
        // the existing transaction.
UNCOV
3733
        var err error
×
UNCOV
3734
        if tx == nil {
×
UNCOV
3735
                err = kvdb.Update(c.db, dbFn, func() {})
×
3736
        } else {
×
3737
                err = dbFn(tx)
×
3738
        }
×
UNCOV
3739
        if err != nil {
×
UNCOV
3740
                return err
×
UNCOV
3741
        }
×
3742

UNCOV
3743
        c.rejectCache.remove(chanID)
×
UNCOV
3744
        c.chanCache.remove(chanID)
×
UNCOV
3745

×
UNCOV
3746
        return nil
×
3747
}
3748

3749
// IsZombieEdge returns whether the edge is considered a zombie. If it is a
3750
// zombie, then the two node public keys corresponding to this edge are also
3751
// returned.
UNCOV
3752
func (c *KVStore) IsZombieEdge(chanID uint64) (bool, [33]byte, [33]byte) {
×
UNCOV
3753
        var (
×
UNCOV
3754
                isZombie         bool
×
UNCOV
3755
                pubKey1, pubKey2 [33]byte
×
UNCOV
3756
        )
×
UNCOV
3757

×
UNCOV
3758
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
×
UNCOV
3759
                edges := tx.ReadBucket(edgeBucket)
×
UNCOV
3760
                if edges == nil {
×
3761
                        return ErrGraphNoEdgesFound
×
3762
                }
×
UNCOV
3763
                zombieIndex := edges.NestedReadBucket(zombieBucket)
×
UNCOV
3764
                if zombieIndex == nil {
×
3765
                        return nil
×
3766
                }
×
3767

UNCOV
3768
                isZombie, pubKey1, pubKey2 = isZombieEdge(zombieIndex, chanID)
×
UNCOV
3769

×
UNCOV
3770
                return nil
×
UNCOV
3771
        }, func() {
×
UNCOV
3772
                isZombie = false
×
UNCOV
3773
                pubKey1 = [33]byte{}
×
UNCOV
3774
                pubKey2 = [33]byte{}
×
UNCOV
3775
        })
×
UNCOV
3776
        if err != nil {
×
3777
                return false, [33]byte{}, [33]byte{}
×
3778
        }
×
3779

UNCOV
3780
        return isZombie, pubKey1, pubKey2
×
3781
}
3782

3783
// isZombieEdge returns whether an entry exists for the given channel in the
3784
// zombie index. If an entry exists, then the two node public keys corresponding
3785
// to this edge are also returned.
3786
func isZombieEdge(zombieIndex kvdb.RBucket,
3787
        chanID uint64) (bool, [33]byte, [33]byte) {
3✔
3788

3✔
3789
        var k [8]byte
3✔
3790
        byteOrder.PutUint64(k[:], chanID)
3✔
3791

3✔
3792
        v := zombieIndex.Get(k[:])
3✔
3793
        if v == nil {
6✔
3794
                return false, [33]byte{}, [33]byte{}
3✔
3795
        }
3✔
3796

3797
        var pubKey1, pubKey2 [33]byte
3✔
3798
        copy(pubKey1[:], v[:33])
3✔
3799
        copy(pubKey2[:], v[33:])
3✔
3800

3✔
3801
        return true, pubKey1, pubKey2
3✔
3802
}
3803

3804
// NumZombies returns the current number of zombie channels in the graph.
UNCOV
3805
func (c *KVStore) NumZombies() (uint64, error) {
×
UNCOV
3806
        var numZombies uint64
×
UNCOV
3807
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
×
UNCOV
3808
                edges := tx.ReadBucket(edgeBucket)
×
UNCOV
3809
                if edges == nil {
×
3810
                        return nil
×
3811
                }
×
UNCOV
3812
                zombieIndex := edges.NestedReadBucket(zombieBucket)
×
UNCOV
3813
                if zombieIndex == nil {
×
3814
                        return nil
×
3815
                }
×
3816

UNCOV
3817
                return zombieIndex.ForEach(func(_, _ []byte) error {
×
UNCOV
3818
                        numZombies++
×
UNCOV
3819
                        return nil
×
UNCOV
3820
                })
×
UNCOV
3821
        }, func() {
×
UNCOV
3822
                numZombies = 0
×
UNCOV
3823
        })
×
UNCOV
3824
        if err != nil {
×
3825
                return 0, err
×
3826
        }
×
3827

UNCOV
3828
        return numZombies, nil
×
3829
}
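
// Illustrative sketch (hypothetical helper, not taken from the file above):
// how a caller might use the zombie index API defined in this section. It
// assumes an existing *KVStore named store and the two node public keys of
// the channel.
func exampleZombieLifecycle(store *KVStore, chanID uint64,
        node1, node2 [33]byte) error {

        // Record the channel as a zombie so that future announcements for it
        // can be rejected cheaply.
        if err := store.MarkEdgeZombie(chanID, node1, node2); err != nil {
                return err
        }

        // The zombie index stores both 33-byte node public keys, so they can
        // be recovered when querying.
        if isZombie, pub1, pub2 := store.IsZombieEdge(chanID); isZombie {
                fmt.Printf("channel %d is a zombie between %x and %x\n",
                        chanID, pub1, pub2)
        }

        // If the channel turns out to be alive after all, clear the entry so
        // that it is no longer counted by NumZombies.
        return store.MarkEdgeLive(chanID)
}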
3830

3831
// PutClosedScid stores a SCID for a closed channel in the database. This is so
3832
// that we can ignore channel announcements that we know to be closed without
3833
// having to validate them and fetch a block.
UNCOV
3834
func (c *KVStore) PutClosedScid(scid lnwire.ShortChannelID) error {
×
UNCOV
3835
        return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
×
UNCOV
3836
                closedScids, err := tx.CreateTopLevelBucket(closedScidBucket)
×
UNCOV
3837
                if err != nil {
×
3838
                        return err
×
3839
                }
×
3840

UNCOV
3841
                var k [8]byte
×
UNCOV
3842
                byteOrder.PutUint64(k[:], scid.ToUint64())
×
UNCOV
3843

×
UNCOV
3844
                return closedScids.Put(k[:], []byte{})
×
UNCOV
3845
        }, func() {})
×
3846
}
3847

3848
// IsClosedScid checks whether a channel identified by the passed in scid is
3849
// closed. This helps avoid having to perform expensive validation checks.
3850
// TODO: Add an LRU cache to cut down on disk reads.
3851
func (c *KVStore) IsClosedScid(scid lnwire.ShortChannelID) (bool, error) {
3✔
3852
        var isClosed bool
3✔
3853
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
6✔
3854
                closedScids := tx.ReadBucket(closedScidBucket)
3✔
3855
                if closedScids == nil {
3✔
3856
                        return ErrClosedScidsNotFound
×
3857
                }
×
3858

3859
                var k [8]byte
3✔
3860
                byteOrder.PutUint64(k[:], scid.ToUint64())
3✔
3861

3✔
3862
                if closedScids.Get(k[:]) != nil {
3✔
UNCOV
3863
                        isClosed = true
×
UNCOV
3864
                        return nil
×
UNCOV
3865
                }
×
3866

3867
                return nil
3✔
3868
        }, func() {
3✔
3869
                isClosed = false
3✔
3870
        })
3✔
3871
        if err != nil {
3✔
3872
                return false, err
×
3873
        }
×
3874

3875
        return isClosed, nil
3✔
3876
}
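
// Illustrative sketch (hypothetical helper): recording a closed SCID and then
// consulting the index before doing any expensive validation of a channel
// announcement. It assumes an existing *KVStore named store.
func exampleClosedScidFilter(store *KVStore,
        scid lnwire.ShortChannelID) (bool, error) {

        // Persist the SCID once the channel is known to be closed on chain.
        if err := store.PutClosedScid(scid); err != nil {
                return false, err
        }

        // A later announcement carrying the same SCID can now be skipped
        // without fetching the funding block or checking signatures.
        return store.IsClosedScid(scid)
}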
3877

3878
// GraphSession will provide the call-back with access to a NodeTraverser
3879
// instance which can be used to perform queries against the channel graph.
UNCOV
3880
func (c *KVStore) GraphSession(cb func(graph NodeTraverser) error) error {
×
UNCOV
3881
        return c.db.View(func(tx walletdb.ReadTx) error {
×
UNCOV
3882
                return cb(&nodeTraverserSession{
×
UNCOV
3883
                        db: c,
×
UNCOV
3884
                        tx: tx,
×
UNCOV
3885
                })
×
UNCOV
3886
        }, func() {})
×
3887
}
3888

3889
// nodeTraverserSession implements the NodeTraverser interface but with a
3890
// backing read only transaction for a consistent view of the graph.
3891
type nodeTraverserSession struct {
3892
        tx kvdb.RTx
3893
        db *KVStore
3894
}
3895

3896
// ForEachNodeDirectedChannel calls the callback for every channel of the given
3897
// node.
3898
//
3899
// NOTE: Part of the NodeTraverser interface.
3900
func (c *nodeTraverserSession) ForEachNodeDirectedChannel(nodePub route.Vertex,
UNCOV
3901
        cb func(channel *DirectedChannel) error) error {
×
UNCOV
3902

×
UNCOV
3903
        return c.db.forEachNodeDirectedChannel(c.tx, nodePub, cb)
×
UNCOV
3904
}
×
3905

3906
// FetchNodeFeatures returns the features of the given node. If the node is
3907
// unknown, assume no additional features are supported.
3908
//
3909
// NOTE: Part of the NodeTraverser interface.
3910
func (c *nodeTraverserSession) FetchNodeFeatures(nodePub route.Vertex) (
UNCOV
3911
        *lnwire.FeatureVector, error) {
×
UNCOV
3912

×
UNCOV
3913
        return c.db.fetchNodeFeatures(c.tx, nodePub)
×
UNCOV
3914
}
×
3915

3916
func putLightningNode(nodeBucket, aliasBucket, updateIndex kvdb.RwBucket,
3917
        node *models.LightningNode) error {
3✔
3918

3✔
3919
        var (
3✔
3920
                scratch [16]byte
3✔
3921
                b       bytes.Buffer
3✔
3922
        )
3✔
3923

3✔
3924
        pub, err := node.PubKey()
3✔
3925
        if err != nil {
3✔
3926
                return err
×
3927
        }
×
3928
        nodePub := pub.SerializeCompressed()
3✔
3929

3✔
3930
        // If the node has the update time set, write it, else write 0.
3✔
3931
        updateUnix := uint64(0)
3✔
3932
        if node.LastUpdate.Unix() > 0 {
6✔
3933
                updateUnix = uint64(node.LastUpdate.Unix())
3✔
3934
        }
3✔
3935

3936
        byteOrder.PutUint64(scratch[:8], updateUnix)
3✔
3937
        if _, err := b.Write(scratch[:8]); err != nil {
3✔
3938
                return err
×
3939
        }
×
3940

3941
        if _, err := b.Write(nodePub); err != nil {
3✔
3942
                return err
×
3943
        }
×
3944

3945
        // If we got a node announcement for this node, we will have the rest
3946
        // of the data available. If not, we don't have more data to write.
3947
        if !node.HaveNodeAnnouncement {
6✔
3948
                // Write HaveNodeAnnouncement=0.
3✔
3949
                byteOrder.PutUint16(scratch[:2], 0)
3✔
3950
                if _, err := b.Write(scratch[:2]); err != nil {
3✔
3951
                        return err
×
3952
                }
×
3953

3954
                return nodeBucket.Put(nodePub, b.Bytes())
3✔
3955
        }
3956

3957
        // Write HaveNodeAnnouncement=1.
3958
        byteOrder.PutUint16(scratch[:2], 1)
3✔
3959
        if _, err := b.Write(scratch[:2]); err != nil {
3✔
3960
                return err
×
3961
        }
×
3962

3963
        if err := binary.Write(&b, byteOrder, node.Color.R); err != nil {
3✔
3964
                return err
×
3965
        }
×
3966
        if err := binary.Write(&b, byteOrder, node.Color.G); err != nil {
3✔
3967
                return err
×
3968
        }
×
3969
        if err := binary.Write(&b, byteOrder, node.Color.B); err != nil {
3✔
3970
                return err
×
3971
        }
×
3972

3973
        if err := wire.WriteVarString(&b, 0, node.Alias); err != nil {
3✔
3974
                return err
×
3975
        }
×
3976

3977
        if err := node.Features.Encode(&b); err != nil {
3✔
3978
                return err
×
3979
        }
×
3980

3981
        numAddresses := uint16(len(node.Addresses))
3✔
3982
        byteOrder.PutUint16(scratch[:2], numAddresses)
3✔
3983
        if _, err := b.Write(scratch[:2]); err != nil {
3✔
3984
                return err
×
3985
        }
×
3986

3987
        for _, address := range node.Addresses {
6✔
3988
                if err := SerializeAddr(&b, address); err != nil {
3✔
3989
                        return err
×
3990
                }
×
3991
        }
3992

3993
        sigLen := len(node.AuthSigBytes)
3✔
3994
        if sigLen > 80 {
3✔
3995
                return fmt.Errorf("max sig len allowed is 80, had %v",
×
3996
                        sigLen)
×
3997
        }
×
3998

3999
        err = wire.WriteVarBytes(&b, 0, node.AuthSigBytes)
3✔
4000
        if err != nil {
3✔
4001
                return err
×
4002
        }
×
4003

4004
        if len(node.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes {
3✔
4005
                return ErrTooManyExtraOpaqueBytes(len(node.ExtraOpaqueData))
×
4006
        }
×
4007
        err = wire.WriteVarBytes(&b, 0, node.ExtraOpaqueData)
3✔
4008
        if err != nil {
3✔
4009
                return err
×
4010
        }
×
4011

4012
        if err := aliasBucket.Put(nodePub, []byte(node.Alias)); err != nil {
3✔
4013
                return err
×
4014
        }
×
4015

4016
        // With the alias bucket updated, we'll now update the index that
4017
        // tracks the time series of node updates.
4018
        var indexKey [8 + 33]byte
3✔
4019
        byteOrder.PutUint64(indexKey[:8], updateUnix)
3✔
4020
        copy(indexKey[8:], nodePub)
3✔
4021

3✔
4022
        // If there was already an old index entry for this node, then we'll
3✔
4023
        // delete the old one before we write the new entry.
3✔
4024
        if nodeBytes := nodeBucket.Get(nodePub); nodeBytes != nil {
6✔
4025
                // Extract out the old update time so we can reconstruct the
3✔
4026
                // prior index key to delete it from the index.
3✔
4027
                oldUpdateTime := nodeBytes[:8]
3✔
4028

3✔
4029
                var oldIndexKey [8 + 33]byte
3✔
4030
                copy(oldIndexKey[:8], oldUpdateTime)
3✔
4031
                copy(oldIndexKey[8:], nodePub)
3✔
4032

3✔
4033
                if err := updateIndex.Delete(oldIndexKey[:]); err != nil {
3✔
4034
                        return err
×
4035
                }
×
4036
        }
4037

4038
        if err := updateIndex.Put(indexKey[:], nil); err != nil {
3✔
4039
                return err
×
4040
        }
×
4041

4042
        return nodeBucket.Put(nodePub, b.Bytes())
3✔
4043
}
4044

4045
func fetchLightningNode(nodeBucket kvdb.RBucket,
4046
        nodePub []byte) (models.LightningNode, error) {
3✔
4047

3✔
4048
        nodeBytes := nodeBucket.Get(nodePub)
3✔
4049
        if nodeBytes == nil {
6✔
4050
                return models.LightningNode{}, ErrGraphNodeNotFound
3✔
4051
        }
3✔
4052

4053
        nodeReader := bytes.NewReader(nodeBytes)
3✔
4054

3✔
4055
        return deserializeLightningNode(nodeReader)
3✔
4056
}
4057

4058
func deserializeLightningNodeCacheable(r io.Reader) (route.Vertex,
4059
        *lnwire.FeatureVector, error) {
3✔
4060

3✔
4061
        var (
3✔
4062
                pubKey      route.Vertex
3✔
4063
                features    = lnwire.EmptyFeatureVector()
3✔
4064
                nodeScratch [8]byte
3✔
4065
        )
3✔
4066

3✔
4067
        // Skip ahead:
3✔
4068
        // - LastUpdate (8 bytes)
3✔
4069
        if _, err := r.Read(nodeScratch[:]); err != nil {
3✔
4070
                return pubKey, nil, err
×
4071
        }
×
4072

4073
        if _, err := io.ReadFull(r, pubKey[:]); err != nil {
3✔
4074
                return pubKey, nil, err
×
4075
        }
×
4076

4077
        // Read the node announcement flag.
4078
        if _, err := r.Read(nodeScratch[:2]); err != nil {
3✔
4079
                return pubKey, nil, err
×
4080
        }
×
4081
        hasNodeAnn := byteOrder.Uint16(nodeScratch[:2])
3✔
4082

3✔
4083
        // The rest of the data is optional, and will only be there if we got a
3✔
4084
        // node announcement for this node.
3✔
4085
        if hasNodeAnn == 0 {
6✔
4086
                return pubKey, features, nil
3✔
4087
        }
3✔
4088

4089
        // We did get a node announcement for this node, so we'll have the rest
4090
        // of the data available.
4091
        var rgb uint8
3✔
4092
        if err := binary.Read(r, byteOrder, &rgb); err != nil {
3✔
4093
                return pubKey, nil, err
×
4094
        }
×
4095
        if err := binary.Read(r, byteOrder, &rgb); err != nil {
3✔
4096
                return pubKey, nil, err
×
4097
        }
×
4098
        if err := binary.Read(r, byteOrder, &rgb); err != nil {
3✔
4099
                return pubKey, nil, err
×
4100
        }
×
4101

4102
        if _, err := wire.ReadVarString(r, 0); err != nil {
3✔
4103
                return pubKey, nil, err
×
4104
        }
×
4105

4106
        if err := features.Decode(r); err != nil {
3✔
4107
                return pubKey, nil, err
×
4108
        }
×
4109

4110
        return pubKey, features, nil
3✔
4111
}
4112

4113
func deserializeLightningNode(r io.Reader) (models.LightningNode, error) {
3✔
4114
        var (
3✔
4115
                node    models.LightningNode
3✔
4116
                scratch [8]byte
3✔
4117
                err     error
3✔
4118
        )
3✔
4119

3✔
4120
        // Always populate a feature vector, even if we don't have a node
3✔
4121
        // announcement and short circuit below.
3✔
4122
        node.Features = lnwire.EmptyFeatureVector()
3✔
4123

3✔
4124
        if _, err := r.Read(scratch[:]); err != nil {
3✔
4125
                return models.LightningNode{}, err
×
4126
        }
×
4127

4128
        unix := int64(byteOrder.Uint64(scratch[:]))
3✔
4129
        node.LastUpdate = time.Unix(unix, 0)
3✔
4130

3✔
4131
        if _, err := io.ReadFull(r, node.PubKeyBytes[:]); err != nil {
3✔
4132
                return models.LightningNode{}, err
×
4133
        }
×
4134

4135
        if _, err := r.Read(scratch[:2]); err != nil {
3✔
4136
                return models.LightningNode{}, err
×
4137
        }
×
4138

4139
        hasNodeAnn := byteOrder.Uint16(scratch[:2])
3✔
4140
        if hasNodeAnn == 1 {
6✔
4141
                node.HaveNodeAnnouncement = true
3✔
4142
        } else {
6✔
4143
                node.HaveNodeAnnouncement = false
3✔
4144
        }
3✔
4145

4146
        // The rest of the data is optional, and will only be there if we got a
4147
        // node announcement for this node.
4148
        if !node.HaveNodeAnnouncement {
6✔
4149
                return node, nil
3✔
4150
        }
3✔
4151

4152
        // We did get a node announcement for this node, so we'll have the rest
4153
        // of the data available.
4154
        if err := binary.Read(r, byteOrder, &node.Color.R); err != nil {
3✔
4155
                return models.LightningNode{}, err
×
4156
        }
×
4157
        if err := binary.Read(r, byteOrder, &node.Color.G); err != nil {
3✔
4158
                return models.LightningNode{}, err
×
4159
        }
×
4160
        if err := binary.Read(r, byteOrder, &node.Color.B); err != nil {
3✔
4161
                return models.LightningNode{}, err
×
4162
        }
×
4163

4164
        node.Alias, err = wire.ReadVarString(r, 0)
3✔
4165
        if err != nil {
3✔
4166
                return models.LightningNode{}, err
×
4167
        }
×
4168

4169
        err = node.Features.Decode(r)
3✔
4170
        if err != nil {
3✔
4171
                return models.LightningNode{}, err
×
4172
        }
×
4173

4174
        if _, err := r.Read(scratch[:2]); err != nil {
3✔
4175
                return models.LightningNode{}, err
×
4176
        }
×
4177
        numAddresses := int(byteOrder.Uint16(scratch[:2]))
3✔
4178

3✔
4179
        var addresses []net.Addr
3✔
4180
        for i := 0; i < numAddresses; i++ {
6✔
4181
                address, err := DeserializeAddr(r)
3✔
4182
                if err != nil {
3✔
4183
                        return models.LightningNode{}, err
×
4184
                }
×
4185
                addresses = append(addresses, address)
3✔
4186
        }
4187
        node.Addresses = addresses
3✔
4188

3✔
4189
        node.AuthSigBytes, err = wire.ReadVarBytes(r, 0, 80, "sig")
3✔
4190
        if err != nil {
3✔
4191
                return models.LightningNode{}, err
×
4192
        }
×
4193

4194
        // We'll try and see if there are any opaque bytes left. If not, then
4195
        // we'll ignore the EOF error and return the node as is.
4196
        extraBytes, err := wire.ReadVarBytes(
3✔
4197
                r, 0, MaxAllowedExtraOpaqueBytes, "blob",
3✔
4198
        )
3✔
4199
        switch {
3✔
4200
        case errors.Is(err, io.ErrUnexpectedEOF):
×
4201
        case errors.Is(err, io.EOF):
×
4202
        case err != nil:
×
4203
                return models.LightningNode{}, err
×
4204
        }
4205

4206
        if len(extraBytes) > 0 {
3✔
UNCOV
4207
                node.ExtraOpaqueData = extraBytes
×
UNCOV
4208
        }
×
4209

4210
        return node, nil
3✔
4211
}
4212

4213
func putChanEdgeInfo(edgeIndex kvdb.RwBucket,
4214
        edgeInfo *models.ChannelEdgeInfo, chanID [8]byte) error {
3✔
4215

3✔
4216
        var b bytes.Buffer
3✔
4217

3✔
4218
        if _, err := b.Write(edgeInfo.NodeKey1Bytes[:]); err != nil {
3✔
4219
                return err
×
4220
        }
×
4221
        if _, err := b.Write(edgeInfo.NodeKey2Bytes[:]); err != nil {
3✔
4222
                return err
×
4223
        }
×
4224
        if _, err := b.Write(edgeInfo.BitcoinKey1Bytes[:]); err != nil {
3✔
4225
                return err
×
4226
        }
×
4227
        if _, err := b.Write(edgeInfo.BitcoinKey2Bytes[:]); err != nil {
3✔
4228
                return err
×
4229
        }
×
4230

4231
        if err := wire.WriteVarBytes(&b, 0, edgeInfo.Features); err != nil {
3✔
4232
                return err
×
4233
        }
×
4234

4235
        authProof := edgeInfo.AuthProof
3✔
4236
        var nodeSig1, nodeSig2, bitcoinSig1, bitcoinSig2 []byte
3✔
4237
        if authProof != nil {
6✔
4238
                nodeSig1 = authProof.NodeSig1Bytes
3✔
4239
                nodeSig2 = authProof.NodeSig2Bytes
3✔
4240
                bitcoinSig1 = authProof.BitcoinSig1Bytes
3✔
4241
                bitcoinSig2 = authProof.BitcoinSig2Bytes
3✔
4242
        }
3✔
4243

4244
        if err := wire.WriteVarBytes(&b, 0, nodeSig1); err != nil {
3✔
4245
                return err
×
4246
        }
×
4247
        if err := wire.WriteVarBytes(&b, 0, nodeSig2); err != nil {
3✔
4248
                return err
×
4249
        }
×
4250
        if err := wire.WriteVarBytes(&b, 0, bitcoinSig1); err != nil {
3✔
4251
                return err
×
4252
        }
×
4253
        if err := wire.WriteVarBytes(&b, 0, bitcoinSig2); err != nil {
3✔
4254
                return err
×
4255
        }
×
4256

4257
        if err := WriteOutpoint(&b, &edgeInfo.ChannelPoint); err != nil {
3✔
4258
                return err
×
4259
        }
×
4260
        err := binary.Write(&b, byteOrder, uint64(edgeInfo.Capacity))
3✔
4261
        if err != nil {
3✔
4262
                return err
×
4263
        }
×
4264
        if _, err := b.Write(chanID[:]); err != nil {
3✔
4265
                return err
×
4266
        }
×
4267
        if _, err := b.Write(edgeInfo.ChainHash[:]); err != nil {
3✔
4268
                return err
×
4269
        }
×
4270

4271
        if len(edgeInfo.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes {
3✔
4272
                return ErrTooManyExtraOpaqueBytes(len(edgeInfo.ExtraOpaqueData))
×
4273
        }
×
4274
        err = wire.WriteVarBytes(&b, 0, edgeInfo.ExtraOpaqueData)
3✔
4275
        if err != nil {
3✔
4276
                return err
×
4277
        }
×
4278

4279
        return edgeIndex.Put(chanID[:], b.Bytes())
3✔
4280
}
4281

4282
func fetchChanEdgeInfo(edgeIndex kvdb.RBucket,
4283
        chanID []byte) (models.ChannelEdgeInfo, error) {
3✔
4284

3✔
4285
        edgeInfoBytes := edgeIndex.Get(chanID)
3✔
4286
        if edgeInfoBytes == nil {
6✔
4287
                return models.ChannelEdgeInfo{}, ErrEdgeNotFound
3✔
4288
        }
3✔
4289

4290
        edgeInfoReader := bytes.NewReader(edgeInfoBytes)
3✔
4291

3✔
4292
        return deserializeChanEdgeInfo(edgeInfoReader)
3✔
4293
}
4294

4295
func deserializeChanEdgeInfo(r io.Reader) (models.ChannelEdgeInfo, error) {
3✔
4296
        var (
3✔
4297
                err      error
3✔
4298
                edgeInfo models.ChannelEdgeInfo
3✔
4299
        )
3✔
4300

3✔
4301
        if _, err := io.ReadFull(r, edgeInfo.NodeKey1Bytes[:]); err != nil {
3✔
4302
                return models.ChannelEdgeInfo{}, err
×
4303
        }
×
4304
        if _, err := io.ReadFull(r, edgeInfo.NodeKey2Bytes[:]); err != nil {
3✔
4305
                return models.ChannelEdgeInfo{}, err
×
4306
        }
×
4307
        if _, err := io.ReadFull(r, edgeInfo.BitcoinKey1Bytes[:]); err != nil {
3✔
4308
                return models.ChannelEdgeInfo{}, err
×
4309
        }
×
4310
        if _, err := io.ReadFull(r, edgeInfo.BitcoinKey2Bytes[:]); err != nil {
3✔
4311
                return models.ChannelEdgeInfo{}, err
×
4312
        }
×
4313

4314
        edgeInfo.Features, err = wire.ReadVarBytes(r, 0, 900, "features")
3✔
4315
        if err != nil {
3✔
4316
                return models.ChannelEdgeInfo{}, err
×
4317
        }
×
4318

4319
        proof := &models.ChannelAuthProof{}
3✔
4320

3✔
4321
        proof.NodeSig1Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
3✔
4322
        if err != nil {
3✔
4323
                return models.ChannelEdgeInfo{}, err
×
4324
        }
×
4325
        proof.NodeSig2Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
3✔
4326
        if err != nil {
3✔
4327
                return models.ChannelEdgeInfo{}, err
×
4328
        }
×
4329
        proof.BitcoinSig1Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
3✔
4330
        if err != nil {
3✔
4331
                return models.ChannelEdgeInfo{}, err
×
4332
        }
×
4333
        proof.BitcoinSig2Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
3✔
4334
        if err != nil {
3✔
4335
                return models.ChannelEdgeInfo{}, err
×
4336
        }
×
4337

4338
        if !proof.IsEmpty() {
6✔
4339
                edgeInfo.AuthProof = proof
3✔
4340
        }
3✔
4341

4342
        edgeInfo.ChannelPoint = wire.OutPoint{}
3✔
4343
        if err := ReadOutpoint(r, &edgeInfo.ChannelPoint); err != nil {
3✔
4344
                return models.ChannelEdgeInfo{}, err
×
4345
        }
×
4346
        if err := binary.Read(r, byteOrder, &edgeInfo.Capacity); err != nil {
3✔
4347
                return models.ChannelEdgeInfo{}, err
×
4348
        }
×
4349
        if err := binary.Read(r, byteOrder, &edgeInfo.ChannelID); err != nil {
3✔
4350
                return models.ChannelEdgeInfo{}, err
×
4351
        }
×
4352

4353
        if _, err := io.ReadFull(r, edgeInfo.ChainHash[:]); err != nil {
3✔
4354
                return models.ChannelEdgeInfo{}, err
×
4355
        }
×
4356

4357
        // We'll try and see if there are any opaque bytes left, if not, then
4358
        // we'll ignore the EOF error and return the edge as is.
4359
        edgeInfo.ExtraOpaqueData, err = wire.ReadVarBytes(
3✔
4360
                r, 0, MaxAllowedExtraOpaqueBytes, "blob",
3✔
4361
        )
3✔
4362
        switch {
3✔
4363
        case errors.Is(err, io.ErrUnexpectedEOF):
×
4364
        case errors.Is(err, io.EOF):
×
4365
        case err != nil:
×
4366
                return models.ChannelEdgeInfo{}, err
×
4367
        }
4368

4369
        return edgeInfo, nil
3✔
4370
}
4371

4372
func putChanEdgePolicy(edges kvdb.RwBucket, edge *models.ChannelEdgePolicy,
4373
        from, to []byte) error {
3✔
4374

3✔
4375
        var edgeKey [33 + 8]byte
3✔
4376
        copy(edgeKey[:], from)
3✔
4377
        byteOrder.PutUint64(edgeKey[33:], edge.ChannelID)
3✔
4378

3✔
4379
        var b bytes.Buffer
3✔
4380
        if err := serializeChanEdgePolicy(&b, edge, to); err != nil {
3✔
UNCOV
4381
                return err
×
UNCOV
4382
        }
×
4383

4384
        // Before we write out the new edge, we'll create a new entry in the
4385
        // update index in order to keep it fresh.
4386
        updateUnix := uint64(edge.LastUpdate.Unix())
3✔
4387
        var indexKey [8 + 8]byte
3✔
4388
        byteOrder.PutUint64(indexKey[:8], updateUnix)
3✔
4389
        byteOrder.PutUint64(indexKey[8:], edge.ChannelID)
3✔
4390

3✔
4391
        updateIndex, err := edges.CreateBucketIfNotExists(edgeUpdateIndexBucket)
3✔
4392
        if err != nil {
3✔
4393
                return err
×
4394
        }
×
4395

4396
        // If there was already an entry for this edge, then we'll need to
4397
        // delete the old one to ensure we don't leave around any after-images.
4398
        // An unknown policy value does not have an update time recorded, so
4399
        // it also does not need to be removed.
4400
        if edgeBytes := edges.Get(edgeKey[:]); edgeBytes != nil &&
3✔
4401
                !bytes.Equal(edgeBytes, unknownPolicy) {
6✔
4402

3✔
4403
                // In order to delete the old entry, we'll need to obtain the
3✔
4404
                // *prior* update time in order to delete it. To do this, we'll
3✔
4405
                // need to deserialize the existing policy within the database
3✔
4406
                // (now outdated by the new one), and delete its corresponding
3✔
4407
                // entry within the update index. We'll ignore any
3✔
4408
                // ErrEdgePolicyOptionalFieldNotFound or ErrParsingExtraTLVBytes
3✔
4409
                // errors, as we only need the channel ID and update time to
3✔
4410
                // delete the entry.
3✔
4411
                //
3✔
4412
                // TODO(halseth): get rid of these invalid policies in a
3✔
4413
                // migration.
3✔
4414
                // TODO(elle): complete the above TODO in migration from kvdb
3✔
4415
                // to SQL.
3✔
4416
                oldEdgePolicy, err := deserializeChanEdgePolicy(
3✔
4417
                        bytes.NewReader(edgeBytes),
3✔
4418
                )
3✔
4419
                if err != nil &&
3✔
4420
                        !errors.Is(err, ErrEdgePolicyOptionalFieldNotFound) &&
3✔
4421
                        !errors.Is(err, ErrParsingExtraTLVBytes) {
3✔
4422

×
4423
                        return err
×
4424
                }
×
4425

4426
                oldUpdateTime := uint64(oldEdgePolicy.LastUpdate.Unix())
3✔
4427

3✔
4428
                var oldIndexKey [8 + 8]byte
3✔
4429
                byteOrder.PutUint64(oldIndexKey[:8], oldUpdateTime)
3✔
4430
                byteOrder.PutUint64(oldIndexKey[8:], edge.ChannelID)
3✔
4431

3✔
4432
                if err := updateIndex.Delete(oldIndexKey[:]); err != nil {
3✔
4433
                        return err
×
4434
                }
×
4435
        }
4436

4437
        if err := updateIndex.Put(indexKey[:], nil); err != nil {
3✔
4438
                return err
×
4439
        }
×
4440

4441
        err = updateEdgePolicyDisabledIndex(
3✔
4442
                edges, edge.ChannelID,
3✔
4443
                edge.ChannelFlags&lnwire.ChanUpdateDirection > 0,
3✔
4444
                edge.IsDisabled(),
3✔
4445
        )
3✔
4446
        if err != nil {
3✔
4447
                return err
×
4448
        }
×
4449

4450
        return edges.Put(edgeKey[:], b.Bytes())
3✔
4451
}
4452

4453
// updateEdgePolicyDisabledIndex is used to update the disabledEdgePolicyIndex
4454
// bucket by either adding a new disabled ChannelEdgePolicy or removing an existing
4455
// one.
4456
// The direction represents the direction of the edge and disabled is used for
4457
// deciding whether to remove or add an entry to the bucket.
4458
// In general a channel is disabled if two entries for the same chanID exist
4459
// in this bucket.
4460
// Maintaining the bucket this way allows fast retrieval of disabled
4461
// channels, for example when pruning is needed.
4462
func updateEdgePolicyDisabledIndex(edges kvdb.RwBucket, chanID uint64,
4463
        direction bool, disabled bool) error {
3✔
4464

3✔
4465
        var disabledEdgeKey [8 + 1]byte
3✔
4466
        byteOrder.PutUint64(disabledEdgeKey[0:], chanID)
3✔
4467
        if direction {
6✔
4468
                disabledEdgeKey[8] = 1
3✔
4469
        }
3✔
4470

4471
        disabledEdgePolicyIndex, err := edges.CreateBucketIfNotExists(
3✔
4472
                disabledEdgePolicyBucket,
3✔
4473
        )
3✔
4474
        if err != nil {
3✔
4475
                return err
×
4476
        }
×
4477

4478
        if disabled {
6✔
4479
                return disabledEdgePolicyIndex.Put(disabledEdgeKey[:], []byte{})
3✔
4480
        }
3✔
4481

4482
        return disabledEdgePolicyIndex.Delete(disabledEdgeKey[:])
3✔
4483
}
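
// Illustrative sketch (hypothetical helper): scanning the disabled-policy
// index maintained above to find channels for which both directions are
// disabled, i.e. those with two entries for the same channel ID. It assumes
// an existing *KVStore named store.
func exampleFullyDisabledChannels(store *KVStore) ([]uint64, error) {
        var chanIDs []uint64

        err := kvdb.View(store.db, func(tx kvdb.RTx) error {
                edges := tx.ReadBucket(edgeBucket)
                if edges == nil {
                        return nil
                }
                index := edges.NestedReadBucket(disabledEdgePolicyBucket)
                if index == nil {
                        return nil
                }

                // Each key is the 8-byte channel ID followed by a one-byte
                // direction, so we count how many directions are disabled per
                // channel.
                count := make(map[uint64]int)
                err := index.ForEach(func(k, _ []byte) error {
                        count[byteOrder.Uint64(k[:8])]++
                        return nil
                })
                if err != nil {
                        return err
                }

                for chanID, n := range count {
                        if n == 2 {
                                chanIDs = append(chanIDs, chanID)
                        }
                }

                return nil
        }, func() {
                chanIDs = nil
        })

        return chanIDs, err
}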
4484

4485
// putChanEdgePolicyUnknown marks the edge policy as unknown
4486
// in the edges bucket.
4487
func putChanEdgePolicyUnknown(edges kvdb.RwBucket, channelID uint64,
4488
        from []byte) error {
3✔
4489

3✔
4490
        var edgeKey [33 + 8]byte
3✔
4491
        copy(edgeKey[:], from)
3✔
4492
        byteOrder.PutUint64(edgeKey[33:], channelID)
3✔
4493

3✔
4494
        if edges.Get(edgeKey[:]) != nil {
3✔
4495
                return fmt.Errorf("cannot write unknown policy for channel %v "+
×
4496
                        " when there is already a policy present", channelID)
×
4497
        }
×
4498

4499
        return edges.Put(edgeKey[:], unknownPolicy)
3✔
4500
}
4501

4502
func fetchChanEdgePolicy(edges kvdb.RBucket, chanID []byte,
4503
        nodePub []byte) (*models.ChannelEdgePolicy, error) {
3✔
4504

3✔
4505
        var edgeKey [33 + 8]byte
3✔
4506
        copy(edgeKey[:], nodePub)
3✔
4507
        copy(edgeKey[33:], chanID)
3✔
4508

3✔
4509
        edgeBytes := edges.Get(edgeKey[:])
3✔
4510
        if edgeBytes == nil {
3✔
4511
                return nil, ErrEdgeNotFound
×
4512
        }
×
4513

4514
        // No need to deserialize unknown policy.
4515
        if bytes.Equal(edgeBytes, unknownPolicy) {
6✔
4516
                return nil, nil
3✔
4517
        }
3✔
4518

4519
        edgeReader := bytes.NewReader(edgeBytes)
3✔
4520

3✔
4521
        ep, err := deserializeChanEdgePolicy(edgeReader)
3✔
4522
        switch {
3✔
4523
        // If the db policy was missing an expected optional field, we return
4524
        // nil as if the policy was unknown.
UNCOV
4525
        case errors.Is(err, ErrEdgePolicyOptionalFieldNotFound):
×
UNCOV
4526
                return nil, nil
×
4527

4528
        // If the policy contains invalid TLV bytes, we return nil as if
4529
        // the policy was unknown.
4530
        case errors.Is(err, ErrParsingExtraTLVBytes):
×
4531
                return nil, nil
×
4532

4533
        case err != nil:
×
4534
                return nil, err
×
4535
        }
4536

4537
        return ep, nil
3✔
4538
}
4539

4540
func fetchChanEdgePolicies(edgeIndex kvdb.RBucket, edges kvdb.RBucket,
4541
        chanID []byte) (*models.ChannelEdgePolicy, *models.ChannelEdgePolicy,
4542
        error) {
3✔
4543

3✔
4544
        edgeInfo := edgeIndex.Get(chanID)
3✔
4545
        if edgeInfo == nil {
3✔
4546
                return nil, nil, fmt.Errorf("%w: chanID=%x", ErrEdgeNotFound,
×
4547
                        chanID)
×
4548
        }
×
4549

4550
        // The first node is contained within the first half of the edge
4551
        // information. We only propagate the error here and below if it's
4552
        // something other than edge non-existence.
4553
        node1Pub := edgeInfo[:33]
3✔
4554
        edge1, err := fetchChanEdgePolicy(edges, chanID, node1Pub)
3✔
4555
        if err != nil {
3✔
4556
                return nil, nil, fmt.Errorf("%w: node1Pub=%x", ErrEdgeNotFound,
×
4557
                        node1Pub)
×
4558
        }
×
4559

4560
        // Similarly, the second node is contained within the latter
4561
        // half of the edge information.
4562
        node2Pub := edgeInfo[33:66]
3✔
4563
        edge2, err := fetchChanEdgePolicy(edges, chanID, node2Pub)
3✔
4564
        if err != nil {
3✔
4565
                return nil, nil, fmt.Errorf("%w: node2Pub=%x", ErrEdgeNotFound,
×
4566
                        node2Pub)
×
4567
        }
×
4568

4569
        return edge1, edge2, nil
3✔
4570
}
4571

4572
func serializeChanEdgePolicy(w io.Writer, edge *models.ChannelEdgePolicy,
4573
        to []byte) error {
3✔
4574

3✔
4575
        err := wire.WriteVarBytes(w, 0, edge.SigBytes)
3✔
4576
        if err != nil {
3✔
4577
                return err
×
4578
        }
×
4579

4580
        if err := binary.Write(w, byteOrder, edge.ChannelID); err != nil {
3✔
4581
                return err
×
4582
        }
×
4583

4584
        var scratch [8]byte
3✔
4585
        updateUnix := uint64(edge.LastUpdate.Unix())
3✔
4586
        byteOrder.PutUint64(scratch[:], updateUnix)
3✔
4587
        if _, err := w.Write(scratch[:]); err != nil {
3✔
4588
                return err
×
4589
        }
×
4590

4591
        if err := binary.Write(w, byteOrder, edge.MessageFlags); err != nil {
3✔
4592
                return err
×
4593
        }
×
4594
        if err := binary.Write(w, byteOrder, edge.ChannelFlags); err != nil {
3✔
4595
                return err
×
4596
        }
×
4597
        if err := binary.Write(w, byteOrder, edge.TimeLockDelta); err != nil {
3✔
4598
                return err
×
4599
        }
×
4600
        if err := binary.Write(w, byteOrder, uint64(edge.MinHTLC)); err != nil {
3✔
4601
                return err
×
4602
        }
×
4603
        err = binary.Write(w, byteOrder, uint64(edge.FeeBaseMSat))
3✔
4604
        if err != nil {
3✔
4605
                return err
×
4606
        }
×
4607
        err = binary.Write(
3✔
4608
                w, byteOrder, uint64(edge.FeeProportionalMillionths),
3✔
4609
        )
3✔
4610
        if err != nil {
3✔
4611
                return err
×
4612
        }
×
4613

4614
        if _, err := w.Write(to); err != nil {
3✔
4615
                return err
×
4616
        }
×
4617

4618
        // If the max_htlc field is present, we write it. To be compatible with
4619
        // older versions that weren't aware of this field, we write it as part
4620
        // of the opaque data.
4621
        // TODO(halseth): clean up when moving to TLV.
4622
        var opaqueBuf bytes.Buffer
3✔
4623
        if edge.MessageFlags.HasMaxHtlc() {
6✔
4624
                err := binary.Write(&opaqueBuf, byteOrder, uint64(edge.MaxHTLC))
3✔
4625
                if err != nil {
3✔
4626
                        return err
×
4627
                }
×
4628
        }
4629

4630
        // Validate that the ExtraOpaqueData is in fact a valid TLV stream.
4631
        err = edge.ExtraOpaqueData.ValidateTLV()
3✔
4632
        if err != nil {
3✔
UNCOV
4633
                return fmt.Errorf("%w: %w", ErrParsingExtraTLVBytes, err)
×
UNCOV
4634
        }
×
4635

4636
        if len(edge.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes {
3✔
4637
                return ErrTooManyExtraOpaqueBytes(len(edge.ExtraOpaqueData))
×
4638
        }
×
4639
        if _, err := opaqueBuf.Write(edge.ExtraOpaqueData); err != nil {
3✔
4640
                return err
×
4641
        }
×
4642

4643
        if err := wire.WriteVarBytes(w, 0, opaqueBuf.Bytes()); err != nil {
3✔
4644
                return err
×
4645
        }
×
4646

4647
        return nil
3✔
4648
}
4649

4650
func deserializeChanEdgePolicy(r io.Reader) (*models.ChannelEdgePolicy, error) {
3✔
4651
        // Deserialize the policy. Note that in case an optional field is not
3✔
4652
        // found or if the edge has invalid TLV data, then both an error and a
3✔
4653
        // populated policy object are returned so that the caller can decide
3✔
4654
        // if it still wants to use the edge or not.
3✔
4655
        edge, err := deserializeChanEdgePolicyRaw(r)
3✔
4656
        if err != nil &&
3✔
4657
                !errors.Is(err, ErrEdgePolicyOptionalFieldNotFound) &&
3✔
4658
                !errors.Is(err, ErrParsingExtraTLVBytes) {
3✔
4659

×
4660
                return nil, err
×
4661
        }
×
4662

4663
        return edge, err
3✔
4664
}
4665

4666
func deserializeChanEdgePolicyRaw(r io.Reader) (*models.ChannelEdgePolicy,
4667
        error) {
3✔
4668

3✔
4669
        edge := &models.ChannelEdgePolicy{}
3✔
4670

3✔
4671
        var err error
3✔
4672
        edge.SigBytes, err = wire.ReadVarBytes(r, 0, 80, "sig")
3✔
4673
        if err != nil {
3✔
4674
                return nil, err
×
4675
        }
×
4676

4677
        if err := binary.Read(r, byteOrder, &edge.ChannelID); err != nil {
3✔
4678
                return nil, err
×
4679
        }
×
4680

4681
        var scratch [8]byte
3✔
4682
        if _, err := r.Read(scratch[:]); err != nil {
3✔
4683
                return nil, err
×
4684
        }
×
4685
        unix := int64(byteOrder.Uint64(scratch[:]))
3✔
4686
        edge.LastUpdate = time.Unix(unix, 0)
3✔
4687

3✔
4688
        if err := binary.Read(r, byteOrder, &edge.MessageFlags); err != nil {
3✔
4689
                return nil, err
×
4690
        }
×
4691
        if err := binary.Read(r, byteOrder, &edge.ChannelFlags); err != nil {
3✔
4692
                return nil, err
×
4693
        }
×
4694
        if err := binary.Read(r, byteOrder, &edge.TimeLockDelta); err != nil {
3✔
4695
                return nil, err
×
4696
        }
×
4697

4698
        var n uint64
3✔
4699
        if err := binary.Read(r, byteOrder, &n); err != nil {
3✔
4700
                return nil, err
×
4701
        }
×
4702
        edge.MinHTLC = lnwire.MilliSatoshi(n)
3✔
4703

3✔
4704
        if err := binary.Read(r, byteOrder, &n); err != nil {
3✔
4705
                return nil, err
×
4706
        }
×
4707
        edge.FeeBaseMSat = lnwire.MilliSatoshi(n)
3✔
4708

3✔
4709
        if err := binary.Read(r, byteOrder, &n); err != nil {
3✔
4710
                return nil, err
×
4711
        }
×
4712
        edge.FeeProportionalMillionths = lnwire.MilliSatoshi(n)
3✔
4713

3✔
4714
        if _, err := r.Read(edge.ToNode[:]); err != nil {
3✔
4715
                return nil, err
×
4716
        }
×
4717

4718
        // We'll try and see if there are any opaque bytes left. If not, then
4719
        // we'll ignore the EOF error and return the edge as is.
4720
        edge.ExtraOpaqueData, err = wire.ReadVarBytes(
3✔
4721
                r, 0, MaxAllowedExtraOpaqueBytes, "blob",
3✔
4722
        )
3✔
4723
        switch {
3✔
4724
        case errors.Is(err, io.ErrUnexpectedEOF):
×
UNCOV
4725
        case errors.Is(err, io.EOF):
×
4726
        case err != nil:
×
4727
                return nil, err
×
4728
        }
4729

4730
        // See if optional fields are present.
4731
        if edge.MessageFlags.HasMaxHtlc() {
6✔
4732
                // The max_htlc field should be at the beginning of the opaque
3✔
4733
                // bytes.
3✔
4734
                opq := edge.ExtraOpaqueData
3✔
4735

3✔
4736
                // If the max_htlc field is not present, it might be old data
3✔
4737
                // stored before this field was validated. We'll return the
3✔
4738
                // edge along with an error.
3✔
4739
                if len(opq) < 8 {
3✔
UNCOV
4740
                        return edge, ErrEdgePolicyOptionalFieldNotFound
×
UNCOV
4741
                }
×
4742

4743
                maxHtlc := byteOrder.Uint64(opq[:8])
3✔
4744
                edge.MaxHTLC = lnwire.MilliSatoshi(maxHtlc)
3✔
4745

3✔
4746
                // Exclude the parsed field from the rest of the opaque data.
3✔
4747
                edge.ExtraOpaqueData = opq[8:]
3✔
4748
        }
4749

4750
        // Attempt to extract the inbound fee from the opaque data. If we fail
4751
        // to parse the TLV here, we return an error but also return the edge
4752
        // so that the caller can still use it. This is for backwards
4753
        // compatibility in case we have already persisted some policies that
4754
        // have invalid TLV data.
4755
        var inboundFee lnwire.Fee
3✔
4756
        typeMap, err := edge.ExtraOpaqueData.ExtractRecords(&inboundFee)
3✔
4757
        if err != nil {
3✔
4758
                return edge, fmt.Errorf("%w: %w", ErrParsingExtraTLVBytes, err)
×
4759
        }
×
4760

4761
        val, ok := typeMap[lnwire.FeeRecordType]
3✔
4762
        if ok && val == nil {
6✔
4763
                edge.InboundFee = fn.Some(inboundFee)
3✔
4764
        }
3✔
4765

4766
        return edge, nil
3✔
4767
}
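
// Illustrative sketch (hypothetical helper): reading the optional inbound fee
// that deserializeChanEdgePolicyRaw extracts from the policy's extra TLV
// data. The field is exposed as an fn.Option, so callers only act on it when
// it was actually present on disk.
func exampleInboundFee(edge *models.ChannelEdgePolicy) {
        edge.InboundFee.WhenSome(func(fee lnwire.Fee) {
                fmt.Printf("inbound fee: base=%d msat, rate=%d ppm\n",
                        fee.BaseFee, fee.FeeRate)
        })
}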
4768

4769
// chanGraphNodeTx is an implementation of the NodeRTx interface backed by the
4770
// KVStore and a kvdb.RTx.
4771
type chanGraphNodeTx struct {
4772
        tx   kvdb.RTx
4773
        db   *KVStore
4774
        node *models.LightningNode
4775
}
4776

4777
// A compile-time constraint to ensure chanGraphNodeTx implements the NodeRTx
4778
// interface.
4779
var _ NodeRTx = (*chanGraphNodeTx)(nil)
4780

4781
func newChanGraphNodeTx(tx kvdb.RTx, db *KVStore,
4782
        node *models.LightningNode) *chanGraphNodeTx {
3✔
4783

3✔
4784
        return &chanGraphNodeTx{
3✔
4785
                tx:   tx,
3✔
4786
                db:   db,
3✔
4787
                node: node,
3✔
4788
        }
3✔
4789
}
3✔
4790

4791
// Node returns the raw information of the node.
4792
//
4793
// NOTE: This is a part of the NodeRTx interface.
4794
func (c *chanGraphNodeTx) Node() *models.LightningNode {
3✔
4795
        return c.node
3✔
4796
}
3✔
4797

4798
// FetchNode fetches the node with the given pub key under the same transaction
4799
// used to fetch the current node. The returned node is also a NodeRTx and any
4800
// operations on that NodeRTx will also be done under the same transaction.
4801
//
4802
// NOTE: This is a part of the NodeRTx interface.
UNCOV
4803
func (c *chanGraphNodeTx) FetchNode(nodePub route.Vertex) (NodeRTx, error) {
×
UNCOV
4804
        node, err := c.db.FetchLightningNodeTx(c.tx, nodePub)
×
UNCOV
4805
        if err != nil {
×
4806
                return nil, err
×
4807
        }
×
4808

UNCOV
4809
        return newChanGraphNodeTx(c.tx, c.db, node), nil
×
4810
}
4811

4812
// ForEachChannel can be used to iterate over the node's channels under
4813
// the same transaction used to fetch the node.
4814
//
4815
// NOTE: This is a part of the NodeRTx interface.
4816
func (c *chanGraphNodeTx) ForEachChannel(f func(*models.ChannelEdgeInfo,
UNCOV
4817
        *models.ChannelEdgePolicy, *models.ChannelEdgePolicy) error) error {
×
UNCOV
4818

×
UNCOV
4819
        return c.db.forEachNodeChannelTx(c.tx, c.node.PubKeyBytes,
×
UNCOV
4820
                func(_ kvdb.RTx, info *models.ChannelEdgeInfo, policy1,
×
UNCOV
4821
                        policy2 *models.ChannelEdgePolicy) error {
×
UNCOV
4822

×
UNCOV
4823
                        return f(info, policy1, policy2)
×
UNCOV
4824
                },
×
4825
        )
4826
}
4827

4828
// MakeTestGraph creates a new instance of the ChannelGraph for testing
4829
// purposes.
4830
//
4831
// NOTE: this helper currently creates a ChannelGraph that is only ever backed
4832
// by the `KVStore` of the `V1Store` interface.
UNCOV
4833
func MakeTestGraph(t testing.TB, opts ...ChanGraphOption) *ChannelGraph {
×
UNCOV
4834
        t.Helper()
×
UNCOV
4835

×
UNCOV
4836
        // Next, create KVStore for the first time.
×
UNCOV
4837
        backend, backendCleanup, err := kvdb.GetTestBackend(t.TempDir(), "cgr")
×
UNCOV
4838
        t.Cleanup(backendCleanup)
×
UNCOV
4839
        require.NoError(t, err)
×
UNCOV
4840
        t.Cleanup(func() {
×
UNCOV
4841
                require.NoError(t, backend.Close())
×
UNCOV
4842
        })
×
4843

UNCOV
4844
        graphStore, err := NewKVStore(backend)
×
UNCOV
4845
        require.NoError(t, err)
×
UNCOV
4846

×
UNCOV
4847
        graph, err := NewChannelGraph(graphStore, opts...)
×
UNCOV
4848
        require.NoError(t, err)
×
UNCOV
4849
        require.NoError(t, graph.Start())
×
UNCOV
4850
        t.Cleanup(func() {
×
UNCOV
4851
                require.NoError(t, graph.Stop())
×
UNCOV
4852
        })
×
4853

UNCOV
4854
        return graph
×
4855
}
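
// Illustrative sketch (hypothetical test): MakeTestGraph wires up the backend,
// the KVStore and the ChannelGraph, starts the graph, and registers cleanup
// via t.Cleanup, so a test only needs to call it and use the result.
func TestMakeTestGraphUsage(t *testing.T) {
        graph := MakeTestGraph(t)
        require.NotNil(t, graph)
}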