lightningnetwork / lnd / 15441417953

04 Jun 2025 11:45AM UTC coverage: 57.69% (-0.6%) from 58.311%
Pull Request #9897: multi: explicitly define InboundFees in ChannelUpdate and ChannelEdgePolicy
Merge 55141160a into aec16eee9

92 of 145 new or added lines in 11 files covered. (63.45%)

1821 existing lines in 31 files now uncovered.

96493 of 167262 relevant lines covered (57.69%)

1.21 hits per line

Source File
/graph/db/kv_store.go (68.21% covered)
1
package graphdb
2

3
import (
4
        "bytes"
5
        "context"
6
        "crypto/sha256"
7
        "encoding/binary"
8
        "errors"
9
        "fmt"
10
        "io"
11
        "math"
12
        "net"
13
        "sort"
14
        "sync"
15
        "testing"
16
        "time"
17

18
        "github.com/btcsuite/btcd/btcec/v2"
19
        "github.com/btcsuite/btcd/chaincfg/chainhash"
20
        "github.com/btcsuite/btcd/txscript"
21
        "github.com/btcsuite/btcd/wire"
22
        "github.com/btcsuite/btcwallet/walletdb"
23
        "github.com/lightningnetwork/lnd/aliasmgr"
24
        "github.com/lightningnetwork/lnd/batch"
25
        "github.com/lightningnetwork/lnd/fn/v2"
26
        "github.com/lightningnetwork/lnd/graph/db/models"
27
        "github.com/lightningnetwork/lnd/input"
28
        "github.com/lightningnetwork/lnd/kvdb"
29
        "github.com/lightningnetwork/lnd/lnwire"
30
        "github.com/lightningnetwork/lnd/routing/route"
31
        "github.com/stretchr/testify/require"
32
)
33

34
var (
35
        // nodeBucket is a bucket which houses all the vertices or nodes within
36
        // the channel graph. This bucket has a single sub-bucket which adds an
37
        // additional index from pubkey -> alias. Within the top-level of this
38
        // bucket, the key space maps a node's compressed public key to the
39
        // serialized information for that node. Additionally, there's a
40
        // special key "source" which stores the pubkey of the source node. The
41
        // source node is used as the starting point for all graph queries and
42
        // traversals. The graph is formed as a star-graph with the source node
43
        // at the center.
44
        //
45
        // maps: pubKey -> nodeInfo
46
        // maps: source -> selfPubKey
47
        nodeBucket = []byte("graph-node")
48

49
        // nodeUpdateIndexBucket is a sub-bucket of the nodeBucket. This bucket
50
        // will be used to quickly look up the "freshness" of a node's last
51
        // update to the network. The bucket only contains keys, and no values,
52
        // it's mapping:
53
        //
54
        // maps: updateTime || nodeID -> nil
55
        nodeUpdateIndexBucket = []byte("graph-node-update-index")
56

57
        // sourceKey is a special key that resides within the nodeBucket. The
58
        // sourceKey maps a key to the public key of the "self node".
59
        sourceKey = []byte("source")
60

61
        // aliasIndexBucket is a sub-bucket that's nested within the main
62
        // nodeBucket. This bucket maps the public key of a node to its
63
        // current alias. This bucket is provided as it can be used within a
64
        // future UI layer to add an additional degree of confirmation.
65
        aliasIndexBucket = []byte("alias")
66

67
        // edgeBucket is a bucket which houses all of the edge or channel
68
        // information within the channel graph. This bucket essentially acts
69
        // as an adjacency list, which in conjunction with a range scan, can be
70
        // used to iterate over all the incoming and outgoing edges for a
71
        // particular node. Keys in the bucket use a prefix scheme which leads
72
        // with the node's public key and ends with the compact edge ID.
73
        // For each chanID, there will be two entries within the bucket, as the
74
        // graph is directed: nodes may have different policies w.r.t to fees
75
        // for their respective directions.
76
        //
77
        // maps: pubKey || chanID -> channel edge policy for node
78
        edgeBucket = []byte("graph-edge")
79

80
        // unknownPolicy is represented as an empty slice. It is
81
        // used as the value in edgeBucket for unknown channel edge policies.
82
        // Unknown policies are still stored in the database to enable efficient
83
        // lookup of incoming channel edges.
84
        unknownPolicy = []byte{}
85

86
        // chanStart is an array of all zero bytes which is used to perform
87
        // range scans within the edgeBucket to obtain all of the outgoing
88
        // edges for a particular node.
89
        chanStart [8]byte
90

91
        // edgeIndexBucket is an index which can be used to iterate all edges
92
        // in the bucket, grouping them according to their in/out nodes.
93
        // Additionally, the items in this bucket also contain the complete
94
        // edge information for a channel. The edge information includes the
95
        // capacity of the channel, the nodes that made the channel, etc. This
96
        // bucket resides within the edgeBucket above. Creation of an edge
97
        // proceeds in two phases: first the edge is added to the edge index,
98
        // afterwards the edgeBucket can be updated with the latest details of
99
        // the edge as they are announced on the network.
100
        //
101
        // maps: chanID -> pubKey1 || pubKey2 || restofEdgeInfo
102
        edgeIndexBucket = []byte("edge-index")
103

104
        // edgeUpdateIndexBucket is a sub-bucket of the main edgeBucket. This
105
        // bucket contains an index which allows us to gauge the "freshness" of
106
        // a channel's last updates.
107
        //
108
        // maps: updateTime || chanID -> nil
109
        edgeUpdateIndexBucket = []byte("edge-update-index")
110

111
        // channelPointBucket maps a channel's full outpoint (txid:index) to
112
        // its short 8-byte channel ID. This bucket resides within the
113
        // edgeBucket above, and can be used to quickly remove an edge due to
114
        // the outpoint being spent, or to query for existence of a channel.
115
        //
116
        // maps: outPoint -> chanID
117
        channelPointBucket = []byte("chan-index")
118

119
        // zombieBucket is a sub-bucket of the main edgeBucket bucket
120
        // responsible for maintaining an index of zombie channels. Each entry
121
        // exists within the bucket as follows:
122
        //
123
        // maps: chanID -> pubKey1 || pubKey2
124
        //
125
        // The chanID represents the channel ID of the edge that is marked as a
126
        // zombie and is used as the key, which maps to the public keys of the
127
        // edge's participants.
128
        zombieBucket = []byte("zombie-index")
129

130
        // disabledEdgePolicyBucket is a sub-bucket of the main edgeBucket
131
        // bucket responsible for maintaining an index of disabled edge
132
        // policies. Each entry exists within the bucket as follows:
133
        //
134
        // maps: <chanID><direction> -> []byte{}
135
        //
136
        // The chanID represents the channel ID of the edge and the direction is
137
        // one byte representing the direction of the edge. The main purpose of
138
        // this index is to allow pruning disabled channels in a fast way
139
        // without the need to iterate all over the graph.
140
        disabledEdgePolicyBucket = []byte("disabled-edge-policy-index")
141

142
        // graphMetaBucket is a top-level bucket which stores various meta-data
143
        // related to the on-disk channel graph. Data stored in this bucket
144
        // includes the block to which the graph has been synced, the total
145
        // number of channels, etc.
146
        graphMetaBucket = []byte("graph-meta")
147

148
        // pruneLogBucket is a bucket within the graphMetaBucket that stores
149
        // a mapping from the block height to the hash for the blocks used to
150
        // prune the graph.
151
        // Once a new block is discovered, any channels that have been closed
152
        // (by spending the outpoint) can safely be removed from the graph, and
153
        // the block is added to the prune log. We need to keep such a log for
154
        // the case where a reorg happens, and we must "rewind" the state of the
155
        // graph by removing channels that were previously confirmed. In such a
156
        // case we'll remove all entries from the prune log with a block height
157
        // that no longer exists.
158
        pruneLogBucket = []byte("prune-log")
159

160
        // closedScidBucket is a top-level bucket that stores scids for
161
        // channels that we know to be closed. This is used so that we don't
162
        // need to perform expensive validation checks if we receive a channel
163
        // announcement for the channel again.
164
        //
165
        // maps: scid -> []byte{}
166
        closedScidBucket = []byte("closed-scid")
167
)
168

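// NOTE (editor-added illustration, not part of the original kv_store.go): the
// sketch below shows how a per-node policy key in edgeBucket could be
// assembled under the pubKey || chanID layout described above. It only reuses
// identifiers already present in this file (route.Vertex and the package's
// byteOrder) and assumes the 8-byte channel ID is encoded with byteOrder, as
// it is elsewhere in this file.
func exampleEdgePolicyKey(nodePub route.Vertex, chanID uint64) []byte {
        // 33-byte compressed public key followed by the 8-byte channel ID.
        var key [33 + 8]byte
        copy(key[:33], nodePub[:])
        byteOrder.PutUint64(key[33:], chanID)

        return key[:]
}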
169
const (
170
        // MaxAllowedExtraOpaqueBytes is the largest amount of opaque bytes that
171
        // we'll permit to be written to disk. We limit this as otherwise, it
172
        // would be possible for a node to create a ton of updates and slowly
173
        // fill our disk, and also waste bandwidth due to relaying.
174
        MaxAllowedExtraOpaqueBytes = 10000
175
)
176

177
// KVStore is a persistent, on-disk graph representation of the Lightning
178
// Network. This struct can be used to implement path finding algorithms on top
179
// of, and also to update a node's view based on information received from the
180
// p2p network. Internally, the graph is stored using a modified adjacency list
181
// representation with some added object interaction possible with each
182
// serialized edge/node. The graph stored is directed, meaning that there are two
183
// edges stored for each channel: an inbound/outbound edge for each node pair.
184
// Nodes, edges, and edge information can all be added to the graph
185
// independently. Edge removal results in the deletion of all edge information
186
// for that edge.
187
type KVStore struct {
188
        db kvdb.Backend
189

190
        // cacheMu guards all caches (rejectCache and chanCache). If
191
        // this mutex is to be acquired at the same time as the DB mutex, then
192
        // the cacheMu MUST be acquired first to prevent deadlock.
193
        cacheMu     sync.RWMutex
194
        rejectCache *rejectCache
195
        chanCache   *channelCache
196

197
        chanScheduler batch.Scheduler[kvdb.RwTx]
198
        nodeScheduler batch.Scheduler[kvdb.RwTx]
199
}
200

201
// A compile-time assertion to ensure that the KVStore struct implements the
202
// V1Store interface.
203
var _ V1Store = (*KVStore)(nil)
204

205
// NewKVStore allocates a new KVStore backed by a DB instance. The
206
// returned instance has its own unique reject cache and channel cache.
207
func NewKVStore(db kvdb.Backend, options ...StoreOptionModifier) (*KVStore,
208
        error) {
2✔
209

2✔
210
        opts := DefaultOptions()
2✔
211
        for _, o := range options {
4✔
212
                o(opts)
2✔
213
        }
2✔
214

215
        if !opts.NoMigration {
4✔
216
                if err := initKVStore(db); err != nil {
2✔
217
                        return nil, err
×
218
                }
×
219
        }
220

221
        g := &KVStore{
2✔
222
                db:          db,
2✔
223
                rejectCache: newRejectCache(opts.RejectCacheSize),
2✔
224
                chanCache:   newChannelCache(opts.ChannelCacheSize),
2✔
225
        }
2✔
226
        g.chanScheduler = batch.NewTimeScheduler(
2✔
227
                batch.NewBoltBackend[kvdb.RwTx](db), &g.cacheMu,
2✔
228
                opts.BatchCommitInterval,
2✔
229
        )
2✔
230
        g.nodeScheduler = batch.NewTimeScheduler(
2✔
231
                batch.NewBoltBackend[kvdb.RwTx](db), nil,
2✔
232
                opts.BatchCommitInterval,
2✔
233
        )
2✔
234

2✔
235
        return g, nil
2✔
236
}
237

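// openGraphStore is an editor-added illustration (not part of the original
// file): it shows the minimal way to construct a KVStore over an existing,
// already-open kvdb.Backend using the default options. Callers that need
// different cache sizes or a custom batch commit interval would pass
// StoreOptionModifier values as the variadic options instead.
func openGraphStore(backend kvdb.Backend) (*KVStore, error) {
        return NewKVStore(backend)
}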
238
// channelMapKey is the key structure used for storing channel edge policies.
239
type channelMapKey struct {
240
        nodeKey route.Vertex
241
        chanID  [8]byte
242
}
243

244
// getChannelMap loads all channel edge policies from the database and stores
245
// them in a map.
246
func (c *KVStore) getChannelMap(edges kvdb.RBucket) (
247
        map[channelMapKey]*models.ChannelEdgePolicy, error) {
2✔
248

2✔
249
        // Create a map to store all channel edge policies.
2✔
250
        channelMap := make(map[channelMapKey]*models.ChannelEdgePolicy)
2✔
251

2✔
252
        err := kvdb.ForAll(edges, func(k, edgeBytes []byte) error {
4✔
253
                // Skip embedded buckets.
2✔
254
                if bytes.Equal(k, edgeIndexBucket) ||
2✔
255
                        bytes.Equal(k, edgeUpdateIndexBucket) ||
2✔
256
                        bytes.Equal(k, zombieBucket) ||
2✔
257
                        bytes.Equal(k, disabledEdgePolicyBucket) ||
2✔
258
                        bytes.Equal(k, channelPointBucket) {
4✔
259

2✔
260
                        return nil
2✔
261
                }
2✔
262

263
                // Validate key length.
264
                if len(k) != 33+8 {
2✔
265
                        return fmt.Errorf("invalid edge key %x encountered", k)
×
266
                }
×
267

268
                var key channelMapKey
2✔
269
                copy(key.nodeKey[:], k[:33])
2✔
270
                copy(key.chanID[:], k[33:])
2✔
271

2✔
272
                // No need to deserialize unknown policy.
2✔
273
                if bytes.Equal(edgeBytes, unknownPolicy) {
2✔
274
                        return nil
×
275
                }
×
276

277
                edgeReader := bytes.NewReader(edgeBytes)
2✔
278
                edge, err := deserializeChanEdgePolicyRaw(
2✔
279
                        edgeReader,
2✔
280
                )
2✔
281

2✔
282
                switch {
2✔
283
                // If the db policy was missing an expected optional field, we
284
                // return nil as if the policy was unknown.
285
                case errors.Is(err, ErrEdgePolicyOptionalFieldNotFound):
×
286
                        return nil
×
287

288
                // We don't want a single policy with bad TLV data to stop us
289
                // from loading the rest of the data, so we just skip this
290
                // policy. This is for backwards compatibility since we did not
291
                // use to validate TLV data in the past before persisting it.
NEW
292
                case errors.Is(err, ErrParsingExtraTLVBytes):
×
NEW
293
                        return nil
×
294

295
                case err != nil:
×
296
                        return err
×
297
                }
298

299
                channelMap[key] = edge
2✔
300

2✔
301
                return nil
2✔
302
        })
303
        if err != nil {
2✔
304
                return nil, err
×
305
        }
×
306

307
        return channelMap, nil
2✔
308
}
309

310
var graphTopLevelBuckets = [][]byte{
311
        nodeBucket,
312
        edgeBucket,
313
        graphMetaBucket,
314
        closedScidBucket,
315
}
316

317
// initKVStore creates and initializes a fresh version of the graph KV store. In
318
// the case that the target path has not yet been created or doesn't yet exist,
319
// then the path is created. Additionally, all required top-level buckets used
320
// within the database are created.
321
func initKVStore(db kvdb.Backend) error {
2✔
322
        err := kvdb.Update(db, func(tx kvdb.RwTx) error {
4✔
323
                for _, tlb := range graphTopLevelBuckets {
4✔
324
                        if _, err := tx.CreateTopLevelBucket(tlb); err != nil {
2✔
325
                                return err
×
326
                        }
×
327
                }
328

329
                nodes := tx.ReadWriteBucket(nodeBucket)
2✔
330
                _, err := nodes.CreateBucketIfNotExists(aliasIndexBucket)
2✔
331
                if err != nil {
2✔
332
                        return err
×
333
                }
×
334
                _, err = nodes.CreateBucketIfNotExists(nodeUpdateIndexBucket)
2✔
335
                if err != nil {
2✔
336
                        return err
×
337
                }
×
338

339
                edges := tx.ReadWriteBucket(edgeBucket)
2✔
340
                _, err = edges.CreateBucketIfNotExists(edgeIndexBucket)
2✔
341
                if err != nil {
2✔
342
                        return err
×
343
                }
×
344
                _, err = edges.CreateBucketIfNotExists(edgeUpdateIndexBucket)
2✔
345
                if err != nil {
2✔
346
                        return err
×
347
                }
×
348
                _, err = edges.CreateBucketIfNotExists(channelPointBucket)
2✔
349
                if err != nil {
2✔
350
                        return err
×
351
                }
×
352
                _, err = edges.CreateBucketIfNotExists(zombieBucket)
2✔
353
                if err != nil {
2✔
354
                        return err
×
355
                }
×
356

357
                graphMeta := tx.ReadWriteBucket(graphMetaBucket)
2✔
358
                _, err = graphMeta.CreateBucketIfNotExists(pruneLogBucket)
2✔
359

2✔
360
                return err
2✔
361
        }, func() {})
2✔
362
        if err != nil {
2✔
363
                return fmt.Errorf("unable to create new channel graph: %w", err)
×
364
        }
×
365

366
        return nil
2✔
367
}
368

369
// AddrsForNode returns all known addresses for the target node public key that
370
// the graph DB is aware of. The returned boolean indicates whether the given
371
// node is known to the graph DB.
372
//
373
// NOTE: this is part of the channeldb.AddrSource interface.
374
func (c *KVStore) AddrsForNode(nodePub *btcec.PublicKey) (bool, []net.Addr,
375
        error) {
2✔
376

2✔
377
        pubKey, err := route.NewVertexFromBytes(nodePub.SerializeCompressed())
2✔
378
        if err != nil {
2✔
379
                return false, nil, err
×
380
        }
×
381

382
        node, err := c.FetchLightningNode(pubKey)
2✔
383
        // We don't consider it an error if the graph is unaware of the node.
2✔
384
        switch {
2✔
385
        case err != nil && !errors.Is(err, ErrGraphNodeNotFound):
×
386
                return false, nil, err
×
387

388
        case errors.Is(err, ErrGraphNodeNotFound):
2✔
389
                return false, nil, nil
2✔
390
        }
391

392
        return true, node.Addresses, nil
2✔
393
}
394

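// knownNodeAddrs is an editor-added illustration (not part of the original
// file): it wraps AddrsForNode and only returns addresses when the node is
// actually known to the graph, matching the meaning of the returned boolean
// documented above.
func knownNodeAddrs(store *KVStore, pub *btcec.PublicKey) ([]net.Addr, error) {
        known, addrs, err := store.AddrsForNode(pub)
        if err != nil {
                return nil, err
        }
        if !known {
                // The graph has never seen this node.
                return nil, nil
        }

        return addrs, nil
}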
395
// ForEachChannel iterates through all the channel edges stored within the
396
// graph and invokes the passed callback for each edge. The callback takes two
397
// edges as since this is a directed graph, both the in/out edges are visited.
398
// If the callback returns an error, then the transaction is aborted and the
399
// iteration stops early.
400
//
401
// NOTE: If an edge can't be found, or wasn't advertised, then a nil pointer
402
// for that particular channel edge routing policy will be passed into the
403
// callback.
404
func (c *KVStore) ForEachChannel(cb func(*models.ChannelEdgeInfo,
405
        *models.ChannelEdgePolicy, *models.ChannelEdgePolicy) error) error {
2✔
406

2✔
407
        return c.db.View(func(tx kvdb.RTx) error {
4✔
408
                edges := tx.ReadBucket(edgeBucket)
2✔
409
                if edges == nil {
2✔
410
                        return ErrGraphNoEdgesFound
×
411
                }
×
412

413
                // First, load all edges in memory indexed by node and channel
414
                // id.
415
                channelMap, err := c.getChannelMap(edges)
2✔
416
                if err != nil {
2✔
417
                        return err
×
418
                }
×
419

420
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
2✔
421
                if edgeIndex == nil {
2✔
422
                        return ErrGraphNoEdgesFound
×
423
                }
×
424

425
                // Load edge index, recombine each channel with the policies
426
                // loaded above and invoke the callback.
427
                return kvdb.ForAll(
2✔
428
                        edgeIndex, func(k, edgeInfoBytes []byte) error {
4✔
429
                                var chanID [8]byte
2✔
430
                                copy(chanID[:], k)
2✔
431

2✔
432
                                edgeInfoReader := bytes.NewReader(edgeInfoBytes)
2✔
433
                                info, err := deserializeChanEdgeInfo(
2✔
434
                                        edgeInfoReader,
2✔
435
                                )
2✔
436
                                if err != nil {
2✔
437
                                        return err
×
438
                                }
×
439

440
                                policy1 := channelMap[channelMapKey{
2✔
441
                                        nodeKey: info.NodeKey1Bytes,
2✔
442
                                        chanID:  chanID,
2✔
443
                                }]
2✔
444

2✔
445
                                policy2 := channelMap[channelMapKey{
2✔
446
                                        nodeKey: info.NodeKey2Bytes,
2✔
447
                                        chanID:  chanID,
2✔
448
                                }]
2✔
449

2✔
450
                                return cb(&info, policy1, policy2)
2✔
451
                        },
452
                )
453
        }, func() {})
2✔
454
}
455

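// countAdvertisedChannels is an editor-added illustration (not part of the
// original file): it uses ForEachChannel to count channels for which at least
// one directed policy is known, relying on the NOTE above that unknown
// policies are passed to the callback as nil.
func countAdvertisedChannels(store *KVStore) (int, error) {
        var count int
        err := store.ForEachChannel(func(_ *models.ChannelEdgeInfo,
                p1, p2 *models.ChannelEdgePolicy) error {

                if p1 != nil || p2 != nil {
                        count++
                }

                return nil
        })

        return count, err
}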
456
// forEachNodeDirectedChannel iterates through all channels of a given node,
457
// executing the passed callback on the directed edge representing the channel
458
// and its incoming policy. If the callback returns an error, then the iteration
459
// is halted with the error propagated back up to the caller. An optional read
460
// transaction may be provided. If none is provided, a new one will be created.
461
//
462
// Unknown policies are passed into the callback as nil values.
463
func (c *KVStore) forEachNodeDirectedChannel(tx kvdb.RTx,
464
        node route.Vertex, cb func(channel *DirectedChannel) error) error {
2✔
465

2✔
466
        // Fallback that uses the database.
2✔
467
        toNodeCallback := func() route.Vertex {
4✔
468
                return node
2✔
469
        }
2✔
470
        toNodeFeatures, err := c.fetchNodeFeatures(tx, node)
2✔
471
        if err != nil {
2✔
472
                return err
×
473
        }
×
474

475
        dbCallback := func(tx kvdb.RTx, e *models.ChannelEdgeInfo, p1,
2✔
476
                p2 *models.ChannelEdgePolicy) error {
4✔
477

2✔
478
                var cachedInPolicy *models.CachedEdgePolicy
2✔
479
                if p2 != nil {
4✔
480
                        cachedInPolicy = models.NewCachedPolicy(p2)
2✔
481
                        cachedInPolicy.ToNodePubKey = toNodeCallback
2✔
482
                        cachedInPolicy.ToNodeFeatures = toNodeFeatures
2✔
483
                }
2✔
484

485
                directedChannel := &DirectedChannel{
2✔
486
                        ChannelID:    e.ChannelID,
2✔
487
                        IsNode1:      node == e.NodeKey1Bytes,
2✔
488
                        OtherNode:    e.NodeKey2Bytes,
2✔
489
                        Capacity:     e.Capacity,
2✔
490
                        OutPolicySet: p1 != nil,
2✔
491
                        InPolicy:     cachedInPolicy,
2✔
492
                }
2✔
493
                p1.InboundFee.WhenSome(func(fee lnwire.Fee) {
2✔
NEW
494
                        directedChannel.InboundFee = fee
×
NEW
495
                })
×
496

497
                if node == e.NodeKey2Bytes {
4✔
498
                        directedChannel.OtherNode = e.NodeKey1Bytes
2✔
499
                }
2✔
500

501
                return cb(directedChannel)
2✔
502
        }
503

504
        return nodeTraversal(tx, node[:], c.db, dbCallback)
2✔
505
}
506

507
// fetchNodeFeatures returns the features of a given node. If no features are
508
// known for the node, an empty feature vector is returned. An optional read
509
// transaction may be provided. If none is provided, a new one will be created.
510
func (c *KVStore) fetchNodeFeatures(tx kvdb.RTx,
511
        node route.Vertex) (*lnwire.FeatureVector, error) {
2✔
512

2✔
513
        // Fallback that uses the database.
2✔
514
        targetNode, err := c.FetchLightningNodeTx(tx, node)
2✔
515
        switch {
2✔
516
        // If the node exists and has features, return them directly.
517
        case err == nil:
2✔
518
                return targetNode.Features, nil
2✔
519

520
        // If we couldn't find a node announcement, populate a blank feature
521
        // vector.
UNCOV
522
        case errors.Is(err, ErrGraphNodeNotFound):
×
UNCOV
523
                return lnwire.EmptyFeatureVector(), nil
×
524

525
        // Otherwise, bubble the error up.
526
        default:
×
UNCOV
527
                return nil, err
×
528
        }
529
}
530

531
// ForEachNodeDirectedChannel iterates through all channels of a given node,
532
// executing the passed callback on the directed edge representing the channel
533
// and its incoming policy. If the callback returns an error, then the iteration
534
// is halted with the error propagated back up to the caller.
535
//
536
// Unknown policies are passed into the callback as nil values.
537
//
538
// NOTE: this is part of the graphdb.NodeTraverser interface.
539
func (c *KVStore) ForEachNodeDirectedChannel(nodePub route.Vertex,
540
        cb func(channel *DirectedChannel) error) error {
2✔
541

2✔
542
        return c.forEachNodeDirectedChannel(nil, nodePub, cb)
2✔
543
}
2✔
544

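// countOutboundPolicies is an editor-added illustration (not part of the
// original file): it walks a node's channels via ForEachNodeDirectedChannel
// and counts how many have a known outgoing policy, using the OutPolicySet
// flag populated above.
func countOutboundPolicies(store *KVStore, node route.Vertex) (int, error) {
        var n int
        err := store.ForEachNodeDirectedChannel(node,
                func(channel *DirectedChannel) error {
                        if channel.OutPolicySet {
                                n++
                        }

                        return nil
                },
        )

        return n, err
}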
545
// FetchNodeFeatures returns the features of the given node. If no features are
546
// known for the node, an empty feature vector is returned.
547
//
548
// NOTE: this is part of the graphdb.NodeTraverser interface.
549
func (c *KVStore) FetchNodeFeatures(nodePub route.Vertex) (
550
        *lnwire.FeatureVector, error) {
2✔
551

2✔
552
        return c.fetchNodeFeatures(nil, nodePub)
2✔
553
}
2✔
554

555
// ForEachNodeCached is similar to forEachNode, but it returns DirectedChannel
556
// data to the call-back.
557
//
558
// NOTE: The callback contents MUST not be modified.
559
func (c *KVStore) ForEachNodeCached(cb func(node route.Vertex,
UNCOV
560
        chans map[uint64]*DirectedChannel) error) error {
×
UNCOV
561

×
UNCOV
562
        // Otherwise fall back to a version that uses the database directly.
×
563
        // We'll iterate over each node, then the set of channels for each
×
564
        // node, and construct a similar callback function signature as the
×
565
        // main function expects.
×
566
        return c.forEachNode(func(tx kvdb.RTx,
×
567
                node *models.LightningNode) error {
×
568

×
569
                channels := make(map[uint64]*DirectedChannel)
×
570

×
571
                err := c.forEachNodeChannelTx(tx, node.PubKeyBytes,
×
572
                        func(tx kvdb.RTx, e *models.ChannelEdgeInfo,
×
573
                                p1 *models.ChannelEdgePolicy,
×
574
                                p2 *models.ChannelEdgePolicy) error {
×
575

×
576
                                toNodeCallback := func() route.Vertex {
×
577
                                        return node.PubKeyBytes
×
578
                                }
×
579
                                toNodeFeatures, err := c.fetchNodeFeatures(
×
580
                                        tx, node.PubKeyBytes,
×
581
                                )
×
582
                                if err != nil {
×
583
                                        return err
×
584
                                }
×
585

586
                                var cachedInPolicy *models.CachedEdgePolicy
×
587
                                if p2 != nil {
×
UNCOV
588
                                        cachedInPolicy =
×
589
                                                models.NewCachedPolicy(p2)
×
590
                                        cachedInPolicy.ToNodePubKey =
×
591
                                                toNodeCallback
×
592
                                        cachedInPolicy.ToNodeFeatures =
×
593
                                                toNodeFeatures
×
594
                                }
×
595

596
                                directedChannel := &DirectedChannel{
×
597
                                        ChannelID: e.ChannelID,
×
UNCOV
598
                                        IsNode1: node.PubKeyBytes ==
×
599
                                                e.NodeKey1Bytes,
×
600
                                        OtherNode:    e.NodeKey2Bytes,
×
601
                                        Capacity:     e.Capacity,
×
602
                                        OutPolicySet: p1 != nil,
×
603
                                        InPolicy:     cachedInPolicy,
×
604
                                }
×
605

×
606
                                if node.PubKeyBytes == e.NodeKey2Bytes {
×
607
                                        directedChannel.OtherNode =
×
608
                                                e.NodeKey1Bytes
×
609
                                }
×
610

611
                                channels[e.ChannelID] = directedChannel
×
612

×
UNCOV
613
                                return nil
×
614
                        })
615
                if err != nil {
×
616
                        return err
×
UNCOV
617
                }
×
618

619
                return cb(node.PubKeyBytes, channels)
×
620
        })
621
}
622

623
// DisabledChannelIDs returns the channel ids of disabled channels.
624
// A channel is disabled when both of the associated ChannelEdgePolicies
625
// have their disabled bit on.
UNCOV
626
func (c *KVStore) DisabledChannelIDs() ([]uint64, error) {
×
UNCOV
627
        var disabledChanIDs []uint64
×
UNCOV
628
        var chanEdgeFound map[uint64]struct{}
×
629

×
630
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
×
631
                edges := tx.ReadBucket(edgeBucket)
×
632
                if edges == nil {
×
633
                        return ErrGraphNoEdgesFound
×
634
                }
×
635

636
                disabledEdgePolicyIndex := edges.NestedReadBucket(
×
637
                        disabledEdgePolicyBucket,
×
UNCOV
638
                )
×
639
                if disabledEdgePolicyIndex == nil {
×
640
                        return nil
×
641
                }
×
642

643
                // We iterate over all disabled policies and we add each channel
644
                // that has more than one disabled policy to the disabledChanIDs
645
                // slice.
UNCOV
646
                return disabledEdgePolicyIndex.ForEach(
×
UNCOV
647
                        func(k, v []byte) error {
×
UNCOV
648
                                chanID := byteOrder.Uint64(k[:8])
×
649
                                _, edgeFound := chanEdgeFound[chanID]
×
650
                                if edgeFound {
×
651
                                        delete(chanEdgeFound, chanID)
×
652
                                        disabledChanIDs = append(
×
653
                                                disabledChanIDs, chanID,
×
654
                                        )
×
655

×
656
                                        return nil
×
657
                                }
×
658

659
                                chanEdgeFound[chanID] = struct{}{}
×
660

×
UNCOV
661
                                return nil
×
662
                        },
663
                )
664
        }, func() {
×
UNCOV
665
                disabledChanIDs = nil
×
UNCOV
666
                chanEdgeFound = make(map[uint64]struct{})
×
667
        })
×
668
        if err != nil {
×
669
                return nil, err
×
670
        }
×
671

672
        return disabledChanIDs, nil
×
673
}
674

675
// ForEachNode iterates through all the stored vertices/nodes in the graph,
676
// executing the passed callback with each node encountered. If the callback
677
// returns an error, then the transaction is aborted and the iteration stops
678
// early. Any operations performed on the NodeTx passed to the call-back are
679
// executed under the same read transaction and so, methods on the NodeTx object
680
// _MUST_ only be called from within the call-back.
681
func (c *KVStore) ForEachNode(cb func(tx NodeRTx) error) error {
2✔
682
        return c.forEachNode(func(tx kvdb.RTx,
2✔
683
                node *models.LightningNode) error {
4✔
684

2✔
685
                return cb(newChanGraphNodeTx(tx, c, node))
2✔
686
        })
2✔
687
}
688

689
// forEachNode iterates through all the stored vertices/nodes in the graph,
690
// executing the passed callback with each node encountered. If the callback
691
// returns an error, then the transaction is aborted and the iteration stops
692
// early.
693
//
694
// TODO(roasbeef): add iterator interface to allow for memory efficient graph
695
// traversal when graph gets mega.
696
func (c *KVStore) forEachNode(
697
        cb func(kvdb.RTx, *models.LightningNode) error) error {
2✔
698

2✔
699
        traversal := func(tx kvdb.RTx) error {
4✔
700
                // First grab the nodes bucket which stores the mapping from
2✔
701
                // pubKey to node information.
2✔
702
                nodes := tx.ReadBucket(nodeBucket)
2✔
703
                if nodes == nil {
2✔
UNCOV
704
                        return ErrGraphNotFound
×
UNCOV
705
                }
×
706

707
                return nodes.ForEach(func(pubKey, nodeBytes []byte) error {
4✔
708
                        // If this is the source key, then we skip this
2✔
709
                        // iteration as the value for this key is a pubKey
2✔
710
                        // rather than raw node information.
2✔
711
                        if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 {
4✔
712
                                return nil
2✔
713
                        }
2✔
714

715
                        nodeReader := bytes.NewReader(nodeBytes)
2✔
716
                        node, err := deserializeLightningNode(nodeReader)
2✔
717
                        if err != nil {
2✔
UNCOV
718
                                return err
×
UNCOV
719
                        }
×
720

721
                        // Execute the callback, the transaction will abort if
722
                        // this returns an error.
723
                        return cb(tx, &node)
2✔
724
                })
725
        }
726

727
        return kvdb.View(c.db, traversal, func() {})
4✔
728
}
729

730
// ForEachNodeCacheable iterates through all the stored vertices/nodes in the
731
// graph, executing the passed callback with each node encountered. If the
732
// callback returns an error, then the transaction is aborted and the iteration
733
// stops early.
734
func (c *KVStore) ForEachNodeCacheable(cb func(route.Vertex,
735
        *lnwire.FeatureVector) error) error {
2✔
736

2✔
737
        traversal := func(tx kvdb.RTx) error {
4✔
738
                // First grab the nodes bucket which stores the mapping from
2✔
739
                // pubKey to node information.
2✔
740
                nodes := tx.ReadBucket(nodeBucket)
2✔
741
                if nodes == nil {
2✔
UNCOV
742
                        return ErrGraphNotFound
×
UNCOV
743
                }
×
744

745
                return nodes.ForEach(func(pubKey, nodeBytes []byte) error {
4✔
746
                        // If this is the source key, then we skip this
2✔
747
                        // iteration as the value for this key is a pubKey
2✔
748
                        // rather than raw node information.
2✔
749
                        if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 {
4✔
750
                                return nil
2✔
751
                        }
2✔
752

753
                        nodeReader := bytes.NewReader(nodeBytes)
2✔
754
                        node, features, err := deserializeLightningNodeCacheable( //nolint:ll
2✔
755
                                nodeReader,
2✔
756
                        )
2✔
757
                        if err != nil {
2✔
UNCOV
758
                                return err
×
UNCOV
759
                        }
×
760

761
                        // Execute the callback, the transaction will abort if
762
                        // this returns an error.
763
                        return cb(node, features)
2✔
764
                })
765
        }
766

767
        return kvdb.View(c.db, traversal, func() {})
4✔
768
}
769

770
// SourceNode returns the source node of the graph. The source node is treated
771
// as the center node within a star-graph. This method may be used to kick off
772
// a path finding algorithm in order to explore the reachability of another
773
// node based off the source node.
774
func (c *KVStore) SourceNode() (*models.LightningNode, error) {
2✔
775
        var source *models.LightningNode
2✔
776
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
4✔
777
                // First grab the nodes bucket which stores the mapping from
2✔
778
                // pubKey to node information.
2✔
779
                nodes := tx.ReadBucket(nodeBucket)
2✔
780
                if nodes == nil {
2✔
UNCOV
781
                        return ErrGraphNotFound
×
UNCOV
782
                }
×
783

784
                node, err := c.sourceNode(nodes)
2✔
785
                if err != nil {
2✔
UNCOV
786
                        return err
×
UNCOV
787
                }
×
788
                source = node
2✔
789

2✔
790
                return nil
2✔
791
        }, func() {
2✔
792
                source = nil
2✔
793
        })
2✔
794
        if err != nil {
2✔
UNCOV
795
                return nil, err
×
UNCOV
796
        }
×
797

798
        return source, nil
2✔
799
}
800

801
// sourceNode uses an existing database transaction and returns the source node
802
// of the graph. The source node is treated as the center node within a
803
// star-graph. This method may be used to kick off a path finding algorithm in
804
// order to explore the reachability of another node based off the source node.
805
func (c *KVStore) sourceNode(nodes kvdb.RBucket) (*models.LightningNode,
806
        error) {
2✔
807

2✔
808
        selfPub := nodes.Get(sourceKey)
2✔
809
        if selfPub == nil {
2✔
UNCOV
810
                return nil, ErrSourceNodeNotSet
×
UNCOV
811
        }
×
812

813
        // With the pubKey of the source node retrieved, we're able to
814
        // fetch the full node information.
815
        node, err := fetchLightningNode(nodes, selfPub)
2✔
816
        if err != nil {
2✔
UNCOV
817
                return nil, err
×
UNCOV
818
        }
×
819

820
        return &node, nil
2✔
821
}
822

823
// SetSourceNode sets the source node within the graph database. The source
824
// node is to be used as the center of a star-graph within path finding
825
// algorithms.
826
func (c *KVStore) SetSourceNode(node *models.LightningNode) error {
2✔
827
        nodePubBytes := node.PubKeyBytes[:]
2✔
828

2✔
829
        return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
4✔
830
                // First grab the nodes bucket which stores the mapping from
2✔
831
                // pubKey to node information.
2✔
832
                nodes, err := tx.CreateTopLevelBucket(nodeBucket)
2✔
833
                if err != nil {
2✔
UNCOV
834
                        return err
×
UNCOV
835
                }
×
836

837
                // Next we create the mapping from source to the targeted
838
                // public key.
839
                if err := nodes.Put(sourceKey, nodePubBytes); err != nil {
2✔
UNCOV
840
                        return err
×
UNCOV
841
                }
×
842

843
                // Finally, we commit the information of the lightning node
844
                // itself.
845
                return addLightningNode(tx, node)
2✔
846
        }, func() {})
2✔
847
}
848

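// setAndFetchSourceNode is an editor-added illustration (not part of the
// original file): it registers the given node as the graph's source node and
// immediately reads it back via SourceNode, which is the node that path
// finding uses as the center of the star-graph.
func setAndFetchSourceNode(store *KVStore,
        self *models.LightningNode) (*models.LightningNode, error) {

        if err := store.SetSourceNode(self); err != nil {
                return nil, err
        }

        return store.SourceNode()
}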
849
// AddLightningNode adds a vertex/node to the graph database. If the node is not
850
// in the database from before, this will add a new, unconnected one to the
851
// graph. If it is present from before, this will update that node's
852
// information. Note that this method is expected to only be called to update an
853
// already present node from a node announcement, or to insert a node found in a
854
// channel update.
855
//
856
// TODO(roasbeef): also need sig of announcement.
857
func (c *KVStore) AddLightningNode(node *models.LightningNode,
858
        opts ...batch.SchedulerOption) error {
2✔
859

2✔
860
        ctx := context.TODO()
2✔
861

2✔
862
        r := &batch.Request[kvdb.RwTx]{
2✔
863
                Opts: batch.NewSchedulerOptions(opts...),
2✔
864
                Do: func(tx kvdb.RwTx) error {
4✔
865
                        return addLightningNode(tx, node)
2✔
866
                },
2✔
867
        }
868

869
        return c.nodeScheduler.Execute(ctx, r)
2✔
870
}
871

872
func addLightningNode(tx kvdb.RwTx, node *models.LightningNode) error {
2✔
873
        nodes, err := tx.CreateTopLevelBucket(nodeBucket)
2✔
874
        if err != nil {
2✔
UNCOV
875
                return err
×
UNCOV
876
        }
×
877

878
        aliases, err := nodes.CreateBucketIfNotExists(aliasIndexBucket)
2✔
879
        if err != nil {
2✔
UNCOV
880
                return err
×
UNCOV
881
        }
×
882

883
        updateIndex, err := nodes.CreateBucketIfNotExists(
2✔
884
                nodeUpdateIndexBucket,
2✔
885
        )
2✔
886
        if err != nil {
2✔
UNCOV
887
                return err
×
UNCOV
888
        }
×
889

890
        return putLightningNode(nodes, aliases, updateIndex, node)
2✔
891
}
892

893
// LookupAlias attempts to return the alias as advertised by the target node.
894
// TODO(roasbeef): currently assumes that aliases are unique...
895
func (c *KVStore) LookupAlias(pub *btcec.PublicKey) (string, error) {
2✔
896
        var alias string
2✔
897

2✔
898
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
4✔
899
                nodes := tx.ReadBucket(nodeBucket)
2✔
900
                if nodes == nil {
2✔
UNCOV
901
                        return ErrGraphNodesNotFound
×
UNCOV
902
                }
×
903

904
                aliases := nodes.NestedReadBucket(aliasIndexBucket)
2✔
905
                if aliases == nil {
2✔
UNCOV
906
                        return ErrGraphNodesNotFound
×
UNCOV
907
                }
×
908

909
                nodePub := pub.SerializeCompressed()
2✔
910
                a := aliases.Get(nodePub)
2✔
911
                if a == nil {
2✔
UNCOV
912
                        return ErrNodeAliasNotFound
×
UNCOV
913
                }
×
914

915
                // TODO(roasbeef): should actually be using the utf-8
916
                // package...
917
                alias = string(a)
2✔
918

2✔
919
                return nil
2✔
920
        }, func() {
2✔
921
                alias = ""
2✔
922
        })
2✔
923
        if err != nil {
2✔
UNCOV
924
                return "", err
×
UNCOV
925
        }
×
926

927
        return alias, nil
2✔
928
}
929

930
// DeleteLightningNode starts a new database transaction to remove a vertex/node
931
// from the database according to the node's public key.
UNCOV
932
func (c *KVStore) DeleteLightningNode(nodePub route.Vertex) error {
×
UNCOV
933
        // TODO(roasbeef): ensure dangling edges are removed...
×
UNCOV
934
        return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
×
935
                nodes := tx.ReadWriteBucket(nodeBucket)
×
936
                if nodes == nil {
×
937
                        return ErrGraphNodeNotFound
×
938
                }
×
939

940
                return c.deleteLightningNode(nodes, nodePub[:])
×
941
        }, func() {})
×
942
}
943

944
// deleteLightningNode uses an existing database transaction to remove a
945
// vertex/node from the database according to the node's public key.
946
func (c *KVStore) deleteLightningNode(nodes kvdb.RwBucket,
947
        compressedPubKey []byte) error {
2✔
948

2✔
949
        aliases := nodes.NestedReadWriteBucket(aliasIndexBucket)
2✔
950
        if aliases == nil {
2✔
UNCOV
951
                return ErrGraphNodesNotFound
×
UNCOV
952
        }
×
953

954
        if err := aliases.Delete(compressedPubKey); err != nil {
2✔
955
                return err
×
UNCOV
956
        }
×
957

958
        // Before we delete the node, we'll fetch its current state so we can
959
        // determine when its last update was to clear out the node update
960
        // index.
961
        node, err := fetchLightningNode(nodes, compressedPubKey)
2✔
962
        if err != nil {
2✔
UNCOV
963
                return err
×
UNCOV
964
        }
×
965

966
        if err := nodes.Delete(compressedPubKey); err != nil {
2✔
967
                return err
×
UNCOV
968
        }
×
969

970
        // Finally, we'll delete the index entry for the node within the
971
        // nodeUpdateIndexBucket as this node is no longer active, so we don't
972
        // need to track its last update.
973
        nodeUpdateIndex := nodes.NestedReadWriteBucket(nodeUpdateIndexBucket)
2✔
974
        if nodeUpdateIndex == nil {
2✔
UNCOV
975
                return ErrGraphNodesNotFound
×
UNCOV
976
        }
×
977

978
        // In order to delete the entry, we'll need to reconstruct the key for
979
        // its last update.
980
        updateUnix := uint64(node.LastUpdate.Unix())
2✔
981
        var indexKey [8 + 33]byte
2✔
982
        byteOrder.PutUint64(indexKey[:8], updateUnix)
2✔
983
        copy(indexKey[8:], compressedPubKey)
2✔
984

2✔
985
        return nodeUpdateIndex.Delete(indexKey[:])
2✔
986
}
987

988
// AddChannelEdge adds a new (undirected, blank) edge to the graph database. An
989
// undirected edge between the two target nodes is created. The information stored
990
// denotes the static attributes of the channel, such as the channelID, the keys
991
// involved in creation of the channel, and the set of features that the channel
992
// supports. The chanPoint and chanID are used to uniquely identify the edge
993
// globally within the database.
994
func (c *KVStore) AddChannelEdge(edge *models.ChannelEdgeInfo,
995
        opts ...batch.SchedulerOption) error {
2✔
996

2✔
997
        ctx := context.TODO()
2✔
998

2✔
999
        var alreadyExists bool
2✔
1000
        r := &batch.Request[kvdb.RwTx]{
2✔
1001
                Opts: batch.NewSchedulerOptions(opts...),
2✔
1002
                Reset: func() {
4✔
1003
                        alreadyExists = false
2✔
1004
                },
2✔
1005
                Do: func(tx kvdb.RwTx) error {
2✔
1006
                        err := c.addChannelEdge(tx, edge)
2✔
1007

2✔
1008
                        // Silence ErrEdgeAlreadyExist so that the batch can
2✔
1009
                        // succeed, but propagate the error via local state.
2✔
1010
                        if errors.Is(err, ErrEdgeAlreadyExist) {
2✔
UNCOV
1011
                                alreadyExists = true
×
UNCOV
1012
                                return nil
×
UNCOV
1013
                        }
×
1014

1015
                        return err
2✔
1016
                },
1017
                OnCommit: func(err error) error {
2✔
1018
                        switch {
2✔
UNCOV
1019
                        case err != nil:
×
UNCOV
1020
                                return err
×
UNCOV
1021
                        case alreadyExists:
×
1022
                                return ErrEdgeAlreadyExist
×
1023
                        default:
2✔
1024
                                c.rejectCache.remove(edge.ChannelID)
2✔
1025
                                c.chanCache.remove(edge.ChannelID)
2✔
1026
                                return nil
2✔
1027
                        }
1028
                },
1029
        }
1030

1031
        return c.chanScheduler.Execute(ctx, r)
2✔
1032
}
1033

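// addEdgeIgnoringDuplicates is an editor-added illustration (not part of the
// original file): since AddChannelEdge surfaces ErrEdgeAlreadyExist through
// the batch's OnCommit hook, a caller that wants idempotent inserts can
// simply treat that specific error as success.
func addEdgeIgnoringDuplicates(store *KVStore,
        edge *models.ChannelEdgeInfo) error {

        err := store.AddChannelEdge(edge)
        if errors.Is(err, ErrEdgeAlreadyExist) {
                // The edge is already present; nothing to do.
                return nil
        }

        return err
}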
1034
// addChannelEdge is the private form of AddChannelEdge that allows callers to
1035
// utilize an existing db transaction.
1036
func (c *KVStore) addChannelEdge(tx kvdb.RwTx,
1037
        edge *models.ChannelEdgeInfo) error {
2✔
1038

2✔
1039
        // Construct the channel's primary key which is the 8-byte channel ID.
2✔
1040
        var chanKey [8]byte
2✔
1041
        binary.BigEndian.PutUint64(chanKey[:], edge.ChannelID)
2✔
1042

2✔
1043
        nodes, err := tx.CreateTopLevelBucket(nodeBucket)
2✔
1044
        if err != nil {
2✔
UNCOV
1045
                return err
×
UNCOV
1046
        }
×
1047
        edges, err := tx.CreateTopLevelBucket(edgeBucket)
2✔
1048
        if err != nil {
2✔
1049
                return err
×
UNCOV
1050
        }
×
1051
        edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket)
2✔
1052
        if err != nil {
2✔
1053
                return err
×
UNCOV
1054
        }
×
1055
        chanIndex, err := edges.CreateBucketIfNotExists(channelPointBucket)
2✔
1056
        if err != nil {
2✔
1057
                return err
×
UNCOV
1058
        }
×
1059

1060
        // First, attempt to check if this edge has already been created. If
1061
        // so, then we can exit early as this method is meant to be idempotent.
1062
        if edgeInfo := edgeIndex.Get(chanKey[:]); edgeInfo != nil {
2✔
UNCOV
1063
                return ErrEdgeAlreadyExist
×
UNCOV
1064
        }
×
1065

1066
        // Before we insert the channel into the database, we'll ensure that
1067
        // both nodes already exist in the channel graph. If either node
1068
        // doesn't, then we'll insert a "shell" node that just includes its
1069
        // public key, so subsequent validation and queries can work properly.
1070
        _, node1Err := fetchLightningNode(nodes, edge.NodeKey1Bytes[:])
2✔
1071
        switch {
2✔
1072
        case errors.Is(node1Err, ErrGraphNodeNotFound):
2✔
1073
                node1Shell := models.LightningNode{
2✔
1074
                        PubKeyBytes:          edge.NodeKey1Bytes,
2✔
1075
                        HaveNodeAnnouncement: false,
2✔
1076
                }
2✔
1077
                err := addLightningNode(tx, &node1Shell)
2✔
1078
                if err != nil {
2✔
UNCOV
1079
                        return fmt.Errorf("unable to create shell node "+
×
UNCOV
1080
                                "for: %x: %w", edge.NodeKey1Bytes, err)
×
UNCOV
1081
                }
×
1082
        case node1Err != nil:
×
1083
                return node1Err
×
1084
        }
1085

1086
        _, node2Err := fetchLightningNode(nodes, edge.NodeKey2Bytes[:])
2✔
1087
        switch {
2✔
1088
        case errors.Is(node2Err, ErrGraphNodeNotFound):
2✔
1089
                node2Shell := models.LightningNode{
2✔
1090
                        PubKeyBytes:          edge.NodeKey2Bytes,
2✔
1091
                        HaveNodeAnnouncement: false,
2✔
1092
                }
2✔
1093
                err := addLightningNode(tx, &node2Shell)
2✔
1094
                if err != nil {
2✔
UNCOV
1095
                        return fmt.Errorf("unable to create shell node "+
×
UNCOV
1096
                                "for: %x: %w", edge.NodeKey2Bytes, err)
×
UNCOV
1097
                }
×
1098
        case node2Err != nil:
×
1099
                return node2Err
×
1100
        }
1101

1102
        // If the edge hasn't been created yet, then we'll first add it to the
1103
        // edge index in order to associate the edge between two nodes and also
1104
        // store the static components of the channel.
1105
        if err := putChanEdgeInfo(edgeIndex, edge, chanKey); err != nil {
2✔
UNCOV
1106
                return err
×
UNCOV
1107
        }
×
1108

1109
        // Mark edge policies for both sides as unknown. This is to enable
1110
        // efficient incoming channel lookup for a node.
1111
        keys := []*[33]byte{
2✔
1112
                &edge.NodeKey1Bytes,
2✔
1113
                &edge.NodeKey2Bytes,
2✔
1114
        }
2✔
1115
        for _, key := range keys {
4✔
1116
                err := putChanEdgePolicyUnknown(edges, edge.ChannelID, key[:])
2✔
1117
                if err != nil {
2✔
UNCOV
1118
                        return err
×
UNCOV
1119
                }
×
1120
        }
1121

1122
        // Finally we add it to the channel index which maps channel points
1123
        // (outpoints) to the shorter channel IDs.
1124
        var b bytes.Buffer
2✔
1125
        if err := WriteOutpoint(&b, &edge.ChannelPoint); err != nil {
2✔
UNCOV
1126
                return err
×
UNCOV
1127
        }
×
1128

1129
        return chanIndex.Put(b.Bytes(), chanKey[:])
2✔
1130
}
1131

1132
// HasChannelEdge returns true if the database knows of a channel edge with the
1133
// passed channel ID, and false otherwise. If an edge with that ID is found
1134
// within the graph, then two time stamps representing the last time the edge
1135
// was updated for both directed edges are returned along with the boolean. If
1136
// it is not found, then the zombie index is checked and its result is returned
1137
// as the second boolean.
1138
func (c *KVStore) HasChannelEdge(
1139
        chanID uint64) (time.Time, time.Time, bool, bool, error) {
2✔
1140

2✔
1141
        var (
2✔
1142
                upd1Time time.Time
2✔
1143
                upd2Time time.Time
2✔
1144
                exists   bool
2✔
1145
                isZombie bool
2✔
1146
        )
2✔
1147

2✔
1148
        // We'll query the cache with the shared lock held to allow multiple
2✔
1149
        // readers to access values in the cache concurrently if they exist.
2✔
1150
        c.cacheMu.RLock()
2✔
1151
        if entry, ok := c.rejectCache.get(chanID); ok {
4✔
1152
                c.cacheMu.RUnlock()
2✔
1153
                upd1Time = time.Unix(entry.upd1Time, 0)
2✔
1154
                upd2Time = time.Unix(entry.upd2Time, 0)
2✔
1155
                exists, isZombie = entry.flags.unpack()
2✔
1156

2✔
1157
                return upd1Time, upd2Time, exists, isZombie, nil
2✔
1158
        }
2✔
1159
        c.cacheMu.RUnlock()
2✔
1160

2✔
1161
        c.cacheMu.Lock()
2✔
1162
        defer c.cacheMu.Unlock()
2✔
1163

2✔
1164
        // The item was not found with the shared lock, so we'll acquire the
2✔
1165
        // exclusive lock and check the cache again in case another method added
2✔
1166
        // the entry to the cache while no lock was held.
2✔
1167
        if entry, ok := c.rejectCache.get(chanID); ok {
3✔
1168
                upd1Time = time.Unix(entry.upd1Time, 0)
1✔
1169
                upd2Time = time.Unix(entry.upd2Time, 0)
1✔
1170
                exists, isZombie = entry.flags.unpack()
1✔
1171

1✔
1172
                return upd1Time, upd2Time, exists, isZombie, nil
1✔
1173
        }
1✔
1174

1175
        if err := kvdb.View(c.db, func(tx kvdb.RTx) error {
4✔
1176
                edges := tx.ReadBucket(edgeBucket)
2✔
1177
                if edges == nil {
2✔
UNCOV
1178
                        return ErrGraphNoEdgesFound
×
UNCOV
1179
                }
×
1180
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
2✔
1181
                if edgeIndex == nil {
2✔
1182
                        return ErrGraphNoEdgesFound
×
UNCOV
1183
                }
×
1184

1185
                var channelID [8]byte
2✔
1186
                byteOrder.PutUint64(channelID[:], chanID)
2✔
1187

2✔
1188
                // If the edge doesn't exist, then we'll also check our zombie
2✔
1189
                // index.
2✔
1190
                if edgeIndex.Get(channelID[:]) == nil {
4✔
1191
                        exists = false
2✔
1192
                        zombieIndex := edges.NestedReadBucket(zombieBucket)
2✔
1193
                        if zombieIndex != nil {
4✔
1194
                                isZombie, _, _ = isZombieEdge(
2✔
1195
                                        zombieIndex, chanID,
2✔
1196
                                )
2✔
1197
                        }
2✔
1198

1199
                        return nil
2✔
1200
                }
1201

1202
                exists = true
2✔
1203
                isZombie = false
2✔
1204

2✔
1205
                // If the channel has been found in the graph, then retrieve
2✔
1206
                // the edges themselves so we can return the last updated
2✔
1207
                // timestamps.
2✔
1208
                nodes := tx.ReadBucket(nodeBucket)
2✔
1209
                if nodes == nil {
2✔
UNCOV
1210
                        return ErrGraphNodeNotFound
×
UNCOV
1211
                }
×
1212

1213
                e1, e2, err := fetchChanEdgePolicies(
2✔
1214
                        edgeIndex, edges, channelID[:],
2✔
1215
                )
2✔
1216
                if err != nil {
2✔
UNCOV
1217
                        return err
×
UNCOV
1218
                }
×
1219

1220
                // As we may have only one of the edges populated, only set the
1221
                // update time if the edge was found in the database.
1222
                if e1 != nil {
4✔
1223
                        upd1Time = e1.LastUpdate
2✔
1224
                }
2✔
1225
                if e2 != nil {
4✔
1226
                        upd2Time = e2.LastUpdate
2✔
1227
                }
2✔
1228

1229
                return nil
2✔
1230
        }, func() {}); err != nil {
2✔
UNCOV
1231
                return time.Time{}, time.Time{}, exists, isZombie, err
×
UNCOV
1232
        }
×
1233

1234
        c.rejectCache.insert(chanID, rejectCacheEntry{
2✔
1235
                upd1Time: upd1Time.Unix(),
2✔
1236
                upd2Time: upd2Time.Unix(),
2✔
1237
                flags:    packRejectFlags(exists, isZombie),
2✔
1238
        })
2✔
1239

2✔
1240
        return upd1Time, upd2Time, exists, isZombie, nil
2✔
1241
}
1242
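
// exampleHasChannelEdgeUsage is an illustrative sketch added for
// documentation only (it is not part of the upstream API): it shows how a
// caller, such as a gossip message handler, might consult HasChannelEdge to
// decide whether a received channel announcement still needs processing. The
// store argument is assumed to be an already-opened KVStore.
func exampleHasChannelEdgeUsage(store *KVStore,
        scid lnwire.ShortChannelID) (bool, error) {

        // HasChannelEdge first consults the reject cache under a shared lock
        // and only falls back to a database read on a cache miss.
        _, _, exists, isZombie, err := store.HasChannelEdge(scid.ToUint64())
        if err != nil {
                return false, err
        }

        // Only channels that are neither known nor marked as zombies need to
        // be processed further.
        return !exists && !isZombie, nil
}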

// AddEdgeProof sets the proof of an existing edge in the graph database.
func (c *KVStore) AddEdgeProof(chanID lnwire.ShortChannelID,
        proof *models.ChannelAuthProof) error {

        // Construct the channel's primary key which is the 8-byte channel ID.
        var chanKey [8]byte
        binary.BigEndian.PutUint64(chanKey[:], chanID.ToUint64())

        return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
                edges := tx.ReadWriteBucket(edgeBucket)
                if edges == nil {
                        return ErrEdgeNotFound
                }

                edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
                if edgeIndex == nil {
                        return ErrEdgeNotFound
                }

                edge, err := fetchChanEdgeInfo(edgeIndex, chanKey[:])
                if err != nil {
                        return err
                }

                edge.AuthProof = proof

                return putChanEdgeInfo(edgeIndex, &edge, chanKey)
        }, func() {})
}

const (
        // pruneTipBytes is the total size of the value which stores a prune
        // entry of the graph in the prune log. The "prune tip" is the last
        // entry in the prune log, and indicates if the channel graph is in
        // sync with the current UTXO state. The structure of the value
        // is: blockHash, taking 32 bytes total.
        pruneTipBytes = 32
)

// PruneGraph prunes newly closed channels from the channel graph in response
// to a new block being solved on the network. Any transactions which spend the
// funding output of any known channels within the graph will be deleted.
// Additionally, the "prune tip", or the last block which has been used to
// prune the graph is stored so callers can ensure the graph is fully in sync
// with the current UTXO state. A slice of channels that have been closed by
// the target block along with any pruned nodes are returned if the function
// succeeds without error.
func (c *KVStore) PruneGraph(spentOutputs []*wire.OutPoint,
        blockHash *chainhash.Hash, blockHeight uint32) (
        []*models.ChannelEdgeInfo, []route.Vertex, error) {

        c.cacheMu.Lock()
        defer c.cacheMu.Unlock()

        var (
                chansClosed []*models.ChannelEdgeInfo
                prunedNodes []route.Vertex
        )

        err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
                // First grab the edges bucket which houses the information
                // we'd like to delete.
                edges, err := tx.CreateTopLevelBucket(edgeBucket)
                if err != nil {
                        return err
                }

                // Next grab the two edge indexes which will also need to be
                // updated.
                edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket)
                if err != nil {
                        return err
                }
                chanIndex, err := edges.CreateBucketIfNotExists(
                        channelPointBucket,
                )
                if err != nil {
                        return err
                }
                nodes := tx.ReadWriteBucket(nodeBucket)
                if nodes == nil {
                        return ErrSourceNodeNotSet
                }
                zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
                if err != nil {
                        return err
                }

                // For each of the outpoints that have been spent within the
                // block, we attempt to delete them from the graph as if that
                // outpoint was a channel, then it has now been closed.
                for _, chanPoint := range spentOutputs {
                        // TODO(roasbeef): load channel bloom filter, continue
                        // if NOT if filter

                        var opBytes bytes.Buffer
                        err := WriteOutpoint(&opBytes, chanPoint)
                        if err != nil {
                                return err
                        }

                        // First attempt to see if the channel exists within
                        // the database, if not, then we can exit early.
                        chanID := chanIndex.Get(opBytes.Bytes())
                        if chanID == nil {
                                continue
                        }

                        // Attempt to delete the channel, an ErrEdgeNotFound
                        // will be returned if that outpoint isn't known to be
                        // a channel. If no error is returned, then a channel
                        // was successfully pruned.
                        edgeInfo, err := c.delChannelEdgeUnsafe(
                                edges, edgeIndex, chanIndex, zombieIndex,
                                chanID, false, false,
                        )
                        if err != nil && !errors.Is(err, ErrEdgeNotFound) {
                                return err
                        }

                        chansClosed = append(chansClosed, edgeInfo)
                }

                metaBucket, err := tx.CreateTopLevelBucket(graphMetaBucket)
                if err != nil {
                        return err
                }

                pruneBucket, err := metaBucket.CreateBucketIfNotExists(
                        pruneLogBucket,
                )
                if err != nil {
                        return err
                }

                // With the graph pruned, add a new entry to the prune log,
                // which can be used to check if the graph is fully synced with
                // the current UTXO state.
                var blockHeightBytes [4]byte
                byteOrder.PutUint32(blockHeightBytes[:], blockHeight)

                var newTip [pruneTipBytes]byte
                copy(newTip[:], blockHash[:])

                err = pruneBucket.Put(blockHeightBytes[:], newTip[:])
                if err != nil {
                        return err
                }

                // Now that the graph has been pruned, we'll also attempt to
                // prune any nodes that have had a channel closed within the
                // latest block.
                prunedNodes, err = c.pruneGraphNodes(nodes, edgeIndex)

                return err
        }, func() {
                chansClosed = nil
                prunedNodes = nil
        })
        if err != nil {
                return nil, nil, err
        }

        for _, channel := range chansClosed {
                c.rejectCache.remove(channel.ChannelID)
                c.chanCache.remove(channel.ChannelID)
        }

        return chansClosed, prunedNodes, nil
}
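
// examplePruneGraphUsage is an illustrative sketch added for documentation
// only: it shows how a chain-watching component might feed the spent outputs
// of a newly connected block into PruneGraph and log the results. The store,
// spentOutputs, blockHash and blockHeight arguments are assumed to be
// supplied by the caller's block notification source.
func examplePruneGraphUsage(store *KVStore, spentOutputs []*wire.OutPoint,
        blockHash *chainhash.Hash, blockHeight uint32) error {

        // PruneGraph removes any channels whose funding outputs were spent in
        // the block and records the block as the new prune tip.
        closedChans, prunedNodes, err := store.PruneGraph(
                spentOutputs, blockHash, blockHeight,
        )
        if err != nil {
                return err
        }

        log.Debugf("Block %v (height=%v) closed %v channels and pruned %v "+
                "nodes", blockHash, blockHeight, len(closedChans),
                len(prunedNodes))

        return nil
}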

// PruneGraphNodes is a garbage collection method which attempts to prune out
// any nodes from the channel graph that are currently unconnected. This
// ensures that we only maintain a graph of reachable nodes. In the event that
// a pruned node gains more channels, it will be re-added back to the graph.
func (c *KVStore) PruneGraphNodes() ([]route.Vertex, error) {
        var prunedNodes []route.Vertex
        err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
                nodes := tx.ReadWriteBucket(nodeBucket)
                if nodes == nil {
                        return ErrGraphNodesNotFound
                }
                edges := tx.ReadWriteBucket(edgeBucket)
                if edges == nil {
                        return ErrGraphNotFound
                }
                edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
                if edgeIndex == nil {
                        return ErrGraphNoEdgesFound
                }

                var err error
                prunedNodes, err = c.pruneGraphNodes(nodes, edgeIndex)
                if err != nil {
                        return err
                }

                return nil
        }, func() {
                prunedNodes = nil
        })

        return prunedNodes, err
}

// pruneGraphNodes attempts to remove any nodes from the graph that have had a
// channel closed within the current block. If the node still has existing
// channels in the graph, this will act as a no-op.
func (c *KVStore) pruneGraphNodes(nodes kvdb.RwBucket,
        edgeIndex kvdb.RwBucket) ([]route.Vertex, error) {

        log.Trace("Pruning nodes from graph with no open channels")

        // We'll retrieve the graph's source node to ensure we don't remove it
        // even if it no longer has any open channels.
        sourceNode, err := c.sourceNode(nodes)
        if err != nil {
                return nil, err
        }

        // We'll use this map to keep count of the number of references to a
        // node in the graph. A node should only be removed once it has no
        // more references in the graph.
        nodeRefCounts := make(map[[33]byte]int)
        err = nodes.ForEach(func(pubKey, nodeBytes []byte) error {
                // If this is the source key, then we skip this
                // iteration as the value for this key is a pubKey
                // rather than raw node information.
                if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 {
                        return nil
                }

                var nodePub [33]byte
                copy(nodePub[:], pubKey)
                nodeRefCounts[nodePub] = 0

                return nil
        })
        if err != nil {
                return nil, err
        }

        // To ensure we never delete the source node, we'll start off by
        // bumping its ref count to 1.
        nodeRefCounts[sourceNode.PubKeyBytes] = 1

        // Next, we'll run through the edgeIndex which maps a channel ID to the
        // edge info. We'll use this scan to populate our reference count map
        // above.
        err = edgeIndex.ForEach(func(chanID, edgeInfoBytes []byte) error {
                // The first 66 bytes of the edge info contain the pubkeys of
                // the nodes that this edge attaches. We'll extract them, and
                // add them to the ref count map.
                var node1, node2 [33]byte
                copy(node1[:], edgeInfoBytes[:33])
                copy(node2[:], edgeInfoBytes[33:])

                // With the nodes extracted, we'll increase the ref count of
                // each of the nodes.
                nodeRefCounts[node1]++
                nodeRefCounts[node2]++

                return nil
        })
        if err != nil {
                return nil, err
        }

        // Finally, we'll make a second pass over the set of nodes, and delete
        // any nodes that have a ref count of zero.
        var pruned []route.Vertex
        for nodePubKey, refCount := range nodeRefCounts {
                // If the ref count of the node isn't zero, then we can safely
                // skip it as it still has edges to or from it within the
                // graph.
                if refCount != 0 {
                        continue
                }

                // If we reach this point, then there are no longer any edges
                // that connect this node, so we can delete it.
                err := c.deleteLightningNode(nodes, nodePubKey[:])
                if err != nil {
                        if errors.Is(err, ErrGraphNodeNotFound) ||
                                errors.Is(err, ErrGraphNodesNotFound) {

                                log.Warnf("Unable to prune node %x from the "+
                                        "graph: %v", nodePubKey, err)
                                continue
                        }

                        return nil, err
                }

                log.Infof("Pruned unconnected node %x from channel graph",
                        nodePubKey[:])

                pruned = append(pruned, nodePubKey)
        }

        if len(pruned) > 0 {
                log.Infof("Pruned %v unconnected nodes from the channel graph",
                        len(pruned))
        }

        return pruned, err
}

// DisconnectBlockAtHeight is used to indicate that the block specified
// by the passed height has been disconnected from the main chain. This
// will "rewind" the graph back to the height below, deleting channels
// that are no longer confirmed from the graph. The prune log will be
// set to the last prune height valid for the remaining chain.
// Channels that were removed from the graph resulting from the
// disconnected block are returned.
func (c *KVStore) DisconnectBlockAtHeight(height uint32) (
        []*models.ChannelEdgeInfo, error) {

        // Every channel having a ShortChannelID starting at 'height'
        // will no longer be confirmed.
        startShortChanID := lnwire.ShortChannelID{
                BlockHeight: height,
        }

        // Delete everything after this height from the db up until the
        // SCID alias range.
        endShortChanID := aliasmgr.StartingAlias

        // The block height will be the first 3 bytes of the channel IDs.
        var chanIDStart [8]byte
        byteOrder.PutUint64(chanIDStart[:], startShortChanID.ToUint64())
        var chanIDEnd [8]byte
        byteOrder.PutUint64(chanIDEnd[:], endShortChanID.ToUint64())

        c.cacheMu.Lock()
        defer c.cacheMu.Unlock()

        // Keep track of the channels that are removed from the graph.
        var removedChans []*models.ChannelEdgeInfo

        if err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
                edges, err := tx.CreateTopLevelBucket(edgeBucket)
                if err != nil {
                        return err
                }
                edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket)
                if err != nil {
                        return err
                }
                chanIndex, err := edges.CreateBucketIfNotExists(
                        channelPointBucket,
                )
                if err != nil {
                        return err
                }
                zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
                if err != nil {
                        return err
                }

                // Scan from chanIDStart to chanIDEnd, deleting every
                // found edge.
                // NOTE: we must delete the edges after the cursor loop, since
                // modifying the bucket while traversing is not safe.
                // NOTE: We use a < comparison in bytes.Compare instead of <=
                // so that the StartingAlias itself isn't deleted.
                var keys [][]byte
                cursor := edgeIndex.ReadWriteCursor()

                //nolint:ll
                for k, _ := cursor.Seek(chanIDStart[:]); k != nil &&
                        bytes.Compare(k, chanIDEnd[:]) < 0; k, _ = cursor.Next() {
                        keys = append(keys, k)
                }

                for _, k := range keys {
                        edgeInfo, err := c.delChannelEdgeUnsafe(
                                edges, edgeIndex, chanIndex, zombieIndex,
                                k, false, false,
                        )
                        if err != nil && !errors.Is(err, ErrEdgeNotFound) {
                                return err
                        }

                        removedChans = append(removedChans, edgeInfo)
                }

                // Delete all the entries in the prune log having a height
                // greater or equal to the block disconnected.
                metaBucket, err := tx.CreateTopLevelBucket(graphMetaBucket)
                if err != nil {
                        return err
                }

                pruneBucket, err := metaBucket.CreateBucketIfNotExists(
                        pruneLogBucket,
                )
                if err != nil {
                        return err
                }

                var pruneKeyStart [4]byte
                byteOrder.PutUint32(pruneKeyStart[:], height)

                var pruneKeyEnd [4]byte
                byteOrder.PutUint32(pruneKeyEnd[:], math.MaxUint32)

                // To avoid modifying the bucket while traversing, we delete
                // the keys in a second loop.
                var pruneKeys [][]byte
                pruneCursor := pruneBucket.ReadWriteCursor()
                //nolint:ll
                for k, _ := pruneCursor.Seek(pruneKeyStart[:]); k != nil &&
                        bytes.Compare(k, pruneKeyEnd[:]) <= 0; k, _ = pruneCursor.Next() {
                        pruneKeys = append(pruneKeys, k)
                }

                for _, k := range pruneKeys {
                        if err := pruneBucket.Delete(k); err != nil {
                                return err
                        }
                }

                return nil
        }, func() {
                removedChans = nil
        }); err != nil {
                return nil, err
        }

        for _, channel := range removedChans {
                c.rejectCache.remove(channel.ChannelID)
                c.chanCache.remove(channel.ChannelID)
        }

        return removedChans, nil
}

// PruneTip returns the block height and hash of the latest block that has been
// used to prune channels in the graph. Knowing the "prune tip" allows callers
// to tell if the graph is currently in sync with the current best known UTXO
// state.
func (c *KVStore) PruneTip() (*chainhash.Hash, uint32, error) {
        var (
                tipHash   chainhash.Hash
                tipHeight uint32
        )

        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
                graphMeta := tx.ReadBucket(graphMetaBucket)
                if graphMeta == nil {
                        return ErrGraphNotFound
                }
                pruneBucket := graphMeta.NestedReadBucket(pruneLogBucket)
                if pruneBucket == nil {
                        return ErrGraphNeverPruned
                }

                pruneCursor := pruneBucket.ReadCursor()

                // The prune key with the largest block height will be our
                // prune tip.
                k, v := pruneCursor.Last()
                if k == nil {
                        return ErrGraphNeverPruned
                }

                // Once we have the prune tip, the value will be the block hash,
                // and the key the block height.
                copy(tipHash[:], v)
                tipHeight = byteOrder.Uint32(k)

                return nil
        }, func() {})
        if err != nil {
                return nil, 0, err
        }

        return &tipHash, tipHeight, nil
}
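
// exampleHandleReorgUsage is an illustrative sketch added for documentation
// only: it shows how PruneTip and DisconnectBlockAtHeight might be combined
// to rewind the graph when the chain reorganizes back to reorgHeight. The
// store and reorgHeight arguments are assumed to come from the caller's
// chain backend.
func exampleHandleReorgUsage(store *KVStore, reorgHeight uint32) error {
        // The prune tip tells us how far the graph has been pruned; if the
        // graph was never pruned there is nothing to rewind.
        _, tipHeight, err := store.PruneTip()
        if errors.Is(err, ErrGraphNeverPruned) {
                return nil
        }
        if err != nil {
                return err
        }

        if tipHeight < reorgHeight {
                return nil
        }

        // Rewind the graph, removing channels confirmed at or above the
        // reorged height.
        removed, err := store.DisconnectBlockAtHeight(reorgHeight)
        if err != nil {
                return err
        }

        log.Debugf("Reorg to height %v removed %v channels", reorgHeight,
                len(removed))

        return nil
}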

// DeleteChannelEdges removes edges with the given channel IDs from the
// database and marks them as zombies. This ensures that we're unable to
// re-add them to our database once again. If an edge does not exist within
// the database, then ErrEdgeNotFound will be returned. If strictZombiePruning
// is true, then when we mark these edges as zombies, we'll set up the keys
// such that we require the node that failed to send the fresh update to be
// the one that resurrects the channel from its zombie state. The markZombie
// bool denotes whether or not to mark the channel as a zombie.
func (c *KVStore) DeleteChannelEdges(strictZombiePruning, markZombie bool,
        chanIDs ...uint64) ([]*models.ChannelEdgeInfo, error) {

        // TODO(roasbeef): possibly delete from node bucket if node has no more
        // channels
        // TODO(roasbeef): don't delete both edges?

        c.cacheMu.Lock()
        defer c.cacheMu.Unlock()

        var infos []*models.ChannelEdgeInfo
        err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
                edges := tx.ReadWriteBucket(edgeBucket)
                if edges == nil {
                        return ErrEdgeNotFound
                }
                edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
                if edgeIndex == nil {
                        return ErrEdgeNotFound
                }
                chanIndex := edges.NestedReadWriteBucket(channelPointBucket)
                if chanIndex == nil {
                        return ErrEdgeNotFound
                }
                nodes := tx.ReadWriteBucket(nodeBucket)
                if nodes == nil {
                        return ErrGraphNodeNotFound
                }
                zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
                if err != nil {
                        return err
                }

                var rawChanID [8]byte
                for _, chanID := range chanIDs {
                        byteOrder.PutUint64(rawChanID[:], chanID)
                        edgeInfo, err := c.delChannelEdgeUnsafe(
                                edges, edgeIndex, chanIndex, zombieIndex,
                                rawChanID[:], markZombie, strictZombiePruning,
                        )
                        if err != nil {
                                return err
                        }

                        infos = append(infos, edgeInfo)
                }

                return nil
        }, func() {
                infos = nil
        })
        if err != nil {
                return nil, err
        }

        for _, chanID := range chanIDs {
                c.rejectCache.remove(chanID)
                c.chanCache.remove(chanID)
        }

        return infos, nil
}
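
// exampleDeleteChannelEdgesUsage is an illustrative sketch added for
// documentation only: it shows how a caller might mark a set of stale
// channels as zombies so that their announcements are ignored until one of
// the channel peers resurrects them with a fresh update. The scids argument
// is assumed to be provided by the caller.
func exampleDeleteChannelEdgesUsage(store *KVStore,
        scids ...lnwire.ShortChannelID) error {

        chanIDs := make([]uint64, 0, len(scids))
        for _, scid := range scids {
                chanIDs = append(chanIDs, scid.ToUint64())
        }

        // markZombie=true records the channels in the zombie index, while
        // strictZombiePruning=false lets either channel peer resurrect them.
        removed, err := store.DeleteChannelEdges(false, true, chanIDs...)
        if err != nil {
                return err
        }

        log.Debugf("Marked %v channels as zombies", len(removed))

        return nil
}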

// ChannelID attempts to look up the 8-byte compact channel ID which maps to
// the passed channel point (outpoint). If the passed channel doesn't exist
// within the database, then ErrEdgeNotFound is returned.
func (c *KVStore) ChannelID(chanPoint *wire.OutPoint) (uint64, error) {
        var chanID uint64
        if err := kvdb.View(c.db, func(tx kvdb.RTx) error {
                var err error
                chanID, err = getChanID(tx, chanPoint)
                return err
        }, func() {
                chanID = 0
        }); err != nil {
                return 0, err
        }

        return chanID, nil
}

// getChanID returns the assigned channel ID for a given channel point.
func getChanID(tx kvdb.RTx, chanPoint *wire.OutPoint) (uint64, error) {
        var b bytes.Buffer
        if err := WriteOutpoint(&b, chanPoint); err != nil {
                return 0, err
        }

        edges := tx.ReadBucket(edgeBucket)
        if edges == nil {
                return 0, ErrGraphNoEdgesFound
        }
        chanIndex := edges.NestedReadBucket(channelPointBucket)
        if chanIndex == nil {
                return 0, ErrGraphNoEdgesFound
        }

        chanIDBytes := chanIndex.Get(b.Bytes())
        if chanIDBytes == nil {
                return 0, ErrEdgeNotFound
        }

        chanID := byteOrder.Uint64(chanIDBytes)

        return chanID, nil
}
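
// exampleChannelIDUsage is an illustrative sketch added for documentation
// only: it shows how the compact channel ID of a channel can be looked up
// from its funding outpoint, distinguishing an unknown channel from a real
// failure. The chanPoint argument is assumed to be supplied by the caller.
func exampleChannelIDUsage(store *KVStore,
        chanPoint *wire.OutPoint) (lnwire.ShortChannelID, bool, error) {

        cid, err := store.ChannelID(chanPoint)
        switch {
        // An unknown outpoint is reported via ErrEdgeNotFound rather than a
        // zero channel ID.
        case errors.Is(err, ErrEdgeNotFound):
                return lnwire.ShortChannelID{}, false, nil

        case err != nil:
                return lnwire.ShortChannelID{}, false, err
        }

        return lnwire.NewShortChanIDFromInt(cid), true, nil
}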

// TODO(roasbeef): allow updates to use Batch?

// HighestChanID returns the "highest" known channel ID in the channel graph.
// This represents the "newest" channel from the PoV of the chain. This method
// can be used by peers to quickly determine if their graphs are in sync.
func (c *KVStore) HighestChanID() (uint64, error) {
        var cid uint64

        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
                edges := tx.ReadBucket(edgeBucket)
                if edges == nil {
                        return ErrGraphNoEdgesFound
                }
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
                if edgeIndex == nil {
                        return ErrGraphNoEdgesFound
                }

                // In order to find the highest chan ID, we'll fetch a cursor
                // and use that to seek to the "end" of our known range.
                cidCursor := edgeIndex.ReadCursor()

                lastChanID, _ := cidCursor.Last()

                // If there's no key, then this means that we don't actually
                // know of any channels, so we'll return a predictable error.
                if lastChanID == nil {
                        return ErrGraphNoEdgesFound
                }

                // Otherwise, we'll deserialize the channel ID and return it
                // to the caller.
                cid = byteOrder.Uint64(lastChanID)

                return nil
        }, func() {
                cid = 0
        })
        if err != nil && !errors.Is(err, ErrGraphNoEdgesFound) {
                return 0, err
        }

        return cid, nil
}
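
// exampleHighestChanIDUsage is an illustrative sketch added for
// documentation only: it shows how the highest known channel ID can be
// decoded into its block height, which gives a rough indication of how far
// the local graph extends into the chain.
func exampleHighestChanIDUsage(store *KVStore) (uint32, error) {
        cid, err := store.HighestChanID()
        if err != nil {
                return 0, err
        }

        // A zero channel ID is returned when no channels are known yet.
        if cid == 0 {
                return 0, nil
        }

        return lnwire.NewShortChanIDFromInt(cid).BlockHeight, nil
}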

// ChannelEdge represents the complete set of information for a channel edge in
// the known channel graph. This struct couples the core information of the
// edge as well as each of the known advertised edge policies.
type ChannelEdge struct {
        // Info contains all the static information describing the channel.
        Info *models.ChannelEdgeInfo

        // Policy1 points to the "first" edge policy of the channel containing
        // the dynamic information required to properly route through the edge.
        Policy1 *models.ChannelEdgePolicy

        // Policy2 points to the "second" edge policy of the channel containing
        // the dynamic information required to properly route through the edge.
        Policy2 *models.ChannelEdgePolicy

        // Node1 is "node 1" in the channel. This is the node that would have
        // produced Policy1 if it exists.
        Node1 *models.LightningNode

        // Node2 is "node 2" in the channel. This is the node that would have
        // produced Policy2 if it exists.
        Node2 *models.LightningNode
}

// ChanUpdatesInHorizon returns all the known channel edges which have at least
// one edge that has an update timestamp within the specified horizon.
func (c *KVStore) ChanUpdatesInHorizon(startTime,
        endTime time.Time) ([]ChannelEdge, error) {

        // To ensure we don't return duplicate ChannelEdges, we'll use an
        // additional map to keep track of the edges already seen to prevent
        // re-adding it.
        var edgesSeen map[uint64]struct{}
        var edgesToCache map[uint64]ChannelEdge
        var edgesInHorizon []ChannelEdge

        c.cacheMu.Lock()
        defer c.cacheMu.Unlock()

        var hits int
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
                edges := tx.ReadBucket(edgeBucket)
                if edges == nil {
                        return ErrGraphNoEdgesFound
                }
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
                if edgeIndex == nil {
                        return ErrGraphNoEdgesFound
                }
                edgeUpdateIndex := edges.NestedReadBucket(edgeUpdateIndexBucket)
                if edgeUpdateIndex == nil {
                        return ErrGraphNoEdgesFound
                }

                nodes := tx.ReadBucket(nodeBucket)
                if nodes == nil {
                        return ErrGraphNodesNotFound
                }

                // We'll now obtain a cursor to perform a range query within
                // the index to find all channels within the horizon.
                updateCursor := edgeUpdateIndex.ReadCursor()

                var startTimeBytes, endTimeBytes [8 + 8]byte
                byteOrder.PutUint64(
                        startTimeBytes[:8], uint64(startTime.Unix()),
                )
                byteOrder.PutUint64(
                        endTimeBytes[:8], uint64(endTime.Unix()),
                )

                // With our start and end times constructed, we'll step through
                // the index collecting the info and policy of each update of
                // each channel that has a last update within the time range.
                //
                //nolint:ll
                for indexKey, _ := updateCursor.Seek(startTimeBytes[:]); indexKey != nil &&
                        bytes.Compare(indexKey, endTimeBytes[:]) <= 0; indexKey, _ = updateCursor.Next() {
                        // We have a new eligible entry, so we'll slice off the
                        // chan ID so we can query it in the DB.
                        chanID := indexKey[8:]

                        // If we've already retrieved the info and policies for
                        // this edge, then we can skip it as we don't need to do
                        // so again.
                        chanIDInt := byteOrder.Uint64(chanID)
                        if _, ok := edgesSeen[chanIDInt]; ok {
                                continue
                        }

                        if channel, ok := c.chanCache.get(chanIDInt); ok {
                                hits++
                                edgesSeen[chanIDInt] = struct{}{}
                                edgesInHorizon = append(edgesInHorizon, channel)

                                continue
                        }

                        // First, we'll fetch the static edge information.
                        edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID)
                        if err != nil {
                                chanID := byteOrder.Uint64(chanID)
                                return fmt.Errorf("unable to fetch info for "+
                                        "edge with chan_id=%v: %v", chanID, err)
                        }

                        // With the static information obtained, we'll now
                        // fetch the dynamic policy info.
                        edge1, edge2, err := fetchChanEdgePolicies(
                                edgeIndex, edges, chanID,
                        )
                        if err != nil {
                                chanID := byteOrder.Uint64(chanID)
                                return fmt.Errorf("unable to fetch policies "+
                                        "for edge with chan_id=%v: %v", chanID,
                                        err)
                        }

                        node1, err := fetchLightningNode(
                                nodes, edgeInfo.NodeKey1Bytes[:],
                        )
                        if err != nil {
                                return err
                        }

                        node2, err := fetchLightningNode(
                                nodes, edgeInfo.NodeKey2Bytes[:],
                        )
                        if err != nil {
                                return err
                        }

                        // Finally, we'll collate this edge with the rest of
                        // edges to be returned.
                        edgesSeen[chanIDInt] = struct{}{}
                        channel := ChannelEdge{
                                Info:    &edgeInfo,
                                Policy1: edge1,
                                Policy2: edge2,
                                Node1:   &node1,
                                Node2:   &node2,
                        }
                        edgesInHorizon = append(edgesInHorizon, channel)
                        edgesToCache[chanIDInt] = channel
                }

                return nil
        }, func() {
                edgesSeen = make(map[uint64]struct{})
                edgesToCache = make(map[uint64]ChannelEdge)
                edgesInHorizon = nil
        })
        switch {
        case errors.Is(err, ErrGraphNoEdgesFound):
                fallthrough
        case errors.Is(err, ErrGraphNodesNotFound):
                break

        case err != nil:
                return nil, err
        }

        // Insert any edges loaded from disk into the cache.
        for chanid, channel := range edgesToCache {
                c.chanCache.insert(chanid, channel)
        }

        log.Debugf("ChanUpdatesInHorizon hit percentage: %f (%d/%d)",
                float64(hits)/float64(len(edgesInHorizon)), hits,
                len(edgesInHorizon))

        return edgesInHorizon, nil
}
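
// exampleChanUpdatesInHorizonUsage is an illustrative sketch added for
// documentation only: it shows how a gossip syncer might answer a peer's
// timestamp-filtered query by collecting every channel that saw a policy
// update within the last day. The lookback window is an assumption made for
// the example.
func exampleChanUpdatesInHorizonUsage(store *KVStore) ([]ChannelEdge, error) {
        endTime := time.Now()
        startTime := endTime.Add(-24 * time.Hour)

        // Each returned ChannelEdge bundles the static channel info with both
        // directed policies and both node announcements.
        return store.ChanUpdatesInHorizon(startTime, endTime)
}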

// NodeUpdatesInHorizon returns all the known lightning nodes which have an
// update timestamp within the passed range. This method can be used by two
// nodes to quickly determine if they have the same set of up to date node
// announcements.
func (c *KVStore) NodeUpdatesInHorizon(startTime,
        endTime time.Time) ([]models.LightningNode, error) {

        var nodesInHorizon []models.LightningNode

        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
                nodes := tx.ReadBucket(nodeBucket)
                if nodes == nil {
                        return ErrGraphNodesNotFound
                }

                nodeUpdateIndex := nodes.NestedReadBucket(nodeUpdateIndexBucket)
                if nodeUpdateIndex == nil {
                        return ErrGraphNodesNotFound
                }

                // We'll now obtain a cursor to perform a range query within
                // the index to find all node announcements within the horizon.
                updateCursor := nodeUpdateIndex.ReadCursor()

                var startTimeBytes, endTimeBytes [8 + 33]byte
                byteOrder.PutUint64(
                        startTimeBytes[:8], uint64(startTime.Unix()),
                )
                byteOrder.PutUint64(
                        endTimeBytes[:8], uint64(endTime.Unix()),
                )

                // With our start and end times constructed, we'll step through
                // the index collecting info for each node within the time
                // range.
                //
                //nolint:ll
                for indexKey, _ := updateCursor.Seek(startTimeBytes[:]); indexKey != nil &&
                        bytes.Compare(indexKey, endTimeBytes[:]) <= 0; indexKey, _ = updateCursor.Next() {
                        nodePub := indexKey[8:]
                        node, err := fetchLightningNode(nodes, nodePub)
                        if err != nil {
                                return err
                        }

                        nodesInHorizon = append(nodesInHorizon, node)
                }

                return nil
        }, func() {
                nodesInHorizon = nil
        })
        switch {
        case errors.Is(err, ErrGraphNoEdgesFound):
                fallthrough
        case errors.Is(err, ErrGraphNodesNotFound):
                break

        case err != nil:
                return nil, err
        }

        return nodesInHorizon, nil
}
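
// exampleNodeUpdatesInHorizonUsage is an illustrative sketch added for
// documentation only: it shows how the node announcements updated within a
// given window can be fetched and keyed by their public key. The window
// bounds are assumed to be provided by the caller.
func exampleNodeUpdatesInHorizonUsage(store *KVStore, startTime,
        endTime time.Time) (map[route.Vertex]models.LightningNode, error) {

        nodes, err := store.NodeUpdatesInHorizon(startTime, endTime)
        if err != nil {
                return nil, err
        }

        // Index the announcements by node public key for quick lookups.
        byPubKey := make(map[route.Vertex]models.LightningNode, len(nodes))
        for _, node := range nodes {
                byPubKey[node.PubKeyBytes] = node
        }

        return byPubKey, nil
}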

// FilterKnownChanIDs takes a set of channel IDs and returns the subset of chan
// ID's that we don't know and are not known zombies of the passed set. In
// other words, we perform a set difference of our set of chan ID's and the
// ones passed in. This method can be used by callers to determine the set of
// channels another peer knows of that we don't. The ChannelUpdateInfos for the
// known zombies are also returned.
func (c *KVStore) FilterKnownChanIDs(chansInfo []ChannelUpdateInfo) ([]uint64,
        []ChannelUpdateInfo, error) {

        var (
                newChanIDs   []uint64
                knownZombies []ChannelUpdateInfo
        )

        c.cacheMu.Lock()
        defer c.cacheMu.Unlock()

        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
                edges := tx.ReadBucket(edgeBucket)
                if edges == nil {
                        return ErrGraphNoEdgesFound
                }
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
                if edgeIndex == nil {
                        return ErrGraphNoEdgesFound
                }

                // Fetch the zombie index, it may not exist if no edges have
                // ever been marked as zombies. If the index has been
                // initialized, we will use it later to skip known zombie edges.
                zombieIndex := edges.NestedReadBucket(zombieBucket)

                // We'll run through the set of chanIDs and collate only the
                // set of channels that can't be found within our db.
                var cidBytes [8]byte
                for _, info := range chansInfo {
                        scid := info.ShortChannelID.ToUint64()
                        byteOrder.PutUint64(cidBytes[:], scid)

                        // If the edge is already known, skip it.
                        if v := edgeIndex.Get(cidBytes[:]); v != nil {
                                continue
                        }

                        // If the edge is a known zombie, skip it.
                        if zombieIndex != nil {
                                isZombie, _, _ := isZombieEdge(
                                        zombieIndex, scid,
                                )

                                if isZombie {
                                        knownZombies = append(
                                                knownZombies, info,
                                        )

                                        continue
                                }
                        }

                        newChanIDs = append(newChanIDs, scid)
                }

                return nil
        }, func() {
                newChanIDs = nil
                knownZombies = nil
        })
        switch {
        // If we don't know of any edges yet, then we'll return the entire set
        // of chan IDs specified.
        case errors.Is(err, ErrGraphNoEdgesFound):
                ogChanIDs := make([]uint64, len(chansInfo))
                for i, info := range chansInfo {
                        ogChanIDs[i] = info.ShortChannelID.ToUint64()
                }

                return ogChanIDs, nil, nil

        case err != nil:
                return nil, nil, err
        }

        return newChanIDs, knownZombies, nil
}
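
// exampleFilterKnownChanIDsUsage is an illustrative sketch added for
// documentation only: it shows how the channel list received from a peer
// during gossip sync might be reduced to the SCIDs that are actually worth
// querying. The peerChans argument is assumed to have been assembled by the
// caller from the peer's reply.
func exampleFilterKnownChanIDsUsage(store *KVStore,
        peerChans []ChannelUpdateInfo) ([]lnwire.ShortChannelID, error) {

        newChanIDs, knownZombies, err := store.FilterKnownChanIDs(peerChans)
        if err != nil {
                return nil, err
        }

        log.Debugf("Peer advertised %v channels, %v unknown, %v known "+
                "zombies", len(peerChans), len(newChanIDs), len(knownZombies))

        // Only the channels we neither know nor consider zombies need to be
        // fetched from the peer.
        toQuery := make([]lnwire.ShortChannelID, 0, len(newChanIDs))
        for _, cid := range newChanIDs {
                toQuery = append(toQuery, lnwire.NewShortChanIDFromInt(cid))
        }

        return toQuery, nil
}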

// ChannelUpdateInfo couples the SCID of a channel with the timestamps of the
// latest received channel updates for the channel.
type ChannelUpdateInfo struct {
        // ShortChannelID is the SCID identifier of the channel.
        ShortChannelID lnwire.ShortChannelID

        // Node1UpdateTimestamp is the timestamp of the latest received update
        // from the node 1 channel peer. This will be set to zero time if no
        // update has yet been received from this node.
        Node1UpdateTimestamp time.Time

        // Node2UpdateTimestamp is the timestamp of the latest received update
        // from the node 2 channel peer. This will be set to zero time if no
        // update has yet been received from this node.
        Node2UpdateTimestamp time.Time
}

// NewChannelUpdateInfo is a constructor which makes sure we initialize the
// timestamps with zero seconds unix timestamp which equals
// `January 1, 1970, 00:00:00 UTC` in case the value is `time.Time{}`.
func NewChannelUpdateInfo(scid lnwire.ShortChannelID, node1Timestamp,
        node2Timestamp time.Time) ChannelUpdateInfo {

        chanInfo := ChannelUpdateInfo{
                ShortChannelID:       scid,
                Node1UpdateTimestamp: node1Timestamp,
                Node2UpdateTimestamp: node2Timestamp,
        }

        if node1Timestamp.IsZero() {
                chanInfo.Node1UpdateTimestamp = time.Unix(0, 0)
        }

        if node2Timestamp.IsZero() {
                chanInfo.Node2UpdateTimestamp = time.Unix(0, 0)
        }

        return chanInfo
}

// BlockChannelRange represents a range of channels for a given block height.
type BlockChannelRange struct {
        // Height is the height of the block all of the channels below were
        // included in.
        Height uint32

        // Channels is the list of channels identified by their short ID
        // representation known to us that were included in the block height
        // above. The list may include channel update timestamp information if
        // requested.
        Channels []ChannelUpdateInfo
}

// FilterChannelRange returns the channel ID's of all known channels which were
// mined in a block height within the passed range. The channel IDs are grouped
// by their common block height. This method can be used to quickly share with
// a peer the set of channels we know of within a particular range to catch
// them up after a period of time offline. If withTimestamps is true then the
// timestamp info of the latest received channel update messages of the channel
// will be included in the response.
func (c *KVStore) FilterChannelRange(startHeight,
        endHeight uint32, withTimestamps bool) ([]BlockChannelRange, error) {

        startChanID := &lnwire.ShortChannelID{
                BlockHeight: startHeight,
        }

        endChanID := lnwire.ShortChannelID{
                BlockHeight: endHeight,
                TxIndex:     math.MaxUint32 & 0x00ffffff,
                TxPosition:  math.MaxUint16,
        }

        // As we need to perform a range scan, we'll convert the starting and
        // ending height to their corresponding values when encoded using short
        // channel ID's.
        var chanIDStart, chanIDEnd [8]byte
        byteOrder.PutUint64(chanIDStart[:], startChanID.ToUint64())
        byteOrder.PutUint64(chanIDEnd[:], endChanID.ToUint64())

        var channelsPerBlock map[uint32][]ChannelUpdateInfo
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
                edges := tx.ReadBucket(edgeBucket)
                if edges == nil {
                        return ErrGraphNoEdgesFound
                }
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
                if edgeIndex == nil {
                        return ErrGraphNoEdgesFound
                }

                cursor := edgeIndex.ReadCursor()

                // We'll now iterate through the database, and find each
                // channel ID that resides within the specified range.
                //
                //nolint:ll
                for k, v := cursor.Seek(chanIDStart[:]); k != nil &&
                        bytes.Compare(k, chanIDEnd[:]) <= 0; k, v = cursor.Next() {
                        // Don't send alias SCIDs during gossip sync.
                        edgeReader := bytes.NewReader(v)
                        edgeInfo, err := deserializeChanEdgeInfo(edgeReader)
                        if err != nil {
                                return err
                        }

                        if edgeInfo.AuthProof == nil {
                                continue
                        }

                        // This channel ID rests within the target range, so
                        // we'll add it to our returned set.
                        rawCid := byteOrder.Uint64(k)
                        cid := lnwire.NewShortChanIDFromInt(rawCid)

                        chanInfo := NewChannelUpdateInfo(
                                cid, time.Time{}, time.Time{},
                        )

                        if !withTimestamps {
                                channelsPerBlock[cid.BlockHeight] = append(
                                        channelsPerBlock[cid.BlockHeight],
                                        chanInfo,
                                )

                                continue
                        }

                        node1Key, node2Key := computeEdgePolicyKeys(&edgeInfo)

                        rawPolicy := edges.Get(node1Key)
                        if len(rawPolicy) != 0 {
                                r := bytes.NewReader(rawPolicy)

                                edge, err := deserializeChanEdgePolicyRaw(r)
                                if err != nil && !errors.Is(
                                        err, ErrEdgePolicyOptionalFieldNotFound,
                                ) && !errors.Is(err, ErrParsingExtraTLVBytes) {
                                        return err
                                }

                                chanInfo.Node1UpdateTimestamp = edge.LastUpdate
                        }

                        rawPolicy = edges.Get(node2Key)
                        if len(rawPolicy) != 0 {
                                r := bytes.NewReader(rawPolicy)

                                edge, err := deserializeChanEdgePolicyRaw(r)
                                if err != nil && !errors.Is(
                                        err, ErrEdgePolicyOptionalFieldNotFound,
                                ) && !errors.Is(err, ErrParsingExtraTLVBytes) {
                                        return err
                                }

                                chanInfo.Node2UpdateTimestamp = edge.LastUpdate
                        }

                        channelsPerBlock[cid.BlockHeight] = append(
                                channelsPerBlock[cid.BlockHeight], chanInfo,
                        )
                }

                return nil
        }, func() {
                channelsPerBlock = make(map[uint32][]ChannelUpdateInfo)
        })

        switch {
        // If we don't know of any channels yet, then there's nothing to
        // filter, so we'll return an empty slice.
        case errors.Is(err, ErrGraphNoEdgesFound) || len(channelsPerBlock) == 0:
                return nil, nil

        case err != nil:
                return nil, err
        }

        // Return the channel ranges in ascending block height order.
        blocks := make([]uint32, 0, len(channelsPerBlock))
        for block := range channelsPerBlock {
                blocks = append(blocks, block)
        }
        sort.Slice(blocks, func(i, j int) bool {
                return blocks[i] < blocks[j]
        })

        channelRanges := make([]BlockChannelRange, 0, len(channelsPerBlock))
        for _, block := range blocks {
                channelRanges = append(channelRanges, BlockChannelRange{
                        Height:   block,
                        Channels: channelsPerBlock[block],
                })
        }

        return channelRanges, nil
}
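
// exampleFilterChannelRangeUsage is an illustrative sketch added for
// documentation only: it shows how the per-block channel ranges might be
// flattened into a single SCID list when answering a peer's channel range
// query. The height bounds are assumed to come from the peer's query.
func exampleFilterChannelRangeUsage(store *KVStore, startHeight,
        endHeight uint32) ([]lnwire.ShortChannelID, error) {

        // Timestamps are skipped here since the flattened reply only needs
        // the SCIDs themselves.
        ranges, err := store.FilterChannelRange(startHeight, endHeight, false)
        if err != nil {
                return nil, err
        }

        var scids []lnwire.ShortChannelID
        for _, blockRange := range ranges {
                for _, info := range blockRange.Channels {
                        scids = append(scids, info.ShortChannelID)
                }
        }

        return scids, nil
}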

// FetchChanInfos returns the set of channel edges that correspond to the
// passed channel ID's. If an edge in the query is unknown to the database, it
// will be skipped and the result will contain only those edges that exist at
// the time of the query. This can be used to respond to peer queries that are
// seeking to fill in gaps in their view of the channel graph.
func (c *KVStore) FetchChanInfos(chanIDs []uint64) ([]ChannelEdge, error) {
        return c.fetchChanInfos(nil, chanIDs)
}

// fetchChanInfos returns the set of channel edges that correspond to the
// passed channel ID's. If an edge in the query is unknown to the database, it
// will be skipped and the result will contain only those edges that exist at
// the time of the query. This can be used to respond to peer queries that are
// seeking to fill in gaps in their view of the channel graph.
//
// NOTE: An optional transaction may be provided. If none is provided, then a
// new one will be created.
func (c *KVStore) fetchChanInfos(tx kvdb.RTx, chanIDs []uint64) (
        []ChannelEdge, error) {
        // TODO(roasbeef): sort cids?

        var (
                chanEdges []ChannelEdge
                cidBytes  [8]byte
        )

        fetchChanInfos := func(tx kvdb.RTx) error {
                edges := tx.ReadBucket(edgeBucket)
                if edges == nil {
                        return ErrGraphNoEdgesFound
                }
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
                if edgeIndex == nil {
                        return ErrGraphNoEdgesFound
                }
                nodes := tx.ReadBucket(nodeBucket)
                if nodes == nil {
                        return ErrGraphNotFound
                }

                for _, cid := range chanIDs {
                        byteOrder.PutUint64(cidBytes[:], cid)

                        // First, we'll fetch the static edge information. If
                        // the edge is unknown, we will skip the edge and
                        // continue gathering all known edges.
                        edgeInfo, err := fetchChanEdgeInfo(
                                edgeIndex, cidBytes[:],
                        )
                        switch {
                        case errors.Is(err, ErrEdgeNotFound):
                                continue
                        case err != nil:
                                return err
                        }

                        // With the static information obtained, we'll now
                        // fetch the dynamic policy info.
                        edge1, edge2, err := fetchChanEdgePolicies(
                                edgeIndex, edges, cidBytes[:],
                        )
                        if err != nil {
                                return err
                        }

                        node1, err := fetchLightningNode(
                                nodes, edgeInfo.NodeKey1Bytes[:],
                        )
                        if err != nil {
                                return err
                        }

                        node2, err := fetchLightningNode(
                                nodes, edgeInfo.NodeKey2Bytes[:],
                        )
                        if err != nil {
                                return err
                        }

                        chanEdges = append(chanEdges, ChannelEdge{
                                Info:    &edgeInfo,
                                Policy1: edge1,
                                Policy2: edge2,
                                Node1:   &node1,
                                Node2:   &node2,
                        })
                }

                return nil
        }

        if tx == nil {
4✔
2498
                err := kvdb.View(c.db, fetchChanInfos, func() {
4✔
2499
                        chanEdges = nil
2✔
2500
                })
2✔
2501
                if err != nil {
2✔
UNCOV
2502
                        return nil, err
×
UNCOV
2503
                }
×
2504

2505
                return chanEdges, nil
2✔
2506
        }
2507

2508
        err := fetchChanInfos(tx)
×
UNCOV
2509
        if err != nil {
×
UNCOV
2510
                return nil, err
×
UNCOV
2511
        }
×
2512

2513
        return chanEdges, nil
×
2514
}
2515

2516
func delEdgeUpdateIndexEntry(edgesBucket kvdb.RwBucket, chanID uint64,
2517
        edge1, edge2 *models.ChannelEdgePolicy) error {
2✔
2518

2✔
2519
        // First, we'll fetch the edge update index bucket which currently
2✔
2520
        // stores an entry for the channel we're about to delete.
2✔
2521
        updateIndex := edgesBucket.NestedReadWriteBucket(edgeUpdateIndexBucket)
2✔
2522
        if updateIndex == nil {
2✔
UNCOV
2523
                // No edges in bucket, return early.
×
UNCOV
2524
                return nil
×
UNCOV
2525
        }
×
2526

2527
        // Now that we have the bucket, we'll attempt to construct a template
2528
        // for the index key: updateTime || chanid.
2529
        var indexKey [8 + 8]byte
2✔
2530
        byteOrder.PutUint64(indexKey[8:], chanID)
2✔
2531

2✔
2532
        // With the template constructed, we'll attempt to delete an entry that
2✔
2533
        // would have been created by both edges: we'll alternate the update
2✔
2534
        // times, as one may have overridden the other.
2✔
2535
        if edge1 != nil {
4✔
2536
                byteOrder.PutUint64(
2✔
2537
                        indexKey[:8], uint64(edge1.LastUpdate.Unix()),
2✔
2538
                )
2✔
2539
                if err := updateIndex.Delete(indexKey[:]); err != nil {
2✔
UNCOV
2540
                        return err
×
UNCOV
2541
                }
×
2542
        }
2543

2544
        // We'll also attempt to delete the entry that may have been created by
2545
        // the second edge.
2546
        if edge2 != nil {
4✔
2547
                byteOrder.PutUint64(
2✔
2548
                        indexKey[:8], uint64(edge2.LastUpdate.Unix()),
2✔
2549
                )
2✔
2550
                if err := updateIndex.Delete(indexKey[:]); err != nil {
2✔
UNCOV
2551
                        return err
×
UNCOV
2552
                }
×
2553
        }
2554

2555
        return nil
2✔
2556
}
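
// exampleEdgeUpdateIndexKey is an illustrative sketch (not used by the code
// above) of the 16-byte key layout handled by delEdgeUpdateIndexEntry. It
// assumes only the package-level byteOrder helper used throughout this file.
func exampleEdgeUpdateIndexKey(updateTime time.Time, chanID uint64) [16]byte {
        // The key is updateTime || chanID, both encoded as uint64s with the
        // package's byteOrder, matching the template built above.
        var key [16]byte
        byteOrder.PutUint64(key[:8], uint64(updateTime.Unix()))
        byteOrder.PutUint64(key[8:], chanID)

        return key
}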
2557

2558
// delChannelEdgeUnsafe deletes the edge with the given chanID from the graph
2559
// cache. It then goes on to delete any policy info and edge info for this
2560
// channel from the DB and finally, if isZombie is true, it will add an entry
2561
// for this channel in the zombie index.
2562
//
2563
// NOTE: this method MUST only be called if the cacheMu has already been
2564
// acquired.
2565
func (c *KVStore) delChannelEdgeUnsafe(edges, edgeIndex, chanIndex,
2566
        zombieIndex kvdb.RwBucket, chanID []byte, isZombie,
2567
        strictZombie bool) (*models.ChannelEdgeInfo, error) {
2✔
2568

2✔
2569
        edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID)
2✔
2570
        if err != nil {
2✔
UNCOV
2571
                return nil, err
×
UNCOV
2572
        }
×
2573

2574
        // We'll also remove the entry in the edge update index bucket before
2575
        // we delete the edges themselves so we can access their last update
2576
        // times.
2577
        cid := byteOrder.Uint64(chanID)
2✔
2578
        edge1, edge2, err := fetchChanEdgePolicies(edgeIndex, edges, chanID)
2✔
2579
        if err != nil {
2✔
UNCOV
2580
                return nil, err
×
UNCOV
2581
        }
×
2582
        err = delEdgeUpdateIndexEntry(edges, cid, edge1, edge2)
2✔
2583
        if err != nil {
2✔
UNCOV
2584
                return nil, err
×
2585
        }
×
2586

2587
        // The edge key is of the format pubKey || chanID. First we construct
2588
        // the latter half, populating the channel ID.
2589
        var edgeKey [33 + 8]byte
2✔
2590
        copy(edgeKey[33:], chanID)
2✔
2591

2✔
2592
        // With the latter half constructed, copy over the first public key to
2✔
2593
        // delete the edge in this direction, then the second to delete the
2✔
2594
        // edge in the opposite direction.
2✔
2595
        copy(edgeKey[:33], edgeInfo.NodeKey1Bytes[:])
2✔
2596
        if edges.Get(edgeKey[:]) != nil {
4✔
2597
                if err := edges.Delete(edgeKey[:]); err != nil {
2✔
UNCOV
2598
                        return nil, err
×
UNCOV
2599
                }
×
2600
        }
2601
        copy(edgeKey[:33], edgeInfo.NodeKey2Bytes[:])
2✔
2602
        if edges.Get(edgeKey[:]) != nil {
4✔
2603
                if err := edges.Delete(edgeKey[:]); err != nil {
2✔
2604
                        return nil, err
×
UNCOV
2605
                }
×
2606
        }
2607

2608
        // As part of deleting the edge we also remove all disabled entries
2609
        // from the edgePolicyDisabledIndex bucket. We do that for both
2610
        // directions.
2611
        err = updateEdgePolicyDisabledIndex(edges, cid, false, false)
2✔
2612
        if err != nil {
2✔
UNCOV
2613
                return nil, err
×
UNCOV
2614
        }
×
2615
        err = updateEdgePolicyDisabledIndex(edges, cid, true, false)
2✔
2616
        if err != nil {
2✔
UNCOV
2617
                return nil, err
×
2618
        }
×
2619

2620
        // With the edge data deleted, we can purge the information from the two
2621
        // edge indexes.
2622
        if err := edgeIndex.Delete(chanID); err != nil {
2✔
2623
                return nil, err
×
UNCOV
2624
        }
×
2625
        var b bytes.Buffer
2✔
2626
        if err := WriteOutpoint(&b, &edgeInfo.ChannelPoint); err != nil {
2✔
UNCOV
2627
                return nil, err
×
2628
        }
×
2629
        if err := chanIndex.Delete(b.Bytes()); err != nil {
2✔
UNCOV
2630
                return nil, err
×
UNCOV
2631
        }
×
2632

2633
        // Finally, we'll mark the edge as a zombie within our index if it's
2634
        // being removed due to the channel becoming a zombie. We do this to
2635
        // ensure we don't store unnecessary data for spent channels.
2636
        if !isZombie {
4✔
2637
                return &edgeInfo, nil
2✔
2638
        }
2✔
2639

2640
        nodeKey1, nodeKey2 := edgeInfo.NodeKey1Bytes, edgeInfo.NodeKey2Bytes
2✔
2641
        if strictZombie {
2✔
UNCOV
2642
                nodeKey1, nodeKey2 = makeZombiePubkeys(&edgeInfo, edge1, edge2)
×
UNCOV
2643
        }
×
2644

2645
        return &edgeInfo, markEdgeZombie(
2✔
2646
                zombieIndex, byteOrder.Uint64(chanID), nodeKey1, nodeKey2,
2✔
2647
        )
2✔
2648
}
2649

2650
// makeZombiePubkeys derives the node pubkeys to store in the zombie index for a
2651
// particular pair of channel policies. The return values are one of:
2652
//  1. (pubkey1, pubkey2)
2653
//  2. (pubkey1, blank)
2654
//  3. (blank, pubkey2)
2655
//
2656
// A blank pubkey means that the corresponding node will be unable to resurrect a
2657
// channel on its own. For example, node1 may continue to publish recent
2658
// updates, but node2 has fallen way behind. After marking an edge as a zombie,
2659
// we don't want another fresh update from node1 to resurrect, as the edge can
2660
// only become live once node2 finally sends something recent.
2661
//
2662
// In the case where we have neither update, we allow either party to resurrect
2663
// the channel. If the channel were to be marked zombie again, it would be
2664
// marked with the correct lagging channel since we received an update from only
2665
// one side.
2666
func makeZombiePubkeys(info *models.ChannelEdgeInfo,
UNCOV
2667
        e1, e2 *models.ChannelEdgePolicy) ([33]byte, [33]byte) {
×
UNCOV
2668

×
UNCOV
2669
        switch {
×
2670
        // If we don't have either edge policy, we'll return both pubkeys so
2671
        // that the channel can be resurrected by either party.
2672
        case e1 == nil && e2 == nil:
×
2673
                return info.NodeKey1Bytes, info.NodeKey2Bytes
×
2674

2675
        // If we're missing edge1, or if both edges are present but edge1 is
2676
        // older, we'll return edge1's pubkey and a blank pubkey for edge2. This
2677
        // means that only an update from edge1 will be able to resurrect the
2678
        // channel.
UNCOV
2679
        case e1 == nil || (e2 != nil && e1.LastUpdate.Before(e2.LastUpdate)):
×
UNCOV
2680
                return info.NodeKey1Bytes, [33]byte{}
×
2681

2682
        // Otherwise, we're missing edge2 or edge2 is the older side, so we
2683
        // return a blank pubkey for edge1. In this case, only an update from
2684
        // edge2 can resurrect the channel.
2685
        default:
×
UNCOV
2686
                return [33]byte{}, info.NodeKey2Bytes
×
2687
        }
2688
}
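
// exampleCanResurrect is an illustrative sketch (the helper name is
// hypothetical) of how the pubkeys chosen by makeZombiePubkeys gate
// resurrection: a blank key means that side cannot bring the edge back on
// its own.
func exampleCanResurrect(zombieKey1, zombieKey2, updater [33]byte) bool {
        var blank [33]byte
        switch {
        case zombieKey1 != blank && updater == zombieKey1:
                return true
        case zombieKey2 != blank && updater == zombieKey2:
                return true
        default:
                return false
        }
}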
2689

2690
// UpdateEdgePolicy updates the edge routing policy for a single directed edge
2691
// within the database for the referenced channel. The `flags` attribute within
2692
// the ChannelEdgePolicy determines which of the directed edges are being
2693
// updated. If the flag is 1, then the first node's information is being
2694
// updated, otherwise it's the second node's information. The node ordering is
2695
// determined by the lexicographical ordering of the identity public keys of the
2696
// nodes on either side of the channel.
2697
func (c *KVStore) UpdateEdgePolicy(edge *models.ChannelEdgePolicy,
2698
        opts ...batch.SchedulerOption) (route.Vertex, route.Vertex, error) {
2✔
2699

2✔
2700
        var (
2✔
2701
                ctx          = context.TODO()
2✔
2702
                isUpdate1    bool
2✔
2703
                edgeNotFound bool
2✔
2704
                from, to     route.Vertex
2✔
2705
        )
2✔
2706

2✔
2707
        r := &batch.Request[kvdb.RwTx]{
2✔
2708
                Opts: batch.NewSchedulerOptions(opts...),
2✔
2709
                Reset: func() {
4✔
2710
                        isUpdate1 = false
2✔
2711
                        edgeNotFound = false
2✔
2712
                },
2✔
2713
                Do: func(tx kvdb.RwTx) error {
2✔
2714
                        var err error
2✔
2715
                        from, to, isUpdate1, err = updateEdgePolicy(tx, edge)
2✔
2716
                        if err != nil {
2✔
UNCOV
2717
                                log.Errorf("UpdateEdgePolicy faild: %v", err)
×
UNCOV
2718
                        }
×
2719

2720
                        // Silence ErrEdgeNotFound so that the batch can
2721
                        // succeed, but propagate the error via local state.
2722
                        if errors.Is(err, ErrEdgeNotFound) {
2✔
2723
                                edgeNotFound = true
×
UNCOV
2724
                                return nil
×
UNCOV
2725
                        }
×
2726

2727
                        return err
2✔
2728
                },
2729
                OnCommit: func(err error) error {
2✔
2730
                        switch {
2✔
UNCOV
2731
                        case err != nil:
×
UNCOV
2732
                                return err
×
UNCOV
2733
                        case edgeNotFound:
×
UNCOV
2734
                                return ErrEdgeNotFound
×
2735
                        default:
2✔
2736
                                c.updateEdgeCache(edge, isUpdate1)
2✔
2737
                                return nil
2✔
2738
                        }
2739
                },
2740
        }
2741

2742
        err := c.chanScheduler.Execute(ctx, r)
2✔
2743

2✔
2744
        return from, to, err
2✔
2745
}
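
// exampleUpdateEdgePolicy is a usage sketch for UpdateEdgePolicy, assuming
// only the signature above and the ChannelFlags direction bit checked in
// updateEdgePolicy below. The policy contents are supplied by the caller.
func exampleUpdateEdgePolicy(c *KVStore,
        policy *models.ChannelEdgePolicy) error {

        // Clearing the direction bit targets node1's edge; setting it
        // targets node2's edge, per the lexicographical node ordering.
        policy.ChannelFlags &^= lnwire.ChanUpdateDirection

        from, to, err := c.UpdateEdgePolicy(policy)
        if err != nil {
                return err
        }

        log.Debugf("updated policy for edge %x -> %x", from, to)

        return nil
}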
2746

2747
func (c *KVStore) updateEdgeCache(e *models.ChannelEdgePolicy,
2748
        isUpdate1 bool) {
2✔
2749

2✔
2750
        // If an entry for this channel is found in reject cache, we'll modify
2✔
2751
        // the entry with the updated timestamp for the direction that was just
2✔
2752
        // written. If the edge doesn't exist, we'll load the cache entry lazily
2✔
2753
        // during the next query for this edge.
2✔
2754
        if entry, ok := c.rejectCache.get(e.ChannelID); ok {
4✔
2755
                if isUpdate1 {
4✔
2756
                        entry.upd1Time = e.LastUpdate.Unix()
2✔
2757
                } else {
4✔
2758
                        entry.upd2Time = e.LastUpdate.Unix()
2✔
2759
                }
2✔
2760
                c.rejectCache.insert(e.ChannelID, entry)
2✔
2761
        }
2762

2763
        // If an entry for this channel is found in channel cache, we'll modify
2764
        // the entry with the updated policy for the direction that was just
2765
        // written. If the edge doesn't exist, we'll defer loading the info and
2766
        // policies and lazily read from disk during the next query.
2767
        if channel, ok := c.chanCache.get(e.ChannelID); ok {
4✔
2768
                if isUpdate1 {
4✔
2769
                        channel.Policy1 = e
2✔
2770
                } else {
4✔
2771
                        channel.Policy2 = e
2✔
2772
                }
2✔
2773
                c.chanCache.insert(e.ChannelID, channel)
2✔
2774
        }
2775
}
2776

2777
// updateEdgePolicy attempts to update an edge's policy within the relevant
2778
// buckets using an existing database transaction. The returned boolean will be
2779
// true if the updated policy belongs to node1, and false if it belongs
2780
// to node2.
2781
func updateEdgePolicy(tx kvdb.RwTx, edge *models.ChannelEdgePolicy) (
2782
        route.Vertex, route.Vertex, bool, error) {
2✔
2783

2✔
2784
        var noVertex route.Vertex
2✔
2785

2✔
2786
        edges := tx.ReadWriteBucket(edgeBucket)
2✔
2787
        if edges == nil {
2✔
UNCOV
2788
                return noVertex, noVertex, false, ErrEdgeNotFound
×
UNCOV
2789
        }
×
2790
        edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
2✔
2791
        if edgeIndex == nil {
2✔
UNCOV
2792
                return noVertex, noVertex, false, ErrEdgeNotFound
×
2793
        }
×
2794

2795
        // Create the channelID key by converting the channel ID
2796
        // integer into a byte slice.
2797
        var chanID [8]byte
2✔
2798
        byteOrder.PutUint64(chanID[:], edge.ChannelID)
2✔
2799

2✔
2800
        // With the channel ID, we then fetch the value storing the two
2✔
2801
        // nodes which connect this channel edge.
2✔
2802
        nodeInfo := edgeIndex.Get(chanID[:])
2✔
2803
        if nodeInfo == nil {
2✔
UNCOV
2804
                return noVertex, noVertex, false, ErrEdgeNotFound
×
UNCOV
2805
        }
×
2806

2807
        // Depending on the flags value passed above, either the first
2808
        // or second edge policy is being updated.
2809
        var fromNode, toNode []byte
2✔
2810
        var isUpdate1 bool
2✔
2811
        if edge.ChannelFlags&lnwire.ChanUpdateDirection == 0 {
4✔
2812
                fromNode = nodeInfo[:33]
2✔
2813
                toNode = nodeInfo[33:66]
2✔
2814
                isUpdate1 = true
2✔
2815
        } else {
4✔
2816
                fromNode = nodeInfo[33:66]
2✔
2817
                toNode = nodeInfo[:33]
2✔
2818
                isUpdate1 = false
2✔
2819
        }
2✔
2820

2821
        // Finally, with the direction of the edge being updated
2822
        // identified, we update the on-disk edge representation.
2823
        err := putChanEdgePolicy(edges, edge, fromNode, toNode)
2✔
2824
        if err != nil {
2✔
UNCOV
2825
                return noVertex, noVertex, false, err
×
UNCOV
2826
        }
×
2827

2828
        var (
2✔
2829
                fromNodePubKey route.Vertex
2✔
2830
                toNodePubKey   route.Vertex
2✔
2831
        )
2✔
2832
        copy(fromNodePubKey[:], fromNode)
2✔
2833
        copy(toNodePubKey[:], toNode)
2✔
2834

2✔
2835
        return fromNodePubKey, toNodePubKey, isUpdate1, nil
2✔
2836
}
2837

2838
// isPublic determines whether the node is seen as public within the graph from
2839
// the source node's point of view. An existing database transaction can also be
2840
// specified.
2841
func (c *KVStore) isPublic(tx kvdb.RTx, nodePub route.Vertex,
2842
        sourcePubKey []byte) (bool, error) {
2✔
2843

2✔
2844
        // In order to determine whether this node is publicly advertised within
2✔
2845
        // the graph, we'll need to look at all of its edges and check whether
2✔
2846
        // they extend to any other node than the source node. errDone will be
2✔
2847
        // used to terminate the check early.
2✔
2848
        nodeIsPublic := false
2✔
2849
        errDone := errors.New("done")
2✔
2850
        err := c.forEachNodeChannelTx(tx, nodePub, func(tx kvdb.RTx,
2✔
2851
                info *models.ChannelEdgeInfo, _ *models.ChannelEdgePolicy,
2✔
2852
                _ *models.ChannelEdgePolicy) error {
4✔
2853

2✔
2854
                // If this edge doesn't extend to the source node, we'll
2✔
2855
                // terminate our search as we can now conclude that the node is
2✔
2856
                // publicly advertised within the graph due to the local node
2✔
2857
                // knowing of the current edge.
2✔
2858
                if !bytes.Equal(info.NodeKey1Bytes[:], sourcePubKey) &&
2✔
2859
                        !bytes.Equal(info.NodeKey2Bytes[:], sourcePubKey) {
4✔
2860

2✔
2861
                        nodeIsPublic = true
2✔
2862
                        return errDone
2✔
2863
                }
2✔
2864

2865
                // Since the edge _does_ extend to the source node, we'll also
2866
                // need to ensure that this is a public edge.
2867
                if info.AuthProof != nil {
4✔
2868
                        nodeIsPublic = true
2✔
2869
                        return errDone
2✔
2870
                }
2✔
2871

2872
                // Otherwise, we'll continue our search.
2873
                return nil
2✔
2874
        })
2875
        if err != nil && !errors.Is(err, errDone) {
2✔
UNCOV
2876
                return false, err
×
UNCOV
2877
        }
×
2878

2879
        return nodeIsPublic, nil
2✔
2880
}
2881

2882
// FetchLightningNodeTx attempts to look up a target node by its identity
2883
// public key. If the node isn't found in the database, then
2884
// ErrGraphNodeNotFound is returned. An optional transaction may be provided.
2885
// If none is provided, then a new one will be created.
2886
func (c *KVStore) FetchLightningNodeTx(tx kvdb.RTx, nodePub route.Vertex) (
2887
        *models.LightningNode, error) {
2✔
2888

2✔
2889
        return c.fetchLightningNode(tx, nodePub)
2✔
2890
}
2✔
2891

2892
// FetchLightningNode attempts to look up a target node by its identity public
2893
// key. If the node isn't found in the database, then ErrGraphNodeNotFound is
2894
// returned.
2895
func (c *KVStore) FetchLightningNode(nodePub route.Vertex) (
2896
        *models.LightningNode, error) {
2✔
2897

2✔
2898
        return c.fetchLightningNode(nil, nodePub)
2✔
2899
}
2✔
2900

2901
// fetchLightningNode attempts to look up a target node by its identity public
2902
// key. If the node isn't found in the database, then ErrGraphNodeNotFound is
2903
// returned. An optional transaction may be provided. If none is provided, then
2904
// a new one will be created.
2905
func (c *KVStore) fetchLightningNode(tx kvdb.RTx,
2906
        nodePub route.Vertex) (*models.LightningNode, error) {
2✔
2907

2✔
2908
        var node *models.LightningNode
2✔
2909
        fetch := func(tx kvdb.RTx) error {
4✔
2910
                // First grab the nodes bucket which stores the mapping from
2✔
2911
                // pubKey to node information.
2✔
2912
                nodes := tx.ReadBucket(nodeBucket)
2✔
2913
                if nodes == nil {
2✔
UNCOV
2914
                        return ErrGraphNotFound
×
UNCOV
2915
                }
×
2916

2917
                // If a key for this serialized public key isn't found, then
2918
                // the target node doesn't exist within the database.
2919
                nodeBytes := nodes.Get(nodePub[:])
2✔
2920
                if nodeBytes == nil {
4✔
2921
                        return ErrGraphNodeNotFound
2✔
2922
                }
2✔
2923

2924
                // If the node is found, then we can deserialize the node
2925
                // information to return to the user.
2926
                nodeReader := bytes.NewReader(nodeBytes)
2✔
2927
                n, err := deserializeLightningNode(nodeReader)
2✔
2928
                if err != nil {
2✔
UNCOV
2929
                        return err
×
UNCOV
2930
                }
×
2931

2932
                node = &n
2✔
2933

2✔
2934
                return nil
2✔
2935
        }
2936

2937
        if tx == nil {
4✔
2938
                err := kvdb.View(
2✔
2939
                        c.db, fetch, func() {
4✔
2940
                                node = nil
2✔
2941
                        },
2✔
2942
                )
2943
                if err != nil {
4✔
2944
                        return nil, err
2✔
2945
                }
2✔
2946

2947
                return node, nil
2✔
2948
        }
2949

UNCOV
2950
        err := fetch(tx)
×
UNCOV
2951
        if err != nil {
×
UNCOV
2952
                return nil, err
×
UNCOV
2953
        }
×
2954

2955
        return node, nil
×
2956
}
2957

2958
// HasLightningNode determines if the graph has a vertex identified by the
2959
// target node identity public key. If the node exists in the database, a
2960
// timestamp of when the data for the node was last updated is returned along
2961
// with a true boolean. Otherwise, an empty time.Time is returned with a false
2962
// boolean.
2963
func (c *KVStore) HasLightningNode(nodePub [33]byte) (time.Time, bool,
2964
        error) {
2✔
2965

2✔
2966
        var (
2✔
2967
                updateTime time.Time
2✔
2968
                exists     bool
2✔
2969
        )
2✔
2970

2✔
2971
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
4✔
2972
                // First grab the nodes bucket which stores the mapping from
2✔
2973
                // pubKey to node information.
2✔
2974
                nodes := tx.ReadBucket(nodeBucket)
2✔
2975
                if nodes == nil {
2✔
UNCOV
2976
                        return ErrGraphNotFound
×
UNCOV
2977
                }
×
2978

2979
                // If a key for this serialized public key isn't found, we can
2980
                // exit early.
2981
                nodeBytes := nodes.Get(nodePub[:])
2✔
2982
                if nodeBytes == nil {
4✔
2983
                        exists = false
2✔
2984
                        return nil
2✔
2985
                }
2✔
2986

2987
                // Otherwise we continue on to obtain the time stamp
2988
                // representing the last time the data for this node was
2989
                // updated.
2990
                nodeReader := bytes.NewReader(nodeBytes)
2✔
2991
                node, err := deserializeLightningNode(nodeReader)
2✔
2992
                if err != nil {
2✔
UNCOV
2993
                        return err
×
UNCOV
2994
                }
×
2995

2996
                exists = true
2✔
2997
                updateTime = node.LastUpdate
2✔
2998

2✔
2999
                return nil
2✔
3000
        }, func() {
2✔
3001
                updateTime = time.Time{}
2✔
3002
                exists = false
2✔
3003
        })
2✔
3004
        if err != nil {
2✔
UNCOV
3005
                return time.Time{}, exists, err
×
UNCOV
3006
        }
×
3007

3008
        return updateTime, exists, nil
2✔
3009
}
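
// exampleLookupNode is a usage sketch combining HasLightningNode and
// FetchLightningNode, assuming only the signatures above and
// ErrGraphNodeNotFound.
func exampleLookupNode(c *KVStore, pub route.Vertex) error {
        lastUpdate, exists, err := c.HasLightningNode(pub)
        if err != nil {
                return err
        }
        if !exists {
                // A full fetch would fail with ErrGraphNodeNotFound in this
                // case, so bail out early.
                return ErrGraphNodeNotFound
        }

        node, err := c.FetchLightningNode(pub)
        if err != nil {
                return err
        }

        log.Debugf("node %x last updated at %v", node.PubKeyBytes, lastUpdate)

        return nil
}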
3010

3011
// nodeTraversal is used to traverse all channels of a node given by its
3012
// public key and passes channel information into the specified callback.
3013
func nodeTraversal(tx kvdb.RTx, nodePub []byte, db kvdb.Backend,
3014
        cb func(kvdb.RTx, *models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
3015
                *models.ChannelEdgePolicy) error) error {
2✔
3016

2✔
3017
        traversal := func(tx kvdb.RTx) error {
4✔
3018
                edges := tx.ReadBucket(edgeBucket)
2✔
3019
                if edges == nil {
2✔
UNCOV
3020
                        return ErrGraphNotFound
×
UNCOV
3021
                }
×
3022
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
2✔
3023
                if edgeIndex == nil {
2✔
UNCOV
3024
                        return ErrGraphNoEdgesFound
×
3025
                }
×
3026

3027
                // In order to reach all the edges for this node, we take
3028
                // advantage of the construction of the key-space within the
3029
                // edge bucket. The keys are stored in the form: pubKey ||
3030
                // chanID. Therefore, starting from a chanID of zero, we can
3031
                // scan forward in the bucket, grabbing all the edges for the
3032
                // node. Once the prefix no longer matches, then we know we're
3033
                // done.
3034
                var nodeStart [33 + 8]byte
2✔
3035
                copy(nodeStart[:], nodePub)
2✔
3036
                copy(nodeStart[33:], chanStart[:])
2✔
3037

2✔
3038
                // Starting from the key pubKey || 0, we seek forward in the
2✔
3039
                // bucket until the retrieved key no longer has the public key
2✔
3040
                // as its prefix. This indicates that we've stepped over into
2✔
3041
                // another node's edges, so we can terminate our scan.
2✔
3042
                edgeCursor := edges.ReadCursor()
2✔
3043
                for nodeEdge, _ := edgeCursor.Seek(nodeStart[:]); bytes.HasPrefix(nodeEdge, nodePub); nodeEdge, _ = edgeCursor.Next() { //nolint:ll
4✔
3044
                        // If the prefix still matches, the channel id is
2✔
3045
                        // returned in nodeEdge. Channel id is used to lookup
2✔
3046
                        // the node at the other end of the channel and both
2✔
3047
                        // edge policies.
2✔
3048
                        chanID := nodeEdge[33:]
2✔
3049
                        edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID)
2✔
3050
                        if err != nil {
2✔
UNCOV
3051
                                return err
×
UNCOV
3052
                        }
×
3053

3054
                        outgoingPolicy, err := fetchChanEdgePolicy(
2✔
3055
                                edges, chanID, nodePub,
2✔
3056
                        )
2✔
3057
                        if err != nil {
2✔
UNCOV
3058
                                return err
×
UNCOV
3059
                        }
×
3060

3061
                        otherNode, err := edgeInfo.OtherNodeKeyBytes(nodePub)
2✔
3062
                        if err != nil {
2✔
3063
                                return err
×
3064
                        }
×
3065

3066
                        incomingPolicy, err := fetchChanEdgePolicy(
2✔
3067
                                edges, chanID, otherNode[:],
2✔
3068
                        )
2✔
3069
                        if err != nil {
2✔
UNCOV
3070
                                return err
×
UNCOV
3071
                        }
×
3072

3073
                        // Finally, we execute the callback.
3074
                        err = cb(tx, &edgeInfo, outgoingPolicy, incomingPolicy)
2✔
3075
                        if err != nil {
4✔
3076
                                return err
2✔
3077
                        }
2✔
3078
                }
3079

3080
                return nil
2✔
3081
        }
3082

3083
        // If no transaction was provided, then we'll create a new transaction
3084
        // to execute the traversal within.
3085
        if tx == nil {
4✔
3086
                return kvdb.View(db, traversal, func() {})
4✔
3087
        }
3088

3089
        // Otherwise, we re-use the existing transaction to execute the graph
3090
        // traversal.
3091
        return traversal(tx)
2✔
3092
}
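
// exampleNodeEdgeSeekKey is an illustrative sketch of the 41-byte seek key
// used by the traversal above. It assumes only the pubKey || chanID key
// layout described in the comments.
func exampleNodeEdgeSeekKey(nodePub [33]byte, chanID uint64) [33 + 8]byte {
        var key [33 + 8]byte
        copy(key[:33], nodePub[:])
        byteOrder.PutUint64(key[33:], chanID)

        // Seeking to nodePub || 0 and iterating while the 33-byte prefix
        // still matches visits every channel owned by nodePub.
        return key
}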
3093

3094
// ForEachNodeChannel iterates through all channels of the given node,
3095
// executing the passed callback with an edge info structure and the policies
3096
// of each end of the channel. The first edge policy is the outgoing edge *to*
3097
// the connecting node, while the second is the incoming edge *from* the
3098
// connecting node. If the callback returns an error, then the iteration is
3099
// halted with the error propagated back up to the caller.
3100
//
3101
// Unknown policies are passed into the callback as nil values.
3102
func (c *KVStore) ForEachNodeChannel(nodePub route.Vertex,
3103
        cb func(*models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
3104
                *models.ChannelEdgePolicy) error) error {
2✔
3105

2✔
3106
        return nodeTraversal(nil, nodePub[:], c.db, func(_ kvdb.RTx,
2✔
3107
                info *models.ChannelEdgeInfo, policy,
2✔
3108
                policy2 *models.ChannelEdgePolicy) error {
4✔
3109

2✔
3110
                return cb(info, policy, policy2)
2✔
3111
        })
2✔
3112
}
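
// exampleCountNodeChannels is a usage sketch for ForEachNodeChannel,
// assuming only the signature above; it simply counts a node's channels.
func exampleCountNodeChannels(c *KVStore, nodePub route.Vertex) (int, error) {
        var numChannels int
        err := c.ForEachNodeChannel(nodePub, func(
                info *models.ChannelEdgeInfo,
                outPolicy, inPolicy *models.ChannelEdgePolicy) error {

                // Unknown policies are passed in as nil values, so guard any
                // policy access accordingly.
                if outPolicy == nil && inPolicy == nil {
                        log.Debugf("channel %d has no known policies",
                                info.ChannelID)
                }

                numChannels++

                return nil
        })

        return numChannels, err
}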
3113

3114
// ForEachSourceNodeChannel iterates through all channels of the source node,
3115
// executing the passed callback on each. The callback is provided with the
3116
// channel's outpoint, whether we have a policy for the channel and the channel
3117
// peer's node information.
3118
func (c *KVStore) ForEachSourceNodeChannel(cb func(chanPoint wire.OutPoint,
3119
        havePolicy bool, otherNode *models.LightningNode) error) error {
2✔
3120

2✔
3121
        return kvdb.View(c.db, func(tx kvdb.RTx) error {
4✔
3122
                nodes := tx.ReadBucket(nodeBucket)
2✔
3123
                if nodes == nil {
2✔
UNCOV
3124
                        return ErrGraphNotFound
×
UNCOV
3125
                }
×
3126

3127
                node, err := c.sourceNode(nodes)
2✔
3128
                if err != nil {
2✔
3129
                        return err
×
3130
                }
×
3131

3132
                return nodeTraversal(
2✔
3133
                        tx, node.PubKeyBytes[:], c.db, func(tx kvdb.RTx,
2✔
3134
                                info *models.ChannelEdgeInfo,
2✔
3135
                                policy, _ *models.ChannelEdgePolicy) error {
4✔
3136

2✔
3137
                                peer, err := c.fetchOtherNode(
2✔
3138
                                        tx, info, node.PubKeyBytes[:],
2✔
3139
                                )
2✔
3140
                                if err != nil {
2✔
UNCOV
3141
                                        return err
×
UNCOV
3142
                                }
×
3143

3144
                                return cb(
2✔
3145
                                        info.ChannelPoint, policy != nil, peer,
2✔
3146
                                )
2✔
3147
                        },
3148
                )
3149
        }, func() {})
2✔
3150
}
3151

3152
// forEachNodeChannelTx iterates through all channels of the given node,
3153
// executing the passed callback with an edge info structure and the policies
3154
// of each end of the channel. The first edge policy is the outgoing edge *to*
3155
// the connecting node, while the second is the incoming edge *from* the
3156
// connecting node. If the callback returns an error, then the iteration is
3157
// halted with the error propagated back up to the caller.
3158
//
3159
// Unknown policies are passed into the callback as nil values.
3160
//
3161
// If the caller wishes to re-use an existing boltdb transaction, then it
3162
// should be passed as the first argument.  Otherwise, the first argument should
3163
// be nil and a fresh transaction will be created to execute the graph
3164
// traversal.
3165
func (c *KVStore) forEachNodeChannelTx(tx kvdb.RTx,
3166
        nodePub route.Vertex, cb func(kvdb.RTx, *models.ChannelEdgeInfo,
3167
                *models.ChannelEdgePolicy,
3168
                *models.ChannelEdgePolicy) error) error {
2✔
3169

2✔
3170
        return nodeTraversal(tx, nodePub[:], c.db, cb)
2✔
3171
}
2✔
3172

3173
// fetchOtherNode attempts to fetch the full LightningNode that's opposite of
3174
// the target node in the channel. This is useful when one knows the pubkey of
3175
// one of the nodes, and wishes to obtain the full LightningNode for the other
3176
// end of the channel.
3177
func (c *KVStore) fetchOtherNode(tx kvdb.RTx,
3178
        channel *models.ChannelEdgeInfo, thisNodeKey []byte) (
3179
        *models.LightningNode, error) {
2✔
3180

2✔
3181
        // Ensure that the node passed in is actually a member of the channel.
2✔
3182
        var targetNodeBytes [33]byte
2✔
3183
        switch {
2✔
3184
        case bytes.Equal(channel.NodeKey1Bytes[:], thisNodeKey):
2✔
3185
                targetNodeBytes = channel.NodeKey2Bytes
2✔
3186
        case bytes.Equal(channel.NodeKey2Bytes[:], thisNodeKey):
2✔
3187
                targetNodeBytes = channel.NodeKey1Bytes
2✔
UNCOV
3188
        default:
×
UNCOV
3189
                return nil, fmt.Errorf("node not participating in this channel")
×
3190
        }
3191

3192
        var targetNode *models.LightningNode
2✔
3193
        fetchNodeFunc := func(tx kvdb.RTx) error {
4✔
3194
                // First grab the nodes bucket which stores the mapping from
2✔
3195
                // pubKey to node information.
2✔
3196
                nodes := tx.ReadBucket(nodeBucket)
2✔
3197
                if nodes == nil {
2✔
UNCOV
3198
                        return ErrGraphNotFound
×
UNCOV
3199
                }
×
3200

3201
                node, err := fetchLightningNode(nodes, targetNodeBytes[:])
2✔
3202
                if err != nil {
2✔
3203
                        return err
×
3204
                }
×
3205

3206
                targetNode = &node
2✔
3207

2✔
3208
                return nil
2✔
3209
        }
3210

3211
        // If the transaction is nil, then we'll need to create a new one,
3212
        // otherwise we can use the existing db transaction.
3213
        var err error
2✔
3214
        if tx == nil {
2✔
UNCOV
3215
                err = kvdb.View(c.db, fetchNodeFunc, func() {
×
UNCOV
3216
                        targetNode = nil
×
UNCOV
3217
                })
×
3218
        } else {
2✔
3219
                err = fetchNodeFunc(tx)
2✔
3220
        }
2✔
3221

3222
        return targetNode, err
2✔
3223
}
3224

3225
// computeEdgePolicyKeys is a helper function that can be used to compute the
3226
// keys used to index the channel edge policy info for the two nodes of the
3227
// edge. The keys for node 1 and node 2 are returned respectively.
3228
func computeEdgePolicyKeys(info *models.ChannelEdgeInfo) ([]byte, []byte) {
2✔
3229
        var (
2✔
3230
                node1Key [33 + 8]byte
2✔
3231
                node2Key [33 + 8]byte
2✔
3232
        )
2✔
3233

2✔
3234
        copy(node1Key[:], info.NodeKey1Bytes[:])
2✔
3235
        copy(node2Key[:], info.NodeKey2Bytes[:])
2✔
3236

2✔
3237
        byteOrder.PutUint64(node1Key[33:], info.ChannelID)
2✔
3238
        byteOrder.PutUint64(node2Key[33:], info.ChannelID)
2✔
3239

2✔
3240
        return node1Key[:], node2Key[:]
2✔
3241
}
2✔
3242

3243
// FetchChannelEdgesByOutpoint attempts to lookup the two directed edges for
3244
// the channel identified by the funding outpoint. If the channel can't be
3245
// found, then ErrEdgeNotFound is returned. A struct which houses the general
3246
// information for the channel itself is returned as well as two structs that
3247
// contain the routing policies for the channel in either direction.
3248
func (c *KVStore) FetchChannelEdgesByOutpoint(op *wire.OutPoint) (
3249
        *models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
3250
        *models.ChannelEdgePolicy, error) {
2✔
3251

2✔
3252
        var (
2✔
3253
                edgeInfo *models.ChannelEdgeInfo
2✔
3254
                policy1  *models.ChannelEdgePolicy
2✔
3255
                policy2  *models.ChannelEdgePolicy
2✔
3256
        )
2✔
3257

2✔
3258
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
4✔
3259
                // First, grab the node bucket. This will be used to populate
2✔
3260
                // the Node pointers in each edge read from disk.
2✔
3261
                nodes := tx.ReadBucket(nodeBucket)
2✔
3262
                if nodes == nil {
2✔
UNCOV
3263
                        return ErrGraphNotFound
×
UNCOV
3264
                }
×
3265

3266
                // Next, grab the edge bucket which stores the edges, and also
3267
                // the index itself so we can group the directed edges together
3268
                // logically.
3269
                edges := tx.ReadBucket(edgeBucket)
2✔
3270
                if edges == nil {
2✔
UNCOV
3271
                        return ErrGraphNoEdgesFound
×
UNCOV
3272
                }
×
3273
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
2✔
3274
                if edgeIndex == nil {
2✔
UNCOV
3275
                        return ErrGraphNoEdgesFound
×
3276
                }
×
3277

3278
                // If the channel's outpoint doesn't exist within the outpoint
3279
                // index, then the edge does not exist.
3280
                chanIndex := edges.NestedReadBucket(channelPointBucket)
2✔
3281
                if chanIndex == nil {
2✔
UNCOV
3282
                        return ErrGraphNoEdgesFound
×
UNCOV
3283
                }
×
3284
                var b bytes.Buffer
2✔
3285
                if err := WriteOutpoint(&b, op); err != nil {
2✔
UNCOV
3286
                        return err
×
3287
                }
×
3288
                chanID := chanIndex.Get(b.Bytes())
2✔
3289
                if chanID == nil {
4✔
3290
                        return fmt.Errorf("%w: op=%v", ErrEdgeNotFound, op)
2✔
3291
                }
2✔
3292

3293
                // If the channel is found to exist, then we'll first retrieve
3294
                // the general information for the channel.
3295
                edge, err := fetchChanEdgeInfo(edgeIndex, chanID)
2✔
3296
                if err != nil {
2✔
UNCOV
3297
                        return fmt.Errorf("%w: chanID=%x", err, chanID)
×
UNCOV
3298
                }
×
3299
                edgeInfo = &edge
2✔
3300

2✔
3301
                // Once we have the information about the channels' parameters,
2✔
3302
                // we'll fetch the routing policies for each for the directed
2✔
3303
                // edges.
2✔
3304
                e1, e2, err := fetchChanEdgePolicies(edgeIndex, edges, chanID)
2✔
3305
                if err != nil {
2✔
UNCOV
3306
                        return fmt.Errorf("failed to find policy: %w", err)
×
UNCOV
3307
                }
×
3308

3309
                policy1 = e1
2✔
3310
                policy2 = e2
2✔
3311

2✔
3312
                return nil
2✔
3313
        }, func() {
2✔
3314
                edgeInfo = nil
2✔
3315
                policy1 = nil
2✔
3316
                policy2 = nil
2✔
3317
        })
2✔
3318
        if err != nil {
4✔
3319
                return nil, nil, nil, err
2✔
3320
        }
2✔
3321

3322
        return edgeInfo, policy1, policy2, nil
2✔
3323
}
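
// exampleLookupByOutpoint is a usage sketch for FetchChannelEdgesByOutpoint,
// assuming only the signature above and ErrEdgeNotFound.
func exampleLookupByOutpoint(c *KVStore, op *wire.OutPoint) error {
        info, policy1, policy2, err := c.FetchChannelEdgesByOutpoint(op)
        switch {
        case errors.Is(err, ErrEdgeNotFound):
                log.Debugf("no channel known for outpoint %v", op)
                return nil

        case err != nil:
                return err
        }

        log.Debugf("chan %d: policy1 known=%v, policy2 known=%v",
                info.ChannelID, policy1 != nil, policy2 != nil)

        return nil
}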
3324

3325
// FetchChannelEdgesByID attempts to lookup the two directed edges for the
3326
// channel identified by the channel ID. If the channel can't be found, then
3327
// ErrEdgeNotFound is returned. A struct which houses the general information
3328
// for the channel itself is returned as well as two structs that contain the
3329
// routing policies for the channel in either direction.
3330
//
3331
// ErrZombieEdge can be returned if the edge is currently marked as a zombie
3332
// within the database. In this case, the ChannelEdgePolicies will be nil, and
3333
// the ChannelEdgeInfo will only include the public keys of each node.
3334
func (c *KVStore) FetchChannelEdgesByID(chanID uint64) (
3335
        *models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
3336
        *models.ChannelEdgePolicy, error) {
2✔
3337

2✔
3338
        var (
2✔
3339
                edgeInfo  *models.ChannelEdgeInfo
2✔
3340
                policy1   *models.ChannelEdgePolicy
2✔
3341
                policy2   *models.ChannelEdgePolicy
2✔
3342
                channelID [8]byte
2✔
3343
        )
2✔
3344

2✔
3345
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
4✔
3346
                // First, grab the node bucket. This will be used to populate
2✔
3347
                // the Node pointers in each edge read from disk.
2✔
3348
                nodes := tx.ReadBucket(nodeBucket)
2✔
3349
                if nodes == nil {
2✔
UNCOV
3350
                        return ErrGraphNotFound
×
UNCOV
3351
                }
×
3352

3353
                // Next, grab the edge bucket which stores the edges, and also
3354
                // the index itself so we can group the directed edges together
3355
                // logically.
3356
                edges := tx.ReadBucket(edgeBucket)
2✔
3357
                if edges == nil {
2✔
UNCOV
3358
                        return ErrGraphNoEdgesFound
×
UNCOV
3359
                }
×
3360
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
2✔
3361
                if edgeIndex == nil {
2✔
UNCOV
3362
                        return ErrGraphNoEdgesFound
×
3363
                }
×
3364

3365
                byteOrder.PutUint64(channelID[:], chanID)
2✔
3366

2✔
3367
                // Now, attempt to fetch edge.
2✔
3368
                edge, err := fetchChanEdgeInfo(edgeIndex, channelID[:])
2✔
3369

2✔
3370
                // If it doesn't exist, we'll quickly check our zombie index to
2✔
3371
                // see if we've previously marked it as so.
2✔
3372
                if errors.Is(err, ErrEdgeNotFound) {
4✔
3373
                        // If the zombie index doesn't exist, or the edge is not
2✔
3374
                        // marked as a zombie within it, then we'll return the
2✔
3375
                        // original ErrEdgeNotFound error.
2✔
3376
                        zombieIndex := edges.NestedReadBucket(zombieBucket)
2✔
3377
                        if zombieIndex == nil {
2✔
UNCOV
3378
                                return ErrEdgeNotFound
×
UNCOV
3379
                        }
×
3380

3381
                        isZombie, pubKey1, pubKey2 := isZombieEdge(
2✔
3382
                                zombieIndex, chanID,
2✔
3383
                        )
2✔
3384
                        if !isZombie {
4✔
3385
                                return ErrEdgeNotFound
2✔
3386
                        }
2✔
3387

3388
                        // Otherwise, the edge is marked as a zombie, so we'll
3389
                        // populate the edge info with the public keys of each
3390
                        // party as this is the only information we have about
3391
                        // it and return an error signaling so.
3392
                        edgeInfo = &models.ChannelEdgeInfo{
2✔
3393
                                NodeKey1Bytes: pubKey1,
2✔
3394
                                NodeKey2Bytes: pubKey2,
2✔
3395
                        }
2✔
3396

2✔
3397
                        return ErrZombieEdge
2✔
3398
                }
3399

3400
                // Otherwise, we'll just return the error if any.
3401
                if err != nil {
2✔
UNCOV
3402
                        return err
×
UNCOV
3403
                }
×
3404

3405
                edgeInfo = &edge
2✔
3406

2✔
3407
                // Then we'll attempt to fetch the accompanying policies of this
2✔
3408
                // edge.
2✔
3409
                e1, e2, err := fetchChanEdgePolicies(
2✔
3410
                        edgeIndex, edges, channelID[:],
2✔
3411
                )
2✔
3412
                if err != nil {
2✔
UNCOV
3413
                        return err
×
UNCOV
3414
                }
×
3415

3416
                policy1 = e1
2✔
3417
                policy2 = e2
2✔
3418

2✔
3419
                return nil
2✔
3420
        }, func() {
2✔
3421
                edgeInfo = nil
2✔
3422
                policy1 = nil
2✔
3423
                policy2 = nil
2✔
3424
        })
2✔
3425
        if errors.Is(err, ErrZombieEdge) {
4✔
3426
                return edgeInfo, nil, nil, err
2✔
3427
        }
2✔
3428
        if err != nil {
4✔
3429
                return nil, nil, nil, err
2✔
3430
        }
2✔
3431

3432
        return edgeInfo, policy1, policy2, nil
2✔
3433
}
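
// exampleLookupByChanID is a usage sketch for FetchChannelEdgesByID,
// assuming only the signature above and ErrZombieEdge. For zombies we still
// get both node public keys back, which is enough to decide whether a fresh
// update should resurrect the channel.
func exampleLookupByChanID(c *KVStore, chanID uint64) error {
        info, policy1, policy2, err := c.FetchChannelEdgesByID(chanID)
        switch {
        case errors.Is(err, ErrZombieEdge):
                log.Debugf("chan %d is a zombie between %x and %x", chanID,
                        info.NodeKey1Bytes, info.NodeKey2Bytes)
                return nil

        case err != nil:
                return err
        }

        log.Debugf("chan %d live, policies known: %v/%v", chanID,
                policy1 != nil, policy2 != nil)

        return nil
}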
3434

3435
// IsPublicNode is a helper method that determines whether the node with the
3436
// given public key is seen as a public node in the graph from the graph's
3437
// source node's point of view.
3438
func (c *KVStore) IsPublicNode(pubKey [33]byte) (bool, error) {
2✔
3439
        var nodeIsPublic bool
2✔
3440
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
4✔
3441
                nodes := tx.ReadBucket(nodeBucket)
2✔
3442
                if nodes == nil {
2✔
UNCOV
3443
                        return ErrGraphNodesNotFound
×
UNCOV
3444
                }
×
3445
                ourPubKey := nodes.Get(sourceKey)
2✔
3446
                if ourPubKey == nil {
2✔
UNCOV
3447
                        return ErrSourceNodeNotSet
×
3448
                }
×
3449
                node, err := fetchLightningNode(nodes, pubKey[:])
2✔
3450
                if err != nil {
2✔
UNCOV
3451
                        return err
×
3452
                }
×
3453

3454
                nodeIsPublic, err = c.isPublic(tx, node.PubKeyBytes, ourPubKey)
2✔
3455

2✔
3456
                return err
2✔
3457
        }, func() {
2✔
3458
                nodeIsPublic = false
2✔
3459
        })
2✔
3460
        if err != nil {
2✔
UNCOV
3461
                return false, err
×
UNCOV
3462
        }
×
3463

3464
        return nodeIsPublic, nil
2✔
3465
}
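
// exampleIsPublicNode is a usage sketch for IsPublicNode, assuming only the
// signature above; the pubkey value is supplied by the caller.
func exampleIsPublicNode(c *KVStore, pubKey [33]byte) {
        isPublic, err := c.IsPublicNode(pubKey)
        if err != nil {
                log.Errorf("unable to check node visibility: %v", err)
                return
        }

        if isPublic {
                log.Debugf("node %x has at least one public channel", pubKey)
        }
}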
3466

3467
// genMultiSigP2WSH generates the p2wsh'd multisig script for 2 of 2 pubkeys.
3468
func genMultiSigP2WSH(aPub, bPub []byte) ([]byte, error) {
2✔
3469
        witnessScript, err := input.GenMultiSigScript(aPub, bPub)
2✔
3470
        if err != nil {
2✔
UNCOV
3471
                return nil, err
×
UNCOV
3472
        }
×
3473

3474
        // With the witness script generated, we'll now turn it into a p2wsh
3475
        // script:
3476
        //  * OP_0 <sha256(script)>
3477
        bldr := txscript.NewScriptBuilder(
2✔
3478
                txscript.WithScriptAllocSize(input.P2WSHSize),
2✔
3479
        )
2✔
3480
        bldr.AddOp(txscript.OP_0)
2✔
3481
        scriptHash := sha256.Sum256(witnessScript)
2✔
3482
        bldr.AddData(scriptHash[:])
2✔
3483

2✔
3484
        return bldr.Script()
2✔
3485
}
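
// exampleFundingScript is an illustrative sketch showing that the funding
// script watched for a channel is the 2-of-2 multisig wrapped in P2WSH, i.e.
// the 34-byte script OP_0 <sha256(witnessScript)> produced above. Only
// genMultiSigP2WSH and the bitcoin key fields used in this file are assumed.
func exampleFundingScript(info *models.ChannelEdgeInfo) ([]byte, error) {
        pkScript, err := genMultiSigP2WSH(
                info.BitcoinKey1Bytes[:], info.BitcoinKey2Bytes[:],
        )
        if err != nil {
                return nil, err
        }

        // 1 byte OP_0 + 1 byte data push + 32 byte script hash.
        if len(pkScript) != 34 {
                return nil, fmt.Errorf("unexpected script size %d",
                        len(pkScript))
        }

        return pkScript, nil
}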
3486

3487
// EdgePoint couples the outpoint of a channel with the funding script that it
3488
// creates. The FilteredChainView will use this to watch for spends of this
3489
// edge point on chain. We require both of these values as depending on the
3490
// concrete implementation, either the pkScript, or the out point will be used.
3491
type EdgePoint struct {
3492
        // FundingPkScript is the p2wsh multi-sig script of the target channel.
3493
        FundingPkScript []byte
3494

3495
        // OutPoint is the outpoint of the target channel.
3496
        OutPoint wire.OutPoint
3497
}
3498

3499
// String returns a human-readable version of the target EdgePoint. We return
3500
// the outpoint directly as it is enough to uniquely identify the edge point.
UNCOV
3501
func (e *EdgePoint) String() string {
×
UNCOV
3502
        return e.OutPoint.String()
×
UNCOV
3503
}
×
3504

3505
// ChannelView returns the verifiable edge information for each active channel
3506
// within the known channel graph. The set of UTXO's (along with their scripts)
3507
// returned are the ones that need to be watched on chain to detect channel
3508
// closes on the resident blockchain.
3509
func (c *KVStore) ChannelView() ([]EdgePoint, error) {
2✔
3510
        var edgePoints []EdgePoint
2✔
3511
        if err := kvdb.View(c.db, func(tx kvdb.RTx) error {
4✔
3512
                // We're going to iterate over the entire channel index, so
2✔
3513
                // we'll need to fetch the edgeBucket to get to the index as
2✔
3514
                // it's a sub-bucket.
2✔
3515
                edges := tx.ReadBucket(edgeBucket)
2✔
3516
                if edges == nil {
2✔
UNCOV
3517
                        return ErrGraphNoEdgesFound
×
UNCOV
3518
                }
×
3519
                chanIndex := edges.NestedReadBucket(channelPointBucket)
2✔
3520
                if chanIndex == nil {
2✔
UNCOV
3521
                        return ErrGraphNoEdgesFound
×
3522
                }
×
3523
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
2✔
3524
                if edgeIndex == nil {
2✔
UNCOV
3525
                        return ErrGraphNoEdgesFound
×
3526
                }
×
3527

3528
                // Once we have the proper bucket, we'll range over each key
3529
                // (which is the channel point for the channel) and decode it,
3530
                // accumulating each entry.
3531
                return chanIndex.ForEach(
2✔
3532
                        func(chanPointBytes, chanID []byte) error {
4✔
3533
                                chanPointReader := bytes.NewReader(
2✔
3534
                                        chanPointBytes,
2✔
3535
                                )
2✔
3536

2✔
3537
                                var chanPoint wire.OutPoint
2✔
3538
                                err := ReadOutpoint(chanPointReader, &chanPoint)
2✔
3539
                                if err != nil {
2✔
UNCOV
3540
                                        return err
×
UNCOV
3541
                                }
×
3542

3543
                                edgeInfo, err := fetchChanEdgeInfo(
2✔
3544
                                        edgeIndex, chanID,
2✔
3545
                                )
2✔
3546
                                if err != nil {
2✔
UNCOV
3547
                                        return err
×
UNCOV
3548
                                }
×
3549

3550
                                pkScript, err := genMultiSigP2WSH(
2✔
3551
                                        edgeInfo.BitcoinKey1Bytes[:],
2✔
3552
                                        edgeInfo.BitcoinKey2Bytes[:],
2✔
3553
                                )
2✔
3554
                                if err != nil {
2✔
UNCOV
3555
                                        return err
×
UNCOV
3556
                                }
×
3557

3558
                                edgePoints = append(edgePoints, EdgePoint{
2✔
3559
                                        FundingPkScript: pkScript,
2✔
3560
                                        OutPoint:        chanPoint,
2✔
3561
                                })
2✔
3562

2✔
3563
                                return nil
2✔
3564
                        },
3565
                )
3566
        }, func() {
2✔
3567
                edgePoints = nil
2✔
3568
        }); err != nil {
2✔
UNCOV
3569
                return nil, err
×
UNCOV
3570
        }
×
3571

3572
        return edgePoints, nil
2✔
3573
}
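
// exampleWatchChannelView is a usage sketch for ChannelView. The
// registerSpendNtfn callback is a hypothetical stand-in for whatever the
// caller uses to watch outpoints on chain.
func exampleWatchChannelView(c *KVStore,
        registerSpendNtfn func(wire.OutPoint, []byte) error) error {

        edgePoints, err := c.ChannelView()
        if err != nil {
                return err
        }

        for _, edgePoint := range edgePoints {
                // Both the outpoint and its pkScript are handed over, since
                // different chain views need one or the other.
                err := registerSpendNtfn(
                        edgePoint.OutPoint, edgePoint.FundingPkScript,
                )
                if err != nil {
                        return err
                }
        }

        return nil
}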
3574

3575
// MarkEdgeZombie attempts to mark a channel identified by its channel ID as a
3576
// zombie. This method is used on an ad-hoc basis, when channels need to be
3577
// marked as zombies outside the normal pruning cycle.
3578
func (c *KVStore) MarkEdgeZombie(chanID uint64,
UNCOV
3579
        pubKey1, pubKey2 [33]byte) error {
×
UNCOV
3580

×
UNCOV
3581
        c.cacheMu.Lock()
×
UNCOV
3582
        defer c.cacheMu.Unlock()
×
UNCOV
3583

×
3584
        err := kvdb.Batch(c.db, func(tx kvdb.RwTx) error {
×
3585
                edges := tx.ReadWriteBucket(edgeBucket)
×
3586
                if edges == nil {
×
3587
                        return ErrGraphNoEdgesFound
×
3588
                }
×
3589
                zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
×
3590
                if err != nil {
×
3591
                        return fmt.Errorf("unable to create zombie "+
×
3592
                                "bucket: %w", err)
×
3593
                }
×
3594

3595
                return markEdgeZombie(zombieIndex, chanID, pubKey1, pubKey2)
×
3596
        })
3597
        if err != nil {
×
3598
                return err
×
UNCOV
3599
        }
×
3600

UNCOV
3601
        c.rejectCache.remove(chanID)
×
3602
        c.chanCache.remove(chanID)
×
3603

×
3604
        return nil
×
3605
}
3606

3607
// markEdgeZombie marks an edge as a zombie within our zombie index. The public
3608
// keys should represent the node public keys of the two parties involved in the
3609
// edge.
3610
func markEdgeZombie(zombieIndex kvdb.RwBucket, chanID uint64, pubKey1,
3611
        pubKey2 [33]byte) error {
2✔
3612

2✔
3613
        var k [8]byte
2✔
3614
        byteOrder.PutUint64(k[:], chanID)
2✔
3615

2✔
3616
        var v [66]byte
2✔
3617
        copy(v[:33], pubKey1[:])
2✔
3618
        copy(v[33:], pubKey2[:])
2✔
3619

2✔
3620
        return zombieIndex.Put(k[:], v[:])
2✔
3621
}
2✔
3622

3623
// MarkEdgeLive clears an edge from our zombie index, deeming it as live.
UNCOV
3624
func (c *KVStore) MarkEdgeLive(chanID uint64) error {
×
UNCOV
3625
        c.cacheMu.Lock()
×
UNCOV
3626
        defer c.cacheMu.Unlock()
×
UNCOV
3627

×
UNCOV
3628
        return c.markEdgeLiveUnsafe(nil, chanID)
×
3629
}
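
// exampleZombieLifecycle is a usage sketch of the zombie methods above,
// assuming only their signatures; the channel ID and pubkeys are
// placeholders supplied by the caller.
func exampleZombieLifecycle(c *KVStore, chanID uint64,
        pub1, pub2 [33]byte) error {

        // Mark the channel as a zombie outside the normal pruning cycle.
        if err := c.MarkEdgeZombie(chanID, pub1, pub2); err != nil {
                return err
        }

        // The zombie index now reports the edge along with both pubkeys.
        if isZombie, _, _ := c.IsZombieEdge(chanID); !isZombie {
                return fmt.Errorf("expected chan %d to be a zombie", chanID)
        }

        // A sufficiently fresh update would clear it again via MarkEdgeLive.
        return c.MarkEdgeLive(chanID)
}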
×
3630

3631
// markEdgeLiveUnsafe clears an edge from the zombie index. This method can be
3632
// called with an existing kvdb.RwTx or the argument can be set to nil in which
3633
// case a new transaction will be created.
3634
//
3635
// NOTE: this method MUST only be called if the cacheMu has already been
3636
// acquired.
UNCOV
3637
func (c *KVStore) markEdgeLiveUnsafe(tx kvdb.RwTx, chanID uint64) error {
×
UNCOV
3638
        dbFn := func(tx kvdb.RwTx) error {
×
UNCOV
3639
                edges := tx.ReadWriteBucket(edgeBucket)
×
UNCOV
3640
                if edges == nil {
×
UNCOV
3641
                        return ErrGraphNoEdgesFound
×
3642
                }
×
3643
                zombieIndex := edges.NestedReadWriteBucket(zombieBucket)
×
3644
                if zombieIndex == nil {
×
3645
                        return nil
×
3646
                }
×
3647

3648
                var k [8]byte
×
3649
                byteOrder.PutUint64(k[:], chanID)
×
3650

×
3651
                if len(zombieIndex.Get(k[:])) == 0 {
×
UNCOV
3652
                        return ErrZombieEdgeNotFound
×
3653
                }
×
3654

3655
                return zombieIndex.Delete(k[:])
×
3656
        }
3657

3658
        // If the transaction is nil, we'll create a new one. Otherwise, we use
3659
        // the existing transaction
3660
        var err error
×
UNCOV
3661
        if tx == nil {
×
UNCOV
3662
                err = kvdb.Update(c.db, dbFn, func() {})
×
UNCOV
3663
        } else {
×
UNCOV
3664
                err = dbFn(tx)
×
3665
        }
×
3666
        if err != nil {
×
3667
                return err
×
3668
        }
×
3669

3670
        c.rejectCache.remove(chanID)
×
3671
        c.chanCache.remove(chanID)
×
3672

×
3673
        return nil
×
3674
}
3675

3676
// IsZombieEdge returns whether the edge is considered zombie. If it is a
// zombie, then the two node public keys corresponding to this edge are also
// returned.
func (c *KVStore) IsZombieEdge(chanID uint64) (bool, [33]byte, [33]byte) {
        var (
                isZombie         bool
                pubKey1, pubKey2 [33]byte
        )

        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
                edges := tx.ReadBucket(edgeBucket)
                if edges == nil {
                        return ErrGraphNoEdgesFound
                }
                zombieIndex := edges.NestedReadBucket(zombieBucket)
                if zombieIndex == nil {
                        return nil
                }

                isZombie, pubKey1, pubKey2 = isZombieEdge(zombieIndex, chanID)

                return nil
        }, func() {
                isZombie = false
                pubKey1 = [33]byte{}
                pubKey2 = [33]byte{}
        })
        if err != nil {
                return false, [33]byte{}, [33]byte{}
        }

        return isZombie, pubKey1, pubKey2
}

// isZombieEdge returns whether an entry exists for the given channel in the
// zombie index. If an entry exists, then the two node public keys corresponding
// to this edge are also returned.
func isZombieEdge(zombieIndex kvdb.RBucket,
        chanID uint64) (bool, [33]byte, [33]byte) {

        var k [8]byte
        byteOrder.PutUint64(k[:], chanID)

        v := zombieIndex.Get(k[:])
        if v == nil {
                return false, [33]byte{}, [33]byte{}
        }

        var pubKey1, pubKey2 [33]byte
        copy(pubKey1[:], v[:33])
        copy(pubKey2[:], v[33:])

        return true, pubKey1, pubKey2
}

// NumZombies returns the current number of zombie channels in the graph.
func (c *KVStore) NumZombies() (uint64, error) {
        var numZombies uint64
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
                edges := tx.ReadBucket(edgeBucket)
                if edges == nil {
                        return nil
                }
                zombieIndex := edges.NestedReadBucket(zombieBucket)
                if zombieIndex == nil {
                        return nil
                }

                return zombieIndex.ForEach(func(_, _ []byte) error {
                        numZombies++
                        return nil
                })
        }, func() {
                numZombies = 0
        })
        if err != nil {
                return 0, err
        }

        return numZombies, nil
}

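// Editor's note: the following function is an illustrative sketch and is NOT
// part of the original kv_store.go. It shows how a caller might drive the
// zombie-index API above: mark a channel as a zombie, confirm the entry, and
// then resurrect it. The chanID and key values passed in are hypothetical
// placeholders chosen by the caller.
func exampleZombieLifecycle(store *KVStore, chanID uint64,
        pubKey1, pubKey2 [33]byte) error {

        // Mark the edge as a zombie outside the normal pruning cycle.
        if err := store.MarkEdgeZombie(chanID, pubKey1, pubKey2); err != nil {
                return err
        }

        // The zombie index should now report the edge, along with the two
        // node keys that were stored alongside it.
        isZombie, _, _ := store.IsZombieEdge(chanID)
        if !isZombie {
                return fmt.Errorf("expected channel %d to be a zombie", chanID)
        }

        // Clear the zombie entry again, deeming the edge live.
        return store.MarkEdgeLive(chanID)
}
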
// PutClosedScid stores a SCID for a closed channel in the database. This is so
// that we can ignore channel announcements that we know to be closed without
// having to validate them and fetch a block.
func (c *KVStore) PutClosedScid(scid lnwire.ShortChannelID) error {
        return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
                closedScids, err := tx.CreateTopLevelBucket(closedScidBucket)
                if err != nil {
                        return err
                }

                var k [8]byte
                byteOrder.PutUint64(k[:], scid.ToUint64())

                return closedScids.Put(k[:], []byte{})
        }, func() {})
}

// IsClosedScid checks whether a channel identified by the passed in scid is
// closed. This helps avoid having to perform expensive validation checks.
// TODO: Add an LRU cache to cut down on disc reads.
func (c *KVStore) IsClosedScid(scid lnwire.ShortChannelID) (bool, error) {
        var isClosed bool
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
                closedScids := tx.ReadBucket(closedScidBucket)
                if closedScids == nil {
                        return ErrClosedScidsNotFound
                }

                var k [8]byte
                byteOrder.PutUint64(k[:], scid.ToUint64())

                if closedScids.Get(k[:]) != nil {
                        isClosed = true
                        return nil
                }

                return nil
        }, func() {
                isClosed = false
        })
        if err != nil {
                return false, err
        }

        return isClosed, nil
}

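// Editor's note: illustrative sketch, NOT part of the original file. It records
// a closed SCID and immediately checks it again, mirroring how a caller can
// skip expensive announcement validation for channels it already knows to be
// closed. The scid value is supplied by the caller and is a placeholder here.
func exampleClosedScid(store *KVStore,
        scid lnwire.ShortChannelID) (bool, error) {

        if err := store.PutClosedScid(scid); err != nil {
                return false, err
        }

        // IsClosedScid should now report true for the same SCID.
        return store.IsClosedScid(scid)
}
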
// GraphSession will provide the call-back with access to a NodeTraverser
// instance which can be used to perform queries against the channel graph.
func (c *KVStore) GraphSession(cb func(graph NodeTraverser) error) error {
        return c.db.View(func(tx walletdb.ReadTx) error {
                return cb(&nodeTraverserSession{
                        db: c,
                        tx: tx,
                })
        }, func() {})
}

// nodeTraverserSession implements the NodeTraverser interface but with a
// backing read only transaction for a consistent view of the graph.
type nodeTraverserSession struct {
        tx kvdb.RTx
        db *KVStore
}

// ForEachNodeDirectedChannel calls the callback for every channel of the given
// node.
//
// NOTE: Part of the NodeTraverser interface.
func (c *nodeTraverserSession) ForEachNodeDirectedChannel(nodePub route.Vertex,
        cb func(channel *DirectedChannel) error) error {

        return c.db.forEachNodeDirectedChannel(c.tx, nodePub, cb)
}

// FetchNodeFeatures returns the features of the given node. If the node is
// unknown, assume no additional features are supported.
//
// NOTE: Part of the NodeTraverser interface.
func (c *nodeTraverserSession) FetchNodeFeatures(nodePub route.Vertex) (
        *lnwire.FeatureVector, error) {

        return c.db.fetchNodeFeatures(c.tx, nodePub)
}

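// Editor's note: illustrative sketch, NOT part of the original file. It uses
// GraphSession to run two queries against a single consistent view of the
// graph via the NodeTraverser methods implemented above. The node public key
// is a hypothetical placeholder supplied by the caller.
func exampleGraphSession(store *KVStore, nodePub route.Vertex) error {
        return store.GraphSession(func(graph NodeTraverser) error {
                // Both calls below are served by the same backing read
                // transaction.
                features, err := graph.FetchNodeFeatures(nodePub)
                if err != nil {
                        return err
                }
                _ = features

                return graph.ForEachNodeDirectedChannel(
                        nodePub, func(channel *DirectedChannel) error {
                                // Inspect each channel of the node here.
                                return nil
                        },
                )
        })
}
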
func putLightningNode(nodeBucket, aliasBucket, updateIndex kvdb.RwBucket,
        node *models.LightningNode) error {

        var (
                scratch [16]byte
                b       bytes.Buffer
        )

        pub, err := node.PubKey()
        if err != nil {
                return err
        }
        nodePub := pub.SerializeCompressed()

        // If the node has the update time set, write it, else write 0.
        updateUnix := uint64(0)
        if node.LastUpdate.Unix() > 0 {
                updateUnix = uint64(node.LastUpdate.Unix())
        }

        byteOrder.PutUint64(scratch[:8], updateUnix)
        if _, err := b.Write(scratch[:8]); err != nil {
                return err
        }

        if _, err := b.Write(nodePub); err != nil {
                return err
        }

        // If we got a node announcement for this node, we will have the rest
        // of the data available. If not, we don't have more data to write.
        if !node.HaveNodeAnnouncement {
                // Write HaveNodeAnnouncement=0.
                byteOrder.PutUint16(scratch[:2], 0)
                if _, err := b.Write(scratch[:2]); err != nil {
                        return err
                }

                return nodeBucket.Put(nodePub, b.Bytes())
        }

        // Write HaveNodeAnnouncement=1.
        byteOrder.PutUint16(scratch[:2], 1)
        if _, err := b.Write(scratch[:2]); err != nil {
                return err
        }

        if err := binary.Write(&b, byteOrder, node.Color.R); err != nil {
                return err
        }
        if err := binary.Write(&b, byteOrder, node.Color.G); err != nil {
                return err
        }
        if err := binary.Write(&b, byteOrder, node.Color.B); err != nil {
                return err
        }

        if err := wire.WriteVarString(&b, 0, node.Alias); err != nil {
                return err
        }

        if err := node.Features.Encode(&b); err != nil {
                return err
        }

        numAddresses := uint16(len(node.Addresses))
        byteOrder.PutUint16(scratch[:2], numAddresses)
        if _, err := b.Write(scratch[:2]); err != nil {
                return err
        }

        for _, address := range node.Addresses {
                if err := SerializeAddr(&b, address); err != nil {
                        return err
                }
        }

        sigLen := len(node.AuthSigBytes)
        if sigLen > 80 {
                return fmt.Errorf("max sig len allowed is 80, had %v",
                        sigLen)
        }

        err = wire.WriteVarBytes(&b, 0, node.AuthSigBytes)
        if err != nil {
                return err
        }

        if len(node.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes {
                return ErrTooManyExtraOpaqueBytes(len(node.ExtraOpaqueData))
        }
        err = wire.WriteVarBytes(&b, 0, node.ExtraOpaqueData)
        if err != nil {
                return err
        }

        if err := aliasBucket.Put(nodePub, []byte(node.Alias)); err != nil {
                return err
        }

        // With the alias bucket updated, we'll now update the index that
        // tracks the time series of node updates.
        var indexKey [8 + 33]byte
        byteOrder.PutUint64(indexKey[:8], updateUnix)
        copy(indexKey[8:], nodePub)

        // If there was already an old index entry for this node, then we'll
        // delete the old one before we write the new entry.
        if nodeBytes := nodeBucket.Get(nodePub); nodeBytes != nil {
                // Extract out the old update time so we can reconstruct the
                // prior index key to delete it from the index.
                oldUpdateTime := nodeBytes[:8]

                var oldIndexKey [8 + 33]byte
                copy(oldIndexKey[:8], oldUpdateTime)
                copy(oldIndexKey[8:], nodePub)

                if err := updateIndex.Delete(oldIndexKey[:]); err != nil {
                        return err
                }
        }

        if err := updateIndex.Put(indexKey[:], nil); err != nil {
                return err
        }

        return nodeBucket.Put(nodePub, b.Bytes())
}

func fetchLightningNode(nodeBucket kvdb.RBucket,
        nodePub []byte) (models.LightningNode, error) {

        nodeBytes := nodeBucket.Get(nodePub)
        if nodeBytes == nil {
                return models.LightningNode{}, ErrGraphNodeNotFound
        }

        nodeReader := bytes.NewReader(nodeBytes)

        return deserializeLightningNode(nodeReader)
}

func deserializeLightningNodeCacheable(r io.Reader) (route.Vertex,
        *lnwire.FeatureVector, error) {

        var (
                pubKey      route.Vertex
                features    = lnwire.EmptyFeatureVector()
                nodeScratch [8]byte
        )

        // Skip ahead:
        // - LastUpdate (8 bytes)
        if _, err := r.Read(nodeScratch[:]); err != nil {
                return pubKey, nil, err
        }

        if _, err := io.ReadFull(r, pubKey[:]); err != nil {
                return pubKey, nil, err
        }

        // Read the node announcement flag.
        if _, err := r.Read(nodeScratch[:2]); err != nil {
                return pubKey, nil, err
        }
        hasNodeAnn := byteOrder.Uint16(nodeScratch[:2])

        // The rest of the data is optional, and will only be there if we got a
        // node announcement for this node.
        if hasNodeAnn == 0 {
                return pubKey, features, nil
        }

        // We did get a node announcement for this node, so we'll have the rest
        // of the data available.
        var rgb uint8
        if err := binary.Read(r, byteOrder, &rgb); err != nil {
                return pubKey, nil, err
        }
        if err := binary.Read(r, byteOrder, &rgb); err != nil {
                return pubKey, nil, err
        }
        if err := binary.Read(r, byteOrder, &rgb); err != nil {
                return pubKey, nil, err
        }

        if _, err := wire.ReadVarString(r, 0); err != nil {
                return pubKey, nil, err
        }

        if err := features.Decode(r); err != nil {
                return pubKey, nil, err
        }

        return pubKey, features, nil
}

func deserializeLightningNode(r io.Reader) (models.LightningNode, error) {
        var (
                node    models.LightningNode
                scratch [8]byte
                err     error
        )

        // Always populate a feature vector, even if we don't have a node
        // announcement and short circuit below.
        node.Features = lnwire.EmptyFeatureVector()

        if _, err := r.Read(scratch[:]); err != nil {
                return models.LightningNode{}, err
        }

        unix := int64(byteOrder.Uint64(scratch[:]))
        node.LastUpdate = time.Unix(unix, 0)

        if _, err := io.ReadFull(r, node.PubKeyBytes[:]); err != nil {
                return models.LightningNode{}, err
        }

        if _, err := r.Read(scratch[:2]); err != nil {
                return models.LightningNode{}, err
        }

        hasNodeAnn := byteOrder.Uint16(scratch[:2])
        if hasNodeAnn == 1 {
                node.HaveNodeAnnouncement = true
        } else {
                node.HaveNodeAnnouncement = false
        }

        // The rest of the data is optional, and will only be there if we got a
        // node announcement for this node.
        if !node.HaveNodeAnnouncement {
                return node, nil
        }

        // We did get a node announcement for this node, so we'll have the rest
        // of the data available.
        if err := binary.Read(r, byteOrder, &node.Color.R); err != nil {
                return models.LightningNode{}, err
        }
        if err := binary.Read(r, byteOrder, &node.Color.G); err != nil {
                return models.LightningNode{}, err
        }
        if err := binary.Read(r, byteOrder, &node.Color.B); err != nil {
                return models.LightningNode{}, err
        }

        node.Alias, err = wire.ReadVarString(r, 0)
        if err != nil {
                return models.LightningNode{}, err
        }

        err = node.Features.Decode(r)
        if err != nil {
                return models.LightningNode{}, err
        }

        if _, err := r.Read(scratch[:2]); err != nil {
                return models.LightningNode{}, err
        }
        numAddresses := int(byteOrder.Uint16(scratch[:2]))

        var addresses []net.Addr
        for i := 0; i < numAddresses; i++ {
                address, err := DeserializeAddr(r)
                if err != nil {
                        return models.LightningNode{}, err
                }
                addresses = append(addresses, address)
        }
        node.Addresses = addresses

        node.AuthSigBytes, err = wire.ReadVarBytes(r, 0, 80, "sig")
        if err != nil {
                return models.LightningNode{}, err
        }

        // We'll try and see if there are any opaque bytes left, if not, then
        // we'll ignore the EOF error and return the node as is.
        extraBytes, err := wire.ReadVarBytes(
                r, 0, MaxAllowedExtraOpaqueBytes, "blob",
        )
        switch {
        case errors.Is(err, io.ErrUnexpectedEOF):
        case errors.Is(err, io.EOF):
        case err != nil:
                return models.LightningNode{}, err
        }

        if len(extraBytes) > 0 {
                node.ExtraOpaqueData = extraBytes
        }

        return node, nil
}

func putChanEdgeInfo(edgeIndex kvdb.RwBucket,
        edgeInfo *models.ChannelEdgeInfo, chanID [8]byte) error {

        var b bytes.Buffer

        if _, err := b.Write(edgeInfo.NodeKey1Bytes[:]); err != nil {
                return err
        }
        if _, err := b.Write(edgeInfo.NodeKey2Bytes[:]); err != nil {
                return err
        }
        if _, err := b.Write(edgeInfo.BitcoinKey1Bytes[:]); err != nil {
                return err
        }
        if _, err := b.Write(edgeInfo.BitcoinKey2Bytes[:]); err != nil {
                return err
        }

        if err := wire.WriteVarBytes(&b, 0, edgeInfo.Features); err != nil {
                return err
        }

        authProof := edgeInfo.AuthProof
        var nodeSig1, nodeSig2, bitcoinSig1, bitcoinSig2 []byte
        if authProof != nil {
                nodeSig1 = authProof.NodeSig1Bytes
                nodeSig2 = authProof.NodeSig2Bytes
                bitcoinSig1 = authProof.BitcoinSig1Bytes
                bitcoinSig2 = authProof.BitcoinSig2Bytes
        }

        if err := wire.WriteVarBytes(&b, 0, nodeSig1); err != nil {
                return err
        }
        if err := wire.WriteVarBytes(&b, 0, nodeSig2); err != nil {
                return err
        }
        if err := wire.WriteVarBytes(&b, 0, bitcoinSig1); err != nil {
                return err
        }
        if err := wire.WriteVarBytes(&b, 0, bitcoinSig2); err != nil {
                return err
        }

        if err := WriteOutpoint(&b, &edgeInfo.ChannelPoint); err != nil {
                return err
        }
        err := binary.Write(&b, byteOrder, uint64(edgeInfo.Capacity))
        if err != nil {
                return err
        }
        if _, err := b.Write(chanID[:]); err != nil {
                return err
        }
        if _, err := b.Write(edgeInfo.ChainHash[:]); err != nil {
                return err
        }

        if len(edgeInfo.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes {
                return ErrTooManyExtraOpaqueBytes(len(edgeInfo.ExtraOpaqueData))
        }
        err = wire.WriteVarBytes(&b, 0, edgeInfo.ExtraOpaqueData)
        if err != nil {
                return err
        }

        return edgeIndex.Put(chanID[:], b.Bytes())
}

func fetchChanEdgeInfo(edgeIndex kvdb.RBucket,
        chanID []byte) (models.ChannelEdgeInfo, error) {

        edgeInfoBytes := edgeIndex.Get(chanID)
        if edgeInfoBytes == nil {
                return models.ChannelEdgeInfo{}, ErrEdgeNotFound
        }

        edgeInfoReader := bytes.NewReader(edgeInfoBytes)

        return deserializeChanEdgeInfo(edgeInfoReader)
}

func deserializeChanEdgeInfo(r io.Reader) (models.ChannelEdgeInfo, error) {
        var (
                err      error
                edgeInfo models.ChannelEdgeInfo
        )

        if _, err := io.ReadFull(r, edgeInfo.NodeKey1Bytes[:]); err != nil {
                return models.ChannelEdgeInfo{}, err
        }
        if _, err := io.ReadFull(r, edgeInfo.NodeKey2Bytes[:]); err != nil {
                return models.ChannelEdgeInfo{}, err
        }
        if _, err := io.ReadFull(r, edgeInfo.BitcoinKey1Bytes[:]); err != nil {
                return models.ChannelEdgeInfo{}, err
        }
        if _, err := io.ReadFull(r, edgeInfo.BitcoinKey2Bytes[:]); err != nil {
                return models.ChannelEdgeInfo{}, err
        }

        edgeInfo.Features, err = wire.ReadVarBytes(r, 0, 900, "features")
        if err != nil {
                return models.ChannelEdgeInfo{}, err
        }

        proof := &models.ChannelAuthProof{}

        proof.NodeSig1Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
        if err != nil {
                return models.ChannelEdgeInfo{}, err
        }
        proof.NodeSig2Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
        if err != nil {
                return models.ChannelEdgeInfo{}, err
        }
        proof.BitcoinSig1Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
        if err != nil {
                return models.ChannelEdgeInfo{}, err
        }
        proof.BitcoinSig2Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
        if err != nil {
                return models.ChannelEdgeInfo{}, err
        }

        if !proof.IsEmpty() {
                edgeInfo.AuthProof = proof
        }

        edgeInfo.ChannelPoint = wire.OutPoint{}
        if err := ReadOutpoint(r, &edgeInfo.ChannelPoint); err != nil {
                return models.ChannelEdgeInfo{}, err
        }
        if err := binary.Read(r, byteOrder, &edgeInfo.Capacity); err != nil {
                return models.ChannelEdgeInfo{}, err
        }
        if err := binary.Read(r, byteOrder, &edgeInfo.ChannelID); err != nil {
                return models.ChannelEdgeInfo{}, err
        }

        if _, err := io.ReadFull(r, edgeInfo.ChainHash[:]); err != nil {
                return models.ChannelEdgeInfo{}, err
        }

        // We'll try and see if there are any opaque bytes left, if not, then
        // we'll ignore the EOF error and return the edge as is.
        edgeInfo.ExtraOpaqueData, err = wire.ReadVarBytes(
                r, 0, MaxAllowedExtraOpaqueBytes, "blob",
        )
        switch {
        case errors.Is(err, io.ErrUnexpectedEOF):
        case errors.Is(err, io.EOF):
        case err != nil:
                return models.ChannelEdgeInfo{}, err
        }

        return edgeInfo, nil
}

func putChanEdgePolicy(edges kvdb.RwBucket, edge *models.ChannelEdgePolicy,
        from, to []byte) error {

        var edgeKey [33 + 8]byte
        copy(edgeKey[:], from)
        byteOrder.PutUint64(edgeKey[33:], edge.ChannelID)

        var b bytes.Buffer
        if err := serializeChanEdgePolicy(&b, edge, to); err != nil {
                return err
        }

        // Before we write out the new edge, we'll create a new entry in the
        // update index in order to keep it fresh.
        updateUnix := uint64(edge.LastUpdate.Unix())
        var indexKey [8 + 8]byte
        byteOrder.PutUint64(indexKey[:8], updateUnix)
        byteOrder.PutUint64(indexKey[8:], edge.ChannelID)

        updateIndex, err := edges.CreateBucketIfNotExists(edgeUpdateIndexBucket)
        if err != nil {
                return err
        }

        // If there was already an entry for this edge, then we'll need to
        // delete the old one to ensure we don't leave around any after-images.
        // An unknown policy value does not have an update time recorded, so
        // it also does not need to be removed.
        if edgeBytes := edges.Get(edgeKey[:]); edgeBytes != nil &&
                !bytes.Equal(edgeBytes, unknownPolicy) {

                // In order to delete the old entry, we'll need to obtain the
                // *prior* update time in order to delete it. To do this, we'll
                // need to deserialize the existing policy within the database
                // (now outdated by the new one), and delete its corresponding
                // entry within the update index. We'll ignore any
                // ErrEdgePolicyOptionalFieldNotFound or ErrParsingExtraTLVBytes
                // errors, as we only need the channel ID and update time to
                // delete the entry.
                //
                // TODO(halseth): get rid of these invalid policies in a
                // migration.
                // TODO(elle): complete the above TODO in migration from kvdb
                // to SQL.
                oldEdgePolicy, err := deserializeChanEdgePolicy(
                        bytes.NewReader(edgeBytes),
                )
                if err != nil &&
                        !errors.Is(err, ErrEdgePolicyOptionalFieldNotFound) &&
                        !errors.Is(err, ErrParsingExtraTLVBytes) {

                        return err
                }

                oldUpdateTime := uint64(oldEdgePolicy.LastUpdate.Unix())

                var oldIndexKey [8 + 8]byte
                byteOrder.PutUint64(oldIndexKey[:8], oldUpdateTime)
                byteOrder.PutUint64(oldIndexKey[8:], edge.ChannelID)

                if err := updateIndex.Delete(oldIndexKey[:]); err != nil {
                        return err
                }
        }

        if err := updateIndex.Put(indexKey[:], nil); err != nil {
                return err
        }

        err = updateEdgePolicyDisabledIndex(
                edges, edge.ChannelID,
                edge.ChannelFlags&lnwire.ChanUpdateDirection > 0,
                edge.IsDisabled(),
        )
        if err != nil {
                return err
        }

        return edges.Put(edgeKey[:], b.Bytes())
}

// updateEdgePolicyDisabledIndex is used to update the disabledEdgePolicyIndex
// bucket by either adding a new disabled ChannelEdgePolicy or removing an
// existing one.
// The direction represents the direction of the edge and disabled is used for
// deciding whether to remove or add an entry to the bucket.
// In general a channel is disabled if two entries for the same chanID exist
// in this bucket.
// Maintaining the bucket this way allows a fast retrieval of disabled
// channels, for example when pruning is needed.
func updateEdgePolicyDisabledIndex(edges kvdb.RwBucket, chanID uint64,
        direction bool, disabled bool) error {

        var disabledEdgeKey [8 + 1]byte
        byteOrder.PutUint64(disabledEdgeKey[0:], chanID)
        if direction {
                disabledEdgeKey[8] = 1
        }

        disabledEdgePolicyIndex, err := edges.CreateBucketIfNotExists(
                disabledEdgePolicyBucket,
        )
        if err != nil {
                return err
        }

        if disabled {
                return disabledEdgePolicyIndex.Put(disabledEdgeKey[:], []byte{})
        }

        return disabledEdgePolicyIndex.Delete(disabledEdgeKey[:])
}

// putChanEdgePolicyUnknown marks the edge policy as unknown
// in the edges bucket.
func putChanEdgePolicyUnknown(edges kvdb.RwBucket, channelID uint64,
        from []byte) error {

        var edgeKey [33 + 8]byte
        copy(edgeKey[:], from)
        byteOrder.PutUint64(edgeKey[33:], channelID)

        if edges.Get(edgeKey[:]) != nil {
                return fmt.Errorf("cannot write unknown policy for channel %v "+
                        " when there is already a policy present", channelID)
        }

        return edges.Put(edgeKey[:], unknownPolicy)
}

func fetchChanEdgePolicy(edges kvdb.RBucket, chanID []byte,
        nodePub []byte) (*models.ChannelEdgePolicy, error) {

        var edgeKey [33 + 8]byte
        copy(edgeKey[:], nodePub)
        copy(edgeKey[33:], chanID)

        edgeBytes := edges.Get(edgeKey[:])
        if edgeBytes == nil {
                return nil, ErrEdgeNotFound
        }

        // No need to deserialize unknown policy.
        if bytes.Equal(edgeBytes, unknownPolicy) {
                return nil, nil
        }

        edgeReader := bytes.NewReader(edgeBytes)

        ep, err := deserializeChanEdgePolicy(edgeReader)
        switch {
        // If the db policy was missing an expected optional field, we return
        // nil as if the policy was unknown.
        case errors.Is(err, ErrEdgePolicyOptionalFieldNotFound):
                return nil, nil

        // If the policy contains invalid TLV bytes, we return nil as if
        // the policy was unknown.
        case errors.Is(err, ErrParsingExtraTLVBytes):
                return nil, nil

        case err != nil:
                return nil, err
        }

        return ep, nil
}

func fetchChanEdgePolicies(edgeIndex kvdb.RBucket, edges kvdb.RBucket,
        chanID []byte) (*models.ChannelEdgePolicy, *models.ChannelEdgePolicy,
        error) {

        edgeInfo := edgeIndex.Get(chanID)
        if edgeInfo == nil {
                return nil, nil, fmt.Errorf("%w: chanID=%x", ErrEdgeNotFound,
                        chanID)
        }

        // The first node is contained within the first half of the edge
        // information. We only propagate the error here and below if it's
        // something other than edge non-existence.
        node1Pub := edgeInfo[:33]
        edge1, err := fetchChanEdgePolicy(edges, chanID, node1Pub)
        if err != nil {
                return nil, nil, fmt.Errorf("%w: node1Pub=%x", ErrEdgeNotFound,
                        node1Pub)
        }

        // Similarly, the second node is contained within the latter
        // half of the edge information.
        node2Pub := edgeInfo[33:66]
        edge2, err := fetchChanEdgePolicy(edges, chanID, node2Pub)
        if err != nil {
                return nil, nil, fmt.Errorf("%w: node2Pub=%x", ErrEdgeNotFound,
                        node2Pub)
        }

        return edge1, edge2, nil
}

func serializeChanEdgePolicy(w io.Writer, edge *models.ChannelEdgePolicy,
        to []byte) error {

        err := wire.WriteVarBytes(w, 0, edge.SigBytes)
        if err != nil {
                return err
        }

        if err := binary.Write(w, byteOrder, edge.ChannelID); err != nil {
                return err
        }

        var scratch [8]byte
        updateUnix := uint64(edge.LastUpdate.Unix())
        byteOrder.PutUint64(scratch[:], updateUnix)
        if _, err := w.Write(scratch[:]); err != nil {
                return err
        }

        if err := binary.Write(w, byteOrder, edge.MessageFlags); err != nil {
                return err
        }
        if err := binary.Write(w, byteOrder, edge.ChannelFlags); err != nil {
                return err
        }
        if err := binary.Write(w, byteOrder, edge.TimeLockDelta); err != nil {
                return err
        }
        if err := binary.Write(w, byteOrder, uint64(edge.MinHTLC)); err != nil {
                return err
        }
        err = binary.Write(w, byteOrder, uint64(edge.FeeBaseMSat))
        if err != nil {
                return err
        }
        err = binary.Write(
                w, byteOrder, uint64(edge.FeeProportionalMillionths),
        )
        if err != nil {
                return err
        }

        if _, err := w.Write(to); err != nil {
                return err
        }

        // If the max_htlc field is present, we write it. To be compatible with
        // older versions that weren't aware of this field, we write it as part
        // of the opaque data.
        // TODO(halseth): clean up when moving to TLV.
        var opaqueBuf bytes.Buffer
        if edge.MessageFlags.HasMaxHtlc() {
                err := binary.Write(&opaqueBuf, byteOrder, uint64(edge.MaxHTLC))
                if err != nil {
                        return err
                }
        }

        // Validate that the ExtraOpaqueData is in fact a valid TLV stream.
        err = edge.ExtraOpaqueData.ValidateTLV()
        if err != nil {
                return fmt.Errorf("%w: %v", ErrParsingExtraTLVBytes, err)
        }

        if len(edge.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes {
                return ErrTooManyExtraOpaqueBytes(len(edge.ExtraOpaqueData))
        }
        if _, err := opaqueBuf.Write(edge.ExtraOpaqueData); err != nil {
                return err
        }

        if err := wire.WriteVarBytes(w, 0, opaqueBuf.Bytes()); err != nil {
                return err
        }

        return nil
}

func deserializeChanEdgePolicy(r io.Reader) (*models.ChannelEdgePolicy, error) {
        // Deserialize the policy. Note that in case an optional field is not
        // found or if the edge has invalid TLV data, then both an error and a
        // populated policy object are returned so that the caller can decide
        // if it still wants to use the edge or not.
        edge, err := deserializeChanEdgePolicyRaw(r)
        if err != nil &&
                !errors.Is(err, ErrEdgePolicyOptionalFieldNotFound) &&
                !errors.Is(err, ErrParsingExtraTLVBytes) {

                return nil, err
        }

        return edge, err
}

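// Editor's note: illustrative sketch, NOT part of the original file. It
// round-trips a policy through serializeChanEdgePolicy and
// deserializeChanEdgePolicy using an in-memory buffer. All field values below
// are hypothetical placeholders (a real policy carries a valid signature), and
// lnwire.ChanUpdateRequiredMaxHtlc is assumed to be the message-flag bit that
// makes HasMaxHtlc() return true.
func exampleEdgePolicyRoundTrip(toNode [33]byte) (*models.ChannelEdgePolicy,
        error) {

        policy := &models.ChannelEdgePolicy{
                SigBytes:                  make([]byte, 64),
                ChannelID:                 12345,
                LastUpdate:                time.Unix(1700000000, 0),
                MessageFlags:              lnwire.ChanUpdateRequiredMaxHtlc,
                TimeLockDelta:             144,
                MinHTLC:                   1000,
                MaxHTLC:                   100000000,
                FeeBaseMSat:               1000,
                FeeProportionalMillionths: 1,
        }

        // Serialize into a buffer, as putChanEdgePolicy does before writing
        // the bytes into the edges bucket.
        var b bytes.Buffer
        if err := serializeChanEdgePolicy(&b, policy, toNode[:]); err != nil {
                return nil, err
        }

        // Deserializing the same bytes should yield an equivalent policy,
        // including the max_htlc value recovered from the opaque data.
        return deserializeChanEdgePolicy(&b)
}
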
func deserializeChanEdgePolicyRaw(r io.Reader) (*models.ChannelEdgePolicy,
        error) {

        edge := &models.ChannelEdgePolicy{}

        var err error
        edge.SigBytes, err = wire.ReadVarBytes(r, 0, 80, "sig")
        if err != nil {
                return nil, err
        }

        if err := binary.Read(r, byteOrder, &edge.ChannelID); err != nil {
                return nil, err
        }

        var scratch [8]byte
        if _, err := r.Read(scratch[:]); err != nil {
                return nil, err
        }
        unix := int64(byteOrder.Uint64(scratch[:]))
        edge.LastUpdate = time.Unix(unix, 0)

        if err := binary.Read(r, byteOrder, &edge.MessageFlags); err != nil {
                return nil, err
        }
        if err := binary.Read(r, byteOrder, &edge.ChannelFlags); err != nil {
                return nil, err
        }
        if err := binary.Read(r, byteOrder, &edge.TimeLockDelta); err != nil {
                return nil, err
        }

        var n uint64
        if err := binary.Read(r, byteOrder, &n); err != nil {
                return nil, err
        }
        edge.MinHTLC = lnwire.MilliSatoshi(n)

        if err := binary.Read(r, byteOrder, &n); err != nil {
                return nil, err
        }
        edge.FeeBaseMSat = lnwire.MilliSatoshi(n)

        if err := binary.Read(r, byteOrder, &n); err != nil {
                return nil, err
        }
        edge.FeeProportionalMillionths = lnwire.MilliSatoshi(n)

        if _, err := r.Read(edge.ToNode[:]); err != nil {
                return nil, err
        }

        // We'll try and see if there are any opaque bytes left, if not, then
        // we'll ignore the EOF error and return the edge as is.
        edge.ExtraOpaqueData, err = wire.ReadVarBytes(
                r, 0, MaxAllowedExtraOpaqueBytes, "blob",
        )
        switch {
        case errors.Is(err, io.ErrUnexpectedEOF):
        case errors.Is(err, io.EOF):
        case err != nil:
                return nil, err
        }

        // See if optional fields are present.
        if edge.MessageFlags.HasMaxHtlc() {
                // The max_htlc field should be at the beginning of the opaque
                // bytes.
                opq := edge.ExtraOpaqueData

                // If the max_htlc field is not present, it might be old data
                // stored before this field was validated. We'll return the
                // edge along with an error.
                if len(opq) < 8 {
                        return edge, ErrEdgePolicyOptionalFieldNotFound
                }

                maxHtlc := byteOrder.Uint64(opq[:8])
                edge.MaxHTLC = lnwire.MilliSatoshi(maxHtlc)

                // Exclude the parsed field from the rest of the opaque data.
                edge.ExtraOpaqueData = opq[8:]
        }

        // Attempt to extract the inbound fee from the opaque data. If we fail
        // to parse the TLV here, we return an error but also return the edge
        // so that the caller can still use it. This is for backwards
        // compatibility in case we have already persisted some policies that
        // have invalid TLV data.
        var inboundFee lnwire.Fee
        typeMap, err := edge.ExtraOpaqueData.ExtractRecords(&inboundFee)
        if err != nil {
                return edge, fmt.Errorf("%w: %w", ErrParsingExtraTLVBytes, err)
        }

        val, ok := typeMap[lnwire.FeeRecordType]
        if ok && val == nil {
                edge.InboundFee = fn.Some(inboundFee)
        }

        return edge, nil
}

// chanGraphNodeTx is an implementation of the NodeRTx interface backed by the
// KVStore and a kvdb.RTx.
type chanGraphNodeTx struct {
        tx   kvdb.RTx
        db   *KVStore
        node *models.LightningNode
}

// A compile-time constraint to ensure chanGraphNodeTx implements the NodeRTx
// interface.
var _ NodeRTx = (*chanGraphNodeTx)(nil)

func newChanGraphNodeTx(tx kvdb.RTx, db *KVStore,
        node *models.LightningNode) *chanGraphNodeTx {

        return &chanGraphNodeTx{
                tx:   tx,
                db:   db,
                node: node,
        }
}

// Node returns the raw information of the node.
//
// NOTE: This is a part of the NodeRTx interface.
func (c *chanGraphNodeTx) Node() *models.LightningNode {
        return c.node
}

// FetchNode fetches the node with the given pub key under the same transaction
// used to fetch the current node. The returned node is also a NodeRTx and any
// operations on that NodeRTx will also be done under the same transaction.
//
// NOTE: This is a part of the NodeRTx interface.
func (c *chanGraphNodeTx) FetchNode(nodePub route.Vertex) (NodeRTx, error) {
        node, err := c.db.FetchLightningNodeTx(c.tx, nodePub)
        if err != nil {
                return nil, err
        }

        return newChanGraphNodeTx(c.tx, c.db, node), nil
}

// ForEachChannel can be used to iterate over the node's channels under
// the same transaction used to fetch the node.
//
// NOTE: This is a part of the NodeRTx interface.
func (c *chanGraphNodeTx) ForEachChannel(f func(*models.ChannelEdgeInfo,
        *models.ChannelEdgePolicy, *models.ChannelEdgePolicy) error) error {

        return c.db.forEachNodeChannelTx(c.tx, c.node.PubKeyBytes,
                func(_ kvdb.RTx, info *models.ChannelEdgeInfo, policy1,
                        policy2 *models.ChannelEdgePolicy) error {

                        return f(info, policy1, policy2)
                },
        )
}

// MakeTestGraph creates a new instance of the ChannelGraph for testing
// purposes.
//
// NOTE: this helper currently creates a ChannelGraph that is only ever backed
// by the `KVStore` of the `V1Store` interface.
func MakeTestGraph(t testing.TB, opts ...ChanGraphOption) *ChannelGraph {
        t.Helper()

        // Next, create KVStore for the first time.
        backend, backendCleanup, err := kvdb.GetTestBackend(t.TempDir(), "cgr")
        t.Cleanup(backendCleanup)
        require.NoError(t, err)
        t.Cleanup(func() {
                require.NoError(t, backend.Close())
        })

        graphStore, err := NewKVStore(backend)
        require.NoError(t, err)

        graph, err := NewChannelGraph(graphStore, opts...)
        require.NoError(t, err)
        require.NoError(t, graph.Start())
        t.Cleanup(func() {
                require.NoError(t, graph.Stop())
        })

        return graph
}
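
// Editor's note: illustrative sketch, NOT part of the original file. A minimal
// test built on MakeTestGraph; it only exercises construction, and the backend
// plus the started ChannelGraph are torn down automatically by the t.Cleanup
// calls registered inside MakeTestGraph.
func TestExampleMakeTestGraph(t *testing.T) {
        graph := MakeTestGraph(t)
        require.NotNil(t, graph)
}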