lightningnetwork / lnd / 13543384853

26 Feb 2025 11:57AM UTC coverage: 58.865% (+0.03%) from 58.834%

Pull #9551 (github)
Author: ellemouton

graph/db: move cache writes for Prune methods

This commit moves the cache writes for PruneGraphNodes and PruneGraph
from the KVStore to the ChannelGraph.

Pull Request #9551: graph: extract cache from CRUD [4]

113 of 135 new or added lines in 2 files covered. (83.7%)

275 existing lines in 12 files now uncovered.

136522 of 231922 relevant lines covered (58.87%)

19149.25 hits per line

Source File

/graph/db/kv_store.go: 77.43% of lines covered
1
package graphdb
2

3
import (
4
        "bytes"
5
        "crypto/sha256"
6
        "encoding/binary"
7
        "errors"
8
        "fmt"
9
        "io"
10
        "math"
11
        "net"
12
        "sort"
13
        "sync"
14
        "testing"
15
        "time"
16

17
        "github.com/btcsuite/btcd/btcec/v2"
18
        "github.com/btcsuite/btcd/chaincfg/chainhash"
19
        "github.com/btcsuite/btcd/txscript"
20
        "github.com/btcsuite/btcd/wire"
21
        "github.com/btcsuite/btcwallet/walletdb"
22
        "github.com/lightningnetwork/lnd/aliasmgr"
23
        "github.com/lightningnetwork/lnd/batch"
24
        "github.com/lightningnetwork/lnd/graph/db/models"
25
        "github.com/lightningnetwork/lnd/input"
26
        "github.com/lightningnetwork/lnd/kvdb"
27
        "github.com/lightningnetwork/lnd/lnwire"
28
        "github.com/lightningnetwork/lnd/routing/route"
29
)
30

31
var (
32
        // nodeBucket is a bucket which houses all the vertices or nodes within
33
        // the channel graph. This bucket has a single sub-bucket which adds an
34
        // additional index from pubkey -> alias. Within the top-level of this
35
        // bucket, the key space maps a node's compressed public key to the
36
        // serialized information for that node. Additionally, there's a
37
        // special key "source" which stores the pubkey of the source node. The
38
        // source node is used as the starting point for all graph queries and
39
        // traversals. The graph is formed as a star-graph with the source node
40
        // at the center.
41
        //
42
        // maps: pubKey -> nodeInfo
43
        // maps: source -> selfPubKey
44
        nodeBucket = []byte("graph-node")
45

46
        // nodeUpdateIndexBucket is a sub-bucket of the nodeBucket. This bucket
47
        // will be used to quickly look up the "freshness" of a node's last
48
        // update to the network. The bucket only contains keys and no values;
49
        // its mapping is:
50
        //
51
        // maps: updateTime || nodeID -> nil
52
        nodeUpdateIndexBucket = []byte("graph-node-update-index")
53

54
        // sourceKey is a special key that resides within the nodeBucket. The
55
        // sourceKey maps a key to the public key of the "self node".
56
        sourceKey = []byte("source")
57

58
        // aliasIndexBucket is a sub-bucket that's nested within the main
59
        // nodeBucket. This bucket maps the public key of a node to its
60
        // current alias. This bucket is provided as it can be used within a
61
        // future UI layer to add an additional degree of confirmation.
62
        aliasIndexBucket = []byte("alias")
63

64
        // edgeBucket is a bucket which houses all of the edge or channel
65
        // information within the channel graph. This bucket essentially acts
66
        // as an adjacency list, which in conjunction with a range scan, can be
67
        // used to iterate over all the incoming and outgoing edges for a
68
        // particular node. Keys in the bucket use a prefix scheme which leads
69
        // with the node's public key and ends with the compact edge ID.
70
        // For each chanID, there will be two entries within the bucket, as the
71
        // graph is directed: nodes may have different policies w.r.t. fees
72
        // for their respective directions.
73
        //
74
        // maps: pubKey || chanID -> channel edge policy for node
75
        edgeBucket = []byte("graph-edge")
76

77
        // unknownPolicy is represented as an empty slice. It is
78
        // used as the value in edgeBucket for unknown channel edge policies.
79
        // Unknown policies are still stored in the database to enable efficient
80
        // lookup of incoming channel edges.
81
        unknownPolicy = []byte{}
82

83
        // chanStart is an array of all zero bytes which is used to perform
84
        // range scans within the edgeBucket to obtain all of the outgoing
85
        // edges for a particular node.
86
        chanStart [8]byte
87

88
        // edgeIndexBucket is an index which can be used to iterate all edges
89
        // in the bucket, grouping them according to their in/out nodes.
90
        // Additionally, the items in this bucket also contain the complete
91
        // edge information for a channel. The edge information includes the
92
        // capacity of the channel, the nodes that made the channel, etc. This
93
        // bucket resides within the edgeBucket above. Creation of an edge
94
        // proceeds in two phases: first the edge is added to the edge index,
95
        // afterwards the edgeBucket can be updated with the latest details of
96
        // the edge as they are announced on the network.
97
        //
98
        // maps: chanID -> pubKey1 || pubKey2 || restofEdgeInfo
99
        edgeIndexBucket = []byte("edge-index")
100

101
        // edgeUpdateIndexBucket is a sub-bucket of the main edgeBucket. This
102
        // bucket contains an index which allows us to gauge the "freshness" of
103
        // a channel's last updates.
104
        //
105
        // maps: updateTime || chanID -> nil
106
        edgeUpdateIndexBucket = []byte("edge-update-index")
107

108
        // channelPointBucket maps a channel's full outpoint (txid:index) to
109
        // its short 8-byte channel ID. This bucket resides within the
110
        // edgeBucket above, and can be used to quickly remove an edge due to
111
        // the outpoint being spent, or to query for existence of a channel.
112
        //
113
        // maps: outPoint -> chanID
114
        channelPointBucket = []byte("chan-index")
115

116
        // zombieBucket is a sub-bucket of the main edgeBucket bucket
117
        // responsible for maintaining an index of zombie channels. Each entry
118
        // exists within the bucket as follows:
119
        //
120
        // maps: chanID -> pubKey1 || pubKey2
121
        //
122
        // The chanID represents the channel ID of the edge that is marked as a
123
        // zombie and is used as the key, which maps to the public keys of the
124
        // edge's participants.
125
        zombieBucket = []byte("zombie-index")
126

127
        // disabledEdgePolicyBucket is a sub-bucket of the main edgeBucket
128
        // bucket responsible for maintaining an index of disabled edge
129
        // policies. Each entry exists within the bucket as follows:
130
        //
131
        // maps: <chanID><direction> -> []byte{}
132
        //
133
        // The chanID represents the channel ID of the edge and the direction is
134
        // one byte representing the direction of the edge. The main purpose of
135
        // this index is to allow pruning disabled channels in a fast way
136
        // without the need to iterate all over the graph.
137
        disabledEdgePolicyBucket = []byte("disabled-edge-policy-index")
138

139
        // graphMetaBucket is a top-level bucket which stores various metadata
140
        // related to the on-disk channel graph. Data stored in this bucket
141
        // includes the block to which the graph has been synced, the total
142
        // number of channels, etc.
143
        graphMetaBucket = []byte("graph-meta")
144

145
        // pruneLogBucket is a bucket within the graphMetaBucket that stores
146
        // a mapping from the block height to the hash for the blocks used to
147
        // prune the graph.
148
        // Once a new block is discovered, any channels that have been closed
149
        // (by spending the outpoint) can safely be removed from the graph, and
150
        // the block is added to the prune log. We need to keep such a log for
151
        // the case where a reorg happens, and we must "rewind" the state of the
152
        // graph by removing channels that were previously confirmed. In such a
153
        // case we'll remove all entries from the prune log with a block height
154
        // that no longer exists.
155
        pruneLogBucket = []byte("prune-log")
156

157
        // closedScidBucket is a top-level bucket that stores scids for
158
        // channels that we know to be closed. This is used so that we don't
159
        // need to perform expensive validation checks if we receive a channel
160
        // announcement for the channel again.
161
        //
162
        // maps: scid -> []byte{}
163
        closedScidBucket = []byte("closed-scid")
164
)
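The composite edge key described above can be pictured with a small sketch. The helper below is hypothetical and not part of kv_store.go; it simply packs the 33-byte compressed public key followed by the big-endian 8-byte channel ID, the same layout that addChannelEdge and getChannelMap rely on further down.

// edgeKey is a hypothetical helper (not part of this file) that builds the
// pubKey || chanID key layout used within edgeBucket. A range scan starting
// at pubKey || chanStart therefore visits every outgoing edge of that node.
func edgeKey(pubKey route.Vertex, chanID uint64) []byte {
        key := make([]byte, 33+8)
        copy(key[:33], pubKey[:])
        binary.BigEndian.PutUint64(key[33:], chanID)

        return key
}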
165

166
const (
167
        // MaxAllowedExtraOpaqueBytes is the largest amount of opaque bytes that
168
        // we'll permit to be written to disk. We limit this as otherwise, it
169
        // would be possible for a node to create a ton of updates and slowly
170
        // fill our disk, and also waste bandwidth due to relaying.
171
        MaxAllowedExtraOpaqueBytes = 10000
172
)
173

174
// KVStore is a persistent, on-disk graph representation of the Lightning
175
// Network. This struct can be used to implement path finding algorithms on top
176
// of, and also to update a node's view based on information received from the
177
// p2p network. Internally, the graph is stored using a modified adjacency list
178
// representation with some added object interaction possible with each
179
// serialized edge/node. The stored graph is directed, meaning that there are two
180
// edges stored for each channel: an inbound/outbound edge for each node pair.
181
// Nodes, edges, and edge information can all be added to the graph
182
// independently. Edge removal results in the deletion of all edge information
183
// for that edge.
184
type KVStore struct {
185
        db kvdb.Backend
186

187
        // cacheMu guards all caches (rejectCache, chanCache, graphCache). If
188
        // this mutex will be acquired at the same time as the DB mutex then
189
        // the cacheMu MUST be acquired first to prevent deadlock.
190
        cacheMu     sync.RWMutex
191
        rejectCache *rejectCache
192
        chanCache   *channelCache
193
        graphCache  *GraphCache
194

195
        chanScheduler batch.Scheduler
196
        nodeScheduler batch.Scheduler
197
}
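A minimal sketch of the locking order documented on cacheMu; the helper below is hypothetical and not part of the file, and only illustrates the rule that the cache mutex is taken before any database transaction, mirroring what HasChannelEdge does further down.

// updateCachesAndDB is a hypothetical helper illustrating the documented
// lock ordering: cacheMu is acquired first, and only then is the database
// transaction opened, so the two locks can never be taken in opposite order.
func (c *KVStore) updateCachesAndDB(update func(tx kvdb.RwTx) error) error {
        c.cacheMu.Lock()
        defer c.cacheMu.Unlock()

        return kvdb.Update(c.db, update, func() {})
}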
198

199
// NewKVStore allocates a new KVStore backed by a DB instance. The
200
// returned instance has its own unique reject cache and channel cache.
201
func NewKVStore(db kvdb.Backend, options ...KVStoreOptionModifier) (*KVStore,
202
        error) {
176✔
203

176✔
204
        opts := DefaultOptions()
176✔
205
        for _, o := range options {
179✔
206
                o(opts)
3✔
207
        }
3✔
208

209
        if !opts.NoMigration {
352✔
210
                if err := initKVStore(db); err != nil {
176✔
211
                        return nil, err
×
212
                }
×
213
        }
214

215
        g := &KVStore{
176✔
216
                db:          db,
176✔
217
                rejectCache: newRejectCache(opts.RejectCacheSize),
176✔
218
                chanCache:   newChannelCache(opts.ChannelCacheSize),
176✔
219
        }
176✔
220
        g.chanScheduler = batch.NewTimeScheduler(
176✔
221
                db, &g.cacheMu, opts.BatchCommitInterval,
176✔
222
        )
176✔
223
        g.nodeScheduler = batch.NewTimeScheduler(
176✔
224
                db, nil, opts.BatchCommitInterval,
176✔
225
        )
176✔
226

176✔
227
        return g, nil
176✔
228
}
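A hypothetical caller sketch for NewKVStore: backend is assumed to be an already-open kvdb.Backend, and no KVStoreOptionModifier values are passed, so the default reject and channel cache sizes from DefaultOptions apply.

// openGraphStore is a hypothetical caller of NewKVStore shown only for
// illustration; it opens the store with the default cache configuration.
func openGraphStore(backend kvdb.Backend) (*KVStore, error) {
        store, err := NewKVStore(backend)
        if err != nil {
                return nil, fmt.Errorf("unable to open graph KV store: %w", err)
        }

        return store, nil
}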
229

230
// setGraphCache sets the KVStore's graphCache.
231
//
232
// NOTE: this is temporary and will only be called from the ChannelGraph's
233
// constructor before the KVStore methods are available to be called. This will
234
// be removed once the graph cache is fully owned by the ChannelGraph.
235
func (c *KVStore) setGraphCache(cache *GraphCache) {
143✔
236
        c.graphCache = cache
143✔
237
}
143✔
238

239
// channelMapKey is the key structure used for storing channel edge policies.
240
type channelMapKey struct {
241
        nodeKey route.Vertex
242
        chanID  [8]byte
243
}
244

245
// getChannelMap loads all channel edge policies from the database and stores
246
// them in a map.
247
func (c *KVStore) getChannelMap(edges kvdb.RBucket) (
248
        map[channelMapKey]*models.ChannelEdgePolicy, error) {
147✔
249

147✔
250
        // Create a map to store all channel edge policies.
147✔
251
        channelMap := make(map[channelMapKey]*models.ChannelEdgePolicy)
147✔
252

147✔
253
        err := kvdb.ForAll(edges, func(k, edgeBytes []byte) error {
1,721✔
254
                // Skip embedded buckets.
1,574✔
255
                if bytes.Equal(k, edgeIndexBucket) ||
1,574✔
256
                        bytes.Equal(k, edgeUpdateIndexBucket) ||
1,574✔
257
                        bytes.Equal(k, zombieBucket) ||
1,574✔
258
                        bytes.Equal(k, disabledEdgePolicyBucket) ||
1,574✔
259
                        bytes.Equal(k, channelPointBucket) {
2,158✔
260

584✔
261
                        return nil
584✔
262
                }
584✔
263

264
                // Validate key length.
265
                if len(k) != 33+8 {
993✔
266
                        return fmt.Errorf("invalid edge key %x encountered", k)
×
267
                }
×
268

269
                var key channelMapKey
993✔
270
                copy(key.nodeKey[:], k[:33])
993✔
271
                copy(key.chanID[:], k[33:])
993✔
272

993✔
273
                // No need to deserialize unknown policy.
993✔
274
                if bytes.Equal(edgeBytes, unknownPolicy) {
993✔
275
                        return nil
×
276
                }
×
277

278
                edgeReader := bytes.NewReader(edgeBytes)
993✔
279
                edge, err := deserializeChanEdgePolicyRaw(
993✔
280
                        edgeReader,
993✔
281
                )
993✔
282

993✔
283
                switch {
993✔
284
                // If the db policy was missing an expected optional field, we
285
                // return nil as if the policy was unknown.
286
                case errors.Is(err, ErrEdgePolicyOptionalFieldNotFound):
×
287
                        return nil
×
288

289
                case err != nil:
×
290
                        return err
×
291
                }
292

293
                channelMap[key] = edge
993✔
294

993✔
295
                return nil
993✔
296
        })
297
        if err != nil {
147✔
298
                return nil, err
×
299
        }
×
300

301
        return channelMap, nil
147✔
302
}
303

304
var graphTopLevelBuckets = [][]byte{
305
        nodeBucket,
306
        edgeBucket,
307
        graphMetaBucket,
308
        closedScidBucket,
309
}
310

311
// Wipe completely deletes all saved state within all used buckets within the
312
// database. The deletion is done in a single transaction, therefore this
313
// operation is fully atomic.
314
func (c *KVStore) Wipe() error {
×
315
        err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
×
316
                for _, tlb := range graphTopLevelBuckets {
×
317
                        err := tx.DeleteTopLevelBucket(tlb)
×
318
                        if err != nil &&
×
319
                                !errors.Is(err, kvdb.ErrBucketNotFound) {
×
320

×
321
                                return err
×
322
                        }
×
323
                }
324

325
                return nil
×
326
        }, func() {})
×
327
        if err != nil {
×
328
                return err
×
329
        }
×
330

331
        return initKVStore(c.db)
×
332
}
333

334
// initKVStore creates and initializes a fresh version of the graph store. In
335
// the case that the target path has not yet been created or doesn't yet exist,
336
// then the path is created. Additionally, all required top-level buckets used
337
// within the database are created.
338
func initKVStore(db kvdb.Backend) error {
176✔
339
        err := kvdb.Update(db, func(tx kvdb.RwTx) error {
352✔
340
                for _, tlb := range graphTopLevelBuckets {
871✔
341
                        if _, err := tx.CreateTopLevelBucket(tlb); err != nil {
695✔
342
                                return err
×
343
                        }
×
344
                }
345

346
                nodes := tx.ReadWriteBucket(nodeBucket)
176✔
347
                _, err := nodes.CreateBucketIfNotExists(aliasIndexBucket)
176✔
348
                if err != nil {
176✔
349
                        return err
×
350
                }
×
351
                _, err = nodes.CreateBucketIfNotExists(nodeUpdateIndexBucket)
176✔
352
                if err != nil {
176✔
353
                        return err
×
354
                }
×
355

356
                edges := tx.ReadWriteBucket(edgeBucket)
176✔
357
                _, err = edges.CreateBucketIfNotExists(edgeIndexBucket)
176✔
358
                if err != nil {
176✔
359
                        return err
×
360
                }
×
361
                _, err = edges.CreateBucketIfNotExists(edgeUpdateIndexBucket)
176✔
362
                if err != nil {
176✔
363
                        return err
×
364
                }
×
365
                _, err = edges.CreateBucketIfNotExists(channelPointBucket)
176✔
366
                if err != nil {
176✔
367
                        return err
×
368
                }
×
369
                _, err = edges.CreateBucketIfNotExists(zombieBucket)
176✔
370
                if err != nil {
176✔
371
                        return err
×
372
                }
×
373

374
                graphMeta := tx.ReadWriteBucket(graphMetaBucket)
176✔
375
                _, err = graphMeta.CreateBucketIfNotExists(pruneLogBucket)
176✔
376

176✔
377
                return err
176✔
378
        }, func() {})
176✔
379
        if err != nil {
176✔
380
                return fmt.Errorf("unable to create new channel graph: %w", err)
×
381
        }
×
382

383
        return nil
176✔
384
}
385

386
// AddrsForNode returns all known addresses for the target node public key that
387
// the graph DB is aware of. The returned boolean indicates whether the given
388
// node is known to the graph DB.
389
//
390
// NOTE: this is part of the channeldb.AddrSource interface.
391
func (c *KVStore) AddrsForNode(nodePub *btcec.PublicKey) (bool, []net.Addr,
392
        error) {
4✔
393

4✔
394
        pubKey, err := route.NewVertexFromBytes(nodePub.SerializeCompressed())
4✔
395
        if err != nil {
4✔
396
                return false, nil, err
×
397
        }
×
398

399
        node, err := c.FetchLightningNode(pubKey)
4✔
400
        // We don't consider it an error if the graph is unaware of the node.
4✔
401
        switch {
4✔
402
        case err != nil && !errors.Is(err, ErrGraphNodeNotFound):
×
403
                return false, nil, err
×
404

405
        case errors.Is(err, ErrGraphNodeNotFound):
3✔
406
                return false, nil, nil
3✔
407
        }
408

409
        return true, node.Addresses, nil
4✔
410
}
411

412
// ForEachChannel iterates through all the channel edges stored within the
413
// graph and invokes the passed callback for each edge. The callback takes two
414
// edges as since this is a directed graph, both the in/out edges are visited.
415
// If the callback returns an error, then the transaction is aborted and the
416
// iteration stops early.
417
//
418
// NOTE: If an edge can't be found, or wasn't advertised, then a nil pointer
419
// for that particular channel edge routing policy will be passed into the
420
// callback.
421
func (c *KVStore) ForEachChannel(cb func(*models.ChannelEdgeInfo,
422
        *models.ChannelEdgePolicy, *models.ChannelEdgePolicy) error) error {
147✔
423

147✔
424
        return c.db.View(func(tx kvdb.RTx) error {
294✔
425
                edges := tx.ReadBucket(edgeBucket)
147✔
426
                if edges == nil {
147✔
427
                        return ErrGraphNoEdgesFound
×
428
                }
×
429

430
                // First, load all edges in memory indexed by node and channel
431
                // id.
432
                channelMap, err := c.getChannelMap(edges)
147✔
433
                if err != nil {
147✔
434
                        return err
×
435
                }
×
436

437
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
147✔
438
                if edgeIndex == nil {
147✔
439
                        return ErrGraphNoEdgesFound
×
440
                }
×
441

442
                // Load edge index, recombine each channel with the policies
443
                // loaded above and invoke the callback.
444
                return kvdb.ForAll(
147✔
445
                        edgeIndex, func(k, edgeInfoBytes []byte) error {
645✔
446
                                var chanID [8]byte
498✔
447
                                copy(chanID[:], k)
498✔
448

498✔
449
                                edgeInfoReader := bytes.NewReader(edgeInfoBytes)
498✔
450
                                info, err := deserializeChanEdgeInfo(
498✔
451
                                        edgeInfoReader,
498✔
452
                                )
498✔
453
                                if err != nil {
498✔
454
                                        return err
×
455
                                }
×
456

457
                                policy1 := channelMap[channelMapKey{
498✔
458
                                        nodeKey: info.NodeKey1Bytes,
498✔
459
                                        chanID:  chanID,
498✔
460
                                }]
498✔
461

498✔
462
                                policy2 := channelMap[channelMapKey{
498✔
463
                                        nodeKey: info.NodeKey2Bytes,
498✔
464
                                        chanID:  chanID,
498✔
465
                                }]
498✔
466

498✔
467
                                return cb(&info, policy1, policy2)
498✔
468
                        },
469
                )
470
        }, func() {})
147✔
471
}
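A hypothetical caller sketch: count the channels for which both directed policies are already known, treating nil policies as "not yet advertised" per the NOTE above.

// countFullyAdvertised is a hypothetical caller shown only for illustration.
func countFullyAdvertised(store *KVStore) (int, error) {
        var complete int
        err := store.ForEachChannel(func(info *models.ChannelEdgeInfo,
                p1, p2 *models.ChannelEdgePolicy) error {

                if p1 != nil && p2 != nil {
                        complete++
                }

                return nil
        })

        return complete, err
}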
472

473
// forEachNodeDirectedChannel iterates through all channels of a given node,
474
// executing the passed callback on the directed edge representing the channel
475
// and its incoming policy. If the callback returns an error, then the iteration
476
// is halted with the error propagated back up to the caller. An optional read
477
// transaction may be provided. If none is provided, a new one will be created.
478
//
479
// Unknown policies are passed into the callback as nil values.
480
func (c *KVStore) forEachNodeDirectedChannel(tx kvdb.RTx,
481
        node route.Vertex, cb func(channel *DirectedChannel) error) error {
245✔
482

245✔
483
        // Fallback that uses the database.
245✔
484
        toNodeCallback := func() route.Vertex {
380✔
485
                return node
135✔
486
        }
135✔
487
        toNodeFeatures, err := c.fetchNodeFeatures(tx, node)
245✔
488
        if err != nil {
245✔
489
                return err
×
490
        }
×
491

492
        dbCallback := func(tx kvdb.RTx, e *models.ChannelEdgeInfo, p1,
245✔
493
                p2 *models.ChannelEdgePolicy) error {
744✔
494

499✔
495
                var cachedInPolicy *models.CachedEdgePolicy
499✔
496
                if p2 != nil {
995✔
497
                        cachedInPolicy = models.NewCachedPolicy(p2)
496✔
498
                        cachedInPolicy.ToNodePubKey = toNodeCallback
496✔
499
                        cachedInPolicy.ToNodeFeatures = toNodeFeatures
496✔
500
                }
496✔
501

502
                var inboundFee lnwire.Fee
499✔
503
                if p1 != nil {
997✔
504
                        // Extract inbound fee. If there is a decoding error,
498✔
505
                        // skip this edge.
498✔
506
                        _, err := p1.ExtraOpaqueData.ExtractRecords(&inboundFee)
498✔
507
                        if err != nil {
499✔
508
                                return nil
1✔
509
                        }
1✔
510
                }
511

512
                directedChannel := &DirectedChannel{
498✔
513
                        ChannelID:    e.ChannelID,
498✔
514
                        IsNode1:      node == e.NodeKey1Bytes,
498✔
515
                        OtherNode:    e.NodeKey2Bytes,
498✔
516
                        Capacity:     e.Capacity,
498✔
517
                        OutPolicySet: p1 != nil,
498✔
518
                        InPolicy:     cachedInPolicy,
498✔
519
                        InboundFee:   inboundFee,
498✔
520
                }
498✔
521

498✔
522
                if node == e.NodeKey2Bytes {
749✔
523
                        directedChannel.OtherNode = e.NodeKey1Bytes
251✔
524
                }
251✔
525

526
                return cb(directedChannel)
498✔
527
        }
528

529
        return nodeTraversal(tx, node[:], c.db, dbCallback)
245✔
530
}
531

532
// fetchNodeFeatures returns the features of a given node. If no features are
533
// known for the node, an empty feature vector is returned. An optional read
534
// transaction may be provided. If none is provided, a new one will be created.
535
func (c *KVStore) fetchNodeFeatures(tx kvdb.RTx,
536
        node route.Vertex) (*lnwire.FeatureVector, error) {
689✔
537

689✔
538
        // Fallback that uses the database.
689✔
539
        targetNode, err := c.FetchLightningNodeTx(tx, node)
689✔
540
        switch {
689✔
541
        // If the node exists and has features, return them directly.
542
        case err == nil:
678✔
543
                return targetNode.Features, nil
678✔
544

545
        // If we couldn't find a node announcement, populate a blank feature
546
        // vector.
547
        case errors.Is(err, ErrGraphNodeNotFound):
11✔
548
                return lnwire.EmptyFeatureVector(), nil
11✔
549

550
        // Otherwise, bubble the error up.
551
        default:
×
552
                return nil, err
×
553
        }
554
}
555

556
// ForEachNodeDirectedChannel iterates through all channels of a given node,
557
// executing the passed callback on the directed edge representing the channel
558
// and its incoming policy. If the callback returns an error, then the iteration
559
// is halted with the error propagated back up to the caller.
560
//
561
// Unknown policies are passed into the callback as nil values.
562
//
563
// NOTE: this is part of the graphdb.NodeTraverser interface.
564
func (c *KVStore) ForEachNodeDirectedChannel(nodePub route.Vertex,
565
        cb func(channel *DirectedChannel) error) error {
6✔
566

6✔
567
        return c.forEachNodeDirectedChannel(nil, nodePub, cb)
6✔
568
}
6✔
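A hypothetical caller sketch: tally how many of nodePub's channels already have an outgoing policy set, using the DirectedChannel view delivered to the callback.

// countOutgoingPolicies is a hypothetical caller shown only for illustration.
func countOutgoingPolicies(store *KVStore, nodePub route.Vertex) (int, error) {
        var n int
        err := store.ForEachNodeDirectedChannel(nodePub,
                func(ch *DirectedChannel) error {
                        if ch.OutPolicySet {
                                n++
                        }

                        return nil
                },
        )

        return n, err
}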
569

570
// FetchNodeFeatures returns the features of the given node. If no features are
571
// known for the node, an empty feature vector is returned.
572
//
573
// NOTE: this is part of the graphdb.NodeTraverser interface.
574
func (c *KVStore) FetchNodeFeatures(nodePub route.Vertex) (
575
        *lnwire.FeatureVector, error) {
3✔
576

3✔
577
        return c.fetchNodeFeatures(nil, nodePub)
3✔
578
}
3✔
579

580
// ForEachNodeCached is similar to forEachNode, but it returns DirectedChannel
581
// data to the call-back.
582
//
583
// NOTE: The callback contents MUST not be modified.
584
func (c *KVStore) ForEachNodeCached(cb func(node route.Vertex,
585
        chans map[uint64]*DirectedChannel) error) error {
1✔
586

1✔
587
        // Otherwise call back to a version that uses the database directly.
1✔
588
        // We'll iterate over each node, then the set of channels for each
1✔
589
        // node, and construct a similar callback functiopn signature as the
1✔
590
        // main funcotin expects.
1✔
591
        return c.forEachNode(func(tx kvdb.RTx,
1✔
592
                node *models.LightningNode) error {
21✔
593

20✔
594
                channels := make(map[uint64]*DirectedChannel)
20✔
595

20✔
596
                err := c.ForEachNodeChannelTx(tx, node.PubKeyBytes,
20✔
597
                        func(tx kvdb.RTx, e *models.ChannelEdgeInfo,
20✔
598
                                p1 *models.ChannelEdgePolicy,
20✔
599
                                p2 *models.ChannelEdgePolicy) error {
210✔
600

190✔
601
                                toNodeCallback := func() route.Vertex {
190✔
602
                                        return node.PubKeyBytes
×
603
                                }
×
604
                                toNodeFeatures, err := c.fetchNodeFeatures(
190✔
605
                                        tx, node.PubKeyBytes,
190✔
606
                                )
190✔
607
                                if err != nil {
190✔
608
                                        return err
×
609
                                }
×
610

611
                                var cachedInPolicy *models.CachedEdgePolicy
190✔
612
                                if p2 != nil {
380✔
613
                                        cachedInPolicy =
190✔
614
                                                models.NewCachedPolicy(p2)
190✔
615
                                        cachedInPolicy.ToNodePubKey =
190✔
616
                                                toNodeCallback
190✔
617
                                        cachedInPolicy.ToNodeFeatures =
190✔
618
                                                toNodeFeatures
190✔
619
                                }
190✔
620

621
                                directedChannel := &DirectedChannel{
190✔
622
                                        ChannelID: e.ChannelID,
190✔
623
                                        IsNode1: node.PubKeyBytes ==
190✔
624
                                                e.NodeKey1Bytes,
190✔
625
                                        OtherNode:    e.NodeKey2Bytes,
190✔
626
                                        Capacity:     e.Capacity,
190✔
627
                                        OutPolicySet: p1 != nil,
190✔
628
                                        InPolicy:     cachedInPolicy,
190✔
629
                                }
190✔
630

190✔
631
                                if node.PubKeyBytes == e.NodeKey2Bytes {
285✔
632
                                        directedChannel.OtherNode =
95✔
633
                                                e.NodeKey1Bytes
95✔
634
                                }
95✔
635

636
                                channels[e.ChannelID] = directedChannel
190✔
637

190✔
638
                                return nil
190✔
639
                        })
640
                if err != nil {
20✔
641
                        return err
×
642
                }
×
643

644
                return cb(node.PubKeyBytes, channels)
20✔
645
        })
646
}
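A hypothetical caller sketch: print the per-node channel count. The map handed to the callback is treated as read-only, per the NOTE above.

// logChannelCounts is a hypothetical caller shown only for illustration.
func logChannelCounts(store *KVStore) error {
        return store.ForEachNodeCached(func(node route.Vertex,
                chans map[uint64]*DirectedChannel) error {

                fmt.Printf("node %x has %d channels\n", node[:], len(chans))

                return nil
        })
}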
647

648
// DisabledChannelIDs returns the channel ids of disabled channels.
649
// A channel is disabled when both of the associated ChannelEdgePolicies
650
// have their disabled bit on.
651
func (c *KVStore) DisabledChannelIDs() ([]uint64, error) {
6✔
652
        var disabledChanIDs []uint64
6✔
653
        var chanEdgeFound map[uint64]struct{}
6✔
654

6✔
655
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
12✔
656
                edges := tx.ReadBucket(edgeBucket)
6✔
657
                if edges == nil {
6✔
658
                        return ErrGraphNoEdgesFound
×
659
                }
×
660

661
                disabledEdgePolicyIndex := edges.NestedReadBucket(
6✔
662
                        disabledEdgePolicyBucket,
6✔
663
                )
6✔
664
                if disabledEdgePolicyIndex == nil {
7✔
665
                        return nil
1✔
666
                }
1✔
667

668
                // We iterate over all disabled policies and we add each channel
669
                // that has more than one disabled policy to disabledChanIDs
670
                // array.
671
                return disabledEdgePolicyIndex.ForEach(
5✔
672
                        func(k, v []byte) error {
16✔
673
                                chanID := byteOrder.Uint64(k[:8])
11✔
674
                                _, edgeFound := chanEdgeFound[chanID]
11✔
675
                                if edgeFound {
15✔
676
                                        delete(chanEdgeFound, chanID)
4✔
677
                                        disabledChanIDs = append(
4✔
678
                                                disabledChanIDs, chanID,
4✔
679
                                        )
4✔
680

4✔
681
                                        return nil
4✔
682
                                }
4✔
683

684
                                chanEdgeFound[chanID] = struct{}{}
7✔
685

7✔
686
                                return nil
7✔
687
                        },
688
                )
689
        }, func() {
6✔
690
                disabledChanIDs = nil
6✔
691
                chanEdgeFound = make(map[uint64]struct{})
6✔
692
        })
6✔
693
        if err != nil {
6✔
694
                return nil, err
×
695
        }
×
696

697
        return disabledChanIDs, nil
6✔
698
}
699

700
// ForEachNode iterates through all the stored vertices/nodes in the graph,
701
// executing the passed callback with each node encountered. If the callback
702
// returns an error, then the transaction is aborted and the iteration stops
703
// early. Any operations performed on the NodeTx passed to the call-back are
704
// executed under the same read transaction and so, methods on the NodeTx object
705
// _MUST_ only be called from within the call-back.
706
func (c *KVStore) ForEachNode(cb func(tx NodeRTx) error) error {
123✔
707
        return c.forEachNode(func(tx kvdb.RTx,
123✔
708
                node *models.LightningNode) error {
1,096✔
709

973✔
710
                return cb(newChanGraphNodeTx(tx, c, node))
973✔
711
        })
973✔
712
}
713

714
// forEachNode iterates through all the stored vertices/nodes in the graph,
715
// executing the passed callback with each node encountered. If the callback
716
// returns an error, then the transaction is aborted and the iteration stops
717
// early.
718
//
719
// TODO(roasbeef): add iterator interface to allow for memory efficient graph
720
// traversal when graph gets mega.
721
func (c *KVStore) forEachNode(
722
        cb func(kvdb.RTx, *models.LightningNode) error) error {
132✔
723

132✔
724
        traversal := func(tx kvdb.RTx) error {
264✔
725
                // First grab the nodes bucket which stores the mapping from
132✔
726
                // pubKey to node information.
132✔
727
                nodes := tx.ReadBucket(nodeBucket)
132✔
728
                if nodes == nil {
132✔
729
                        return ErrGraphNotFound
×
730
                }
×
731

732
                return nodes.ForEach(func(pubKey, nodeBytes []byte) error {
1,574✔
733
                        // If this is the source key, then we skip this
1,442✔
734
                        // iteration as the value for this key is a pubKey
1,442✔
735
                        // rather than raw node information.
1,442✔
736
                        if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 {
1,706✔
737
                                return nil
264✔
738
                        }
264✔
739

740
                        nodeReader := bytes.NewReader(nodeBytes)
1,181✔
741
                        node, err := deserializeLightningNode(nodeReader)
1,181✔
742
                        if err != nil {
1,181✔
743
                                return err
×
744
                        }
×
745

746
                        // Execute the callback, the transaction will abort if
747
                        // this returns an error.
748
                        return cb(tx, &node)
1,181✔
749
                })
750
        }
751

752
        return kvdb.View(c.db, traversal, func() {})
264✔
753
}
754

755
// ForEachNodeCacheable iterates through all the stored vertices/nodes in the
756
// graph, executing the passed callback with each node encountered. If the
757
// callback returns an error, then the transaction is aborted and the iteration
758
// stops early.
759
func (c *KVStore) ForEachNodeCacheable(cb func(route.Vertex,
760
        *lnwire.FeatureVector) error) error {
144✔
761

144✔
762
        traversal := func(tx kvdb.RTx) error {
288✔
763
                // First grab the nodes bucket which stores the mapping from
144✔
764
                // pubKey to node information.
144✔
765
                nodes := tx.ReadBucket(nodeBucket)
144✔
766
                if nodes == nil {
144✔
767
                        return ErrGraphNotFound
×
768
                }
×
769

770
                return nodes.ForEach(func(pubKey, nodeBytes []byte) error {
549✔
771
                        // If this is the source key, then we skip this
405✔
772
                        // iteration as the value for this key is a pubKey
405✔
773
                        // rather than raw node information.
405✔
774
                        if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 {
690✔
775
                                return nil
285✔
776
                        }
285✔
777

778
                        nodeReader := bytes.NewReader(nodeBytes)
123✔
779
                        node, features, err := deserializeLightningNodeCacheable( //nolint:ll
123✔
780
                                nodeReader,
123✔
781
                        )
123✔
782
                        if err != nil {
123✔
783
                                return err
×
784
                        }
×
785

786
                        // Execute the callback, the transaction will abort if
787
                        // this returns an error.
788
                        return cb(node, features)
123✔
789
                })
790
        }
791

792
        return kvdb.View(c.db, traversal, func() {})
288✔
793
}
794

795
// SourceNode returns the source node of the graph. The source node is treated
796
// as the center node within a star-graph. This method may be used to kick off
797
// a path finding algorithm in order to explore the reachability of another
798
// node based off the source node.
799
func (c *KVStore) SourceNode() (*models.LightningNode, error) {
234✔
800
        var source *models.LightningNode
234✔
801
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
468✔
802
                // First grab the nodes bucket which stores the mapping from
234✔
803
                // pubKey to node information.
234✔
804
                nodes := tx.ReadBucket(nodeBucket)
234✔
805
                if nodes == nil {
234✔
806
                        return ErrGraphNotFound
×
807
                }
×
808

809
                node, err := c.sourceNode(nodes)
234✔
810
                if err != nil {
235✔
811
                        return err
1✔
812
                }
1✔
813
                source = node
233✔
814

233✔
815
                return nil
233✔
816
        }, func() {
234✔
817
                source = nil
234✔
818
        })
234✔
819
        if err != nil {
235✔
820
                return nil, err
1✔
821
        }
1✔
822

823
        return source, nil
233✔
824
}
825

826
// sourceNode uses an existing database transaction and returns the source node
827
// of the graph. The source node is treated as the center node within a
828
// star-graph. This method may be used to kick off a path finding algorithm in
829
// order to explore the reachability of another node based off the source node.
830
func (c *KVStore) sourceNode(nodes kvdb.RBucket) (*models.LightningNode,
831
        error) {
496✔
832

496✔
833
        selfPub := nodes.Get(sourceKey)
496✔
834
        if selfPub == nil {
497✔
835
                return nil, ErrSourceNodeNotSet
1✔
836
        }
1✔
837

838
        // With the pubKey of the source node retrieved, we're able to
839
        // fetch the full node information.
840
        node, err := fetchLightningNode(nodes, selfPub)
495✔
841
        if err != nil {
495✔
842
                return nil, err
×
843
        }
×
844

845
        return &node, nil
495✔
846
}
847

848
// SetSourceNode sets the source node within the graph database. The source
849
// node is to be used as the center of a star-graph within path finding
850
// algorithms.
851
func (c *KVStore) SetSourceNode(node *models.LightningNode) error {
120✔
852
        nodePubBytes := node.PubKeyBytes[:]
120✔
853

120✔
854
        return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
240✔
855
                // First grab the nodes bucket which stores the mapping from
120✔
856
                // pubKey to node information.
120✔
857
                nodes, err := tx.CreateTopLevelBucket(nodeBucket)
120✔
858
                if err != nil {
120✔
859
                        return err
×
860
                }
×
861

862
                // Next we create the mapping from source to the targeted
863
                // public key.
864
                if err := nodes.Put(sourceKey, nodePubBytes); err != nil {
120✔
865
                        return err
×
866
                }
×
867

868
                // Finally, we commit the information of the lightning node
869
                // itself.
870
                return addLightningNode(tx, node)
120✔
871
        }, func() {})
120✔
872
}
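A hypothetical bootstrap sketch: persist our own node as the graph's source and read it back, matching the star-graph model described above. The selfNode argument is assumed to be our own *models.LightningNode.

// bootstrapSource is a hypothetical helper shown only for illustration.
func bootstrapSource(store *KVStore,
        selfNode *models.LightningNode) (*models.LightningNode, error) {

        if err := store.SetSourceNode(selfNode); err != nil {
                return nil, err
        }

        // SourceNode should now return the node we just persisted.
        return store.SourceNode()
}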
873

874
// AddLightningNode adds a vertex/node to the graph database. If the node is not
875
// in the database from before, this will add a new, unconnected one to the
876
// graph. If it is present from before, this will update that node's
877
// information. Note that this method is expected to only be called to update an
878
// already present node from a node announcement, or to insert a node found in a
879
// channel update.
880
//
881
// TODO(roasbeef): also need sig of announcement.
882
func (c *KVStore) AddLightningNode(node *models.LightningNode,
883
        op ...batch.SchedulerOption) error {
803✔
884

803✔
885
        r := &batch.Request{
803✔
886
                Update: func(tx kvdb.RwTx) error {
1,606✔
887
                        return addLightningNode(tx, node)
803✔
888
                },
803✔
889
        }
890

891
        for _, f := range op {
806✔
892
                f(r)
3✔
893
        }
3✔
894

895
        return c.nodeScheduler.Execute(r)
803✔
896
}
897

898
func addLightningNode(tx kvdb.RwTx, node *models.LightningNode) error {
990✔
899
        nodes, err := tx.CreateTopLevelBucket(nodeBucket)
990✔
900
        if err != nil {
990✔
901
                return err
×
902
        }
×
903

904
        aliases, err := nodes.CreateBucketIfNotExists(aliasIndexBucket)
990✔
905
        if err != nil {
990✔
906
                return err
×
907
        }
×
908

909
        updateIndex, err := nodes.CreateBucketIfNotExists(
990✔
910
                nodeUpdateIndexBucket,
990✔
911
        )
990✔
912
        if err != nil {
990✔
913
                return err
×
914
        }
×
915

916
        return putLightningNode(nodes, aliases, updateIndex, node)
990✔
917
}
918

919
// LookupAlias attempts to return the alias as advertised by the target node.
920
// TODO(roasbeef): currently assumes that aliases are unique...
921
func (c *KVStore) LookupAlias(pub *btcec.PublicKey) (string, error) {
5✔
922
        var alias string
5✔
923

5✔
924
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
10✔
925
                nodes := tx.ReadBucket(nodeBucket)
5✔
926
                if nodes == nil {
5✔
927
                        return ErrGraphNodesNotFound
×
928
                }
×
929

930
                aliases := nodes.NestedReadBucket(aliasIndexBucket)
5✔
931
                if aliases == nil {
5✔
932
                        return ErrGraphNodesNotFound
×
933
                }
×
934

935
                nodePub := pub.SerializeCompressed()
5✔
936
                a := aliases.Get(nodePub)
5✔
937
                if a == nil {
6✔
938
                        return ErrNodeAliasNotFound
1✔
939
                }
1✔
940

941
                // TODO(roasbeef): should actually be using the utf-8
942
                // package...
943
                alias = string(a)
4✔
944

4✔
945
                return nil
4✔
946
        }, func() {
5✔
947
                alias = ""
5✔
948
        })
5✔
949
        if err != nil {
6✔
950
                return "", err
1✔
951
        }
1✔
952

953
        return alias, nil
4✔
954
}
955

956
// DeleteLightningNode starts a new database transaction to remove a vertex/node
957
// from the database according to the node's public key.
958
func (c *KVStore) DeleteLightningNode(nodePub route.Vertex) error {
3✔
959
        // TODO(roasbeef): ensure dangling edges are removed...
3✔
960
        return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
6✔
961
                nodes := tx.ReadWriteBucket(nodeBucket)
3✔
962
                if nodes == nil {
3✔
963
                        return ErrGraphNodeNotFound
×
964
                }
×
965

966
                return c.deleteLightningNode(nodes, nodePub[:])
3✔
967
        }, func() {})
3✔
968
}
969

970
// deleteLightningNode uses an existing database transaction to remove a
971
// vertex/node from the database according to the node's public key.
972
func (c *KVStore) deleteLightningNode(nodes kvdb.RwBucket,
973
        compressedPubKey []byte) error {
66✔
974

66✔
975
        aliases := nodes.NestedReadWriteBucket(aliasIndexBucket)
66✔
976
        if aliases == nil {
66✔
977
                return ErrGraphNodesNotFound
×
978
        }
×
979

980
        if err := aliases.Delete(compressedPubKey); err != nil {
66✔
981
                return err
×
982
        }
×
983

984
        // Before we delete the node, we'll fetch its current state so we can
985
        // determine when its last update was to clear out the node update
986
        // index.
987
        node, err := fetchLightningNode(nodes, compressedPubKey)
66✔
988
        if err != nil {
66✔
989
                return err
×
990
        }
×
991

992
        if err := nodes.Delete(compressedPubKey); err != nil {
66✔
993
                return err
×
994
        }
×
995

996
        // Finally, we'll delete the index entry for the node within the
997
        // nodeUpdateIndexBucket as this node is no longer active, so we don't
998
        // need to track its last update.
999
        nodeUpdateIndex := nodes.NestedReadWriteBucket(nodeUpdateIndexBucket)
66✔
1000
        if nodeUpdateIndex == nil {
66✔
1001
                return ErrGraphNodesNotFound
×
1002
        }
×
1003

1004
        // In order to delete the entry, we'll need to reconstruct the key for
1005
        // its last update.
1006
        updateUnix := uint64(node.LastUpdate.Unix())
66✔
1007
        var indexKey [8 + 33]byte
66✔
1008
        byteOrder.PutUint64(indexKey[:8], updateUnix)
66✔
1009
        copy(indexKey[8:], compressedPubKey)
66✔
1010

66✔
1011
        return nodeUpdateIndex.Delete(indexKey[:])
66✔
1012
}
1013

1014
// AddChannelEdge adds a new (undirected, blank) edge to the graph database. An
1015
// undirected edge from the two target nodes are created. The information stored
1016
// denotes the static attributes of the channel, such as the channelID, the keys
1017
// involved in creation of the channel, and the set of features that the channel
1018
// supports. The chanPoint and chanID are used to uniquely identify the edge
1019
// globally within the database.
1020
func (c *KVStore) AddChannelEdge(edge *models.ChannelEdgeInfo,
1021
        op ...batch.SchedulerOption) error {
1,718✔
1022

1,718✔
1023
        var alreadyExists bool
1,718✔
1024
        r := &batch.Request{
1,718✔
1025
                Reset: func() {
3,436✔
1026
                        alreadyExists = false
1,718✔
1027
                },
1,718✔
1028
                Update: func(tx kvdb.RwTx) error {
1,718✔
1029
                        err := c.addChannelEdge(tx, edge)
1,718✔
1030

1,718✔
1031
                        // Silence ErrEdgeAlreadyExist so that the batch can
1,718✔
1032
                        // succeed, but propagate the error via local state.
1,718✔
1033
                        if errors.Is(err, ErrEdgeAlreadyExist) {
1,952✔
1034
                                alreadyExists = true
234✔
1035
                                return nil
234✔
1036
                        }
234✔
1037

1038
                        return err
1,484✔
1039
                },
1040
                OnCommit: func(err error) error {
1,718✔
1041
                        switch {
1,718✔
1042
                        case err != nil:
×
1043
                                return err
×
1044
                        case alreadyExists:
234✔
1045
                                return ErrEdgeAlreadyExist
234✔
1046
                        default:
1,484✔
1047
                                c.rejectCache.remove(edge.ChannelID)
1,484✔
1048
                                c.chanCache.remove(edge.ChannelID)
1,484✔
1049
                                return nil
1,484✔
1050
                        }
1051
                },
1052
        }
1053

1054
        for _, f := range op {
1,721✔
1055
                if f == nil {
3✔
1056
                        return fmt.Errorf("nil scheduler option was used")
×
1057
                }
×
1058

1059
                f(r)
3✔
1060
        }
1061

1062
        return c.chanScheduler.Execute(r)
1,718✔
1063
}
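A hypothetical caller sketch: insert an edge and treat a duplicate as a no-op, since AddChannelEdge surfaces ErrEdgeAlreadyExist through OnCommit even though the batched transaction itself succeeds.

// insertEdge is a hypothetical caller shown only for illustration.
func insertEdge(store *KVStore, edge *models.ChannelEdgeInfo) error {
        err := store.AddChannelEdge(edge)
        switch {
        // The edge was inserted previously; nothing to do.
        case errors.Is(err, ErrEdgeAlreadyExist):
                return nil

        case err != nil:
                return fmt.Errorf("unable to add channel edge: %w", err)
        }

        return nil
}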
1064

1065
// addChannelEdge is the private form of AddChannelEdge that allows callers to
1066
// utilize an existing db transaction.
1067
func (c *KVStore) addChannelEdge(tx kvdb.RwTx,
1068
        edge *models.ChannelEdgeInfo) error {
1,718✔
1069

1,718✔
1070
        // Construct the channel's primary key which is the 8-byte channel ID.
1,718✔
1071
        var chanKey [8]byte
1,718✔
1072
        binary.BigEndian.PutUint64(chanKey[:], edge.ChannelID)
1,718✔
1073

1,718✔
1074
        nodes, err := tx.CreateTopLevelBucket(nodeBucket)
1,718✔
1075
        if err != nil {
1,718✔
1076
                return err
×
1077
        }
×
1078
        edges, err := tx.CreateTopLevelBucket(edgeBucket)
1,718✔
1079
        if err != nil {
1,718✔
1080
                return err
×
1081
        }
×
1082
        edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket)
1,718✔
1083
        if err != nil {
1,718✔
1084
                return err
×
1085
        }
×
1086
        chanIndex, err := edges.CreateBucketIfNotExists(channelPointBucket)
1,718✔
1087
        if err != nil {
1,718✔
1088
                return err
×
1089
        }
×
1090

1091
        // First, attempt to check if this edge has already been created. If
1092
        // so, then we can exit early as this method is meant to be idempotent.
1093
        if edgeInfo := edgeIndex.Get(chanKey[:]); edgeInfo != nil {
1,952✔
1094
                return ErrEdgeAlreadyExist
234✔
1095
        }
234✔
1096

1097
        // Before we insert the channel into the database, we'll ensure that
1098
        // both nodes already exist in the channel graph. If either node
1099
        // doesn't, then we'll insert a "shell" node that just includes its
1100
        // public key, so subsequent validation and queries can work properly.
1101
        _, node1Err := fetchLightningNode(nodes, edge.NodeKey1Bytes[:])
1,484✔
1102
        switch {
1,484✔
1103
        case errors.Is(node1Err, ErrGraphNodeNotFound):
21✔
1104
                node1Shell := models.LightningNode{
21✔
1105
                        PubKeyBytes:          edge.NodeKey1Bytes,
21✔
1106
                        HaveNodeAnnouncement: false,
21✔
1107
                }
21✔
1108
                err := addLightningNode(tx, &node1Shell)
21✔
1109
                if err != nil {
21✔
1110
                        return fmt.Errorf("unable to create shell node "+
×
1111
                                "for: %x: %w", edge.NodeKey1Bytes, err)
×
1112
                }
×
1113
        case node1Err != nil:
×
1114
                return node1Err
×
1115
        }
1116

1117
        _, node2Err := fetchLightningNode(nodes, edge.NodeKey2Bytes[:])
1,484✔
1118
        switch {
1,484✔
1119
        case errors.Is(node2Err, ErrGraphNodeNotFound):
55✔
1120
                node2Shell := models.LightningNode{
55✔
1121
                        PubKeyBytes:          edge.NodeKey2Bytes,
55✔
1122
                        HaveNodeAnnouncement: false,
55✔
1123
                }
55✔
1124
                err := addLightningNode(tx, &node2Shell)
55✔
1125
                if err != nil {
55✔
1126
                        return fmt.Errorf("unable to create shell node "+
×
1127
                                "for: %x: %w", edge.NodeKey2Bytes, err)
×
1128
                }
×
1129
        case node2Err != nil:
×
1130
                return node2Err
×
1131
        }
1132

1133
        // If the edge hasn't been created yet, then we'll first add it to the
1134
        // edge index in order to associate the edge between two nodes and also
1135
        // store the static components of the channel.
1136
        if err := putChanEdgeInfo(edgeIndex, edge, chanKey); err != nil {
1,484✔
1137
                return err
×
1138
        }
×
1139

1140
        // Mark edge policies for both sides as unknown. This is to enable
1141
        // efficient incoming channel lookup for a node.
1142
        keys := []*[33]byte{
1,484✔
1143
                &edge.NodeKey1Bytes,
1,484✔
1144
                &edge.NodeKey2Bytes,
1,484✔
1145
        }
1,484✔
1146
        for _, key := range keys {
4,449✔
1147
                err := putChanEdgePolicyUnknown(edges, edge.ChannelID, key[:])
2,965✔
1148
                if err != nil {
2,965✔
1149
                        return err
×
1150
                }
×
1151
        }
1152

1153
        // Finally we add it to the channel index which maps channel points
1154
        // (outpoints) to the shorter channel ID's.
1155
        var b bytes.Buffer
1,484✔
1156
        if err := WriteOutpoint(&b, &edge.ChannelPoint); err != nil {
1,484✔
1157
                return err
×
1158
        }
×
1159

1160
        return chanIndex.Put(b.Bytes(), chanKey[:])
1,484✔
1161
}
1162

1163
// HasChannelEdge returns true if the database knows of a channel edge with the
1164
// passed channel ID, and false otherwise. If an edge with that ID is found
1165
// within the graph, then two time stamps representing the last time the edge
1166
// was updated for both directed edges are returned along with the boolean. If
1167
// it is not found, then the zombie index is checked and its result is returned
1168
// as the second boolean.
1169
func (c *KVStore) HasChannelEdge(
1170
        chanID uint64) (time.Time, time.Time, bool, bool, error) {
218✔
1171

218✔
1172
        var (
218✔
1173
                upd1Time time.Time
218✔
1174
                upd2Time time.Time
218✔
1175
                exists   bool
218✔
1176
                isZombie bool
218✔
1177
        )
218✔
1178

218✔
1179
        // We'll query the cache with the shared lock held to allow multiple
218✔
1180
        // readers to access values in the cache concurrently if they exist.
218✔
1181
        c.cacheMu.RLock()
218✔
1182
        if entry, ok := c.rejectCache.get(chanID); ok {
296✔
1183
                c.cacheMu.RUnlock()
78✔
1184
                upd1Time = time.Unix(entry.upd1Time, 0)
78✔
1185
                upd2Time = time.Unix(entry.upd2Time, 0)
78✔
1186
                exists, isZombie = entry.flags.unpack()
78✔
1187

78✔
1188
                return upd1Time, upd2Time, exists, isZombie, nil
78✔
1189
        }
78✔
1190
        c.cacheMu.RUnlock()
143✔
1191

143✔
1192
        c.cacheMu.Lock()
143✔
1193
        defer c.cacheMu.Unlock()
143✔
1194

143✔
1195
        // The item was not found with the shared lock, so we'll acquire the
143✔
1196
        // exclusive lock and check the cache again in case another method added
143✔
1197
        // the entry to the cache while no lock was held.
143✔
1198
        if entry, ok := c.rejectCache.get(chanID); ok {
150✔
1199
                upd1Time = time.Unix(entry.upd1Time, 0)
7✔
1200
                upd2Time = time.Unix(entry.upd2Time, 0)
7✔
1201
                exists, isZombie = entry.flags.unpack()
7✔
1202

7✔
1203
                return upd1Time, upd2Time, exists, isZombie, nil
7✔
1204
        }
7✔
1205

1206
        if err := kvdb.View(c.db, func(tx kvdb.RTx) error {
278✔
1207
                edges := tx.ReadBucket(edgeBucket)
139✔
1208
                if edges == nil {
139✔
1209
                        return ErrGraphNoEdgesFound
×
1210
                }
×
1211
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
139✔
1212
                if edgeIndex == nil {
139✔
1213
                        return ErrGraphNoEdgesFound
×
1214
                }
×
1215

1216
                var channelID [8]byte
139✔
1217
                byteOrder.PutUint64(channelID[:], chanID)
139✔
1218

139✔
1219
                // If the edge doesn't exist, then we'll also check our zombie
139✔
1220
                // index.
139✔
1221
                if edgeIndex.Get(channelID[:]) == nil {
226✔
1222
                        exists = false
87✔
1223
                        zombieIndex := edges.NestedReadBucket(zombieBucket)
87✔
1224
                        if zombieIndex != nil {
174✔
1225
                                isZombie, _, _ = isZombieEdge(
87✔
1226
                                        zombieIndex, chanID,
87✔
1227
                                )
87✔
1228
                        }
87✔
1229

1230
                        return nil
87✔
1231
                }
1232

1233
                exists = true
55✔
1234
                isZombie = false
55✔
1235

55✔
1236
                // If the channel has been found in the graph, then retrieve
55✔
1237
                // the edges themselves so we can return the last updated
55✔
1238
                // timestamps.
55✔
1239
                nodes := tx.ReadBucket(nodeBucket)
55✔
1240
                if nodes == nil {
55✔
1241
                        return ErrGraphNodeNotFound
×
1242
                }
×
1243

1244
                e1, e2, err := fetchChanEdgePolicies(
55✔
1245
                        edgeIndex, edges, channelID[:],
55✔
1246
                )
55✔
1247
                if err != nil {
55✔
1248
                        return err
×
1249
                }
×
1250

1251
                // As we may have only one of the edges populated, only set the
1252
                // update time if the edge was found in the database.
1253
                if e1 != nil {
76✔
1254
                        upd1Time = e1.LastUpdate
21✔
1255
                }
21✔
1256
                if e2 != nil {
74✔
1257
                        upd2Time = e2.LastUpdate
19✔
1258
                }
19✔
1259

1260
                return nil
55✔
1261
        }, func() {}); err != nil {
139✔
1262
                return time.Time{}, time.Time{}, exists, isZombie, err
×
1263
        }
×
1264

1265
        c.rejectCache.insert(chanID, rejectCacheEntry{
139✔
1266
                upd1Time: upd1Time.Unix(),
139✔
1267
                upd2Time: upd2Time.Unix(),
139✔
1268
                flags:    packRejectFlags(exists, isZombie),
139✔
1269
        })
139✔
1270

139✔
1271
        return upd1Time, upd2Time, exists, isZombie, nil
139✔
1272
}
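
// Illustrative usage sketch, not part of the original file: a caller deciding
// whether a freshly received channel announcement still needs processing. It
// assumes `store` is an already-opened *KVStore and reuses this file's
// existing imports only.
func exampleNeedsProcessing(store *KVStore,
        scid lnwire.ShortChannelID) (bool, error) {

        upd1, upd2, exists, isZombie, err := store.HasChannelEdge(
                scid.ToUint64(),
        )
        if err != nil {
                return false, err
        }

        // Channels we already know about, or have written off as zombies, can
        // be skipped by the caller.
        if exists || isZombie {
                log.Debugf("Channel %v already known (updates: %v / %v)",
                        scid, upd1, upd2)
                return false, nil
        }

        return true, nil
}
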
1273

1274
// AddEdgeProof sets the proof of an existing edge in the graph database.
1275
func (c *KVStore) AddEdgeProof(chanID lnwire.ShortChannelID,
1276
        proof *models.ChannelAuthProof) error {
4✔
1277

4✔
1278
        // Construct the channel's primary key which is the 8-byte channel ID.
4✔
1279
        var chanKey [8]byte
4✔
1280
        binary.BigEndian.PutUint64(chanKey[:], chanID.ToUint64())
4✔
1281

4✔
1282
        return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
8✔
1283
                edges := tx.ReadWriteBucket(edgeBucket)
4✔
1284
                if edges == nil {
4✔
1285
                        return ErrEdgeNotFound
×
1286
                }
×
1287

1288
                edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
4✔
1289
                if edgeIndex == nil {
4✔
1290
                        return ErrEdgeNotFound
×
1291
                }
×
1292

1293
                edge, err := fetchChanEdgeInfo(edgeIndex, chanKey[:])
4✔
1294
                if err != nil {
4✔
1295
                        return err
×
1296
                }
×
1297

1298
                edge.AuthProof = proof
4✔
1299

4✔
1300
                return putChanEdgeInfo(edgeIndex, &edge, chanKey)
4✔
1301
        }, func() {})
4✔
1302
}
1303

1304
const (
1305
        // pruneTipBytes is the total size of the value which stores a prune
1306
        // entry of the graph in the prune log. The "prune tip" is the last
1307
        // entry in the prune log, and indicates if the channel graph is in
1308
        // sync with the current UTXO state. The structure of the value
1309
        // is: blockHash, taking 32 bytes total.
1310
        pruneTipBytes = 32
1311
)
1312

1313
// PruneGraph prunes newly closed channels from the channel graph in response
1314
// to a new block being solved on the network. Any transactions which spend the
1315
// funding output of any known channels within the graph will be deleted.
1316
// Additionally, the "prune tip", or the last block which has been used to
1317
// prune the graph is stored so callers can ensure the graph is fully in sync
1318
// with the current UTXO state. A slice of channels that have been closed by
1319
// the target block along with any pruned nodes are returned if the function
1320
// succeeds without error.
1321
func (c *KVStore) PruneGraph(spentOutputs []*wire.OutPoint,
1322
        blockHash *chainhash.Hash, blockHeight uint32) (
1323
        []*models.ChannelEdgeInfo, []route.Vertex, error) {
242✔
1324

242✔
1325
        c.cacheMu.Lock()
242✔
1326
        defer c.cacheMu.Unlock()
242✔
1327

242✔
1328
        var (
242✔
1329
                chansClosed []*models.ChannelEdgeInfo
242✔
1330
                prunedNodes []route.Vertex
242✔
1331
        )
242✔
1332

242✔
1333
        err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
484✔
1334
                // First grab the edges bucket which houses the information
242✔
1335
                // we'd like to delete.
242✔
1336
                edges, err := tx.CreateTopLevelBucket(edgeBucket)
242✔
1337
                if err != nil {
242✔
1338
                        return err
×
1339
                }
×
1340

1341
                // Next grab the two edge indexes which will also need to be
1342
                // updated.
1343
                edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket)
242✔
1344
                if err != nil {
242✔
1345
                        return err
×
1346
                }
×
1347
                chanIndex, err := edges.CreateBucketIfNotExists(
242✔
1348
                        channelPointBucket,
242✔
1349
                )
242✔
1350
                if err != nil {
242✔
1351
                        return err
×
1352
                }
×
1353
                nodes := tx.ReadWriteBucket(nodeBucket)
242✔
1354
                if nodes == nil {
242✔
1355
                        return ErrSourceNodeNotSet
×
1356
                }
×
1357
                zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
242✔
1358
                if err != nil {
242✔
1359
                        return err
×
1360
                }
×
1361

1362
                // For each of the outpoints that have been spent within the
1363
                // block, we attempt to delete them from the graph as if that
1364
                // outpoint was a channel, then it has now been closed.
1365
                for _, chanPoint := range spentOutputs {
389✔
1366
                        // TODO(roasbeef): load channel bloom filter, continue
147✔
1367
                        // if NOT in filter
147✔
1368

147✔
1369
                        var opBytes bytes.Buffer
147✔
1370
                        err := WriteOutpoint(&opBytes, chanPoint)
147✔
1371
                        if err != nil {
147✔
1372
                                return err
×
1373
                        }
×
1374

1375
                        // First attempt to see if the channel exists within
1376
                        // the database, if not, then we can exit early.
1377
                        chanID := chanIndex.Get(opBytes.Bytes())
147✔
1378
                        if chanID == nil {
270✔
1379
                                continue
123✔
1380
                        }
1381

1382
                        // Attempt to delete the channel, an ErrEdgeNotFound
1383
                        // will be returned if that outpoint isn't known to be
1384
                        // a channel. If no error is returned, then a channel
1385
                        // was successfully pruned.
1386
                        edgeInfo, err := c.delChannelEdgeUnsafe(
24✔
1387
                                edges, edgeIndex, chanIndex, zombieIndex,
24✔
1388
                                chanID, false, false,
24✔
1389
                        )
24✔
1390
                        if err != nil && !errors.Is(err, ErrEdgeNotFound) {
24✔
1391
                                return err
×
1392
                        }
×
1393

1394
                        chansClosed = append(chansClosed, edgeInfo)
24✔
1395
                }
1396

1397
                metaBucket, err := tx.CreateTopLevelBucket(graphMetaBucket)
242✔
1398
                if err != nil {
242✔
1399
                        return err
×
1400
                }
×
1401

1402
                pruneBucket, err := metaBucket.CreateBucketIfNotExists(
242✔
1403
                        pruneLogBucket,
242✔
1404
                )
242✔
1405
                if err != nil {
242✔
1406
                        return err
×
1407
                }
×
1408

1409
                // With the graph pruned, add a new entry to the prune log,
1410
                // which can be used to check if the graph is fully synced with
1411
                // the current UTXO state.
1412
                var blockHeightBytes [4]byte
242✔
1413
                byteOrder.PutUint32(blockHeightBytes[:], blockHeight)
242✔
1414

242✔
1415
                var newTip [pruneTipBytes]byte
242✔
1416
                copy(newTip[:], blockHash[:])
242✔
1417

242✔
1418
                err = pruneBucket.Put(blockHeightBytes[:], newTip[:])
242✔
1419
                if err != nil {
242✔
1420
                        return err
×
1421
                }
×
1422

1423
                // Now that the graph has been pruned, we'll also attempt to
1424
                // prune any nodes that have had a channel closed within the
1425
                // latest block.
1426
                prunedNodes, err = c.pruneGraphNodes(nodes, edgeIndex)
242✔
1427

242✔
1428
                return err
242✔
1429
        }, func() {
242✔
1430
                chansClosed = nil
242✔
1431
                prunedNodes = nil
242✔
1432
        })
242✔
1433
        if err != nil {
242✔
NEW
1434
                return nil, nil, err
×
1435
        }
×
1436

1437
        for _, channel := range chansClosed {
266✔
1438
                c.rejectCache.remove(channel.ChannelID)
24✔
1439
                c.chanCache.remove(channel.ChannelID)
24✔
1440
        }
24✔
1441

1442
        return chansClosed, prunedNodes, nil
242✔
1443
}
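
// Illustrative usage sketch, not part of the original file: pruning the graph
// in response to a newly connected block. The spent outpoints and the block's
// hash and height are assumed to be supplied by the caller's chain notifier.
func examplePruneOnBlock(store *KVStore, spent []*wire.OutPoint,
        blockHash *chainhash.Hash, blockHeight uint32) error {

        closed, prunedNodes, err := store.PruneGraph(
                spent, blockHash, blockHeight,
        )
        if err != nil {
                return err
        }

        log.Infof("Block %d closed %d channels and pruned %d nodes",
                blockHeight, len(closed), len(prunedNodes))

        return nil
}
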
1444

1445
// PruneGraphNodes is a garbage collection method which attempts to prune out
1446
// any nodes from the channel graph that are currently unconnected. This ensures
1447
// that we only maintain a graph of reachable nodes. In the event that a pruned
1448
// node gains more channels, it will be re-added to the graph.
1449
func (c *KVStore) PruneGraphNodes() ([]route.Vertex, error) {
26✔
1450
        var prunedNodes []route.Vertex
26✔
1451
        err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
52✔
1452
                nodes := tx.ReadWriteBucket(nodeBucket)
26✔
1453
                if nodes == nil {
26✔
1454
                        return ErrGraphNodesNotFound
×
1455
                }
×
1456
                edges := tx.ReadWriteBucket(edgeBucket)
26✔
1457
                if edges == nil {
26✔
1458
                        return ErrGraphNotFound
×
1459
                }
×
1460
                edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
26✔
1461
                if edgeIndex == nil {
26✔
1462
                        return ErrGraphNoEdgesFound
×
1463
                }
×
1464

1465
                var err error
26✔
1466
                prunedNodes, err = c.pruneGraphNodes(nodes, edgeIndex)
26✔
1467
                if err != nil {
26✔
NEW
1468
                        return err
×
NEW
1469
                }
×
1470

1471
                return nil
26✔
1472
        }, func() {
26✔
1473
                prunedNodes = nil
26✔
1474
        })
26✔
1475

1476
        return prunedNodes, err
26✔
1477
}
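
// Illustrative usage sketch, not part of the original file: periodically
// running the standalone node garbage collection. The hourly interval is an
// arbitrary example value, not something mandated by the store.
func examplePruneNodesLoop(store *KVStore, quit <-chan struct{}) {
        ticker := time.NewTicker(time.Hour)
        defer ticker.Stop()

        for {
                select {
                case <-ticker.C:
                        pruned, err := store.PruneGraphNodes()
                        if err != nil {
                                log.Errorf("Unable to prune nodes: %v", err)
                                continue
                        }

                        for _, vertex := range pruned {
                                log.Debugf("Pruned unconnected node %x",
                                        vertex[:])
                        }

                case <-quit:
                        return
                }
        }
}
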
1478

1479
// pruneGraphNodes attempts to remove any nodes from the graph that have had a
1480
// channel closed within the current block. If the node still has existing
1481
// channels in the graph, this will act as a no-op.
1482
func (c *KVStore) pruneGraphNodes(nodes kvdb.RwBucket,
1483
        edgeIndex kvdb.RwBucket) ([]route.Vertex, error) {
265✔
1484

265✔
1485
        log.Trace("Pruning nodes from graph with no open channels")
265✔
1486

265✔
1487
        // We'll retrieve the graph's source node to ensure we don't remove it
265✔
1488
        // even if it no longer has any open channels.
265✔
1489
        sourceNode, err := c.sourceNode(nodes)
265✔
1490
        if err != nil {
265✔
NEW
1491
                return nil, err
×
1492
        }
×
1493

1494
        // We'll use this map to keep count of the number of references to a node
1495
        // in the graph. A node should only be removed once it has no more
1496
        // references in the graph.
1497
        nodeRefCounts := make(map[[33]byte]int)
265✔
1498
        err = nodes.ForEach(func(pubKey, nodeBytes []byte) error {
1,559✔
1499
                // If this is the source key, then we skip this
1,294✔
1500
                // iteration as the value for this key is a pubKey
1,294✔
1501
                // rather than raw node information.
1,294✔
1502
                if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 {
2,083✔
1503
                        return nil
789✔
1504
                }
789✔
1505

1506
                var nodePub [33]byte
508✔
1507
                copy(nodePub[:], pubKey)
508✔
1508
                nodeRefCounts[nodePub] = 0
508✔
1509

508✔
1510
                return nil
508✔
1511
        })
1512
        if err != nil {
265✔
NEW
1513
                return nil, err
×
1514
        }
×
1515

1516
        // To ensure we never delete the source node, we'll start off by
1517
        // bumping its ref count to 1.
1518
        nodeRefCounts[sourceNode.PubKeyBytes] = 1
265✔
1519

265✔
1520
        // Next, we'll run through the edgeIndex which maps a channel ID to the
265✔
1521
        // edge info. We'll use this scan to populate our reference count map
265✔
1522
        // above.
265✔
1523
        err = edgeIndex.ForEach(func(chanID, edgeInfoBytes []byte) error {
480✔
1524
                // The first 66 bytes of the edge info contain the pubkeys of
215✔
1525
                // the nodes that this edge attaches. We'll extract them, and
215✔
1526
                // add them to the ref count map.
215✔
1527
                var node1, node2 [33]byte
215✔
1528
                copy(node1[:], edgeInfoBytes[:33])
215✔
1529
                copy(node2[:], edgeInfoBytes[33:])
215✔
1530

215✔
1531
                // With the nodes extracted, we'll increase the ref count of
215✔
1532
                // each of the nodes.
215✔
1533
                nodeRefCounts[node1]++
215✔
1534
                nodeRefCounts[node2]++
215✔
1535

215✔
1536
                return nil
215✔
1537
        })
215✔
1538
        if err != nil {
265✔
NEW
1539
                return nil, err
×
1540
        }
×
1541

1542
        // Finally, we'll make a second pass over the set of nodes, and delete
1543
        // any nodes that have a ref count of zero.
1544
        var pruned []route.Vertex
265✔
1545
        for nodePubKey, refCount := range nodeRefCounts {
773✔
1546
                // If the ref count of the node isn't zero, then we can safely
508✔
1547
                // skip it as it still has edges to or from it within the
508✔
1548
                // graph.
508✔
1549
                if refCount != 0 {
956✔
1550
                        continue
448✔
1551
                }
1552

1553
                // If we reach this point, then there are no longer any edges
1554
                // that connect this node, so we can delete it.
1555
                err := c.deleteLightningNode(nodes, nodePubKey[:])
63✔
1556
                if err != nil {
63✔
1557
                        if errors.Is(err, ErrGraphNodeNotFound) ||
×
1558
                                errors.Is(err, ErrGraphNodesNotFound) {
×
1559

×
1560
                                log.Warnf("Unable to prune node %x from the "+
×
1561
                                        "graph: %v", nodePubKey, err)
×
1562
                                continue
×
1563
                        }
1564

NEW
1565
                        return nil, err
×
1566
                }
1567

1568
                log.Infof("Pruned unconnected node %x from channel graph",
63✔
1569
                        nodePubKey[:])
63✔
1570

63✔
1571
                pruned = append(pruned, nodePubKey)
63✔
1572
        }
1573

1574
        if len(pruned) > 0 {
312✔
1575
                log.Infof("Pruned %v unconnected nodes from the channel graph",
47✔
1576
                        len(pruned))
47✔
1577
        }
47✔
1578

1579
        return pruned, err
265✔
1580
}
1581

1582
// DisconnectBlockAtHeight is used to indicate that the block specified
1583
// by the passed height has been disconnected from the main chain. This
1584
// will "rewind" the graph back to the height below, deleting channels
1585
// that are no longer confirmed from the graph. The prune log will be
1586
// set to the last prune height valid for the remaining chain.
1587
// Channels that were removed from the graph as a result of the
1588
// disconnected block are returned.
1589
func (c *KVStore) DisconnectBlockAtHeight(height uint32) (
1590
        []*models.ChannelEdgeInfo, error) {
161✔
1591

161✔
1592
        // Every channel having a ShortChannelID starting at 'height'
161✔
1593
        // will no longer be confirmed.
161✔
1594
        startShortChanID := lnwire.ShortChannelID{
161✔
1595
                BlockHeight: height,
161✔
1596
        }
161✔
1597

161✔
1598
        // Delete everything after this height from the db up until the
161✔
1599
        // SCID alias range.
161✔
1600
        endShortChanID := aliasmgr.StartingAlias
161✔
1601

161✔
1602
        // The block height will be the 3 first bytes of the channel IDs.
161✔
1603
        var chanIDStart [8]byte
161✔
1604
        byteOrder.PutUint64(chanIDStart[:], startShortChanID.ToUint64())
161✔
1605
        var chanIDEnd [8]byte
161✔
1606
        byteOrder.PutUint64(chanIDEnd[:], endShortChanID.ToUint64())
161✔
1607

161✔
1608
        c.cacheMu.Lock()
161✔
1609
        defer c.cacheMu.Unlock()
161✔
1610

161✔
1611
        // Keep track of the channels that are removed from the graph.
161✔
1612
        var removedChans []*models.ChannelEdgeInfo
161✔
1613

161✔
1614
        if err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
322✔
1615
                edges, err := tx.CreateTopLevelBucket(edgeBucket)
161✔
1616
                if err != nil {
161✔
1617
                        return err
×
1618
                }
×
1619
                edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket)
161✔
1620
                if err != nil {
161✔
1621
                        return err
×
1622
                }
×
1623
                chanIndex, err := edges.CreateBucketIfNotExists(
161✔
1624
                        channelPointBucket,
161✔
1625
                )
161✔
1626
                if err != nil {
161✔
1627
                        return err
×
1628
                }
×
1629
                zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
161✔
1630
                if err != nil {
161✔
1631
                        return err
×
1632
                }
×
1633

1634
                // Scan from chanIDStart to chanIDEnd, deleting every
1635
                // found edge.
1636
                // NOTE: we must delete the edges after the cursor loop, since
1637
                // modifying the bucket while traversing is not safe.
1638
                // NOTE: We use a < comparison in bytes.Compare instead of <=
1639
                // so that the StartingAlias itself isn't deleted.
1640
                var keys [][]byte
161✔
1641
                cursor := edgeIndex.ReadWriteCursor()
161✔
1642

161✔
1643
                //nolint:ll
161✔
1644
                for k, _ := cursor.Seek(chanIDStart[:]); k != nil &&
161✔
1645
                        bytes.Compare(k, chanIDEnd[:]) < 0; k, _ = cursor.Next() {
257✔
1646
                        keys = append(keys, k)
96✔
1647
                }
96✔
1648

1649
                for _, k := range keys {
257✔
1650
                        edgeInfo, err := c.delChannelEdgeUnsafe(
96✔
1651
                                edges, edgeIndex, chanIndex, zombieIndex,
96✔
1652
                                k, false, false,
96✔
1653
                        )
96✔
1654
                        if err != nil && !errors.Is(err, ErrEdgeNotFound) {
96✔
1655
                                return err
×
1656
                        }
×
1657

1658
                        removedChans = append(removedChans, edgeInfo)
96✔
1659
                }
1660

1661
                // Delete all the entries in the prune log having a height
1662
                // greater or equal to the block disconnected.
1663
                metaBucket, err := tx.CreateTopLevelBucket(graphMetaBucket)
161✔
1664
                if err != nil {
161✔
1665
                        return err
×
1666
                }
×
1667

1668
                pruneBucket, err := metaBucket.CreateBucketIfNotExists(
161✔
1669
                        pruneLogBucket,
161✔
1670
                )
161✔
1671
                if err != nil {
161✔
1672
                        return err
×
1673
                }
×
1674

1675
                var pruneKeyStart [4]byte
161✔
1676
                byteOrder.PutUint32(pruneKeyStart[:], height)
161✔
1677

161✔
1678
                var pruneKeyEnd [4]byte
161✔
1679
                byteOrder.PutUint32(pruneKeyEnd[:], math.MaxUint32)
161✔
1680

161✔
1681
                // To avoid modifying the bucket while traversing, we delete
161✔
1682
                // the keys in a second loop.
161✔
1683
                var pruneKeys [][]byte
161✔
1684
                pruneCursor := pruneBucket.ReadWriteCursor()
161✔
1685
                //nolint:ll
161✔
1686
                for k, _ := pruneCursor.Seek(pruneKeyStart[:]); k != nil &&
161✔
1687
                        bytes.Compare(k, pruneKeyEnd[:]) <= 0; k, _ = pruneCursor.Next() {
251✔
1688
                        pruneKeys = append(pruneKeys, k)
90✔
1689
                }
90✔
1690

1691
                for _, k := range pruneKeys {
251✔
1692
                        if err := pruneBucket.Delete(k); err != nil {
90✔
1693
                                return err
×
1694
                        }
×
1695
                }
1696

1697
                return nil
161✔
1698
        }, func() {
161✔
1699
                removedChans = nil
161✔
1700
        }); err != nil {
161✔
1701
                return nil, err
×
1702
        }
×
1703

1704
        for _, channel := range removedChans {
257✔
1705
                c.rejectCache.remove(channel.ChannelID)
96✔
1706
                c.chanCache.remove(channel.ChannelID)
96✔
1707
        }
96✔
1708

1709
        return removedChans, nil
161✔
1710
}
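
// Illustrative usage sketch, not part of the original file: rewinding the
// graph when a chain reorganization disconnects blocks. `reorgHeight` is the
// height of the first block that was disconnected from the main chain.
func exampleHandleReorg(store *KVStore, reorgHeight uint32) error {
        removed, err := store.DisconnectBlockAtHeight(reorgHeight)
        if err != nil {
                return err
        }

        for _, edge := range removed {
                if edge == nil {
                        continue
                }
                log.Debugf("Channel %v no longer confirmed after reorg",
                        edge.ChannelID)
        }

        return nil
}
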
1711

1712
// PruneTip returns the block height and hash of the latest block that has been
1713
// used to prune channels in the graph. Knowing the "prune tip" allows callers
1714
// to tell if the graph is currently in sync with the current best known UTXO
1715
// state.
1716
func (c *KVStore) PruneTip() (*chainhash.Hash, uint32, error) {
56✔
1717
        var (
56✔
1718
                tipHash   chainhash.Hash
56✔
1719
                tipHeight uint32
56✔
1720
        )
56✔
1721

56✔
1722
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
112✔
1723
                graphMeta := tx.ReadBucket(graphMetaBucket)
56✔
1724
                if graphMeta == nil {
56✔
1725
                        return ErrGraphNotFound
×
1726
                }
×
1727
                pruneBucket := graphMeta.NestedReadBucket(pruneLogBucket)
56✔
1728
                if pruneBucket == nil {
56✔
1729
                        return ErrGraphNeverPruned
×
1730
                }
×
1731

1732
                pruneCursor := pruneBucket.ReadCursor()
56✔
1733

56✔
1734
                // The prune key with the largest block height will be our
56✔
1735
                // prune tip.
56✔
1736
                k, v := pruneCursor.Last()
56✔
1737
                if k == nil {
77✔
1738
                        return ErrGraphNeverPruned
21✔
1739
                }
21✔
1740

1741
                // Once we have the prune tip, the value will be the block hash,
1742
                // and the key the block height.
1743
                copy(tipHash[:], v)
38✔
1744
                tipHeight = byteOrder.Uint32(k)
38✔
1745

38✔
1746
                return nil
38✔
1747
        }, func() {})
56✔
1748
        if err != nil {
77✔
1749
                return nil, 0, err
21✔
1750
        }
21✔
1751

1752
        return &tipHash, tipHeight, nil
38✔
1753
}
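
// Illustrative usage sketch, not part of the original file: comparing the
// graph's prune tip against the chain backend's best block to decide whether
// the graph still needs to catch up. The best hash and height are assumed
// inputs here.
func exampleIsGraphSynced(store *KVStore, bestHash *chainhash.Hash,
        bestHeight uint32) (bool, error) {

        tipHash, tipHeight, err := store.PruneTip()
        switch {
        // A graph that has never been pruned is trivially out of sync.
        case errors.Is(err, ErrGraphNeverPruned):
                return false, nil

        case err != nil:
                return false, err
        }

        return tipHeight == bestHeight && tipHash.IsEqual(bestHash), nil
}
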
1754

1755
// DeleteChannelEdges removes edges with the given channel IDs from the
1756
// database and marks them as zombies. This ensures that we're unable to re-add
1757
// them to our database again. If an edge does not exist within the
1758
// database, then ErrEdgeNotFound will be returned. If strictZombiePruning is
1759
// true, then when we mark these edges as zombies, we'll set up the keys such
1760
// that we require the node that failed to send the fresh update to be the one
1761
// that resurrects the channel from its zombie state. The markZombie bool
1762
// denotes whether or not to mark the channel as a zombie.
1763
func (c *KVStore) DeleteChannelEdges(strictZombiePruning, markZombie bool,
1764
        chanIDs ...uint64) ([]*models.ChannelEdgeInfo, error) {
149✔
1765

149✔
1766
        // TODO(roasbeef): possibly delete from node bucket if node has no more
149✔
1767
        // channels
149✔
1768
        // TODO(roasbeef): don't delete both edges?
149✔
1769

149✔
1770
        c.cacheMu.Lock()
149✔
1771
        defer c.cacheMu.Unlock()
149✔
1772

149✔
1773
        var infos []*models.ChannelEdgeInfo
149✔
1774
        err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
298✔
1775
                edges := tx.ReadWriteBucket(edgeBucket)
149✔
1776
                if edges == nil {
149✔
1777
                        return ErrEdgeNotFound
×
1778
                }
×
1779
                edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
149✔
1780
                if edgeIndex == nil {
149✔
1781
                        return ErrEdgeNotFound
×
1782
                }
×
1783
                chanIndex := edges.NestedReadWriteBucket(channelPointBucket)
149✔
1784
                if chanIndex == nil {
149✔
1785
                        return ErrEdgeNotFound
×
1786
                }
×
1787
                nodes := tx.ReadWriteBucket(nodeBucket)
149✔
1788
                if nodes == nil {
149✔
1789
                        return ErrGraphNodeNotFound
×
1790
                }
×
1791
                zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
149✔
1792
                if err != nil {
149✔
1793
                        return err
×
1794
                }
×
1795

1796
                var rawChanID [8]byte
149✔
1797
                for _, chanID := range chanIDs {
238✔
1798
                        byteOrder.PutUint64(rawChanID[:], chanID)
89✔
1799
                        edgeInfo, err := c.delChannelEdgeUnsafe(
89✔
1800
                                edges, edgeIndex, chanIndex, zombieIndex,
89✔
1801
                                rawChanID[:], markZombie, strictZombiePruning,
89✔
1802
                        )
89✔
1803
                        if err != nil {
150✔
1804
                                return err
61✔
1805
                        }
61✔
1806

1807
                        infos = append(infos, edgeInfo)
28✔
1808
                }
1809

1810
                return nil
88✔
1811
        }, func() {
149✔
1812
                infos = nil
149✔
1813
        })
149✔
1814
        if err != nil {
210✔
1815
                return nil, err
61✔
1816
        }
61✔
1817

1818
        for _, chanID := range chanIDs {
116✔
1819
                c.rejectCache.remove(chanID)
28✔
1820
                c.chanCache.remove(chanID)
28✔
1821
        }
28✔
1822

1823
        return infos, nil
88✔
1824
}
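
// Illustrative usage sketch, not part of the original file: removing a batch
// of channels and marking them as zombies, e.g. after they failed
// re-validation against the chain. Strict zombie pruning is left disabled in
// this example.
func exampleRemoveStaleChannels(store *KVStore, staleIDs []uint64) error {
        removed, err := store.DeleteChannelEdges(false, true, staleIDs...)
        if err != nil {
                return err
        }

        log.Infof("Marked %d channels as zombies", len(removed))

        return nil
}
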
1825

1826
// ChannelID attempts to look up the 8-byte compact channel ID which maps to the
1827
// passed channel point (outpoint). If the passed channel doesn't exist within
1828
// the database, then ErrEdgeNotFound is returned.
1829
func (c *KVStore) ChannelID(chanPoint *wire.OutPoint) (uint64, error) {
4✔
1830
        var chanID uint64
4✔
1831
        if err := kvdb.View(c.db, func(tx kvdb.RTx) error {
8✔
1832
                var err error
4✔
1833
                chanID, err = getChanID(tx, chanPoint)
4✔
1834
                return err
4✔
1835
        }, func() {
8✔
1836
                chanID = 0
4✔
1837
        }); err != nil {
7✔
1838
                return 0, err
3✔
1839
        }
3✔
1840

1841
        return chanID, nil
4✔
1842
}
1843

1844
// getChanID returns the assigned channel ID for a given channel point.
1845
func getChanID(tx kvdb.RTx, chanPoint *wire.OutPoint) (uint64, error) {
4✔
1846
        var b bytes.Buffer
4✔
1847
        if err := WriteOutpoint(&b, chanPoint); err != nil {
4✔
1848
                return 0, err
×
1849
        }
×
1850

1851
        edges := tx.ReadBucket(edgeBucket)
4✔
1852
        if edges == nil {
4✔
1853
                return 0, ErrGraphNoEdgesFound
×
1854
        }
×
1855
        chanIndex := edges.NestedReadBucket(channelPointBucket)
4✔
1856
        if chanIndex == nil {
4✔
1857
                return 0, ErrGraphNoEdgesFound
×
1858
        }
×
1859

1860
        chanIDBytes := chanIndex.Get(b.Bytes())
4✔
1861
        if chanIDBytes == nil {
7✔
1862
                return 0, ErrEdgeNotFound
3✔
1863
        }
3✔
1864

1865
        chanID := byteOrder.Uint64(chanIDBytes)
4✔
1866

4✔
1867
        return chanID, nil
4✔
1868
}
1869

1870
// TODO(roasbeef): allow updates to use Batch?
1871

1872
// HighestChanID returns the "highest" known channel ID in the channel graph.
1873
// This represents the "newest" channel from the PoV of the chain. This method
1874
// can be used by peers to quickly determine if their graphs are in sync.
1875
func (c *KVStore) HighestChanID() (uint64, error) {
6✔
1876
        var cid uint64
6✔
1877

6✔
1878
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
12✔
1879
                edges := tx.ReadBucket(edgeBucket)
6✔
1880
                if edges == nil {
6✔
1881
                        return ErrGraphNoEdgesFound
×
1882
                }
×
1883
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
6✔
1884
                if edgeIndex == nil {
6✔
1885
                        return ErrGraphNoEdgesFound
×
1886
                }
×
1887

1888
                // In order to find the highest chan ID, we'll fetch a cursor
1889
                // and use that to seek to the "end" of our known range.
1890
                cidCursor := edgeIndex.ReadCursor()
6✔
1891

6✔
1892
                lastChanID, _ := cidCursor.Last()
6✔
1893

6✔
1894
                // If there's no key, then this means that we don't actually
6✔
1895
                // know of any channels, so we'll return a predictable error.
6✔
1896
                if lastChanID == nil {
10✔
1897
                        return ErrGraphNoEdgesFound
4✔
1898
                }
4✔
1899

1900
                // Otherwise, we'll deserialize the channel ID and return it
1901
                // to the caller.
1902
                cid = byteOrder.Uint64(lastChanID)
5✔
1903

5✔
1904
                return nil
5✔
1905
        }, func() {
6✔
1906
                cid = 0
6✔
1907
        })
6✔
1908
        if err != nil && !errors.Is(err, ErrGraphNoEdgesFound) {
6✔
1909
                return 0, err
×
1910
        }
×
1911

1912
        return cid, nil
6✔
1913
}
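
// Illustrative usage sketch, not part of the original file: converting the
// highest known channel ID back into a short channel ID to see up to which
// block height the local graph has channel data.
func exampleHighestKnownBlock(store *KVStore) (uint32, error) {
        cid, err := store.HighestChanID()
        if err != nil {
                return 0, err
        }

        scid := lnwire.NewShortChanIDFromInt(cid)

        return scid.BlockHeight, nil
}
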
1914

1915
// ChannelEdge represents the complete set of information for a channel edge in
1916
// the known channel graph. This struct couples the core information of the
1917
// edge as well as each of the known advertised edge policies.
1918
type ChannelEdge struct {
1919
        // Info contains all the static information describing the channel.
1920
        Info *models.ChannelEdgeInfo
1921

1922
        // Policy1 points to the "first" edge policy of the channel containing
1923
        // the dynamic information required to properly route through the edge.
1924
        Policy1 *models.ChannelEdgePolicy
1925

1926
        // Policy2 points to the "second" edge policy of the channel containing
1927
        // the dynamic information required to properly route through the edge.
1928
        Policy2 *models.ChannelEdgePolicy
1929

1930
        // Node1 is "node 1" in the channel. This is the node that would have
1931
        // produced Policy1 if it exists.
1932
        Node1 *models.LightningNode
1933

1934
        // Node2 is "node 2" in the channel. This is the node that would have
1935
        // produced Policy2 if it exists.
1936
        Node2 *models.LightningNode
1937
}
1938

1939
// ChanUpdatesInHorizon returns all the known channel edges which have at least
1940
// one edge that has an update timestamp within the specified horizon.
1941
func (c *KVStore) ChanUpdatesInHorizon(startTime,
1942
        endTime time.Time) ([]ChannelEdge, error) {
140✔
1943

140✔
1944
        // To ensure we don't return duplicate ChannelEdges, we'll use an
140✔
1945
        // additional map to keep track of the edges already seen to prevent
140✔
1946
        // re-adding them.
140✔
1947
        var edgesSeen map[uint64]struct{}
140✔
1948
        var edgesToCache map[uint64]ChannelEdge
140✔
1949
        var edgesInHorizon []ChannelEdge
140✔
1950

140✔
1951
        c.cacheMu.Lock()
140✔
1952
        defer c.cacheMu.Unlock()
140✔
1953

140✔
1954
        var hits int
140✔
1955
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
280✔
1956
                edges := tx.ReadBucket(edgeBucket)
140✔
1957
                if edges == nil {
140✔
1958
                        return ErrGraphNoEdgesFound
×
1959
                }
×
1960
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
140✔
1961
                if edgeIndex == nil {
140✔
1962
                        return ErrGraphNoEdgesFound
×
1963
                }
×
1964
                edgeUpdateIndex := edges.NestedReadBucket(edgeUpdateIndexBucket)
140✔
1965
                if edgeUpdateIndex == nil {
140✔
1966
                        return ErrGraphNoEdgesFound
×
1967
                }
×
1968

1969
                nodes := tx.ReadBucket(nodeBucket)
140✔
1970
                if nodes == nil {
140✔
1971
                        return ErrGraphNodesNotFound
×
1972
                }
×
1973

1974
                // We'll now obtain a cursor to perform a range query within
1975
                // the index to find all channels within the horizon.
1976
                updateCursor := edgeUpdateIndex.ReadCursor()
140✔
1977

140✔
1978
                var startTimeBytes, endTimeBytes [8 + 8]byte
140✔
1979
                byteOrder.PutUint64(
140✔
1980
                        startTimeBytes[:8], uint64(startTime.Unix()),
140✔
1981
                )
140✔
1982
                byteOrder.PutUint64(
140✔
1983
                        endTimeBytes[:8], uint64(endTime.Unix()),
140✔
1984
                )
140✔
1985

140✔
1986
                // With our start and end times constructed, we'll step through
140✔
1987
                // the index collecting the info and policy of each update of
140✔
1988
                // each channel that has a last update within the time range.
140✔
1989
                //
140✔
1990
                //nolint:ll
140✔
1991
                for indexKey, _ := updateCursor.Seek(startTimeBytes[:]); indexKey != nil &&
140✔
1992
                        bytes.Compare(indexKey, endTimeBytes[:]) <= 0; indexKey, _ = updateCursor.Next() {
189✔
1993
                        // We have a new eligible entry, so we'll slice off the
49✔
1994
                        // chan ID so we can query it in the DB.
49✔
1995
                        chanID := indexKey[8:]
49✔
1996

49✔
1997
                        // If we've already retrieved the info and policies for
49✔
1998
                        // this edge, then we can skip it as we don't need to do
49✔
1999
                        // so again.
49✔
2000
                        chanIDInt := byteOrder.Uint64(chanID)
49✔
2001
                        if _, ok := edgesSeen[chanIDInt]; ok {
68✔
2002
                                continue
19✔
2003
                        }
2004

2005
                        if channel, ok := c.chanCache.get(chanIDInt); ok {
41✔
2006
                                hits++
11✔
2007
                                edgesSeen[chanIDInt] = struct{}{}
11✔
2008
                                edgesInHorizon = append(edgesInHorizon, channel)
11✔
2009

11✔
2010
                                continue
11✔
2011
                        }
2012

2013
                        // First, we'll fetch the static edge information.
2014
                        edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID)
21✔
2015
                        if err != nil {
21✔
2016
                                chanID := byteOrder.Uint64(chanID)
×
2017
                                return fmt.Errorf("unable to fetch info for "+
×
2018
                                        "edge with chan_id=%v: %v", chanID, err)
×
2019
                        }
×
2020

2021
                        // With the static information obtained, we'll now
2022
                        // fetch the dynamic policy info.
2023
                        edge1, edge2, err := fetchChanEdgePolicies(
21✔
2024
                                edgeIndex, edges, chanID,
21✔
2025
                        )
21✔
2026
                        if err != nil {
21✔
2027
                                chanID := byteOrder.Uint64(chanID)
×
2028
                                return fmt.Errorf("unable to fetch policies "+
×
2029
                                        "for edge with chan_id=%v: %v", chanID,
×
2030
                                        err)
×
2031
                        }
×
2032

2033
                        node1, err := fetchLightningNode(
21✔
2034
                                nodes, edgeInfo.NodeKey1Bytes[:],
21✔
2035
                        )
21✔
2036
                        if err != nil {
21✔
2037
                                return err
×
2038
                        }
×
2039

2040
                        node2, err := fetchLightningNode(
21✔
2041
                                nodes, edgeInfo.NodeKey2Bytes[:],
21✔
2042
                        )
21✔
2043
                        if err != nil {
21✔
2044
                                return err
×
2045
                        }
×
2046

2047
                        // Finally, we'll collate this edge with the rest of
2048
                        // edges to be returned.
2049
                        edgesSeen[chanIDInt] = struct{}{}
21✔
2050
                        channel := ChannelEdge{
21✔
2051
                                Info:    &edgeInfo,
21✔
2052
                                Policy1: edge1,
21✔
2053
                                Policy2: edge2,
21✔
2054
                                Node1:   &node1,
21✔
2055
                                Node2:   &node2,
21✔
2056
                        }
21✔
2057
                        edgesInHorizon = append(edgesInHorizon, channel)
21✔
2058
                        edgesToCache[chanIDInt] = channel
21✔
2059
                }
2060

2061
                return nil
140✔
2062
        }, func() {
140✔
2063
                edgesSeen = make(map[uint64]struct{})
140✔
2064
                edgesToCache = make(map[uint64]ChannelEdge)
140✔
2065
                edgesInHorizon = nil
140✔
2066
        })
140✔
2067
        switch {
140✔
2068
        case errors.Is(err, ErrGraphNoEdgesFound):
×
2069
                fallthrough
×
2070
        case errors.Is(err, ErrGraphNodesNotFound):
×
2071
                break
×
2072

2073
        case err != nil:
×
2074
                return nil, err
×
2075
        }
2076

2077
        // Insert any edges loaded from disk into the cache.
2078
        for chanid, channel := range edgesToCache {
161✔
2079
                c.chanCache.insert(chanid, channel)
21✔
2080
        }
21✔
2081

2082
        log.Debugf("ChanUpdatesInHorizon hit percentage: %f (%d/%d)",
140✔
2083
                float64(hits)/float64(len(edgesInHorizon)), hits,
140✔
2084
                len(edgesInHorizon))
140✔
2085

140✔
2086
        return edgesInHorizon, nil
140✔
2087
}
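
// Illustrative usage sketch, not part of the original file: collecting every
// channel that saw an update in the last 24 hours, roughly what a gossip
// syncer needs when honouring a peer's timestamp filter. The window size is
// an arbitrary example value.
func exampleRecentChannelUpdates(store *KVStore) ([]ChannelEdge, error) {
        endTime := time.Now()
        startTime := endTime.Add(-24 * time.Hour)

        return store.ChanUpdatesInHorizon(startTime, endTime)
}
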
2088

2089
// NodeUpdatesInHorizon returns all the known lightning nodes which have an
2090
// update timestamp within the passed range. This method can be used by two
2091
// nodes to quickly determine if they have the same set of up to date node
2092
// announcements.
2093
func (c *KVStore) NodeUpdatesInHorizon(startTime,
2094
        endTime time.Time) ([]models.LightningNode, error) {
11✔
2095

11✔
2096
        var nodesInHorizon []models.LightningNode
11✔
2097

11✔
2098
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
22✔
2099
                nodes := tx.ReadBucket(nodeBucket)
11✔
2100
                if nodes == nil {
11✔
2101
                        return ErrGraphNodesNotFound
×
2102
                }
×
2103

2104
                nodeUpdateIndex := nodes.NestedReadBucket(nodeUpdateIndexBucket)
11✔
2105
                if nodeUpdateIndex == nil {
11✔
2106
                        return ErrGraphNodesNotFound
×
2107
                }
×
2108

2109
                // We'll now obtain a cursor to perform a range query within
2110
                // the index to find all node announcements within the horizon.
2111
                updateCursor := nodeUpdateIndex.ReadCursor()
11✔
2112

11✔
2113
                var startTimeBytes, endTimeBytes [8 + 33]byte
11✔
2114
                byteOrder.PutUint64(
11✔
2115
                        startTimeBytes[:8], uint64(startTime.Unix()),
11✔
2116
                )
11✔
2117
                byteOrder.PutUint64(
11✔
2118
                        endTimeBytes[:8], uint64(endTime.Unix()),
11✔
2119
                )
11✔
2120

11✔
2121
                // With our start and end times constructed, we'll step through
11✔
2122
                // the index collecting info for each node within the time
11✔
2123
                // range.
11✔
2124
                //
11✔
2125
                //nolint:ll
11✔
2126
                for indexKey, _ := updateCursor.Seek(startTimeBytes[:]); indexKey != nil &&
11✔
2127
                        bytes.Compare(indexKey, endTimeBytes[:]) <= 0; indexKey, _ = updateCursor.Next() {
43✔
2128
                        nodePub := indexKey[8:]
32✔
2129
                        node, err := fetchLightningNode(nodes, nodePub)
32✔
2130
                        if err != nil {
32✔
2131
                                return err
×
2132
                        }
×
2133

2134
                        nodesInHorizon = append(nodesInHorizon, node)
32✔
2135
                }
2136

2137
                return nil
11✔
2138
        }, func() {
11✔
2139
                nodesInHorizon = nil
11✔
2140
        })
11✔
2141
        switch {
11✔
2142
        case errors.Is(err, ErrGraphNoEdgesFound):
×
2143
                fallthrough
×
2144
        case errors.Is(err, ErrGraphNodesNotFound):
×
2145
                break
×
2146

2147
        case err != nil:
×
2148
                return nil, err
×
2149
        }
2150

2151
        return nodesInHorizon, nil
11✔
2152
}
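
// Illustrative usage sketch, not part of the original file: fetching the node
// announcements received within the last hour and logging their public keys.
func exampleRecentNodeUpdates(store *KVStore) error {
        endTime := time.Now()
        startTime := endTime.Add(-time.Hour)

        nodes, err := store.NodeUpdatesInHorizon(startTime, endTime)
        if err != nil {
                return err
        }

        for _, node := range nodes {
                log.Debugf("Recently updated node: %x", node.PubKeyBytes[:])
        }

        return nil
}
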
2153

2154
// FilterKnownChanIDs takes a set of channel IDs and returns the subset of chan
2155
// ID's that we don't know and are not known zombies of the passed set. In other
2156
// words, we perform a set difference of our set of chan ID's and the ones
2157
// passed in. This method can be used by callers to determine the set of
2158
// channels another peer knows of that we don't.
2159
func (c *KVStore) FilterKnownChanIDs(chansInfo []ChannelUpdateInfo,
2160
        isZombieChan func(time.Time, time.Time) bool) ([]uint64, error) {
125✔
2161

125✔
2162
        var newChanIDs []uint64
125✔
2163

125✔
2164
        c.cacheMu.Lock()
125✔
2165
        defer c.cacheMu.Unlock()
125✔
2166

125✔
2167
        err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
250✔
2168
                edges := tx.ReadBucket(edgeBucket)
125✔
2169
                if edges == nil {
125✔
2170
                        return ErrGraphNoEdgesFound
×
2171
                }
×
2172
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
125✔
2173
                if edgeIndex == nil {
125✔
2174
                        return ErrGraphNoEdgesFound
×
2175
                }
×
2176

2177
                // Fetch the zombie index, it may not exist if no edges have
2178
                // ever been marked as zombies. If the index has been
2179
                // initialized, we will use it later to skip known zombie edges.
2180
                zombieIndex := edges.NestedReadBucket(zombieBucket)
125✔
2181

125✔
2182
                // We'll run through the set of chanIDs and collate only the
125✔
2183
                // set of channels that cannot be found within our db.
125✔
2184
                var cidBytes [8]byte
125✔
2185
                for _, info := range chansInfo {
236✔
2186
                        scid := info.ShortChannelID.ToUint64()
111✔
2187
                        byteOrder.PutUint64(cidBytes[:], scid)
111✔
2188

111✔
2189
                        // If the edge is already known, skip it.
111✔
2190
                        if v := edgeIndex.Get(cidBytes[:]); v != nil {
134✔
2191
                                continue
23✔
2192
                        }
2193

2194
                        // If the edge is a known zombie, skip it.
2195
                        if zombieIndex != nil {
182✔
2196
                                isZombie, _, _ := isZombieEdge(
91✔
2197
                                        zombieIndex, scid,
91✔
2198
                                )
91✔
2199

91✔
2200
                                // TODO(ziggie): Make sure that for the strict
91✔
2201
                                // pruning case we compare the pubkeys and
91✔
2202
                                // whether the right timestamp is not older than
91✔
2203
                                // the `ChannelPruneExpiry`.
91✔
2204
                                //
91✔
2205
                                // NOTE: The timestamp data has no verification
91✔
2206
                                // attached to it in the `ReplyChannelRange` msg
91✔
2207
                                // so we are trusting this data at this point.
91✔
2208
                                // However it is not critical because we are
91✔
2209
                                // just removing the channel from the db when
91✔
2210
                                // the timestamps are more recent. During the
91✔
2211
                                // querying of the gossip msg verification
91✔
2212
                                // happens as usual.
91✔
2213
                                // However we should start punishing peers when
91✔
2214
                                // they don't provide us honest data ?
91✔
2215
                                isStillZombie := isZombieChan(
91✔
2216
                                        info.Node1UpdateTimestamp,
91✔
2217
                                        info.Node2UpdateTimestamp,
91✔
2218
                                )
91✔
2219

91✔
2220
                                switch {
91✔
2221
                                // If the edge is a known zombie and if we
2222
                                // would still consider it a zombie given the
2223
                                // latest update timestamps, then we skip this
2224
                                // channel.
2225
                                case isZombie && isStillZombie:
24✔
2226
                                        continue
24✔
2227

2228
                                // Otherwise, if we have marked it as a zombie
2229
                                // but the latest update timestamps could bring
2230
                                // it back from the dead, then we mark it alive,
2231
                                // and we let it be added to the set of IDs to
2232
                                // query our peer for.
2233
                                case isZombie && !isStillZombie:
15✔
2234
                                        err := c.markEdgeLiveUnsafe(tx, scid)
15✔
2235
                                        if err != nil {
15✔
2236
                                                return err
×
2237
                                        }
×
2238
                                }
2239
                        }
2240

2241
                        newChanIDs = append(newChanIDs, scid)
67✔
2242
                }
2243

2244
                return nil
125✔
2245
        }, func() {
125✔
2246
                newChanIDs = nil
125✔
2247
        })
125✔
2248
        switch {
125✔
2249
        // If we don't know of any edges yet, then we'll return the entire set
2250
        // of chan IDs specified.
2251
        case errors.Is(err, ErrGraphNoEdgesFound):
×
2252
                ogChanIDs := make([]uint64, len(chansInfo))
×
2253
                for i, info := range chansInfo {
×
2254
                        ogChanIDs[i] = info.ShortChannelID.ToUint64()
×
2255
                }
×
2256

2257
                return ogChanIDs, nil
×
2258

2259
        case err != nil:
×
2260
                return nil, err
×
2261
        }
2262

2263
        return newChanIDs, nil
125✔
2264
}
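
// Illustrative usage sketch, not part of the original file: computing the set
// of channels a remote peer knows about that we still need to query for. The
// zombie check treats a channel as still dead if neither side has produced an
// update within the last two weeks; the exact policy is an assumption of this
// example.
func exampleFilterRemoteChannels(store *KVStore,
        remote []ChannelUpdateInfo) ([]uint64, error) {

        const staleCutoff = 14 * 24 * time.Hour

        isStillZombie := func(upd1, upd2 time.Time) bool {
                return time.Since(upd1) > staleCutoff &&
                        time.Since(upd2) > staleCutoff
        }

        return store.FilterKnownChanIDs(remote, isStillZombie)
}
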
2265

2266
// ChannelUpdateInfo couples the SCID of a channel with the timestamps of the
2267
// latest received channel updates for the channel.
2268
type ChannelUpdateInfo struct {
2269
        // ShortChannelID is the SCID identifier of the channel.
2270
        ShortChannelID lnwire.ShortChannelID
2271

2272
        // Node1UpdateTimestamp is the timestamp of the latest received update
2273
        // from the node 1 channel peer. This will be set to zero time if no
2274
        // update has yet been received from this node.
2275
        Node1UpdateTimestamp time.Time
2276

2277
        // Node2UpdateTimestamp is the timestamp of the latest received update
2278
        // from the node 2 channel peer. This will be set to zero time if no
2279
        // update has yet been received from this node.
2280
        Node2UpdateTimestamp time.Time
2281
}
2282

2283
// NewChannelUpdateInfo is a constructor which makes sure we initialize the
2284
// timestamps with the zero-seconds unix timestamp, which equals
2285
// `January 1, 1970, 00:00:00 UTC` in case the value is `time.Time{}`.
2286
func NewChannelUpdateInfo(scid lnwire.ShortChannelID, node1Timestamp,
2287
        node2Timestamp time.Time) ChannelUpdateInfo {
221✔
2288

221✔
2289
        chanInfo := ChannelUpdateInfo{
221✔
2290
                ShortChannelID:       scid,
221✔
2291
                Node1UpdateTimestamp: node1Timestamp,
221✔
2292
                Node2UpdateTimestamp: node2Timestamp,
221✔
2293
        }
221✔
2294

221✔
2295
        if node1Timestamp.IsZero() {
432✔
2296
                chanInfo.Node1UpdateTimestamp = time.Unix(0, 0)
211✔
2297
        }
211✔
2298

2299
        if node2Timestamp.IsZero() {
432✔
2300
                chanInfo.Node2UpdateTimestamp = time.Unix(0, 0)
211✔
2301
        }
211✔
2302

2303
        return chanInfo
221✔
2304
}
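
// Illustrative usage sketch, not part of the original file: the constructor
// backfills missing timestamps with the unix epoch, so a caller that only has
// one side's update can still build a well-formed ChannelUpdateInfo.
func exampleNewUpdateInfo(scid lnwire.ShortChannelID,
        node1Update time.Time) ChannelUpdateInfo {

        // Node 2's timestamp is unknown here; NewChannelUpdateInfo will set it
        // to time.Unix(0, 0).
        return NewChannelUpdateInfo(scid, node1Update, time.Time{})
}
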
2305

2306
// BlockChannelRange represents a range of channels for a given block height.
2307
type BlockChannelRange struct {
2308
        // Height is the height of the block all of the channels below were
2309
        // included in.
2310
        Height uint32
2311

2312
        // Channels is the list of channels identified by their short ID
2313
        // representation known to us that were included in the block height
2314
        // above. The list may include channel update timestamp information if
2315
        // requested.
2316
        Channels []ChannelUpdateInfo
2317
}
2318

2319
// FilterChannelRange returns the channel ID's of all known channels which were
2320
// mined in a block height within the passed range. The channel IDs are grouped
2321
// by their common block height. This method can be used to quickly share with a
2322
// peer the set of channels we know of within a particular range to catch them
2323
// up after a period of time offline. If withTimestamps is true then the
2324
// timestamp info of the latest received channel update messages of the channel
2325
// will be included in the response.
2326
func (c *KVStore) FilterChannelRange(startHeight,
2327
        endHeight uint32, withTimestamps bool) ([]BlockChannelRange, error) {
14✔
2328

14✔
2329
        startChanID := &lnwire.ShortChannelID{
14✔
2330
                BlockHeight: startHeight,
14✔
2331
        }
14✔
2332

14✔
2333
        endChanID := lnwire.ShortChannelID{
14✔
2334
                BlockHeight: endHeight,
14✔
2335
                TxIndex:     math.MaxUint32 & 0x00ffffff,
14✔
2336
                TxPosition:  math.MaxUint16,
14✔
2337
        }
14✔
2338

14✔
2339
        // As we need to perform a range scan, we'll convert the starting and
14✔
2340
        // ending height to their corresponding values when encoded using short
14✔
2341
        // channel ID's.
14✔
2342
        var chanIDStart, chanIDEnd [8]byte
14✔
2343
        byteOrder.PutUint64(chanIDStart[:], startChanID.ToUint64())
14✔
2344
        byteOrder.PutUint64(chanIDEnd[:], endChanID.ToUint64())
14✔
2345

14✔
2346
        var channelsPerBlock map[uint32][]ChannelUpdateInfo
14✔
2347
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
28✔
2348
                edges := tx.ReadBucket(edgeBucket)
14✔
2349
                if edges == nil {
14✔
2350
                        return ErrGraphNoEdgesFound
×
2351
                }
×
2352
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
14✔
2353
                if edgeIndex == nil {
14✔
2354
                        return ErrGraphNoEdgesFound
×
2355
                }
×
2356

2357
                cursor := edgeIndex.ReadCursor()
14✔
2358

14✔
2359
                // We'll now iterate through the database, and find each
14✔
2360
                // channel ID that resides within the specified range.
14✔
2361
                //
14✔
2362
                //nolint:ll
14✔
2363
                for k, v := cursor.Seek(chanIDStart[:]); k != nil &&
14✔
2364
                        bytes.Compare(k, chanIDEnd[:]) <= 0; k, v = cursor.Next() {
61✔
2365
                        // Don't send alias SCIDs during gossip sync.
47✔
2366
                        edgeReader := bytes.NewReader(v)
47✔
2367
                        edgeInfo, err := deserializeChanEdgeInfo(edgeReader)
47✔
2368
                        if err != nil {
47✔
2369
                                return err
×
2370
                        }
×
2371

2372
                        if edgeInfo.AuthProof == nil {
50✔
2373
                                continue
3✔
2374
                        }
2375

2376
                        // This channel ID rests within the target range, so
2377
                        // we'll add it to our returned set.
2378
                        rawCid := byteOrder.Uint64(k)
47✔
2379
                        cid := lnwire.NewShortChanIDFromInt(rawCid)
47✔
2380

47✔
2381
                        chanInfo := NewChannelUpdateInfo(
47✔
2382
                                cid, time.Time{}, time.Time{},
47✔
2383
                        )
47✔
2384

47✔
2385
                        if !withTimestamps {
69✔
2386
                                channelsPerBlock[cid.BlockHeight] = append(
22✔
2387
                                        channelsPerBlock[cid.BlockHeight],
22✔
2388
                                        chanInfo,
22✔
2389
                                )
22✔
2390

22✔
2391
                                continue
22✔
2392
                        }
2393

2394
                        node1Key, node2Key := computeEdgePolicyKeys(&edgeInfo)
25✔
2395

25✔
2396
                        rawPolicy := edges.Get(node1Key)
25✔
2397
                        if len(rawPolicy) != 0 {
34✔
2398
                                r := bytes.NewReader(rawPolicy)
9✔
2399

9✔
2400
                                edge, err := deserializeChanEdgePolicyRaw(r)
9✔
2401
                                if err != nil && !errors.Is(
9✔
2402
                                        err, ErrEdgePolicyOptionalFieldNotFound,
9✔
2403
                                ) {
9✔
2404

×
2405
                                        return err
×
2406
                                }
×
2407

2408
                                chanInfo.Node1UpdateTimestamp = edge.LastUpdate
9✔
2409
                        }
2410

2411
                        rawPolicy = edges.Get(node2Key)
25✔
2412
                        if len(rawPolicy) != 0 {
39✔
2413
                                r := bytes.NewReader(rawPolicy)
14✔
2414

14✔
2415
                                edge, err := deserializeChanEdgePolicyRaw(r)
14✔
2416
                                if err != nil && !errors.Is(
14✔
2417
                                        err, ErrEdgePolicyOptionalFieldNotFound,
14✔
2418
                                ) {
14✔
2419

×
2420
                                        return err
×
2421
                                }
×
2422

2423
                                chanInfo.Node2UpdateTimestamp = edge.LastUpdate
14✔
2424
                        }
2425

2426
                        channelsPerBlock[cid.BlockHeight] = append(
25✔
2427
                                channelsPerBlock[cid.BlockHeight], chanInfo,
25✔
2428
                        )
25✔
2429
                }
2430

2431
                return nil
14✔
2432
        }, func() {
14✔
2433
                channelsPerBlock = make(map[uint32][]ChannelUpdateInfo)
14✔
2434
        })
14✔
2435

2436
        switch {
14✔
2437
        // If we don't know of any channels yet, then there's nothing to
2438
        // filter, so we'll return an empty slice.
2439
        case errors.Is(err, ErrGraphNoEdgesFound) || len(channelsPerBlock) == 0:
6✔
2440
                return nil, nil
6✔
2441

2442
        case err != nil:
×
2443
                return nil, err
×
2444
        }
2445

2446
        // Return the channel ranges in ascending block height order.
2447
        blocks := make([]uint32, 0, len(channelsPerBlock))
11✔
2448
        for block := range channelsPerBlock {
36✔
2449
                blocks = append(blocks, block)
25✔
2450
        }
25✔
2451
        sort.Slice(blocks, func(i, j int) bool {
33✔
2452
                return blocks[i] < blocks[j]
22✔
2453
        })
22✔
2454

2455
        channelRanges := make([]BlockChannelRange, 0, len(channelsPerBlock))
11✔
2456
        for _, block := range blocks {
36✔
2457
                channelRanges = append(channelRanges, BlockChannelRange{
25✔
2458
                        Height:   block,
25✔
2459
                        Channels: channelsPerBlock[block],
25✔
2460
                })
25✔
2461
        }
25✔
2462

2463
        return channelRanges, nil
11✔
2464
}
2465
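// exampleWalkChannelRanges is an illustrative sketch only, not part of lnd:
// it assumes it sits alongside this file in package graphdb and shows how a
// caller might consume the []BlockChannelRange assembled above. The ranges
// arrive in ascending block-height order, and each entry carries per-channel
// update timestamps when they were requested via withTimestamps.
func exampleWalkChannelRanges(ranges []BlockChannelRange) {
        for _, blockRange := range ranges {
                for _, ch := range blockRange.Channels {
                        fmt.Printf("height=%d node1_update=%v node2_update=%v\n",
                                blockRange.Height, ch.Node1UpdateTimestamp,
                                ch.Node2UpdateTimestamp)
                }
        }
}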

2466
// FetchChanInfos returns the set of channel edges that correspond to the passed
2467
// channel IDs. If an edge in the query is unknown to the database, it will be
2468
// skipped and the result will contain only those edges that exist at the time
2469
// of the query. This can be used to respond to peer queries that are seeking to
2470
// fill in gaps in their view of the channel graph.
2471
func (c *KVStore) FetchChanInfos(chanIDs []uint64) ([]ChannelEdge, error) {
7✔
2472
        return c.fetchChanInfos(nil, chanIDs)
7✔
2473
}
7✔
2474

2475
// fetchChanInfos returns the set of channel edges that correspond to the passed
2476
// channel IDs. If an edge in the query is unknown to the database, it will be
2477
// skipped and the result will contain only those edges that exist at the time
2478
// of the query. This can be used to respond to peer queries that are seeking to
2479
// fill in gaps in their view of the channel graph.
2480
//
2481
// NOTE: An optional transaction may be provided. If none is provided, then a
2482
// new one will be created.
2483
func (c *KVStore) fetchChanInfos(tx kvdb.RTx, chanIDs []uint64) (
2484
        []ChannelEdge, error) {
7✔
2485
        // TODO(roasbeef): sort cids?
7✔
2486

7✔
2487
        var (
7✔
2488
                chanEdges []ChannelEdge
7✔
2489
                cidBytes  [8]byte
7✔
2490
        )
7✔
2491

7✔
2492
        fetchChanInfos := func(tx kvdb.RTx) error {
14✔
2493
                edges := tx.ReadBucket(edgeBucket)
7✔
2494
                if edges == nil {
7✔
2495
                        return ErrGraphNoEdgesFound
×
2496
                }
×
2497
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
7✔
2498
                if edgeIndex == nil {
7✔
2499
                        return ErrGraphNoEdgesFound
×
2500
                }
×
2501
                nodes := tx.ReadBucket(nodeBucket)
7✔
2502
                if nodes == nil {
7✔
2503
                        return ErrGraphNotFound
×
2504
                }
×
2505

2506
                for _, cid := range chanIDs {
21✔
2507
                        byteOrder.PutUint64(cidBytes[:], cid)
14✔
2508

14✔
2509
                        // First, we'll fetch the static edge information. If
14✔
2510
                        // the edge is unknown, we will skip the edge and
14✔
2511
                        // continue gathering all known edges.
14✔
2512
                        edgeInfo, err := fetchChanEdgeInfo(
14✔
2513
                                edgeIndex, cidBytes[:],
14✔
2514
                        )
14✔
2515
                        switch {
14✔
2516
                        case errors.Is(err, ErrEdgeNotFound):
3✔
2517
                                continue
3✔
2518
                        case err != nil:
×
2519
                                return err
×
2520
                        }
2521

2522
                        // With the static information obtained, we'll now
2523
                        // fetch the dynamic policy info.
2524
                        edge1, edge2, err := fetchChanEdgePolicies(
11✔
2525
                                edgeIndex, edges, cidBytes[:],
11✔
2526
                        )
11✔
2527
                        if err != nil {
11✔
2528
                                return err
×
2529
                        }
×
2530

2531
                        node1, err := fetchLightningNode(
11✔
2532
                                nodes, edgeInfo.NodeKey1Bytes[:],
11✔
2533
                        )
11✔
2534
                        if err != nil {
11✔
2535
                                return err
×
2536
                        }
×
2537

2538
                        node2, err := fetchLightningNode(
11✔
2539
                                nodes, edgeInfo.NodeKey2Bytes[:],
11✔
2540
                        )
11✔
2541
                        if err != nil {
11✔
2542
                                return err
×
2543
                        }
×
2544

2545
                        chanEdges = append(chanEdges, ChannelEdge{
11✔
2546
                                Info:    &edgeInfo,
11✔
2547
                                Policy1: edge1,
11✔
2548
                                Policy2: edge2,
11✔
2549
                                Node1:   &node1,
11✔
2550
                                Node2:   &node2,
11✔
2551
                        })
11✔
2552
                }
2553

2554
                return nil
7✔
2555
        }
2556

2557
        if tx == nil {
14✔
2558
                err := kvdb.View(c.db, fetchChanInfos, func() {
14✔
2559
                        chanEdges = nil
7✔
2560
                })
7✔
2561
                if err != nil {
7✔
2562
                        return nil, err
×
2563
                }
×
2564

2565
                return chanEdges, nil
7✔
2566
        }
2567

2568
        err := fetchChanInfos(tx)
×
2569
        if err != nil {
×
2570
                return nil, err
×
2571
        }
×
2572

2573
        return chanEdges, nil
×
2574
}
2575
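// exampleFetchChanInfos is an illustrative sketch only, not part of lnd: it
// assumes an already-opened *KVStore ("store" is hypothetical) and a slice of
// known short channel IDs. Unknown channels are simply absent from the result,
// and either policy may be nil if it has not been received yet.
func exampleFetchChanInfos(store *KVStore, scids []uint64) error {
        edges, err := store.FetchChanInfos(scids)
        if err != nil {
                return err
        }

        for _, e := range edges {
                fmt.Printf("chan=%d node1=%x node2=%x\n", e.Info.ChannelID,
                        e.Node1.PubKeyBytes, e.Node2.PubKeyBytes)

                if e.Policy1 != nil {
                        fmt.Printf("  policy1 last updated %v\n",
                                e.Policy1.LastUpdate)
                }
        }

        return nil
}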

2576
func delEdgeUpdateIndexEntry(edgesBucket kvdb.RwBucket, chanID uint64,
2577
        edge1, edge2 *models.ChannelEdgePolicy) error {
143✔
2578

143✔
2579
        // First, we'll fetch the edge update index bucket which currently
143✔
2580
        // stores an entry for the channel we're about to delete.
143✔
2581
        updateIndex := edgesBucket.NestedReadWriteBucket(edgeUpdateIndexBucket)
143✔
2582
        if updateIndex == nil {
143✔
2583
                // No edges in bucket, return early.
×
2584
                return nil
×
2585
        }
×
2586

2587
        // Now that we have the bucket, we'll attempt to construct a template
2588
        // for the index key: updateTime || chanid.
2589
        var indexKey [8 + 8]byte
143✔
2590
        byteOrder.PutUint64(indexKey[8:], chanID)
143✔
2591

143✔
2592
        // With the template constructed, we'll attempt to delete an entry that
143✔
2593
        // would have been created by both edges: we'll alternate the update
143✔
2594
        // times, as one may have overridden the other.
143✔
2595
        if edge1 != nil {
156✔
2596
                byteOrder.PutUint64(
13✔
2597
                        indexKey[:8], uint64(edge1.LastUpdate.Unix()),
13✔
2598
                )
13✔
2599
                if err := updateIndex.Delete(indexKey[:]); err != nil {
13✔
2600
                        return err
×
2601
                }
×
2602
        }
2603

2604
        // We'll also attempt to delete the entry that may have been created by
2605
        // the second edge.
2606
        if edge2 != nil {
158✔
2607
                byteOrder.PutUint64(
15✔
2608
                        indexKey[:8], uint64(edge2.LastUpdate.Unix()),
15✔
2609
                )
15✔
2610
                if err := updateIndex.Delete(indexKey[:]); err != nil {
15✔
2611
                        return err
×
2612
                }
×
2613
        }
2614

2615
        return nil
143✔
2616
}
2617

2618
// delChannelEdgeUnsafe deletes the edge with the given chanID from the graph
2619
// cache. It then goes on to delete any policy info and edge info for this
2620
// channel from the DB and finally, if isZombie is true, it will add an entry
2621
// for this channel in the zombie index.
2622
//
2623
// NOTE: this method MUST only be called if the cacheMu has already been
2624
// acquired.
2625
func (c *KVStore) delChannelEdgeUnsafe(edges, edgeIndex, chanIndex,
2626
        zombieIndex kvdb.RwBucket, chanID []byte, isZombie,
2627
        strictZombie bool) (*models.ChannelEdgeInfo, error) {
204✔
2628

204✔
2629
        edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID)
204✔
2630
        if err != nil {
265✔
2631
                return nil, err
61✔
2632
        }
61✔
2633

2634
        // We'll also remove the entry in the edge update index bucket before
2635
        // we delete the edges themselves so we can access their last update
2636
        // times.
2637
        cid := byteOrder.Uint64(chanID)
143✔
2638
        edge1, edge2, err := fetchChanEdgePolicies(edgeIndex, edges, chanID)
143✔
2639
        if err != nil {
143✔
NEW
2640
                return nil, err
×
2641
        }
×
2642
        err = delEdgeUpdateIndexEntry(edges, cid, edge1, edge2)
143✔
2643
        if err != nil {
143✔
NEW
2644
                return nil, err
×
2645
        }
×
2646

2647
        // The edge key is of the format pubKey || chanID. First we construct
2648
        // the latter half, populating the channel ID.
2649
        var edgeKey [33 + 8]byte
143✔
2650
        copy(edgeKey[33:], chanID)
143✔
2651

143✔
2652
        // With the latter half constructed, copy over the first public key to
143✔
2653
        // delete the edge in this direction, then the second to delete the
143✔
2654
        // edge in the opposite direction.
143✔
2655
        copy(edgeKey[:33], edgeInfo.NodeKey1Bytes[:])
143✔
2656
        if edges.Get(edgeKey[:]) != nil {
286✔
2657
                if err := edges.Delete(edgeKey[:]); err != nil {
143✔
NEW
2658
                        return nil, err
×
2659
                }
×
2660
        }
2661
        copy(edgeKey[:33], edgeInfo.NodeKey2Bytes[:])
143✔
2662
        if edges.Get(edgeKey[:]) != nil {
286✔
2663
                if err := edges.Delete(edgeKey[:]); err != nil {
143✔
NEW
2664
                        return nil, err
×
2665
                }
×
2666
        }
2667

2668
        // As part of deleting the edge we also remove all disabled entries
2669
        // from the edgePolicyDisabledIndex bucket. We do that for both
2670
        // directions.
2671
        err = updateEdgePolicyDisabledIndex(edges, cid, false, false)
143✔
2672
        if err != nil {
143✔
NEW
2673
                return nil, err
×
2674
        }
×
2675
        err = updateEdgePolicyDisabledIndex(edges, cid, true, false)
143✔
2676
        if err != nil {
143✔
NEW
2677
                return nil, err
×
2678
        }
×
2679

2680
        // With the edge data deleted, we can purge the information from the two
2681
        // edge indexes.
2682
        if err := edgeIndex.Delete(chanID); err != nil {
143✔
NEW
2683
                return nil, err
×
2684
        }
×
2685
        var b bytes.Buffer
143✔
2686
        if err := WriteOutpoint(&b, &edgeInfo.ChannelPoint); err != nil {
143✔
NEW
2687
                return nil, err
×
2688
        }
×
2689
        if err := chanIndex.Delete(b.Bytes()); err != nil {
143✔
NEW
2690
                return nil, err
×
2691
        }
×
2692

2693
        // Finally, we'll mark the edge as a zombie within our index if it's
2694
        // being removed due to the channel becoming a zombie. We do this to
2695
        // ensure we don't store unnecessary data for spent channels.
2696
        if !isZombie {
262✔
2697
                return &edgeInfo, nil
119✔
2698
        }
119✔
2699

2700
        nodeKey1, nodeKey2 := edgeInfo.NodeKey1Bytes, edgeInfo.NodeKey2Bytes
27✔
2701
        if strictZombie {
31✔
2702
                nodeKey1, nodeKey2 = makeZombiePubkeys(&edgeInfo, edge1, edge2)
4✔
2703
        }
4✔
2704

2705
        return &edgeInfo, markEdgeZombie(
27✔
2706
                zombieIndex, byteOrder.Uint64(chanID), nodeKey1, nodeKey2,
27✔
2707
        )
27✔
2708
}
2709

2710
// makeZombiePubkeys derives the node pubkeys to store in the zombie index for a
2711
// particular pair of channel policies. The return values are one of:
2712
//  1. (pubkey1, pubkey2)
2713
//  2. (pubkey1, blank)
2714
//  3. (blank, pubkey2)
2715
//
2716
// A blank pubkey means that corresponding node will be unable to resurrect a
2717
// channel on its own. For example, node1 may continue to publish recent
2718
// updates, but node2 has fallen way behind. After marking an edge as a zombie,
2719
// we don't want another fresh update from node1 to resurrect, as the edge can
2720
// only become live once node2 finally sends something recent.
2721
//
2722
// In the case where we have neither update, we allow either party to resurrect
2723
// the channel. If the channel were to be marked zombie again, it would be
2724
// marked with the correct lagging node since we received an update from only
2725
// one side.
2726
func makeZombiePubkeys(info *models.ChannelEdgeInfo,
2727
        e1, e2 *models.ChannelEdgePolicy) ([33]byte, [33]byte) {
4✔
2728

4✔
2729
        switch {
4✔
2730
        // If we don't have either edge policy, we'll return both pubkeys so
2731
        // that the channel can be resurrected by either party.
2732
        case e1 == nil && e2 == nil:
1✔
2733
                return info.NodeKey1Bytes, info.NodeKey2Bytes
1✔
2734

2735
        // If we're missing edge1, or if both edges are present but edge1 is
2736
        // older, we'll return edge1's pubkey and a blank pubkey for edge2. This
2737
        // means that only an update from edge1 will be able to resurrect the
2738
        // channel.
2739
        case e1 == nil || (e2 != nil && e1.LastUpdate.Before(e2.LastUpdate)):
1✔
2740
                return info.NodeKey1Bytes, [33]byte{}
1✔
2741

2742
        // Otherwise, we're missing edge2 or edge2 is the older side, so we
2743
        // return a blank pubkey for edge1. In this case, only an update from
2744
        // edge2 can resurrect the channel.
2745
        default:
2✔
2746
                return [33]byte{}, info.NodeKey2Bytes
2✔
2747
        }
2748
}
2749
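// exampleZombieKeys is an illustrative in-package sketch only, not part of
// lnd. Assuming e2 is the one policy we do have, node1 is the lagging side,
// so makeZombiePubkeys keeps node1's key and blanks node2's: only a fresh
// update from node1 can then resurrect the channel.
func exampleZombieKeys(info *models.ChannelEdgeInfo,
        e2 *models.ChannelEdgePolicy) {

        laggingKey, blankKey := makeZombiePubkeys(info, nil, e2)

        // laggingKey == info.NodeKey1Bytes, blankKey == [33]byte{}.
        fmt.Printf("resurrection requires an update from %x (peer slot %x)\n",
                laggingKey, blankKey)
}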

2750
// UpdateEdgePolicy updates the edge routing policy for a single directed edge
2751
// within the database for the referenced channel. The `flags` attribute within
2752
// the ChannelEdgePolicy determines which of the directed edges are being
2753
// updated. If the flag is 1, then the first node's information is being
2754
// updated, otherwise it's the second node's information. The node ordering is
2755
// determined by the lexicographical ordering of the identity public keys of the
2756
// nodes on either side of the channel.
2757
func (c *KVStore) UpdateEdgePolicy(edge *models.ChannelEdgePolicy,
2758
        op ...batch.SchedulerOption) error {
2,666✔
2759

2,666✔
2760
        var (
2,666✔
2761
                isUpdate1    bool
2,666✔
2762
                edgeNotFound bool
2,666✔
2763
        )
2,666✔
2764

2,666✔
2765
        r := &batch.Request{
2,666✔
2766
                Reset: func() {
5,332✔
2767
                        isUpdate1 = false
2,666✔
2768
                        edgeNotFound = false
2,666✔
2769
                },
2,666✔
2770
                Update: func(tx kvdb.RwTx) error {
2,666✔
2771
                        var err error
2,666✔
2772
                        isUpdate1, err = updateEdgePolicy(
2,666✔
2773
                                tx, edge, c.graphCache,
2,666✔
2774
                        )
2,666✔
2775

2,666✔
2776
                        if err != nil {
2,669✔
2777
                                log.Errorf("UpdateEdgePolicy failed: %v", err)
3✔
2778
                        }
3✔
2779

2780
                        // Silence ErrEdgeNotFound so that the batch can
2781
                        // succeed, but propagate the error via local state.
2782
                        if errors.Is(err, ErrEdgeNotFound) {
2,669✔
2783
                                edgeNotFound = true
3✔
2784
                                return nil
3✔
2785
                        }
3✔
2786

2787
                        return err
2,663✔
2788
                },
2789
                OnCommit: func(err error) error {
2,666✔
2790
                        switch {
2,666✔
2791
                        case err != nil:
×
2792
                                return err
×
2793
                        case edgeNotFound:
3✔
2794
                                return ErrEdgeNotFound
3✔
2795
                        default:
2,663✔
2796
                                c.updateEdgeCache(edge, isUpdate1)
2,663✔
2797
                                return nil
2,663✔
2798
                        }
2799
                },
2800
        }
2801

2802
        for _, f := range op {
2,669✔
2803
                f(r)
3✔
2804
        }
3✔
2805

2806
        return c.chanScheduler.Execute(r)
2,666✔
2807
}
2808
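// exampleUpdatePolicy is an illustrative sketch only, not part of lnd. It
// assumes the caller already holds a fully populated *models.ChannelEdgePolicy
// (signature and fee fields omitted here) and shows the direction rule
// documented above: bit 0 of ChannelFlags selects which node advertised the
// policy. It also shows the ErrEdgeNotFound contract surfaced via OnCommit.
func exampleUpdatePolicy(store *KVStore,
        policy *models.ChannelEdgePolicy) error {

        direction := "node1"
        if policy.ChannelFlags&lnwire.ChanUpdateDirection != 0 {
                direction = "node2"
        }
        fmt.Printf("updating policy advertised by %s for channel %d\n",
                direction, policy.ChannelID)

        err := store.UpdateEdgePolicy(policy)
        if errors.Is(err, ErrEdgeNotFound) {
                // The channel itself is not in the graph yet; callers
                // typically stash the update and retry once the channel
                // announcement arrives.
                return nil
        }

        return err
}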

2809
func (c *KVStore) updateEdgeCache(e *models.ChannelEdgePolicy,
2810
        isUpdate1 bool) {
2,663✔
2811

2,663✔
2812
        // If an entry for this channel is found in reject cache, we'll modify
2,663✔
2813
        // the entry with the updated timestamp for the direction that was just
2,663✔
2814
        // written. If the edge doesn't exist, we'll load the cache entry lazily
2,663✔
2815
        // during the next query for this edge.
2,663✔
2816
        if entry, ok := c.rejectCache.get(e.ChannelID); ok {
2,671✔
2817
                if isUpdate1 {
14✔
2818
                        entry.upd1Time = e.LastUpdate.Unix()
6✔
2819
                } else {
11✔
2820
                        entry.upd2Time = e.LastUpdate.Unix()
5✔
2821
                }
5✔
2822
                c.rejectCache.insert(e.ChannelID, entry)
8✔
2823
        }
2824

2825
        // If an entry for this channel is found in channel cache, we'll modify
2826
        // the entry with the updated policy for the direction that was just
2827
        // written. If the edge doesn't exist, we'll defer loading the info and
2828
        // policies and lazily read from disk during the next query.
2829
        if channel, ok := c.chanCache.get(e.ChannelID); ok {
2,666✔
2830
                if isUpdate1 {
6✔
2831
                        channel.Policy1 = e
3✔
2832
                } else {
6✔
2833
                        channel.Policy2 = e
3✔
2834
                }
3✔
2835
                c.chanCache.insert(e.ChannelID, channel)
3✔
2836
        }
2837
}
2838

2839
// updateEdgePolicy attempts to update an edge's policy within the relevant
2840
// buckets using an existing database transaction. The returned boolean will be
2841
// true if the updated policy belongs to node1, and false if the policy belonged
2842
// to node2.
2843
func updateEdgePolicy(tx kvdb.RwTx, edge *models.ChannelEdgePolicy,
2844
        graphCache *GraphCache) (bool, error) {
2,666✔
2845

2,666✔
2846
        edges := tx.ReadWriteBucket(edgeBucket)
2,666✔
2847
        if edges == nil {
2,666✔
2848
                return false, ErrEdgeNotFound
×
2849
        }
×
2850
        edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
2,666✔
2851
        if edgeIndex == nil {
2,666✔
2852
                return false, ErrEdgeNotFound
×
2853
        }
×
2854

2855
        // Create the channelID key be converting the channel ID
2856
        // integer into a byte slice.
2857
        var chanID [8]byte
2,666✔
2858
        byteOrder.PutUint64(chanID[:], edge.ChannelID)
2,666✔
2859

2,666✔
2860
        // With the channel ID, we then fetch the value storing the two
2,666✔
2861
        // nodes which connect this channel edge.
2,666✔
2862
        nodeInfo := edgeIndex.Get(chanID[:])
2,666✔
2863
        if nodeInfo == nil {
2,669✔
2864
                return false, ErrEdgeNotFound
3✔
2865
        }
3✔
2866

2867
        // Depending on the flags value passed above, either the first
2868
        // or second edge policy is being updated.
2869
        var fromNode, toNode []byte
2,663✔
2870
        var isUpdate1 bool
2,663✔
2871
        if edge.ChannelFlags&lnwire.ChanUpdateDirection == 0 {
4,000✔
2872
                fromNode = nodeInfo[:33]
1,337✔
2873
                toNode = nodeInfo[33:66]
1,337✔
2874
                isUpdate1 = true
1,337✔
2875
        } else {
2,666✔
2876
                fromNode = nodeInfo[33:66]
1,329✔
2877
                toNode = nodeInfo[:33]
1,329✔
2878
                isUpdate1 = false
1,329✔
2879
        }
1,329✔
2880

2881
        // Finally, with the direction of the edge being updated
2882
        // identified, we update the on-disk edge representation.
2883
        err := putChanEdgePolicy(edges, edge, fromNode, toNode)
2,663✔
2884
        if err != nil {
2,663✔
2885
                return false, err
×
2886
        }
×
2887

2888
        var (
2,663✔
2889
                fromNodePubKey route.Vertex
2,663✔
2890
                toNodePubKey   route.Vertex
2,663✔
2891
        )
2,663✔
2892
        copy(fromNodePubKey[:], fromNode)
2,663✔
2893
        copy(toNodePubKey[:], toNode)
2,663✔
2894

2,663✔
2895
        if graphCache != nil {
4,940✔
2896
                graphCache.UpdatePolicy(
2,277✔
2897
                        edge, fromNodePubKey, toNodePubKey, isUpdate1,
2,277✔
2898
                )
2,277✔
2899
        }
2,277✔
2900

2901
        return isUpdate1, nil
2,663✔
2902
}
2903

2904
// isPublic determines whether the node is seen as public within the graph from
2905
// the source node's point of view. An existing database transaction can also be
2906
// specified.
2907
func (c *KVStore) isPublic(tx kvdb.RTx, nodePub route.Vertex,
2908
        sourcePubKey []byte) (bool, error) {
16✔
2909

16✔
2910
        // In order to determine whether this node is publicly advertised within
16✔
2911
        // the graph, we'll need to look at all of its edges and check whether
16✔
2912
        // they extend to any other node than the source node. errDone will be
16✔
2913
        // used to terminate the check early.
16✔
2914
        nodeIsPublic := false
16✔
2915
        errDone := errors.New("done")
16✔
2916
        err := c.ForEachNodeChannelTx(tx, nodePub, func(tx kvdb.RTx,
16✔
2917
                info *models.ChannelEdgeInfo, _ *models.ChannelEdgePolicy,
16✔
2918
                _ *models.ChannelEdgePolicy) error {
29✔
2919

13✔
2920
                // If this edge doesn't extend to the source node, we'll
13✔
2921
                // terminate our search as we can now conclude that the node is
13✔
2922
                // publicly advertised within the graph due to the local node
13✔
2923
                // knowing of the current edge.
13✔
2924
                if !bytes.Equal(info.NodeKey1Bytes[:], sourcePubKey) &&
13✔
2925
                        !bytes.Equal(info.NodeKey2Bytes[:], sourcePubKey) {
19✔
2926

6✔
2927
                        nodeIsPublic = true
6✔
2928
                        return errDone
6✔
2929
                }
6✔
2930

2931
                // Since the edge _does_ extend to the source node, we'll also
2932
                // need to ensure that this is a public edge.
2933
                if info.AuthProof != nil {
19✔
2934
                        nodeIsPublic = true
9✔
2935
                        return errDone
9✔
2936
                }
9✔
2937

2938
                // Otherwise, we'll continue our search.
2939
                return nil
4✔
2940
        })
2941
        if err != nil && !errors.Is(err, errDone) {
16✔
2942
                return false, err
×
2943
        }
×
2944

2945
        return nodeIsPublic, nil
16✔
2946
}
2947

2948
// FetchLightningNodeTx attempts to look up a target node by its identity
2949
// public key. If the node isn't found in the database, then
2950
// ErrGraphNodeNotFound is returned. An optional transaction may be provided.
2951
// If none is provided, then a new one will be created.
2952
func (c *KVStore) FetchLightningNodeTx(tx kvdb.RTx, nodePub route.Vertex) (
2953
        *models.LightningNode, error) {
3,633✔
2954

3,633✔
2955
        return c.fetchLightningNode(tx, nodePub)
3,633✔
2956
}
3,633✔
2957

2958
// FetchLightningNode attempts to look up a target node by its identity public
2959
// key. If the node isn't found in the database, then ErrGraphNodeNotFound is
2960
// returned.
2961
func (c *KVStore) FetchLightningNode(nodePub route.Vertex) (
2962
        *models.LightningNode, error) {
155✔
2963

155✔
2964
        return c.fetchLightningNode(nil, nodePub)
155✔
2965
}
155✔
2966

2967
// fetchLightningNode attempts to look up a target node by its identity public
2968
// key. If the node isn't found in the database, then ErrGraphNodeNotFound is
2969
// returned. An optional transaction may be provided. If none is provided, then
2970
// a new one will be created.
2971
func (c *KVStore) fetchLightningNode(tx kvdb.RTx,
2972
        nodePub route.Vertex) (*models.LightningNode, error) {
3,785✔
2973

3,785✔
2974
        var node *models.LightningNode
3,785✔
2975
        fetch := func(tx kvdb.RTx) error {
7,570✔
2976
                // First grab the nodes bucket which stores the mapping from
3,785✔
2977
                // pubKey to node information.
3,785✔
2978
                nodes := tx.ReadBucket(nodeBucket)
3,785✔
2979
                if nodes == nil {
3,785✔
2980
                        return ErrGraphNotFound
×
2981
                }
×
2982

2983
                // If a key for this serialized public key isn't found, then
2984
                // the target node doesn't exist within the database.
2985
                nodeBytes := nodes.Get(nodePub[:])
3,785✔
2986
                if nodeBytes == nil {
3,802✔
2987
                        return ErrGraphNodeNotFound
17✔
2988
                }
17✔
2989

2990
                // If the node is found, then we can deserialize the node
2991
                // information to return to the user.
2992
                nodeReader := bytes.NewReader(nodeBytes)
3,771✔
2993
                n, err := deserializeLightningNode(nodeReader)
3,771✔
2994
                if err != nil {
3,771✔
2995
                        return err
×
2996
                }
×
2997

2998
                node = &n
3,771✔
2999

3,771✔
3000
                return nil
3,771✔
3001
        }
3002

3003
        if tx == nil {
3,943✔
3004
                err := kvdb.View(
158✔
3005
                        c.db, fetch, func() {
316✔
3006
                                node = nil
158✔
3007
                        },
158✔
3008
                )
3009
                if err != nil {
164✔
3010
                        return nil, err
6✔
3011
                }
6✔
3012

3013
                return node, nil
155✔
3014
        }
3015

3016
        err := fetch(tx)
3,627✔
3017
        if err != nil {
3,638✔
3018
                return nil, err
11✔
3019
        }
11✔
3020

3021
        return node, nil
3,616✔
3022
}
3023

3024
// HasLightningNode determines if the graph has a vertex identified by the
3025
// target node identity public key. If the node exists in the database, a
3026
// timestamp of when the data for the node was last updated is returned along
3027
// with a true boolean. Otherwise, an empty time.Time is returned with a false
3028
// boolean.
3029
func (c *KVStore) HasLightningNode(nodePub [33]byte) (time.Time, bool,
3030
        error) {
19✔
3031

19✔
3032
        var (
19✔
3033
                updateTime time.Time
19✔
3034
                exists     bool
19✔
3035
        )
19✔
3036

19✔
3037
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
38✔
3038
                // First grab the nodes bucket which stores the mapping from
19✔
3039
                // pubKey to node information.
19✔
3040
                nodes := tx.ReadBucket(nodeBucket)
19✔
3041
                if nodes == nil {
19✔
3042
                        return ErrGraphNotFound
×
3043
                }
×
3044

3045
                // If a key for this serialized public key isn't found, we can
3046
                // exit early.
3047
                nodeBytes := nodes.Get(nodePub[:])
19✔
3048
                if nodeBytes == nil {
25✔
3049
                        exists = false
6✔
3050
                        return nil
6✔
3051
                }
6✔
3052

3053
                // Otherwise we continue on to obtain the time stamp
3054
                // representing the last time the data for this node was
3055
                // updated.
3056
                nodeReader := bytes.NewReader(nodeBytes)
16✔
3057
                node, err := deserializeLightningNode(nodeReader)
16✔
3058
                if err != nil {
16✔
3059
                        return err
×
3060
                }
×
3061

3062
                exists = true
16✔
3063
                updateTime = node.LastUpdate
16✔
3064

16✔
3065
                return nil
16✔
3066
        }, func() {
19✔
3067
                updateTime = time.Time{}
19✔
3068
                exists = false
19✔
3069
        })
19✔
3070
        if err != nil {
19✔
3071
                return time.Time{}, exists, err
×
3072
        }
×
3073

3074
        return updateTime, exists, nil
19✔
3075
}
3076
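// exampleNodeLookup is an illustrative sketch only, not part of lnd. It shows
// the two lookup styles above: HasLightningNode for a cheap existence and
// freshness check, and FetchLightningNode for the full record. "store" and
// "pub" are hypothetical; pub is assumed to hold a valid compressed pubkey.
func exampleNodeLookup(store *KVStore, pub route.Vertex) error {
        lastUpdate, exists, err := store.HasLightningNode(pub)
        if err != nil {
                return err
        }
        if !exists {
                return ErrGraphNodeNotFound
        }
        fmt.Printf("node %x last updated at %v\n", pub[:], lastUpdate)

        node, err := store.FetchLightningNode(pub)
        if err != nil {
                return err
        }
        fmt.Printf("fetched full record, last update %v\n", node.LastUpdate)

        return nil
}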

3077
// nodeTraversal is used to traverse all channels of a node given by its
3078
// public key and passes channel information into the specified callback.
3079
func nodeTraversal(tx kvdb.RTx, nodePub []byte, db kvdb.Backend,
3080
        cb func(kvdb.RTx, *models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
3081
                *models.ChannelEdgePolicy) error) error {
1,269✔
3082

1,269✔
3083
        traversal := func(tx kvdb.RTx) error {
2,538✔
3084
                edges := tx.ReadBucket(edgeBucket)
1,269✔
3085
                if edges == nil {
1,269✔
3086
                        return ErrGraphNotFound
×
3087
                }
×
3088
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
1,269✔
3089
                if edgeIndex == nil {
1,269✔
3090
                        return ErrGraphNoEdgesFound
×
3091
                }
×
3092

3093
                // In order to reach all the edges for this node, we take
3094
                // advantage of the construction of the key-space within the
3095
                // edge bucket. The keys are stored in the form: pubKey ||
3096
                // chanID. Therefore, starting from a chanID of zero, we can
3097
                // scan forward in the bucket, grabbing all the edges for the
3098
                // node. Once the prefix no longer matches, then we know we're
3099
                // done.
3100
                var nodeStart [33 + 8]byte
1,269✔
3101
                copy(nodeStart[:], nodePub)
1,269✔
3102
                copy(nodeStart[33:], chanStart[:])
1,269✔
3103

1,269✔
3104
                // Starting from the key pubKey || 0, we seek forward in the
1,269✔
3105
                // bucket until the retrieved key no longer has the public key
1,269✔
3106
                // as its prefix. This indicates that we've stepped over into
1,269✔
3107
                // another node's edges, so we can terminate our scan.
1,269✔
3108
                edgeCursor := edges.ReadCursor()
1,269✔
3109
                for nodeEdge, _ := edgeCursor.Seek(nodeStart[:]); bytes.HasPrefix(nodeEdge, nodePub); nodeEdge, _ = edgeCursor.Next() { //nolint:ll
5,112✔
3110
                        // If the prefix still matches, the channel id is
3,843✔
3111
                        // returned in nodeEdge. Channel id is used to lookup
3,843✔
3112
                        // the node at the other end of the channel and both
3,843✔
3113
                        // edge policies.
3,843✔
3114
                        chanID := nodeEdge[33:]
3,843✔
3115
                        edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID)
3,843✔
3116
                        if err != nil {
3,843✔
3117
                                return err
×
3118
                        }
×
3119

3120
                        outgoingPolicy, err := fetchChanEdgePolicy(
3,843✔
3121
                                edges, chanID, nodePub,
3,843✔
3122
                        )
3,843✔
3123
                        if err != nil {
3,843✔
3124
                                return err
×
3125
                        }
×
3126

3127
                        otherNode, err := edgeInfo.OtherNodeKeyBytes(nodePub)
3,843✔
3128
                        if err != nil {
3,843✔
3129
                                return err
×
3130
                        }
×
3131

3132
                        incomingPolicy, err := fetchChanEdgePolicy(
3,843✔
3133
                                edges, chanID, otherNode[:],
3,843✔
3134
                        )
3,843✔
3135
                        if err != nil {
3,843✔
3136
                                return err
×
3137
                        }
×
3138

3139
                        // Finally, we execute the callback.
3140
                        err = cb(tx, &edgeInfo, outgoingPolicy, incomingPolicy)
3,843✔
3141
                        if err != nil {
3,855✔
3142
                                return err
12✔
3143
                        }
12✔
3144
                }
3145

3146
                return nil
1,260✔
3147
        }
3148

3149
        // If no transaction was provided, then we'll create a new transaction
3150
        // to execute the transaction within.
3151
        if tx == nil {
1,281✔
3152
                return kvdb.View(db, traversal, func() {})
24✔
3153
        }
3154

3155
        // Otherwise, we re-use the existing transaction to execute the graph
3156
        // traversal.
3157
        return traversal(tx)
1,260✔
3158
}
3159

3160
// ForEachNodeChannel iterates through all channels of the given node,
3161
// executing the passed callback with an edge info structure and the policies
3162
// of each end of the channel. The first edge policy is the outgoing edge *to*
3163
// the connecting node, while the second is the incoming edge *from* the
3164
// connecting node. If the callback returns an error, then the iteration is
3165
// halted with the error propagated back up to the caller.
3166
//
3167
// Unknown policies are passed into the callback as nil values.
3168
func (c *KVStore) ForEachNodeChannel(nodePub route.Vertex,
3169
        cb func(kvdb.RTx, *models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
3170
                *models.ChannelEdgePolicy) error) error {
9✔
3171

9✔
3172
        return nodeTraversal(nil, nodePub[:], c.db, cb)
9✔
3173
}
9✔
3174

3175
// ForEachNodeChannelTx iterates through all channels of the given node,
3176
// executing the passed callback with an edge info structure and the policies
3177
// of each end of the channel. The first edge policy is the outgoing edge *to*
3178
// the connecting node, while the second is the incoming edge *from* the
3179
// connecting node. If the callback returns an error, then the iteration is
3180
// halted with the error propagated back up to the caller.
3181
//
3182
// Unknown policies are passed into the callback as nil values.
3183
//
3184
// If the caller wishes to re-use an existing boltdb transaction, then it
3185
// should be passed as the first argument.  Otherwise, the first argument should
3186
// be nil and a fresh transaction will be created to execute the graph
3187
// traversal.
3188
func (c *KVStore) ForEachNodeChannelTx(tx kvdb.RTx,
3189
        nodePub route.Vertex, cb func(kvdb.RTx, *models.ChannelEdgeInfo,
3190
                *models.ChannelEdgePolicy,
3191
                *models.ChannelEdgePolicy) error) error {
1,021✔
3192

1,021✔
3193
        return nodeTraversal(tx, nodePub[:], c.db, cb)
1,021✔
3194
}
1,021✔
3195
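// exampleCountChannels is an illustrative sketch only, not part of lnd. It
// shows the callback contract documented above for ForEachNodeChannel: the
// first policy is the outgoing edge, the second the incoming one, and either
// may be nil if it has not been announced yet. "store" is hypothetical.
func exampleCountChannels(store *KVStore, node route.Vertex) (int, error) {
        var missingPolicies int
        err := store.ForEachNodeChannel(node, func(_ kvdb.RTx,
                info *models.ChannelEdgeInfo, outPolicy,
                inPolicy *models.ChannelEdgePolicy) error {

                if outPolicy == nil || inPolicy == nil {
                        missingPolicies++
                }
                fmt.Printf("channel %d (%v)\n", info.ChannelID,
                        info.ChannelPoint)

                return nil
        })

        return missingPolicies, err
}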

3196
// FetchOtherNode attempts to fetch the full LightningNode that's opposite of
3197
// the target node in the channel. This is useful when one knows the pubkey of
3198
// one of the nodes, and wishes to obtain the full LightningNode for the other
3199
// end of the channel.
3200
func (c *KVStore) FetchOtherNode(tx kvdb.RTx,
3201
        channel *models.ChannelEdgeInfo, thisNodeKey []byte) (
3202
        *models.LightningNode, error) {
3✔
3203

3✔
3204
        // Ensure that the node passed in is actually a member of the channel.
3✔
3205
        var targetNodeBytes [33]byte
3✔
3206
        switch {
3✔
3207
        case bytes.Equal(channel.NodeKey1Bytes[:], thisNodeKey):
3✔
3208
                targetNodeBytes = channel.NodeKey2Bytes
3✔
3209
        case bytes.Equal(channel.NodeKey2Bytes[:], thisNodeKey):
3✔
3210
                targetNodeBytes = channel.NodeKey1Bytes
3✔
3211
        default:
×
3212
                return nil, fmt.Errorf("node not participating in this channel")
×
3213
        }
3214

3215
        var targetNode *models.LightningNode
3✔
3216
        fetchNodeFunc := func(tx kvdb.RTx) error {
6✔
3217
                // First grab the nodes bucket which stores the mapping from
3✔
3218
                // pubKey to node information.
3✔
3219
                nodes := tx.ReadBucket(nodeBucket)
3✔
3220
                if nodes == nil {
3✔
3221
                        return ErrGraphNotFound
×
3222
                }
×
3223

3224
                node, err := fetchLightningNode(nodes, targetNodeBytes[:])
3✔
3225
                if err != nil {
3✔
3226
                        return err
×
3227
                }
×
3228

3229
                targetNode = &node
3✔
3230

3✔
3231
                return nil
3✔
3232
        }
3233

3234
        // If the transaction is nil, then we'll need to create a new one,
3235
        // otherwise we can use the existing db transaction.
3236
        var err error
3✔
3237
        if tx == nil {
3✔
3238
                err = kvdb.View(c.db, fetchNodeFunc, func() {
×
3239
                        targetNode = nil
×
3240
                })
×
3241
        } else {
3✔
3242
                err = fetchNodeFunc(tx)
3✔
3243
        }
3✔
3244

3245
        return targetNode, err
3✔
3246
}
3247
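// exampleOtherNode is an illustrative sketch only, not part of lnd. It shows
// the intended use of FetchOtherNode: given the edge info of one of our own
// channels and our public key, look up the peer on the far side. Passing a
// nil transaction lets the store open its own read transaction.
func exampleOtherNode(store *KVStore, channel *models.ChannelEdgeInfo,
        ourKey route.Vertex) error {

        peer, err := store.FetchOtherNode(nil, channel, ourKey[:])
        if err != nil {
                return err
        }

        fmt.Printf("peer on the other side of %d: %x\n", channel.ChannelID,
                peer.PubKeyBytes)

        return nil
}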

3248
// computeEdgePolicyKeys is a helper function that can be used to compute the
3249
// keys used to index the channel edge policy info for the two nodes of the
3250
// edge. The keys for node 1 and node 2 are returned respectively.
3251
func computeEdgePolicyKeys(info *models.ChannelEdgeInfo) ([]byte, []byte) {
25✔
3252
        var (
25✔
3253
                node1Key [33 + 8]byte
25✔
3254
                node2Key [33 + 8]byte
25✔
3255
        )
25✔
3256

25✔
3257
        copy(node1Key[:], info.NodeKey1Bytes[:])
25✔
3258
        copy(node2Key[:], info.NodeKey2Bytes[:])
25✔
3259

25✔
3260
        byteOrder.PutUint64(node1Key[33:], info.ChannelID)
25✔
3261
        byteOrder.PutUint64(node2Key[33:], info.ChannelID)
25✔
3262

25✔
3263
        return node1Key[:], node2Key[:]
25✔
3264
}
25✔
3265

3266
// FetchChannelEdgesByOutpoint attempts to lookup the two directed edges for
3267
// the channel identified by the funding outpoint. If the channel can't be
3268
// found, then ErrEdgeNotFound is returned. A struct which houses the general
3269
// information for the channel itself is returned as well as two structs that
3270
// contain the routing policies for the channel in either direction.
3271
func (c *KVStore) FetchChannelEdgesByOutpoint(op *wire.OutPoint) (
3272
        *models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
3273
        *models.ChannelEdgePolicy, error) {
14✔
3274

14✔
3275
        var (
14✔
3276
                edgeInfo *models.ChannelEdgeInfo
14✔
3277
                policy1  *models.ChannelEdgePolicy
14✔
3278
                policy2  *models.ChannelEdgePolicy
14✔
3279
        )
14✔
3280

14✔
3281
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
28✔
3282
                // First, grab the node bucket. This will be used to populate
14✔
3283
                // the Node pointers in each edge read from disk.
14✔
3284
                nodes := tx.ReadBucket(nodeBucket)
14✔
3285
                if nodes == nil {
14✔
3286
                        return ErrGraphNotFound
×
3287
                }
×
3288

3289
                // Next, grab the edge bucket which stores the edges, and also
3290
                // the index itself so we can group the directed edges together
3291
                // logically.
3292
                edges := tx.ReadBucket(edgeBucket)
14✔
3293
                if edges == nil {
14✔
3294
                        return ErrGraphNoEdgesFound
×
3295
                }
×
3296
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
14✔
3297
                if edgeIndex == nil {
14✔
3298
                        return ErrGraphNoEdgesFound
×
3299
                }
×
3300

3301
                // If the channel's outpoint doesn't exist within the outpoint
3302
                // index, then the edge does not exist.
3303
                chanIndex := edges.NestedReadBucket(channelPointBucket)
14✔
3304
                if chanIndex == nil {
14✔
3305
                        return ErrGraphNoEdgesFound
×
3306
                }
×
3307
                var b bytes.Buffer
14✔
3308
                if err := WriteOutpoint(&b, op); err != nil {
14✔
3309
                        return err
×
3310
                }
×
3311
                chanID := chanIndex.Get(b.Bytes())
14✔
3312
                if chanID == nil {
27✔
3313
                        return fmt.Errorf("%w: op=%v", ErrEdgeNotFound, op)
13✔
3314
                }
13✔
3315

3316
                // If the channel is found to exist, then we'll first retrieve
3317
                // the general information for the channel.
3318
                edge, err := fetchChanEdgeInfo(edgeIndex, chanID)
4✔
3319
                if err != nil {
4✔
3320
                        return fmt.Errorf("%w: chanID=%x", err, chanID)
×
3321
                }
×
3322
                edgeInfo = &edge
4✔
3323

4✔
3324
                // Once we have the information about the channels' parameters,
4✔
3325
                // we'll fetch the routing policies for each of the directed
4✔
3326
                // edges.
4✔
3327
                e1, e2, err := fetchChanEdgePolicies(edgeIndex, edges, chanID)
4✔
3328
                if err != nil {
4✔
3329
                        return fmt.Errorf("failed to find policy: %w", err)
×
3330
                }
×
3331

3332
                policy1 = e1
4✔
3333
                policy2 = e2
4✔
3334

4✔
3335
                return nil
4✔
3336
        }, func() {
14✔
3337
                edgeInfo = nil
14✔
3338
                policy1 = nil
14✔
3339
                policy2 = nil
14✔
3340
        })
14✔
3341
        if err != nil {
27✔
3342
                return nil, nil, nil, err
13✔
3343
        }
13✔
3344

3345
        return edgeInfo, policy1, policy2, nil
4✔
3346
}
3347

3348
// FetchChannelEdgesByID attempts to lookup the two directed edges for the
3349
// channel identified by the channel ID. If the channel can't be found, then
3350
// ErrEdgeNotFound is returned. A struct which houses the general information
3351
// for the channel itself is returned as well as two structs that contain the
3352
// routing policies for the channel in either direction.
3353
//
3354
// ErrZombieEdge can be returned if the edge is currently marked as a zombie
3355
// within the database. In this case, both edge policies will be nil, and the
3356
// ChannelEdgeInfo will only include the public keys of each node.
3357
func (c *KVStore) FetchChannelEdgesByID(chanID uint64) (
3358
        *models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
3359
        *models.ChannelEdgePolicy, error) {
27✔
3360

27✔
3361
        var (
27✔
3362
                edgeInfo  *models.ChannelEdgeInfo
27✔
3363
                policy1   *models.ChannelEdgePolicy
27✔
3364
                policy2   *models.ChannelEdgePolicy
27✔
3365
                channelID [8]byte
27✔
3366
        )
27✔
3367

27✔
3368
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
54✔
3369
                // First, grab the node bucket. This will be used to populate
27✔
3370
                // the Node pointers in each edge read from disk.
27✔
3371
                nodes := tx.ReadBucket(nodeBucket)
27✔
3372
                if nodes == nil {
27✔
3373
                        return ErrGraphNotFound
×
3374
                }
×
3375

3376
                // Next, grab the edge bucket which stores the edges, and also
3377
                // the index itself so we can group the directed edges together
3378
                // logically.
3379
                edges := tx.ReadBucket(edgeBucket)
27✔
3380
                if edges == nil {
27✔
3381
                        return ErrGraphNoEdgesFound
×
3382
                }
×
3383
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
27✔
3384
                if edgeIndex == nil {
27✔
3385
                        return ErrGraphNoEdgesFound
×
3386
                }
×
3387

3388
                byteOrder.PutUint64(channelID[:], chanID)
27✔
3389

27✔
3390
                // Now, attempt to fetch edge.
27✔
3391
                edge, err := fetchChanEdgeInfo(edgeIndex, channelID[:])
27✔
3392

27✔
3393
                // If it doesn't exist, we'll quickly check our zombie index to
27✔
3394
                // see if we've previously marked it as so.
27✔
3395
                if errors.Is(err, ErrEdgeNotFound) {
31✔
3396
                        // If the zombie index doesn't exist, or the edge is not
4✔
3397
                        // marked as a zombie within it, then we'll return the
4✔
3398
                        // original ErrEdgeNotFound error.
4✔
3399
                        zombieIndex := edges.NestedReadBucket(zombieBucket)
4✔
3400
                        if zombieIndex == nil {
4✔
3401
                                return ErrEdgeNotFound
×
3402
                        }
×
3403

3404
                        isZombie, pubKey1, pubKey2 := isZombieEdge(
4✔
3405
                                zombieIndex, chanID,
4✔
3406
                        )
4✔
3407
                        if !isZombie {
7✔
3408
                                return ErrEdgeNotFound
3✔
3409
                        }
3✔
3410

3411
                        // Otherwise, the edge is marked as a zombie, so we'll
3412
                        // populate the edge info with the public keys of each
3413
                        // party as this is the only information we have about
3414
                        // it and return an error signaling so.
3415
                        edgeInfo = &models.ChannelEdgeInfo{
4✔
3416
                                NodeKey1Bytes: pubKey1,
4✔
3417
                                NodeKey2Bytes: pubKey2,
4✔
3418
                        }
4✔
3419

4✔
3420
                        return ErrZombieEdge
4✔
3421
                }
3422

3423
                // Otherwise, we'll just return the error if any.
3424
                if err != nil {
26✔
3425
                        return err
×
3426
                }
×
3427

3428
                edgeInfo = &edge
26✔
3429

26✔
3430
                // Then we'll attempt to fetch the accompanying policies of this
26✔
3431
                // edge.
26✔
3432
                e1, e2, err := fetchChanEdgePolicies(
26✔
3433
                        edgeIndex, edges, channelID[:],
26✔
3434
                )
26✔
3435
                if err != nil {
26✔
3436
                        return err
×
3437
                }
×
3438

3439
                policy1 = e1
26✔
3440
                policy2 = e2
26✔
3441

26✔
3442
                return nil
26✔
3443
        }, func() {
27✔
3444
                edgeInfo = nil
27✔
3445
                policy1 = nil
27✔
3446
                policy2 = nil
27✔
3447
        })
27✔
3448
        if errors.Is(err, ErrZombieEdge) {
31✔
3449
                return edgeInfo, nil, nil, err
4✔
3450
        }
4✔
3451
        if err != nil {
29✔
3452
                return nil, nil, nil, err
3✔
3453
        }
3✔
3454

3455
        return edgeInfo, policy1, policy2, nil
26✔
3456
}
3457
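// exampleLookupByID is an illustrative sketch only, not part of lnd. It shows
// the ErrZombieEdge contract documented above: on a zombie hit the returned
// edge info still carries both node keys, while both policies are nil.
func exampleLookupByID(store *KVStore, scid uint64) error {
        info, policy1, policy2, err := store.FetchChannelEdgesByID(scid)
        switch {
        case errors.Is(err, ErrZombieEdge):
                fmt.Printf("zombie channel between %x and %x\n",
                        info.NodeKey1Bytes[:], info.NodeKey2Bytes[:])
                return nil

        case err != nil:
                return err
        }

        fmt.Printf("live channel %d: have policy1=%v policy2=%v\n",
                info.ChannelID, policy1 != nil, policy2 != nil)

        return nil
}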

3458
// IsPublicNode is a helper method that determines whether the node with the
3459
// given public key is seen as a public node in the graph from the graph's
3460
// source node's point of view.
3461
func (c *KVStore) IsPublicNode(pubKey [33]byte) (bool, error) {
16✔
3462
        var nodeIsPublic bool
16✔
3463
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
32✔
3464
                nodes := tx.ReadBucket(nodeBucket)
16✔
3465
                if nodes == nil {
16✔
3466
                        return ErrGraphNodesNotFound
×
3467
                }
×
3468
                ourPubKey := nodes.Get(sourceKey)
16✔
3469
                if ourPubKey == nil {
16✔
3470
                        return ErrSourceNodeNotSet
×
3471
                }
×
3472
                node, err := fetchLightningNode(nodes, pubKey[:])
16✔
3473
                if err != nil {
16✔
3474
                        return err
×
3475
                }
×
3476

3477
                nodeIsPublic, err = c.isPublic(tx, node.PubKeyBytes, ourPubKey)
16✔
3478

16✔
3479
                return err
16✔
3480
        }, func() {
16✔
3481
                nodeIsPublic = false
16✔
3482
        })
16✔
3483
        if err != nil {
16✔
3484
                return false, err
×
3485
        }
×
3486

3487
        return nodeIsPublic, nil
16✔
3488
}
3489
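// exampleIsPublic is an illustrative sketch only, not part of lnd. It shows
// IsPublicNode, which answers whether the given node has at least one channel
// that either carries an announcement proof or reaches beyond our own source
// node. "store" is hypothetical.
func exampleIsPublic(store *KVStore, pub [33]byte) error {
        public, err := store.IsPublicNode(pub)
        if err != nil {
                return err
        }

        fmt.Printf("node %x public=%v\n", pub[:], public)

        return nil
}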

3490
// genMultiSigP2WSH generates the p2wsh'd multisig script for 2 of 2 pubkeys.
3491
func genMultiSigP2WSH(aPub, bPub []byte) ([]byte, error) {
49✔
3492
        witnessScript, err := input.GenMultiSigScript(aPub, bPub)
49✔
3493
        if err != nil {
49✔
3494
                return nil, err
×
3495
        }
×
3496

3497
        // With the witness script generated, we'll now turn it into a p2wsh
3498
        // script:
3499
        //  * OP_0 <sha256(script)>
3500
        bldr := txscript.NewScriptBuilder(
49✔
3501
                txscript.WithScriptAllocSize(input.P2WSHSize),
49✔
3502
        )
49✔
3503
        bldr.AddOp(txscript.OP_0)
49✔
3504
        scriptHash := sha256.Sum256(witnessScript)
49✔
3505
        bldr.AddData(scriptHash[:])
49✔
3506

49✔
3507
        return bldr.Script()
49✔
3508
}
3509

3510
// EdgePoint couples the outpoint of a channel with the funding script that it
3511
// creates. The FilteredChainView will use this to watch for spends of this
3512
// edge point on chain. We require both of these values as depending on the
3513
// concrete implementation, either the pkScript, or the out point will be used.
3514
type EdgePoint struct {
3515
        // FundingPkScript is the p2wsh multi-sig script of the target channel.
3516
        FundingPkScript []byte
3517

3518
        // OutPoint is the outpoint of the target channel.
3519
        OutPoint wire.OutPoint
3520
}
3521

3522
// String returns a human readable version of the target EdgePoint. We return
3523
// the outpoint directly as it is enough to uniquely identify the edge point.
3524
func (e *EdgePoint) String() string {
×
3525
        return e.OutPoint.String()
×
3526
}
×
3527

3528
// ChannelView returns the verifiable edge information for each active channel
3529
// within the known channel graph. The set of UTXO's (along with their scripts)
3530
// returned are the ones that need to be watched on chain to detect channel
3531
// closes on the resident blockchain.
3532
func (c *KVStore) ChannelView() ([]EdgePoint, error) {
25✔
3533
        var edgePoints []EdgePoint
25✔
3534
        if err := kvdb.View(c.db, func(tx kvdb.RTx) error {
50✔
3535
                // We're going to iterate over the entire channel index, so
25✔
3536
                // we'll need to fetch the edgeBucket to get to the index as
25✔
3537
                // it's a sub-bucket.
25✔
3538
                edges := tx.ReadBucket(edgeBucket)
25✔
3539
                if edges == nil {
25✔
3540
                        return ErrGraphNoEdgesFound
×
3541
                }
×
3542
                chanIndex := edges.NestedReadBucket(channelPointBucket)
25✔
3543
                if chanIndex == nil {
25✔
3544
                        return ErrGraphNoEdgesFound
×
3545
                }
×
3546
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
25✔
3547
                if edgeIndex == nil {
25✔
3548
                        return ErrGraphNoEdgesFound
×
3549
                }
×
3550

3551
                // Once we have the proper bucket, we'll range over each key
3552
                // (which is the channel point for the channel) and decode it,
3553
                // accumulating each entry.
3554
                return chanIndex.ForEach(
25✔
3555
                        func(chanPointBytes, chanID []byte) error {
70✔
3556
                                chanPointReader := bytes.NewReader(
45✔
3557
                                        chanPointBytes,
45✔
3558
                                )
45✔
3559

45✔
3560
                                var chanPoint wire.OutPoint
45✔
3561
                                err := ReadOutpoint(chanPointReader, &chanPoint)
45✔
3562
                                if err != nil {
45✔
3563
                                        return err
×
3564
                                }
×
3565

3566
                                edgeInfo, err := fetchChanEdgeInfo(
45✔
3567
                                        edgeIndex, chanID,
45✔
3568
                                )
45✔
3569
                                if err != nil {
45✔
3570
                                        return err
×
3571
                                }
×
3572

3573
                                pkScript, err := genMultiSigP2WSH(
45✔
3574
                                        edgeInfo.BitcoinKey1Bytes[:],
45✔
3575
                                        edgeInfo.BitcoinKey2Bytes[:],
45✔
3576
                                )
45✔
3577
                                if err != nil {
45✔
3578
                                        return err
×
3579
                                }
×
3580

3581
                                edgePoints = append(edgePoints, EdgePoint{
45✔
3582
                                        FundingPkScript: pkScript,
45✔
3583
                                        OutPoint:        chanPoint,
45✔
3584
                                })
45✔
3585

45✔
3586
                                return nil
45✔
3587
                        },
3588
                )
3589
        }, func() {
25✔
3590
                edgePoints = nil
25✔
3591
        }); err != nil {
25✔
3592
                return nil, err
×
3593
        }
×
3594

3595
        return edgePoints, nil
25✔
3596
}
3597
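// exampleChannelView is an illustrative sketch only, not part of lnd. It shows
// how the EdgePoint slice returned above could be handed to a chain watcher:
// each entry pairs a funding outpoint with the P2WSH script to watch for a
// spend. The "watch" callback is hypothetical.
func exampleChannelView(store *KVStore,
        watch func(wire.OutPoint, []byte) error) error {

        edgePoints, err := store.ChannelView()
        if err != nil {
                return err
        }

        for _, ep := range edgePoints {
                if err := watch(ep.OutPoint, ep.FundingPkScript); err != nil {
                        return err
                }
        }

        return nil
}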

3598
// MarkEdgeZombie attempts to mark a channel identified by its channel ID as a
// zombie. This method is used on an ad-hoc basis, when channels need to be
// marked as zombies outside the normal pruning cycle.
func (c *KVStore) MarkEdgeZombie(chanID uint64,
	pubKey1, pubKey2 [33]byte) error {

	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()

	err := kvdb.Batch(c.db, func(tx kvdb.RwTx) error {
		edges := tx.ReadWriteBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
		if err != nil {
			return fmt.Errorf("unable to create zombie "+
				"bucket: %w", err)
		}

		if c.graphCache != nil {
			c.graphCache.RemoveChannel(pubKey1, pubKey2, chanID)
		}

		return markEdgeZombie(zombieIndex, chanID, pubKey1, pubKey2)
	})
	if err != nil {
		return err
	}

	c.rejectCache.remove(chanID)
	c.chanCache.remove(chanID)

	return nil
}

// markEdgeZombie marks an edge as a zombie within our zombie index. The
// public keys should represent the node public keys of the two parties
// involved in the edge.
func markEdgeZombie(zombieIndex kvdb.RwBucket, chanID uint64, pubKey1,
	pubKey2 [33]byte) error {

	var k [8]byte
	byteOrder.PutUint64(k[:], chanID)

	var v [66]byte
	copy(v[:33], pubKey1[:])
	copy(v[33:], pubKey2[:])

	return zombieIndex.Put(k[:], v[:])
}

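// Note on the on-disk layout produced by markEdgeZombie above: each zombie
// entry is keyed by the 8-byte channel ID and stores a fixed 66-byte value,
// namely pubKey1 (33 bytes) followed by pubKey2 (33 bytes). isZombieEdge
// below reads the entry back with exactly this layout.
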
// MarkEdgeLive clears an edge from our zombie index, deeming it as live.
func (c *KVStore) MarkEdgeLive(chanID uint64) error {
	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()

	return c.markEdgeLiveUnsafe(nil, chanID)
}

// markEdgeLiveUnsafe clears an edge from the zombie index. This method can be
// called with an existing kvdb.RwTx or the argument can be set to nil in which
// case a new transaction will be created.
//
// NOTE: this method MUST only be called if the cacheMu has already been
// acquired.
func (c *KVStore) markEdgeLiveUnsafe(tx kvdb.RwTx, chanID uint64) error {
	dbFn := func(tx kvdb.RwTx) error {
		edges := tx.ReadWriteBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		zombieIndex := edges.NestedReadWriteBucket(zombieBucket)
		if zombieIndex == nil {
			return nil
		}

		var k [8]byte
		byteOrder.PutUint64(k[:], chanID)

		if len(zombieIndex.Get(k[:])) == 0 {
			return ErrZombieEdgeNotFound
		}

		return zombieIndex.Delete(k[:])
	}

	// If the transaction is nil, we'll create a new one. Otherwise, we use
	// the existing transaction.
	var err error
	if tx == nil {
		err = kvdb.Update(c.db, dbFn, func() {})
	} else {
		err = dbFn(tx)
	}
	if err != nil {
		return err
	}

	c.rejectCache.remove(chanID)
	c.chanCache.remove(chanID)

	return nil
}

// IsZombieEdge returns whether the edge is considered zombie. If it is a
// zombie, then the two node public keys corresponding to this edge are also
// returned.
func (c *KVStore) IsZombieEdge(chanID uint64) (bool, [33]byte, [33]byte) {
	var (
		isZombie         bool
		pubKey1, pubKey2 [33]byte
	)

	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		zombieIndex := edges.NestedReadBucket(zombieBucket)
		if zombieIndex == nil {
			return nil
		}

		isZombie, pubKey1, pubKey2 = isZombieEdge(zombieIndex, chanID)

		return nil
	}, func() {
		isZombie = false
		pubKey1 = [33]byte{}
		pubKey2 = [33]byte{}
	})
	if err != nil {
		return false, [33]byte{}, [33]byte{}
	}

	return isZombie, pubKey1, pubKey2
}

// isZombieEdge returns whether an entry exists for the given channel in the
// zombie index. If an entry exists, then the two node public keys
// corresponding to this edge are also returned.
func isZombieEdge(zombieIndex kvdb.RBucket,
	chanID uint64) (bool, [33]byte, [33]byte) {

	var k [8]byte
	byteOrder.PutUint64(k[:], chanID)

	v := zombieIndex.Get(k[:])
	if v == nil {
		return false, [33]byte{}, [33]byte{}
	}

	var pubKey1, pubKey2 [33]byte
	copy(pubKey1[:], v[:33])
	copy(pubKey2[:], v[33:])

	return true, pubKey1, pubKey2
}

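// exampleZombieLifecycle is an illustrative sketch (not part of the original
// file) showing how the exported zombie API above fits together: a channel is
// marked as a zombie, the zombie index is queried, and the channel is finally
// marked live again. The store value is assumed to be an initialized
// *KVStore.
func exampleZombieLifecycle(store *KVStore, chanID uint64,
	pubKey1, pubKey2 [33]byte) error {

	// Mark the channel as a zombie, recording both node public keys
	// alongside the channel ID.
	if err := store.MarkEdgeZombie(chanID, pubKey1, pubKey2); err != nil {
		return err
	}

	// The zombie index now reports the channel as a zombie.
	if isZombie, _, _ := store.IsZombieEdge(chanID); !isZombie {
		return fmt.Errorf("channel %v expected to be a zombie", chanID)
	}

	// Clear the zombie entry again. MarkEdgeLive returns
	// ErrZombieEdgeNotFound if no zombie entry exists for the channel.
	return store.MarkEdgeLive(chanID)
}
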
// NumZombies returns the current number of zombie channels in the graph.
func (c *KVStore) NumZombies() (uint64, error) {
	var numZombies uint64
	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return nil
		}
		zombieIndex := edges.NestedReadBucket(zombieBucket)
		if zombieIndex == nil {
			return nil
		}

		return zombieIndex.ForEach(func(_, _ []byte) error {
			numZombies++
			return nil
		})
	}, func() {
		numZombies = 0
	})
	if err != nil {
		return 0, err
	}

	return numZombies, nil
}

// PutClosedScid stores a SCID for a closed channel in the database. This is so
// that we can ignore channel announcements that we know to be closed without
// having to validate them and fetch a block.
func (c *KVStore) PutClosedScid(scid lnwire.ShortChannelID) error {
	return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
		closedScids, err := tx.CreateTopLevelBucket(closedScidBucket)
		if err != nil {
			return err
		}

		var k [8]byte
		byteOrder.PutUint64(k[:], scid.ToUint64())

		return closedScids.Put(k[:], []byte{})
	}, func() {})
}

// IsClosedScid checks whether a channel identified by the passed in scid is
// closed. This helps avoid having to perform expensive validation checks.
// TODO: Add an LRU cache to cut down on disc reads.
func (c *KVStore) IsClosedScid(scid lnwire.ShortChannelID) (bool, error) {
	var isClosed bool
	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		closedScids := tx.ReadBucket(closedScidBucket)
		if closedScids == nil {
			return ErrClosedScidsNotFound
		}

		var k [8]byte
		byteOrder.PutUint64(k[:], scid.ToUint64())

		if closedScids.Get(k[:]) != nil {
			isClosed = true
			return nil
		}

		return nil
	}, func() {
		isClosed = false
	})
	if err != nil {
		return false, err
	}

	return isClosed, nil
}

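// exampleClosedScid is an illustrative sketch (not part of the original file)
// of the closed-SCID index above: once a short channel ID has been recorded
// via PutClosedScid, announcements for it can be skipped without validating
// them or fetching a block. The store value is assumed to be an initialized
// *KVStore.
func exampleClosedScid(store *KVStore, scid lnwire.ShortChannelID) (bool,
	error) {

	if err := store.PutClosedScid(scid); err != nil {
		return false, err
	}

	// This now reports true purely from the database index.
	return store.IsClosedScid(scid)
}
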
// GraphSession will provide the call-back with access to a NodeTraverser
// instance which can be used to perform queries against the channel graph.
func (c *KVStore) GraphSession(cb func(graph NodeTraverser) error) error {
	return c.db.View(func(tx walletdb.ReadTx) error {
		return cb(&nodeTraverserSession{
			db: c,
			tx: tx,
		})
	}, func() {})
}

// nodeTraverserSession implements the NodeTraverser interface but with a
// backing read only transaction for a consistent view of the graph.
type nodeTraverserSession struct {
	tx kvdb.RTx
	db *KVStore
}

// ForEachNodeDirectedChannel calls the callback for every channel of the given
// node.
//
// NOTE: Part of the NodeTraverser interface.
func (c *nodeTraverserSession) ForEachNodeDirectedChannel(nodePub route.Vertex,
	cb func(channel *DirectedChannel) error) error {

	return c.db.forEachNodeDirectedChannel(c.tx, nodePub, cb)
}

// FetchNodeFeatures returns the features of the given node. If the node is
// unknown, assume no additional features are supported.
//
// NOTE: Part of the NodeTraverser interface.
func (c *nodeTraverserSession) FetchNodeFeatures(nodePub route.Vertex) (
	*lnwire.FeatureVector, error) {

	return c.db.fetchNodeFeatures(c.tx, nodePub)
}

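// exampleGraphSession is an illustrative sketch (not part of the original
// file) of how GraphSession is intended to be used: every query issued
// through the provided NodeTraverser runs against the same read transaction
// and therefore sees a consistent snapshot of the graph. The source vertex is
// an assumption of the example.
func exampleGraphSession(store *KVStore, source route.Vertex) (int, error) {
	var numChannels int
	err := store.GraphSession(func(graph NodeTraverser) error {
		// Both calls below observe the same snapshot.
		if _, err := graph.FetchNodeFeatures(source); err != nil {
			return err
		}

		return graph.ForEachNodeDirectedChannel(source,
			func(_ *DirectedChannel) error {
				numChannels++
				return nil
			},
		)
	})

	return numChannels, err
}
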
func putLightningNode(nodeBucket, aliasBucket, updateIndex kvdb.RwBucket,
	node *models.LightningNode) error {

	var (
		scratch [16]byte
		b       bytes.Buffer
	)

	pub, err := node.PubKey()
	if err != nil {
		return err
	}
	nodePub := pub.SerializeCompressed()

	// If the node has the update time set, write it, else write 0.
	updateUnix := uint64(0)
	if node.LastUpdate.Unix() > 0 {
		updateUnix = uint64(node.LastUpdate.Unix())
	}

	byteOrder.PutUint64(scratch[:8], updateUnix)
	if _, err := b.Write(scratch[:8]); err != nil {
		return err
	}

	if _, err := b.Write(nodePub); err != nil {
		return err
	}

	// If we got a node announcement for this node, we will have the rest
	// of the data available. If not, we don't have more data to write.
	if !node.HaveNodeAnnouncement {
		// Write HaveNodeAnnouncement=0.
		byteOrder.PutUint16(scratch[:2], 0)
		if _, err := b.Write(scratch[:2]); err != nil {
			return err
		}

		return nodeBucket.Put(nodePub, b.Bytes())
	}

	// Write HaveNodeAnnouncement=1.
	byteOrder.PutUint16(scratch[:2], 1)
	if _, err := b.Write(scratch[:2]); err != nil {
		return err
	}

	if err := binary.Write(&b, byteOrder, node.Color.R); err != nil {
		return err
	}
	if err := binary.Write(&b, byteOrder, node.Color.G); err != nil {
		return err
	}
	if err := binary.Write(&b, byteOrder, node.Color.B); err != nil {
		return err
	}

	if err := wire.WriteVarString(&b, 0, node.Alias); err != nil {
		return err
	}

	if err := node.Features.Encode(&b); err != nil {
		return err
	}

	numAddresses := uint16(len(node.Addresses))
	byteOrder.PutUint16(scratch[:2], numAddresses)
	if _, err := b.Write(scratch[:2]); err != nil {
		return err
	}

	for _, address := range node.Addresses {
		if err := SerializeAddr(&b, address); err != nil {
			return err
		}
	}

	sigLen := len(node.AuthSigBytes)
	if sigLen > 80 {
		return fmt.Errorf("max sig len allowed is 80, had %v",
			sigLen)
	}

	err = wire.WriteVarBytes(&b, 0, node.AuthSigBytes)
	if err != nil {
		return err
	}

	if len(node.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes {
		return ErrTooManyExtraOpaqueBytes(len(node.ExtraOpaqueData))
	}
	err = wire.WriteVarBytes(&b, 0, node.ExtraOpaqueData)
	if err != nil {
		return err
	}

	if err := aliasBucket.Put(nodePub, []byte(node.Alias)); err != nil {
		return err
	}

	// With the alias bucket updated, we'll now update the index that
	// tracks the time series of node updates.
	var indexKey [8 + 33]byte
	byteOrder.PutUint64(indexKey[:8], updateUnix)
	copy(indexKey[8:], nodePub)

	// If there was already an old index entry for this node, then we'll
	// delete the old one before we write the new entry.
	if nodeBytes := nodeBucket.Get(nodePub); nodeBytes != nil {
		// Extract out the old update time so we can reconstruct the
		// prior index key to delete it from the index.
		oldUpdateTime := nodeBytes[:8]

		var oldIndexKey [8 + 33]byte
		copy(oldIndexKey[:8], oldUpdateTime)
		copy(oldIndexKey[8:], nodePub)

		if err := updateIndex.Delete(oldIndexKey[:]); err != nil {
			return err
		}
	}

	if err := updateIndex.Put(indexKey[:], nil); err != nil {
		return err
	}

	return nodeBucket.Put(nodePub, b.Bytes())
}

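// Summary of the record written by putLightningNode above (the value stored
// under the node's compressed public key in nodeBucket; all integers use
// byteOrder):
//
//	updateUnix (8 bytes) || pubKey (33 bytes) || haveNodeAnn (2 bytes)
//
// and, only when haveNodeAnn == 1:
//
//	color R/G/B (1 byte each) || alias (varstring) || features ||
//	numAddresses (2 bytes) || addresses || authSig (varbytes) ||
//	extraOpaqueData (varbytes)
//
// deserializeLightningNode below reads the fields back in this same order.
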
func fetchLightningNode(nodeBucket kvdb.RBucket,
	nodePub []byte) (models.LightningNode, error) {

	nodeBytes := nodeBucket.Get(nodePub)
	if nodeBytes == nil {
		return models.LightningNode{}, ErrGraphNodeNotFound
	}

	nodeReader := bytes.NewReader(nodeBytes)

	return deserializeLightningNode(nodeReader)
}

func deserializeLightningNodeCacheable(r io.Reader) (route.Vertex,
	*lnwire.FeatureVector, error) {

	var (
		pubKey      route.Vertex
		features    = lnwire.EmptyFeatureVector()
		nodeScratch [8]byte
	)

	// Skip ahead:
	// - LastUpdate (8 bytes)
	if _, err := r.Read(nodeScratch[:]); err != nil {
		return pubKey, nil, err
	}

	if _, err := io.ReadFull(r, pubKey[:]); err != nil {
		return pubKey, nil, err
	}

	// Read the node announcement flag.
	if _, err := r.Read(nodeScratch[:2]); err != nil {
		return pubKey, nil, err
	}
	hasNodeAnn := byteOrder.Uint16(nodeScratch[:2])

	// The rest of the data is optional, and will only be there if we got a
	// node announcement for this node.
	if hasNodeAnn == 0 {
		return pubKey, features, nil
	}

	// We did get a node announcement for this node, so we'll have the rest
	// of the data available.
	var rgb uint8
	if err := binary.Read(r, byteOrder, &rgb); err != nil {
		return pubKey, nil, err
	}
	if err := binary.Read(r, byteOrder, &rgb); err != nil {
		return pubKey, nil, err
	}
	if err := binary.Read(r, byteOrder, &rgb); err != nil {
		return pubKey, nil, err
	}

	if _, err := wire.ReadVarString(r, 0); err != nil {
		return pubKey, nil, err
	}

	if err := features.Decode(r); err != nil {
		return pubKey, nil, err
	}

	return pubKey, features, nil
}

func deserializeLightningNode(r io.Reader) (models.LightningNode, error) {
	var (
		node    models.LightningNode
		scratch [8]byte
		err     error
	)

	// Always populate a feature vector, even if we don't have a node
	// announcement and short circuit below.
	node.Features = lnwire.EmptyFeatureVector()

	if _, err := r.Read(scratch[:]); err != nil {
		return models.LightningNode{}, err
	}

	unix := int64(byteOrder.Uint64(scratch[:]))
	node.LastUpdate = time.Unix(unix, 0)

	if _, err := io.ReadFull(r, node.PubKeyBytes[:]); err != nil {
		return models.LightningNode{}, err
	}

	if _, err := r.Read(scratch[:2]); err != nil {
		return models.LightningNode{}, err
	}

	hasNodeAnn := byteOrder.Uint16(scratch[:2])
	if hasNodeAnn == 1 {
		node.HaveNodeAnnouncement = true
	} else {
		node.HaveNodeAnnouncement = false
	}

	// The rest of the data is optional, and will only be there if we got a
	// node announcement for this node.
	if !node.HaveNodeAnnouncement {
		return node, nil
	}

	// We did get a node announcement for this node, so we'll have the rest
	// of the data available.
	if err := binary.Read(r, byteOrder, &node.Color.R); err != nil {
		return models.LightningNode{}, err
	}
	if err := binary.Read(r, byteOrder, &node.Color.G); err != nil {
		return models.LightningNode{}, err
	}
	if err := binary.Read(r, byteOrder, &node.Color.B); err != nil {
		return models.LightningNode{}, err
	}

	node.Alias, err = wire.ReadVarString(r, 0)
	if err != nil {
		return models.LightningNode{}, err
	}

	err = node.Features.Decode(r)
	if err != nil {
		return models.LightningNode{}, err
	}

	if _, err := r.Read(scratch[:2]); err != nil {
		return models.LightningNode{}, err
	}
	numAddresses := int(byteOrder.Uint16(scratch[:2]))

	var addresses []net.Addr
	for i := 0; i < numAddresses; i++ {
		address, err := DeserializeAddr(r)
		if err != nil {
			return models.LightningNode{}, err
		}
		addresses = append(addresses, address)
	}
	node.Addresses = addresses

	node.AuthSigBytes, err = wire.ReadVarBytes(r, 0, 80, "sig")
	if err != nil {
		return models.LightningNode{}, err
	}

	// We'll try and see if there are any opaque bytes left, if not, then
	// we'll ignore the EOF error and return the node as is.
	node.ExtraOpaqueData, err = wire.ReadVarBytes(
		r, 0, MaxAllowedExtraOpaqueBytes, "blob",
	)
	switch {
	case errors.Is(err, io.ErrUnexpectedEOF):
	case errors.Is(err, io.EOF):
	case err != nil:
		return models.LightningNode{}, err
	}

	return node, nil
}

func putChanEdgeInfo(edgeIndex kvdb.RwBucket,
	edgeInfo *models.ChannelEdgeInfo, chanID [8]byte) error {

	var b bytes.Buffer

	if _, err := b.Write(edgeInfo.NodeKey1Bytes[:]); err != nil {
		return err
	}
	if _, err := b.Write(edgeInfo.NodeKey2Bytes[:]); err != nil {
		return err
	}
	if _, err := b.Write(edgeInfo.BitcoinKey1Bytes[:]); err != nil {
		return err
	}
	if _, err := b.Write(edgeInfo.BitcoinKey2Bytes[:]); err != nil {
		return err
	}

	if err := wire.WriteVarBytes(&b, 0, edgeInfo.Features); err != nil {
		return err
	}

	authProof := edgeInfo.AuthProof
	var nodeSig1, nodeSig2, bitcoinSig1, bitcoinSig2 []byte
	if authProof != nil {
		nodeSig1 = authProof.NodeSig1Bytes
		nodeSig2 = authProof.NodeSig2Bytes
		bitcoinSig1 = authProof.BitcoinSig1Bytes
		bitcoinSig2 = authProof.BitcoinSig2Bytes
	}

	if err := wire.WriteVarBytes(&b, 0, nodeSig1); err != nil {
		return err
	}
	if err := wire.WriteVarBytes(&b, 0, nodeSig2); err != nil {
		return err
	}
	if err := wire.WriteVarBytes(&b, 0, bitcoinSig1); err != nil {
		return err
	}
	if err := wire.WriteVarBytes(&b, 0, bitcoinSig2); err != nil {
		return err
	}

	if err := WriteOutpoint(&b, &edgeInfo.ChannelPoint); err != nil {
		return err
	}
	err := binary.Write(&b, byteOrder, uint64(edgeInfo.Capacity))
	if err != nil {
		return err
	}
	if _, err := b.Write(chanID[:]); err != nil {
		return err
	}
	if _, err := b.Write(edgeInfo.ChainHash[:]); err != nil {
		return err
	}

	if len(edgeInfo.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes {
		return ErrTooManyExtraOpaqueBytes(len(edgeInfo.ExtraOpaqueData))
	}
	err = wire.WriteVarBytes(&b, 0, edgeInfo.ExtraOpaqueData)
	if err != nil {
		return err
	}

	return edgeIndex.Put(chanID[:], b.Bytes())
}

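// Summary of the edge info record written by putChanEdgeInfo above (stored
// under the 8-byte channel ID in the edge index; all integers use byteOrder):
//
//	nodeKey1 (33) || nodeKey2 (33) || bitcoinKey1 (33) || bitcoinKey2 (33) ||
//	features (varbytes) || nodeSig1 || nodeSig2 || bitcoinSig1 ||
//	bitcoinSig2 (each varbytes) || channelPoint (outpoint) ||
//	capacity (8) || channelID (8) || chainHash (32) ||
//	extraOpaqueData (varbytes)
//
// deserializeChanEdgeInfo below reads the fields back in the same order.
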
func fetchChanEdgeInfo(edgeIndex kvdb.RBucket,
	chanID []byte) (models.ChannelEdgeInfo, error) {

	edgeInfoBytes := edgeIndex.Get(chanID)
	if edgeInfoBytes == nil {
		return models.ChannelEdgeInfo{}, ErrEdgeNotFound
	}

	edgeInfoReader := bytes.NewReader(edgeInfoBytes)

	return deserializeChanEdgeInfo(edgeInfoReader)
}

func deserializeChanEdgeInfo(r io.Reader) (models.ChannelEdgeInfo, error) {
	var (
		err      error
		edgeInfo models.ChannelEdgeInfo
	)

	if _, err := io.ReadFull(r, edgeInfo.NodeKey1Bytes[:]); err != nil {
		return models.ChannelEdgeInfo{}, err
	}
	if _, err := io.ReadFull(r, edgeInfo.NodeKey2Bytes[:]); err != nil {
		return models.ChannelEdgeInfo{}, err
	}
	if _, err := io.ReadFull(r, edgeInfo.BitcoinKey1Bytes[:]); err != nil {
		return models.ChannelEdgeInfo{}, err
	}
	if _, err := io.ReadFull(r, edgeInfo.BitcoinKey2Bytes[:]); err != nil {
		return models.ChannelEdgeInfo{}, err
	}

	edgeInfo.Features, err = wire.ReadVarBytes(r, 0, 900, "features")
	if err != nil {
		return models.ChannelEdgeInfo{}, err
	}

	proof := &models.ChannelAuthProof{}

	proof.NodeSig1Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
	if err != nil {
		return models.ChannelEdgeInfo{}, err
	}
	proof.NodeSig2Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
	if err != nil {
		return models.ChannelEdgeInfo{}, err
	}
	proof.BitcoinSig1Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
	if err != nil {
		return models.ChannelEdgeInfo{}, err
	}
	proof.BitcoinSig2Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
	if err != nil {
		return models.ChannelEdgeInfo{}, err
	}

	if !proof.IsEmpty() {
		edgeInfo.AuthProof = proof
	}

	edgeInfo.ChannelPoint = wire.OutPoint{}
	if err := ReadOutpoint(r, &edgeInfo.ChannelPoint); err != nil {
		return models.ChannelEdgeInfo{}, err
	}
	if err := binary.Read(r, byteOrder, &edgeInfo.Capacity); err != nil {
		return models.ChannelEdgeInfo{}, err
	}
	if err := binary.Read(r, byteOrder, &edgeInfo.ChannelID); err != nil {
		return models.ChannelEdgeInfo{}, err
	}

	if _, err := io.ReadFull(r, edgeInfo.ChainHash[:]); err != nil {
		return models.ChannelEdgeInfo{}, err
	}

	// We'll try and see if there are any opaque bytes left, if not, then
	// we'll ignore the EOF error and return the edge as is.
	edgeInfo.ExtraOpaqueData, err = wire.ReadVarBytes(
		r, 0, MaxAllowedExtraOpaqueBytes, "blob",
	)
	switch {
	case errors.Is(err, io.ErrUnexpectedEOF):
	case errors.Is(err, io.EOF):
	case err != nil:
		return models.ChannelEdgeInfo{}, err
	}

	return edgeInfo, nil
}

func putChanEdgePolicy(edges kvdb.RwBucket, edge *models.ChannelEdgePolicy,
	from, to []byte) error {

	var edgeKey [33 + 8]byte
	copy(edgeKey[:], from)
	byteOrder.PutUint64(edgeKey[33:], edge.ChannelID)

	var b bytes.Buffer
	if err := serializeChanEdgePolicy(&b, edge, to); err != nil {
		return err
	}

	// Before we write out the new edge, we'll create a new entry in the
	// update index in order to keep it fresh.
	updateUnix := uint64(edge.LastUpdate.Unix())
	var indexKey [8 + 8]byte
	byteOrder.PutUint64(indexKey[:8], updateUnix)
	byteOrder.PutUint64(indexKey[8:], edge.ChannelID)

	updateIndex, err := edges.CreateBucketIfNotExists(edgeUpdateIndexBucket)
	if err != nil {
		return err
	}

	// If there was already an entry for this edge, then we'll need to
	// delete the old one to ensure we don't leave around any after-images.
	// An unknown policy value does not have an update time recorded, so
	// it also does not need to be removed.
	if edgeBytes := edges.Get(edgeKey[:]); edgeBytes != nil &&
		!bytes.Equal(edgeBytes, unknownPolicy) {

		// In order to delete the old entry, we'll need to obtain the
		// *prior* update time in order to delete it. To do this, we'll
		// need to deserialize the existing policy within the database
		// (now outdated by the new one), and delete its corresponding
		// entry within the update index. We'll ignore any
		// ErrEdgePolicyOptionalFieldNotFound error, as we only need
		// the channel ID and update time to delete the entry.
		// TODO(halseth): get rid of these invalid policies in a
		// migration.
		oldEdgePolicy, err := deserializeChanEdgePolicy(
			bytes.NewReader(edgeBytes),
		)
		if err != nil &&
			!errors.Is(err, ErrEdgePolicyOptionalFieldNotFound) {

			return err
		}

		oldUpdateTime := uint64(oldEdgePolicy.LastUpdate.Unix())

		var oldIndexKey [8 + 8]byte
		byteOrder.PutUint64(oldIndexKey[:8], oldUpdateTime)
		byteOrder.PutUint64(oldIndexKey[8:], edge.ChannelID)

		if err := updateIndex.Delete(oldIndexKey[:]); err != nil {
			return err
		}
	}

	if err := updateIndex.Put(indexKey[:], nil); err != nil {
		return err
	}

	err = updateEdgePolicyDisabledIndex(
		edges, edge.ChannelID,
		edge.ChannelFlags&lnwire.ChanUpdateDirection > 0,
		edge.IsDisabled(),
	)
	if err != nil {
		return err
	}

	return edges.Put(edgeKey[:], b.Bytes())
}

// updateEdgePolicyDisabledIndex is used to update the disabledEdgePolicyIndex
// bucket by either adding a new disabled ChannelEdgePolicy or removing an
// existing one.
// The direction represents the direction of the edge and disabled is used for
// deciding whether to remove or add an entry to the bucket.
// In general a channel is disabled if two entries for the same chanID exist
// in this bucket.
// Maintaining the bucket this way allows a fast retrieval of disabled
// channels, for example when prune is needed.
func updateEdgePolicyDisabledIndex(edges kvdb.RwBucket, chanID uint64,
	direction bool, disabled bool) error {

	var disabledEdgeKey [8 + 1]byte
	byteOrder.PutUint64(disabledEdgeKey[0:], chanID)
	if direction {
		disabledEdgeKey[8] = 1
	}

	disabledEdgePolicyIndex, err := edges.CreateBucketIfNotExists(
		disabledEdgePolicyBucket,
	)
	if err != nil {
		return err
	}

	if disabled {
		return disabledEdgePolicyIndex.Put(disabledEdgeKey[:], []byte{})
	}

	return disabledEdgePolicyIndex.Delete(disabledEdgeKey[:])
}

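// Note on the index maintained above: each entry is keyed by chanID (8 bytes)
// followed by a single direction byte, with an empty value. A channel only
// counts as fully disabled once both direction keys are present, which is
// what allows disabled channels to be retrieved quickly when pruning.
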
// putChanEdgePolicyUnknown marks the edge policy as unknown
// in the edges bucket.
func putChanEdgePolicyUnknown(edges kvdb.RwBucket, channelID uint64,
	from []byte) error {

	var edgeKey [33 + 8]byte
	copy(edgeKey[:], from)
	byteOrder.PutUint64(edgeKey[33:], channelID)

	if edges.Get(edgeKey[:]) != nil {
		return fmt.Errorf("cannot write unknown policy for channel "+
			"%v when there is already a policy present", channelID)
	}

	return edges.Put(edgeKey[:], unknownPolicy)
}

func fetchChanEdgePolicy(edges kvdb.RBucket, chanID []byte,
	nodePub []byte) (*models.ChannelEdgePolicy, error) {

	var edgeKey [33 + 8]byte
	copy(edgeKey[:], nodePub)
	copy(edgeKey[33:], chanID)

	edgeBytes := edges.Get(edgeKey[:])
	if edgeBytes == nil {
		return nil, ErrEdgeNotFound
	}

	// No need to deserialize unknown policy.
	if bytes.Equal(edgeBytes, unknownPolicy) {
		return nil, nil
	}

	edgeReader := bytes.NewReader(edgeBytes)

	ep, err := deserializeChanEdgePolicy(edgeReader)
	switch {
	// If the db policy was missing an expected optional field, we return
	// nil as if the policy was unknown.
	case errors.Is(err, ErrEdgePolicyOptionalFieldNotFound):
		return nil, nil

	case err != nil:
		return nil, err
	}

	return ep, nil
}

func fetchChanEdgePolicies(edgeIndex kvdb.RBucket, edges kvdb.RBucket,
	chanID []byte) (*models.ChannelEdgePolicy, *models.ChannelEdgePolicy,
	error) {

	edgeInfo := edgeIndex.Get(chanID)
	if edgeInfo == nil {
		return nil, nil, fmt.Errorf("%w: chanID=%x", ErrEdgeNotFound,
			chanID)
	}

	// The first node is contained within the first half of the edge
	// information. We only propagate the error here and below if it's
	// something other than edge non-existence.
	node1Pub := edgeInfo[:33]
	edge1, err := fetchChanEdgePolicy(edges, chanID, node1Pub)
	if err != nil {
		return nil, nil, fmt.Errorf("%w: node1Pub=%x", ErrEdgeNotFound,
			node1Pub)
	}

	// Similarly, the second node is contained within the latter
	// half of the edge information.
	node2Pub := edgeInfo[33:66]
	edge2, err := fetchChanEdgePolicy(edges, chanID, node2Pub)
	if err != nil {
		return nil, nil, fmt.Errorf("%w: node2Pub=%x", ErrEdgeNotFound,
			node2Pub)
	}

	return edge1, edge2, nil
}

func serializeChanEdgePolicy(w io.Writer, edge *models.ChannelEdgePolicy,
	to []byte) error {

	err := wire.WriteVarBytes(w, 0, edge.SigBytes)
	if err != nil {
		return err
	}

	if err := binary.Write(w, byteOrder, edge.ChannelID); err != nil {
		return err
	}

	var scratch [8]byte
	updateUnix := uint64(edge.LastUpdate.Unix())
	byteOrder.PutUint64(scratch[:], updateUnix)
	if _, err := w.Write(scratch[:]); err != nil {
		return err
	}

	if err := binary.Write(w, byteOrder, edge.MessageFlags); err != nil {
		return err
	}
	if err := binary.Write(w, byteOrder, edge.ChannelFlags); err != nil {
		return err
	}
	if err := binary.Write(w, byteOrder, edge.TimeLockDelta); err != nil {
		return err
	}
	if err := binary.Write(w, byteOrder, uint64(edge.MinHTLC)); err != nil {
		return err
	}
	err = binary.Write(w, byteOrder, uint64(edge.FeeBaseMSat))
	if err != nil {
		return err
	}
	err = binary.Write(
		w, byteOrder, uint64(edge.FeeProportionalMillionths),
	)
	if err != nil {
		return err
	}

	if _, err := w.Write(to); err != nil {
		return err
	}

	// If the max_htlc field is present, we write it. To be compatible with
	// older versions that weren't aware of this field, we write it as part
	// of the opaque data.
	// TODO(halseth): clean up when moving to TLV.
	var opaqueBuf bytes.Buffer
	if edge.MessageFlags.HasMaxHtlc() {
		err := binary.Write(&opaqueBuf, byteOrder, uint64(edge.MaxHTLC))
		if err != nil {
			return err
		}
	}

	if len(edge.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes {
		return ErrTooManyExtraOpaqueBytes(len(edge.ExtraOpaqueData))
	}
	if _, err := opaqueBuf.Write(edge.ExtraOpaqueData); err != nil {
		return err
	}

	if err := wire.WriteVarBytes(w, 0, opaqueBuf.Bytes()); err != nil {
		return err
	}

	return nil
}

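// Summary of the policy record written by serializeChanEdgePolicy above (all
// integers use byteOrder):
//
//	sig (varbytes) || channelID (8) || lastUpdate (8) ||
//	messageFlags (1) || channelFlags (1) || timeLockDelta (2) ||
//	minHTLC (8) || feeBaseMSat (8) || feeProportionalMillionths (8) ||
//	toNode (33) || opaque (varbytes)
//
// where the opaque blob starts with the 8-byte max_htlc value whenever the
// message flags indicate that the field is present, followed by any extra
// opaque data. deserializeChanEdgePolicyRaw below parses the record in this
// order.
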
func deserializeChanEdgePolicy(r io.Reader) (*models.ChannelEdgePolicy, error) {
	// Deserialize the policy. Note that in case an optional field is not
	// found, both an error and a populated policy object are returned.
	edge, deserializeErr := deserializeChanEdgePolicyRaw(r)
	if deserializeErr != nil &&
		!errors.Is(deserializeErr, ErrEdgePolicyOptionalFieldNotFound) {

		return nil, deserializeErr
	}

	return edge, deserializeErr
}

func deserializeChanEdgePolicyRaw(r io.Reader) (*models.ChannelEdgePolicy,
	error) {

	edge := &models.ChannelEdgePolicy{}

	var err error
	edge.SigBytes, err = wire.ReadVarBytes(r, 0, 80, "sig")
	if err != nil {
		return nil, err
	}

	if err := binary.Read(r, byteOrder, &edge.ChannelID); err != nil {
		return nil, err
	}

	var scratch [8]byte
	if _, err := r.Read(scratch[:]); err != nil {
		return nil, err
	}
	unix := int64(byteOrder.Uint64(scratch[:]))
	edge.LastUpdate = time.Unix(unix, 0)

	if err := binary.Read(r, byteOrder, &edge.MessageFlags); err != nil {
		return nil, err
	}
	if err := binary.Read(r, byteOrder, &edge.ChannelFlags); err != nil {
		return nil, err
	}
	if err := binary.Read(r, byteOrder, &edge.TimeLockDelta); err != nil {
		return nil, err
	}

	var n uint64
	if err := binary.Read(r, byteOrder, &n); err != nil {
		return nil, err
	}
	edge.MinHTLC = lnwire.MilliSatoshi(n)

	if err := binary.Read(r, byteOrder, &n); err != nil {
		return nil, err
	}
	edge.FeeBaseMSat = lnwire.MilliSatoshi(n)

	if err := binary.Read(r, byteOrder, &n); err != nil {
		return nil, err
	}
	edge.FeeProportionalMillionths = lnwire.MilliSatoshi(n)

	if _, err := r.Read(edge.ToNode[:]); err != nil {
		return nil, err
	}

	// We'll try and see if there are any opaque bytes left, if not, then
	// we'll ignore the EOF error and return the edge as is.
	edge.ExtraOpaqueData, err = wire.ReadVarBytes(
		r, 0, MaxAllowedExtraOpaqueBytes, "blob",
	)
	switch {
	case errors.Is(err, io.ErrUnexpectedEOF):
	case errors.Is(err, io.EOF):
	case err != nil:
		return nil, err
	}

	// See if optional fields are present.
	if edge.MessageFlags.HasMaxHtlc() {
		// The max_htlc field should be at the beginning of the opaque
		// bytes.
		opq := edge.ExtraOpaqueData

		// If the max_htlc field is not present, it might be old data
		// stored before this field was validated. We'll return the
		// edge along with an error.
		if len(opq) < 8 {
			return edge, ErrEdgePolicyOptionalFieldNotFound
		}

		maxHtlc := byteOrder.Uint64(opq[:8])
		edge.MaxHTLC = lnwire.MilliSatoshi(maxHtlc)

		// Exclude the parsed field from the rest of the opaque data.
		edge.ExtraOpaqueData = opq[8:]
	}

	return edge, nil
}

// chanGraphNodeTx is an implementation of the NodeRTx interface backed by the
// KVStore and a kvdb.RTx.
type chanGraphNodeTx struct {
	tx   kvdb.RTx
	db   *KVStore
	node *models.LightningNode
}

// A compile-time constraint to ensure chanGraphNodeTx implements the NodeRTx
// interface.
var _ NodeRTx = (*chanGraphNodeTx)(nil)

func newChanGraphNodeTx(tx kvdb.RTx, db *KVStore,
	node *models.LightningNode) *chanGraphNodeTx {

	return &chanGraphNodeTx{
		tx:   tx,
		db:   db,
		node: node,
	}
}

// Node returns the raw information of the node.
//
// NOTE: This is a part of the NodeRTx interface.
func (c *chanGraphNodeTx) Node() *models.LightningNode {
	return c.node
}

// FetchNode fetches the node with the given pub key under the same transaction
// used to fetch the current node. The returned node is also a NodeRTx and any
// operations on that NodeRTx will also be done under the same transaction.
//
// NOTE: This is a part of the NodeRTx interface.
func (c *chanGraphNodeTx) FetchNode(nodePub route.Vertex) (NodeRTx, error) {
	node, err := c.db.FetchLightningNodeTx(c.tx, nodePub)
	if err != nil {
		return nil, err
	}

	return newChanGraphNodeTx(c.tx, c.db, node), nil
}

// ForEachChannel can be used to iterate over the node's channels under
// the same transaction used to fetch the node.
//
// NOTE: This is a part of the NodeRTx interface.
func (c *chanGraphNodeTx) ForEachChannel(f func(*models.ChannelEdgeInfo,
	*models.ChannelEdgePolicy, *models.ChannelEdgePolicy) error) error {

	return c.db.ForEachNodeChannelTx(c.tx, c.node.PubKeyBytes,
		func(_ kvdb.RTx, info *models.ChannelEdgeInfo, policy1,
			policy2 *models.ChannelEdgePolicy) error {

			return f(info, policy1, policy2)
		},
	)
}

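// countNodeChannels is an illustrative sketch (not part of the original file)
// of how the NodeRTx implementation above can be consumed: every call made
// through the node handle, including nested FetchNode calls, reuses the read
// transaction the node was originally fetched under.
func countNodeChannels(node NodeRTx) (int, error) {
	var numChannels int
	err := node.ForEachChannel(func(_ *models.ChannelEdgeInfo,
		_, _ *models.ChannelEdgePolicy) error {

		numChannels++
		return nil
	})

	return numChannels, err
}
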
// MakeTestGraph creates a new instance of the KVStore for testing
// purposes.
func MakeTestGraph(t testing.TB, modifiers ...KVStoreOptionModifier) (
	*ChannelGraph, error) {

	opts := DefaultOptions()
	for _, modifier := range modifiers {
		modifier(opts)
	}

	// Next, create KVStore for the first time.
	backend, backendCleanup, err := kvdb.GetTestBackend(t.TempDir(), "cgr")
	if err != nil {
		backendCleanup()

		return nil, err
	}

	graph, err := NewChannelGraph(&Config{
		KVDB:        backend,
		KVStoreOpts: modifiers,
	})
	if err != nil {
		backendCleanup()

		return nil, err
	}

	t.Cleanup(func() {
		_ = backend.Close()
		backendCleanup()
	})

	return graph, nil
}
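
// A minimal sketch (not part of the original file) of how MakeTestGraph is
// typically consumed from a test: the helper registers its own cleanup on the
// testing.TB, so the caller only needs to check the returned error.
//
//	func TestGraphSmoke(t *testing.T) {
//		graph, err := MakeTestGraph(t)
//		if err != nil {
//			t.Fatalf("unable to make test graph: %v", err)
//		}
//		_ = graph
//	}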