• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

lightningnetwork / lnd / 14899797232

08 May 2025 06:05AM UTC coverage: 69.004% (+0.02%) from 68.987%
14899797232

Pull #9692

github

web-flow
Merge 4d99961b4 into 1a5432368
Pull Request #9692: [graph-work-side-branch]: temp side branch for graph work

221 of 272 new or added lines in 23 files covered. (81.25%)

67 existing lines in 23 files now uncovered.

133967 of 194145 relevant lines covered (69.0%)

22139.43 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

77.57
/graph/db/kv_store.go
1
package graphdb
2

3
import (
4
        "bytes"
5
        "crypto/sha256"
6
        "encoding/binary"
7
        "errors"
8
        "fmt"
9
        "io"
10
        "math"
11
        "net"
12
        "sort"
13
        "sync"
14
        "testing"
15
        "time"
16

17
        "github.com/btcsuite/btcd/btcec/v2"
18
        "github.com/btcsuite/btcd/chaincfg/chainhash"
19
        "github.com/btcsuite/btcd/txscript"
20
        "github.com/btcsuite/btcd/wire"
21
        "github.com/btcsuite/btcwallet/walletdb"
22
        "github.com/lightningnetwork/lnd/aliasmgr"
23
        "github.com/lightningnetwork/lnd/batch"
24
        "github.com/lightningnetwork/lnd/graph/db/models"
25
        "github.com/lightningnetwork/lnd/input"
26
        "github.com/lightningnetwork/lnd/kvdb"
27
        "github.com/lightningnetwork/lnd/lnwire"
28
        "github.com/lightningnetwork/lnd/routing/route"
29
        "github.com/stretchr/testify/require"
30
)
31

32
var (
	// nodeBucket is a bucket which houses all the vertices or nodes within
	// the channel graph. This bucket has a single sub-bucket which adds an
	// additional index from pubkey -> alias. Within the top-level of this
	// bucket, the key space maps a node's compressed public key to the
	// serialized information for that node. Additionally, there's a
	// special key "source" which stores the pubkey of the source node. The
	// source node is used as the starting point for all graph related
	// queries and traversals. The graph is formed as a star-graph with the
	// source node at the center.
	//
	// maps: pubKey -> nodeInfo
	// maps: source -> selfPubKey
	nodeBucket = []byte("graph-node")

	// nodeUpdateIndexBucket is a sub-bucket of the nodeBucket. This bucket
	// will be used to quickly look up the "freshness" of a node's last
	// update to the network. The bucket only contains keys, and no values,
	// it's mapping:
	//
	// maps: updateTime || nodeID -> nil
	nodeUpdateIndexBucket = []byte("graph-node-update-index")

	// sourceKey is a special key that resides within the nodeBucket. The
	// sourceKey maps a key to the public key of the "self node".
	sourceKey = []byte("source")

	// aliasIndexBucket is a sub-bucket that's nested within the main
	// nodeBucket. This bucket maps the public key of a node to its
	// current alias. This bucket is provided as it can be used within a
	// future UI layer to add an additional degree of confirmation.
	aliasIndexBucket = []byte("alias")

	// edgeBucket is a bucket which houses all of the edge or channel
	// information within the channel graph. This bucket essentially acts
	// as an adjacency list, which in conjunction with a range scan, can be
	// used to iterate over all the incoming and outgoing edges for a
	// particular node. Keys in this bucket use a prefix scheme which leads
	// with the node's public key and ends with the compact edge ID.
	// For each chanID, there will be two entries within the bucket, as the
	// graph is directed: nodes may have different policies w.r.t to fees
	// for their respective directions.
	//
	// maps: pubKey || chanID -> channel edge policy for node
	edgeBucket = []byte("graph-edge")

	// unknownPolicy is represented as an empty slice. It is
	// used as the value in edgeBucket for unknown channel edge policies.
	// Unknown policies are still stored in the database to enable efficient
	// lookup of incoming channel edges.
	unknownPolicy = []byte{}

	// chanStart is an array of all zero bytes which is used to perform
	// range scans within the edgeBucket to obtain all of the outgoing
	// edges for a particular node.
	chanStart [8]byte

	// edgeIndexBucket is an index which can be used to iterate all edges
	// in the bucket, grouping them according to their in/out nodes.
	// Additionally, the items in this bucket also contain the complete
	// edge information for a channel. The edge information includes the
	// capacity of the channel, the nodes that made the channel, etc. This
	// bucket resides within the edgeBucket above. Creation of an edge
	// proceeds in two phases: first the edge is added to the edge index,
	// afterwards the edgeBucket can be updated with the latest details of
	// the edge as they are announced on the network.
	//
	// maps: chanID -> pubKey1 || pubKey2 || restofEdgeInfo
	edgeIndexBucket = []byte("edge-index")

	// edgeUpdateIndexBucket is a sub-bucket of the main edgeBucket. This
	// bucket contains an index which allows us to gauge the "freshness" of
	// a channel's last updates.
	//
	// maps: updateTime || chanID -> nil
	edgeUpdateIndexBucket = []byte("edge-update-index")

	// channelPointBucket maps a channel's full outpoint (txid:index) to
	// its short 8-byte channel ID. This bucket resides within the
	// edgeBucket above, and can be used to quickly remove an edge due to
	// the outpoint being spent, or to query for existence of a channel.
	//
	// maps: outPoint -> chanID
	channelPointBucket = []byte("chan-index")

	// zombieBucket is a sub-bucket of the main edgeBucket bucket
	// responsible for maintaining an index of zombie channels. Each entry
	// exists within the bucket as follows:
	//
	// maps: chanID -> pubKey1 || pubKey2
	//
	// The chanID represents the channel ID of the edge that is marked as a
	// zombie and is used as the key, which maps to the public keys of the
	// edge's participants.
	zombieBucket = []byte("zombie-index")

	// disabledEdgePolicyBucket is a sub-bucket of the main edgeBucket
	// bucket responsible for maintaining an index of disabled edge
	// policies. Each entry exists within the bucket as follows:
	//
	// maps: <chanID><direction> -> []byte{}
	//
	// The chanID represents the channel ID of the edge and the direction is
	// one byte representing the direction of the edge. The main purpose of
	// this index is to allow pruning disabled channels in a fast way
	// without the need to iterate all over the graph.
	disabledEdgePolicyBucket = []byte("disabled-edge-policy-index")

	// graphMetaBucket is a top-level bucket which stores various metadata
	// related to the on-disk channel graph. Data stored in this bucket
	// includes the block to which the graph has been synced to, the total
	// number of channels, etc.
	graphMetaBucket = []byte("graph-meta")

	// pruneLogBucket is a bucket within the graphMetaBucket that stores
	// a mapping from the block height to the hash for the blocks used to
	// prune the graph.
	// Once a new block is discovered, any channels that have been closed
	// (by spending the outpoint) can safely be removed from the graph, and
	// the block is added to the prune log. We need to keep such a log for
	// the case where a reorg happens, and we must "rewind" the state of the
	// graph by removing channels that were previously confirmed. In such a
	// case we'll remove all entries from the prune log with a block height
	// that no longer exists.
	pruneLogBucket = []byte("prune-log")

	// closedScidBucket is a top-level bucket that stores scids for
	// channels that we know to be closed. This is used so that we don't
	// need to perform expensive validation checks if we receive a channel
	// announcement for the channel again.
	//
	// maps: scid -> []byte{}
	closedScidBucket = []byte("closed-scid")
)
166

167
const (
	// MaxAllowedExtraOpaqueBytes caps how many opaque extra bytes a single
	// record may persist to disk. Without such a cap, a peer could pad its
	// updates with arbitrary data, slowly exhausting our disk space and
	// wasting bandwidth when those updates are relayed.
	MaxAllowedExtraOpaqueBytes = 10000
)
174

175
// KVStore is a persistent, on-disk graph representation of the Lightning
176
// Network. This struct can be used to implement path finding algorithms on top
177
// of, and also to update a node's view based on information received from the
178
// p2p network. Internally, the graph is stored using a modified adjacency list
179
// representation with some added object interaction possible with each
180
// serialized edge/node. The graph is stored is directed, meaning that are two
181
// edges stored for each channel: an inbound/outbound edge for each node pair.
182
// Nodes, edges, and edge information can all be added to the graph
183
// independently. Edge removal results in the deletion of all edge information
184
// for that edge.
185
type KVStore struct {
186
        db kvdb.Backend
187

188
        // cacheMu guards all caches (rejectCache and chanCache). If
189
        // this mutex will be acquired at the same time as the DB mutex then
190
        // the cacheMu MUST be acquired first to prevent deadlock.
191
        cacheMu     sync.RWMutex
192
        rejectCache *rejectCache
193
        chanCache   *channelCache
194

195
        chanScheduler batch.Scheduler
196
        nodeScheduler batch.Scheduler
197
}
198

199
// NewKVStore allocates a new KVStore backed by a DB instance. The
200
// returned instance has its own unique reject cache and channel cache.
201
func NewKVStore(db kvdb.Backend, options ...KVStoreOptionModifier) (*KVStore,
202
        error) {
172✔
203

172✔
204
        opts := DefaultOptions()
172✔
205
        for _, o := range options {
175✔
206
                o(opts)
3✔
207
        }
3✔
208

209
        if !opts.NoMigration {
344✔
210
                if err := initKVStore(db); err != nil {
172✔
211
                        return nil, err
×
212
                }
×
213
        }
214

215
        g := &KVStore{
172✔
216
                db:          db,
172✔
217
                rejectCache: newRejectCache(opts.RejectCacheSize),
172✔
218
                chanCache:   newChannelCache(opts.ChannelCacheSize),
172✔
219
        }
172✔
220
        g.chanScheduler = batch.NewTimeScheduler(
172✔
221
                db, &g.cacheMu, opts.BatchCommitInterval,
172✔
222
        )
172✔
223
        g.nodeScheduler = batch.NewTimeScheduler(
172✔
224
                db, nil, opts.BatchCommitInterval,
172✔
225
        )
172✔
226

172✔
227
        return g, nil
172✔
228
}
229

230
// channelMapKey is the key structure used for storing channel edge policies.
231
type channelMapKey struct {
232
        nodeKey route.Vertex
233
        chanID  [8]byte
234
}
235

236
// getChannelMap loads all channel edge policies from the database and stores
237
// them in a map.
238
func (c *KVStore) getChannelMap(edges kvdb.RBucket) (
239
        map[channelMapKey]*models.ChannelEdgePolicy, error) {
143✔
240

143✔
241
        // Create a map to store all channel edge policies.
143✔
242
        channelMap := make(map[channelMapKey]*models.ChannelEdgePolicy)
143✔
243

143✔
244
        err := kvdb.ForAll(edges, func(k, edgeBytes []byte) error {
1,701✔
245
                // Skip embedded buckets.
1,558✔
246
                if bytes.Equal(k, edgeIndexBucket) ||
1,558✔
247
                        bytes.Equal(k, edgeUpdateIndexBucket) ||
1,558✔
248
                        bytes.Equal(k, zombieBucket) ||
1,558✔
249
                        bytes.Equal(k, disabledEdgePolicyBucket) ||
1,558✔
250
                        bytes.Equal(k, channelPointBucket) {
2,126✔
251

568✔
252
                        return nil
568✔
253
                }
568✔
254

255
                // Validate key length.
256
                if len(k) != 33+8 {
993✔
257
                        return fmt.Errorf("invalid edge key %x encountered", k)
×
258
                }
×
259

260
                var key channelMapKey
993✔
261
                copy(key.nodeKey[:], k[:33])
993✔
262
                copy(key.chanID[:], k[33:])
993✔
263

993✔
264
                // No need to deserialize unknown policy.
993✔
265
                if bytes.Equal(edgeBytes, unknownPolicy) {
993✔
266
                        return nil
×
267
                }
×
268

269
                edgeReader := bytes.NewReader(edgeBytes)
993✔
270
                edge, err := deserializeChanEdgePolicyRaw(
993✔
271
                        edgeReader,
993✔
272
                )
993✔
273

993✔
274
                switch {
993✔
275
                // If the db policy was missing an expected optional field, we
276
                // return nil as if the policy was unknown.
277
                case errors.Is(err, ErrEdgePolicyOptionalFieldNotFound):
×
278
                        return nil
×
279

280
                case err != nil:
×
281
                        return err
×
282
                }
283

284
                channelMap[key] = edge
993✔
285

993✔
286
                return nil
993✔
287
        })
288
        if err != nil {
143✔
289
                return nil, err
×
290
        }
×
291

292
        return channelMap, nil
143✔
293
}
294

295
var graphTopLevelBuckets = [][]byte{
296
        nodeBucket,
297
        edgeBucket,
298
        graphMetaBucket,
299
        closedScidBucket,
300
}
301

302
// createChannelDB creates and initializes a fresh version of  In
303
// the case that the target path has not yet been created or doesn't yet exist,
304
// then the path is created. Additionally, all required top-level buckets used
305
// within the database are created.
306
func initKVStore(db kvdb.Backend) error {
172✔
307
        err := kvdb.Update(db, func(tx kvdb.RwTx) error {
344✔
308
                for _, tlb := range graphTopLevelBuckets {
851✔
309
                        if _, err := tx.CreateTopLevelBucket(tlb); err != nil {
679✔
310
                                return err
×
311
                        }
×
312
                }
313

314
                nodes := tx.ReadWriteBucket(nodeBucket)
172✔
315
                _, err := nodes.CreateBucketIfNotExists(aliasIndexBucket)
172✔
316
                if err != nil {
172✔
317
                        return err
×
318
                }
×
319
                _, err = nodes.CreateBucketIfNotExists(nodeUpdateIndexBucket)
172✔
320
                if err != nil {
172✔
321
                        return err
×
322
                }
×
323

324
                edges := tx.ReadWriteBucket(edgeBucket)
172✔
325
                _, err = edges.CreateBucketIfNotExists(edgeIndexBucket)
172✔
326
                if err != nil {
172✔
327
                        return err
×
328
                }
×
329
                _, err = edges.CreateBucketIfNotExists(edgeUpdateIndexBucket)
172✔
330
                if err != nil {
172✔
331
                        return err
×
332
                }
×
333
                _, err = edges.CreateBucketIfNotExists(channelPointBucket)
172✔
334
                if err != nil {
172✔
335
                        return err
×
336
                }
×
337
                _, err = edges.CreateBucketIfNotExists(zombieBucket)
172✔
338
                if err != nil {
172✔
339
                        return err
×
340
                }
×
341

342
                graphMeta := tx.ReadWriteBucket(graphMetaBucket)
172✔
343
                _, err = graphMeta.CreateBucketIfNotExists(pruneLogBucket)
172✔
344

172✔
345
                return err
172✔
346
        }, func() {})
172✔
347
        if err != nil {
172✔
348
                return fmt.Errorf("unable to create new channel graph: %w", err)
×
349
        }
×
350

351
        return nil
172✔
352
}
353

354
// AddrsForNode returns all known addresses for the target node public key that
355
// the graph DB is aware of. The returned boolean indicates if the given node is
356
// unknown to the graph DB or not.
357
//
358
// NOTE: this is part of the channeldb.AddrSource interface.
359
func (c *KVStore) AddrsForNode(nodePub *btcec.PublicKey) (bool, []net.Addr,
360
        error) {
3✔
361

3✔
362
        pubKey, err := route.NewVertexFromBytes(nodePub.SerializeCompressed())
3✔
363
        if err != nil {
3✔
364
                return false, nil, err
×
365
        }
×
366

367
        node, err := c.FetchLightningNode(pubKey)
3✔
368
        // We don't consider it an error if the graph is unaware of the node.
3✔
369
        switch {
3✔
370
        case err != nil && !errors.Is(err, ErrGraphNodeNotFound):
×
371
                return false, nil, err
×
372

373
        case errors.Is(err, ErrGraphNodeNotFound):
3✔
374
                return false, nil, nil
3✔
375
        }
376

377
        return true, node.Addresses, nil
3✔
378
}
379

380
// ForEachChannel iterates through all the channel edges stored within the
381
// graph and invokes the passed callback for each edge. The callback takes two
382
// edges as since this is a directed graph, both the in/out edges are visited.
383
// If the callback returns an error, then the transaction is aborted and the
384
// iteration stops early.
385
//
386
// NOTE: If an edge can't be found, or wasn't advertised, then a nil pointer
387
// for that particular channel edge routing policy will be passed into the
388
// callback.
389
func (c *KVStore) ForEachChannel(cb func(*models.ChannelEdgeInfo,
390
        *models.ChannelEdgePolicy, *models.ChannelEdgePolicy) error) error {
143✔
391

143✔
392
        return c.db.View(func(tx kvdb.RTx) error {
286✔
393
                edges := tx.ReadBucket(edgeBucket)
143✔
394
                if edges == nil {
143✔
395
                        return ErrGraphNoEdgesFound
×
396
                }
×
397

398
                // First, load all edges in memory indexed by node and channel
399
                // id.
400
                channelMap, err := c.getChannelMap(edges)
143✔
401
                if err != nil {
143✔
402
                        return err
×
403
                }
×
404

405
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
143✔
406
                if edgeIndex == nil {
143✔
407
                        return ErrGraphNoEdgesFound
×
408
                }
×
409

410
                // Load edge index, recombine each channel with the policies
411
                // loaded above and invoke the callback.
412
                return kvdb.ForAll(
143✔
413
                        edgeIndex, func(k, edgeInfoBytes []byte) error {
641✔
414
                                var chanID [8]byte
498✔
415
                                copy(chanID[:], k)
498✔
416

498✔
417
                                edgeInfoReader := bytes.NewReader(edgeInfoBytes)
498✔
418
                                info, err := deserializeChanEdgeInfo(
498✔
419
                                        edgeInfoReader,
498✔
420
                                )
498✔
421
                                if err != nil {
498✔
422
                                        return err
×
423
                                }
×
424

425
                                policy1 := channelMap[channelMapKey{
498✔
426
                                        nodeKey: info.NodeKey1Bytes,
498✔
427
                                        chanID:  chanID,
498✔
428
                                }]
498✔
429

498✔
430
                                policy2 := channelMap[channelMapKey{
498✔
431
                                        nodeKey: info.NodeKey2Bytes,
498✔
432
                                        chanID:  chanID,
498✔
433
                                }]
498✔
434

498✔
435
                                return cb(&info, policy1, policy2)
498✔
436
                        },
437
                )
438
        }, func() {})
143✔
439
}
440

441
// forEachNodeDirectedChannel iterates through all channels of a given node,
442
// executing the passed callback on the directed edge representing the channel
443
// and its incoming policy. If the callback returns an error, then the iteration
444
// is halted with the error propagated back up to the caller. An optional read
445
// transaction may be provided. If none is provided, a new one will be created.
446
//
447
// Unknown policies are passed into the callback as nil values.
448
func (c *KVStore) forEachNodeDirectedChannel(tx kvdb.RTx,
449
        node route.Vertex, cb func(channel *DirectedChannel) error) error {
245✔
450

245✔
451
        // Fallback that uses the database.
245✔
452
        toNodeCallback := func() route.Vertex {
380✔
453
                return node
135✔
454
        }
135✔
455
        toNodeFeatures, err := c.fetchNodeFeatures(tx, node)
245✔
456
        if err != nil {
245✔
457
                return err
×
458
        }
×
459

460
        dbCallback := func(tx kvdb.RTx, e *models.ChannelEdgeInfo, p1,
245✔
461
                p2 *models.ChannelEdgePolicy) error {
744✔
462

499✔
463
                var cachedInPolicy *models.CachedEdgePolicy
499✔
464
                if p2 != nil {
995✔
465
                        cachedInPolicy = models.NewCachedPolicy(p2)
496✔
466
                        cachedInPolicy.ToNodePubKey = toNodeCallback
496✔
467
                        cachedInPolicy.ToNodeFeatures = toNodeFeatures
496✔
468
                }
496✔
469

470
                var inboundFee lnwire.Fee
499✔
471
                if p1 != nil {
997✔
472
                        // Extract inbound fee. If there is a decoding error,
498✔
473
                        // skip this edge.
498✔
474
                        _, err := p1.ExtraOpaqueData.ExtractRecords(&inboundFee)
498✔
475
                        if err != nil {
499✔
476
                                return nil
1✔
477
                        }
1✔
478
                }
479

480
                directedChannel := &DirectedChannel{
498✔
481
                        ChannelID:    e.ChannelID,
498✔
482
                        IsNode1:      node == e.NodeKey1Bytes,
498✔
483
                        OtherNode:    e.NodeKey2Bytes,
498✔
484
                        Capacity:     e.Capacity,
498✔
485
                        OutPolicySet: p1 != nil,
498✔
486
                        InPolicy:     cachedInPolicy,
498✔
487
                        InboundFee:   inboundFee,
498✔
488
                }
498✔
489

498✔
490
                if node == e.NodeKey2Bytes {
751✔
491
                        directedChannel.OtherNode = e.NodeKey1Bytes
253✔
492
                }
253✔
493

494
                return cb(directedChannel)
498✔
495
        }
496

497
        return nodeTraversal(tx, node[:], c.db, dbCallback)
245✔
498
}
499

500
// fetchNodeFeatures returns the features of a given node. If no features are
501
// known for the node, an empty feature vector is returned. An optional read
502
// transaction may be provided. If none is provided, a new one will be created.
503
func (c *KVStore) fetchNodeFeatures(tx kvdb.RTx,
504
        node route.Vertex) (*lnwire.FeatureVector, error) {
689✔
505

689✔
506
        // Fallback that uses the database.
689✔
507
        targetNode, err := c.FetchLightningNodeTx(tx, node)
689✔
508
        switch {
689✔
509
        // If the node exists and has features, return them directly.
510
        case err == nil:
678✔
511
                return targetNode.Features, nil
678✔
512

513
        // If we couldn't find a node announcement, populate a blank feature
514
        // vector.
515
        case errors.Is(err, ErrGraphNodeNotFound):
11✔
516
                return lnwire.EmptyFeatureVector(), nil
11✔
517

518
        // Otherwise, bubble the error up.
519
        default:
×
520
                return nil, err
×
521
        }
522
}
523

524
// ForEachNodeDirectedChannel iterates through all channels of a given node,
525
// executing the passed callback on the directed edge representing the channel
526
// and its incoming policy. If the callback returns an error, then the iteration
527
// is halted with the error propagated back up to the caller.
528
//
529
// Unknown policies are passed into the callback as nil values.
530
//
531
// NOTE: this is part of the graphdb.NodeTraverser interface.
532
func (c *KVStore) ForEachNodeDirectedChannel(nodePub route.Vertex,
533
        cb func(channel *DirectedChannel) error) error {
6✔
534

6✔
535
        return c.forEachNodeDirectedChannel(nil, nodePub, cb)
6✔
536
}
6✔
537

538
// FetchNodeFeatures returns the features of the given node. If no features are
539
// known for the node, an empty feature vector is returned.
540
//
541
// NOTE: this is part of the graphdb.NodeTraverser interface.
542
func (c *KVStore) FetchNodeFeatures(nodePub route.Vertex) (
543
        *lnwire.FeatureVector, error) {
3✔
544

3✔
545
        return c.fetchNodeFeatures(nil, nodePub)
3✔
546
}
3✔
547

548
// ForEachNodeCached is similar to forEachNode, but it returns DirectedChannel
549
// data to the call-back.
550
//
551
// NOTE: The callback contents MUST not be modified.
552
func (c *KVStore) ForEachNodeCached(cb func(node route.Vertex,
553
        chans map[uint64]*DirectedChannel) error) error {
1✔
554

1✔
555
        // Otherwise call back to a version that uses the database directly.
1✔
556
        // We'll iterate over each node, then the set of channels for each
1✔
557
        // node, and construct a similar callback functiopn signature as the
1✔
558
        // main funcotin expects.
1✔
559
        return c.forEachNode(func(tx kvdb.RTx,
1✔
560
                node *models.LightningNode) error {
21✔
561

20✔
562
                channels := make(map[uint64]*DirectedChannel)
20✔
563

20✔
564
                err := c.forEachNodeChannelTx(tx, node.PubKeyBytes,
20✔
565
                        func(tx kvdb.RTx, e *models.ChannelEdgeInfo,
20✔
566
                                p1 *models.ChannelEdgePolicy,
20✔
567
                                p2 *models.ChannelEdgePolicy) error {
210✔
568

190✔
569
                                toNodeCallback := func() route.Vertex {
190✔
570
                                        return node.PubKeyBytes
×
571
                                }
×
572
                                toNodeFeatures, err := c.fetchNodeFeatures(
190✔
573
                                        tx, node.PubKeyBytes,
190✔
574
                                )
190✔
575
                                if err != nil {
190✔
576
                                        return err
×
577
                                }
×
578

579
                                var cachedInPolicy *models.CachedEdgePolicy
190✔
580
                                if p2 != nil {
380✔
581
                                        cachedInPolicy =
190✔
582
                                                models.NewCachedPolicy(p2)
190✔
583
                                        cachedInPolicy.ToNodePubKey =
190✔
584
                                                toNodeCallback
190✔
585
                                        cachedInPolicy.ToNodeFeatures =
190✔
586
                                                toNodeFeatures
190✔
587
                                }
190✔
588

589
                                directedChannel := &DirectedChannel{
190✔
590
                                        ChannelID: e.ChannelID,
190✔
591
                                        IsNode1: node.PubKeyBytes ==
190✔
592
                                                e.NodeKey1Bytes,
190✔
593
                                        OtherNode:    e.NodeKey2Bytes,
190✔
594
                                        Capacity:     e.Capacity,
190✔
595
                                        OutPolicySet: p1 != nil,
190✔
596
                                        InPolicy:     cachedInPolicy,
190✔
597
                                }
190✔
598

190✔
599
                                if node.PubKeyBytes == e.NodeKey2Bytes {
285✔
600
                                        directedChannel.OtherNode =
95✔
601
                                                e.NodeKey1Bytes
95✔
602
                                }
95✔
603

604
                                channels[e.ChannelID] = directedChannel
190✔
605

190✔
606
                                return nil
190✔
607
                        })
608
                if err != nil {
20✔
609
                        return err
×
610
                }
×
611

612
                return cb(node.PubKeyBytes, channels)
20✔
613
        })
614
}
615

616
// DisabledChannelIDs returns the channel ids of disabled channels.
617
// A channel is disabled when two of the associated ChanelEdgePolicies
618
// have their disabled bit on.
619
func (c *KVStore) DisabledChannelIDs() ([]uint64, error) {
6✔
620
        var disabledChanIDs []uint64
6✔
621
        var chanEdgeFound map[uint64]struct{}
6✔
622

6✔
623
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
12✔
624
                edges := tx.ReadBucket(edgeBucket)
6✔
625
                if edges == nil {
6✔
626
                        return ErrGraphNoEdgesFound
×
627
                }
×
628

629
                disabledEdgePolicyIndex := edges.NestedReadBucket(
6✔
630
                        disabledEdgePolicyBucket,
6✔
631
                )
6✔
632
                if disabledEdgePolicyIndex == nil {
7✔
633
                        return nil
1✔
634
                }
1✔
635

636
                // We iterate over all disabled policies and we add each channel
637
                // that has more than one disabled policy to disabledChanIDs
638
                // array.
639
                return disabledEdgePolicyIndex.ForEach(
5✔
640
                        func(k, v []byte) error {
16✔
641
                                chanID := byteOrder.Uint64(k[:8])
11✔
642
                                _, edgeFound := chanEdgeFound[chanID]
11✔
643
                                if edgeFound {
15✔
644
                                        delete(chanEdgeFound, chanID)
4✔
645
                                        disabledChanIDs = append(
4✔
646
                                                disabledChanIDs, chanID,
4✔
647
                                        )
4✔
648

4✔
649
                                        return nil
4✔
650
                                }
4✔
651

652
                                chanEdgeFound[chanID] = struct{}{}
7✔
653

7✔
654
                                return nil
7✔
655
                        },
656
                )
657
        }, func() {
6✔
658
                disabledChanIDs = nil
6✔
659
                chanEdgeFound = make(map[uint64]struct{})
6✔
660
        })
6✔
661
        if err != nil {
6✔
662
                return nil, err
×
663
        }
×
664

665
        return disabledChanIDs, nil
6✔
666
}
667

668
// ForEachNode iterates through all the stored vertices/nodes in the graph,
669
// executing the passed callback with each node encountered. If the callback
670
// returns an error, then the transaction is aborted and the iteration stops
671
// early. Any operations performed on the NodeTx passed to the call-back are
672
// executed under the same read transaction and so, methods on the NodeTx object
673
// _MUST_ only be called from within the call-back.
674
func (c *KVStore) ForEachNode(cb func(tx NodeRTx) error) error {
131✔
675
        return c.forEachNode(func(tx kvdb.RTx,
131✔
676
                node *models.LightningNode) error {
1,292✔
677

1,161✔
678
                return cb(newChanGraphNodeTx(tx, c, node))
1,161✔
679
        })
1,161✔
680
}
681

682
// forEachNode iterates through all the stored vertices/nodes in the graph,
// executing the passed callback with each node encountered. If the callback
// returns an error, then the transaction is aborted and the iteration stops
// early.
//
// TODO(roasbeef): add iterator interface to allow for memory efficient graph
// traversal when graph gets mega.
func (c *KVStore) forEachNode(
	cb func(kvdb.RTx, *models.LightningNode) error) error {

	traversal := func(tx kvdb.RTx) error {
		// First grab the nodes bucket which stores the mapping from
		// pubKey to node information.
		nodes := tx.ReadBucket(nodeBucket)
		if nodes == nil {
			return ErrGraphNotFound
		}

		return nodes.ForEach(func(pubKey, nodeBytes []byte) error {
			// If this is the source key, then we skip this
			// iteration as the value for this key is a pubKey
			// rather than raw node information. The length check
			// also filters out any keys that are not 33-byte
			// compressed public keys (e.g. nested sub-bucket
			// keys that live in this bucket).
			if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 {
				return nil
			}

			nodeReader := bytes.NewReader(nodeBytes)
			node, err := deserializeLightningNode(nodeReader)
			if err != nil {
				return err
			}

			// Execute the callback, the transaction will abort if
			// this returns an error.
			return cb(tx, &node)
		})
	}

	// No reset work is needed between retries, hence the empty closure.
	return kvdb.View(c.db, traversal, func() {})
}
722

723
// ForEachNodeCacheable iterates through all the stored vertices/nodes in the
// graph, executing the passed callback with each node encountered. If the
// callback returns an error, then the transaction is aborted and the iteration
// stops early.
func (c *KVStore) ForEachNodeCacheable(cb func(route.Vertex,
	*lnwire.FeatureVector) error) error {

	traversal := func(tx kvdb.RTx) error {
		// First grab the nodes bucket which stores the mapping from
		// pubKey to node information.
		nodes := tx.ReadBucket(nodeBucket)
		if nodes == nil {
			return ErrGraphNotFound
		}

		return nodes.ForEach(func(pubKey, nodeBytes []byte) error {
			// If this is the source key, then we skip this
			// iteration as the value for this key is a pubKey
			// rather than raw node information. The length check
			// also filters out non-pubkey keys such as nested
			// sub-bucket keys stored in this bucket.
			if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 {
				return nil
			}

			// Only the identity and feature vector are decoded
			// here, which is cheaper than deserializing the full
			// node record.
			nodeReader := bytes.NewReader(nodeBytes)
			node, features, err := deserializeLightningNodeCacheable( //nolint:ll
				nodeReader,
			)
			if err != nil {
				return err
			}

			// Execute the callback, the transaction will abort if
			// this returns an error.
			return cb(node, features)
		})
	}

	// No reset work is needed between retries, hence the empty closure.
	return kvdb.View(c.db, traversal, func() {})
}
762

763
// SourceNode returns the source node of the graph. The source node is treated
764
// as the center node within a star-graph. This method may be used to kick off
765
// a path finding algorithm in order to explore the reachability of another
766
// node based off the source node.
767
func (c *KVStore) SourceNode() (*models.LightningNode, error) {
234✔
768
        var source *models.LightningNode
234✔
769
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
468✔
770
                // First grab the nodes bucket which stores the mapping from
234✔
771
                // pubKey to node information.
234✔
772
                nodes := tx.ReadBucket(nodeBucket)
234✔
773
                if nodes == nil {
234✔
774
                        return ErrGraphNotFound
×
775
                }
×
776

777
                node, err := c.sourceNode(nodes)
234✔
778
                if err != nil {
235✔
779
                        return err
1✔
780
                }
1✔
781
                source = node
233✔
782

233✔
783
                return nil
233✔
784
        }, func() {
234✔
785
                source = nil
234✔
786
        })
234✔
787
        if err != nil {
235✔
788
                return nil, err
1✔
789
        }
1✔
790

791
        return source, nil
233✔
792
}
793

794
// sourceNode uses an existing database transaction and returns the source node
795
// of the graph. The source node is treated as the center node within a
796
// star-graph. This method may be used to kick off a path finding algorithm in
797
// order to explore the reachability of another node based off the source node.
798
func (c *KVStore) sourceNode(nodes kvdb.RBucket) (*models.LightningNode,
799
        error) {
495✔
800

495✔
801
        selfPub := nodes.Get(sourceKey)
495✔
802
        if selfPub == nil {
496✔
803
                return nil, ErrSourceNodeNotSet
1✔
804
        }
1✔
805

806
        // With the pubKey of the source node retrieved, we're able to
807
        // fetch the full node information.
808
        node, err := fetchLightningNode(nodes, selfPub)
494✔
809
        if err != nil {
494✔
810
                return nil, err
×
811
        }
×
812

813
        return &node, nil
494✔
814
}
815

816
// SetSourceNode sets the source node within the graph database. The source
817
// node is to be used as the center of a star-graph within path finding
818
// algorithms.
819
func (c *KVStore) SetSourceNode(node *models.LightningNode) error {
116✔
820
        nodePubBytes := node.PubKeyBytes[:]
116✔
821

116✔
822
        return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
232✔
823
                // First grab the nodes bucket which stores the mapping from
116✔
824
                // pubKey to node information.
116✔
825
                nodes, err := tx.CreateTopLevelBucket(nodeBucket)
116✔
826
                if err != nil {
116✔
827
                        return err
×
828
                }
×
829

830
                // Next we create the mapping from source to the targeted
831
                // public key.
832
                if err := nodes.Put(sourceKey, nodePubBytes); err != nil {
116✔
833
                        return err
×
834
                }
×
835

836
                // Finally, we commit the information of the lightning node
837
                // itself.
838
                return addLightningNode(tx, node)
116✔
839
        }, func() {})
116✔
840
}
841

842
// AddLightningNode adds a vertex/node to the graph database. If the node is not
843
// in the database from before, this will add a new, unconnected one to the
844
// graph. If it is present from before, this will update that node's
845
// information. Note that this method is expected to only be called to update an
846
// already present node from a node announcement, or to insert a node found in a
847
// channel update.
848
//
849
// TODO(roasbeef): also need sig of announcement.
850
func (c *KVStore) AddLightningNode(node *models.LightningNode,
851
        opts ...batch.SchedulerOption) error {
802✔
852

802✔
853
        r := &batch.Request{
802✔
854
                Opts: batch.NewSchedulerOptions(opts...),
802✔
855
                Update: func(tx kvdb.RwTx) error {
1,604✔
856
                        return addLightningNode(tx, node)
802✔
857
                },
802✔
858
        }
859

860
        return c.nodeScheduler.Execute(r)
802✔
861
}
862

863
func addLightningNode(tx kvdb.RwTx, node *models.LightningNode) error {
990✔
864
        nodes, err := tx.CreateTopLevelBucket(nodeBucket)
990✔
865
        if err != nil {
990✔
866
                return err
×
867
        }
×
868

869
        aliases, err := nodes.CreateBucketIfNotExists(aliasIndexBucket)
990✔
870
        if err != nil {
990✔
871
                return err
×
872
        }
×
873

874
        updateIndex, err := nodes.CreateBucketIfNotExists(
990✔
875
                nodeUpdateIndexBucket,
990✔
876
        )
990✔
877
        if err != nil {
990✔
878
                return err
×
879
        }
×
880

881
        return putLightningNode(nodes, aliases, updateIndex, node)
990✔
882
}
883

884
// LookupAlias attempts to return the alias as advertised by the target node.
885
// TODO(roasbeef): currently assumes that aliases are unique...
886
func (c *KVStore) LookupAlias(pub *btcec.PublicKey) (string, error) {
5✔
887
        var alias string
5✔
888

5✔
889
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
10✔
890
                nodes := tx.ReadBucket(nodeBucket)
5✔
891
                if nodes == nil {
5✔
892
                        return ErrGraphNodesNotFound
×
893
                }
×
894

895
                aliases := nodes.NestedReadBucket(aliasIndexBucket)
5✔
896
                if aliases == nil {
5✔
897
                        return ErrGraphNodesNotFound
×
898
                }
×
899

900
                nodePub := pub.SerializeCompressed()
5✔
901
                a := aliases.Get(nodePub)
5✔
902
                if a == nil {
6✔
903
                        return ErrNodeAliasNotFound
1✔
904
                }
1✔
905

906
                // TODO(roasbeef): should actually be using the utf-8
907
                // package...
908
                alias = string(a)
4✔
909

4✔
910
                return nil
4✔
911
        }, func() {
5✔
912
                alias = ""
5✔
913
        })
5✔
914
        if err != nil {
6✔
915
                return "", err
1✔
916
        }
1✔
917

918
        return alias, nil
4✔
919
}
920

921
// DeleteLightningNode starts a new database transaction to remove a vertex/node
922
// from the database according to the node's public key.
923
func (c *KVStore) DeleteLightningNode(nodePub route.Vertex) error {
3✔
924
        // TODO(roasbeef): ensure dangling edges are removed...
3✔
925
        return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
6✔
926
                nodes := tx.ReadWriteBucket(nodeBucket)
3✔
927
                if nodes == nil {
3✔
928
                        return ErrGraphNodeNotFound
×
929
                }
×
930

931
                return c.deleteLightningNode(nodes, nodePub[:])
3✔
932
        }, func() {})
3✔
933
}
934

935
// deleteLightningNode uses an existing database transaction to remove a
// vertex/node from the database according to the node's public key.
//
// NOTE: the node's current record is read before it is deleted, since its
// LastUpdate timestamp is needed to reconstruct (and remove) the matching
// entry in the node update index.
func (c *KVStore) deleteLightningNode(nodes kvdb.RwBucket,
	compressedPubKey []byte) error {

	aliases := nodes.NestedReadWriteBucket(aliasIndexBucket)
	if aliases == nil {
		return ErrGraphNodesNotFound
	}

	// Remove the pubkey -> alias mapping first.
	if err := aliases.Delete(compressedPubKey); err != nil {
		return err
	}

	// Before we delete the node, we'll fetch its current state so we can
	// determine when its last update was to clear out the node update
	// index.
	node, err := fetchLightningNode(nodes, compressedPubKey)
	if err != nil {
		return err
	}

	if err := nodes.Delete(compressedPubKey); err != nil {
		return err
	}

	// Finally, we'll delete the index entry for the node within the
	// nodeUpdateIndexBucket as this node is no longer active, so we don't
	// need to track its last update.
	nodeUpdateIndex := nodes.NestedReadWriteBucket(nodeUpdateIndexBucket)
	if nodeUpdateIndex == nil {
		return ErrGraphNodesNotFound
	}

	// In order to delete the entry, we'll need to reconstruct the key for
	// its last update. The key is the 8-byte last-update timestamp
	// followed by the 33-byte compressed pubkey.
	updateUnix := uint64(node.LastUpdate.Unix())
	var indexKey [8 + 33]byte
	byteOrder.PutUint64(indexKey[:8], updateUnix)
	copy(indexKey[8:], compressedPubKey)

	return nodeUpdateIndex.Delete(indexKey[:])
}
978

979
// AddChannelEdge adds a new (undirected, blank) edge to the graph database. An
// undirected edge from the two target nodes are created. The information
// stored denotes the static attributes of the channel, such as the channelID,
// the keys involved in creation of the channel, and the set of features that
// the channel supports. The chanPoint and chanID are used to uniquely identify
// the edge globally within the database.
func (c *KVStore) AddChannelEdge(edge *models.ChannelEdgeInfo,
	opts ...batch.SchedulerOption) error {

	var alreadyExists bool
	r := &batch.Request{
		Opts: batch.NewSchedulerOptions(opts...),
		// Reset is run before each (re)attempt so that state from a
		// prior failed attempt doesn't leak into the next one.
		Reset: func() {
			alreadyExists = false
		},
		Update: func(tx kvdb.RwTx) error {
			err := c.addChannelEdge(tx, edge)

			// Silence ErrEdgeAlreadyExist so that the batch can
			// succeed, but propagate the error via local state.
			if errors.Is(err, ErrEdgeAlreadyExist) {
				alreadyExists = true
				return nil
			}

			return err
		},
		// OnCommit runs after the batched transaction commits; it
		// re-raises the suppressed duplicate-edge error for this
		// caller, or evicts stale cache entries on success.
		OnCommit: func(err error) error {
			switch {
			case err != nil:
				return err
			case alreadyExists:
				return ErrEdgeAlreadyExist
			default:
				c.rejectCache.remove(edge.ChannelID)
				c.chanCache.remove(edge.ChannelID)
				return nil
			}
		},
	}

	return c.chanScheduler.Execute(r)
}
1022

1023
// addChannelEdge is the private form of AddChannelEdge that allows callers to
// utilize an existing db transaction.
//
// It returns ErrEdgeAlreadyExist if an edge with the same channel ID has
// already been stored, making the operation idempotent for callers.
func (c *KVStore) addChannelEdge(tx kvdb.RwTx,
	edge *models.ChannelEdgeInfo) error {

	// Construct the channel's primary key which is the 8-byte channel ID.
	var chanKey [8]byte
	binary.BigEndian.PutUint64(chanKey[:], edge.ChannelID)

	// Ensure all of the buckets this write touches exist: the node
	// bucket, the edge bucket, and the edge/channel-point indexes.
	nodes, err := tx.CreateTopLevelBucket(nodeBucket)
	if err != nil {
		return err
	}
	edges, err := tx.CreateTopLevelBucket(edgeBucket)
	if err != nil {
		return err
	}
	edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket)
	if err != nil {
		return err
	}
	chanIndex, err := edges.CreateBucketIfNotExists(channelPointBucket)
	if err != nil {
		return err
	}

	// First, attempt to check if this edge has already been created. If
	// so, then we can exit early as this method is meant to be idempotent.
	if edgeInfo := edgeIndex.Get(chanKey[:]); edgeInfo != nil {
		return ErrEdgeAlreadyExist
	}

	// Before we insert the channel into the database, we'll ensure that
	// both nodes already exist in the channel graph. If either node
	// doesn't, then we'll insert a "shell" node that just includes its
	// public key, so subsequent validation and queries can work properly.
	_, node1Err := fetchLightningNode(nodes, edge.NodeKey1Bytes[:])
	switch {
	case errors.Is(node1Err, ErrGraphNodeNotFound):
		node1Shell := models.LightningNode{
			PubKeyBytes:          edge.NodeKey1Bytes,
			HaveNodeAnnouncement: false,
		}
		err := addLightningNode(tx, &node1Shell)
		if err != nil {
			return fmt.Errorf("unable to create shell node "+
				"for: %x: %w", edge.NodeKey1Bytes, err)
		}
	case node1Err != nil:
		return node1Err
	}

	_, node2Err := fetchLightningNode(nodes, edge.NodeKey2Bytes[:])
	switch {
	case errors.Is(node2Err, ErrGraphNodeNotFound):
		node2Shell := models.LightningNode{
			PubKeyBytes:          edge.NodeKey2Bytes,
			HaveNodeAnnouncement: false,
		}
		err := addLightningNode(tx, &node2Shell)
		if err != nil {
			return fmt.Errorf("unable to create shell node "+
				"for: %x: %w", edge.NodeKey2Bytes, err)
		}
	case node2Err != nil:
		return node2Err
	}

	// If the edge hasn't been created yet, then we'll first add it to the
	// edge index in order to associate the edge between two nodes and also
	// store the static components of the channel.
	if err := putChanEdgeInfo(edgeIndex, edge, chanKey); err != nil {
		return err
	}

	// Mark edge policies for both sides as unknown. This is to enable
	// efficient incoming channel lookup for a node.
	keys := []*[33]byte{
		&edge.NodeKey1Bytes,
		&edge.NodeKey2Bytes,
	}
	for _, key := range keys {
		err := putChanEdgePolicyUnknown(edges, edge.ChannelID, key[:])
		if err != nil {
			return err
		}
	}

	// Finally we add it to the channel index which maps channel points
	// (outpoints) to the shorter channel ID's.
	var b bytes.Buffer
	if err := WriteOutpoint(&b, &edge.ChannelPoint); err != nil {
		return err
	}

	return chanIndex.Put(b.Bytes(), chanKey[:])
}
1120

1121
// HasChannelEdge returns true if the database knows of a channel edge with the
// passed channel ID, and false otherwise. If an edge with that ID is found
// within the graph, then two time stamps representing the last time the edge
// was updated for both directed edges are returned along with the boolean. If
// it is not found, then the zombie index is checked and its result is returned
// as the second boolean.
func (c *KVStore) HasChannelEdge(
	chanID uint64) (time.Time, time.Time, bool, bool, error) {

	var (
		upd1Time time.Time
		upd2Time time.Time
		exists   bool
		isZombie bool
	)

	// We'll query the cache with the shared lock held to allow multiple
	// readers to access values in the cache concurrently if they exist.
	c.cacheMu.RLock()
	if entry, ok := c.rejectCache.get(chanID); ok {
		c.cacheMu.RUnlock()
		upd1Time = time.Unix(entry.upd1Time, 0)
		upd2Time = time.Unix(entry.upd2Time, 0)
		exists, isZombie = entry.flags.unpack()

		return upd1Time, upd2Time, exists, isZombie, nil
	}
	c.cacheMu.RUnlock()

	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()

	// The item was not found with the shared lock, so we'll acquire the
	// exclusive lock and check the cache again in case another method added
	// the entry to the cache while no lock was held.
	if entry, ok := c.rejectCache.get(chanID); ok {
		upd1Time = time.Unix(entry.upd1Time, 0)
		upd2Time = time.Unix(entry.upd2Time, 0)
		exists, isZombie = entry.flags.unpack()

		return upd1Time, upd2Time, exists, isZombie, nil
	}

	// Cache miss: fall back to the database, still holding the exclusive
	// lock so the cache insert below stays consistent with the DB read.
	if err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrGraphNoEdgesFound
		}

		var channelID [8]byte
		byteOrder.PutUint64(channelID[:], chanID)

		// If the edge doesn't exist, then we'll also check our zombie
		// index.
		if edgeIndex.Get(channelID[:]) == nil {
			exists = false
			zombieIndex := edges.NestedReadBucket(zombieBucket)
			if zombieIndex != nil {
				isZombie, _, _ = isZombieEdge(
					zombieIndex, chanID,
				)
			}

			return nil
		}

		exists = true
		isZombie = false

		// If the channel has been found in the graph, then retrieve
		// the edges itself so we can return the last updated
		// timestamps.
		nodes := tx.ReadBucket(nodeBucket)
		if nodes == nil {
			return ErrGraphNodeNotFound
		}

		e1, e2, err := fetchChanEdgePolicies(
			edgeIndex, edges, channelID[:],
		)
		if err != nil {
			return err
		}

		// As we may have only one of the edges populated, only set the
		// update time if the edge was found in the database.
		if e1 != nil {
			upd1Time = e1.LastUpdate
		}
		if e2 != nil {
			upd2Time = e2.LastUpdate
		}

		return nil
	}, func() {}); err != nil {
		return time.Time{}, time.Time{}, exists, isZombie, err
	}

	// Populate the cache so subsequent queries for this channel can be
	// answered without hitting the database.
	c.rejectCache.insert(chanID, rejectCacheEntry{
		upd1Time: upd1Time.Unix(),
		upd2Time: upd2Time.Unix(),
		flags:    packRejectFlags(exists, isZombie),
	})

	return upd1Time, upd2Time, exists, isZombie, nil
}
1231

1232
// AddEdgeProof sets the proof of an existing edge in the graph database.
1233
func (c *KVStore) AddEdgeProof(chanID lnwire.ShortChannelID,
1234
        proof *models.ChannelAuthProof) error {
4✔
1235

4✔
1236
        // Construct the channel's primary key which is the 8-byte channel ID.
4✔
1237
        var chanKey [8]byte
4✔
1238
        binary.BigEndian.PutUint64(chanKey[:], chanID.ToUint64())
4✔
1239

4✔
1240
        return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
8✔
1241
                edges := tx.ReadWriteBucket(edgeBucket)
4✔
1242
                if edges == nil {
4✔
1243
                        return ErrEdgeNotFound
×
1244
                }
×
1245

1246
                edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
4✔
1247
                if edgeIndex == nil {
4✔
1248
                        return ErrEdgeNotFound
×
1249
                }
×
1250

1251
                edge, err := fetchChanEdgeInfo(edgeIndex, chanKey[:])
4✔
1252
                if err != nil {
4✔
1253
                        return err
×
1254
                }
×
1255

1256
                edge.AuthProof = proof
4✔
1257

4✔
1258
                return putChanEdgeInfo(edgeIndex, &edge, chanKey)
4✔
1259
        }, func() {})
4✔
1260
}
1261

1262
const (
	// pruneTipBytes is the total size of the value which stores a prune
	// entry of the graph in the prune log. The "prune tip" is the last
	// entry in the prune log, and indicates if the channel graph is in
	// sync with the current UTXO state. The structure of the value
	// is: blockHash, taking 32 bytes total. The block height is not part
	// of the value; it is used as the entry's key in the prune log.
	pruneTipBytes = 32
)
1270

1271
// PruneGraph prunes newly closed channels from the channel graph in response
1272
// to a new block being solved on the network. Any transactions which spend the
1273
// funding output of any known channels within he graph will be deleted.
1274
// Additionally, the "prune tip", or the last block which has been used to
1275
// prune the graph is stored so callers can ensure the graph is fully in sync
1276
// with the current UTXO state. A slice of channels that have been closed by
1277
// the target block along with any pruned nodes are returned if the function
1278
// succeeds without error.
1279
func (c *KVStore) PruneGraph(spentOutputs []*wire.OutPoint,
1280
        blockHash *chainhash.Hash, blockHeight uint32) (
1281
        []*models.ChannelEdgeInfo, []route.Vertex, error) {
240✔
1282

240✔
1283
        c.cacheMu.Lock()
240✔
1284
        defer c.cacheMu.Unlock()
240✔
1285

240✔
1286
        var (
240✔
1287
                chansClosed []*models.ChannelEdgeInfo
240✔
1288
                prunedNodes []route.Vertex
240✔
1289
        )
240✔
1290

240✔
1291
        err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
480✔
1292
                // First grab the edges bucket which houses the information
240✔
1293
                // we'd like to delete
240✔
1294
                edges, err := tx.CreateTopLevelBucket(edgeBucket)
240✔
1295
                if err != nil {
240✔
1296
                        return err
×
1297
                }
×
1298

1299
                // Next grab the two edge indexes which will also need to be
1300
                // updated.
1301
                edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket)
240✔
1302
                if err != nil {
240✔
1303
                        return err
×
1304
                }
×
1305
                chanIndex, err := edges.CreateBucketIfNotExists(
240✔
1306
                        channelPointBucket,
240✔
1307
                )
240✔
1308
                if err != nil {
240✔
1309
                        return err
×
1310
                }
×
1311
                nodes := tx.ReadWriteBucket(nodeBucket)
240✔
1312
                if nodes == nil {
240✔
1313
                        return ErrSourceNodeNotSet
×
1314
                }
×
1315
                zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
240✔
1316
                if err != nil {
240✔
1317
                        return err
×
1318
                }
×
1319

1320
                // For each of the outpoints that have been spent within the
1321
                // block, we attempt to delete them from the graph as if that
1322
                // outpoint was a channel, then it has now been closed.
1323
                for _, chanPoint := range spentOutputs {
353✔
1324
                        // TODO(roasbeef): load channel bloom filter, continue
113✔
1325
                        // if NOT if filter
113✔
1326

113✔
1327
                        var opBytes bytes.Buffer
113✔
1328
                        err := WriteOutpoint(&opBytes, chanPoint)
113✔
1329
                        if err != nil {
113✔
1330
                                return err
×
1331
                        }
×
1332

1333
                        // First attempt to see if the channel exists within
1334
                        // the database, if not, then we can exit early.
1335
                        chanID := chanIndex.Get(opBytes.Bytes())
113✔
1336
                        if chanID == nil {
205✔
1337
                                continue
92✔
1338
                        }
1339

1340
                        // Attempt to delete the channel, an ErrEdgeNotFound
1341
                        // will be returned if that outpoint isn't known to be
1342
                        // a channel. If no error is returned, then a channel
1343
                        // was successfully pruned.
1344
                        edgeInfo, err := c.delChannelEdgeUnsafe(
21✔
1345
                                edges, edgeIndex, chanIndex, zombieIndex,
21✔
1346
                                chanID, false, false,
21✔
1347
                        )
21✔
1348
                        if err != nil && !errors.Is(err, ErrEdgeNotFound) {
21✔
1349
                                return err
×
1350
                        }
×
1351

1352
                        chansClosed = append(chansClosed, edgeInfo)
21✔
1353
                }
1354

1355
                metaBucket, err := tx.CreateTopLevelBucket(graphMetaBucket)
240✔
1356
                if err != nil {
240✔
1357
                        return err
×
1358
                }
×
1359

1360
                pruneBucket, err := metaBucket.CreateBucketIfNotExists(
240✔
1361
                        pruneLogBucket,
240✔
1362
                )
240✔
1363
                if err != nil {
240✔
1364
                        return err
×
1365
                }
×
1366

1367
                // With the graph pruned, add a new entry to the prune log,
1368
                // which can be used to check if the graph is fully synced with
1369
                // the current UTXO state.
1370
                var blockHeightBytes [4]byte
240✔
1371
                byteOrder.PutUint32(blockHeightBytes[:], blockHeight)
240✔
1372

240✔
1373
                var newTip [pruneTipBytes]byte
240✔
1374
                copy(newTip[:], blockHash[:])
240✔
1375

240✔
1376
                err = pruneBucket.Put(blockHeightBytes[:], newTip[:])
240✔
1377
                if err != nil {
240✔
1378
                        return err
×
1379
                }
×
1380

1381
                // Now that the graph has been pruned, we'll also attempt to
1382
                // prune any nodes that have had a channel closed within the
1383
                // latest block.
1384
                prunedNodes, err = c.pruneGraphNodes(nodes, edgeIndex)
240✔
1385

240✔
1386
                return err
240✔
1387
        }, func() {
240✔
1388
                chansClosed = nil
240✔
1389
                prunedNodes = nil
240✔
1390
        })
240✔
1391
        if err != nil {
240✔
1392
                return nil, nil, err
×
1393
        }
×
1394

1395
        for _, channel := range chansClosed {
261✔
1396
                c.rejectCache.remove(channel.ChannelID)
21✔
1397
                c.chanCache.remove(channel.ChannelID)
21✔
1398
        }
21✔
1399

1400
        return chansClosed, prunedNodes, nil
240✔
1401
}
1402

1403
// PruneGraphNodes is a garbage collection method which attempts to prune out
1404
// any nodes from the channel graph that are currently unconnected. This ensure
1405
// that we only maintain a graph of reachable nodes. In the event that a pruned
1406
// node gains more channels, it will be re-added back to the graph.
1407
func (c *KVStore) PruneGraphNodes() ([]route.Vertex, error) {
26✔
1408
        var prunedNodes []route.Vertex
26✔
1409
        err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
52✔
1410
                nodes := tx.ReadWriteBucket(nodeBucket)
26✔
1411
                if nodes == nil {
26✔
1412
                        return ErrGraphNodesNotFound
×
1413
                }
×
1414
                edges := tx.ReadWriteBucket(edgeBucket)
26✔
1415
                if edges == nil {
26✔
1416
                        return ErrGraphNotFound
×
1417
                }
×
1418
                edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
26✔
1419
                if edgeIndex == nil {
26✔
1420
                        return ErrGraphNoEdgesFound
×
1421
                }
×
1422

1423
                var err error
26✔
1424
                prunedNodes, err = c.pruneGraphNodes(nodes, edgeIndex)
26✔
1425
                if err != nil {
26✔
1426
                        return err
×
1427
                }
×
1428

1429
                return nil
26✔
1430
        }, func() {
26✔
1431
                prunedNodes = nil
26✔
1432
        })
26✔
1433

1434
        return prunedNodes, err
26✔
1435
}
1436

1437
// pruneGraphNodes attempts to remove any nodes from the graph who have had a
1438
// channel closed within the current block. If the node still has existing
1439
// channels in the graph, this will act as a no-op.
1440
func (c *KVStore) pruneGraphNodes(nodes kvdb.RwBucket,
1441
        edgeIndex kvdb.RwBucket) ([]route.Vertex, error) {
263✔
1442

263✔
1443
        log.Trace("Pruning nodes from graph with no open channels")
263✔
1444

263✔
1445
        // We'll retrieve the graph's source node to ensure we don't remove it
263✔
1446
        // even if it no longer has any open channels.
263✔
1447
        sourceNode, err := c.sourceNode(nodes)
263✔
1448
        if err != nil {
263✔
1449
                return nil, err
×
1450
        }
×
1451

1452
        // We'll use this map to keep count the number of references to a node
1453
        // in the graph. A node should only be removed once it has no more
1454
        // references in the graph.
1455
        nodeRefCounts := make(map[[33]byte]int)
263✔
1456
        err = nodes.ForEach(func(pubKey, nodeBytes []byte) error {
1,551✔
1457
                // If this is the source key, then we skip this
1,288✔
1458
                // iteration as the value for this key is a pubKey
1,288✔
1459
                // rather than raw node information.
1,288✔
1460
                if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 {
2,071✔
1461
                        return nil
783✔
1462
                }
783✔
1463

1464
                var nodePub [33]byte
508✔
1465
                copy(nodePub[:], pubKey)
508✔
1466
                nodeRefCounts[nodePub] = 0
508✔
1467

508✔
1468
                return nil
508✔
1469
        })
1470
        if err != nil {
263✔
1471
                return nil, err
×
1472
        }
×
1473

1474
        // To ensure we never delete the source node, we'll start off by
1475
        // bumping its ref count to 1.
1476
        nodeRefCounts[sourceNode.PubKeyBytes] = 1
263✔
1477

263✔
1478
        // Next, we'll run through the edgeIndex which maps a channel ID to the
263✔
1479
        // edge info. We'll use this scan to populate our reference count map
263✔
1480
        // above.
263✔
1481
        err = edgeIndex.ForEach(func(chanID, edgeInfoBytes []byte) error {
474✔
1482
                // The first 66 bytes of the edge info contain the pubkeys of
211✔
1483
                // the nodes that this edge attaches. We'll extract them, and
211✔
1484
                // add them to the ref count map.
211✔
1485
                var node1, node2 [33]byte
211✔
1486
                copy(node1[:], edgeInfoBytes[:33])
211✔
1487
                copy(node2[:], edgeInfoBytes[33:])
211✔
1488

211✔
1489
                // With the nodes extracted, we'll increase the ref count of
211✔
1490
                // each of the nodes.
211✔
1491
                nodeRefCounts[node1]++
211✔
1492
                nodeRefCounts[node2]++
211✔
1493

211✔
1494
                return nil
211✔
1495
        })
211✔
1496
        if err != nil {
263✔
1497
                return nil, err
×
1498
        }
×
1499

1500
        // Finally, we'll make a second pass over the set of nodes, and delete
1501
        // any nodes that have a ref count of zero.
1502
        var pruned []route.Vertex
263✔
1503
        for nodePubKey, refCount := range nodeRefCounts {
771✔
1504
                // If the ref count of the node isn't zero, then we can safely
508✔
1505
                // skip it as it still has edges to or from it within the
508✔
1506
                // graph.
508✔
1507
                if refCount != 0 {
956✔
1508
                        continue
448✔
1509
                }
1510

1511
                // If we reach this point, then there are no longer any edges
1512
                // that connect this node, so we can delete it.
1513
                err := c.deleteLightningNode(nodes, nodePubKey[:])
63✔
1514
                if err != nil {
63✔
1515
                        if errors.Is(err, ErrGraphNodeNotFound) ||
×
1516
                                errors.Is(err, ErrGraphNodesNotFound) {
×
1517

×
1518
                                log.Warnf("Unable to prune node %x from the "+
×
1519
                                        "graph: %v", nodePubKey, err)
×
1520
                                continue
×
1521
                        }
1522

1523
                        return nil, err
×
1524
                }
1525

1526
                log.Infof("Pruned unconnected node %x from channel graph",
63✔
1527
                        nodePubKey[:])
63✔
1528

63✔
1529
                pruned = append(pruned, nodePubKey)
63✔
1530
        }
1531

1532
        if len(pruned) > 0 {
310✔
1533
                log.Infof("Pruned %v unconnected nodes from the channel graph",
47✔
1534
                        len(pruned))
47✔
1535
        }
47✔
1536

1537
        return pruned, err
263✔
1538
}
1539

1540
// DisconnectBlockAtHeight is used to indicate that the block specified
// by the passed height has been disconnected from the main chain. This
// will "rewind" the graph back to the height below, deleting channels
// that are no longer confirmed from the graph. The prune log will be
// set to the last prune height valid for the remaining chain.
// Channels that were removed from the graph resulting from the
// disconnected block are returned.
func (c *KVStore) DisconnectBlockAtHeight(height uint32) (
	[]*models.ChannelEdgeInfo, error) {

	// Every channel having a ShortChannelID starting at 'height'
	// will no longer be confirmed.
	startShortChanID := lnwire.ShortChannelID{
		BlockHeight: height,
	}

	// Delete everything after this height from the db up until the
	// SCID alias range.
	endShortChanID := aliasmgr.StartingAlias

	// The block height will be the 3 first bytes of the channel IDs.
	var chanIDStart [8]byte
	byteOrder.PutUint64(chanIDStart[:], startShortChanID.ToUint64())
	var chanIDEnd [8]byte
	byteOrder.PutUint64(chanIDEnd[:], endShortChanID.ToUint64())

	// Hold the cache lock for the whole operation so the reject/chan
	// cache invalidation below stays consistent with the DB update.
	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()

	// Keep track of the channels that are removed from the graph.
	var removedChans []*models.ChannelEdgeInfo

	if err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
		edges, err := tx.CreateTopLevelBucket(edgeBucket)
		if err != nil {
			return err
		}
		edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket)
		if err != nil {
			return err
		}
		chanIndex, err := edges.CreateBucketIfNotExists(
			channelPointBucket,
		)
		if err != nil {
			return err
		}
		zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
		if err != nil {
			return err
		}

		// Scan from chanIDStart to chanIDEnd, deleting every
		// found edge.
		// NOTE: we must delete the edges after the cursor loop, since
		// modifying the bucket while traversing is not safe.
		// NOTE: We use a < comparison in bytes.Compare instead of <=
		// so that the StartingAlias itself isn't deleted.
		var keys [][]byte
		cursor := edgeIndex.ReadWriteCursor()

		//nolint:ll
		for k, _ := cursor.Seek(chanIDStart[:]); k != nil &&
			bytes.Compare(k, chanIDEnd[:]) < 0; k, _ = cursor.Next() {
			keys = append(keys, k)
		}

		// Second pass: perform the actual deletions. Edges already
		// missing (ErrEdgeNotFound) are tolerated.
		for _, k := range keys {
			edgeInfo, err := c.delChannelEdgeUnsafe(
				edges, edgeIndex, chanIndex, zombieIndex,
				k, false, false,
			)
			if err != nil && !errors.Is(err, ErrEdgeNotFound) {
				return err
			}

			removedChans = append(removedChans, edgeInfo)
		}

		// Delete all the entries in the prune log having a height
		// greater or equal to the block disconnected.
		metaBucket, err := tx.CreateTopLevelBucket(graphMetaBucket)
		if err != nil {
			return err
		}

		pruneBucket, err := metaBucket.CreateBucketIfNotExists(
			pruneLogBucket,
		)
		if err != nil {
			return err
		}

		var pruneKeyStart [4]byte
		byteOrder.PutUint32(pruneKeyStart[:], height)

		var pruneKeyEnd [4]byte
		byteOrder.PutUint32(pruneKeyEnd[:], math.MaxUint32)

		// To avoid modifying the bucket while traversing, we delete
		// the keys in a second loop.
		var pruneKeys [][]byte
		pruneCursor := pruneBucket.ReadWriteCursor()
		//nolint:ll
		for k, _ := pruneCursor.Seek(pruneKeyStart[:]); k != nil &&
			bytes.Compare(k, pruneKeyEnd[:]) <= 0; k, _ = pruneCursor.Next() {
			pruneKeys = append(pruneKeys, k)
		}

		for _, k := range pruneKeys {
			if err := pruneBucket.Delete(k); err != nil {
				return err
			}
		}

		return nil
	}, func() {
		removedChans = nil
	}); err != nil {
		return nil, err
	}

	// Evict the removed channels from the in-memory caches only after the
	// transaction committed successfully.
	for _, channel := range removedChans {
		c.rejectCache.remove(channel.ChannelID)
		c.chanCache.remove(channel.ChannelID)
	}

	return removedChans, nil
}
1669

1670
// PruneTip returns the block height and hash of the latest block that has been
1671
// used to prune channels in the graph. Knowing the "prune tip" allows callers
1672
// to tell if the graph is currently in sync with the current best known UTXO
1673
// state.
1674
func (c *KVStore) PruneTip() (*chainhash.Hash, uint32, error) {
56✔
1675
        var (
56✔
1676
                tipHash   chainhash.Hash
56✔
1677
                tipHeight uint32
56✔
1678
        )
56✔
1679

56✔
1680
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
112✔
1681
                graphMeta := tx.ReadBucket(graphMetaBucket)
56✔
1682
                if graphMeta == nil {
56✔
1683
                        return ErrGraphNotFound
×
1684
                }
×
1685
                pruneBucket := graphMeta.NestedReadBucket(pruneLogBucket)
56✔
1686
                if pruneBucket == nil {
56✔
1687
                        return ErrGraphNeverPruned
×
1688
                }
×
1689

1690
                pruneCursor := pruneBucket.ReadCursor()
56✔
1691

56✔
1692
                // The prune key with the largest block height will be our
56✔
1693
                // prune tip.
56✔
1694
                k, v := pruneCursor.Last()
56✔
1695
                if k == nil {
77✔
1696
                        return ErrGraphNeverPruned
21✔
1697
                }
21✔
1698

1699
                // Once we have the prune tip, the value will be the block hash,
1700
                // and the key the block height.
1701
                copy(tipHash[:], v)
38✔
1702
                tipHeight = byteOrder.Uint32(k)
38✔
1703

38✔
1704
                return nil
38✔
1705
        }, func() {})
56✔
1706
        if err != nil {
77✔
1707
                return nil, 0, err
21✔
1708
        }
21✔
1709

1710
        return &tipHash, tipHeight, nil
38✔
1711
}
1712

1713
// DeleteChannelEdges removes edges with the given channel IDs from the
// database and marks them as zombies. This ensures that we're unable to re-add
// it to our database once again. If an edge does not exist within the
// database, then ErrEdgeNotFound will be returned. If strictZombiePruning is
// true, then when we mark these edges as zombies, we'll set up the keys such
// that we require the node that failed to send the fresh update to be the one
// that resurrects the channel from its zombie state. The markZombie bool
// denotes whether or not to mark the channel as a zombie.
func (c *KVStore) DeleteChannelEdges(strictZombiePruning, markZombie bool,
	chanIDs ...uint64) ([]*models.ChannelEdgeInfo, error) {

	// TODO(roasbeef): possibly delete from node bucket if node has no more
	// channels
	// TODO(roasbeef): don't delete both edges?

	// Hold the cache lock for the full operation so cache eviction below
	// is atomic with respect to the DB transaction.
	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()

	var infos []*models.ChannelEdgeInfo
	err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
		edges := tx.ReadWriteBucket(edgeBucket)
		if edges == nil {
			return ErrEdgeNotFound
		}
		edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrEdgeNotFound
		}
		chanIndex := edges.NestedReadWriteBucket(channelPointBucket)
		if chanIndex == nil {
			return ErrEdgeNotFound
		}
		nodes := tx.ReadWriteBucket(nodeBucket)
		if nodes == nil {
			return ErrGraphNodeNotFound
		}
		zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
		if err != nil {
			return err
		}

		// Delete each requested channel in turn; the scratch buffer
		// for the big-endian channel ID is reused across iterations.
		var rawChanID [8]byte
		for _, chanID := range chanIDs {
			byteOrder.PutUint64(rawChanID[:], chanID)
			edgeInfo, err := c.delChannelEdgeUnsafe(
				edges, edgeIndex, chanIndex, zombieIndex,
				rawChanID[:], markZombie, strictZombiePruning,
			)
			if err != nil {
				return err
			}

			infos = append(infos, edgeInfo)
		}

		return nil
	}, func() {
		infos = nil
	})
	if err != nil {
		return nil, err
	}

	// Evict the deleted channels from the in-memory caches only after the
	// transaction committed.
	for _, chanID := range chanIDs {
		c.rejectCache.remove(chanID)
		c.chanCache.remove(chanID)
	}

	return infos, nil
}
1783

1784
// ChannelID attempt to lookup the 8-byte compact channel ID which maps to the
1785
// passed channel point (outpoint). If the passed channel doesn't exist within
1786
// the database, then ErrEdgeNotFound is returned.
1787
func (c *KVStore) ChannelID(chanPoint *wire.OutPoint) (uint64, error) {
4✔
1788
        var chanID uint64
4✔
1789
        if err := kvdb.View(c.db, func(tx kvdb.RTx) error {
8✔
1790
                var err error
4✔
1791
                chanID, err = getChanID(tx, chanPoint)
4✔
1792
                return err
4✔
1793
        }, func() {
8✔
1794
                chanID = 0
4✔
1795
        }); err != nil {
7✔
1796
                return 0, err
3✔
1797
        }
3✔
1798

1799
        return chanID, nil
4✔
1800
}
1801

1802
// getChanID returns the assigned channel ID for a given channel point.
1803
func getChanID(tx kvdb.RTx, chanPoint *wire.OutPoint) (uint64, error) {
4✔
1804
        var b bytes.Buffer
4✔
1805
        if err := WriteOutpoint(&b, chanPoint); err != nil {
4✔
1806
                return 0, err
×
1807
        }
×
1808

1809
        edges := tx.ReadBucket(edgeBucket)
4✔
1810
        if edges == nil {
4✔
1811
                return 0, ErrGraphNoEdgesFound
×
1812
        }
×
1813
        chanIndex := edges.NestedReadBucket(channelPointBucket)
4✔
1814
        if chanIndex == nil {
4✔
1815
                return 0, ErrGraphNoEdgesFound
×
1816
        }
×
1817

1818
        chanIDBytes := chanIndex.Get(b.Bytes())
4✔
1819
        if chanIDBytes == nil {
7✔
1820
                return 0, ErrEdgeNotFound
3✔
1821
        }
3✔
1822

1823
        chanID := byteOrder.Uint64(chanIDBytes)
4✔
1824

4✔
1825
        return chanID, nil
4✔
1826
}
1827

1828
// TODO(roasbeef): allow updates to use Batch?
1829

1830
// HighestChanID returns the "highest" known channel ID in the channel graph.
1831
// This represents the "newest" channel from the PoV of the chain. This method
1832
// can be used by peers to quickly determine if they're graphs are in sync.
1833
func (c *KVStore) HighestChanID() (uint64, error) {
6✔
1834
        var cid uint64
6✔
1835

6✔
1836
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
12✔
1837
                edges := tx.ReadBucket(edgeBucket)
6✔
1838
                if edges == nil {
6✔
1839
                        return ErrGraphNoEdgesFound
×
1840
                }
×
1841
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
6✔
1842
                if edgeIndex == nil {
6✔
1843
                        return ErrGraphNoEdgesFound
×
1844
                }
×
1845

1846
                // In order to find the highest chan ID, we'll fetch a cursor
1847
                // and use that to seek to the "end" of our known rage.
1848
                cidCursor := edgeIndex.ReadCursor()
6✔
1849

6✔
1850
                lastChanID, _ := cidCursor.Last()
6✔
1851

6✔
1852
                // If there's no key, then this means that we don't actually
6✔
1853
                // know of any channels, so we'll return a predicable error.
6✔
1854
                if lastChanID == nil {
10✔
1855
                        return ErrGraphNoEdgesFound
4✔
1856
                }
4✔
1857

1858
                // Otherwise, we'll de serialize the channel ID and return it
1859
                // to the caller.
1860
                cid = byteOrder.Uint64(lastChanID)
5✔
1861

5✔
1862
                return nil
5✔
1863
        }, func() {
6✔
1864
                cid = 0
6✔
1865
        })
6✔
1866
        if err != nil && !errors.Is(err, ErrGraphNoEdgesFound) {
6✔
1867
                return 0, err
×
1868
        }
×
1869

1870
        return cid, nil
6✔
1871
}
1872

1873
// ChannelEdge represents the complete set of information for a channel edge in
// the known channel graph. This struct couples the core information of the
// edge as well as each of the known advertised edge policies.
type ChannelEdge struct {
	// Info contains all the static information describing the channel.
	Info *models.ChannelEdgeInfo

	// Policy1 points to the "first" edge policy of the channel containing
	// the dynamic information required to properly route through the edge.
	// NOTE(review): appears nillable when no policy has been received yet
	// — confirm against fetchChanEdgePolicies.
	Policy1 *models.ChannelEdgePolicy

	// Policy2 points to the "second" edge policy of the channel containing
	// the dynamic information required to properly route through the edge.
	// NOTE(review): appears nillable when no policy has been received yet
	// — confirm against fetchChanEdgePolicies.
	Policy2 *models.ChannelEdgePolicy

	// Node1 is "node 1" in the channel. This is the node that would have
	// produced Policy1 if it exists.
	Node1 *models.LightningNode

	// Node2 is "node 2" in the channel. This is the node that would have
	// produced Policy2 if it exists.
	Node2 *models.LightningNode
}
1896

1897
// ChanUpdatesInHorizon returns all the known channel edges which have at least
// one edge that has an update timestamp within the specified horizon.
func (c *KVStore) ChanUpdatesInHorizon(startTime,
	endTime time.Time) ([]ChannelEdge, error) {

	// To ensure we don't return duplicate ChannelEdges, we'll use an
	// additional map to keep track of the edges already seen to prevent
	// re-adding it.
	var edgesSeen map[uint64]struct{}
	var edgesToCache map[uint64]ChannelEdge
	var edgesInHorizon []ChannelEdge

	// Hold the cache lock for the whole call so cache reads/inserts stay
	// consistent with the DB view.
	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()

	// hits counts how many channels were served from the in-memory cache,
	// used only for the debug log at the end.
	var hits int
	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrGraphNoEdgesFound
		}
		edgeUpdateIndex := edges.NestedReadBucket(edgeUpdateIndexBucket)
		if edgeUpdateIndex == nil {
			return ErrGraphNoEdgesFound
		}

		nodes := tx.ReadBucket(nodeBucket)
		if nodes == nil {
			return ErrGraphNodesNotFound
		}

		// We'll now obtain a cursor to perform a range query within
		// the index to find all channels within the horizon.
		updateCursor := edgeUpdateIndex.ReadCursor()

		// Index keys are <8-byte timestamp><8-byte channel ID>; for
		// the range bounds only the timestamp prefix is populated.
		var startTimeBytes, endTimeBytes [8 + 8]byte
		byteOrder.PutUint64(
			startTimeBytes[:8], uint64(startTime.Unix()),
		)
		byteOrder.PutUint64(
			endTimeBytes[:8], uint64(endTime.Unix()),
		)

		// With our start and end times constructed, we'll step through
		// the index collecting the info and policy of each update of
		// each channel that has a last update within the time range.
		//
		//nolint:ll
		for indexKey, _ := updateCursor.Seek(startTimeBytes[:]); indexKey != nil &&
			bytes.Compare(indexKey, endTimeBytes[:]) <= 0; indexKey, _ = updateCursor.Next() {
			// We have a new eligible entry, so we'll slice of the
			// chan ID so we can query it in the DB.
			chanID := indexKey[8:]

			// If we've already retrieved the info and policies for
			// this edge, then we can skip it as we don't need to do
			// so again.
			chanIDInt := byteOrder.Uint64(chanID)
			if _, ok := edgesSeen[chanIDInt]; ok {
				continue
			}

			// Fast path: serve the channel from the in-memory
			// cache when possible.
			if channel, ok := c.chanCache.get(chanIDInt); ok {
				hits++
				edgesSeen[chanIDInt] = struct{}{}
				edgesInHorizon = append(edgesInHorizon, channel)

				continue
			}

			// First, we'll fetch the static edge information.
			edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID)
			if err != nil {
				chanID := byteOrder.Uint64(chanID)
				return fmt.Errorf("unable to fetch info for "+
					"edge with chan_id=%v: %v", chanID, err)
			}

			// With the static information obtained, we'll now
			// fetch the dynamic policy info.
			edge1, edge2, err := fetchChanEdgePolicies(
				edgeIndex, edges, chanID,
			)
			if err != nil {
				chanID := byteOrder.Uint64(chanID)
				return fmt.Errorf("unable to fetch policies "+
					"for edge with chan_id=%v: %v", chanID,
					err)
			}

			node1, err := fetchLightningNode(
				nodes, edgeInfo.NodeKey1Bytes[:],
			)
			if err != nil {
				return err
			}

			node2, err := fetchLightningNode(
				nodes, edgeInfo.NodeKey2Bytes[:],
			)
			if err != nil {
				return err
			}

			// Finally, we'll collate this edge with the rest of
			// edges to be returned.
			edgesSeen[chanIDInt] = struct{}{}
			channel := ChannelEdge{
				Info:    &edgeInfo,
				Policy1: edge1,
				Policy2: edge2,
				Node1:   &node1,
				Node2:   &node2,
			}
			edgesInHorizon = append(edgesInHorizon, channel)
			edgesToCache[chanIDInt] = channel
		}

		return nil
	}, func() {
		edgesSeen = make(map[uint64]struct{})
		edgesToCache = make(map[uint64]ChannelEdge)
		edgesInHorizon = nil
	})
	// An empty graph is not an error for this query; only unexpected
	// failures are propagated.
	switch {
	case errors.Is(err, ErrGraphNoEdgesFound):
		fallthrough
	case errors.Is(err, ErrGraphNodesNotFound):
		break

	case err != nil:
		return nil, err
	}

	// Insert any edges loaded from disk into the cache.
	for chanid, channel := range edgesToCache {
		c.chanCache.insert(chanid, channel)
	}

	log.Debugf("ChanUpdatesInHorizon hit percentage: %f (%d/%d)",
		float64(hits)/float64(len(edgesInHorizon)), hits,
		len(edgesInHorizon))

	return edgesInHorizon, nil
}
2046

2047
// NodeUpdatesInHorizon returns all the known lightning node which have an
2048
// update timestamp within the passed range. This method can be used by two
2049
// nodes to quickly determine if they have the same set of up to date node
2050
// announcements.
2051
func (c *KVStore) NodeUpdatesInHorizon(startTime,
2052
        endTime time.Time) ([]models.LightningNode, error) {
11✔
2053

11✔
2054
        var nodesInHorizon []models.LightningNode
11✔
2055

11✔
2056
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
22✔
2057
                nodes := tx.ReadBucket(nodeBucket)
11✔
2058
                if nodes == nil {
11✔
2059
                        return ErrGraphNodesNotFound
×
2060
                }
×
2061

2062
                nodeUpdateIndex := nodes.NestedReadBucket(nodeUpdateIndexBucket)
11✔
2063
                if nodeUpdateIndex == nil {
11✔
2064
                        return ErrGraphNodesNotFound
×
2065
                }
×
2066

2067
                // We'll now obtain a cursor to perform a range query within
2068
                // the index to find all node announcements within the horizon.
2069
                updateCursor := nodeUpdateIndex.ReadCursor()
11✔
2070

11✔
2071
                var startTimeBytes, endTimeBytes [8 + 33]byte
11✔
2072
                byteOrder.PutUint64(
11✔
2073
                        startTimeBytes[:8], uint64(startTime.Unix()),
11✔
2074
                )
11✔
2075
                byteOrder.PutUint64(
11✔
2076
                        endTimeBytes[:8], uint64(endTime.Unix()),
11✔
2077
                )
11✔
2078

11✔
2079
                // With our start and end times constructed, we'll step through
11✔
2080
                // the index collecting info for each node within the time
11✔
2081
                // range.
11✔
2082
                //
11✔
2083
                //nolint:ll
11✔
2084
                for indexKey, _ := updateCursor.Seek(startTimeBytes[:]); indexKey != nil &&
11✔
2085
                        bytes.Compare(indexKey, endTimeBytes[:]) <= 0; indexKey, _ = updateCursor.Next() {
43✔
2086
                        nodePub := indexKey[8:]
32✔
2087
                        node, err := fetchLightningNode(nodes, nodePub)
32✔
2088
                        if err != nil {
32✔
2089
                                return err
×
2090
                        }
×
2091

2092
                        nodesInHorizon = append(nodesInHorizon, node)
32✔
2093
                }
2094

2095
                return nil
11✔
2096
        }, func() {
11✔
2097
                nodesInHorizon = nil
11✔
2098
        })
11✔
2099
        switch {
11✔
2100
        case errors.Is(err, ErrGraphNoEdgesFound):
×
2101
                fallthrough
×
2102
        case errors.Is(err, ErrGraphNodesNotFound):
×
2103
                break
×
2104

2105
        case err != nil:
×
2106
                return nil, err
×
2107
        }
2108

2109
        return nodesInHorizon, nil
11✔
2110
}
2111

2112
// FilterKnownChanIDs takes a set of channel IDs and return the subset of chan
2113
// ID's that we don't know and are not known zombies of the passed set. In other
2114
// words, we perform a set difference of our set of chan ID's and the ones
2115
// passed in. This method can be used by callers to determine the set of
2116
// channels another peer knows of that we don't. The ChannelUpdateInfos for the
2117
// known zombies is also returned.
2118
func (c *KVStore) FilterKnownChanIDs(chansInfo []ChannelUpdateInfo) ([]uint64,
2119
        []ChannelUpdateInfo, error) {
126✔
2120

126✔
2121
        var (
126✔
2122
                newChanIDs   []uint64
126✔
2123
                knownZombies []ChannelUpdateInfo
126✔
2124
        )
126✔
2125

126✔
2126
        c.cacheMu.Lock()
126✔
2127
        defer c.cacheMu.Unlock()
126✔
2128

126✔
2129
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
252✔
2130
                edges := tx.ReadBucket(edgeBucket)
126✔
2131
                if edges == nil {
126✔
2132
                        return ErrGraphNoEdgesFound
×
2133
                }
×
2134
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
126✔
2135
                if edgeIndex == nil {
126✔
2136
                        return ErrGraphNoEdgesFound
×
2137
                }
×
2138

2139
                // Fetch the zombie index, it may not exist if no edges have
2140
                // ever been marked as zombies. If the index has been
2141
                // initialized, we will use it later to skip known zombie edges.
2142
                zombieIndex := edges.NestedReadBucket(zombieBucket)
126✔
2143

126✔
2144
                // We'll run through the set of chanIDs and collate only the
126✔
2145
                // set of channel that are unable to be found within our db.
126✔
2146
                var cidBytes [8]byte
126✔
2147
                for _, info := range chansInfo {
219✔
2148
                        scid := info.ShortChannelID.ToUint64()
93✔
2149
                        byteOrder.PutUint64(cidBytes[:], scid)
93✔
2150

93✔
2151
                        // If the edge is already known, skip it.
93✔
2152
                        if v := edgeIndex.Get(cidBytes[:]); v != nil {
112✔
2153
                                continue
19✔
2154
                        }
2155

2156
                        // If the edge is a known zombie, skip it.
2157
                        if zombieIndex != nil {
154✔
2158
                                isZombie, _, _ := isZombieEdge(
77✔
2159
                                        zombieIndex, scid,
77✔
2160
                                )
77✔
2161

77✔
2162
                                if isZombie {
115✔
2163
                                        knownZombies = append(
38✔
2164
                                                knownZombies, info,
38✔
2165
                                        )
38✔
2166

38✔
2167
                                        continue
38✔
2168
                                }
2169
                        }
2170

2171
                        newChanIDs = append(newChanIDs, scid)
39✔
2172
                }
2173

2174
                return nil
126✔
2175
        }, func() {
126✔
2176
                newChanIDs = nil
126✔
2177
                knownZombies = nil
126✔
2178
        })
126✔
2179
        switch {
126✔
2180
        // If we don't know of any edges yet, then we'll return the entire set
2181
        // of chan IDs specified.
2182
        case errors.Is(err, ErrGraphNoEdgesFound):
×
2183
                ogChanIDs := make([]uint64, len(chansInfo))
×
2184
                for i, info := range chansInfo {
×
2185
                        ogChanIDs[i] = info.ShortChannelID.ToUint64()
×
2186
                }
×
2187

2188
                return ogChanIDs, nil, nil
×
2189

2190
        case err != nil:
×
2191
                return nil, nil, err
×
2192
        }
2193

2194
        return newChanIDs, knownZombies, nil
126✔
2195
}
2196

2197
// ChannelUpdateInfo couples the SCID of a channel with the timestamps of the
// latest received channel updates for the channel.
type ChannelUpdateInfo struct {
	// ShortChannelID is the SCID identifier of the channel.
	ShortChannelID lnwire.ShortChannelID

	// Node1UpdateTimestamp is the timestamp of the latest received update
	// from the node 1 channel peer. This will be set to zero time if no
	// update has yet been received from this node.
	Node1UpdateTimestamp time.Time

	// Node2UpdateTimestamp is the timestamp of the latest received update
	// from the node 2 channel peer. This will be set to zero time if no
	// update has yet been received from this node.
	Node2UpdateTimestamp time.Time
}
2213

2214
// NewChannelUpdateInfo is a constructor which makes sure we initialize the
2215
// timestamps with zero seconds unix timestamp which equals
2216
// `January 1, 1970, 00:00:00 UTC` in case the value is `time.Time{}`.
2217
func NewChannelUpdateInfo(scid lnwire.ShortChannelID, node1Timestamp,
2218
        node2Timestamp time.Time) ChannelUpdateInfo {
199✔
2219

199✔
2220
        chanInfo := ChannelUpdateInfo{
199✔
2221
                ShortChannelID:       scid,
199✔
2222
                Node1UpdateTimestamp: node1Timestamp,
199✔
2223
                Node2UpdateTimestamp: node2Timestamp,
199✔
2224
        }
199✔
2225

199✔
2226
        if node1Timestamp.IsZero() {
388✔
2227
                chanInfo.Node1UpdateTimestamp = time.Unix(0, 0)
189✔
2228
        }
189✔
2229

2230
        if node2Timestamp.IsZero() {
388✔
2231
                chanInfo.Node2UpdateTimestamp = time.Unix(0, 0)
189✔
2232
        }
189✔
2233

2234
        return chanInfo
199✔
2235
}
2236

2237
// BlockChannelRange represents a range of channels for a given block height.
type BlockChannelRange struct {
	// Height is the height of the block all of the channels below were
	// included in.
	Height uint32

	// Channels is the list of channels identified by their short ID
	// representation known to us that were included in the block height
	// above. The list may include channel update timestamp information if
	// requested.
	Channels []ChannelUpdateInfo
}
2249

2250
// FilterChannelRange returns the channel ID's of all known channels which were
// mined in a block height within the passed range. The channel IDs are grouped
// by their common block height. This method can be used to quickly share with a
// peer the set of channels we know of within a particular range to catch them
// up after a period of time offline. If withTimestamps is true then the
// timestamp info of the latest received channel update messages of the channel
// will be included in the response.
func (c *KVStore) FilterChannelRange(startHeight,
	endHeight uint32, withTimestamps bool) ([]BlockChannelRange, error) {

	// The start of the scan is the lowest SCID possible at startHeight
	// (all non-height fields zero).
	startChanID := &lnwire.ShortChannelID{
		BlockHeight: startHeight,
	}

	// The end of the scan is the highest SCID possible at endHeight: max
	// out the tx index (24 bits used in an SCID) and output position.
	endChanID := lnwire.ShortChannelID{
		BlockHeight: endHeight,
		TxIndex:     math.MaxUint32 & 0x00ffffff,
		TxPosition:  math.MaxUint16,
	}

	// As we need to perform a range scan, we'll convert the starting and
	// ending height to their corresponding values when encoded using short
	// channel ID's.
	var chanIDStart, chanIDEnd [8]byte
	byteOrder.PutUint64(chanIDStart[:], startChanID.ToUint64())
	byteOrder.PutUint64(chanIDEnd[:], endChanID.ToUint64())

	var channelsPerBlock map[uint32][]ChannelUpdateInfo
	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrGraphNoEdgesFound
		}

		cursor := edgeIndex.ReadCursor()

		// We'll now iterate through the database, and find each
		// channel ID that resides within the specified range.
		//
		//nolint:ll
		for k, v := cursor.Seek(chanIDStart[:]); k != nil &&
			bytes.Compare(k, chanIDEnd[:]) <= 0; k, v = cursor.Next() {
			// Don't send alias SCIDs during gossip sync: channels
			// without an announcement proof (AuthProof == nil) are
			// skipped below.
			edgeReader := bytes.NewReader(v)
			edgeInfo, err := deserializeChanEdgeInfo(edgeReader)
			if err != nil {
				return err
			}

			if edgeInfo.AuthProof == nil {
				continue
			}

			// This channel ID rests within the target range, so
			// we'll add it to our returned set.
			rawCid := byteOrder.Uint64(k)
			cid := lnwire.NewShortChanIDFromInt(rawCid)

			// Start with zero-value timestamps; the constructor
			// normalizes them to the unix epoch.
			chanInfo := NewChannelUpdateInfo(
				cid, time.Time{}, time.Time{},
			)

			// Without timestamps requested, we can record the
			// channel and move on to the next entry.
			if !withTimestamps {
				channelsPerBlock[cid.BlockHeight] = append(
					channelsPerBlock[cid.BlockHeight],
					chanInfo,
				)

				continue
			}

			// Timestamps were requested: look up both directed
			// policies and pull their last-update times.
			node1Key, node2Key := computeEdgePolicyKeys(&edgeInfo)

			rawPolicy := edges.Get(node1Key)
			if len(rawPolicy) != 0 {
				r := bytes.NewReader(rawPolicy)

				// A missing optional field is tolerated; any
				// other deserialization error aborts the scan.
				edge, err := deserializeChanEdgePolicyRaw(r)
				if err != nil && !errors.Is(
					err, ErrEdgePolicyOptionalFieldNotFound,
				) {

					return err
				}

				chanInfo.Node1UpdateTimestamp = edge.LastUpdate
			}

			rawPolicy = edges.Get(node2Key)
			if len(rawPolicy) != 0 {
				r := bytes.NewReader(rawPolicy)

				edge, err := deserializeChanEdgePolicyRaw(r)
				if err != nil && !errors.Is(
					err, ErrEdgePolicyOptionalFieldNotFound,
				) {

					return err
				}

				chanInfo.Node2UpdateTimestamp = edge.LastUpdate
			}

			channelsPerBlock[cid.BlockHeight] = append(
				channelsPerBlock[cid.BlockHeight], chanInfo,
			)
		}

		return nil
	}, func() {
		channelsPerBlock = make(map[uint32][]ChannelUpdateInfo)
	})

	switch {
	// If we don't know of any channels yet, then there's nothing to
	// filter, so we'll return an empty slice.
	case errors.Is(err, ErrGraphNoEdgesFound) || len(channelsPerBlock) == 0:
		return nil, nil

	case err != nil:
		return nil, err
	}

	// Return the channel ranges in ascending block height order.
	blocks := make([]uint32, 0, len(channelsPerBlock))
	for block := range channelsPerBlock {
		blocks = append(blocks, block)
	}
	sort.Slice(blocks, func(i, j int) bool {
		return blocks[i] < blocks[j]
	})

	channelRanges := make([]BlockChannelRange, 0, len(channelsPerBlock))
	for _, block := range blocks {
		channelRanges = append(channelRanges, BlockChannelRange{
			Height:   block,
			Channels: channelsPerBlock[block],
		})
	}

	return channelRanges, nil
}
2396

2397
// FetchChanInfos returns the set of channel edges that correspond to the passed
// channel ID's. If an edge in the query is unknown to the database, it will be
// skipped and the result will contain only those edges that exist at the time
// of the query. This can be used to respond to peer queries that are seeking to
// fill in gaps in their view of the channel graph.
func (c *KVStore) FetchChanInfos(chanIDs []uint64) ([]ChannelEdge, error) {
	return c.fetchChanInfos(nil, chanIDs)
}
2405

2406
// fetchChanInfos returns the set of channel edges that correspond to the passed
// channel ID's. If an edge in the query is unknown to the database, it will be
// skipped and the result will contain only those edges that exist at the time
// of the query. This can be used to respond to peer queries that are seeking to
// fill in gaps in their view of the channel graph.
//
// NOTE: An optional transaction may be provided. If none is provided, then a
// new one will be created.
func (c *KVStore) fetchChanInfos(tx kvdb.RTx, chanIDs []uint64) (
	[]ChannelEdge, error) {
	// TODO(roasbeef): sort cids?

	var (
		chanEdges []ChannelEdge
		cidBytes  [8]byte
	)

	// The body of the query is defined as a closure so it can be run
	// either within the caller's transaction or within a fresh read
	// transaction below.
	fetchChanInfos := func(tx kvdb.RTx) error {
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrGraphNoEdgesFound
		}
		nodes := tx.ReadBucket(nodeBucket)
		if nodes == nil {
			return ErrGraphNotFound
		}

		for _, cid := range chanIDs {
			byteOrder.PutUint64(cidBytes[:], cid)

			// First, we'll fetch the static edge information. If
			// the edge is unknown, we will skip the edge and
			// continue gathering all known edges.
			edgeInfo, err := fetchChanEdgeInfo(
				edgeIndex, cidBytes[:],
			)
			switch {
			case errors.Is(err, ErrEdgeNotFound):
				continue
			case err != nil:
				return err
			}

			// With the static information obtained, we'll now
			// fetch the dynamic policy info.
			edge1, edge2, err := fetchChanEdgePolicies(
				edgeIndex, edges, cidBytes[:],
			)
			if err != nil {
				return err
			}

			// Finally, resolve both endpoint nodes of the channel.
			node1, err := fetchLightningNode(
				nodes, edgeInfo.NodeKey1Bytes[:],
			)
			if err != nil {
				return err
			}

			node2, err := fetchLightningNode(
				nodes, edgeInfo.NodeKey2Bytes[:],
			)
			if err != nil {
				return err
			}

			chanEdges = append(chanEdges, ChannelEdge{
				Info:    &edgeInfo,
				Policy1: edge1,
				Policy2: edge2,
				Node1:   &node1,
				Node2:   &node2,
			})
		}

		return nil
	}

	// No transaction provided: run the query in its own read transaction,
	// resetting the accumulated results if the view is retried.
	if tx == nil {
		err := kvdb.View(c.db, fetchChanInfos, func() {
			chanEdges = nil
		})
		if err != nil {
			return nil, err
		}

		return chanEdges, nil
	}

	// Otherwise, run it within the caller's existing transaction.
	err := fetchChanInfos(tx)
	if err != nil {
		return nil, err
	}

	return chanEdges, nil
}
2506

2507
func delEdgeUpdateIndexEntry(edgesBucket kvdb.RwBucket, chanID uint64,
2508
        edge1, edge2 *models.ChannelEdgePolicy) error {
135✔
2509

135✔
2510
        // First, we'll fetch the edge update index bucket which currently
135✔
2511
        // stores an entry for the channel we're about to delete.
135✔
2512
        updateIndex := edgesBucket.NestedReadWriteBucket(edgeUpdateIndexBucket)
135✔
2513
        if updateIndex == nil {
135✔
2514
                // No edges in bucket, return early.
×
2515
                return nil
×
2516
        }
×
2517

2518
        // Now that we have the bucket, we'll attempt to construct a template
2519
        // for the index key: updateTime || chanid.
2520
        var indexKey [8 + 8]byte
135✔
2521
        byteOrder.PutUint64(indexKey[8:], chanID)
135✔
2522

135✔
2523
        // With the template constructed, we'll attempt to delete an entry that
135✔
2524
        // would have been created by both edges: we'll alternate the update
135✔
2525
        // times, as one may had overridden the other.
135✔
2526
        if edge1 != nil {
148✔
2527
                byteOrder.PutUint64(
13✔
2528
                        indexKey[:8], uint64(edge1.LastUpdate.Unix()),
13✔
2529
                )
13✔
2530
                if err := updateIndex.Delete(indexKey[:]); err != nil {
13✔
2531
                        return err
×
2532
                }
×
2533
        }
2534

2535
        // We'll also attempt to delete the entry that may have been created by
2536
        // the second edge.
2537
        if edge2 != nil {
150✔
2538
                byteOrder.PutUint64(
15✔
2539
                        indexKey[:8], uint64(edge2.LastUpdate.Unix()),
15✔
2540
                )
15✔
2541
                if err := updateIndex.Delete(indexKey[:]); err != nil {
15✔
2542
                        return err
×
2543
                }
×
2544
        }
2545

2546
        return nil
135✔
2547
}
2548

2549
// delChannelEdgeUnsafe deletes the edge with the given chanID from the graph
// cache. It then goes on to delete any policy info and edge info for this
// channel from the DB and finally, if isZombie is true, it will add an entry
// for this channel in the zombie index.
//
// NOTE: this method MUST only be called if the cacheMu has already been
// acquired.
func (c *KVStore) delChannelEdgeUnsafe(edges, edgeIndex, chanIndex,
	zombieIndex kvdb.RwBucket, chanID []byte, isZombie,
	strictZombie bool) (*models.ChannelEdgeInfo, error) {

	edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID)
	if err != nil {
		return nil, err
	}

	// We'll also remove the entry in the edge update index bucket before
	// we delete the edges themselves so we can access their last update
	// times.
	cid := byteOrder.Uint64(chanID)
	edge1, edge2, err := fetchChanEdgePolicies(edgeIndex, edges, chanID)
	if err != nil {
		return nil, err
	}
	err = delEdgeUpdateIndexEntry(edges, cid, edge1, edge2)
	if err != nil {
		return nil, err
	}

	// The edge key is of the format pubKey || chanID. First we construct
	// the latter half, populating the channel ID.
	var edgeKey [33 + 8]byte
	copy(edgeKey[33:], chanID)

	// With the latter half constructed, copy over the first public key to
	// delete the edge in this direction, then the second to delete the
	// edge in the opposite direction.
	copy(edgeKey[:33], edgeInfo.NodeKey1Bytes[:])
	if edges.Get(edgeKey[:]) != nil {
		if err := edges.Delete(edgeKey[:]); err != nil {
			return nil, err
		}
	}
	copy(edgeKey[:33], edgeInfo.NodeKey2Bytes[:])
	if edges.Get(edgeKey[:]) != nil {
		if err := edges.Delete(edgeKey[:]); err != nil {
			return nil, err
		}
	}

	// As part of deleting the edge we also remove all disabled entries
	// from the edgePolicyDisabledIndex bucket. We do that for both
	// directions (direction flags false and true).
	err = updateEdgePolicyDisabledIndex(edges, cid, false, false)
	if err != nil {
		return nil, err
	}
	err = updateEdgePolicyDisabledIndex(edges, cid, true, false)
	if err != nil {
		return nil, err
	}

	// With the edge data deleted, we can purge the information from the two
	// edge indexes: the SCID index and the channel-point index.
	if err := edgeIndex.Delete(chanID); err != nil {
		return nil, err
	}
	var b bytes.Buffer
	if err := WriteOutpoint(&b, &edgeInfo.ChannelPoint); err != nil {
		return nil, err
	}
	if err := chanIndex.Delete(b.Bytes()); err != nil {
		return nil, err
	}

	// Finally, we'll mark the edge as a zombie within our index if it's
	// being removed due to the channel becoming a zombie. We do this to
	// ensure we don't store unnecessary data for spent channels.
	if !isZombie {
		return &edgeInfo, nil
	}

	// In strict mode, only the node that fell behind (or neither, if we
	// have no updates) may resurrect the channel; makeZombiePubkeys
	// blanks out the appropriate key(s).
	nodeKey1, nodeKey2 := edgeInfo.NodeKey1Bytes, edgeInfo.NodeKey2Bytes
	if strictZombie {
		nodeKey1, nodeKey2 = makeZombiePubkeys(&edgeInfo, edge1, edge2)
	}

	return &edgeInfo, markEdgeZombie(
		zombieIndex, byteOrder.Uint64(chanID), nodeKey1, nodeKey2,
	)
}
2640

2641
// makeZombiePubkeys derives the node pubkeys to store in the zombie index for a
2642
// particular pair of channel policies. The return values are one of:
2643
//  1. (pubkey1, pubkey2)
2644
//  2. (pubkey1, blank)
2645
//  3. (blank, pubkey2)
2646
//
2647
// A blank pubkey means that corresponding node will be unable to resurrect a
2648
// channel on its own. For example, node1 may continue to publish recent
2649
// updates, but node2 has fallen way behind. After marking an edge as a zombie,
2650
// we don't want another fresh update from node1 to resurrect, as the edge can
2651
// only become live once node2 finally sends something recent.
2652
//
2653
// In the case where we have neither update, we allow either party to resurrect
2654
// the channel. If the channel were to be marked zombie again, it would be
2655
// marked with the correct lagging channel since we received an update from only
2656
// one side.
2657
func makeZombiePubkeys(info *models.ChannelEdgeInfo,
2658
        e1, e2 *models.ChannelEdgePolicy) ([33]byte, [33]byte) {
3✔
2659

3✔
2660
        switch {
3✔
2661
        // If we don't have either edge policy, we'll return both pubkeys so
2662
        // that the channel can be resurrected by either party.
2663
        case e1 == nil && e2 == nil:
×
2664
                return info.NodeKey1Bytes, info.NodeKey2Bytes
×
2665

2666
        // If we're missing edge1, or if both edges are present but edge1 is
2667
        // older, we'll return edge1's pubkey and a blank pubkey for edge2. This
2668
        // means that only an update from edge1 will be able to resurrect the
2669
        // channel.
2670
        case e1 == nil || (e2 != nil && e1.LastUpdate.Before(e2.LastUpdate)):
1✔
2671
                return info.NodeKey1Bytes, [33]byte{}
1✔
2672

2673
        // Otherwise, we're missing edge2 or edge2 is the older side, so we
2674
        // return a blank pubkey for edge1. In this case, only an update from
2675
        // edge2 can resurect the channel.
2676
        default:
2✔
2677
                return [33]byte{}, info.NodeKey2Bytes
2✔
2678
        }
2679
}
2680

2681
// UpdateEdgePolicy updates the edge routing policy for a single directed edge
2682
// within the database for the referenced channel. The `flags` attribute within
2683
// the ChannelEdgePolicy determines which of the directed edges are being
2684
// updated. If the flag is 1, then the first node's information is being
2685
// updated, otherwise it's the second node's information. The node ordering is
2686
// determined by the lexicographical ordering of the identity public keys of the
2687
// nodes on either side of the channel.
2688
func (c *KVStore) UpdateEdgePolicy(edge *models.ChannelEdgePolicy,
2689
        opts ...batch.SchedulerOption) (route.Vertex, route.Vertex, error) {
2,668✔
2690

2,668✔
2691
        var (
2,668✔
2692
                isUpdate1    bool
2,668✔
2693
                edgeNotFound bool
2,668✔
2694
                from, to     route.Vertex
2,668✔
2695
        )
2,668✔
2696

2,668✔
2697
        r := &batch.Request{
2,668✔
2698
                Opts: batch.NewSchedulerOptions(opts...),
2,668✔
2699
                Reset: func() {
5,336✔
2700
                        isUpdate1 = false
2,668✔
2701
                        edgeNotFound = false
2,668✔
2702
                },
2,668✔
2703
                Update: func(tx kvdb.RwTx) error {
2,668✔
2704
                        var err error
2,668✔
2705
                        from, to, isUpdate1, err = updateEdgePolicy(tx, edge)
2,668✔
2706
                        if err != nil {
2,671✔
2707
                                log.Errorf("UpdateEdgePolicy faild: %v", err)
3✔
2708
                        }
3✔
2709

2710
                        // Silence ErrEdgeNotFound so that the batch can
2711
                        // succeed, but propagate the error via local state.
2712
                        if errors.Is(err, ErrEdgeNotFound) {
2,671✔
2713
                                edgeNotFound = true
3✔
2714
                                return nil
3✔
2715
                        }
3✔
2716

2717
                        return err
2,665✔
2718
                },
2719
                OnCommit: func(err error) error {
2,668✔
2720
                        switch {
2,668✔
2721
                        case err != nil:
×
2722
                                return err
×
2723
                        case edgeNotFound:
3✔
2724
                                return ErrEdgeNotFound
3✔
2725
                        default:
2,665✔
2726
                                c.updateEdgeCache(edge, isUpdate1)
2,665✔
2727
                                return nil
2,665✔
2728
                        }
2729
                },
2730
        }
2731

2732
        err := c.chanScheduler.Execute(r)
2,668✔
2733

2,668✔
2734
        return from, to, err
2,668✔
2735
}
2736

2737
func (c *KVStore) updateEdgeCache(e *models.ChannelEdgePolicy,
2738
        isUpdate1 bool) {
2,665✔
2739

2,665✔
2740
        // If an entry for this channel is found in reject cache, we'll modify
2,665✔
2741
        // the entry with the updated timestamp for the direction that was just
2,665✔
2742
        // written. If the edge doesn't exist, we'll load the cache entry lazily
2,665✔
2743
        // during the next query for this edge.
2,665✔
2744
        if entry, ok := c.rejectCache.get(e.ChannelID); ok {
2,673✔
2745
                if isUpdate1 {
14✔
2746
                        entry.upd1Time = e.LastUpdate.Unix()
6✔
2747
                } else {
11✔
2748
                        entry.upd2Time = e.LastUpdate.Unix()
5✔
2749
                }
5✔
2750
                c.rejectCache.insert(e.ChannelID, entry)
8✔
2751
        }
2752

2753
        // If an entry for this channel is found in channel cache, we'll modify
2754
        // the entry with the updated policy for the direction that was just
2755
        // written. If the edge doesn't exist, we'll defer loading the info and
2756
        // policies and lazily read from disk during the next query.
2757
        if channel, ok := c.chanCache.get(e.ChannelID); ok {
2,668✔
2758
                if isUpdate1 {
6✔
2759
                        channel.Policy1 = e
3✔
2760
                } else {
6✔
2761
                        channel.Policy2 = e
3✔
2762
                }
3✔
2763
                c.chanCache.insert(e.ChannelID, channel)
3✔
2764
        }
2765
}
2766

2767
// updateEdgePolicy attempts to update an edge's policy within the relevant
// buckets using an existing database transaction. It returns the public keys
// of the from and to nodes for the policy's direction, along with a boolean
// that is true if the updated policy belongs to node1, and false if the
// policy belonged to node2. ErrEdgeNotFound is returned if the edge (or the
// buckets that index it) cannot be found.
func updateEdgePolicy(tx kvdb.RwTx, edge *models.ChannelEdgePolicy) (
	route.Vertex, route.Vertex, bool, error) {

	// noVertex is the zero-value vertex returned alongside any error.
	var noVertex route.Vertex

	edges := tx.ReadWriteBucket(edgeBucket)
	if edges == nil {
		return noVertex, noVertex, false, ErrEdgeNotFound
	}
	edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
	if edgeIndex == nil {
		return noVertex, noVertex, false, ErrEdgeNotFound
	}

	// Create the channelID key by converting the channel ID
	// integer into a byte slice.
	var chanID [8]byte
	byteOrder.PutUint64(chanID[:], edge.ChannelID)

	// With the channel ID, we then fetch the value storing the two
	// nodes which connect this channel edge.
	nodeInfo := edgeIndex.Get(chanID[:])
	if nodeInfo == nil {
		return noVertex, noVertex, false, ErrEdgeNotFound
	}

	// Depending on the direction bit of the channel flags, either the
	// first or second edge policy is being updated. The edge index value
	// stores node1's key in its first 33 bytes and node2's key in the
	// following 33 bytes.
	var fromNode, toNode []byte
	var isUpdate1 bool
	if edge.ChannelFlags&lnwire.ChanUpdateDirection == 0 {
		fromNode = nodeInfo[:33]
		toNode = nodeInfo[33:66]
		isUpdate1 = true
	} else {
		fromNode = nodeInfo[33:66]
		toNode = nodeInfo[:33]
		isUpdate1 = false
	}

	// Finally, with the direction of the edge being updated
	// identified, we update the on-disk edge representation.
	err := putChanEdgePolicy(edges, edge, fromNode, toNode)
	if err != nil {
		return noVertex, noVertex, false, err
	}

	var (
		fromNodePubKey route.Vertex
		toNodePubKey   route.Vertex
	)
	copy(fromNodePubKey[:], fromNode)
	copy(toNodePubKey[:], toNode)

	return fromNodePubKey, toNodePubKey, isUpdate1, nil
}
2827

2828
// isPublic determines whether the node is seen as public within the graph from
2829
// the source node's point of view. An existing database transaction can also be
2830
// specified.
2831
func (c *KVStore) isPublic(tx kvdb.RTx, nodePub route.Vertex,
2832
        sourcePubKey []byte) (bool, error) {
16✔
2833

16✔
2834
        // In order to determine whether this node is publicly advertised within
16✔
2835
        // the graph, we'll need to look at all of its edges and check whether
16✔
2836
        // they extend to any other node than the source node. errDone will be
16✔
2837
        // used to terminate the check early.
16✔
2838
        nodeIsPublic := false
16✔
2839
        errDone := errors.New("done")
16✔
2840
        err := c.forEachNodeChannelTx(tx, nodePub, func(tx kvdb.RTx,
16✔
2841
                info *models.ChannelEdgeInfo, _ *models.ChannelEdgePolicy,
16✔
2842
                _ *models.ChannelEdgePolicy) error {
29✔
2843

13✔
2844
                // If this edge doesn't extend to the source node, we'll
13✔
2845
                // terminate our search as we can now conclude that the node is
13✔
2846
                // publicly advertised within the graph due to the local node
13✔
2847
                // knowing of the current edge.
13✔
2848
                if !bytes.Equal(info.NodeKey1Bytes[:], sourcePubKey) &&
13✔
2849
                        !bytes.Equal(info.NodeKey2Bytes[:], sourcePubKey) {
19✔
2850

6✔
2851
                        nodeIsPublic = true
6✔
2852
                        return errDone
6✔
2853
                }
6✔
2854

2855
                // Since the edge _does_ extend to the source node, we'll also
2856
                // need to ensure that this is a public edge.
2857
                if info.AuthProof != nil {
19✔
2858
                        nodeIsPublic = true
9✔
2859
                        return errDone
9✔
2860
                }
9✔
2861

2862
                // Otherwise, we'll continue our search.
2863
                return nil
4✔
2864
        })
2865
        if err != nil && !errors.Is(err, errDone) {
16✔
2866
                return false, err
×
2867
        }
×
2868

2869
        return nodeIsPublic, nil
16✔
2870
}
2871

2872
// FetchLightningNodeTx attempts to look up a target node by its identity
2873
// public key. If the node isn't found in the database, then
2874
// ErrGraphNodeNotFound is returned. An optional transaction may be provided.
2875
// If none is provided, then a new one will be created.
2876
func (c *KVStore) FetchLightningNodeTx(tx kvdb.RTx, nodePub route.Vertex) (
2877
        *models.LightningNode, error) {
3,633✔
2878

3,633✔
2879
        return c.fetchLightningNode(tx, nodePub)
3,633✔
2880
}
3,633✔
2881

2882
// FetchLightningNode attempts to look up a target node by its identity public
2883
// key. If the node isn't found in the database, then ErrGraphNodeNotFound is
2884
// returned.
2885
func (c *KVStore) FetchLightningNode(nodePub route.Vertex) (
2886
        *models.LightningNode, error) {
155✔
2887

155✔
2888
        return c.fetchLightningNode(nil, nodePub)
155✔
2889
}
155✔
2890

2891
// fetchLightningNode attempts to look up a target node by its identity public
2892
// key. If the node isn't found in the database, then ErrGraphNodeNotFound is
2893
// returned. An optional transaction may be provided. If none is provided, then
2894
// a new one will be created.
2895
func (c *KVStore) fetchLightningNode(tx kvdb.RTx,
2896
        nodePub route.Vertex) (*models.LightningNode, error) {
3,785✔
2897

3,785✔
2898
        var node *models.LightningNode
3,785✔
2899
        fetch := func(tx kvdb.RTx) error {
7,570✔
2900
                // First grab the nodes bucket which stores the mapping from
3,785✔
2901
                // pubKey to node information.
3,785✔
2902
                nodes := tx.ReadBucket(nodeBucket)
3,785✔
2903
                if nodes == nil {
3,785✔
2904
                        return ErrGraphNotFound
×
2905
                }
×
2906

2907
                // If a key for this serialized public key isn't found, then
2908
                // the target node doesn't exist within the database.
2909
                nodeBytes := nodes.Get(nodePub[:])
3,785✔
2910
                if nodeBytes == nil {
3,802✔
2911
                        return ErrGraphNodeNotFound
17✔
2912
                }
17✔
2913

2914
                // If the node is found, then we can de deserialize the node
2915
                // information to return to the user.
2916
                nodeReader := bytes.NewReader(nodeBytes)
3,771✔
2917
                n, err := deserializeLightningNode(nodeReader)
3,771✔
2918
                if err != nil {
3,771✔
2919
                        return err
×
2920
                }
×
2921

2922
                node = &n
3,771✔
2923

3,771✔
2924
                return nil
3,771✔
2925
        }
2926

2927
        if tx == nil {
3,943✔
2928
                err := kvdb.View(
158✔
2929
                        c.db, fetch, func() {
316✔
2930
                                node = nil
158✔
2931
                        },
158✔
2932
                )
2933
                if err != nil {
164✔
2934
                        return nil, err
6✔
2935
                }
6✔
2936

2937
                return node, nil
155✔
2938
        }
2939

2940
        err := fetch(tx)
3,627✔
2941
        if err != nil {
3,638✔
2942
                return nil, err
11✔
2943
        }
11✔
2944

2945
        return node, nil
3,616✔
2946
}
2947

2948
// HasLightningNode determines if the graph has a vertex identified by the
2949
// target node identity public key. If the node exists in the database, a
2950
// timestamp of when the data for the node was lasted updated is returned along
2951
// with a true boolean. Otherwise, an empty time.Time is returned with a false
2952
// boolean.
2953
func (c *KVStore) HasLightningNode(nodePub [33]byte) (time.Time, bool,
2954
        error) {
20✔
2955

20✔
2956
        var (
20✔
2957
                updateTime time.Time
20✔
2958
                exists     bool
20✔
2959
        )
20✔
2960

20✔
2961
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
40✔
2962
                // First grab the nodes bucket which stores the mapping from
20✔
2963
                // pubKey to node information.
20✔
2964
                nodes := tx.ReadBucket(nodeBucket)
20✔
2965
                if nodes == nil {
20✔
2966
                        return ErrGraphNotFound
×
2967
                }
×
2968

2969
                // If a key for this serialized public key isn't found, we can
2970
                // exit early.
2971
                nodeBytes := nodes.Get(nodePub[:])
20✔
2972
                if nodeBytes == nil {
26✔
2973
                        exists = false
6✔
2974
                        return nil
6✔
2975
                }
6✔
2976

2977
                // Otherwise we continue on to obtain the time stamp
2978
                // representing the last time the data for this node was
2979
                // updated.
2980
                nodeReader := bytes.NewReader(nodeBytes)
17✔
2981
                node, err := deserializeLightningNode(nodeReader)
17✔
2982
                if err != nil {
17✔
2983
                        return err
×
2984
                }
×
2985

2986
                exists = true
17✔
2987
                updateTime = node.LastUpdate
17✔
2988

17✔
2989
                return nil
17✔
2990
        }, func() {
20✔
2991
                updateTime = time.Time{}
20✔
2992
                exists = false
20✔
2993
        })
20✔
2994
        if err != nil {
20✔
2995
                return time.Time{}, exists, err
×
2996
        }
×
2997

2998
        return updateTime, exists, nil
20✔
2999
}
3000

3001
// nodeTraversal is used to traverse all channels of a node given by its
// public key and passes channel information into the specified callback.
// If tx is nil, a fresh read transaction is created against db; otherwise
// the provided transaction is re-used for the whole traversal.
func nodeTraversal(tx kvdb.RTx, nodePub []byte, db kvdb.Backend,
	cb func(kvdb.RTx, *models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
		*models.ChannelEdgePolicy) error) error {

	traversal := func(tx kvdb.RTx) error {
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNotFound
		}
		edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrGraphNoEdgesFound
		}

		// In order to reach all the edges for this node, we take
		// advantage of the construction of the key-space within the
		// edge bucket. The keys are stored in the form: pubKey ||
		// chanID. Therefore, starting from a chanID of zero, we can
		// scan forward in the bucket, grabbing all the edges for the
		// node. Once the prefix no longer matches, then we know we're
		// done.
		var nodeStart [33 + 8]byte
		copy(nodeStart[:], nodePub)
		copy(nodeStart[33:], chanStart[:])

		// Starting from the key pubKey || 0, we seek forward in the
		// bucket until the retrieved key no longer has the public key
		// as its prefix. This indicates that we've stepped over into
		// another node's edges, so we can terminate our scan.
		edgeCursor := edges.ReadCursor()
		for nodeEdge, _ := edgeCursor.Seek(nodeStart[:]); bytes.HasPrefix(nodeEdge, nodePub); nodeEdge, _ = edgeCursor.Next() { //nolint:ll
			// If the prefix still matches, the channel id is
			// returned in nodeEdge. Channel id is used to lookup
			// the node at the other end of the channel and both
			// edge policies.
			chanID := nodeEdge[33:]
			edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID)
			if err != nil {
				return err
			}

			// The policy stored under this node's key is the
			// outgoing one (from this node's perspective).
			outgoingPolicy, err := fetchChanEdgePolicy(
				edges, chanID, nodePub,
			)
			if err != nil {
				return err
			}

			otherNode, err := edgeInfo.OtherNodeKeyBytes(nodePub)
			if err != nil {
				return err
			}

			// The policy stored under the counterparty's key is
			// the incoming one.
			incomingPolicy, err := fetchChanEdgePolicy(
				edges, chanID, otherNode[:],
			)
			if err != nil {
				return err
			}

			// Finally, we execute the callback.
			err = cb(tx, &edgeInfo, outgoingPolicy, incomingPolicy)
			if err != nil {
				return err
			}
		}

		return nil
	}

	// If no transaction was provided, then we'll create a new transaction
	// to execute the transaction within.
	if tx == nil {
		return kvdb.View(db, traversal, func() {})
	}

	// Otherwise, we re-use the existing transaction to execute the graph
	// traversal.
	return traversal(tx)
}
3083

3084
// ForEachNodeChannel iterates through all channels of the given node,
3085
// executing the passed callback with an edge info structure and the policies
3086
// of each end of the channel. The first edge policy is the outgoing edge *to*
3087
// the connecting node, while the second is the incoming edge *from* the
3088
// connecting node. If the callback returns an error, then the iteration is
3089
// halted with the error propagated back up to the caller.
3090
//
3091
// Unknown policies are passed into the callback as nil values.
3092
func (c *KVStore) ForEachNodeChannel(nodePub route.Vertex,
3093
        cb func(*models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
3094
                *models.ChannelEdgePolicy) error) error {
9✔
3095

9✔
3096
        return nodeTraversal(nil, nodePub[:], c.db, func(_ kvdb.RTx,
9✔
3097
                info *models.ChannelEdgeInfo, policy,
9✔
3098
                policy2 *models.ChannelEdgePolicy) error {
22✔
3099

13✔
3100
                return cb(info, policy, policy2)
13✔
3101
        })
13✔
3102
}
3103

3104
// ForEachSourceNodeChannel iterates through all channels of the source node,
3105
// executing the passed callback on each. The callback is provided with the
3106
// channel's outpoint, whether we have a policy for the channel and the channel
3107
// peer's node information.
3108
func (c *KVStore) ForEachSourceNodeChannel(cb func(chanPoint wire.OutPoint,
3109
        havePolicy bool, otherNode *models.LightningNode) error) error {
4✔
3110

4✔
3111
        return kvdb.View(c.db, func(tx kvdb.RTx) error {
8✔
3112
                nodes := tx.ReadBucket(nodeBucket)
4✔
3113
                if nodes == nil {
4✔
NEW
3114
                        return ErrGraphNotFound
×
NEW
3115
                }
×
3116

3117
                node, err := c.sourceNode(nodes)
4✔
3118
                if err != nil {
4✔
NEW
3119
                        return err
×
NEW
3120
                }
×
3121

3122
                return nodeTraversal(
4✔
3123
                        tx, node.PubKeyBytes[:], c.db, func(tx kvdb.RTx,
4✔
3124
                                info *models.ChannelEdgeInfo,
4✔
3125
                                policy, _ *models.ChannelEdgePolicy) error {
9✔
3126

5✔
3127
                                peer, err := c.fetchOtherNode(
5✔
3128
                                        tx, info, node.PubKeyBytes[:],
5✔
3129
                                )
5✔
3130
                                if err != nil {
5✔
NEW
3131
                                        return err
×
NEW
3132
                                }
×
3133

3134
                                return cb(
5✔
3135
                                        info.ChannelPoint, policy != nil, peer,
5✔
3136
                                )
5✔
3137
                        },
3138
                )
3139
        }, func() {})
4✔
3140
}
3141

3142
// forEachNodeChannelTx iterates through all channels of the given node,
3143
// executing the passed callback with an edge info structure and the policies
3144
// of each end of the channel. The first edge policy is the outgoing edge *to*
3145
// the connecting node, while the second is the incoming edge *from* the
3146
// connecting node. If the callback returns an error, then the iteration is
3147
// halted with the error propagated back up to the caller.
3148
//
3149
// Unknown policies are passed into the callback as nil values.
3150
//
3151
// If the caller wishes to re-use an existing boltdb transaction, then it
3152
// should be passed as the first argument.  Otherwise, the first argument should
3153
// be nil and a fresh transaction will be created to execute the graph
3154
// traversal.
3155
func (c *KVStore) forEachNodeChannelTx(tx kvdb.RTx,
3156
        nodePub route.Vertex, cb func(kvdb.RTx, *models.ChannelEdgeInfo,
3157
                *models.ChannelEdgePolicy,
3158
                *models.ChannelEdgePolicy) error) error {
1,001✔
3159

1,001✔
3160
        return nodeTraversal(tx, nodePub[:], c.db, cb)
1,001✔
3161
}
1,001✔
3162

3163
// fetchOtherNode attempts to fetch the full LightningNode that's opposite of
3164
// the target node in the channel. This is useful when one knows the pubkey of
3165
// one of the nodes, and wishes to obtain the full LightningNode for the other
3166
// end of the channel.
3167
func (c *KVStore) fetchOtherNode(tx kvdb.RTx,
3168
        channel *models.ChannelEdgeInfo, thisNodeKey []byte) (
3169
        *models.LightningNode, error) {
5✔
3170

5✔
3171
        // Ensure that the node passed in is actually a member of the channel.
5✔
3172
        var targetNodeBytes [33]byte
5✔
3173
        switch {
5✔
3174
        case bytes.Equal(channel.NodeKey1Bytes[:], thisNodeKey):
5✔
3175
                targetNodeBytes = channel.NodeKey2Bytes
5✔
3176
        case bytes.Equal(channel.NodeKey2Bytes[:], thisNodeKey):
3✔
3177
                targetNodeBytes = channel.NodeKey1Bytes
3✔
3178
        default:
×
3179
                return nil, fmt.Errorf("node not participating in this channel")
×
3180
        }
3181

3182
        var targetNode *models.LightningNode
5✔
3183
        fetchNodeFunc := func(tx kvdb.RTx) error {
10✔
3184
                // First grab the nodes bucket which stores the mapping from
5✔
3185
                // pubKey to node information.
5✔
3186
                nodes := tx.ReadBucket(nodeBucket)
5✔
3187
                if nodes == nil {
5✔
3188
                        return ErrGraphNotFound
×
3189
                }
×
3190

3191
                node, err := fetchLightningNode(nodes, targetNodeBytes[:])
5✔
3192
                if err != nil {
5✔
3193
                        return err
×
3194
                }
×
3195

3196
                targetNode = &node
5✔
3197

5✔
3198
                return nil
5✔
3199
        }
3200

3201
        // If the transaction is nil, then we'll need to create a new one,
3202
        // otherwise we can use the existing db transaction.
3203
        var err error
5✔
3204
        if tx == nil {
5✔
3205
                err = kvdb.View(c.db, fetchNodeFunc, func() {
×
3206
                        targetNode = nil
×
3207
                })
×
3208
        } else {
5✔
3209
                err = fetchNodeFunc(tx)
5✔
3210
        }
5✔
3211

3212
        return targetNode, err
5✔
3213
}
3214

3215
// computeEdgePolicyKeys is a helper function that can be used to compute the
3216
// keys used to index the channel edge policy info for the two nodes of the
3217
// edge. The keys for node 1 and node 2 are returned respectively.
3218
func computeEdgePolicyKeys(info *models.ChannelEdgeInfo) ([]byte, []byte) {
25✔
3219
        var (
25✔
3220
                node1Key [33 + 8]byte
25✔
3221
                node2Key [33 + 8]byte
25✔
3222
        )
25✔
3223

25✔
3224
        copy(node1Key[:], info.NodeKey1Bytes[:])
25✔
3225
        copy(node2Key[:], info.NodeKey2Bytes[:])
25✔
3226

25✔
3227
        byteOrder.PutUint64(node1Key[33:], info.ChannelID)
25✔
3228
        byteOrder.PutUint64(node2Key[33:], info.ChannelID)
25✔
3229

25✔
3230
        return node1Key[:], node2Key[:]
25✔
3231
}
25✔
3232

3233
// FetchChannelEdgesByOutpoint attempts to lookup the two directed edges for
// the channel identified by the funding outpoint. If the channel can't be
// found, then ErrEdgeNotFound is returned. A struct which houses the general
// information for the channel itself is returned as well as two structs that
// contain the routing policies for the channel in either direction.
func (c *KVStore) FetchChannelEdgesByOutpoint(op *wire.OutPoint) (
	*models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
	*models.ChannelEdgePolicy, error) {

	var (
		edgeInfo *models.ChannelEdgeInfo
		policy1  *models.ChannelEdgePolicy
		policy2  *models.ChannelEdgePolicy
	)

	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		// First, grab the node bucket. This will be used to populate
		// the Node pointers in each edge read from disk.
		nodes := tx.ReadBucket(nodeBucket)
		if nodes == nil {
			return ErrGraphNotFound
		}

		// Next, grab the edge bucket which stores the edges, and also
		// the index itself so we can group the directed edges together
		// logically.
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrGraphNoEdgesFound
		}

		// If the channel's outpoint doesn't exist within the outpoint
		// index, then the edge does not exist.
		chanIndex := edges.NestedReadBucket(channelPointBucket)
		if chanIndex == nil {
			return ErrGraphNoEdgesFound
		}
		// The outpoint index is keyed by the serialized outpoint, so
		// serialize it first to perform the lookup.
		var b bytes.Buffer
		if err := WriteOutpoint(&b, op); err != nil {
			return err
		}
		chanID := chanIndex.Get(b.Bytes())
		if chanID == nil {
			return fmt.Errorf("%w: op=%v", ErrEdgeNotFound, op)
		}

		// If the channel is found to exist, then we'll first retrieve
		// the general information for the channel.
		edge, err := fetchChanEdgeInfo(edgeIndex, chanID)
		if err != nil {
			return fmt.Errorf("%w: chanID=%x", err, chanID)
		}
		edgeInfo = &edge

		// Once we have the information about the channels' parameters,
		// we'll fetch the routing policies for each of the directed
		// edges.
		e1, e2, err := fetchChanEdgePolicies(edgeIndex, edges, chanID)
		if err != nil {
			return fmt.Errorf("failed to find policy: %w", err)
		}

		policy1 = e1
		policy2 = e2

		return nil
	}, func() {
		// Reset captured results so a retried transaction starts
		// from a clean slate.
		edgeInfo = nil
		policy1 = nil
		policy2 = nil
	})
	if err != nil {
		return nil, nil, nil, err
	}

	return edgeInfo, policy1, policy2, nil
}
3314

3315
// FetchChannelEdgesByID attempts to lookup the two directed edges for the
// channel identified by the channel ID. If the channel can't be found, then
// ErrEdgeNotFound is returned. A struct which houses the general information
// for the channel itself is returned as well as two structs that contain the
// routing policies for the channel in either direction.
//
// ErrZombieEdge can be returned if the edge is currently marked as a zombie
// within the database. In this case, the ChannelEdgePolicy's will be nil, and
// the ChannelEdgeInfo will only include the public keys of each node.
func (c *KVStore) FetchChannelEdgesByID(chanID uint64) (
	*models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
	*models.ChannelEdgePolicy, error) {

	var (
		edgeInfo  *models.ChannelEdgeInfo
		policy1   *models.ChannelEdgePolicy
		policy2   *models.ChannelEdgePolicy
		channelID [8]byte
	)

	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		// First, grab the node bucket. This will be used to populate
		// the Node pointers in each edge read from disk.
		nodes := tx.ReadBucket(nodeBucket)
		if nodes == nil {
			return ErrGraphNotFound
		}

		// Next, grab the edge bucket which stores the edges, and also
		// the index itself so we can group the directed edges together
		// logically.
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrGraphNoEdgesFound
		}

		byteOrder.PutUint64(channelID[:], chanID)

		// Now, attempt to fetch edge.
		edge, err := fetchChanEdgeInfo(edgeIndex, channelID[:])

		// If it doesn't exist, we'll quickly check our zombie index to
		// see if we've previously marked it as so.
		if errors.Is(err, ErrEdgeNotFound) {
			// If the zombie index doesn't exist, or the edge is not
			// marked as a zombie within it, then we'll return the
			// original ErrEdgeNotFound error.
			zombieIndex := edges.NestedReadBucket(zombieBucket)
			if zombieIndex == nil {
				return ErrEdgeNotFound
			}

			isZombie, pubKey1, pubKey2 := isZombieEdge(
				zombieIndex, chanID,
			)
			if !isZombie {
				return ErrEdgeNotFound
			}

			// Otherwise, the edge is marked as a zombie, so we'll
			// populate the edge info with the public keys of each
			// party as this is the only information we have about
			// it and return an error signaling so.
			edgeInfo = &models.ChannelEdgeInfo{
				NodeKey1Bytes: pubKey1,
				NodeKey2Bytes: pubKey2,
			}

			return ErrZombieEdge
		}

		// Otherwise, we'll just return the error if any.
		if err != nil {
			return err
		}

		edgeInfo = &edge

		// Then we'll attempt to fetch the accompanying policies of this
		// edge.
		e1, e2, err := fetchChanEdgePolicies(
			edgeIndex, edges, channelID[:],
		)
		if err != nil {
			return err
		}

		policy1 = e1
		policy2 = e2

		return nil
	}, func() {
		// Reset captured results so a retried transaction starts
		// from a clean slate.
		edgeInfo = nil
		policy1 = nil
		policy2 = nil
	})
	// The zombie case is special: the partially-populated edgeInfo is
	// returned alongside the error so callers can learn the node keys.
	if errors.Is(err, ErrZombieEdge) {
		return edgeInfo, nil, nil, err
	}
	if err != nil {
		return nil, nil, nil, err
	}

	return edgeInfo, policy1, policy2, nil
}
3424

3425
// IsPublicNode is a helper method that determines whether the node with the
3426
// given public key is seen as a public node in the graph from the graph's
3427
// source node's point of view.
3428
func (c *KVStore) IsPublicNode(pubKey [33]byte) (bool, error) {
	var nodeIsPublic bool
	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		nodes := tx.ReadBucket(nodeBucket)
		if nodes == nil {
			return ErrGraphNodesNotFound
		}
		// We need our own source pubkey so that isPublic can judge
		// visibility from the source node's point of view.
		ourPubKey := nodes.Get(sourceKey)
		if ourPubKey == nil {
			return ErrSourceNodeNotSet
		}
		node, err := fetchLightningNode(nodes, pubKey[:])
		if err != nil {
			return err
		}

		nodeIsPublic, err = c.isPublic(tx, node.PubKeyBytes, ourPubKey)

		return err
	}, func() {
		// Reset closure state in case the transaction is retried.
		nodeIsPublic = false
	})
	if err != nil {
		return false, err
	}

	return nodeIsPublic, nil
}
3456

3457
// genMultiSigP2WSH generates the p2wsh'd multisig script for 2 of 2 pubkeys.
3458
func genMultiSigP2WSH(aPub, bPub []byte) ([]byte, error) {
49✔
3459
        witnessScript, err := input.GenMultiSigScript(aPub, bPub)
49✔
3460
        if err != nil {
49✔
3461
                return nil, err
×
3462
        }
×
3463

3464
        // With the witness script generated, we'll now turn it into a p2wsh
3465
        // script:
3466
        //  * OP_0 <sha256(script)>
3467
        bldr := txscript.NewScriptBuilder(
49✔
3468
                txscript.WithScriptAllocSize(input.P2WSHSize),
49✔
3469
        )
49✔
3470
        bldr.AddOp(txscript.OP_0)
49✔
3471
        scriptHash := sha256.Sum256(witnessScript)
49✔
3472
        bldr.AddData(scriptHash[:])
49✔
3473

49✔
3474
        return bldr.Script()
49✔
3475
}
3476

3477
// EdgePoint couples the outpoint of a channel with the funding script that it
3478
// creates. The FilteredChainView will use this to watch for spends of this
3479
// edge point on chain. We require both of these values as depending on the
3480
// concrete implementation, either the pkScript, or the out point will be used.
3481
type EdgePoint struct {
	// FundingPkScript is the p2wsh multi-sig script of the target channel.
	FundingPkScript []byte

	// OutPoint is the outpoint of the target channel.
	OutPoint wire.OutPoint
}
3488

3489
// String returns a human readable version of the target EdgePoint. We return
3490
// the outpoint directly as it is enough to uniquely identify the edge point.
3491
func (e *EdgePoint) String() string {
×
3492
        return e.OutPoint.String()
×
3493
}
×
3494

3495
// ChannelView returns the verifiable edge information for each active channel
3496
// within the known channel graph. The set of UTXO's (along with their scripts)
3497
// returned are the ones that need to be watched on chain to detect channel
3498
// closes on the resident blockchain.
3499
func (c *KVStore) ChannelView() ([]EdgePoint, error) {
	var edgePoints []EdgePoint
	if err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		// We're going to iterate over the entire channel index, so
		// we'll need to fetch the edgeBucket to get to the index as
		// it's a sub-bucket.
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		chanIndex := edges.NestedReadBucket(channelPointBucket)
		if chanIndex == nil {
			return ErrGraphNoEdgesFound
		}
		edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrGraphNoEdgesFound
		}

		// Once we have the proper bucket, we'll range over each key
		// (which is the channel point for the channel) and decode it,
		// accumulating each entry.
		return chanIndex.ForEach(
			func(chanPointBytes, chanID []byte) error {
				chanPointReader := bytes.NewReader(
					chanPointBytes,
				)

				var chanPoint wire.OutPoint
				err := ReadOutpoint(chanPointReader, &chanPoint)
				if err != nil {
					return err
				}

				edgeInfo, err := fetchChanEdgeInfo(
					edgeIndex, chanID,
				)
				if err != nil {
					return err
				}

				// Re-derive the funding script from the two
				// bitcoin keys so callers can watch for
				// on-chain spends of this channel.
				pkScript, err := genMultiSigP2WSH(
					edgeInfo.BitcoinKey1Bytes[:],
					edgeInfo.BitcoinKey2Bytes[:],
				)
				if err != nil {
					return err
				}

				edgePoints = append(edgePoints, EdgePoint{
					FundingPkScript: pkScript,
					OutPoint:        chanPoint,
				})

				return nil
			},
		)
	}, func() {
		// Reset accumulated state in case the transaction is retried.
		edgePoints = nil
	}); err != nil {
		return nil, err
	}

	return edgePoints, nil
}
3564

3565
// MarkEdgeZombie attempts to mark a channel identified by its channel ID as a
3566
// zombie. This method is used on an ad-hoc basis, when channels need to be
3567
// marked as zombies outside the normal pruning cycle.
3568
func (c *KVStore) MarkEdgeZombie(chanID uint64,
3569
        pubKey1, pubKey2 [33]byte) error {
130✔
3570

130✔
3571
        c.cacheMu.Lock()
130✔
3572
        defer c.cacheMu.Unlock()
130✔
3573

130✔
3574
        err := kvdb.Batch(c.db, func(tx kvdb.RwTx) error {
260✔
3575
                edges := tx.ReadWriteBucket(edgeBucket)
130✔
3576
                if edges == nil {
130✔
3577
                        return ErrGraphNoEdgesFound
×
3578
                }
×
3579
                zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
130✔
3580
                if err != nil {
130✔
3581
                        return fmt.Errorf("unable to create zombie "+
×
3582
                                "bucket: %w", err)
×
3583
                }
×
3584

3585
                return markEdgeZombie(zombieIndex, chanID, pubKey1, pubKey2)
130✔
3586
        })
3587
        if err != nil {
130✔
3588
                return err
×
3589
        }
×
3590

3591
        c.rejectCache.remove(chanID)
130✔
3592
        c.chanCache.remove(chanID)
130✔
3593

130✔
3594
        return nil
130✔
3595
}
3596

3597
// markEdgeZombie marks an edge as a zombie within our zombie index. The public
3598
// keys should represent the node public keys of the two parties involved in the
3599
// edge.
3600
func markEdgeZombie(zombieIndex kvdb.RwBucket, chanID uint64, pubKey1,
3601
        pubKey2 [33]byte) error {
156✔
3602

156✔
3603
        var k [8]byte
156✔
3604
        byteOrder.PutUint64(k[:], chanID)
156✔
3605

156✔
3606
        var v [66]byte
156✔
3607
        copy(v[:33], pubKey1[:])
156✔
3608
        copy(v[33:], pubKey2[:])
156✔
3609

156✔
3610
        return zombieIndex.Put(k[:], v[:])
156✔
3611
}
156✔
3612

3613
// MarkEdgeLive clears an edge from our zombie index, deeming it as live.
3614
func (c *KVStore) MarkEdgeLive(chanID uint64) error {
20✔
3615
        c.cacheMu.Lock()
20✔
3616
        defer c.cacheMu.Unlock()
20✔
3617

20✔
3618
        return c.markEdgeLiveUnsafe(nil, chanID)
20✔
3619
}
20✔
3620

3621
// markEdgeLiveUnsafe clears an edge from the zombie index. This method can be
3622
// called with an existing kvdb.RwTx or the argument can be set to nil in which
3623
// case a new transaction will be created.
3624
//
3625
// NOTE: this method MUST only be called if the cacheMu has already been
3626
// acquired.
3627
func (c *KVStore) markEdgeLiveUnsafe(tx kvdb.RwTx, chanID uint64) error {
	dbFn := func(tx kvdb.RwTx) error {
		edges := tx.ReadWriteBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		zombieIndex := edges.NestedReadWriteBucket(zombieBucket)
		if zombieIndex == nil {
			// No zombie index means nothing can be marked live;
			// treat this as a no-op rather than an error.
			return nil
		}

		var k [8]byte
		byteOrder.PutUint64(k[:], chanID)

		// The channel must currently be present in the zombie index
		// for it to be resurrected.
		if len(zombieIndex.Get(k[:])) == 0 {
			return ErrZombieEdgeNotFound
		}

		return zombieIndex.Delete(k[:])
	}

	// If the transaction is nil, we'll create a new one. Otherwise, we use
	// the existing transaction
	var err error
	if tx == nil {
		err = kvdb.Update(c.db, dbFn, func() {})
	} else {
		err = dbFn(tx)
	}
	if err != nil {
		return err
	}

	// Drop any cached entries for this channel so subsequent reads
	// observe its live status.
	c.rejectCache.remove(chanID)
	c.chanCache.remove(chanID)

	return nil
}
3665

3666
// IsZombieEdge returns whether the edge is considered zombie. If it is a
3667
// zombie, then the two node public keys corresponding to this edge are also
3668
// returned.
3669
func (c *KVStore) IsZombieEdge(chanID uint64) (bool, [33]byte, [33]byte) {
14✔
3670
        var (
14✔
3671
                isZombie         bool
14✔
3672
                pubKey1, pubKey2 [33]byte
14✔
3673
        )
14✔
3674

14✔
3675
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
28✔
3676
                edges := tx.ReadBucket(edgeBucket)
14✔
3677
                if edges == nil {
14✔
3678
                        return ErrGraphNoEdgesFound
×
3679
                }
×
3680
                zombieIndex := edges.NestedReadBucket(zombieBucket)
14✔
3681
                if zombieIndex == nil {
14✔
3682
                        return nil
×
3683
                }
×
3684

3685
                isZombie, pubKey1, pubKey2 = isZombieEdge(zombieIndex, chanID)
14✔
3686

14✔
3687
                return nil
14✔
3688
        }, func() {
14✔
3689
                isZombie = false
14✔
3690
                pubKey1 = [33]byte{}
14✔
3691
                pubKey2 = [33]byte{}
14✔
3692
        })
14✔
3693
        if err != nil {
14✔
3694
                return false, [33]byte{}, [33]byte{}
×
3695
        }
×
3696

3697
        return isZombie, pubKey1, pubKey2
14✔
3698
}
3699

3700
// isZombieEdge returns whether an entry exists for the given channel in the
3701
// zombie index. If an entry exists, then the two node public keys corresponding
3702
// to this edge are also returned.
3703
func isZombieEdge(zombieIndex kvdb.RBucket,
3704
        chanID uint64) (bool, [33]byte, [33]byte) {
180✔
3705

180✔
3706
        var k [8]byte
180✔
3707
        byteOrder.PutUint64(k[:], chanID)
180✔
3708

180✔
3709
        v := zombieIndex.Get(k[:])
180✔
3710
        if v == nil {
280✔
3711
                return false, [33]byte{}, [33]byte{}
100✔
3712
        }
100✔
3713

3714
        var pubKey1, pubKey2 [33]byte
83✔
3715
        copy(pubKey1[:], v[:33])
83✔
3716
        copy(pubKey2[:], v[33:])
83✔
3717

83✔
3718
        return true, pubKey1, pubKey2
83✔
3719
}
3720

3721
// NumZombies returns the current number of zombie channels in the graph.
3722
func (c *KVStore) NumZombies() (uint64, error) {
4✔
3723
        var numZombies uint64
4✔
3724
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
8✔
3725
                edges := tx.ReadBucket(edgeBucket)
4✔
3726
                if edges == nil {
4✔
3727
                        return nil
×
3728
                }
×
3729
                zombieIndex := edges.NestedReadBucket(zombieBucket)
4✔
3730
                if zombieIndex == nil {
4✔
3731
                        return nil
×
3732
                }
×
3733

3734
                return zombieIndex.ForEach(func(_, _ []byte) error {
6✔
3735
                        numZombies++
2✔
3736
                        return nil
2✔
3737
                })
2✔
3738
        }, func() {
4✔
3739
                numZombies = 0
4✔
3740
        })
4✔
3741
        if err != nil {
4✔
3742
                return 0, err
×
3743
        }
×
3744

3745
        return numZombies, nil
4✔
3746
}
3747

3748
// PutClosedScid stores a SCID for a closed channel in the database. This is so
3749
// that we can ignore channel announcements that we know to be closed without
3750
// having to validate them and fetch a block.
3751
func (c *KVStore) PutClosedScid(scid lnwire.ShortChannelID) error {
1✔
3752
        return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
2✔
3753
                closedScids, err := tx.CreateTopLevelBucket(closedScidBucket)
1✔
3754
                if err != nil {
1✔
3755
                        return err
×
3756
                }
×
3757

3758
                var k [8]byte
1✔
3759
                byteOrder.PutUint64(k[:], scid.ToUint64())
1✔
3760

1✔
3761
                return closedScids.Put(k[:], []byte{})
1✔
3762
        }, func() {})
1✔
3763
}
3764

3765
// IsClosedScid checks whether a channel identified by the passed in scid is
3766
// closed. This helps avoid having to perform expensive validation checks.
3767
// TODO: Add an LRU cache to cut down on disc reads.
3768
func (c *KVStore) IsClosedScid(scid lnwire.ShortChannelID) (bool, error) {
5✔
3769
        var isClosed bool
5✔
3770
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
10✔
3771
                closedScids := tx.ReadBucket(closedScidBucket)
5✔
3772
                if closedScids == nil {
5✔
3773
                        return ErrClosedScidsNotFound
×
3774
                }
×
3775

3776
                var k [8]byte
5✔
3777
                byteOrder.PutUint64(k[:], scid.ToUint64())
5✔
3778

5✔
3779
                if closedScids.Get(k[:]) != nil {
6✔
3780
                        isClosed = true
1✔
3781
                        return nil
1✔
3782
                }
1✔
3783

3784
                return nil
4✔
3785
        }, func() {
5✔
3786
                isClosed = false
5✔
3787
        })
5✔
3788
        if err != nil {
5✔
3789
                return false, err
×
3790
        }
×
3791

3792
        return isClosed, nil
5✔
3793
}
3794

3795
// GraphSession will provide the call-back with access to a NodeTraverser
3796
// instance which can be used to perform queries against the channel graph.
3797
func (c *KVStore) GraphSession(cb func(graph NodeTraverser) error) error {
54✔
3798
        return c.db.View(func(tx walletdb.ReadTx) error {
108✔
3799
                return cb(&nodeTraverserSession{
54✔
3800
                        db: c,
54✔
3801
                        tx: tx,
54✔
3802
                })
54✔
3803
        }, func() {})
108✔
3804
}
3805

3806
// nodeTraverserSession implements the NodeTraverser interface but with a
3807
// backing read only transaction for a consistent view of the graph.
3808
type nodeTraverserSession struct {
	// tx is the read transaction backing every query in this session.
	tx kvdb.RTx

	// db is the KVStore the queries are delegated to.
	db *KVStore
}
3812

3813
// ForEachNodeDirectedChannel calls the callback for every channel of the given
3814
// node.
3815
//
3816
// NOTE: Part of the NodeTraverser interface.
3817
func (c *nodeTraverserSession) ForEachNodeDirectedChannel(nodePub route.Vertex,
3818
        cb func(channel *DirectedChannel) error) error {
239✔
3819

239✔
3820
        return c.db.forEachNodeDirectedChannel(c.tx, nodePub, cb)
239✔
3821
}
239✔
3822

3823
// FetchNodeFeatures returns the features of the given node. If the node is
3824
// unknown, assume no additional features are supported.
3825
//
3826
// NOTE: Part of the NodeTraverser interface.
3827
func (c *nodeTraverserSession) FetchNodeFeatures(nodePub route.Vertex) (
3828
        *lnwire.FeatureVector, error) {
254✔
3829

254✔
3830
        return c.db.fetchNodeFeatures(c.tx, nodePub)
254✔
3831
}
254✔
3832

3833
// putLightningNode serializes the given node and writes it to the node
// bucket, updates the pubkey->alias index, and maintains the update-time
// index entry for the node. The wire layout is:
// LastUpdate (8 bytes) || pubkey (33 bytes) || HaveNodeAnnouncement (2 bytes)
// followed, only if an announcement is present, by color, alias, features,
// addresses, signature and extra opaque data.
func putLightningNode(nodeBucket, aliasBucket, updateIndex kvdb.RwBucket,
	node *models.LightningNode) error {

	var (
		scratch [16]byte
		b       bytes.Buffer
	)

	pub, err := node.PubKey()
	if err != nil {
		return err
	}
	nodePub := pub.SerializeCompressed()

	// If the node has the update time set, write it, else write 0.
	updateUnix := uint64(0)
	if node.LastUpdate.Unix() > 0 {
		updateUnix = uint64(node.LastUpdate.Unix())
	}

	byteOrder.PutUint64(scratch[:8], updateUnix)
	if _, err := b.Write(scratch[:8]); err != nil {
		return err
	}

	if _, err := b.Write(nodePub); err != nil {
		return err
	}

	// If we got a node announcement for this node, we will have the rest
	// of the data available. If not we don't have more data to write.
	if !node.HaveNodeAnnouncement {
		// Write HaveNodeAnnouncement=0.
		byteOrder.PutUint16(scratch[:2], 0)
		if _, err := b.Write(scratch[:2]); err != nil {
			return err
		}

		return nodeBucket.Put(nodePub, b.Bytes())
	}

	// Write HaveNodeAnnouncement=1.
	byteOrder.PutUint16(scratch[:2], 1)
	if _, err := b.Write(scratch[:2]); err != nil {
		return err
	}

	// The RGB color is written as three single-byte components.
	if err := binary.Write(&b, byteOrder, node.Color.R); err != nil {
		return err
	}
	if err := binary.Write(&b, byteOrder, node.Color.G); err != nil {
		return err
	}
	if err := binary.Write(&b, byteOrder, node.Color.B); err != nil {
		return err
	}

	if err := wire.WriteVarString(&b, 0, node.Alias); err != nil {
		return err
	}

	if err := node.Features.Encode(&b); err != nil {
		return err
	}

	// Addresses are written as a 2-byte count followed by each address.
	numAddresses := uint16(len(node.Addresses))
	byteOrder.PutUint16(scratch[:2], numAddresses)
	if _, err := b.Write(scratch[:2]); err != nil {
		return err
	}

	for _, address := range node.Addresses {
		if err := SerializeAddr(&b, address); err != nil {
			return err
		}
	}

	sigLen := len(node.AuthSigBytes)
	if sigLen > 80 {
		return fmt.Errorf("max sig len allowed is 80, had %v",
			sigLen)
	}

	err = wire.WriteVarBytes(&b, 0, node.AuthSigBytes)
	if err != nil {
		return err
	}

	if len(node.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes {
		return ErrTooManyExtraOpaqueBytes(len(node.ExtraOpaqueData))
	}
	err = wire.WriteVarBytes(&b, 0, node.ExtraOpaqueData)
	if err != nil {
		return err
	}

	if err := aliasBucket.Put(nodePub, []byte(node.Alias)); err != nil {
		return err
	}

	// With the alias bucket updated, we'll now update the index that
	// tracks the time series of node updates.
	var indexKey [8 + 33]byte
	byteOrder.PutUint64(indexKey[:8], updateUnix)
	copy(indexKey[8:], nodePub)

	// If there was already an old index entry for this node, then we'll
	// delete the old one before we write the new entry.
	if nodeBytes := nodeBucket.Get(nodePub); nodeBytes != nil {
		// Extract out the old update time so we can reconstruct the
		// prior index key to delete it from the index.
		oldUpdateTime := nodeBytes[:8]

		var oldIndexKey [8 + 33]byte
		copy(oldIndexKey[:8], oldUpdateTime)
		copy(oldIndexKey[8:], nodePub)

		if err := updateIndex.Delete(oldIndexKey[:]); err != nil {
			return err
		}
	}

	if err := updateIndex.Put(indexKey[:], nil); err != nil {
		return err
	}

	return nodeBucket.Put(nodePub, b.Bytes())
}
3961

3962
func fetchLightningNode(nodeBucket kvdb.RBucket,
3963
        nodePub []byte) (models.LightningNode, error) {
3,607✔
3964

3,607✔
3965
        nodeBytes := nodeBucket.Get(nodePub)
3,607✔
3966
        if nodeBytes == nil {
3,685✔
3967
                return models.LightningNode{}, ErrGraphNodeNotFound
78✔
3968
        }
78✔
3969

3970
        nodeReader := bytes.NewReader(nodeBytes)
3,532✔
3971

3,532✔
3972
        return deserializeLightningNode(nodeReader)
3,532✔
3973
}
3974

3975
func deserializeLightningNodeCacheable(r io.Reader) (route.Vertex,
3976
        *lnwire.FeatureVector, error) {
123✔
3977

123✔
3978
        var (
123✔
3979
                pubKey      route.Vertex
123✔
3980
                features    = lnwire.EmptyFeatureVector()
123✔
3981
                nodeScratch [8]byte
123✔
3982
        )
123✔
3983

123✔
3984
        // Skip ahead:
123✔
3985
        // - LastUpdate (8 bytes)
123✔
3986
        if _, err := r.Read(nodeScratch[:]); err != nil {
123✔
3987
                return pubKey, nil, err
×
3988
        }
×
3989

3990
        if _, err := io.ReadFull(r, pubKey[:]); err != nil {
123✔
3991
                return pubKey, nil, err
×
3992
        }
×
3993

3994
        // Read the node announcement flag.
3995
        if _, err := r.Read(nodeScratch[:2]); err != nil {
123✔
3996
                return pubKey, nil, err
×
3997
        }
×
3998
        hasNodeAnn := byteOrder.Uint16(nodeScratch[:2])
123✔
3999

123✔
4000
        // The rest of the data is optional, and will only be there if we got a
123✔
4001
        // node announcement for this node.
123✔
4002
        if hasNodeAnn == 0 {
126✔
4003
                return pubKey, features, nil
3✔
4004
        }
3✔
4005

4006
        // We did get a node announcement for this node, so we'll have the rest
4007
        // of the data available.
4008
        var rgb uint8
123✔
4009
        if err := binary.Read(r, byteOrder, &rgb); err != nil {
123✔
4010
                return pubKey, nil, err
×
4011
        }
×
4012
        if err := binary.Read(r, byteOrder, &rgb); err != nil {
123✔
4013
                return pubKey, nil, err
×
4014
        }
×
4015
        if err := binary.Read(r, byteOrder, &rgb); err != nil {
123✔
4016
                return pubKey, nil, err
×
4017
        }
×
4018

4019
        if _, err := wire.ReadVarString(r, 0); err != nil {
123✔
4020
                return pubKey, nil, err
×
4021
        }
×
4022

4023
        if err := features.Decode(r); err != nil {
123✔
4024
                return pubKey, nil, err
×
4025
        }
×
4026

4027
        return pubKey, features, nil
123✔
4028
}
4029

4030
func deserializeLightningNode(r io.Reader) (models.LightningNode, error) {
8,492✔
4031
        var (
8,492✔
4032
                node    models.LightningNode
8,492✔
4033
                scratch [8]byte
8,492✔
4034
                err     error
8,492✔
4035
        )
8,492✔
4036

8,492✔
4037
        // Always populate a feature vector, even if we don't have a node
8,492✔
4038
        // announcement and short circuit below.
8,492✔
4039
        node.Features = lnwire.EmptyFeatureVector()
8,492✔
4040

8,492✔
4041
        if _, err := r.Read(scratch[:]); err != nil {
8,492✔
4042
                return models.LightningNode{}, err
×
4043
        }
×
4044

4045
        unix := int64(byteOrder.Uint64(scratch[:]))
8,492✔
4046
        node.LastUpdate = time.Unix(unix, 0)
8,492✔
4047

8,492✔
4048
        if _, err := io.ReadFull(r, node.PubKeyBytes[:]); err != nil {
8,492✔
4049
                return models.LightningNode{}, err
×
4050
        }
×
4051

4052
        if _, err := r.Read(scratch[:2]); err != nil {
8,492✔
4053
                return models.LightningNode{}, err
×
4054
        }
×
4055

4056
        hasNodeAnn := byteOrder.Uint16(scratch[:2])
8,492✔
4057
        if hasNodeAnn == 1 {
16,846✔
4058
                node.HaveNodeAnnouncement = true
8,354✔
4059
        } else {
8,495✔
4060
                node.HaveNodeAnnouncement = false
141✔
4061
        }
141✔
4062

4063
        // The rest of the data is optional, and will only be there if we got a
4064
        // node announcement for this node.
4065
        if !node.HaveNodeAnnouncement {
8,633✔
4066
                return node, nil
141✔
4067
        }
141✔
4068

4069
        // We did get a node announcement for this node, so we'll have the rest
4070
        // of the data available.
4071
        if err := binary.Read(r, byteOrder, &node.Color.R); err != nil {
8,354✔
4072
                return models.LightningNode{}, err
×
4073
        }
×
4074
        if err := binary.Read(r, byteOrder, &node.Color.G); err != nil {
8,354✔
4075
                return models.LightningNode{}, err
×
4076
        }
×
4077
        if err := binary.Read(r, byteOrder, &node.Color.B); err != nil {
8,354✔
4078
                return models.LightningNode{}, err
×
4079
        }
×
4080

4081
        node.Alias, err = wire.ReadVarString(r, 0)
8,354✔
4082
        if err != nil {
8,354✔
4083
                return models.LightningNode{}, err
×
4084
        }
×
4085

4086
        err = node.Features.Decode(r)
8,354✔
4087
        if err != nil {
8,354✔
4088
                return models.LightningNode{}, err
×
4089
        }
×
4090

4091
        if _, err := r.Read(scratch[:2]); err != nil {
8,354✔
4092
                return models.LightningNode{}, err
×
4093
        }
×
4094
        numAddresses := int(byteOrder.Uint16(scratch[:2]))
8,354✔
4095

8,354✔
4096
        var addresses []net.Addr
8,354✔
4097
        for i := 0; i < numAddresses; i++ {
18,933✔
4098
                address, err := DeserializeAddr(r)
10,579✔
4099
                if err != nil {
10,579✔
4100
                        return models.LightningNode{}, err
×
4101
                }
×
4102
                addresses = append(addresses, address)
10,579✔
4103
        }
4104
        node.Addresses = addresses
8,354✔
4105

8,354✔
4106
        node.AuthSigBytes, err = wire.ReadVarBytes(r, 0, 80, "sig")
8,354✔
4107
        if err != nil {
8,354✔
4108
                return models.LightningNode{}, err
×
4109
        }
×
4110

4111
        // We'll try and see if there are any opaque bytes left, if not, then
4112
        // we'll ignore the EOF error and return the node as is.
4113
        node.ExtraOpaqueData, err = wire.ReadVarBytes(
8,354✔
4114
                r, 0, MaxAllowedExtraOpaqueBytes, "blob",
8,354✔
4115
        )
8,354✔
4116
        switch {
8,354✔
4117
        case errors.Is(err, io.ErrUnexpectedEOF):
×
4118
        case errors.Is(err, io.EOF):
×
4119
        case err != nil:
×
4120
                return models.LightningNode{}, err
×
4121
        }
4122

4123
        return node, nil
8,354✔
4124
}
4125

4126
// putChanEdgeInfo serializes the given channel edge info and stores it in
// the edge index keyed by the channel ID. The wire layout is: the four
// 33-byte keys (node1, node2, bitcoin1, bitcoin2), features, the four
// auth-proof signatures (written empty when no proof is present), the
// channel point, capacity, channel ID, chain hash and extra opaque data.
func putChanEdgeInfo(edgeIndex kvdb.RwBucket,
	edgeInfo *models.ChannelEdgeInfo, chanID [8]byte) error {

	var b bytes.Buffer

	if _, err := b.Write(edgeInfo.NodeKey1Bytes[:]); err != nil {
		return err
	}
	if _, err := b.Write(edgeInfo.NodeKey2Bytes[:]); err != nil {
		return err
	}
	if _, err := b.Write(edgeInfo.BitcoinKey1Bytes[:]); err != nil {
		return err
	}
	if _, err := b.Write(edgeInfo.BitcoinKey2Bytes[:]); err != nil {
		return err
	}

	if err := wire.WriteVarBytes(&b, 0, edgeInfo.Features); err != nil {
		return err
	}

	// If the edge has no auth proof, the four signatures are written as
	// empty var-byte slices so the layout stays fixed.
	authProof := edgeInfo.AuthProof
	var nodeSig1, nodeSig2, bitcoinSig1, bitcoinSig2 []byte
	if authProof != nil {
		nodeSig1 = authProof.NodeSig1Bytes
		nodeSig2 = authProof.NodeSig2Bytes
		bitcoinSig1 = authProof.BitcoinSig1Bytes
		bitcoinSig2 = authProof.BitcoinSig2Bytes
	}

	if err := wire.WriteVarBytes(&b, 0, nodeSig1); err != nil {
		return err
	}
	if err := wire.WriteVarBytes(&b, 0, nodeSig2); err != nil {
		return err
	}
	if err := wire.WriteVarBytes(&b, 0, bitcoinSig1); err != nil {
		return err
	}
	if err := wire.WriteVarBytes(&b, 0, bitcoinSig2); err != nil {
		return err
	}

	if err := WriteOutpoint(&b, &edgeInfo.ChannelPoint); err != nil {
		return err
	}
	err := binary.Write(&b, byteOrder, uint64(edgeInfo.Capacity))
	if err != nil {
		return err
	}
	if _, err := b.Write(chanID[:]); err != nil {
		return err
	}
	if _, err := b.Write(edgeInfo.ChainHash[:]); err != nil {
		return err
	}

	if len(edgeInfo.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes {
		return ErrTooManyExtraOpaqueBytes(len(edgeInfo.ExtraOpaqueData))
	}
	err = wire.WriteVarBytes(&b, 0, edgeInfo.ExtraOpaqueData)
	if err != nil {
		return err
	}

	return edgeIndex.Put(chanID[:], b.Bytes())
}
4194

4195
func fetchChanEdgeInfo(edgeIndex kvdb.RBucket,
4196
        chanID []byte) (models.ChannelEdgeInfo, error) {
6,600✔
4197

6,600✔
4198
        edgeInfoBytes := edgeIndex.Get(chanID)
6,600✔
4199
        if edgeInfoBytes == nil {
6,671✔
4200
                return models.ChannelEdgeInfo{}, ErrEdgeNotFound
71✔
4201
        }
71✔
4202

4203
        edgeInfoReader := bytes.NewReader(edgeInfoBytes)
6,532✔
4204

6,532✔
4205
        return deserializeChanEdgeInfo(edgeInfoReader)
6,532✔
4206
}
4207

4208
// deserializeChanEdgeInfo decodes a models.ChannelEdgeInfo from r. The
// wire format mirrors serializeChanEdgeInfo: the two 33-byte node keys,
// the two 33-byte bitcoin keys, the feature vector, the four auth-proof
// signatures, the channel point, capacity, channel ID, chain hash, and
// finally an optional var-bytes blob of extra opaque data.
func deserializeChanEdgeInfo(r io.Reader) (models.ChannelEdgeInfo, error) {
	var (
		err      error
		edgeInfo models.ChannelEdgeInfo
	)

	if _, err := io.ReadFull(r, edgeInfo.NodeKey1Bytes[:]); err != nil {
		return models.ChannelEdgeInfo{}, err
	}
	if _, err := io.ReadFull(r, edgeInfo.NodeKey2Bytes[:]); err != nil {
		return models.ChannelEdgeInfo{}, err
	}
	if _, err := io.ReadFull(r, edgeInfo.BitcoinKey1Bytes[:]); err != nil {
		return models.ChannelEdgeInfo{}, err
	}
	if _, err := io.ReadFull(r, edgeInfo.BitcoinKey2Bytes[:]); err != nil {
		return models.ChannelEdgeInfo{}, err
	}

	edgeInfo.Features, err = wire.ReadVarBytes(r, 0, 900, "features")
	if err != nil {
		return models.ChannelEdgeInfo{}, err
	}

	proof := &models.ChannelAuthProof{}

	// The four signatures are always present on disk, but may be empty
	// byte slices for channels without an announcement proof.
	proof.NodeSig1Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
	if err != nil {
		return models.ChannelEdgeInfo{}, err
	}
	proof.NodeSig2Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
	if err != nil {
		return models.ChannelEdgeInfo{}, err
	}
	proof.BitcoinSig1Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
	if err != nil {
		return models.ChannelEdgeInfo{}, err
	}
	proof.BitcoinSig2Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
	if err != nil {
		return models.ChannelEdgeInfo{}, err
	}

	// Only attach the proof when at least one signature is non-empty;
	// otherwise the edge is left with a nil AuthProof.
	if !proof.IsEmpty() {
		edgeInfo.AuthProof = proof
	}

	edgeInfo.ChannelPoint = wire.OutPoint{}
	if err := ReadOutpoint(r, &edgeInfo.ChannelPoint); err != nil {
		return models.ChannelEdgeInfo{}, err
	}
	if err := binary.Read(r, byteOrder, &edgeInfo.Capacity); err != nil {
		return models.ChannelEdgeInfo{}, err
	}
	if err := binary.Read(r, byteOrder, &edgeInfo.ChannelID); err != nil {
		return models.ChannelEdgeInfo{}, err
	}

	if _, err := io.ReadFull(r, edgeInfo.ChainHash[:]); err != nil {
		return models.ChannelEdgeInfo{}, err
	}

	// We'll try and see if there are any opaque bytes left, if not, then
	// we'll ignore the EOF error and return the edge as is.
	edgeInfo.ExtraOpaqueData, err = wire.ReadVarBytes(
		r, 0, MaxAllowedExtraOpaqueBytes, "blob",
	)
	switch {
	case errors.Is(err, io.ErrUnexpectedEOF):
	case errors.Is(err, io.EOF):
	case err != nil:
		return models.ChannelEdgeInfo{}, err
	}

	return edgeInfo, nil
}
4284

4285
// putChanEdgePolicy serializes the given edge policy and writes it to the
// edges bucket keyed by (from-node pubkey || channel ID). It also keeps
// the edge-update index and the disabled-edge-policy index in sync: the
// old update-index entry (if any) is removed, a fresh one is written for
// the policy's LastUpdate time, and the disabled index is updated
// according to the policy's direction and disabled bit.
func putChanEdgePolicy(edges kvdb.RwBucket, edge *models.ChannelEdgePolicy,
	from, to []byte) error {

	// The policy key is the advertising node's pubkey followed by the
	// big-endian channel ID.
	var edgeKey [33 + 8]byte
	copy(edgeKey[:], from)
	byteOrder.PutUint64(edgeKey[33:], edge.ChannelID)

	var b bytes.Buffer
	if err := serializeChanEdgePolicy(&b, edge, to); err != nil {
		return err
	}

	// Before we write out the new edge, we'll create a new entry in the
	// update index in order to keep it fresh.
	updateUnix := uint64(edge.LastUpdate.Unix())
	var indexKey [8 + 8]byte
	byteOrder.PutUint64(indexKey[:8], updateUnix)
	byteOrder.PutUint64(indexKey[8:], edge.ChannelID)

	updateIndex, err := edges.CreateBucketIfNotExists(edgeUpdateIndexBucket)
	if err != nil {
		return err
	}

	// If there was already an entry for this edge, then we'll need to
	// delete the old one to ensure we don't leave around any after-images.
	// An unknown policy value does not have a update time recorded, so
	// it also does not need to be removed.
	if edgeBytes := edges.Get(edgeKey[:]); edgeBytes != nil &&
		!bytes.Equal(edgeBytes, unknownPolicy) {

		// In order to delete the old entry, we'll need to obtain the
		// *prior* update time in order to delete it. To do this, we'll
		// need to deserialize the existing policy within the database
		// (now outdated by the new one), and delete its corresponding
		// entry within the update index. We'll ignore any
		// ErrEdgePolicyOptionalFieldNotFound error, as we only need
		// the channel ID and update time to delete the entry.
		// TODO(halseth): get rid of these invalid policies in a
		// migration.
		oldEdgePolicy, err := deserializeChanEdgePolicy(
			bytes.NewReader(edgeBytes),
		)
		if err != nil &&
			!errors.Is(err, ErrEdgePolicyOptionalFieldNotFound) {

			return err
		}

		oldUpdateTime := uint64(oldEdgePolicy.LastUpdate.Unix())

		var oldIndexKey [8 + 8]byte
		byteOrder.PutUint64(oldIndexKey[:8], oldUpdateTime)
		byteOrder.PutUint64(oldIndexKey[8:], edge.ChannelID)

		if err := updateIndex.Delete(oldIndexKey[:]); err != nil {
			return err
		}
	}

	// The update index stores only keys; the value is unused.
	if err := updateIndex.Put(indexKey[:], nil); err != nil {
		return err
	}

	err = updateEdgePolicyDisabledIndex(
		edges, edge.ChannelID,
		edge.ChannelFlags&lnwire.ChanUpdateDirection > 0,
		edge.IsDisabled(),
	)
	if err != nil {
		return err
	}

	return edges.Put(edgeKey[:], b.Bytes())
}
4360

4361
// updateEdgePolicyDisabledIndex is used to update the disabledEdgePolicyIndex
4362
// bucket by either add a new disabled ChannelEdgePolicy or remove an existing
4363
// one.
4364
// The direction represents the direction of the edge and disabled is used for
4365
// deciding whether to remove or add an entry to the bucket.
4366
// In general a channel is disabled if two entries for the same chanID exist
4367
// in this bucket.
4368
// Maintaining the bucket this way allows a fast retrieval of disabled
4369
// channels, for example when prune is needed.
4370
func updateEdgePolicyDisabledIndex(edges kvdb.RwBucket, chanID uint64,
4371
        direction bool, disabled bool) error {
2,929✔
4372

2,929✔
4373
        var disabledEdgeKey [8 + 1]byte
2,929✔
4374
        byteOrder.PutUint64(disabledEdgeKey[0:], chanID)
2,929✔
4375
        if direction {
4,393✔
4376
                disabledEdgeKey[8] = 1
1,464✔
4377
        }
1,464✔
4378

4379
        disabledEdgePolicyIndex, err := edges.CreateBucketIfNotExists(
2,929✔
4380
                disabledEdgePolicyBucket,
2,929✔
4381
        )
2,929✔
4382
        if err != nil {
2,929✔
4383
                return err
×
4384
        }
×
4385

4386
        if disabled {
2,958✔
4387
                return disabledEdgePolicyIndex.Put(disabledEdgeKey[:], []byte{})
29✔
4388
        }
29✔
4389

4390
        return disabledEdgePolicyIndex.Delete(disabledEdgeKey[:])
2,903✔
4391
}
4392

4393
// putChanEdgePolicyUnknown marks the edge policy as unknown
4394
// in the edges bucket.
4395
func putChanEdgePolicyUnknown(edges kvdb.RwBucket, channelID uint64,
4396
        from []byte) error {
2,957✔
4397

2,957✔
4398
        var edgeKey [33 + 8]byte
2,957✔
4399
        copy(edgeKey[:], from)
2,957✔
4400
        byteOrder.PutUint64(edgeKey[33:], channelID)
2,957✔
4401

2,957✔
4402
        if edges.Get(edgeKey[:]) != nil {
2,957✔
4403
                return fmt.Errorf("cannot write unknown policy for channel %v "+
×
4404
                        " when there is already a policy present", channelID)
×
4405
        }
×
4406

4407
        return edges.Put(edgeKey[:], unknownPolicy)
2,957✔
4408
}
4409

4410
func fetchChanEdgePolicy(edges kvdb.RBucket, chanID []byte,
4411
        nodePub []byte) (*models.ChannelEdgePolicy, error) {
13,081✔
4412

13,081✔
4413
        var edgeKey [33 + 8]byte
13,081✔
4414
        copy(edgeKey[:], nodePub)
13,081✔
4415
        copy(edgeKey[33:], chanID)
13,081✔
4416

13,081✔
4417
        edgeBytes := edges.Get(edgeKey[:])
13,081✔
4418
        if edgeBytes == nil {
13,081✔
4419
                return nil, ErrEdgeNotFound
×
4420
        }
×
4421

4422
        // No need to deserialize unknown policy.
4423
        if bytes.Equal(edgeBytes, unknownPolicy) {
14,685✔
4424
                return nil, nil
1,604✔
4425
        }
1,604✔
4426

4427
        edgeReader := bytes.NewReader(edgeBytes)
11,480✔
4428

11,480✔
4429
        ep, err := deserializeChanEdgePolicy(edgeReader)
11,480✔
4430
        switch {
11,480✔
4431
        // If the db policy was missing an expected optional field, we return
4432
        // nil as if the policy was unknown.
4433
        case errors.Is(err, ErrEdgePolicyOptionalFieldNotFound):
2✔
4434
                return nil, nil
2✔
4435

4436
        case err != nil:
×
4437
                return nil, err
×
4438
        }
4439

4440
        return ep, nil
11,478✔
4441
}
4442

4443
func fetchChanEdgePolicies(edgeIndex kvdb.RBucket, edges kvdb.RBucket,
4444
        chanID []byte) (*models.ChannelEdgePolicy, *models.ChannelEdgePolicy,
4445
        error) {
2,890✔
4446

2,890✔
4447
        edgeInfo := edgeIndex.Get(chanID)
2,890✔
4448
        if edgeInfo == nil {
2,890✔
4449
                return nil, nil, fmt.Errorf("%w: chanID=%x", ErrEdgeNotFound,
×
4450
                        chanID)
×
4451
        }
×
4452

4453
        // The first node is contained within the first half of the edge
4454
        // information. We only propagate the error here and below if it's
4455
        // something other than edge non-existence.
4456
        node1Pub := edgeInfo[:33]
2,890✔
4457
        edge1, err := fetchChanEdgePolicy(edges, chanID, node1Pub)
2,890✔
4458
        if err != nil {
2,890✔
4459
                return nil, nil, fmt.Errorf("%w: node1Pub=%x", ErrEdgeNotFound,
×
4460
                        node1Pub)
×
4461
        }
×
4462

4463
        // Similarly, the second node is contained within the latter
4464
        // half of the edge information.
4465
        node2Pub := edgeInfo[33:66]
2,890✔
4466
        edge2, err := fetchChanEdgePolicy(edges, chanID, node2Pub)
2,890✔
4467
        if err != nil {
2,890✔
4468
                return nil, nil, fmt.Errorf("%w: node2Pub=%x", ErrEdgeNotFound,
×
4469
                        node2Pub)
×
4470
        }
×
4471

4472
        return edge1, edge2, nil
2,890✔
4473
}
4474

4475
// serializeChanEdgePolicy encodes edge to w using the legacy on-disk
// format: signature, channel ID, last-update timestamp, message flags,
// channel flags, time-lock delta, min HTLC, fee base, fee rate, the
// destination node pubkey (to), and finally a var-bytes blob holding the
// optional max_htlc value followed by any extra opaque data.
func serializeChanEdgePolicy(w io.Writer, edge *models.ChannelEdgePolicy,
	to []byte) error {

	err := wire.WriteVarBytes(w, 0, edge.SigBytes)
	if err != nil {
		return err
	}

	if err := binary.Write(w, byteOrder, edge.ChannelID); err != nil {
		return err
	}

	// The update time is written as an 8-byte big-endian Unix timestamp.
	var scratch [8]byte
	updateUnix := uint64(edge.LastUpdate.Unix())
	byteOrder.PutUint64(scratch[:], updateUnix)
	if _, err := w.Write(scratch[:]); err != nil {
		return err
	}

	if err := binary.Write(w, byteOrder, edge.MessageFlags); err != nil {
		return err
	}
	if err := binary.Write(w, byteOrder, edge.ChannelFlags); err != nil {
		return err
	}
	if err := binary.Write(w, byteOrder, edge.TimeLockDelta); err != nil {
		return err
	}
	if err := binary.Write(w, byteOrder, uint64(edge.MinHTLC)); err != nil {
		return err
	}
	err = binary.Write(w, byteOrder, uint64(edge.FeeBaseMSat))
	if err != nil {
		return err
	}
	err = binary.Write(
		w, byteOrder, uint64(edge.FeeProportionalMillionths),
	)
	if err != nil {
		return err
	}

	if _, err := w.Write(to); err != nil {
		return err
	}

	// If the max_htlc field is present, we write it. To be compatible with
	// older versions that wasn't aware of this field, we write it as part
	// of the opaque data.
	// TODO(halseth): clean up when moving to TLV.
	var opaqueBuf bytes.Buffer
	if edge.MessageFlags.HasMaxHtlc() {
		err := binary.Write(&opaqueBuf, byteOrder, uint64(edge.MaxHTLC))
		if err != nil {
			return err
		}
	}

	if len(edge.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes {
		return ErrTooManyExtraOpaqueBytes(len(edge.ExtraOpaqueData))
	}
	if _, err := opaqueBuf.Write(edge.ExtraOpaqueData); err != nil {
		return err
	}

	if err := wire.WriteVarBytes(w, 0, opaqueBuf.Bytes()); err != nil {
		return err
	}

	return nil
}
4546

4547
func deserializeChanEdgePolicy(r io.Reader) (*models.ChannelEdgePolicy, error) {
11,505✔
4548
        // Deserialize the policy. Note that in case an optional field is not
11,505✔
4549
        // found, both an error and a populated policy object are returned.
11,505✔
4550
        edge, deserializeErr := deserializeChanEdgePolicyRaw(r)
11,505✔
4551
        if deserializeErr != nil &&
11,505✔
4552
                !errors.Is(deserializeErr, ErrEdgePolicyOptionalFieldNotFound) {
11,505✔
4553

×
4554
                return nil, deserializeErr
×
4555
        }
×
4556

4557
        return edge, deserializeErr
11,505✔
4558
}
4559

4560
func deserializeChanEdgePolicyRaw(r io.Reader) (*models.ChannelEdgePolicy,
4561
        error) {
12,512✔
4562

12,512✔
4563
        edge := &models.ChannelEdgePolicy{}
12,512✔
4564

12,512✔
4565
        var err error
12,512✔
4566
        edge.SigBytes, err = wire.ReadVarBytes(r, 0, 80, "sig")
12,512✔
4567
        if err != nil {
12,512✔
4568
                return nil, err
×
4569
        }
×
4570

4571
        if err := binary.Read(r, byteOrder, &edge.ChannelID); err != nil {
12,512✔
4572
                return nil, err
×
4573
        }
×
4574

4575
        var scratch [8]byte
12,512✔
4576
        if _, err := r.Read(scratch[:]); err != nil {
12,512✔
4577
                return nil, err
×
4578
        }
×
4579
        unix := int64(byteOrder.Uint64(scratch[:]))
12,512✔
4580
        edge.LastUpdate = time.Unix(unix, 0)
12,512✔
4581

12,512✔
4582
        if err := binary.Read(r, byteOrder, &edge.MessageFlags); err != nil {
12,512✔
4583
                return nil, err
×
4584
        }
×
4585
        if err := binary.Read(r, byteOrder, &edge.ChannelFlags); err != nil {
12,512✔
4586
                return nil, err
×
4587
        }
×
4588
        if err := binary.Read(r, byteOrder, &edge.TimeLockDelta); err != nil {
12,512✔
4589
                return nil, err
×
4590
        }
×
4591

4592
        var n uint64
12,512✔
4593
        if err := binary.Read(r, byteOrder, &n); err != nil {
12,512✔
4594
                return nil, err
×
4595
        }
×
4596
        edge.MinHTLC = lnwire.MilliSatoshi(n)
12,512✔
4597

12,512✔
4598
        if err := binary.Read(r, byteOrder, &n); err != nil {
12,512✔
4599
                return nil, err
×
4600
        }
×
4601
        edge.FeeBaseMSat = lnwire.MilliSatoshi(n)
12,512✔
4602

12,512✔
4603
        if err := binary.Read(r, byteOrder, &n); err != nil {
12,512✔
4604
                return nil, err
×
4605
        }
×
4606
        edge.FeeProportionalMillionths = lnwire.MilliSatoshi(n)
12,512✔
4607

12,512✔
4608
        if _, err := r.Read(edge.ToNode[:]); err != nil {
12,512✔
4609
                return nil, err
×
4610
        }
×
4611

4612
        // We'll try and see if there are any opaque bytes left, if not, then
4613
        // we'll ignore the EOF error and return the edge as is.
4614
        edge.ExtraOpaqueData, err = wire.ReadVarBytes(
12,512✔
4615
                r, 0, MaxAllowedExtraOpaqueBytes, "blob",
12,512✔
4616
        )
12,512✔
4617
        switch {
12,512✔
4618
        case errors.Is(err, io.ErrUnexpectedEOF):
×
4619
        case errors.Is(err, io.EOF):
4✔
4620
        case err != nil:
×
4621
                return nil, err
×
4622
        }
4623

4624
        // See if optional fields are present.
4625
        if edge.MessageFlags.HasMaxHtlc() {
24,083✔
4626
                // The max_htlc field should be at the beginning of the opaque
11,571✔
4627
                // bytes.
11,571✔
4628
                opq := edge.ExtraOpaqueData
11,571✔
4629

11,571✔
4630
                // If the max_htlc field is not present, it might be old data
11,571✔
4631
                // stored before this field was validated. We'll return the
11,571✔
4632
                // edge along with an error.
11,571✔
4633
                if len(opq) < 8 {
11,575✔
4634
                        return edge, ErrEdgePolicyOptionalFieldNotFound
4✔
4635
                }
4✔
4636

4637
                maxHtlc := byteOrder.Uint64(opq[:8])
11,567✔
4638
                edge.MaxHTLC = lnwire.MilliSatoshi(maxHtlc)
11,567✔
4639

11,567✔
4640
                // Exclude the parsed field from the rest of the opaque data.
11,567✔
4641
                edge.ExtraOpaqueData = opq[8:]
11,567✔
4642
        }
4643

4644
        return edge, nil
12,508✔
4645
}
4646

4647
// chanGraphNodeTx is an implementation of the NodeRTx interface backed by the
// KVStore and a kvdb.RTx.
type chanGraphNodeTx struct {
	// tx is the open read transaction under which all queries performed
	// via this NodeRTx are executed.
	tx   kvdb.RTx

	// db is the backing KVStore used to resolve further nodes and
	// channels within the same transaction.
	db   *KVStore

	// node is the node this transaction view is anchored to.
	node *models.LightningNode
}

// A compile-time constraint to ensure chanGraphNodeTx implements the NodeRTx
// interface.
var _ NodeRTx = (*chanGraphNodeTx)(nil)
4658

4659
func newChanGraphNodeTx(tx kvdb.RTx, db *KVStore,
4660
        node *models.LightningNode) *chanGraphNodeTx {
4,105✔
4661

4,105✔
4662
        return &chanGraphNodeTx{
4,105✔
4663
                tx:   tx,
4,105✔
4664
                db:   db,
4,105✔
4665
                node: node,
4,105✔
4666
        }
4,105✔
4667
}
4,105✔
4668

4669
// Node returns the raw information of the node this transaction view is
// anchored to.
//
// NOTE: This is a part of the NodeRTx interface.
func (c *chanGraphNodeTx) Node() *models.LightningNode {
	return c.node
}
4675

4676
// FetchNode fetches the node with the given pub key under the same transaction
4677
// used to fetch the current node. The returned node is also a NodeRTx and any
4678
// operations on that NodeRTx will also be done under the same transaction.
4679
//
4680
// NOTE: This is a part of the NodeRTx interface.
4681
func (c *chanGraphNodeTx) FetchNode(nodePub route.Vertex) (NodeRTx, error) {
2,944✔
4682
        node, err := c.db.FetchLightningNodeTx(c.tx, nodePub)
2,944✔
4683
        if err != nil {
2,944✔
4684
                return nil, err
×
4685
        }
×
4686

4687
        return newChanGraphNodeTx(c.tx, c.db, node), nil
2,944✔
4688
}
4689

4690
// ForEachChannel can be used to iterate over the node's channels under
4691
// the same transaction used to fetch the node.
4692
//
4693
// NOTE: This is a part of the NodeRTx interface.
4694
func (c *chanGraphNodeTx) ForEachChannel(f func(*models.ChannelEdgeInfo,
4695
        *models.ChannelEdgePolicy, *models.ChannelEdgePolicy) error) error {
965✔
4696

965✔
4697
        return c.db.forEachNodeChannelTx(c.tx, c.node.PubKeyBytes,
965✔
4698
                func(_ kvdb.RTx, info *models.ChannelEdgeInfo, policy1,
965✔
4699
                        policy2 *models.ChannelEdgePolicy) error {
3,909✔
4700

2,944✔
4701
                        return f(info, policy1, policy2)
2,944✔
4702
                },
2,944✔
4703
        )
4704
}
4705

4706
// MakeTestGraph creates a new instance of the KVStore for testing
4707
// purposes.
4708
func MakeTestGraph(t testing.TB, modifiers ...KVStoreOptionModifier) (
4709
        *ChannelGraph, error) {
36✔
4710

36✔
4711
        opts := DefaultOptions()
36✔
4712
        for _, modifier := range modifiers {
36✔
4713
                modifier(opts)
×
4714
        }
×
4715

4716
        // Next, create KVStore for the first time.
4717
        backend, backendCleanup, err := kvdb.GetTestBackend(t.TempDir(), "cgr")
36✔
4718
        if err != nil {
36✔
4719
                backendCleanup()
×
4720

×
4721
                return nil, err
×
4722
        }
×
4723

4724
        graph, err := NewChannelGraph(&Config{
36✔
4725
                KVDB:        backend,
36✔
4726
                KVStoreOpts: modifiers,
36✔
4727
        })
36✔
4728
        if err != nil {
36✔
4729
                backendCleanup()
×
4730

×
4731
                return nil, err
×
4732
        }
×
4733
        require.NoError(t, graph.Start())
36✔
4734

36✔
4735
        t.Cleanup(func() {
72✔
4736
                _ = backend.Close()
36✔
4737
                backendCleanup()
36✔
4738
                require.NoError(t, graph.Stop())
36✔
4739
        })
36✔
4740

4741
        return graph, nil
36✔
4742
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc