lightningnetwork / lnd / 15959600311

29 Jun 2025 09:33PM UTC coverage: 67.577% (-0.03%) from 67.606%
Merge b3542eca4 into 6290edf14
Pull Request #8825: lnd: use persisted node announcement settings across restarts

44 of 49 new or added lines in 1 file covered. (89.8%)

92 existing lines in 17 files now uncovered.

135081 of 199891 relevant lines covered (67.58%)

21854.87 hits per line

Source File

/graph/db/kv_store.go (78.12% covered)
1
package graphdb
2

3
import (
4
        "bytes"
5
        "context"
6
        "crypto/sha256"
7
        "encoding/binary"
8
        "errors"
9
        "fmt"
10
        "io"
11
        "math"
12
        "net"
13
        "sort"
14
        "sync"
15
        "time"
16

17
        "github.com/btcsuite/btcd/btcec/v2"
18
        "github.com/btcsuite/btcd/chaincfg/chainhash"
19
        "github.com/btcsuite/btcd/txscript"
20
        "github.com/btcsuite/btcd/wire"
21
        "github.com/btcsuite/btcwallet/walletdb"
22
        "github.com/lightningnetwork/lnd/aliasmgr"
23
        "github.com/lightningnetwork/lnd/batch"
24
        "github.com/lightningnetwork/lnd/fn/v2"
25
        "github.com/lightningnetwork/lnd/graph/db/models"
26
        "github.com/lightningnetwork/lnd/input"
27
        "github.com/lightningnetwork/lnd/kvdb"
28
        "github.com/lightningnetwork/lnd/lnwire"
29
        "github.com/lightningnetwork/lnd/routing/route"
30
)
31

32
var (
33
        // nodeBucket is a bucket which houses all the vertices or nodes within
34
        // the channel graph. This bucket has a single sub-bucket which adds an
35
        // additional index from pubkey -> alias. Within the top-level of this
36
        // bucket, the key space maps a node's compressed public key to the
37
        // serialized information for that node. Additionally, there's a
38
        // special key "source" which stores the pubkey of the source node. The
39
        // source node is used as the starting point for all graph queries and
40
        // traversals. The graph is formed as a star-graph with the source node
41
        // at the center.
42
        //
43
        // maps: pubKey -> nodeInfo
44
        // maps: source -> selfPubKey
45
        nodeBucket = []byte("graph-node")
46

47
        // nodeUpdateIndexBucket is a sub-bucket of the nodeBucket. This bucket
48
        // will be used to quickly look up the "freshness" of a node's last
49
        // update to the network. The bucket only contains keys, and no values,
50
        // its mapping is:
51
        //
52
        // maps: updateTime || nodeID -> nil
53
        nodeUpdateIndexBucket = []byte("graph-node-update-index")
54

55
        // sourceKey is a special key that resides within the nodeBucket. The
56
        // sourceKey maps a key to the public key of the "self node".
57
        sourceKey = []byte("source")
58

59
        // aliasIndexBucket is a sub-bucket that's nested within the main
60
        // nodeBucket. This bucket maps the public key of a node to its
61
        // current alias. This bucket is provided as it can be used within a
62
        // future UI layer to add an additional degree of confirmation.
63
        aliasIndexBucket = []byte("alias")
64

65
        // edgeBucket is a bucket which houses all of the edge or channel
66
        // information within the channel graph. This bucket essentially acts
67
        // as an adjacency list which, in conjunction with a range scan, can be
68
        // used to iterate over all the incoming and outgoing edges for a
69
        // particular node. Keys in the bucket use a prefix scheme which leads
70
        // with the node's public key and ends with the compact edge ID.
71
        // For each chanID, there will be two entries within the bucket, as the
72
        // graph is directed: nodes may have different policies w.r.t. fees
73
        // for their respective directions.
74
        //
75
        // maps: pubKey || chanID -> channel edge policy for node
76
        edgeBucket = []byte("graph-edge")
77

78
        // unknownPolicy is represented as an empty slice. It is
79
        // used as the value in edgeBucket for unknown channel edge policies.
80
        // Unknown policies are still stored in the database to enable efficient
81
        // lookup of incoming channel edges.
82
        unknownPolicy = []byte{}
83

84
        // chanStart is an array of all zero bytes which is used to perform
85
        // range scans within the edgeBucket to obtain all of the outgoing
86
        // edges for a particular node.
87
        chanStart [8]byte
88

89
        // edgeIndexBucket is an index which can be used to iterate all edges
90
        // in the bucket, grouping them according to their in/out nodes.
91
        // Additionally, the items in this bucket also contain the complete
92
        // edge information for a channel. The edge information includes the
93
        // capacity of the channel, the nodes that made the channel, etc. This
94
        // bucket resides within the edgeBucket above. Creation of an edge
95
        // proceeds in two phases: first the edge is added to the edge index,
96
        // afterwards the edgeBucket can be updated with the latest details of
97
        // the edge as they are announced on the network.
98
        //
99
        // maps: chanID -> pubKey1 || pubKey2 || restofEdgeInfo
100
        edgeIndexBucket = []byte("edge-index")
101

102
        // edgeUpdateIndexBucket is a sub-bucket of the main edgeBucket. This
103
        // bucket contains an index which allows us to gauge the "freshness" of
104
        // a channel's last updates.
105
        //
106
        // maps: updateTime || chanID -> nil
107
        edgeUpdateIndexBucket = []byte("edge-update-index")
108

109
        // channelPointBucket maps a channel's full outpoint (txid:index) to
110
        // its short 8-byte channel ID. This bucket resides within the
111
        // edgeBucket above, and can be used to quickly remove an edge due to
112
        // the outpoint being spent, or to query for existence of a channel.
113
        //
114
        // maps: outPoint -> chanID
115
        channelPointBucket = []byte("chan-index")
116

117
        // zombieBucket is a sub-bucket of the main edgeBucket bucket
118
        // responsible for maintaining an index of zombie channels. Each entry
119
        // exists within the bucket as follows:
120
        //
121
        // maps: chanID -> pubKey1 || pubKey2
122
        //
123
        // The chanID represents the channel ID of the edge that is marked as a
124
        // zombie and is used as the key, which maps to the public keys of the
125
        // edge's participants.
126
        zombieBucket = []byte("zombie-index")
127

128
        // disabledEdgePolicyBucket is a sub-bucket of the main edgeBucket
129
        // bucket responsible for maintaining an index of disabled edge
130
        // policies. Each entry exists within the bucket as follows:
131
        //
132
        // maps: <chanID><direction> -> []byte{}
133
        //
134
        // The chanID represents the channel ID of the edge and the direction is
135
        // one byte representing the direction of the edge. The main purpose of
136
        // this index is to allow pruning disabled channels in a fast way
137
        // without the need to iterate over the whole graph.
138
        disabledEdgePolicyBucket = []byte("disabled-edge-policy-index")
139

140
        // graphMetaBucket is a top-level bucket which stores various metadata
141
        // related to the on-disk channel graph. Data stored in this bucket
142
        // includes the block to which the graph has been synced, the total
143
        // number of channels, etc.
144
        graphMetaBucket = []byte("graph-meta")
145

146
        // pruneLogBucket is a bucket within the graphMetaBucket that stores
147
        // a mapping from the block height to the hash for the blocks used to
148
        // prune the graph.
149
        // Once a new block is discovered, any channels that have been closed
150
        // (by spending the outpoint) can safely be removed from the graph, and
151
        // the block is added to the prune log. We need to keep such a log for
152
        // the case where a reorg happens, and we must "rewind" the state of the
153
        // graph by removing channels that were previously confirmed. In such a
154
        // case we'll remove all entries from the prune log with a block height
155
        // that no longer exists.
156
        pruneLogBucket = []byte("prune-log")
157

158
        // closedScidBucket is a top-level bucket that stores scids for
159
        // channels that we know to be closed. This is used so that we don't
160
        // need to perform expensive validation checks if we receive a channel
161
        // announcement for the channel again.
162
        //
163
        // maps: scid -> []byte{}
164
        closedScidBucket = []byte("closed-scid")
165
)
166

167
const (
168
        // MaxAllowedExtraOpaqueBytes is the largest amount of opaque bytes that
169
        // we'll permit to be written to disk. We limit this as otherwise, it
170
        // would be possible for a node to create a ton of updates and slowly
171
        // fill our disk, and also waste bandwidth due to relaying.
172
        MaxAllowedExtraOpaqueBytes = 10000
173
)
174

175
// KVStore is a persistent, on-disk graph representation of the Lightning
176
// Network. This struct can be used to implement path finding algorithms on top
177
// of, and also to update a node's view based on information received from the
178
// p2p network. Internally, the graph is stored using a modified adjacency list
179
// representation with some added object interaction possible with each
180
// serialized edge/node. The graph stored is directed, meaning that there are two
181
// edges stored for each channel: an inbound/outbound edge for each node pair.
182
// Nodes, edges, and edge information can all be added to the graph
183
// independently. Edge removal results in the deletion of all edge information
184
// for that edge.
185
type KVStore struct {
186
        db kvdb.Backend
187

188
        // cacheMu guards all caches (rejectCache and chanCache). If
189
        // this mutex will be acquired at the same time as the DB mutex then
190
        // the cacheMu MUST be acquired first to prevent deadlock.
191
        cacheMu     sync.RWMutex
192
        rejectCache *rejectCache
193
        chanCache   *channelCache
194

195
        chanScheduler batch.Scheduler[kvdb.RwTx]
196
        nodeScheduler batch.Scheduler[kvdb.RwTx]
197
}
198

199
// A compile-time assertion to ensure that the KVStore struct implements the
200
// V1Store interface.
201
var _ V1Store = (*KVStore)(nil)
202

203
// NewKVStore allocates a new KVStore backed by a DB instance. The
204
// returned instance has its own unique reject cache and channel cache.
205
func NewKVStore(db kvdb.Backend, options ...StoreOptionModifier) (*KVStore,
206
        error) {
172✔
207

172✔
208
        opts := DefaultOptions()
172✔
209
        for _, o := range options {
175✔
210
                o(opts)
3✔
211
        }
3✔
212

213
        if !opts.NoMigration {
344✔
214
                if err := initKVStore(db); err != nil {
172✔
215
                        return nil, err
×
216
                }
×
217
        }
218

219
        g := &KVStore{
172✔
220
                db:          db,
172✔
221
                rejectCache: newRejectCache(opts.RejectCacheSize),
172✔
222
                chanCache:   newChannelCache(opts.ChannelCacheSize),
172✔
223
        }
172✔
224
        g.chanScheduler = batch.NewTimeScheduler(
172✔
225
                batch.NewBoltBackend[kvdb.RwTx](db), &g.cacheMu,
172✔
226
                opts.BatchCommitInterval,
172✔
227
        )
172✔
228
        g.nodeScheduler = batch.NewTimeScheduler(
172✔
229
                batch.NewBoltBackend[kvdb.RwTx](db), nil,
172✔
230
                opts.BatchCommitInterval,
172✔
231
        )
172✔
232

172✔
233
        return g, nil
172✔
234
}
235
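// Illustrative usage sketch (assumes an already-open kvdb.Backend named
// `backend` and a context.Context named `ctx`; both are placeholders, not
// part of this file's API):
//
//	graphStore, err := NewKVStore(backend)
//	if err != nil {
//	        return err
//	}
//
//	// The returned store can then serve graph queries, for example:
//	sourceNode, err := graphStore.SourceNode(ctx)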

236
// channelMapKey is the key structure used for storing channel edge policies.
237
type channelMapKey struct {
238
        nodeKey route.Vertex
239
        chanID  [8]byte
240
}
241

242
// String returns a human-readable representation of the key.
243
func (c channelMapKey) String() string {
×
244
        return fmt.Sprintf("node=%v, chanID=%x", c.nodeKey, c.chanID)
×
245
}
×
246

247
// getChannelMap loads all channel edge policies from the database and stores
248
// them in a map.
249
func (c *KVStore) getChannelMap(edges kvdb.RBucket) (
250
        map[channelMapKey]*models.ChannelEdgePolicy, error) {
144✔
251

144✔
252
        // Create a map to store all channel edge policies.
144✔
253
        channelMap := make(map[channelMapKey]*models.ChannelEdgePolicy)
144✔
254

144✔
255
        err := kvdb.ForAll(edges, func(k, edgeBytes []byte) error {
1,706✔
256
                // Skip embedded buckets.
1,562✔
257
                if bytes.Equal(k, edgeIndexBucket) ||
1,562✔
258
                        bytes.Equal(k, edgeUpdateIndexBucket) ||
1,562✔
259
                        bytes.Equal(k, zombieBucket) ||
1,562✔
260
                        bytes.Equal(k, disabledEdgePolicyBucket) ||
1,562✔
261
                        bytes.Equal(k, channelPointBucket) {
2,134✔
262

572✔
263
                        return nil
572✔
264
                }
572✔
265

266
                // Validate key length.
267
                if len(k) != 33+8 {
993✔
268
                        return fmt.Errorf("invalid edge key %x encountered", k)
×
269
                }
×
270

271
                var key channelMapKey
993✔
272
                copy(key.nodeKey[:], k[:33])
993✔
273
                copy(key.chanID[:], k[33:])
993✔
274

993✔
275
                // No need to deserialize unknown policy.
993✔
276
                if bytes.Equal(edgeBytes, unknownPolicy) {
993✔
277
                        return nil
×
278
                }
×
279

280
                edgeReader := bytes.NewReader(edgeBytes)
993✔
281
                edge, err := deserializeChanEdgePolicyRaw(
993✔
282
                        edgeReader,
993✔
283
                )
993✔
284

993✔
285
                switch {
993✔
286
                // If the db policy was missing an expected optional field, we
287
                // return nil as if the policy was unknown.
288
                case errors.Is(err, ErrEdgePolicyOptionalFieldNotFound):
×
289
                        return nil
×
290

291
                // We don't want a single policy with bad TLV data to stop us
292
                // from loading the rest of the data, so we just skip this
293
                // policy. This is for backwards compatibility since we did not
294
                // use to validate TLV data in the past before persisting it.
295
                case errors.Is(err, ErrParsingExtraTLVBytes):
×
296
                        return nil
×
297

298
                case err != nil:
×
299
                        return err
×
300
                }
301

302
                channelMap[key] = edge
993✔
303

993✔
304
                return nil
993✔
305
        })
306
        if err != nil {
144✔
307
                return nil, err
×
308
        }
×
309

310
        return channelMap, nil
144✔
311
}
312
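// Illustrative sketch of the pubKey || chanID key layout that getChannelMap
// parses above (`nodePub` and `chanID` are placeholder values):
//
//	var edgeKey [33 + 8]byte
//	copy(edgeKey[:33], nodePub[:])                   // compressed public key
//	binary.BigEndian.PutUint64(edgeKey[33:], chanID) // compact channel ID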

313
var graphTopLevelBuckets = [][]byte{
314
        nodeBucket,
315
        edgeBucket,
316
        graphMetaBucket,
317
        closedScidBucket,
318
}
319

320
// createChannelDB creates and initializes a fresh version of  In
321
// the case that the target path has not yet been created or doesn't yet exist,
322
// then the path is created. Additionally, all required top-level buckets used
323
// within the database are created.
324
func initKVStore(db kvdb.Backend) error {
172✔
325
        err := kvdb.Update(db, func(tx kvdb.RwTx) error {
344✔
326
                for _, tlb := range graphTopLevelBuckets {
851✔
327
                        if _, err := tx.CreateTopLevelBucket(tlb); err != nil {
679✔
328
                                return err
×
329
                        }
×
330
                }
331

332
                nodes := tx.ReadWriteBucket(nodeBucket)
172✔
333
                _, err := nodes.CreateBucketIfNotExists(aliasIndexBucket)
172✔
334
                if err != nil {
172✔
335
                        return err
×
336
                }
×
337
                _, err = nodes.CreateBucketIfNotExists(nodeUpdateIndexBucket)
172✔
338
                if err != nil {
172✔
339
                        return err
×
340
                }
×
341

342
                edges := tx.ReadWriteBucket(edgeBucket)
172✔
343
                _, err = edges.CreateBucketIfNotExists(edgeIndexBucket)
172✔
344
                if err != nil {
172✔
345
                        return err
×
346
                }
×
347
                _, err = edges.CreateBucketIfNotExists(edgeUpdateIndexBucket)
172✔
348
                if err != nil {
172✔
349
                        return err
×
350
                }
×
351
                _, err = edges.CreateBucketIfNotExists(channelPointBucket)
172✔
352
                if err != nil {
172✔
353
                        return err
×
354
                }
×
355
                _, err = edges.CreateBucketIfNotExists(zombieBucket)
172✔
356
                if err != nil {
172✔
357
                        return err
×
358
                }
×
359

360
                graphMeta := tx.ReadWriteBucket(graphMetaBucket)
172✔
361
                _, err = graphMeta.CreateBucketIfNotExists(pruneLogBucket)
172✔
362

172✔
363
                return err
172✔
364
        }, func() {})
172✔
365
        if err != nil {
172✔
366
                return fmt.Errorf("unable to create new channel graph: %w", err)
×
367
        }
×
368

369
        return nil
172✔
370
}
371

372
// AddrsForNode returns all known addresses for the target node public key that
373
// the graph DB is aware of. The returned boolean indicates whether the given
374
// node is known to the graph DB.
375
//
376
// NOTE: this is part of the channeldb.AddrSource interface.
377
func (c *KVStore) AddrsForNode(ctx context.Context,
378
        nodePub *btcec.PublicKey) (bool, []net.Addr, error) {
6✔
379

6✔
380
        pubKey, err := route.NewVertexFromBytes(nodePub.SerializeCompressed())
6✔
381
        if err != nil {
6✔
382
                return false, nil, err
×
383
        }
×
384

385
        node, err := c.FetchLightningNode(ctx, pubKey)
6✔
386
        // We don't consider it an error if the graph is unaware of the node.
6✔
387
        switch {
6✔
388
        case err != nil && !errors.Is(err, ErrGraphNodeNotFound):
×
389
                return false, nil, err
×
390

391
        case errors.Is(err, ErrGraphNodeNotFound):
4✔
392
                return false, nil, nil
4✔
393
        }
394

395
        return true, node.Addresses, nil
5✔
396
}
397

398
// ForEachChannel iterates through all the channel edges stored within the
399
// graph and invokes the passed callback for each edge. The callback takes two
400
// edges since, as this is a directed graph, both the in/out edges are visited.
401
// If the callback returns an error, then the transaction is aborted and the
402
// iteration stops early.
403
//
404
// NOTE: If an edge can't be found, or wasn't advertised, then a nil pointer
405
// for that particular channel edge routing policy will be passed into the
406
// callback.
407
func (c *KVStore) ForEachChannel(cb func(*models.ChannelEdgeInfo,
408
        *models.ChannelEdgePolicy, *models.ChannelEdgePolicy) error) error {
7✔
409

7✔
410
        return c.db.View(func(tx kvdb.RTx) error {
14✔
411
                edges := tx.ReadBucket(edgeBucket)
7✔
412
                if edges == nil {
7✔
413
                        return ErrGraphNoEdgesFound
×
414
                }
×
415

416
                // First, load all edges in memory indexed by node and channel
417
                // id.
418
                channelMap, err := c.getChannelMap(edges)
7✔
419
                if err != nil {
7✔
420
                        return err
×
421
                }
×
422

423
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
7✔
424
                if edgeIndex == nil {
7✔
425
                        return ErrGraphNoEdgesFound
×
426
                }
×
427

428
                // Load edge index, recombine each channel with the policies
429
                // loaded above and invoke the callback.
430
                return kvdb.ForAll(
7✔
431
                        edgeIndex, func(k, edgeInfoBytes []byte) error {
109✔
432
                                var chanID [8]byte
102✔
433
                                copy(chanID[:], k)
102✔
434

102✔
435
                                edgeInfoReader := bytes.NewReader(edgeInfoBytes)
102✔
436
                                info, err := deserializeChanEdgeInfo(
102✔
437
                                        edgeInfoReader,
102✔
438
                                )
102✔
439
                                if err != nil {
102✔
440
                                        return err
×
441
                                }
×
442

443
                                policy1 := channelMap[channelMapKey{
102✔
444
                                        nodeKey: info.NodeKey1Bytes,
102✔
445
                                        chanID:  chanID,
102✔
446
                                }]
102✔
447

102✔
448
                                policy2 := channelMap[channelMapKey{
102✔
449
                                        nodeKey: info.NodeKey2Bytes,
102✔
450
                                        chanID:  chanID,
102✔
451
                                }]
102✔
452

102✔
453
                                return cb(&info, policy1, policy2)
102✔
454
                        },
455
                )
456
        }, func() {})
7✔
457
}
458
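// Illustrative sketch (`graphStore` is a placeholder for a *KVStore):
// counting every channel in the graph via ForEachChannel.
//
//	var numChannels int
//	err := graphStore.ForEachChannel(func(info *models.ChannelEdgeInfo,
//	        policy1, policy2 *models.ChannelEdgePolicy) error {
//
//	        // Either policy may be nil if it was never advertised.
//	        numChannels++
//	        return nil
//	})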

459
// ForEachChannelCacheable iterates through all the channel edges stored within
460
// the graph and invokes the passed callback for each edge. The callback takes
461
// two edges as since this is a directed graph, both the in/out edges are
462
// visited. If the callback returns an error, then the transaction is aborted
463
// and the iteration stops early.
464
//
465
// NOTE: If an edge can't be found, or wasn't advertised, then a nil pointer
466
// for that particular channel edge routing policy will be passed into the
467
// callback.
468
//
469
// NOTE: this method is like ForEachChannel but fetches only the data required
470
// for the graph cache.
471
func (c *KVStore) ForEachChannelCacheable(cb func(*models.CachedEdgeInfo,
472
        *models.CachedEdgePolicy, *models.CachedEdgePolicy) error) error {
140✔
473

140✔
474
        return c.db.View(func(tx kvdb.RTx) error {
280✔
475
                edges := tx.ReadBucket(edgeBucket)
140✔
476
                if edges == nil {
140✔
477
                        return ErrGraphNoEdgesFound
×
478
                }
×
479

480
                // First, load all edges in memory indexed by node and channel
481
                // id.
482
                channelMap, err := c.getChannelMap(edges)
140✔
483
                if err != nil {
140✔
484
                        return err
×
485
                }
×
486

487
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
140✔
488
                if edgeIndex == nil {
140✔
489
                        return ErrGraphNoEdgesFound
×
490
                }
×
491

492
                // Load edge index, recombine each channel with the policies
493
                // loaded above and invoke the callback.
494
                return kvdb.ForAll(
140✔
495
                        edgeIndex, func(k, edgeInfoBytes []byte) error {
539✔
496
                                var chanID [8]byte
399✔
497
                                copy(chanID[:], k)
399✔
498

399✔
499
                                edgeInfoReader := bytes.NewReader(edgeInfoBytes)
399✔
500
                                info, err := deserializeChanEdgeInfo(
399✔
501
                                        edgeInfoReader,
399✔
502
                                )
399✔
503
                                if err != nil {
399✔
504
                                        return err
×
505
                                }
×
506

507
                                key1 := channelMapKey{
399✔
508
                                        nodeKey: info.NodeKey1Bytes,
399✔
509
                                        chanID:  chanID,
399✔
510
                                }
399✔
511
                                policy1 := channelMap[key1]
399✔
512

399✔
513
                                key2 := channelMapKey{
399✔
514
                                        nodeKey: info.NodeKey2Bytes,
399✔
515
                                        chanID:  chanID,
399✔
516
                                }
399✔
517
                                policy2 := channelMap[key2]
399✔
518

399✔
519
                                // We now create the cached edge policies, but
399✔
520
                                // only when the above policies are found in the
399✔
521
                                // `channelMap`.
399✔
522
                                var (
399✔
523
                                        cachedPolicy1 *models.CachedEdgePolicy
399✔
524
                                        cachedPolicy2 *models.CachedEdgePolicy
399✔
525
                                )
399✔
526

399✔
527
                                if policy1 != nil {
798✔
528
                                        cachedPolicy1 = models.NewCachedPolicy(
399✔
529
                                                policy1,
399✔
530
                                        )
399✔
531
                                }
399✔
532

533
                                if policy2 != nil {
798✔
534
                                        cachedPolicy2 = models.NewCachedPolicy(
399✔
535
                                                policy2,
399✔
536
                                        )
399✔
537
                                }
399✔
538

539
                                return cb(
399✔
540
                                        models.NewCachedEdge(&info),
399✔
541
                                        cachedPolicy1, cachedPolicy2,
399✔
542
                                )
399✔
543
                        },
544
                )
545
        }, func() {})
140✔
546
}
547

548
// forEachNodeDirectedChannel iterates through all channels of a given node,
549
// executing the passed callback on the directed edge representing the channel
550
// and its incoming policy. If the callback returns an error, then the iteration
551
// is halted with the error propagated back up to the caller. An optional read
552
// transaction may be provided. If none is provided, a new one will be created.
553
//
554
// Unknown policies are passed into the callback as nil values.
555
func (c *KVStore) forEachNodeDirectedChannel(tx kvdb.RTx,
556
        node route.Vertex, cb func(channel *DirectedChannel) error) error {
265✔
557

265✔
558
        // Fallback that uses the database.
265✔
559
        toNodeCallback := func() route.Vertex {
400✔
560
                return node
135✔
561
        }
135✔
562
        toNodeFeatures, err := c.fetchNodeFeatures(tx, node)
265✔
563
        if err != nil {
265✔
564
                return err
×
565
        }
×
566

567
        dbCallback := func(tx kvdb.RTx, e *models.ChannelEdgeInfo, p1,
265✔
568
                p2 *models.ChannelEdgePolicy) error {
954✔
569

689✔
570
                var cachedInPolicy *models.CachedEdgePolicy
689✔
571
                if p2 != nil {
1,375✔
572
                        cachedInPolicy = models.NewCachedPolicy(p2)
686✔
573
                        cachedInPolicy.ToNodePubKey = toNodeCallback
686✔
574
                        cachedInPolicy.ToNodeFeatures = toNodeFeatures
686✔
575
                }
686✔
576

577
                directedChannel := &DirectedChannel{
689✔
578
                        ChannelID:    e.ChannelID,
689✔
579
                        IsNode1:      node == e.NodeKey1Bytes,
689✔
580
                        OtherNode:    e.NodeKey2Bytes,
689✔
581
                        Capacity:     e.Capacity,
689✔
582
                        OutPolicySet: p1 != nil,
689✔
583
                        InPolicy:     cachedInPolicy,
689✔
584
                }
689✔
585

689✔
586
                if p1 != nil {
1,377✔
587
                        p1.InboundFee.WhenSome(func(fee lnwire.Fee) {
1,024✔
588
                                directedChannel.InboundFee = fee
336✔
589
                        })
336✔
590
                }
591

592
                if node == e.NodeKey2Bytes {
1,035✔
593
                        directedChannel.OtherNode = e.NodeKey1Bytes
346✔
594
                }
346✔
595

596
                return cb(directedChannel)
689✔
597
        }
598

599
        return nodeTraversal(tx, node[:], c.db, dbCallback)
265✔
600
}
601

602
// fetchNodeFeatures returns the features of a given node. If no features are
603
// known for the node, an empty feature vector is returned. An optional read
604
// transaction may be provided. If none is provided, a new one will be created.
605
func (c *KVStore) fetchNodeFeatures(tx kvdb.RTx,
606
        node route.Vertex) (*lnwire.FeatureVector, error) {
710✔
607

710✔
608
        // Fallback that uses the database.
710✔
609
        targetNode, err := c.FetchLightningNodeTx(tx, node)
710✔
610
        switch {
710✔
611
        // If the node exists and has features, return them directly.
612
        case err == nil:
699✔
613
                return targetNode.Features, nil
699✔
614

615
        // If we couldn't find a node announcement, populate a blank feature
616
        // vector.
617
        case errors.Is(err, ErrGraphNodeNotFound):
11✔
618
                return lnwire.EmptyFeatureVector(), nil
11✔
619

620
        // Otherwise, bubble the error up.
621
        default:
×
622
                return nil, err
×
623
        }
624
}
625

626
// ForEachNodeDirectedChannel iterates through all channels of a given node,
627
// executing the passed callback on the directed edge representing the channel
628
// and its incoming policy. If the callback returns an error, then the iteration
629
// is halted with the error propagated back up to the caller.
630
//
631
// Unknown policies are passed into the callback as nil values.
632
//
633
// NOTE: this is part of the graphdb.NodeTraverser interface.
634
func (c *KVStore) ForEachNodeDirectedChannel(nodePub route.Vertex,
635
        cb func(channel *DirectedChannel) error) error {
26✔
636

26✔
637
        return c.forEachNodeDirectedChannel(nil, nodePub, cb)
26✔
638
}
26✔
639
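// Illustrative sketch (`graphStore` and `nodePub` are placeholders):
// collecting the channel IDs of one node's channels.
//
//	var chanIDs []uint64
//	err := graphStore.ForEachNodeDirectedChannel(nodePub,
//	        func(ch *DirectedChannel) error {
//	                chanIDs = append(chanIDs, ch.ChannelID)
//	                return nil
//	        },
//	)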

640
// FetchNodeFeatures returns the features of the given node. If no features are
641
// known for the node, an empty feature vector is returned.
642
//
643
// NOTE: this is part of the graphdb.NodeTraverser interface.
644
func (c *KVStore) FetchNodeFeatures(nodePub route.Vertex) (
645
        *lnwire.FeatureVector, error) {
4✔
646

4✔
647
        return c.fetchNodeFeatures(nil, nodePub)
4✔
648
}
4✔
649

650
// ForEachNodeCached is similar to forEachNode, but it returns DirectedChannel
651
// data to the call-back.
652
//
653
// NOTE: The callback contents MUST not be modified.
654
func (c *KVStore) ForEachNodeCached(cb func(node route.Vertex,
655
        chans map[uint64]*DirectedChannel) error) error {
1✔
656

1✔
657
        // Otherwise call back to a version that uses the database directly.
1✔
658
        // We'll iterate over each node, then the set of channels for each
1✔
659
        // node, and construct a similar callback functiopn signature as the
1✔
660
        // main function expects.
1✔
661
        return c.forEachNode(func(tx kvdb.RTx,
1✔
662
                node *models.LightningNode) error {
21✔
663

20✔
664
                channels := make(map[uint64]*DirectedChannel)
20✔
665

20✔
666
                err := c.forEachNodeChannelTx(tx, node.PubKeyBytes,
20✔
667
                        func(tx kvdb.RTx, e *models.ChannelEdgeInfo,
20✔
668
                                p1 *models.ChannelEdgePolicy,
20✔
669
                                p2 *models.ChannelEdgePolicy) error {
210✔
670

190✔
671
                                toNodeCallback := func() route.Vertex {
190✔
672
                                        return node.PubKeyBytes
×
673
                                }
×
674
                                toNodeFeatures, err := c.fetchNodeFeatures(
190✔
675
                                        tx, node.PubKeyBytes,
190✔
676
                                )
190✔
677
                                if err != nil {
190✔
678
                                        return err
×
679
                                }
×
680

681
                                var cachedInPolicy *models.CachedEdgePolicy
190✔
682
                                if p2 != nil {
380✔
683
                                        cachedInPolicy =
190✔
684
                                                models.NewCachedPolicy(p2)
190✔
685
                                        cachedInPolicy.ToNodePubKey =
190✔
686
                                                toNodeCallback
190✔
687
                                        cachedInPolicy.ToNodeFeatures =
190✔
688
                                                toNodeFeatures
190✔
689
                                }
190✔
690

691
                                directedChannel := &DirectedChannel{
190✔
692
                                        ChannelID: e.ChannelID,
190✔
693
                                        IsNode1: node.PubKeyBytes ==
190✔
694
                                                e.NodeKey1Bytes,
190✔
695
                                        OtherNode:    e.NodeKey2Bytes,
190✔
696
                                        Capacity:     e.Capacity,
190✔
697
                                        OutPolicySet: p1 != nil,
190✔
698
                                        InPolicy:     cachedInPolicy,
190✔
699
                                }
190✔
700

190✔
701
                                if node.PubKeyBytes == e.NodeKey2Bytes {
285✔
702
                                        directedChannel.OtherNode =
95✔
703
                                                e.NodeKey1Bytes
95✔
704
                                }
95✔
705

706
                                channels[e.ChannelID] = directedChannel
190✔
707

190✔
708
                                return nil
190✔
709
                        })
710
                if err != nil {
20✔
711
                        return err
×
712
                }
×
713

714
                return cb(node.PubKeyBytes, channels)
20✔
715
        })
716
}
717

718
// DisabledChannelIDs returns the channel ids of disabled channels.
719
// A channel is disabled when two of the associated ChannelEdgePolicies
720
// have their disabled bit on.
721
func (c *KVStore) DisabledChannelIDs() ([]uint64, error) {
6✔
722
        var disabledChanIDs []uint64
6✔
723
        var chanEdgeFound map[uint64]struct{}
6✔
724

6✔
725
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
12✔
726
                edges := tx.ReadBucket(edgeBucket)
6✔
727
                if edges == nil {
6✔
728
                        return ErrGraphNoEdgesFound
×
729
                }
×
730

731
                disabledEdgePolicyIndex := edges.NestedReadBucket(
6✔
732
                        disabledEdgePolicyBucket,
6✔
733
                )
6✔
734
                if disabledEdgePolicyIndex == nil {
7✔
735
                        return nil
1✔
736
                }
1✔
737

738
                // We iterate over all disabled policies and we add each channel
739
                // that has more than one disabled policy to disabledChanIDs
740
                // array.
741
                return disabledEdgePolicyIndex.ForEach(
5✔
742
                        func(k, v []byte) error {
16✔
743
                                chanID := byteOrder.Uint64(k[:8])
11✔
744
                                _, edgeFound := chanEdgeFound[chanID]
11✔
745
                                if edgeFound {
15✔
746
                                        delete(chanEdgeFound, chanID)
4✔
747
                                        disabledChanIDs = append(
4✔
748
                                                disabledChanIDs, chanID,
4✔
749
                                        )
4✔
750

4✔
751
                                        return nil
4✔
752
                                }
4✔
753

754
                                chanEdgeFound[chanID] = struct{}{}
7✔
755

7✔
756
                                return nil
7✔
757
                        },
758
                )
759
        }, func() {
6✔
760
                disabledChanIDs = nil
6✔
761
                chanEdgeFound = make(map[uint64]struct{})
6✔
762
        })
6✔
763
        if err != nil {
6✔
764
                return nil, err
×
765
        }
×
766

767
        return disabledChanIDs, nil
6✔
768
}
769

770
// ForEachNode iterates through all the stored vertices/nodes in the graph,
771
// executing the passed callback with each node encountered. If the callback
772
// returns an error, then the transaction is aborted and the iteration stops
773
// early. Any operations performed on the NodeTx passed to the call-back are
774
// executed under the same read transaction and so, methods on the NodeTx object
775
// _MUST_ only be called from within the call-back.
776
func (c *KVStore) ForEachNode(cb func(tx NodeRTx) error) error {
131✔
777
        return c.forEachNode(func(tx kvdb.RTx,
131✔
778
                node *models.LightningNode) error {
1,292✔
779

1,161✔
780
                return cb(newChanGraphNodeTx(tx, c, node))
1,161✔
781
        })
1,161✔
782
}
783

784
// forEachNode iterates through all the stored vertices/nodes in the graph,
785
// executing the passed callback with each node encountered. If the callback
786
// returns an error, then the transaction is aborted and the iteration stops
787
// early.
788
//
789
// TODO(roasbeef): add iterator interface to allow for memory efficient graph
790
// traversal when graph gets mega.
791
func (c *KVStore) forEachNode(
792
        cb func(kvdb.RTx, *models.LightningNode) error) error {
132✔
793

132✔
794
        traversal := func(tx kvdb.RTx) error {
264✔
795
                // First grab the nodes bucket which stores the mapping from
132✔
796
                // pubKey to node information.
132✔
797
                nodes := tx.ReadBucket(nodeBucket)
132✔
798
                if nodes == nil {
132✔
799
                        return ErrGraphNotFound
×
800
                }
×
801

802
                return nodes.ForEach(func(pubKey, nodeBytes []byte) error {
1,574✔
803
                        // If this is the source key, then we skip this
1,442✔
804
                        // iteration as the value for this key is a pubKey
1,442✔
805
                        // rather than raw node information.
1,442✔
806
                        if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 {
1,706✔
807
                                return nil
264✔
808
                        }
264✔
809

810
                        nodeReader := bytes.NewReader(nodeBytes)
1,181✔
811
                        node, err := deserializeLightningNode(nodeReader)
1,181✔
812
                        if err != nil {
1,181✔
813
                                return err
×
814
                        }
×
815

816
                        // Execute the callback, the transaction will abort if
817
                        // this returns an error.
818
                        return cb(tx, &node)
1,181✔
819
                })
820
        }
821

822
        return kvdb.View(c.db, traversal, func() {})
264✔
823
}
824

825
// ForEachNodeCacheable iterates through all the stored vertices/nodes in the
826
// graph, executing the passed callback with each node encountered. If the
827
// callback returns an error, then the transaction is aborted and the iteration
828
// stops early.
829
func (c *KVStore) ForEachNodeCacheable(cb func(route.Vertex,
830
        *lnwire.FeatureVector) error) error {
141✔
831

141✔
832
        traversal := func(tx kvdb.RTx) error {
282✔
833
                // First grab the nodes bucket which stores the mapping from
141✔
834
                // pubKey to node information.
141✔
835
                nodes := tx.ReadBucket(nodeBucket)
141✔
836
                if nodes == nil {
141✔
837
                        return ErrGraphNotFound
×
838
                }
×
839

840
                return nodes.ForEach(func(pubKey, nodeBytes []byte) error {
540✔
841
                        // If this is the source key, then we skip this
399✔
842
                        // iteration as the value for this key is a pubKey
399✔
843
                        // rather than raw node information.
399✔
844
                        if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 {
678✔
845
                                return nil
279✔
846
                        }
279✔
847

848
                        nodeReader := bytes.NewReader(nodeBytes)
123✔
849
                        node, features, err := deserializeLightningNodeCacheable( //nolint:ll
123✔
850
                                nodeReader,
123✔
851
                        )
123✔
852
                        if err != nil {
123✔
853
                                return err
×
854
                        }
×
855

856
                        // Execute the callback, the transaction will abort if
857
                        // this returns an error.
858
                        return cb(node, features)
123✔
859
                })
860
        }
861

862
        return kvdb.View(c.db, traversal, func() {})
282✔
863
}
864

865
// SourceNode returns the source node of the graph. The source node is treated
866
// as the center node within a star-graph. This method may be used to kick off
867
// a path finding algorithm in order to explore the reachability of another
868
// node based off the source node.
869
func (c *KVStore) SourceNode(_ context.Context) (*models.LightningNode,
870
        error) {
241✔
871

241✔
872
        var source *models.LightningNode
241✔
873
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
482✔
874
                // First grab the nodes bucket which stores the mapping from
241✔
875
                // pubKey to node information.
241✔
876
                nodes := tx.ReadBucket(nodeBucket)
241✔
877
                if nodes == nil {
241✔
878
                        return ErrGraphNotFound
×
879
                }
×
880

881
                node, err := c.sourceNode(nodes)
241✔
882
                if err != nil {
245✔
883
                        return err
4✔
884
                }
4✔
885
                source = node
240✔
886

240✔
887
                return nil
240✔
888
        }, func() {
241✔
889
                source = nil
241✔
890
        })
241✔
891
        if err != nil {
245✔
892
                return nil, err
4✔
893
        }
4✔
894

895
        return source, nil
240✔
896
}
897

898
// sourceNode uses an existing database transaction and returns the source node
899
// of the graph. The source node is treated as the center node within a
900
// star-graph. This method may be used to kick off a path finding algorithm in
901
// order to explore the reachability of another node based off the source node.
902
func (c *KVStore) sourceNode(nodes kvdb.RBucket) (*models.LightningNode,
903
        error) {
508✔
904

508✔
905
        selfPub := nodes.Get(sourceKey)
508✔
906
        if selfPub == nil {
512✔
907
                return nil, ErrSourceNodeNotSet
4✔
908
        }
4✔
909

910
        // With the pubKey of the source node retrieved, we're able to
911
        // fetch the full node information.
912
        node, err := fetchLightningNode(nodes, selfPub)
507✔
913
        if err != nil {
507✔
914
                return nil, err
×
915
        }
×
916

917
        return &node, nil
507✔
918
}
919

920
// SetSourceNode sets the source node within the graph database. The source
921
// node is to be used as the center of a star-graph within path finding
922
// algorithms.
923
func (c *KVStore) SetSourceNode(_ context.Context,
924
        node *models.LightningNode) error {
117✔
925

117✔
926
        nodePubBytes := node.PubKeyBytes[:]
117✔
927

117✔
928
        return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
234✔
929
                // First grab the nodes bucket which stores the mapping from
117✔
930
                // pubKey to node information.
117✔
931
                nodes, err := tx.CreateTopLevelBucket(nodeBucket)
117✔
932
                if err != nil {
117✔
933
                        return err
×
934
                }
×
935

936
                // Next we create the mapping from source to the targeted
937
                // public key.
938
                if err := nodes.Put(sourceKey, nodePubBytes); err != nil {
117✔
939
                        return err
×
940
                }
×
941

942
                // Finally, we commit the information of the lightning node
943
                // itself.
944
                return addLightningNode(tx, node)
117✔
945
        }, func() {})
117✔
946
}
947
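// Illustrative sketch (`graphStore`, `ctx`, and `selfNode` are placeholders):
// persisting the local node as the graph's source node and reading it back.
//
//	if err := graphStore.SetSourceNode(ctx, selfNode); err != nil {
//	        return err
//	}
//	source, err := graphStore.SourceNode(ctx)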

948
// AddLightningNode adds a vertex/node to the graph database. If the node is not
949
// in the database from before, this will add a new, unconnected one to the
950
// graph. If it is present from before, this will update that node's
951
// information. Note that this method is expected to only be called to update an
952
// already present node from a node announcement, or to insert a node found in a
953
// channel update.
954
//
955
// TODO(roasbeef): also need sig of announcement.
956
func (c *KVStore) AddLightningNode(ctx context.Context,
957
        node *models.LightningNode, opts ...batch.SchedulerOption) error {
715✔
958

715✔
959
        r := &batch.Request[kvdb.RwTx]{
715✔
960
                Opts: batch.NewSchedulerOptions(opts...),
715✔
961
                Do: func(tx kvdb.RwTx) error {
1,430✔
962
                        return addLightningNode(tx, node)
715✔
963
                },
715✔
964
        }
965

966
        return c.nodeScheduler.Execute(ctx, r)
715✔
967
}
968

969
func addLightningNode(tx kvdb.RwTx, node *models.LightningNode) error {
901✔
970
        nodes, err := tx.CreateTopLevelBucket(nodeBucket)
901✔
971
        if err != nil {
901✔
972
                return err
×
973
        }
×
974

975
        aliases, err := nodes.CreateBucketIfNotExists(aliasIndexBucket)
901✔
976
        if err != nil {
901✔
977
                return err
×
978
        }
×
979

980
        updateIndex, err := nodes.CreateBucketIfNotExists(
901✔
981
                nodeUpdateIndexBucket,
901✔
982
        )
901✔
983
        if err != nil {
901✔
984
                return err
×
985
        }
×
986

987
        return putLightningNode(nodes, aliases, updateIndex, node)
901✔
988
}
989

990
// LookupAlias attempts to return the alias as advertised by the target node.
991
// TODO(roasbeef): currently assumes that aliases are unique...
992
func (c *KVStore) LookupAlias(_ context.Context,
993
        pub *btcec.PublicKey) (string, error) {
5✔
994

5✔
995
        var alias string
5✔
996

5✔
997
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
10✔
998
                nodes := tx.ReadBucket(nodeBucket)
5✔
999
                if nodes == nil {
5✔
1000
                        return ErrGraphNodesNotFound
×
1001
                }
×
1002

1003
                aliases := nodes.NestedReadBucket(aliasIndexBucket)
5✔
1004
                if aliases == nil {
5✔
1005
                        return ErrGraphNodesNotFound
×
1006
                }
×
1007

1008
                nodePub := pub.SerializeCompressed()
5✔
1009
                a := aliases.Get(nodePub)
5✔
1010
                if a == nil {
6✔
1011
                        return ErrNodeAliasNotFound
1✔
1012
                }
1✔
1013

1014
                // TODO(roasbeef): should actually be using the utf-8
1015
                // package...
1016
                alias = string(a)
4✔
1017

4✔
1018
                return nil
4✔
1019
        }, func() {
5✔
1020
                alias = ""
5✔
1021
        })
5✔
1022
        if err != nil {
6✔
1023
                return "", err
1✔
1024
        }
1✔
1025

1026
        return alias, nil
4✔
1027
}
1028

1029
// DeleteLightningNode starts a new database transaction to remove a vertex/node
1030
// from the database according to the node's public key.
1031
func (c *KVStore) DeleteLightningNode(_ context.Context,
1032
        nodePub route.Vertex) error {
4✔
1033

4✔
1034
        // TODO(roasbeef): ensure dangling edges are removed...
4✔
1035
        return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
8✔
1036
                nodes := tx.ReadWriteBucket(nodeBucket)
4✔
1037
                if nodes == nil {
4✔
1038
                        return ErrGraphNodeNotFound
×
1039
                }
×
1040

1041
                return c.deleteLightningNode(nodes, nodePub[:])
4✔
1042
        }, func() {})
4✔
1043
}
1044

1045
// deleteLightningNode uses an existing database transaction to remove a
1046
// vertex/node from the database according to the node's public key.
1047
func (c *KVStore) deleteLightningNode(nodes kvdb.RwBucket,
1048
        compressedPubKey []byte) error {
61✔
1049

61✔
1050
        aliases := nodes.NestedReadWriteBucket(aliasIndexBucket)
61✔
1051
        if aliases == nil {
61✔
1052
                return ErrGraphNodesNotFound
×
1053
        }
×
1054

1055
        if err := aliases.Delete(compressedPubKey); err != nil {
61✔
1056
                return err
×
1057
        }
×
1058

1059
        // Before we delete the node, we'll fetch its current state so we can
1060
        // determine when its last update was to clear out the node update
1061
        // index.
1062
        node, err := fetchLightningNode(nodes, compressedPubKey)
61✔
1063
        if err != nil {
62✔
1064
                return err
1✔
1065
        }
1✔
1066

1067
        if err := nodes.Delete(compressedPubKey); err != nil {
60✔
1068
                return err
×
1069
        }
×
1070

1071
        // Finally, we'll delete the index entry for the node within the
1072
        // nodeUpdateIndexBucket as this node is no longer active, so we don't
1073
        // need to track its last update.
1074
        nodeUpdateIndex := nodes.NestedReadWriteBucket(nodeUpdateIndexBucket)
60✔
1075
        if nodeUpdateIndex == nil {
60✔
1076
                return ErrGraphNodesNotFound
×
1077
        }
×
1078

1079
        // In order to delete the entry, we'll need to reconstruct the key for
1080
        // its last update.
1081
        updateUnix := uint64(node.LastUpdate.Unix())
60✔
1082
        var indexKey [8 + 33]byte
60✔
1083
        byteOrder.PutUint64(indexKey[:8], updateUnix)
60✔
1084
        copy(indexKey[8:], compressedPubKey)
60✔
1085

60✔
1086
        return nodeUpdateIndex.Delete(indexKey[:])
60✔
1087
}
1088

1089
// AddChannelEdge adds a new (undirected, blank) edge to the graph database. An
1090
// undirected edge from the two target nodes are created. The information stored
1091
// denotes the static attributes of the channel, such as the channelID, the keys
1092
// involved in creation of the channel, and the set of features that the channel
1093
// supports. The chanPoint and chanID are used to uniquely identify the edge
1094
// globally within the database.
1095
func (c *KVStore) AddChannelEdge(ctx context.Context,
1096
        edge *models.ChannelEdgeInfo, opts ...batch.SchedulerOption) error {
1,734✔
1097

1,734✔
1098
        var alreadyExists bool
1,734✔
1099
        r := &batch.Request[kvdb.RwTx]{
1,734✔
1100
                Opts: batch.NewSchedulerOptions(opts...),
1,734✔
1101
                Reset: func() {
3,468✔
1102
                        alreadyExists = false
1,734✔
1103
                },
1,734✔
1104
                Do: func(tx kvdb.RwTx) error {
1,734✔
1105
                        err := c.addChannelEdge(tx, edge)
1,734✔
1106

1,734✔
1107
                        // Silence ErrEdgeAlreadyExist so that the batch can
1,734✔
1108
                        // succeed, but propagate the error via local state.
1,734✔
1109
                        if errors.Is(err, ErrEdgeAlreadyExist) {
1,971✔
1110
                                alreadyExists = true
237✔
1111
                                return nil
237✔
1112
                        }
237✔
1113

1114
                        return err
1,497✔
1115
                },
1116
                OnCommit: func(err error) error {
1,734✔
1117
                        switch {
1,734✔
1118
                        case err != nil:
×
1119
                                return err
×
1120
                        case alreadyExists:
237✔
1121
                                return ErrEdgeAlreadyExist
237✔
1122
                        default:
1,497✔
1123
                                c.rejectCache.remove(edge.ChannelID)
1,497✔
1124
                                c.chanCache.remove(edge.ChannelID)
1,497✔
1125
                                return nil
1,497✔
1126
                        }
1127
                },
1128
        }
1129

1130
        return c.chanScheduler.Execute(ctx, r)
1,734✔
1131
}
1132
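// Illustrative sketch (`graphStore`, `ctx`, and the edge field values are
// placeholders): inserting a channel edge and treating a duplicate insert as
// a no-op, mirroring how ErrEdgeAlreadyExist is surfaced above.
//
//	edge := &models.ChannelEdgeInfo{
//	        ChannelID:     chanID,
//	        NodeKey1Bytes: node1Pub,
//	        NodeKey2Bytes: node2Pub,
//	        ChannelPoint:  chanPoint,
//	        Capacity:      capacity,
//	}
//	err := graphStore.AddChannelEdge(ctx, edge)
//	if err != nil && !errors.Is(err, ErrEdgeAlreadyExist) {
//	        return err
//	}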

1133
// addChannelEdge is the private form of AddChannelEdge that allows callers to
1134
// utilize an existing db transaction.
1135
func (c *KVStore) addChannelEdge(tx kvdb.RwTx,
1136
        edge *models.ChannelEdgeInfo) error {
1,734✔
1137

1,734✔
1138
        // Construct the channel's primary key which is the 8-byte channel ID.
1,734✔
1139
        var chanKey [8]byte
1,734✔
1140
        binary.BigEndian.PutUint64(chanKey[:], edge.ChannelID)
1,734✔
1141

1,734✔
1142
        nodes, err := tx.CreateTopLevelBucket(nodeBucket)
1,734✔
1143
        if err != nil {
1,734✔
1144
                return err
×
1145
        }
×
1146
        edges, err := tx.CreateTopLevelBucket(edgeBucket)
1,734✔
1147
        if err != nil {
1,734✔
1148
                return err
×
1149
        }
×
1150
        edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket)
1,734✔
1151
        if err != nil {
1,734✔
1152
                return err
×
1153
        }
×
1154
        chanIndex, err := edges.CreateBucketIfNotExists(channelPointBucket)
1,734✔
1155
        if err != nil {
1,734✔
1156
                return err
×
1157
        }
×
1158

1159
        // First, attempt to check if this edge has already been created. If
1160
        // so, then we can exit early as this method is meant to be idempotent.
1161
        if edgeInfo := edgeIndex.Get(chanKey[:]); edgeInfo != nil {
1,971✔
1162
                return ErrEdgeAlreadyExist
237✔
1163
        }
237✔
1164

1165
        // Before we insert the channel into the database, we'll ensure that
1166
        // both nodes already exist in the channel graph. If either node
1167
        // doesn't, then we'll insert a "shell" node that just includes its
1168
        // public key, so subsequent validation and queries can work properly.
1169
        _, node1Err := fetchLightningNode(nodes, edge.NodeKey1Bytes[:])
1,497✔
1170
        switch {
1,497✔
1171
        case errors.Is(node1Err, ErrGraphNodeNotFound):
25✔
1172
                node1Shell := models.LightningNode{
25✔
1173
                        PubKeyBytes:          edge.NodeKey1Bytes,
25✔
1174
                        HaveNodeAnnouncement: false,
25✔
1175
                }
25✔
1176
                err := addLightningNode(tx, &node1Shell)
25✔
1177
                if err != nil {
25✔
1178
                        return fmt.Errorf("unable to create shell node "+
×
1179
                                "for: %x: %w", edge.NodeKey1Bytes, err)
×
1180
                }
×
1181
        case node1Err != nil:
×
1182
                return node1Err
×
1183
        }
1184

1185
        _, node2Err := fetchLightningNode(nodes, edge.NodeKey2Bytes[:])
1,497✔
1186
        switch {
1,497✔
1187
        case errors.Is(node2Err, ErrGraphNodeNotFound):
53✔
1188
                node2Shell := models.LightningNode{
53✔
1189
                        PubKeyBytes:          edge.NodeKey2Bytes,
53✔
1190
                        HaveNodeAnnouncement: false,
53✔
1191
                }
53✔
1192
                err := addLightningNode(tx, &node2Shell)
53✔
1193
                if err != nil {
53✔
1194
                        return fmt.Errorf("unable to create shell node "+
×
1195
                                "for: %x: %w", edge.NodeKey2Bytes, err)
×
1196
                }
×
1197
        case node2Err != nil:
×
1198
                return node2Err
×
1199
        }
1200

1201
        // If the edge hasn't been created yet, then we'll first add it to the
1202
        // edge index in order to associate the edge between two nodes and also
1203
        // store the static components of the channel.
1204
        if err := putChanEdgeInfo(edgeIndex, edge, chanKey); err != nil {
1,497✔
1205
                return err
×
1206
        }
×
1207

1208
        // Mark edge policies for both sides as unknown. This is to enable
1209
        // efficient incoming channel lookup for a node.
1210
        keys := []*[33]byte{
1,497✔
1211
                &edge.NodeKey1Bytes,
1,497✔
1212
                &edge.NodeKey2Bytes,
1,497✔
1213
        }
1,497✔
1214
        for _, key := range keys {
4,488✔
1215
                err := putChanEdgePolicyUnknown(edges, edge.ChannelID, key[:])
2,991✔
1216
                if err != nil {
2,991✔
1217
                        return err
×
1218
                }
×
1219
        }
1220

1221
        // Finally we add it to the channel index which maps channel points
1222
        // (outpoints) to the shorter channel ID's.
1223
        var b bytes.Buffer
1,497✔
1224
        if err := WriteOutpoint(&b, &edge.ChannelPoint); err != nil {
1,497✔
1225
                return err
×
1226
        }
×
1227

1228
        return chanIndex.Put(b.Bytes(), chanKey[:])
1,497✔
1229
}
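
// Illustrative sketch (not part of the original file): because the insert
// above is idempotent, a caller can usually treat ErrEdgeAlreadyExist as a
// no-op. The insertEdge parameter is a hypothetical stand-in for whichever
// public method the caller uses to persist the edge.
func exampleIdempotentEdgeInsert(edge *models.ChannelEdgeInfo,
        insertEdge func(*models.ChannelEdgeInfo) error) error {

        err := insertEdge(edge)
        switch {
        // The edge is already present in the graph; nothing left to do.
        case errors.Is(err, ErrEdgeAlreadyExist):
                return nil

        case err != nil:
                return fmt.Errorf("unable to insert edge for chan_id=%v: %w",
                        edge.ChannelID, err)
        }

        return nil
}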
1230

1231
// HasChannelEdge returns true if the database knows of a channel edge with the
1232
// passed channel ID, and false otherwise. If an edge with that ID is found
1233
// within the graph, then two timestamps representing the last time the edge
1234
// was updated for both directed edges are returned along with the boolean. If
1235
// it is not found, then the zombie index is checked and its result is returned
1236
// as the second boolean.
1237
func (c *KVStore) HasChannelEdge(
1238
        chanID uint64) (time.Time, time.Time, bool, bool, error) {
217✔
1239

217✔
1240
        var (
217✔
1241
                upd1Time time.Time
217✔
1242
                upd2Time time.Time
217✔
1243
                exists   bool
217✔
1244
                isZombie bool
217✔
1245
        )
217✔
1246

217✔
1247
        // We'll query the cache with the shared lock held to allow multiple
217✔
1248
        // readers to access values in the cache concurrently if they exist.
217✔
1249
        c.cacheMu.RLock()
217✔
1250
        if entry, ok := c.rejectCache.get(chanID); ok {
286✔
1251
                c.cacheMu.RUnlock()
69✔
1252
                upd1Time = time.Unix(entry.upd1Time, 0)
69✔
1253
                upd2Time = time.Unix(entry.upd2Time, 0)
69✔
1254
                exists, isZombie = entry.flags.unpack()
69✔
1255

69✔
1256
                return upd1Time, upd2Time, exists, isZombie, nil
69✔
1257
        }
69✔
1258
        c.cacheMu.RUnlock()
151✔
1259

151✔
1260
        c.cacheMu.Lock()
151✔
1261
        defer c.cacheMu.Unlock()
151✔
1262

151✔
1263
        // The item was not found with the shared lock, so we'll acquire the
151✔
1264
        // exclusive lock and check the cache again in case another method added
151✔
1265
        // the entry to the cache while no lock was held.
151✔
1266
        if entry, ok := c.rejectCache.get(chanID); ok {
155✔
1267
                upd1Time = time.Unix(entry.upd1Time, 0)
4✔
1268
                upd2Time = time.Unix(entry.upd2Time, 0)
4✔
1269
                exists, isZombie = entry.flags.unpack()
4✔
1270

4✔
1271
                return upd1Time, upd2Time, exists, isZombie, nil
4✔
1272
        }
4✔
1273

1274
        if err := kvdb.View(c.db, func(tx kvdb.RTx) error {
298✔
1275
                edges := tx.ReadBucket(edgeBucket)
149✔
1276
                if edges == nil {
149✔
1277
                        return ErrGraphNoEdgesFound
×
1278
                }
×
1279
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
149✔
1280
                if edgeIndex == nil {
149✔
1281
                        return ErrGraphNoEdgesFound
×
1282
                }
×
1283

1284
                var channelID [8]byte
149✔
1285
                byteOrder.PutUint64(channelID[:], chanID)
149✔
1286

149✔
1287
                // If the edge doesn't exist, then we'll also check our zombie
149✔
1288
                // index.
149✔
1289
                if edgeIndex.Get(channelID[:]) == nil {
248✔
1290
                        exists = false
99✔
1291
                        zombieIndex := edges.NestedReadBucket(zombieBucket)
99✔
1292
                        if zombieIndex != nil {
198✔
1293
                                isZombie, _, _ = isZombieEdge(
99✔
1294
                                        zombieIndex, chanID,
99✔
1295
                                )
99✔
1296
                        }
99✔
1297

1298
                        return nil
99✔
1299
                }
1300

1301
                exists = true
53✔
1302
                isZombie = false
53✔
1303

53✔
1304
                // If the channel has been found in the graph, then retrieve
53✔
1305
                // the edges themselves so we can return the last updated
53✔
1306
                // timestamps.
53✔
1307
                nodes := tx.ReadBucket(nodeBucket)
53✔
1308
                if nodes == nil {
53✔
1309
                        return ErrGraphNodeNotFound
×
1310
                }
×
1311

1312
                e1, e2, err := fetchChanEdgePolicies(
53✔
1313
                        edgeIndex, edges, channelID[:],
53✔
1314
                )
53✔
1315
                if err != nil {
53✔
1316
                        return err
×
1317
                }
×
1318

1319
                // As we may have only one of the edges populated, only set the
1320
                // update time if the edge was found in the database.
1321
                if e1 != nil {
74✔
1322
                        upd1Time = e1.LastUpdate
21✔
1323
                }
21✔
1324
                if e2 != nil {
72✔
1325
                        upd2Time = e2.LastUpdate
19✔
1326
                }
19✔
1327

1328
                return nil
53✔
1329
        }, func() {}); err != nil {
149✔
1330
                return time.Time{}, time.Time{}, exists, isZombie, err
×
1331
        }
×
1332

1333
        c.rejectCache.insert(chanID, rejectCacheEntry{
149✔
1334
                upd1Time: upd1Time.Unix(),
149✔
1335
                upd2Time: upd2Time.Unix(),
149✔
1336
                flags:    packRejectFlags(exists, isZombie),
149✔
1337
        })
149✔
1338

149✔
1339
        return upd1Time, upd2Time, exists, isZombie, nil
149✔
1340
}
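
// Illustrative sketch (not part of the original file): consuming the result
// of HasChannelEdge while processing gossip. The store and scid parameters
// are assumptions for the example only.
func exampleHasChannelEdge(store *KVStore, scid uint64) error {
        upd1, upd2, exists, isZombie, err := store.HasChannelEdge(scid)
        if err != nil {
                return err
        }

        switch {
        // The channel was previously marked as a zombie, so we skip any
        // further processing for it.
        case isZombie:
                return nil

        // The channel is known; the two timestamps tell us how fresh each
        // directed policy is.
        case exists:
                log.Debugf("chan_id=%v last updates: %v / %v", scid,
                        upd1, upd2)
        }

        return nil
}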
1341

1342
// AddEdgeProof sets the proof of an existing edge in the graph database.
1343
func (c *KVStore) AddEdgeProof(chanID lnwire.ShortChannelID,
1344
        proof *models.ChannelAuthProof) error {
5✔
1345

5✔
1346
        // Construct the channel's primary key which is the 8-byte channel ID.
5✔
1347
        var chanKey [8]byte
5✔
1348
        binary.BigEndian.PutUint64(chanKey[:], chanID.ToUint64())
5✔
1349

5✔
1350
        return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
10✔
1351
                edges := tx.ReadWriteBucket(edgeBucket)
5✔
1352
                if edges == nil {
5✔
1353
                        return ErrEdgeNotFound
×
1354
                }
×
1355

1356
                edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
5✔
1357
                if edgeIndex == nil {
5✔
1358
                        return ErrEdgeNotFound
×
1359
                }
×
1360

1361
                edge, err := fetchChanEdgeInfo(edgeIndex, chanKey[:])
5✔
1362
                if err != nil {
5✔
1363
                        return err
×
1364
                }
×
1365

1366
                edge.AuthProof = proof
5✔
1367

5✔
1368
                return putChanEdgeInfo(edgeIndex, &edge, chanKey)
5✔
1369
        }, func() {})
5✔
1370
}
1371

1372
const (
1373
        // pruneTipBytes is the total size of the value which stores a prune
1374
        // entry of the graph in the prune log. The "prune tip" is the last
1375
        // entry in the prune log, and indicates if the channel graph is in
1376
        // sync with the current UTXO state. The structure of the value
1377
        // is: blockHash, taking 32 bytes total.
1378
        pruneTipBytes = 32
1379
)
1380

1381
// PruneGraph prunes newly closed channels from the channel graph in response
1382
// to a new block being solved on the network. Any transactions which spend the
1383
// funding output of any known channels within the graph will be deleted.
1384
// Additionally, the "prune tip", or the last block which has been used to
1385
// prune the graph is stored so callers can ensure the graph is fully in sync
1386
// with the current UTXO state. A slice of channels that have been closed by
1387
// the target block along with any pruned nodes are returned if the function
1388
// succeeds without error.
1389
func (c *KVStore) PruneGraph(spentOutputs []*wire.OutPoint,
1390
        blockHash *chainhash.Hash, blockHeight uint32) (
1391
        []*models.ChannelEdgeInfo, []route.Vertex, error) {
246✔
1392

246✔
1393
        c.cacheMu.Lock()
246✔
1394
        defer c.cacheMu.Unlock()
246✔
1395

246✔
1396
        var (
246✔
1397
                chansClosed []*models.ChannelEdgeInfo
246✔
1398
                prunedNodes []route.Vertex
246✔
1399
        )
246✔
1400

246✔
1401
        err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
492✔
1402
                // First grab the edges bucket which houses the information
246✔
1403
                // we'd like to delete
246✔
1404
                edges, err := tx.CreateTopLevelBucket(edgeBucket)
246✔
1405
                if err != nil {
246✔
1406
                        return err
×
1407
                }
×
1408

1409
                // Next grab the two edge indexes which will also need to be
1410
                // updated.
1411
                edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket)
246✔
1412
                if err != nil {
246✔
1413
                        return err
×
1414
                }
×
1415
                chanIndex, err := edges.CreateBucketIfNotExists(
246✔
1416
                        channelPointBucket,
246✔
1417
                )
246✔
1418
                if err != nil {
246✔
1419
                        return err
×
1420
                }
×
1421
                nodes := tx.ReadWriteBucket(nodeBucket)
246✔
1422
                if nodes == nil {
246✔
1423
                        return ErrSourceNodeNotSet
×
1424
                }
×
1425
                zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
246✔
1426
                if err != nil {
246✔
1427
                        return err
×
1428
                }
×
1429

1430
                // For each of the outpoints that have been spent within the
1431
                // block, we attempt to delete them from the graph as if that
1432
                // outpoint was a channel, then it has now been closed.
1433
                for _, chanPoint := range spentOutputs {
383✔
1434
                        // TODO(roasbeef): load channel bloom filter, continue
137✔
1435
                        // if NOT if filter
137✔
1436

137✔
1437
                        var opBytes bytes.Buffer
137✔
1438
                        err := WriteOutpoint(&opBytes, chanPoint)
137✔
1439
                        if err != nil {
137✔
1440
                                return err
×
1441
                        }
×
1442

1443
                        // First attempt to see if the channel exists within
1444
                        // the database, if not, then we can exit early.
1445
                        chanID := chanIndex.Get(opBytes.Bytes())
137✔
1446
                        if chanID == nil {
246✔
1447
                                continue
109✔
1448
                        }
1449

1450
                        // Attempt to delete the channel, an ErrEdgeNotFound
1451
                        // will be returned if that outpoint isn't known to be
1452
                        // a channel. If no error is returned, then a channel
1453
                        // was successfully pruned.
1454
                        edgeInfo, err := c.delChannelEdgeUnsafe(
28✔
1455
                                edges, edgeIndex, chanIndex, zombieIndex,
28✔
1456
                                chanID, false, false,
28✔
1457
                        )
28✔
1458
                        if err != nil && !errors.Is(err, ErrEdgeNotFound) {
28✔
1459
                                return err
×
1460
                        }
×
1461

1462
                        chansClosed = append(chansClosed, edgeInfo)
28✔
1463
                }
1464

1465
                metaBucket, err := tx.CreateTopLevelBucket(graphMetaBucket)
246✔
1466
                if err != nil {
246✔
1467
                        return err
×
1468
                }
×
1469

1470
                pruneBucket, err := metaBucket.CreateBucketIfNotExists(
246✔
1471
                        pruneLogBucket,
246✔
1472
                )
246✔
1473
                if err != nil {
246✔
1474
                        return err
×
1475
                }
×
1476

1477
                // With the graph pruned, add a new entry to the prune log,
1478
                // which can be used to check if the graph is fully synced with
1479
                // the current UTXO state.
1480
                var blockHeightBytes [4]byte
246✔
1481
                byteOrder.PutUint32(blockHeightBytes[:], blockHeight)
246✔
1482

246✔
1483
                var newTip [pruneTipBytes]byte
246✔
1484
                copy(newTip[:], blockHash[:])
246✔
1485

246✔
1486
                err = pruneBucket.Put(blockHeightBytes[:], newTip[:])
246✔
1487
                if err != nil {
246✔
1488
                        return err
×
1489
                }
×
1490

1491
                // Now that the graph has been pruned, we'll also attempt to
1492
                // prune any nodes that have had a channel closed within the
1493
                // latest block.
1494
                prunedNodes, err = c.pruneGraphNodes(nodes, edgeIndex)
246✔
1495

246✔
1496
                return err
246✔
1497
        }, func() {
246✔
1498
                chansClosed = nil
246✔
1499
                prunedNodes = nil
246✔
1500
        })
246✔
1501
        if err != nil {
246✔
1502
                return nil, nil, err
×
1503
        }
×
1504

1505
        for _, channel := range chansClosed {
274✔
1506
                c.rejectCache.remove(channel.ChannelID)
28✔
1507
                c.chanCache.remove(channel.ChannelID)
28✔
1508
        }
28✔
1509

1510
        return chansClosed, prunedNodes, nil
246✔
1511
}
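
// Illustrative sketch (not part of the original file): pruning the graph in
// response to a newly connected block. The spent outpoints, block hash and
// height parameters are assumptions for the example only.
func examplePruneForBlock(store *KVStore, spent []*wire.OutPoint,
        hash *chainhash.Hash, height uint32) error {

        closedChans, prunedNodes, err := store.PruneGraph(spent, hash, height)
        if err != nil {
                return err
        }

        log.Debugf("Block %v (height=%v) closed %v channels and pruned %v "+
                "unconnected nodes", hash, height, len(closedChans),
                len(prunedNodes))

        return nil
}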
1512

1513
// PruneGraphNodes is a garbage collection method which attempts to prune out
1514
// any nodes from the channel graph that are currently unconnected. This ensures
1515
// that we only maintain a graph of reachable nodes. In the event that a pruned
1516
// node gains more channels, it will be re-added to the graph.
1517
func (c *KVStore) PruneGraphNodes() ([]route.Vertex, error) {
26✔
1518
        var prunedNodes []route.Vertex
26✔
1519
        err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
52✔
1520
                nodes := tx.ReadWriteBucket(nodeBucket)
26✔
1521
                if nodes == nil {
26✔
1522
                        return ErrGraphNodesNotFound
×
1523
                }
×
1524
                edges := tx.ReadWriteBucket(edgeBucket)
26✔
1525
                if edges == nil {
26✔
1526
                        return ErrGraphNotFound
×
1527
                }
×
1528
                edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
26✔
1529
                if edgeIndex == nil {
26✔
1530
                        return ErrGraphNoEdgesFound
×
1531
                }
×
1532

1533
                var err error
26✔
1534
                prunedNodes, err = c.pruneGraphNodes(nodes, edgeIndex)
26✔
1535
                if err != nil {
26✔
1536
                        return err
×
1537
                }
×
1538

1539
                return nil
26✔
1540
        }, func() {
26✔
1541
                prunedNodes = nil
26✔
1542
        })
26✔
1543

1544
        return prunedNodes, err
26✔
1545
}
1546

1547
// pruneGraphNodes attempts to remove any nodes from the graph that have had a
1548
// channel closed within the current block. If the node still has existing
1549
// channels in the graph, this will act as a no-op.
1550
func (c *KVStore) pruneGraphNodes(nodes kvdb.RwBucket,
1551
        edgeIndex kvdb.RwBucket) ([]route.Vertex, error) {
269✔
1552

269✔
1553
        log.Trace("Pruning nodes from graph with no open channels")
269✔
1554

269✔
1555
        // We'll retrieve the graph's source node to ensure we don't remove it
269✔
1556
        // even if it no longer has any open channels.
269✔
1557
        sourceNode, err := c.sourceNode(nodes)
269✔
1558
        if err != nil {
269✔
1559
                return nil, err
×
1560
        }
×
1561

1562
        // We'll use this map to keep count of the number of references to a node
1563
        // in the graph. A node should only be removed once it has no more
1564
        // references in the graph.
1565
        nodeRefCounts := make(map[[33]byte]int)
269✔
1566
        err = nodes.ForEach(func(pubKey, nodeBytes []byte) error {
1,598✔
1567
                // If this is the source key, then we skip this
1,329✔
1568
                // iteration as the value for this key is a pubKey
1,329✔
1569
                // rather than raw node information.
1,329✔
1570
                if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 {
2,130✔
1571
                        return nil
801✔
1572
                }
801✔
1573

1574
                var nodePub [33]byte
531✔
1575
                copy(nodePub[:], pubKey)
531✔
1576
                nodeRefCounts[nodePub] = 0
531✔
1577

531✔
1578
                return nil
531✔
1579
        })
1580
        if err != nil {
269✔
1581
                return nil, err
×
1582
        }
×
1583

1584
        // To ensure we never delete the source node, we'll start off by
1585
        // bumping its ref count to 1.
1586
        nodeRefCounts[sourceNode.PubKeyBytes] = 1
269✔
1587

269✔
1588
        // Next, we'll run through the edgeIndex which maps a channel ID to the
269✔
1589
        // edge info. We'll use this scan to populate our reference count map
269✔
1590
        // above.
269✔
1591
        err = edgeIndex.ForEach(func(chanID, edgeInfoBytes []byte) error {
489✔
1592
                // The first 66 bytes of the edge info contain the pubkeys of
220✔
1593
                // the nodes that this edge attaches. We'll extract them, and
220✔
1594
                // add them to the ref count map.
220✔
1595
                var node1, node2 [33]byte
220✔
1596
                copy(node1[:], edgeInfoBytes[:33])
220✔
1597
                copy(node2[:], edgeInfoBytes[33:])
220✔
1598

220✔
1599
                // With the nodes extracted, we'll increase the ref count of
220✔
1600
                // each of the nodes.
220✔
1601
                nodeRefCounts[node1]++
220✔
1602
                nodeRefCounts[node2]++
220✔
1603

220✔
1604
                return nil
220✔
1605
        })
220✔
1606
        if err != nil {
269✔
1607
                return nil, err
×
1608
        }
×
1609

1610
        // Finally, we'll make a second pass over the set of nodes, and delete
1611
        // any nodes that have a ref count of zero.
1612
        var pruned []route.Vertex
269✔
1613
        for nodePubKey, refCount := range nodeRefCounts {
800✔
1614
                // If the ref count of the node isn't zero, then we can safely
531✔
1615
                // skip it as it still has edges to or from it within the
531✔
1616
                // graph.
531✔
1617
                if refCount != 0 {
1,008✔
1618
                        continue
477✔
1619
                }
1620

1621
                // If we reach this point, then there are no longer any edges
1622
                // that connect this node, so we can delete it.
1623
                err := c.deleteLightningNode(nodes, nodePubKey[:])
57✔
1624
                if err != nil {
57✔
1625
                        if errors.Is(err, ErrGraphNodeNotFound) ||
×
1626
                                errors.Is(err, ErrGraphNodesNotFound) {
×
1627

×
1628
                                log.Warnf("Unable to prune node %x from the "+
×
1629
                                        "graph: %v", nodePubKey, err)
×
1630
                                continue
×
1631
                        }
1632

1633
                        return nil, err
×
1634
                }
1635

1636
                log.Infof("Pruned unconnected node %x from channel graph",
57✔
1637
                        nodePubKey[:])
57✔
1638

57✔
1639
                pruned = append(pruned, nodePubKey)
57✔
1640
        }
1641

1642
        if len(pruned) > 0 {
310✔
1643
                log.Infof("Pruned %v unconnected nodes from the channel graph",
41✔
1644
                        len(pruned))
41✔
1645
        }
41✔
1646

1647
        return pruned, err
269✔
1648
}
1649

1650
// DisconnectBlockAtHeight is used to indicate that the block specified
1651
// by the passed height has been disconnected from the main chain. This
1652
// will "rewind" the graph back to the height below, deleting channels
1653
// that are no longer confirmed from the graph. The prune log will be
1654
// set to the last prune height valid for the remaining chain.
1655
// Channels that were removed from the graph resulting from the
1656
// disconnected block are returned.
1657
func (c *KVStore) DisconnectBlockAtHeight(height uint32) (
1658
        []*models.ChannelEdgeInfo, error) {
154✔
1659

154✔
1660
        // Every channel having a ShortChannelID starting at 'height'
154✔
1661
        // will no longer be confirmed.
154✔
1662
        startShortChanID := lnwire.ShortChannelID{
154✔
1663
                BlockHeight: height,
154✔
1664
        }
154✔
1665

154✔
1666
        // Delete everything after this height from the db up until the
154✔
1667
        // SCID alias range.
154✔
1668
        endShortChanID := aliasmgr.StartingAlias
154✔
1669

154✔
1670
        // The block height will be the first 3 bytes of the channel IDs.
154✔
1671
        var chanIDStart [8]byte
154✔
1672
        byteOrder.PutUint64(chanIDStart[:], startShortChanID.ToUint64())
154✔
1673
        var chanIDEnd [8]byte
154✔
1674
        byteOrder.PutUint64(chanIDEnd[:], endShortChanID.ToUint64())
154✔
1675

154✔
1676
        c.cacheMu.Lock()
154✔
1677
        defer c.cacheMu.Unlock()
154✔
1678

154✔
1679
        // Keep track of the channels that are removed from the graph.
154✔
1680
        var removedChans []*models.ChannelEdgeInfo
154✔
1681

154✔
1682
        if err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
308✔
1683
                edges, err := tx.CreateTopLevelBucket(edgeBucket)
154✔
1684
                if err != nil {
154✔
1685
                        return err
×
1686
                }
×
1687
                edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket)
154✔
1688
                if err != nil {
154✔
1689
                        return err
×
1690
                }
×
1691
                chanIndex, err := edges.CreateBucketIfNotExists(
154✔
1692
                        channelPointBucket,
154✔
1693
                )
154✔
1694
                if err != nil {
154✔
1695
                        return err
×
1696
                }
×
1697
                zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
154✔
1698
                if err != nil {
154✔
1699
                        return err
×
1700
                }
×
1701

1702
                // Scan from chanIDStart to chanIDEnd, deleting every
1703
                // found edge.
1704
                // NOTE: we must delete the edges after the cursor loop, since
1705
                // modifying the bucket while traversing is not safe.
1706
                // NOTE: We use a < comparison in bytes.Compare instead of <=
1707
                // so that the StartingAlias itself isn't deleted.
1708
                var keys [][]byte
154✔
1709
                cursor := edgeIndex.ReadWriteCursor()
154✔
1710

154✔
1711
                //nolint:ll
154✔
1712
                for k, _ := cursor.Seek(chanIDStart[:]); k != nil &&
154✔
1713
                        bytes.Compare(k, chanIDEnd[:]) < 0; k, _ = cursor.Next() {
251✔
1714
                        keys = append(keys, k)
97✔
1715
                }
97✔
1716

1717
                for _, k := range keys {
251✔
1718
                        edgeInfo, err := c.delChannelEdgeUnsafe(
97✔
1719
                                edges, edgeIndex, chanIndex, zombieIndex,
97✔
1720
                                k, false, false,
97✔
1721
                        )
97✔
1722
                        if err != nil && !errors.Is(err, ErrEdgeNotFound) {
97✔
1723
                                return err
×
1724
                        }
×
1725

1726
                        removedChans = append(removedChans, edgeInfo)
97✔
1727
                }
1728

1729
                // Delete all the entries in the prune log having a height
1730
                // greater or equal to the block disconnected.
1731
                metaBucket, err := tx.CreateTopLevelBucket(graphMetaBucket)
154✔
1732
                if err != nil {
154✔
1733
                        return err
×
1734
                }
×
1735

1736
                pruneBucket, err := metaBucket.CreateBucketIfNotExists(
154✔
1737
                        pruneLogBucket,
154✔
1738
                )
154✔
1739
                if err != nil {
154✔
1740
                        return err
×
1741
                }
×
1742

1743
                var pruneKeyStart [4]byte
154✔
1744
                byteOrder.PutUint32(pruneKeyStart[:], height)
154✔
1745

154✔
1746
                var pruneKeyEnd [4]byte
154✔
1747
                byteOrder.PutUint32(pruneKeyEnd[:], math.MaxUint32)
154✔
1748

154✔
1749
                // To avoid modifying the bucket while traversing, we delete
154✔
1750
                // the keys in a second loop.
154✔
1751
                var pruneKeys [][]byte
154✔
1752
                pruneCursor := pruneBucket.ReadWriteCursor()
154✔
1753
                //nolint:ll
154✔
1754
                for k, _ := pruneCursor.Seek(pruneKeyStart[:]); k != nil &&
154✔
1755
                        bytes.Compare(k, pruneKeyEnd[:]) <= 0; k, _ = pruneCursor.Next() {
247✔
1756
                        pruneKeys = append(pruneKeys, k)
93✔
1757
                }
93✔
1758

1759
                for _, k := range pruneKeys {
247✔
1760
                        if err := pruneBucket.Delete(k); err != nil {
93✔
1761
                                return err
×
1762
                        }
×
1763
                }
1764

1765
                return nil
154✔
1766
        }, func() {
154✔
1767
                removedChans = nil
154✔
1768
        }); err != nil {
154✔
1769
                return nil, err
×
1770
        }
×
1771

1772
        for _, channel := range removedChans {
251✔
1773
                c.rejectCache.remove(channel.ChannelID)
97✔
1774
                c.chanCache.remove(channel.ChannelID)
97✔
1775
        }
97✔
1776

1777
        return removedChans, nil
154✔
1778
}
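
// Illustrative sketch (not part of the original file): rewinding the graph
// when a block is disconnected during a reorg. The height parameter is an
// assumption for the example only.
func exampleDisconnectBlock(store *KVStore, height uint32) error {
        removed, err := store.DisconnectBlockAtHeight(height)
        if err != nil {
                return err
        }

        log.Debugf("Disconnected block at height=%v, removed %v channels",
                height, len(removed))

        return nil
}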
1779

1780
// PruneTip returns the block height and hash of the latest block that has been
1781
// used to prune channels in the graph. Knowing the "prune tip" allows callers
1782
// to tell if the graph is currently in sync with the current best known UTXO
1783
// state.
1784
func (c *KVStore) PruneTip() (*chainhash.Hash, uint32, error) {
56✔
1785
        var (
56✔
1786
                tipHash   chainhash.Hash
56✔
1787
                tipHeight uint32
56✔
1788
        )
56✔
1789

56✔
1790
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
112✔
1791
                graphMeta := tx.ReadBucket(graphMetaBucket)
56✔
1792
                if graphMeta == nil {
56✔
1793
                        return ErrGraphNotFound
×
1794
                }
×
1795
                pruneBucket := graphMeta.NestedReadBucket(pruneLogBucket)
56✔
1796
                if pruneBucket == nil {
56✔
1797
                        return ErrGraphNeverPruned
×
1798
                }
×
1799

1800
                pruneCursor := pruneBucket.ReadCursor()
56✔
1801

56✔
1802
                // The prune key with the largest block height will be our
56✔
1803
                // prune tip.
56✔
1804
                k, v := pruneCursor.Last()
56✔
1805
                if k == nil {
77✔
1806
                        return ErrGraphNeverPruned
21✔
1807
                }
21✔
1808

1809
                // Once we have the prune tip, the value will be the block hash,
1810
                // and the key the block height.
1811
                copy(tipHash[:], v)
38✔
1812
                tipHeight = byteOrder.Uint32(k)
38✔
1813

38✔
1814
                return nil
38✔
1815
        }, func() {})
56✔
1816
        if err != nil {
77✔
1817
                return nil, 0, err
21✔
1818
        }
21✔
1819

1820
        return &tipHash, tipHeight, nil
38✔
1821
}
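
// Illustrative sketch (not part of the original file): comparing the prune
// tip against the current best block height to decide whether the graph still
// needs to catch up. The bestHeight parameter is an assumption for the
// example only.
func exampleGraphIsSynced(store *KVStore, bestHeight uint32) (bool, error) {
        _, pruneHeight, err := store.PruneTip()
        switch {
        // A graph that has never been pruned is trivially out of sync.
        case errors.Is(err, ErrGraphNeverPruned):
                return false, nil

        case err != nil:
                return false, err
        }

        return pruneHeight == bestHeight, nil
}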
1822

1823
// DeleteChannelEdges removes edges with the given channel IDs from the
1824
// database and marks them as zombies. This ensures that we're unable to re-add
1825
// it to our database once again. If an edge does not exist within the
1826
// database, then ErrEdgeNotFound will be returned. If strictZombiePruning is
1827
// true, then when we mark these edges as zombies, we'll set up the keys such
1828
// that we require the node that failed to send the fresh update to be the one
1829
// that resurrects the channel from its zombie state. The markZombie bool
1830
// denotes whether or not to mark the channel as a zombie.
1831
func (c *KVStore) DeleteChannelEdges(strictZombiePruning, markZombie bool,
1832
        chanIDs ...uint64) ([]*models.ChannelEdgeInfo, error) {
144✔
1833

144✔
1834
        // TODO(roasbeef): possibly delete from node bucket if node has no more
144✔
1835
        // channels
144✔
1836
        // TODO(roasbeef): don't delete both edges?
144✔
1837

144✔
1838
        c.cacheMu.Lock()
144✔
1839
        defer c.cacheMu.Unlock()
144✔
1840

144✔
1841
        var infos []*models.ChannelEdgeInfo
144✔
1842
        err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
288✔
1843
                edges := tx.ReadWriteBucket(edgeBucket)
144✔
1844
                if edges == nil {
144✔
1845
                        return ErrEdgeNotFound
×
1846
                }
×
1847
                edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
144✔
1848
                if edgeIndex == nil {
144✔
1849
                        return ErrEdgeNotFound
×
1850
                }
×
1851
                chanIndex := edges.NestedReadWriteBucket(channelPointBucket)
144✔
1852
                if chanIndex == nil {
144✔
1853
                        return ErrEdgeNotFound
×
1854
                }
×
1855
                nodes := tx.ReadWriteBucket(nodeBucket)
144✔
1856
                if nodes == nil {
144✔
1857
                        return ErrGraphNodeNotFound
×
1858
                }
×
1859
                zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
144✔
1860
                if err != nil {
144✔
1861
                        return err
×
1862
                }
×
1863

1864
                var rawChanID [8]byte
144✔
1865
                for _, chanID := range chanIDs {
234✔
1866
                        byteOrder.PutUint64(rawChanID[:], chanID)
90✔
1867
                        edgeInfo, err := c.delChannelEdgeUnsafe(
90✔
1868
                                edges, edgeIndex, chanIndex, zombieIndex,
90✔
1869
                                rawChanID[:], markZombie, strictZombiePruning,
90✔
1870
                        )
90✔
1871
                        if err != nil {
152✔
1872
                                return err
62✔
1873
                        }
62✔
1874

1875
                        infos = append(infos, edgeInfo)
28✔
1876
                }
1877

1878
                return nil
82✔
1879
        }, func() {
144✔
1880
                infos = nil
144✔
1881
        })
144✔
1882
        if err != nil {
206✔
1883
                return nil, err
62✔
1884
        }
62✔
1885

1886
        for _, chanID := range chanIDs {
110✔
1887
                c.rejectCache.remove(chanID)
28✔
1888
                c.chanCache.remove(chanID)
28✔
1889
        }
28✔
1890

1891
        return infos, nil
82✔
1892
}
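
// Illustrative sketch (not part of the original file): removing a batch of
// closed channels and marking them as zombies so stale gossip cannot re-add
// them. The scids parameter is an assumption for the example only.
func exampleDeleteAndZombie(store *KVStore, scids ...uint64) error {
        const (
                strictZombiePruning = false
                markZombie          = true
        )

        _, err := store.DeleteChannelEdges(
                strictZombiePruning, markZombie, scids...,
        )
        if err != nil && !errors.Is(err, ErrEdgeNotFound) {
                return err
        }

        return nil
}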
1893

1894
// ChannelID attempts to look up the 8-byte compact channel ID mapping to the
1895
// passed channel point (outpoint). If the passed channel doesn't exist within
1896
// the database, then ErrEdgeNotFound is returned.
1897
func (c *KVStore) ChannelID(chanPoint *wire.OutPoint) (uint64, error) {
4✔
1898
        var chanID uint64
4✔
1899
        if err := kvdb.View(c.db, func(tx kvdb.RTx) error {
8✔
1900
                var err error
4✔
1901
                chanID, err = getChanID(tx, chanPoint)
4✔
1902
                return err
4✔
1903
        }, func() {
8✔
1904
                chanID = 0
4✔
1905
        }); err != nil {
7✔
1906
                return 0, err
3✔
1907
        }
3✔
1908

1909
        return chanID, nil
4✔
1910
}
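
// Illustrative sketch (not part of the original file): resolving a channel
// outpoint to its compact channel ID while treating an unknown outpoint as a
// non-fatal condition. The chanPoint parameter is an assumption for the
// example only.
func exampleLookupChanID(store *KVStore, chanPoint *wire.OutPoint) (uint64,
        bool, error) {

        chanID, err := store.ChannelID(chanPoint)
        switch {
        // The outpoint doesn't correspond to a channel we know of.
        case errors.Is(err, ErrEdgeNotFound):
                return 0, false, nil

        case err != nil:
                return 0, false, err
        }

        return chanID, true, nil
}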
1911

1912
// getChanID returns the assigned channel ID for a given channel point.
1913
func getChanID(tx kvdb.RTx, chanPoint *wire.OutPoint) (uint64, error) {
4✔
1914
        var b bytes.Buffer
4✔
1915
        if err := WriteOutpoint(&b, chanPoint); err != nil {
4✔
1916
                return 0, err
×
1917
        }
×
1918

1919
        edges := tx.ReadBucket(edgeBucket)
4✔
1920
        if edges == nil {
4✔
1921
                return 0, ErrGraphNoEdgesFound
×
1922
        }
×
1923
        chanIndex := edges.NestedReadBucket(channelPointBucket)
4✔
1924
        if chanIndex == nil {
4✔
1925
                return 0, ErrGraphNoEdgesFound
×
1926
        }
×
1927

1928
        chanIDBytes := chanIndex.Get(b.Bytes())
4✔
1929
        if chanIDBytes == nil {
7✔
1930
                return 0, ErrEdgeNotFound
3✔
1931
        }
3✔
1932

1933
        chanID := byteOrder.Uint64(chanIDBytes)
4✔
1934

4✔
1935
        return chanID, nil
4✔
1936
}
1937

1938
// TODO(roasbeef): allow updates to use Batch?
1939

1940
// HighestChanID returns the "highest" known channel ID in the channel graph.
1941
// This represents the "newest" channel from the PoV of the chain. This method
1942
// can be used by peers to quickly determine if their graphs are in sync.
1943
func (c *KVStore) HighestChanID(_ context.Context) (uint64, error) {
6✔
1944
        var cid uint64
6✔
1945

6✔
1946
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
12✔
1947
                edges := tx.ReadBucket(edgeBucket)
6✔
1948
                if edges == nil {
6✔
1949
                        return ErrGraphNoEdgesFound
×
1950
                }
×
1951
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
6✔
1952
                if edgeIndex == nil {
6✔
1953
                        return ErrGraphNoEdgesFound
×
1954
                }
×
1955

1956
                // In order to find the highest chan ID, we'll fetch a cursor
1957
                // and use that to seek to the "end" of our known range.
1958
                cidCursor := edgeIndex.ReadCursor()
6✔
1959

6✔
1960
                lastChanID, _ := cidCursor.Last()
6✔
1961

6✔
1962
                // If there's no key, then this means that we don't actually
6✔
1963
                // know of any channels, so we'll return a predictable error.
6✔
1964
                if lastChanID == nil {
10✔
1965
                        return ErrGraphNoEdgesFound
4✔
1966
                }
4✔
1967

1968
                // Otherwise, we'll deserialize the channel ID and return it
1969
                // to the caller.
1970
                cid = byteOrder.Uint64(lastChanID)
5✔
1971

5✔
1972
                return nil
5✔
1973
        }, func() {
6✔
1974
                cid = 0
6✔
1975
        })
6✔
1976
        if err != nil && !errors.Is(err, ErrGraphNoEdgesFound) {
6✔
1977
                return 0, err
×
1978
        }
×
1979

1980
        return cid, nil
6✔
1981
}
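
// Illustrative sketch (not part of the original file): logging the "newest"
// channel we know of, e.g. before deciding how much of the gossip backlog to
// request from a peer.
func exampleLogHighestChanID(ctx context.Context, store *KVStore) error {
        cid, err := store.HighestChanID(ctx)
        if err != nil {
                return err
        }

        scid := lnwire.NewShortChanIDFromInt(cid)
        log.Debugf("Highest known channel %v was mined in block %v", scid,
                scid.BlockHeight)

        return nil
}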
1982

1983
// ChannelEdge represents the complete set of information for a channel edge in
1984
// the known channel graph. This struct couples the core information of the
1985
// edge as well as each of the known advertised edge policies.
1986
type ChannelEdge struct {
1987
        // Info contains all the static information describing the channel.
1988
        Info *models.ChannelEdgeInfo
1989

1990
        // Policy1 points to the "first" edge policy of the channel containing
1991
        // the dynamic information required to properly route through the edge.
1992
        Policy1 *models.ChannelEdgePolicy
1993

1994
        // Policy2 points to the "second" edge policy of the channel containing
1995
        // the dynamic information required to properly route through the edge.
1996
        Policy2 *models.ChannelEdgePolicy
1997

1998
        // Node1 is "node 1" in the channel. This is the node that would have
1999
        // produced Policy1 if it exists.
2000
        Node1 *models.LightningNode
2001

2002
        // Node2 is "node 2" in the channel. This is the node that would have
2003
        // produced Policy2 if it exists.
2004
        Node2 *models.LightningNode
2005
}
2006

2007
// ChanUpdatesInHorizon returns all the known channel edges which have at least
2008
// one edge that has an update timestamp within the specified horizon.
2009
func (c *KVStore) ChanUpdatesInHorizon(startTime,
2010
        endTime time.Time) ([]ChannelEdge, error) {
137✔
2011

137✔
2012
        // To ensure we don't return duplicate ChannelEdges, we'll use an
137✔
2013
        // additional map to keep track of the edges already seen to prevent
137✔
2014
        // re-adding it.
137✔
2015
        var edgesSeen map[uint64]struct{}
137✔
2016
        var edgesToCache map[uint64]ChannelEdge
137✔
2017
        var edgesInHorizon []ChannelEdge
137✔
2018

137✔
2019
        c.cacheMu.Lock()
137✔
2020
        defer c.cacheMu.Unlock()
137✔
2021

137✔
2022
        var hits int
137✔
2023
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
274✔
2024
                edges := tx.ReadBucket(edgeBucket)
137✔
2025
                if edges == nil {
137✔
2026
                        return ErrGraphNoEdgesFound
×
2027
                }
×
2028
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
137✔
2029
                if edgeIndex == nil {
137✔
2030
                        return ErrGraphNoEdgesFound
×
2031
                }
×
2032
                edgeUpdateIndex := edges.NestedReadBucket(edgeUpdateIndexBucket)
137✔
2033
                if edgeUpdateIndex == nil {
137✔
2034
                        return ErrGraphNoEdgesFound
×
2035
                }
×
2036

2037
                nodes := tx.ReadBucket(nodeBucket)
137✔
2038
                if nodes == nil {
137✔
2039
                        return ErrGraphNodesNotFound
×
2040
                }
×
2041

2042
                // We'll now obtain a cursor to perform a range query within
2043
                // the index to find all channels within the horizon.
2044
                updateCursor := edgeUpdateIndex.ReadCursor()
137✔
2045

137✔
2046
                var startTimeBytes, endTimeBytes [8 + 8]byte
137✔
2047
                byteOrder.PutUint64(
137✔
2048
                        startTimeBytes[:8], uint64(startTime.Unix()),
137✔
2049
                )
137✔
2050
                byteOrder.PutUint64(
137✔
2051
                        endTimeBytes[:8], uint64(endTime.Unix()),
137✔
2052
                )
137✔
2053

137✔
2054
                // With our start and end times constructed, we'll step through
137✔
2055
                // the index collecting the info and policy of each update of
137✔
2056
                // each channel that has a last update within the time range.
137✔
2057
                //
137✔
2058
                //nolint:ll
137✔
2059
                for indexKey, _ := updateCursor.Seek(startTimeBytes[:]); indexKey != nil &&
137✔
2060
                        bytes.Compare(indexKey, endTimeBytes[:]) <= 0; indexKey, _ = updateCursor.Next() {
186✔
2061
                        // We have a new eligible entry, so we'll slice off the
49✔
2062
                        // chan ID so we can query it in the DB.
49✔
2063
                        chanID := indexKey[8:]
49✔
2064

49✔
2065
                        // If we've already retrieved the info and policies for
49✔
2066
                        // this edge, then we can skip it as we don't need to do
49✔
2067
                        // so again.
49✔
2068
                        chanIDInt := byteOrder.Uint64(chanID)
49✔
2069
                        if _, ok := edgesSeen[chanIDInt]; ok {
68✔
2070
                                continue
19✔
2071
                        }
2072

2073
                        if channel, ok := c.chanCache.get(chanIDInt); ok {
41✔
2074
                                hits++
11✔
2075
                                edgesSeen[chanIDInt] = struct{}{}
11✔
2076
                                edgesInHorizon = append(edgesInHorizon, channel)
11✔
2077

11✔
2078
                                continue
11✔
2079
                        }
2080

2081
                        // First, we'll fetch the static edge information.
2082
                        edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID)
21✔
2083
                        if err != nil {
21✔
2084
                                chanID := byteOrder.Uint64(chanID)
×
2085
                                return fmt.Errorf("unable to fetch info for "+
×
2086
                                        "edge with chan_id=%v: %v", chanID, err)
×
2087
                        }
×
2088

2089
                        // With the static information obtained, we'll now
2090
                        // fetch the dynamic policy info.
2091
                        edge1, edge2, err := fetchChanEdgePolicies(
21✔
2092
                                edgeIndex, edges, chanID,
21✔
2093
                        )
21✔
2094
                        if err != nil {
21✔
2095
                                chanID := byteOrder.Uint64(chanID)
×
2096
                                return fmt.Errorf("unable to fetch policies "+
×
2097
                                        "for edge with chan_id=%v: %v", chanID,
×
2098
                                        err)
×
2099
                        }
×
2100

2101
                        node1, err := fetchLightningNode(
21✔
2102
                                nodes, edgeInfo.NodeKey1Bytes[:],
21✔
2103
                        )
21✔
2104
                        if err != nil {
21✔
2105
                                return err
×
2106
                        }
×
2107

2108
                        node2, err := fetchLightningNode(
21✔
2109
                                nodes, edgeInfo.NodeKey2Bytes[:],
21✔
2110
                        )
21✔
2111
                        if err != nil {
21✔
2112
                                return err
×
2113
                        }
×
2114

2115
                        // Finally, we'll collate this edge with the rest of
2116
                        // edges to be returned.
2117
                        edgesSeen[chanIDInt] = struct{}{}
21✔
2118
                        channel := ChannelEdge{
21✔
2119
                                Info:    &edgeInfo,
21✔
2120
                                Policy1: edge1,
21✔
2121
                                Policy2: edge2,
21✔
2122
                                Node1:   &node1,
21✔
2123
                                Node2:   &node2,
21✔
2124
                        }
21✔
2125
                        edgesInHorizon = append(edgesInHorizon, channel)
21✔
2126
                        edgesToCache[chanIDInt] = channel
21✔
2127
                }
2128

2129
                return nil
137✔
2130
        }, func() {
137✔
2131
                edgesSeen = make(map[uint64]struct{})
137✔
2132
                edgesToCache = make(map[uint64]ChannelEdge)
137✔
2133
                edgesInHorizon = nil
137✔
2134
        })
137✔
2135
        switch {
137✔
2136
        case errors.Is(err, ErrGraphNoEdgesFound):
×
2137
                fallthrough
×
2138
        case errors.Is(err, ErrGraphNodesNotFound):
×
2139
                break
×
2140

2141
        case err != nil:
×
2142
                return nil, err
×
2143
        }
2144

2145
        // Insert any edges loaded from disk into the cache.
2146
        for chanid, channel := range edgesToCache {
158✔
2147
                c.chanCache.insert(chanid, channel)
21✔
2148
        }
21✔
2149

2150
        if len(edgesInHorizon) > 0 {
145✔
2151
                log.Debugf("ChanUpdatesInHorizon hit percentage: %.2f (%d/%d)",
8✔
2152
                        float64(hits)*100/float64(len(edgesInHorizon)), hits,
8✔
2153
                        len(edgesInHorizon))
8✔
2154
        } else {
140✔
2155
                log.Debugf("ChanUpdatesInHorizon returned no edges in "+
132✔
2156
                        "horizon (%s, %s)", startTime, endTime)
132✔
2157
        }
132✔
2158

2159
        return edgesInHorizon, nil
137✔
2160
}
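
// Illustrative sketch (not part of the original file): fetching every channel
// that saw an update within the last 24 hours. The horizon duration is an
// assumption for the example only.
func exampleRecentChannelUpdates(store *KVStore) ([]ChannelEdge, error) {
        endTime := time.Now()
        startTime := endTime.Add(-24 * time.Hour)

        return store.ChanUpdatesInHorizon(startTime, endTime)
}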
2161

2162
// NodeUpdatesInHorizon returns all the known lightning nodes which have an
2163
// update timestamp within the passed range. This method can be used by two
2164
// nodes to quickly determine if they have the same set of up-to-date node
2165
// announcements.
2166
func (c *KVStore) NodeUpdatesInHorizon(startTime,
2167
        endTime time.Time) ([]models.LightningNode, error) {
11✔
2168

11✔
2169
        var nodesInHorizon []models.LightningNode
11✔
2170

11✔
2171
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
22✔
2172
                nodes := tx.ReadBucket(nodeBucket)
11✔
2173
                if nodes == nil {
11✔
2174
                        return ErrGraphNodesNotFound
×
2175
                }
×
2176

2177
                nodeUpdateIndex := nodes.NestedReadBucket(nodeUpdateIndexBucket)
11✔
2178
                if nodeUpdateIndex == nil {
11✔
2179
                        return ErrGraphNodesNotFound
×
2180
                }
×
2181

2182
                // We'll now obtain a cursor to perform a range query within
2183
                // the index to find all node announcements within the horizon.
2184
                updateCursor := nodeUpdateIndex.ReadCursor()
11✔
2185

11✔
2186
                var startTimeBytes, endTimeBytes [8 + 33]byte
11✔
2187
                byteOrder.PutUint64(
11✔
2188
                        startTimeBytes[:8], uint64(startTime.Unix()),
11✔
2189
                )
11✔
2190
                byteOrder.PutUint64(
11✔
2191
                        endTimeBytes[:8], uint64(endTime.Unix()),
11✔
2192
                )
11✔
2193

11✔
2194
                // With our start and end times constructed, we'll step through
11✔
2195
                // the index collecting info for each node within the time
11✔
2196
                // range.
11✔
2197
                //
11✔
2198
                //nolint:ll
11✔
2199
                for indexKey, _ := updateCursor.Seek(startTimeBytes[:]); indexKey != nil &&
11✔
2200
                        bytes.Compare(indexKey, endTimeBytes[:]) <= 0; indexKey, _ = updateCursor.Next() {
43✔
2201
                        nodePub := indexKey[8:]
32✔
2202
                        node, err := fetchLightningNode(nodes, nodePub)
32✔
2203
                        if err != nil {
32✔
2204
                                return err
×
2205
                        }
×
2206

2207
                        nodesInHorizon = append(nodesInHorizon, node)
32✔
2208
                }
2209

2210
                return nil
11✔
2211
        }, func() {
11✔
2212
                nodesInHorizon = nil
11✔
2213
        })
11✔
2214
        switch {
11✔
2215
        case errors.Is(err, ErrGraphNoEdgesFound):
×
2216
                fallthrough
×
2217
        case errors.Is(err, ErrGraphNodesNotFound):
×
2218
                break
×
2219

2220
        case err != nil:
×
2221
                return nil, err
×
2222
        }
2223

2224
        return nodesInHorizon, nil
11✔
2225
}
2226

2227
// FilterKnownChanIDs takes a set of channel IDs and returns the subset of chan
2228
// ID's that we don't know and are not known zombies of the passed set. In other
2229
// words, we perform a set difference of our set of chan ID's and the ones
2230
// passed in. This method can be used by callers to determine the set of
2231
// channels another peer knows of that we don't. The ChannelUpdateInfos for the
2232
// known zombies are also returned.
2233
func (c *KVStore) FilterKnownChanIDs(chansInfo []ChannelUpdateInfo) ([]uint64,
2234
        []ChannelUpdateInfo, error) {
126✔
2235

126✔
2236
        var (
126✔
2237
                newChanIDs   []uint64
126✔
2238
                knownZombies []ChannelUpdateInfo
126✔
2239
        )
126✔
2240

126✔
2241
        c.cacheMu.Lock()
126✔
2242
        defer c.cacheMu.Unlock()
126✔
2243

126✔
2244
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
252✔
2245
                edges := tx.ReadBucket(edgeBucket)
126✔
2246
                if edges == nil {
126✔
2247
                        return ErrGraphNoEdgesFound
×
2248
                }
×
2249
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
126✔
2250
                if edgeIndex == nil {
126✔
2251
                        return ErrGraphNoEdgesFound
×
2252
                }
×
2253

2254
                // Fetch the zombie index, it may not exist if no edges have
2255
                // ever been marked as zombies. If the index has been
2256
                // initialized, we will use it later to skip known zombie edges.
2257
                zombieIndex := edges.NestedReadBucket(zombieBucket)
126✔
2258

126✔
2259
                // We'll run through the set of chanIDs and collate only the
126✔
2260
                // set of channels that cannot be found within our db.
126✔
2261
                var cidBytes [8]byte
126✔
2262
                for _, info := range chansInfo {
247✔
2263
                        scid := info.ShortChannelID.ToUint64()
121✔
2264
                        byteOrder.PutUint64(cidBytes[:], scid)
121✔
2265

121✔
2266
                        // If the edge is already known, skip it.
121✔
2267
                        if v := edgeIndex.Get(cidBytes[:]); v != nil {
140✔
2268
                                continue
19✔
2269
                        }
2270

2271
                        // If the edge is a known zombie, skip it.
2272
                        if zombieIndex != nil {
210✔
2273
                                isZombie, _, _ := isZombieEdge(
105✔
2274
                                        zombieIndex, scid,
105✔
2275
                                )
105✔
2276

105✔
2277
                                if isZombie {
150✔
2278
                                        knownZombies = append(
45✔
2279
                                                knownZombies, info,
45✔
2280
                                        )
45✔
2281

45✔
2282
                                        continue
45✔
2283
                                }
2284
                        }
2285

2286
                        newChanIDs = append(newChanIDs, scid)
60✔
2287
                }
2288

2289
                return nil
126✔
2290
        }, func() {
126✔
2291
                newChanIDs = nil
126✔
2292
                knownZombies = nil
126✔
2293
        })
126✔
2294
        switch {
126✔
2295
        // If we don't know of any edges yet, then we'll return the entire set
2296
        // of chan IDs specified.
2297
        case errors.Is(err, ErrGraphNoEdgesFound):
×
2298
                ogChanIDs := make([]uint64, len(chansInfo))
×
2299
                for i, info := range chansInfo {
×
2300
                        ogChanIDs[i] = info.ShortChannelID.ToUint64()
×
2301
                }
×
2302

2303
                return ogChanIDs, nil, nil
×
2304

2305
        case err != nil:
×
2306
                return nil, nil, err
×
2307
        }
2308

2309
        return newChanIDs, knownZombies, nil
126✔
2310
}
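
// Illustrative sketch (not part of the original file): computing which of the
// channels advertised by a peer we still need to query for, while logging how
// many of them we already consider zombies. The peerChans parameter is an
// assumption for the example only.
func exampleFilterPeerChannels(store *KVStore,
        peerChans []ChannelUpdateInfo) ([]uint64, error) {

        newSCIDs, knownZombies, err := store.FilterKnownChanIDs(peerChans)
        if err != nil {
                return nil, err
        }

        log.Debugf("Peer advertised %v channels: %v new, %v known zombies",
                len(peerChans), len(newSCIDs), len(knownZombies))

        return newSCIDs, nil
}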
2311

2312
// ChannelUpdateInfo couples the SCID of a channel with the timestamps of the
2313
// latest received channel updates for the channel.
2314
type ChannelUpdateInfo struct {
2315
        // ShortChannelID is the SCID identifier of the channel.
2316
        ShortChannelID lnwire.ShortChannelID
2317

2318
        // Node1UpdateTimestamp is the timestamp of the latest received update
2319
        // from the node 1 channel peer. This will be set to zero time if no
2320
        // update has yet been received from this node.
2321
        Node1UpdateTimestamp time.Time
2322

2323
        // Node2UpdateTimestamp is the timestamp of the latest received update
2324
        // from the node 2 channel peer. This will be set to zero time if no
2325
        // update has yet been received from this node.
2326
        Node2UpdateTimestamp time.Time
2327
}
2328

2329
// NewChannelUpdateInfo is a constructor which makes sure we initialize the
2330
// timestamps with the zero-second unix timestamp, which equals
2331
// `January 1, 1970, 00:00:00 UTC` in case the value is `time.Time{}`.
2332
func NewChannelUpdateInfo(scid lnwire.ShortChannelID, node1Timestamp,
2333
        node2Timestamp time.Time) ChannelUpdateInfo {
199✔
2334

199✔
2335
        chanInfo := ChannelUpdateInfo{
199✔
2336
                ShortChannelID:       scid,
199✔
2337
                Node1UpdateTimestamp: node1Timestamp,
199✔
2338
                Node2UpdateTimestamp: node2Timestamp,
199✔
2339
        }
199✔
2340

199✔
2341
        if node1Timestamp.IsZero() {
388✔
2342
                chanInfo.Node1UpdateTimestamp = time.Unix(0, 0)
189✔
2343
        }
189✔
2344

2345
        if node2Timestamp.IsZero() {
388✔
2346
                chanInfo.Node2UpdateTimestamp = time.Unix(0, 0)
189✔
2347
        }
189✔
2348

2349
        return chanInfo
199✔
2350
}
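
// Illustrative sketch (not part of the original file): constructing a
// ChannelUpdateInfo when only node 1 has sent an update so far. The zero
// value passed for node 2 is normalized to time.Unix(0, 0) by the
// constructor above.
func exampleNewChannelUpdateInfo(scid lnwire.ShortChannelID,
        node1Update time.Time) ChannelUpdateInfo {

        return NewChannelUpdateInfo(scid, node1Update, time.Time{})
}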
2351

2352
// BlockChannelRange represents a range of channels for a given block height.
2353
type BlockChannelRange struct {
2354
        // Height is the height of the block all of the channels below were
2355
        // included in.
2356
        Height uint32
2357

2358
        // Channels is the list of channels identified by their short ID
2359
        // representation known to us that were included in the block height
2360
        // above. The list may include channel update timestamp information if
2361
        // requested.
2362
        Channels []ChannelUpdateInfo
2363
}
2364

2365
// FilterChannelRange returns the channel ID's of all known channels which were
// mined in a block height within the passed range. The channel IDs are grouped
// by their common block height. This method can be used to quickly share with a
// peer the set of channels we know of within a particular range to catch them
// up after a period of time offline. If withTimestamps is true then the
// timestamp info of the latest received channel update messages of the channel
// will be included in the response.
func (c *KVStore) FilterChannelRange(startHeight,
        endHeight uint32, withTimestamps bool) ([]BlockChannelRange, error) {

        startChanID := &lnwire.ShortChannelID{
                BlockHeight: startHeight,
        }

        endChanID := lnwire.ShortChannelID{
                BlockHeight: endHeight,
                TxIndex:     math.MaxUint32 & 0x00ffffff,
                TxPosition:  math.MaxUint16,
        }

        // As we need to perform a range scan, we'll convert the starting and
        // ending height to their corresponding values when encoded using short
        // channel ID's.
        var chanIDStart, chanIDEnd [8]byte
        byteOrder.PutUint64(chanIDStart[:], startChanID.ToUint64())
        byteOrder.PutUint64(chanIDEnd[:], endChanID.ToUint64())

        var channelsPerBlock map[uint32][]ChannelUpdateInfo
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
                edges := tx.ReadBucket(edgeBucket)
                if edges == nil {
                        return ErrGraphNoEdgesFound
                }
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
                if edgeIndex == nil {
                        return ErrGraphNoEdgesFound
                }

                cursor := edgeIndex.ReadCursor()

                // We'll now iterate through the database, and find each
                // channel ID that resides within the specified range.
                //
                //nolint:ll
                for k, v := cursor.Seek(chanIDStart[:]); k != nil &&
                        bytes.Compare(k, chanIDEnd[:]) <= 0; k, v = cursor.Next() {
                        // Don't send alias SCIDs during gossip sync.
                        edgeReader := bytes.NewReader(v)
                        edgeInfo, err := deserializeChanEdgeInfo(edgeReader)
                        if err != nil {
                                return err
                        }

                        if edgeInfo.AuthProof == nil {
                                continue
                        }

                        // This channel ID rests within the target range, so
                        // we'll add it to our returned set.
                        rawCid := byteOrder.Uint64(k)
                        cid := lnwire.NewShortChanIDFromInt(rawCid)

                        chanInfo := NewChannelUpdateInfo(
                                cid, time.Time{}, time.Time{},
                        )

                        if !withTimestamps {
                                channelsPerBlock[cid.BlockHeight] = append(
                                        channelsPerBlock[cid.BlockHeight],
                                        chanInfo,
                                )

                                continue
                        }

                        node1Key, node2Key := computeEdgePolicyKeys(&edgeInfo)

                        rawPolicy := edges.Get(node1Key)
                        if len(rawPolicy) != 0 {
                                r := bytes.NewReader(rawPolicy)

                                edge, err := deserializeChanEdgePolicyRaw(r)
                                if err != nil && !errors.Is(
                                        err, ErrEdgePolicyOptionalFieldNotFound,
                                ) && !errors.Is(err, ErrParsingExtraTLVBytes) {

                                        return err
                                }

                                chanInfo.Node1UpdateTimestamp = edge.LastUpdate
                        }

                        rawPolicy = edges.Get(node2Key)
                        if len(rawPolicy) != 0 {
                                r := bytes.NewReader(rawPolicy)

                                edge, err := deserializeChanEdgePolicyRaw(r)
                                if err != nil && !errors.Is(
                                        err, ErrEdgePolicyOptionalFieldNotFound,
                                ) && !errors.Is(err, ErrParsingExtraTLVBytes) {

                                        return err
                                }

                                chanInfo.Node2UpdateTimestamp = edge.LastUpdate
                        }

                        channelsPerBlock[cid.BlockHeight] = append(
                                channelsPerBlock[cid.BlockHeight], chanInfo,
                        )
                }

                return nil
        }, func() {
                channelsPerBlock = make(map[uint32][]ChannelUpdateInfo)
        })

        switch {
        // If we don't know of any channels yet, then there's nothing to
        // filter, so we'll return an empty slice.
        case errors.Is(err, ErrGraphNoEdgesFound) || len(channelsPerBlock) == 0:
                return nil, nil

        case err != nil:
                return nil, err
        }

        // Return the channel ranges in ascending block height order.
        blocks := make([]uint32, 0, len(channelsPerBlock))
        for block := range channelsPerBlock {
                blocks = append(blocks, block)
        }
        sort.Slice(blocks, func(i, j int) bool {
                return blocks[i] < blocks[j]
        })

        channelRanges := make([]BlockChannelRange, 0, len(channelsPerBlock))
        for _, block := range blocks {
                channelRanges = append(channelRanges, BlockChannelRange{
                        Height:   block,
                        Channels: channelsPerBlock[block],
                })
        }

        return channelRanges, nil
}

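// exampleFilterChannelRange is an illustrative sketch of how a caller might
// consume FilterChannelRange with timestamps enabled; the store and height
// bounds are assumed to be supplied by the caller.
func exampleFilterChannelRange(c *KVStore, start, end uint32) error {
        ranges, err := c.FilterChannelRange(start, end, true)
        if err != nil {
                return err
        }

        for _, blockRange := range ranges {
                for _, chanInfo := range blockRange.Channels {
                        fmt.Printf("height=%d scid=%v node1=%v node2=%v\n",
                                blockRange.Height,
                                chanInfo.ShortChannelID,
                                chanInfo.Node1UpdateTimestamp,
                                chanInfo.Node2UpdateTimestamp)
                }
        }

        return nil
}
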
// FetchChanInfos returns the set of channel edges that correspond to the passed
// channel ID's. If an edge in the query is unknown to the database, it will be
// skipped and the result will contain only those edges that exist at the time
// of the query. This can be used to respond to peer queries that are seeking to
// fill in gaps in their view of the channel graph.
func (c *KVStore) FetchChanInfos(chanIDs []uint64) ([]ChannelEdge, error) {
        return c.fetchChanInfos(nil, chanIDs)
}

// fetchChanInfos returns the set of channel edges that correspond to the passed
// channel ID's. If an edge in the query is unknown to the database, it will be
// skipped and the result will contain only those edges that exist at the time
// of the query. This can be used to respond to peer queries that are seeking to
// fill in gaps in their view of the channel graph.
//
// NOTE: An optional transaction may be provided. If none is provided, then a
// new one will be created.
func (c *KVStore) fetchChanInfos(tx kvdb.RTx, chanIDs []uint64) (
        []ChannelEdge, error) {
        // TODO(roasbeef): sort cids?

        var (
                chanEdges []ChannelEdge
                cidBytes  [8]byte
        )

        fetchChanInfos := func(tx kvdb.RTx) error {
                edges := tx.ReadBucket(edgeBucket)
                if edges == nil {
                        return ErrGraphNoEdgesFound
                }
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
                if edgeIndex == nil {
                        return ErrGraphNoEdgesFound
                }
                nodes := tx.ReadBucket(nodeBucket)
                if nodes == nil {
                        return ErrGraphNotFound
                }

                for _, cid := range chanIDs {
                        byteOrder.PutUint64(cidBytes[:], cid)

                        // First, we'll fetch the static edge information. If
                        // the edge is unknown, we will skip the edge and
                        // continue gathering all known edges.
                        edgeInfo, err := fetchChanEdgeInfo(
                                edgeIndex, cidBytes[:],
                        )
                        switch {
                        case errors.Is(err, ErrEdgeNotFound):
                                continue
                        case err != nil:
                                return err
                        }

                        // With the static information obtained, we'll now
                        // fetch the dynamic policy info.
                        edge1, edge2, err := fetchChanEdgePolicies(
                                edgeIndex, edges, cidBytes[:],
                        )
                        if err != nil {
                                return err
                        }

                        node1, err := fetchLightningNode(
                                nodes, edgeInfo.NodeKey1Bytes[:],
                        )
                        if err != nil {
                                return err
                        }

                        node2, err := fetchLightningNode(
                                nodes, edgeInfo.NodeKey2Bytes[:],
                        )
                        if err != nil {
                                return err
                        }

                        chanEdges = append(chanEdges, ChannelEdge{
                                Info:    &edgeInfo,
                                Policy1: edge1,
                                Policy2: edge2,
                                Node1:   &node1,
                                Node2:   &node2,
                        })
                }

                return nil
        }

        if tx == nil {
                err := kvdb.View(c.db, fetchChanInfos, func() {
                        chanEdges = nil
                })
                if err != nil {
                        return nil, err
                }

                return chanEdges, nil
        }

        err := fetchChanInfos(tx)
        if err != nil {
                return nil, err
        }

        return chanEdges, nil
}

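// exampleFetchChanInfos is an illustrative sketch of responding to a peer
// query: unknown channel IDs are silently skipped by FetchChanInfos, so the
// result may be shorter than the query.
func exampleFetchChanInfos(c *KVStore, chanIDs []uint64) ([]ChannelEdge, error) {
        edges, err := c.FetchChanInfos(chanIDs)
        if err != nil {
                return nil, err
        }
        if len(edges) < len(chanIDs) {
                fmt.Printf("%d of %d queried channels are unknown\n",
                        len(chanIDs)-len(edges), len(chanIDs))
        }

        return edges, nil
}
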
func delEdgeUpdateIndexEntry(edgesBucket kvdb.RwBucket, chanID uint64,
        edge1, edge2 *models.ChannelEdgePolicy) error {

        // First, we'll fetch the edge update index bucket which currently
        // stores an entry for the channel we're about to delete.
        updateIndex := edgesBucket.NestedReadWriteBucket(edgeUpdateIndexBucket)
        if updateIndex == nil {
                // No edges in bucket, return early.
                return nil
        }

        // Now that we have the bucket, we'll attempt to construct a template
        // for the index key: updateTime || chanid.
        var indexKey [8 + 8]byte
        byteOrder.PutUint64(indexKey[8:], chanID)

        // With the template constructed, we'll attempt to delete an entry that
        // would have been created by both edges: we'll alternate the update
        // times, as one may have overridden the other.
        if edge1 != nil {
                byteOrder.PutUint64(
                        indexKey[:8], uint64(edge1.LastUpdate.Unix()),
                )
                if err := updateIndex.Delete(indexKey[:]); err != nil {
                        return err
                }
        }

        // We'll also attempt to delete the entry that may have been created by
        // the second edge.
        if edge2 != nil {
                byteOrder.PutUint64(
                        indexKey[:8], uint64(edge2.LastUpdate.Unix()),
                )
                if err := updateIndex.Delete(indexKey[:]); err != nil {
                        return err
                }
        }

        return nil
}

// delChannelEdgeUnsafe deletes the edge with the given chanID from the graph
// cache. It then goes on to delete any policy info and edge info for this
// channel from the DB and finally, if isZombie is true, it will add an entry
// for this channel in the zombie index.
//
// NOTE: this method MUST only be called if the cacheMu has already been
// acquired.
func (c *KVStore) delChannelEdgeUnsafe(edges, edgeIndex, chanIndex,
        zombieIndex kvdb.RwBucket, chanID []byte, isZombie,
        strictZombie bool) (*models.ChannelEdgeInfo, error) {

        edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID)
        if err != nil {
                return nil, err
        }

        // We'll also remove the entry in the edge update index bucket before
        // we delete the edges themselves so we can access their last update
        // times.
        cid := byteOrder.Uint64(chanID)
        edge1, edge2, err := fetchChanEdgePolicies(edgeIndex, edges, chanID)
        if err != nil {
                return nil, err
        }
        err = delEdgeUpdateIndexEntry(edges, cid, edge1, edge2)
        if err != nil {
                return nil, err
        }

        // The edge key is of the format pubKey || chanID. First we construct
        // the latter half, populating the channel ID.
        var edgeKey [33 + 8]byte
        copy(edgeKey[33:], chanID)

        // With the latter half constructed, copy over the first public key to
        // delete the edge in this direction, then the second to delete the
        // edge in the opposite direction.
        copy(edgeKey[:33], edgeInfo.NodeKey1Bytes[:])
        if edges.Get(edgeKey[:]) != nil {
                if err := edges.Delete(edgeKey[:]); err != nil {
                        return nil, err
                }
        }
        copy(edgeKey[:33], edgeInfo.NodeKey2Bytes[:])
        if edges.Get(edgeKey[:]) != nil {
                if err := edges.Delete(edgeKey[:]); err != nil {
                        return nil, err
                }
        }

        // As part of deleting the edge we also remove all disabled entries
        // from the edgePolicyDisabledIndex bucket. We do that for both
        // directions.
        err = updateEdgePolicyDisabledIndex(edges, cid, false, false)
        if err != nil {
                return nil, err
        }
        err = updateEdgePolicyDisabledIndex(edges, cid, true, false)
        if err != nil {
                return nil, err
        }

        // With the edge data deleted, we can purge the information from the two
        // edge indexes.
        if err := edgeIndex.Delete(chanID); err != nil {
                return nil, err
        }
        var b bytes.Buffer
        if err := WriteOutpoint(&b, &edgeInfo.ChannelPoint); err != nil {
                return nil, err
        }
        if err := chanIndex.Delete(b.Bytes()); err != nil {
                return nil, err
        }

        // Finally, we'll mark the edge as a zombie within our index if it's
        // being removed due to the channel becoming a zombie. We do this to
        // ensure we don't store unnecessary data for spent channels.
        if !isZombie {
                return &edgeInfo, nil
        }

        nodeKey1, nodeKey2 := edgeInfo.NodeKey1Bytes, edgeInfo.NodeKey2Bytes
        if strictZombie {
                var e1UpdateTime, e2UpdateTime *time.Time
                if edge1 != nil {
                        e1UpdateTime = &edge1.LastUpdate
                }
                if edge2 != nil {
                        e2UpdateTime = &edge2.LastUpdate
                }

                nodeKey1, nodeKey2 = makeZombiePubkeys(
                        &edgeInfo, e1UpdateTime, e2UpdateTime,
                )
        }

        return &edgeInfo, markEdgeZombie(
                zombieIndex, byteOrder.Uint64(chanID), nodeKey1, nodeKey2,
        )
}

// makeZombiePubkeys derives the node pubkeys to store in the zombie index for a
// particular pair of channel policies. The return values are one of:
//  1. (pubkey1, pubkey2)
//  2. (pubkey1, blank)
//  3. (blank, pubkey2)
//
// A blank pubkey means that the corresponding node will be unable to resurrect
// a channel on its own. For example, node1 may continue to publish recent
// updates, but node2 has fallen way behind. After marking an edge as a zombie,
// we don't want another fresh update from node1 to resurrect, as the edge can
// only become live once node2 finally sends something recent.
//
// In the case where we have neither update, we allow either party to resurrect
// the channel. If the channel were to be marked zombie again, it would be
// marked with the correct lagging channel since we received an update from only
// one side.
func makeZombiePubkeys(info *models.ChannelEdgeInfo,
        e1, e2 *time.Time) ([33]byte, [33]byte) {

        switch {
        // If we don't have either edge policy, we'll return both pubkeys so
        // that the channel can be resurrected by either party.
        case e1 == nil && e2 == nil:
                return info.NodeKey1Bytes, info.NodeKey2Bytes

        // If we're missing edge1, or if both edges are present but edge1 is
        // older, we'll return edge1's pubkey and a blank pubkey for edge2. This
        // means that only an update from edge1 will be able to resurrect the
        // channel.
        case e1 == nil || (e2 != nil && e1.Before(*e2)):
                return info.NodeKey1Bytes, [33]byte{}

        // Otherwise, we're missing edge2 or edge2 is the older side, so we
        // return a blank pubkey for edge1. In this case, only an update from
        // edge2 can resurrect the channel.
        default:
                return [33]byte{}, info.NodeKey2Bytes
        }
}

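// exampleMakeZombiePubkeys is an illustrative sketch of the rules above: with
// node1's update older than node2's, only node1's pubkey is kept, so only a
// fresh update from node1 can resurrect the channel. The timestamps are
// arbitrary example values.
func exampleMakeZombiePubkeys(info *models.ChannelEdgeInfo) ([33]byte, [33]byte) {
        older := time.Now().Add(-48 * time.Hour)
        newer := time.Now()

        // e1 is older than e2, so the first return value is node1's pubkey
        // and the second is blank.
        return makeZombiePubkeys(info, &older, &newer)
}
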
// UpdateEdgePolicy updates the edge routing policy for a single directed edge
// within the database for the referenced channel. The `flags` attribute within
// the ChannelEdgePolicy determines which of the directed edges are being
// updated. If the direction flag is 0, then the first node's information is
// being updated, otherwise it's the second node's information. The node
// ordering is determined by the lexicographical ordering of the identity
// public keys of the nodes on either side of the channel.
func (c *KVStore) UpdateEdgePolicy(ctx context.Context,
        edge *models.ChannelEdgePolicy,
        opts ...batch.SchedulerOption) (route.Vertex, route.Vertex, error) {

        var (
                isUpdate1    bool
                edgeNotFound bool
                from, to     route.Vertex
        )

        r := &batch.Request[kvdb.RwTx]{
                Opts: batch.NewSchedulerOptions(opts...),
                Reset: func() {
                        isUpdate1 = false
                        edgeNotFound = false
                },
                Do: func(tx kvdb.RwTx) error {
                        var err error
                        from, to, isUpdate1, err = updateEdgePolicy(tx, edge)
                        if err != nil {
                                log.Errorf("UpdateEdgePolicy failed: %v", err)
                        }

                        // Silence ErrEdgeNotFound so that the batch can
                        // succeed, but propagate the error via local state.
                        if errors.Is(err, ErrEdgeNotFound) {
                                edgeNotFound = true
                                return nil
                        }

                        return err
                },
                OnCommit: func(err error) error {
                        switch {
                        case err != nil:
                                return err
                        case edgeNotFound:
                                return ErrEdgeNotFound
                        default:
                                c.updateEdgeCache(edge, isUpdate1)
                                return nil
                        }
                },
        }

        err := c.chanScheduler.Execute(ctx, r)

        return from, to, err
}

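// exampleUpdateEdgePolicy is an illustrative sketch of applying a policy
// update and reacting to an unknown channel; the policy value is assumed to
// have been validated and populated by the caller.
func exampleUpdateEdgePolicy(ctx context.Context, c *KVStore,
        policy *models.ChannelEdgePolicy) error {

        from, to, err := c.UpdateEdgePolicy(ctx, policy)
        switch {
        case errors.Is(err, ErrEdgeNotFound):
                // The referenced channel isn't in the graph yet; callers
                // typically defer the update until the channel announcement
                // arrives.
                return err

        case err != nil:
                return err
        }

        fmt.Printf("updated policy for edge %v -> %v\n", from, to)

        return nil
}
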
func (c *KVStore) updateEdgeCache(e *models.ChannelEdgePolicy,
        isUpdate1 bool) {

        // If an entry for this channel is found in reject cache, we'll modify
        // the entry with the updated timestamp for the direction that was just
        // written. If the edge doesn't exist, we'll load the cache entry lazily
        // during the next query for this edge.
        if entry, ok := c.rejectCache.get(e.ChannelID); ok {
                if isUpdate1 {
                        entry.upd1Time = e.LastUpdate.Unix()
                } else {
                        entry.upd2Time = e.LastUpdate.Unix()
                }
                c.rejectCache.insert(e.ChannelID, entry)
        }

        // If an entry for this channel is found in channel cache, we'll modify
        // the entry with the updated policy for the direction that was just
        // written. If the edge doesn't exist, we'll defer loading the info and
        // policies and lazily read from disk during the next query.
        if channel, ok := c.chanCache.get(e.ChannelID); ok {
                if isUpdate1 {
                        channel.Policy1 = e
                } else {
                        channel.Policy2 = e
                }
                c.chanCache.insert(e.ChannelID, channel)
        }
}

// updateEdgePolicy attempts to update an edge's policy within the relevant
// buckets using an existing database transaction. The returned boolean will be
// true if the updated policy belongs to node1, and false if it belongs to
// node2.
func updateEdgePolicy(tx kvdb.RwTx, edge *models.ChannelEdgePolicy) (
        route.Vertex, route.Vertex, bool, error) {

        var noVertex route.Vertex

        edges := tx.ReadWriteBucket(edgeBucket)
        if edges == nil {
                return noVertex, noVertex, false, ErrEdgeNotFound
        }
        edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
        if edgeIndex == nil {
                return noVertex, noVertex, false, ErrEdgeNotFound
        }

        // Create the channelID key by converting the channel ID
        // integer into a byte slice.
        var chanID [8]byte
        byteOrder.PutUint64(chanID[:], edge.ChannelID)

        // With the channel ID, we then fetch the value storing the two
        // nodes which connect this channel edge.
        nodeInfo := edgeIndex.Get(chanID[:])
        if nodeInfo == nil {
                return noVertex, noVertex, false, ErrEdgeNotFound
        }

        // Depending on the flags value passed above, either the first
        // or second edge policy is being updated.
        var fromNode, toNode []byte
        var isUpdate1 bool
        if edge.ChannelFlags&lnwire.ChanUpdateDirection == 0 {
                fromNode = nodeInfo[:33]
                toNode = nodeInfo[33:66]
                isUpdate1 = true
        } else {
                fromNode = nodeInfo[33:66]
                toNode = nodeInfo[:33]
                isUpdate1 = false
        }

        // Finally, with the direction of the edge being updated
        // identified, we update the on-disk edge representation.
        err := putChanEdgePolicy(edges, edge, fromNode, toNode)
        if err != nil {
                return noVertex, noVertex, false, err
        }

        var (
                fromNodePubKey route.Vertex
                toNodePubKey   route.Vertex
        )
        copy(fromNodePubKey[:], fromNode)
        copy(toNodePubKey[:], toNode)

        return fromNodePubKey, toNodePubKey, isUpdate1, nil
}

// isPublic determines whether the node is seen as public within the graph from
// the source node's point of view. An existing database transaction can also be
// specified.
func (c *KVStore) isPublic(tx kvdb.RTx, nodePub route.Vertex,
        sourcePubKey []byte) (bool, error) {

        // In order to determine whether this node is publicly advertised within
        // the graph, we'll need to look at all of its edges and check whether
        // they extend to any other node than the source node. errDone will be
        // used to terminate the check early.
        nodeIsPublic := false
        errDone := errors.New("done")
        err := c.forEachNodeChannelTx(tx, nodePub, func(tx kvdb.RTx,
                info *models.ChannelEdgeInfo, _ *models.ChannelEdgePolicy,
                _ *models.ChannelEdgePolicy) error {

                // If this edge doesn't extend to the source node, we'll
                // terminate our search as we can now conclude that the node is
                // publicly advertised within the graph due to the local node
                // knowing of the current edge.
                if !bytes.Equal(info.NodeKey1Bytes[:], sourcePubKey) &&
                        !bytes.Equal(info.NodeKey2Bytes[:], sourcePubKey) {

                        nodeIsPublic = true
                        return errDone
                }

                // Since the edge _does_ extend to the source node, we'll also
                // need to ensure that this is a public edge.
                if info.AuthProof != nil {
                        nodeIsPublic = true
                        return errDone
                }

                // Otherwise, we'll continue our search.
                return nil
        })
        if err != nil && !errors.Is(err, errDone) {
                return false, err
        }

        return nodeIsPublic, nil
}

// FetchLightningNodeTx attempts to look up a target node by its identity
// public key. If the node isn't found in the database, then
// ErrGraphNodeNotFound is returned. An optional transaction may be provided.
// If none is provided, then a new one will be created.
func (c *KVStore) FetchLightningNodeTx(tx kvdb.RTx, nodePub route.Vertex) (
        *models.LightningNode, error) {

        return c.fetchLightningNode(tx, nodePub)
}

// FetchLightningNode attempts to look up a target node by its identity public
// key. If the node isn't found in the database, then ErrGraphNodeNotFound is
// returned.
func (c *KVStore) FetchLightningNode(_ context.Context,
        nodePub route.Vertex) (*models.LightningNode, error) {

        return c.fetchLightningNode(nil, nodePub)
}

// fetchLightningNode attempts to look up a target node by its identity public
// key. If the node isn't found in the database, then ErrGraphNodeNotFound is
// returned. An optional transaction may be provided. If none is provided, then
// a new one will be created.
func (c *KVStore) fetchLightningNode(tx kvdb.RTx,
        nodePub route.Vertex) (*models.LightningNode, error) {

        var node *models.LightningNode
        fetch := func(tx kvdb.RTx) error {
                // First grab the nodes bucket which stores the mapping from
                // pubKey to node information.
                nodes := tx.ReadBucket(nodeBucket)
                if nodes == nil {
                        return ErrGraphNotFound
                }

                // If a key for this serialized public key isn't found, then
                // the target node doesn't exist within the database.
                nodeBytes := nodes.Get(nodePub[:])
                if nodeBytes == nil {
                        return ErrGraphNodeNotFound
                }

                // If the node is found, then we can deserialize the node
                // information to return to the user.
                nodeReader := bytes.NewReader(nodeBytes)
                n, err := deserializeLightningNode(nodeReader)
                if err != nil {
                        return err
                }

                node = &n

                return nil
        }

        if tx == nil {
                err := kvdb.View(
                        c.db, fetch, func() {
                                node = nil
                        },
                )
                if err != nil {
                        return nil, err
                }

                return node, nil
        }

        err := fetch(tx)
        if err != nil {
                return nil, err
        }

        return node, nil
}

// HasLightningNode determines if the graph has a vertex identified by the
// target node identity public key. If the node exists in the database, a
// timestamp of when the data for the node was last updated is returned along
// with a true boolean. Otherwise, an empty time.Time is returned with a false
// boolean.
func (c *KVStore) HasLightningNode(_ context.Context,
        nodePub [33]byte) (time.Time, bool, error) {

        var (
                updateTime time.Time
                exists     bool
        )

        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
                // First grab the nodes bucket which stores the mapping from
                // pubKey to node information.
                nodes := tx.ReadBucket(nodeBucket)
                if nodes == nil {
                        return ErrGraphNotFound
                }

                // If a key for this serialized public key isn't found, we can
                // exit early.
                nodeBytes := nodes.Get(nodePub[:])
                if nodeBytes == nil {
                        exists = false
                        return nil
                }

                // Otherwise we continue on to obtain the time stamp
                // representing the last time the data for this node was
                // updated.
                nodeReader := bytes.NewReader(nodeBytes)
                node, err := deserializeLightningNode(nodeReader)
                if err != nil {
                        return err
                }

                exists = true
                updateTime = node.LastUpdate

                return nil
        }, func() {
                updateTime = time.Time{}
                exists = false
        })
        if err != nil {
                return time.Time{}, exists, err
        }

        return updateTime, exists, nil
}

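// exampleHasLightningNode is an illustrative sketch of using HasLightningNode
// to decide whether a received node announcement is newer than what we have;
// the staleness rule shown here is an assumption for the example.
func exampleHasLightningNode(ctx context.Context, c *KVStore,
        nodePub [33]byte, announceTime time.Time) (bool, error) {

        lastUpdate, exists, err := c.HasLightningNode(ctx, nodePub)
        if err != nil {
                return false, err
        }

        // Treat the announcement as useful if the node is unknown or the
        // announcement is strictly newer than our stored update time.
        return !exists || announceTime.After(lastUpdate), nil
}
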
// nodeTraversal is used to traverse all channels of a node given by its
// public key and passes channel information into the specified callback.
func nodeTraversal(tx kvdb.RTx, nodePub []byte, db kvdb.Backend,
        cb func(kvdb.RTx, *models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
                *models.ChannelEdgePolicy) error) error {

        traversal := func(tx kvdb.RTx) error {
                edges := tx.ReadBucket(edgeBucket)
                if edges == nil {
                        return ErrGraphNotFound
                }
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
                if edgeIndex == nil {
                        return ErrGraphNoEdgesFound
                }

                // In order to reach all the edges for this node, we take
                // advantage of the construction of the key-space within the
                // edge bucket. The keys are stored in the form: pubKey ||
                // chanID. Therefore, starting from a chanID of zero, we can
                // scan forward in the bucket, grabbing all the edges for the
                // node. Once the prefix no longer matches, then we know we're
                // done.
                var nodeStart [33 + 8]byte
                copy(nodeStart[:], nodePub)
                copy(nodeStart[33:], chanStart[:])

                // Starting from the key pubKey || 0, we seek forward in the
                // bucket until the retrieved key no longer has the public key
                // as its prefix. This indicates that we've stepped over into
                // another node's edges, so we can terminate our scan.
                edgeCursor := edges.ReadCursor()
                for nodeEdge, _ := edgeCursor.Seek(nodeStart[:]); bytes.HasPrefix(nodeEdge, nodePub); nodeEdge, _ = edgeCursor.Next() { //nolint:ll
                        // If the prefix still matches, the channel ID is
                        // returned in nodeEdge. The channel ID is used to look
                        // up the node at the other end of the channel and both
                        // edge policies.
                        chanID := nodeEdge[33:]
                        edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID)
                        if err != nil {
                                return err
                        }

                        outgoingPolicy, err := fetchChanEdgePolicy(
                                edges, chanID, nodePub,
                        )
                        if err != nil {
                                return err
                        }

                        otherNode, err := edgeInfo.OtherNodeKeyBytes(nodePub)
                        if err != nil {
                                return err
                        }

                        incomingPolicy, err := fetchChanEdgePolicy(
                                edges, chanID, otherNode[:],
                        )
                        if err != nil {
                                return err
                        }

                        // Finally, we execute the callback.
                        err = cb(tx, &edgeInfo, outgoingPolicy, incomingPolicy)
                        if err != nil {
                                return err
                        }
                }

                return nil
        }

        // If no transaction was provided, then we'll create a new transaction
        // to execute the traversal within.
        if tx == nil {
                return kvdb.View(db, traversal, func() {})
        }

        // Otherwise, we re-use the existing transaction to execute the graph
        // traversal.
        return traversal(tx)
}

// ForEachNodeChannel iterates through all channels of the given node,
// executing the passed callback with an edge info structure and the policies
// of each end of the channel. The first edge policy is the outgoing edge *to*
// the connecting node, while the second is the incoming edge *from* the
// connecting node. If the callback returns an error, then the iteration is
// halted with the error propagated back up to the caller.
//
// Unknown policies are passed into the callback as nil values.
func (c *KVStore) ForEachNodeChannel(nodePub route.Vertex,
        cb func(*models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
                *models.ChannelEdgePolicy) error) error {

        return nodeTraversal(nil, nodePub[:], c.db, func(_ kvdb.RTx,
                info *models.ChannelEdgeInfo, policy,
                policy2 *models.ChannelEdgePolicy) error {

                return cb(info, policy, policy2)
        })
}

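// exampleCountNodeChannels is an illustrative sketch that counts a node's
// channels via ForEachNodeChannel, treating nil policies as "unknown" per the
// comment above.
func exampleCountNodeChannels(c *KVStore, nodePub route.Vertex) (int, int, error) {
        var total, missingPolicies int
        err := c.ForEachNodeChannel(nodePub, func(_ *models.ChannelEdgeInfo,
                outPolicy, inPolicy *models.ChannelEdgePolicy) error {

                total++
                if outPolicy == nil || inPolicy == nil {
                        missingPolicies++
                }

                return nil
        })

        return total, missingPolicies, err
}
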
// ForEachSourceNodeChannel iterates through all channels of the source node,
// executing the passed callback on each. The callback is provided with the
// channel's outpoint, whether we have a policy for the channel and the channel
// peer's node information.
func (c *KVStore) ForEachSourceNodeChannel(cb func(chanPoint wire.OutPoint,
        havePolicy bool, otherNode *models.LightningNode) error) error {

        return kvdb.View(c.db, func(tx kvdb.RTx) error {
                nodes := tx.ReadBucket(nodeBucket)
                if nodes == nil {
                        return ErrGraphNotFound
                }

                node, err := c.sourceNode(nodes)
                if err != nil {
                        return err
                }

                return nodeTraversal(
                        tx, node.PubKeyBytes[:], c.db, func(tx kvdb.RTx,
                                info *models.ChannelEdgeInfo,
                                policy, _ *models.ChannelEdgePolicy) error {

                                peer, err := c.fetchOtherNode(
                                        tx, info, node.PubKeyBytes[:],
                                )
                                if err != nil {
                                        return err
                                }

                                return cb(
                                        info.ChannelPoint, policy != nil, peer,
                                )
                        },
                )
        }, func() {})
}

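// exampleListSourceChannels is an illustrative sketch that collects the
// outpoints of our own channels and notes the peers we still have no local
// policy for.
func exampleListSourceChannels(c *KVStore) ([]wire.OutPoint, error) {
        var chanPoints []wire.OutPoint
        err := c.ForEachSourceNodeChannel(func(chanPoint wire.OutPoint,
                havePolicy bool, peer *models.LightningNode) error {

                chanPoints = append(chanPoints, chanPoint)
                if !havePolicy {
                        fmt.Printf("no local policy yet for channel %v with "+
                                "peer %x\n", chanPoint, peer.PubKeyBytes)
                }

                return nil
        })

        return chanPoints, err
}
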
// forEachNodeChannelTx iterates through all channels of the given node,
// executing the passed callback with an edge info structure and the policies
// of each end of the channel. The first edge policy is the outgoing edge *to*
// the connecting node, while the second is the incoming edge *from* the
// connecting node. If the callback returns an error, then the iteration is
// halted with the error propagated back up to the caller.
//
// Unknown policies are passed into the callback as nil values.
//
// If the caller wishes to re-use an existing boltdb transaction, then it
// should be passed as the first argument. Otherwise, the first argument should
// be nil and a fresh transaction will be created to execute the graph
// traversal.
func (c *KVStore) forEachNodeChannelTx(tx kvdb.RTx,
        nodePub route.Vertex, cb func(kvdb.RTx, *models.ChannelEdgeInfo,
                *models.ChannelEdgePolicy,
                *models.ChannelEdgePolicy) error) error {

        return nodeTraversal(tx, nodePub[:], c.db, cb)
}

// fetchOtherNode attempts to fetch the full LightningNode that's opposite of
// the target node in the channel. This is useful when one knows the pubkey of
// one of the nodes, and wishes to obtain the full LightningNode for the other
// end of the channel.
func (c *KVStore) fetchOtherNode(tx kvdb.RTx,
        channel *models.ChannelEdgeInfo, thisNodeKey []byte) (
        *models.LightningNode, error) {

        // Ensure that the node passed in is actually a member of the channel.
        var targetNodeBytes [33]byte
        switch {
        case bytes.Equal(channel.NodeKey1Bytes[:], thisNodeKey):
                targetNodeBytes = channel.NodeKey2Bytes
        case bytes.Equal(channel.NodeKey2Bytes[:], thisNodeKey):
                targetNodeBytes = channel.NodeKey1Bytes
        default:
                return nil, fmt.Errorf("node not participating in this channel")
        }

        var targetNode *models.LightningNode
        fetchNodeFunc := func(tx kvdb.RTx) error {
                // First grab the nodes bucket which stores the mapping from
                // pubKey to node information.
                nodes := tx.ReadBucket(nodeBucket)
                if nodes == nil {
                        return ErrGraphNotFound
                }

                node, err := fetchLightningNode(nodes, targetNodeBytes[:])
                if err != nil {
                        return err
                }

                targetNode = &node

                return nil
        }

        // If the transaction is nil, then we'll need to create a new one,
        // otherwise we can use the existing db transaction.
        var err error
        if tx == nil {
                err = kvdb.View(c.db, fetchNodeFunc, func() {
                        targetNode = nil
                })
        } else {
                err = fetchNodeFunc(tx)
        }

        return targetNode, err
}

// computeEdgePolicyKeys is a helper function that can be used to compute the
// keys used to index the channel edge policy info for the two nodes of the
// edge. The keys for node 1 and node 2 are returned respectively.
func computeEdgePolicyKeys(info *models.ChannelEdgeInfo) ([]byte, []byte) {
        var (
                node1Key [33 + 8]byte
                node2Key [33 + 8]byte
        )

        copy(node1Key[:], info.NodeKey1Bytes[:])
        copy(node2Key[:], info.NodeKey2Bytes[:])

        byteOrder.PutUint64(node1Key[33:], info.ChannelID)
        byteOrder.PutUint64(node2Key[33:], info.ChannelID)

        return node1Key[:], node2Key[:]
}

// FetchChannelEdgesByOutpoint attempts to look up the two directed edges for
// the channel identified by the funding outpoint. If the channel can't be
// found, then ErrEdgeNotFound is returned. A struct which houses the general
// information for the channel itself is returned as well as two structs that
// contain the routing policies for the channel in either direction.
func (c *KVStore) FetchChannelEdgesByOutpoint(op *wire.OutPoint) (
        *models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
        *models.ChannelEdgePolicy, error) {

        var (
                edgeInfo *models.ChannelEdgeInfo
                policy1  *models.ChannelEdgePolicy
                policy2  *models.ChannelEdgePolicy
        )

        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
                // First, grab the node bucket. This will be used to populate
                // the Node pointers in each edge read from disk.
                nodes := tx.ReadBucket(nodeBucket)
                if nodes == nil {
                        return ErrGraphNotFound
                }

                // Next, grab the edge bucket which stores the edges, and also
                // the index itself so we can group the directed edges together
                // logically.
                edges := tx.ReadBucket(edgeBucket)
                if edges == nil {
                        return ErrGraphNoEdgesFound
                }
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
                if edgeIndex == nil {
                        return ErrGraphNoEdgesFound
                }

                // If the channel's outpoint doesn't exist within the outpoint
                // index, then the edge does not exist.
                chanIndex := edges.NestedReadBucket(channelPointBucket)
                if chanIndex == nil {
                        return ErrGraphNoEdgesFound
                }
                var b bytes.Buffer
                if err := WriteOutpoint(&b, op); err != nil {
                        return err
                }
                chanID := chanIndex.Get(b.Bytes())
                if chanID == nil {
                        return fmt.Errorf("%w: op=%v", ErrEdgeNotFound, op)
                }

                // If the channel is found to exist, then we'll first retrieve
                // the general information for the channel.
                edge, err := fetchChanEdgeInfo(edgeIndex, chanID)
                if err != nil {
                        return fmt.Errorf("%w: chanID=%x", err, chanID)
                }
                edgeInfo = &edge

                // Once we have the information about the channel's parameters,
                // we'll fetch the routing policies for each of the directed
                // edges.
                e1, e2, err := fetchChanEdgePolicies(edgeIndex, edges, chanID)
                if err != nil {
                        return fmt.Errorf("failed to find policy: %w", err)
                }

                policy1 = e1
                policy2 = e2

                return nil
        }, func() {
                edgeInfo = nil
                policy1 = nil
                policy2 = nil
        })
        if err != nil {
                return nil, nil, nil, err
        }

        return edgeInfo, policy1, policy2, nil
}

// FetchChannelEdgesByID attempts to look up the two directed edges for the
// channel identified by the channel ID. If the channel can't be found, then
// ErrEdgeNotFound is returned. A struct which houses the general information
// for the channel itself is returned as well as two structs that contain the
// routing policies for the channel in either direction.
//
// ErrZombieEdge can be returned if the edge is currently marked as a zombie
// within the database. In this case, the ChannelEdgePolicy's will be nil, and
// the ChannelEdgeInfo will only include the public keys of each node.
func (c *KVStore) FetchChannelEdgesByID(chanID uint64) (
        *models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
        *models.ChannelEdgePolicy, error) {

        var (
                edgeInfo  *models.ChannelEdgeInfo
                policy1   *models.ChannelEdgePolicy
                policy2   *models.ChannelEdgePolicy
                channelID [8]byte
        )

        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
                // First, grab the node bucket. This will be used to populate
                // the Node pointers in each edge read from disk.
                nodes := tx.ReadBucket(nodeBucket)
                if nodes == nil {
                        return ErrGraphNotFound
                }

                // Next, grab the edge bucket which stores the edges, and also
                // the index itself so we can group the directed edges together
                // logically.
                edges := tx.ReadBucket(edgeBucket)
                if edges == nil {
                        return ErrGraphNoEdgesFound
                }
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
                if edgeIndex == nil {
                        return ErrGraphNoEdgesFound
                }

                byteOrder.PutUint64(channelID[:], chanID)

                // Now, attempt to fetch the edge.
                edge, err := fetchChanEdgeInfo(edgeIndex, channelID[:])

                // If it doesn't exist, we'll quickly check our zombie index to
                // see if we've previously marked it as such.
                if errors.Is(err, ErrEdgeNotFound) {
                        // If the zombie index doesn't exist, or the edge is not
                        // marked as a zombie within it, then we'll return the
                        // original ErrEdgeNotFound error.
                        zombieIndex := edges.NestedReadBucket(zombieBucket)
                        if zombieIndex == nil {
                                return ErrEdgeNotFound
                        }

                        isZombie, pubKey1, pubKey2 := isZombieEdge(
                                zombieIndex, chanID,
                        )
                        if !isZombie {
                                return ErrEdgeNotFound
                        }

                        // Otherwise, the edge is marked as a zombie, so we'll
                        // populate the edge info with the public keys of each
                        // party as this is the only information we have about
                        // it and return an error signaling so.
                        edgeInfo = &models.ChannelEdgeInfo{
                                NodeKey1Bytes: pubKey1,
                                NodeKey2Bytes: pubKey2,
                        }

                        return ErrZombieEdge
                }

                // Otherwise, we'll just return the error if any.
                if err != nil {
                        return err
                }

                edgeInfo = &edge

                // Then we'll attempt to fetch the accompanying policies of this
                // edge.
                e1, e2, err := fetchChanEdgePolicies(
                        edgeIndex, edges, channelID[:],
                )
                if err != nil {
                        return err
                }

                policy1 = e1
                policy2 = e2

                return nil
        }, func() {
                edgeInfo = nil
                policy1 = nil
                policy2 = nil
        })
        if errors.Is(err, ErrZombieEdge) {
                return edgeInfo, nil, nil, err
        }
        if err != nil {
                return nil, nil, nil, err
        }

        return edgeInfo, policy1, policy2, nil
}

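// exampleFetchChannelEdgesByID is an illustrative sketch of handling the
// ErrZombieEdge case described above: the returned edge info only carries the
// two node pubkeys, and both policies are nil.
func exampleFetchChannelEdgesByID(c *KVStore, chanID uint64) error {
        info, policy1, policy2, err := c.FetchChannelEdgesByID(chanID)
        switch {
        case errors.Is(err, ErrZombieEdge):
                fmt.Printf("channel %d is a zombie between %x and %x\n",
                        chanID, info.NodeKey1Bytes, info.NodeKey2Bytes)
                return nil

        case err != nil:
                return err
        }

        fmt.Printf("channel %d has policies: node1=%v node2=%v\n",
                chanID, policy1 != nil, policy2 != nil)

        return nil
}
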
// IsPublicNode is a helper method that determines whether the node with the
// given public key is seen as a public node in the graph from the graph's
// source node's point of view.
func (c *KVStore) IsPublicNode(pubKey [33]byte) (bool, error) {
        var nodeIsPublic bool
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
                nodes := tx.ReadBucket(nodeBucket)
                if nodes == nil {
                        return ErrGraphNodesNotFound
                }
                ourPubKey := nodes.Get(sourceKey)
                if ourPubKey == nil {
                        return ErrSourceNodeNotSet
                }
                node, err := fetchLightningNode(nodes, pubKey[:])
                if err != nil {
                        return err
                }

                nodeIsPublic, err = c.isPublic(tx, node.PubKeyBytes, ourPubKey)

                return err
        }, func() {
                nodeIsPublic = false
        })
        if err != nil {
                return false, err
        }

        return nodeIsPublic, nil
}

// genMultiSigP2WSH generates the p2wsh'd multisig script for 2 of 2 pubkeys.
func genMultiSigP2WSH(aPub, bPub []byte) ([]byte, error) {
        witnessScript, err := input.GenMultiSigScript(aPub, bPub)
        if err != nil {
                return nil, err
        }

        // With the witness script generated, we'll now turn it into a p2wsh
        // script:
        //  * OP_0 <sha256(script)>
        bldr := txscript.NewScriptBuilder(
                txscript.WithScriptAllocSize(input.P2WSHSize),
        )
        bldr.AddOp(txscript.OP_0)
        scriptHash := sha256.Sum256(witnessScript)
        bldr.AddData(scriptHash[:])

        return bldr.Script()
}

// EdgePoint couples the outpoint of a channel with the funding script that it
// creates. The FilteredChainView will use this to watch for spends of this
// edge point on chain. We require both of these values as depending on the
// concrete implementation, either the pkScript, or the out point will be used.
type EdgePoint struct {
        // FundingPkScript is the p2wsh multi-sig script of the target channel.
        FundingPkScript []byte

        // OutPoint is the outpoint of the target channel.
        OutPoint wire.OutPoint
}

// String returns a human readable version of the target EdgePoint. We return
// the outpoint directly as it is enough to uniquely identify the edge point.
func (e *EdgePoint) String() string {
        return e.OutPoint.String()
}

// ChannelView returns the verifiable edge information for each active channel
3622
// within the known channel graph. The set of UTXO's (along with their scripts)
3623
// returned are the ones that need to be watched on chain to detect channel
3624
// closes on the resident blockchain.
3625
func (c *KVStore) ChannelView() ([]EdgePoint, error) {
25✔
3626
        var edgePoints []EdgePoint
25✔
3627
        if err := kvdb.View(c.db, func(tx kvdb.RTx) error {
50✔
3628
                // We're going to iterate over the entire channel index, so
25✔
3629
                // we'll need to fetch the edgeBucket to get to the index as
25✔
3630
                // it's a sub-bucket.
25✔
3631
                edges := tx.ReadBucket(edgeBucket)
25✔
3632
                if edges == nil {
25✔
3633
                        return ErrGraphNoEdgesFound
×
3634
                }
×
3635
                chanIndex := edges.NestedReadBucket(channelPointBucket)
25✔
3636
                if chanIndex == nil {
25✔
3637
                        return ErrGraphNoEdgesFound
×
3638
                }
×
3639
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
25✔
3640
                if edgeIndex == nil {
25✔
3641
                        return ErrGraphNoEdgesFound
×
3642
                }
×
3643

3644
                // Once we have the proper bucket, we'll range over each key
3645
                // (which is the channel point for the channel) and decode it,
3646
                // accumulating each entry.
3647
                return chanIndex.ForEach(
25✔
3648
                        func(chanPointBytes, chanID []byte) error {
70✔
3649
                                chanPointReader := bytes.NewReader(
45✔
3650
                                        chanPointBytes,
45✔
3651
                                )
45✔
3652

45✔
3653
                                var chanPoint wire.OutPoint
45✔
3654
                                err := ReadOutpoint(chanPointReader, &chanPoint)
45✔
3655
                                if err != nil {
45✔
3656
                                        return err
×
3657
                                }
×
3658

3659
                                edgeInfo, err := fetchChanEdgeInfo(
45✔
3660
                                        edgeIndex, chanID,
45✔
3661
                                )
45✔
3662
                                if err != nil {
45✔
3663
                                        return err
×
3664
                                }
×
3665

3666
                                pkScript, err := genMultiSigP2WSH(
45✔
3667
                                        edgeInfo.BitcoinKey1Bytes[:],
45✔
3668
                                        edgeInfo.BitcoinKey2Bytes[:],
45✔
3669
                                )
45✔
3670
                                if err != nil {
45✔
3671
                                        return err
×
3672
                                }
×
3673

3674
                                edgePoints = append(edgePoints, EdgePoint{
45✔
3675
                                        FundingPkScript: pkScript,
45✔
3676
                                        OutPoint:        chanPoint,
45✔
3677
                                })
45✔
3678

45✔
3679
                                return nil
45✔
3680
                        },
3681
                )
3682
        }, func() {
25✔
3683
                edgePoints = nil
25✔
3684
        }); err != nil {
25✔
3685
                return nil, err
×
3686
        }
×
3687

3688
        return edgePoints, nil
25✔
3689
}
3690
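
// Illustrative usage sketch (not part of the original file): a caller can use
// ChannelView to obtain the set of funding outpoints and scripts that have to
// be watched on chain. The *KVStore is assumed to be opened elsewhere.
func exampleWatchChannelPoints(store *KVStore) error {
	edgePoints, err := store.ChannelView()
	if err != nil {
		return err
	}

	for _, edgePoint := range edgePoints {
		// Each EdgePoint pairs the channel outpoint with its p2wsh
		// funding script; depending on the chain view implementation,
		// either one may be used to watch for a spend.
		fmt.Printf("watching %v (pkScript=%x)\n", edgePoint.OutPoint,
			edgePoint.FundingPkScript)
	}

	return nil
}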

// MarkEdgeZombie attempts to mark a channel identified by its channel ID as a
// zombie. This method is used on an ad-hoc basis, when channels need to be
// marked as zombies outside the normal pruning cycle.
func (c *KVStore) MarkEdgeZombie(chanID uint64,
	pubKey1, pubKey2 [33]byte) error {

	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()

	err := kvdb.Batch(c.db, func(tx kvdb.RwTx) error {
		edges := tx.ReadWriteBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
		if err != nil {
			return fmt.Errorf("unable to create zombie "+
				"bucket: %w", err)
		}

		return markEdgeZombie(zombieIndex, chanID, pubKey1, pubKey2)
	})
	if err != nil {
		return err
	}

	c.rejectCache.remove(chanID)
	c.chanCache.remove(chanID)

	return nil
}

// markEdgeZombie marks an edge as a zombie within our zombie index. The public
// keys should represent the node public keys of the two parties involved in the
// edge.
func markEdgeZombie(zombieIndex kvdb.RwBucket, chanID uint64, pubKey1,
	pubKey2 [33]byte) error {

	var k [8]byte
	byteOrder.PutUint64(k[:], chanID)

	var v [66]byte
	copy(v[:33], pubKey1[:])
	copy(v[33:], pubKey2[:])

	return zombieIndex.Put(k[:], v[:])
}

// MarkEdgeLive clears an edge from our zombie index, deeming it as live.
func (c *KVStore) MarkEdgeLive(chanID uint64) error {
	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()

	return c.markEdgeLiveUnsafe(nil, chanID)
}

// markEdgeLiveUnsafe clears an edge from the zombie index. This method can be
// called with an existing kvdb.RwTx or the argument can be set to nil in which
// case a new transaction will be created.
//
// NOTE: this method MUST only be called if the cacheMu has already been
// acquired.
func (c *KVStore) markEdgeLiveUnsafe(tx kvdb.RwTx, chanID uint64) error {
	dbFn := func(tx kvdb.RwTx) error {
		edges := tx.ReadWriteBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		zombieIndex := edges.NestedReadWriteBucket(zombieBucket)
		if zombieIndex == nil {
			return nil
		}

		var k [8]byte
		byteOrder.PutUint64(k[:], chanID)

		if len(zombieIndex.Get(k[:])) == 0 {
			return ErrZombieEdgeNotFound
		}

		return zombieIndex.Delete(k[:])
	}

	// If the transaction is nil, we'll create a new one. Otherwise, we use
	// the existing transaction
	var err error
	if tx == nil {
		err = kvdb.Update(c.db, dbFn, func() {})
	} else {
		err = dbFn(tx)
	}
	if err != nil {
		return err
	}

	c.rejectCache.remove(chanID)
	c.chanCache.remove(chanID)

	return nil
}

// IsZombieEdge returns whether the edge is considered zombie. If it is a
// zombie, then the two node public keys corresponding to this edge are also
// returned.
func (c *KVStore) IsZombieEdge(chanID uint64) (bool, [33]byte, [33]byte,
	error) {

	var (
		isZombie         bool
		pubKey1, pubKey2 [33]byte
	)

	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		zombieIndex := edges.NestedReadBucket(zombieBucket)
		if zombieIndex == nil {
			return nil
		}

		isZombie, pubKey1, pubKey2 = isZombieEdge(zombieIndex, chanID)

		return nil
	}, func() {
		isZombie = false
		pubKey1 = [33]byte{}
		pubKey2 = [33]byte{}
	})
	if err != nil {
		return false, [33]byte{}, [33]byte{}, fmt.Errorf("%w: %w "+
			"(chanID=%d)", ErrCantCheckIfZombieEdgeStr, err, chanID)
	}

	return isZombie, pubKey1, pubKey2, nil
}

// isZombieEdge returns whether an entry exists for the given channel in the
// zombie index. If an entry exists, then the two node public keys corresponding
// to this edge are also returned.
func isZombieEdge(zombieIndex kvdb.RBucket,
	chanID uint64) (bool, [33]byte, [33]byte) {

	var k [8]byte
	byteOrder.PutUint64(k[:], chanID)

	v := zombieIndex.Get(k[:])
	if v == nil {
		return false, [33]byte{}, [33]byte{}
	}

	var pubKey1, pubKey2 [33]byte
	copy(pubKey1[:], v[:33])
	copy(pubKey2[:], v[33:])

	return true, pubKey1, pubKey2
}

// NumZombies returns the current number of zombie channels in the graph.
func (c *KVStore) NumZombies() (uint64, error) {
	var numZombies uint64
	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return nil
		}
		zombieIndex := edges.NestedReadBucket(zombieBucket)
		if zombieIndex == nil {
			return nil
		}

		return zombieIndex.ForEach(func(_, _ []byte) error {
			numZombies++
			return nil
		})
	}, func() {
		numZombies = 0
	})
	if err != nil {
		return 0, err
	}

	return numZombies, nil
}
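
// Illustrative usage sketch (not part of the original file) for the zombie
// index API above: an edge is marked as a zombie together with the two node
// public keys, can then be queried via IsZombieEdge and NumZombies, and is
// resurrected again with MarkEdgeLive. The *KVStore, channel ID and public
// keys are assumed to come from the caller.
func exampleZombieLifecycle(store *KVStore, chanID uint64,
	pubKey1, pubKey2 [33]byte) error {

	// Mark the channel as a zombie outside the normal pruning cycle.
	if err := store.MarkEdgeZombie(chanID, pubKey1, pubKey2); err != nil {
		return err
	}

	// The zombie index now reports the edge, along with the keys that
	// were stored for it.
	isZombie, key1, key2, err := store.IsZombieEdge(chanID)
	if err != nil {
		return err
	}
	fmt.Printf("zombie=%v key1=%x key2=%x\n", isZombie, key1, key2)

	numZombies, err := store.NumZombies()
	if err != nil {
		return err
	}
	fmt.Printf("zombie channels: %d\n", numZombies)

	// Finally, clear the entry again, deeming the edge live.
	return store.MarkEdgeLive(chanID)
}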

// PutClosedScid stores a SCID for a closed channel in the database. This is so
// that we can ignore channel announcements that we know to be closed without
// having to validate them and fetch a block.
func (c *KVStore) PutClosedScid(scid lnwire.ShortChannelID) error {
	return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
		closedScids, err := tx.CreateTopLevelBucket(closedScidBucket)
		if err != nil {
			return err
		}

		var k [8]byte
		byteOrder.PutUint64(k[:], scid.ToUint64())

		return closedScids.Put(k[:], []byte{})
	}, func() {})
}

// IsClosedScid checks whether a channel identified by the passed in scid is
// closed. This helps avoid having to perform expensive validation checks.
// TODO: Add an LRU cache to cut down on disc reads.
func (c *KVStore) IsClosedScid(scid lnwire.ShortChannelID) (bool, error) {
	var isClosed bool
	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		closedScids := tx.ReadBucket(closedScidBucket)
		if closedScids == nil {
			return ErrClosedScidsNotFound
		}

		var k [8]byte
		byteOrder.PutUint64(k[:], scid.ToUint64())

		if closedScids.Get(k[:]) != nil {
			isClosed = true
			return nil
		}

		return nil
	}, func() {
		isClosed = false
	})
	if err != nil {
		return false, err
	}

	return isClosed, nil
}
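
// Illustrative usage sketch (not part of the original file): once a channel is
// known to be closed, its SCID can be recorded so that later announcements for
// it are cheap to reject. The store and SCID are assumed to be supplied by the
// caller.
func exampleRecordClosedChannel(store *KVStore,
	scid lnwire.ShortChannelID) error {

	if err := store.PutClosedScid(scid); err != nil {
		return err
	}

	closed, err := store.IsClosedScid(scid)
	if err != nil {
		return err
	}
	fmt.Printf("scid %v closed=%v\n", scid, closed)

	return nil
}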

// GraphSession will provide the call-back with access to a NodeTraverser
// instance which can be used to perform queries against the channel graph.
func (c *KVStore) GraphSession(cb func(graph NodeTraverser) error) error {
	return c.db.View(func(tx walletdb.ReadTx) error {
		return cb(&nodeTraverserSession{
			db: c,
			tx: tx,
		})
	}, func() {})
}

// nodeTraverserSession implements the NodeTraverser interface but with a
// backing read only transaction for a consistent view of the graph.
type nodeTraverserSession struct {
	tx kvdb.RTx
	db *KVStore
}

// ForEachNodeDirectedChannel calls the callback for every channel of the given
// node.
//
// NOTE: Part of the NodeTraverser interface.
func (c *nodeTraverserSession) ForEachNodeDirectedChannel(nodePub route.Vertex,
	cb func(channel *DirectedChannel) error) error {

	return c.db.forEachNodeDirectedChannel(c.tx, nodePub, cb)
}

// FetchNodeFeatures returns the features of the given node. If the node is
// unknown, assume no additional features are supported.
//
// NOTE: Part of the NodeTraverser interface.
func (c *nodeTraverserSession) FetchNodeFeatures(nodePub route.Vertex) (
	*lnwire.FeatureVector, error) {

	return c.db.fetchNodeFeatures(c.tx, nodePub)
}
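
// Illustrative usage sketch (not part of the original file): GraphSession
// hands the callback a NodeTraverser that is backed by a single read
// transaction, so a multi-step traversal sees one consistent snapshot of the
// graph. The store and starting vertex are assumed to be supplied by the
// caller.
func exampleGraphSession(store *KVStore, start route.Vertex) error {
	return store.GraphSession(func(graph NodeTraverser) error {
		// Both calls below run against the same read transaction.
		features, err := graph.FetchNodeFeatures(start)
		if err != nil {
			return err
		}
		fmt.Printf("features of %x: %v\n", start, features)

		var numChannels int
		err = graph.ForEachNodeDirectedChannel(start,
			func(_ *DirectedChannel) error {
				numChannels++
				return nil
			},
		)
		if err != nil {
			return err
		}
		fmt.Printf("channels for %x: %d\n", start, numChannels)

		return nil
	})
}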

func putLightningNode(nodeBucket, aliasBucket, updateIndex kvdb.RwBucket,
	node *models.LightningNode) error {

	var (
		scratch [16]byte
		b       bytes.Buffer
	)

	pub, err := node.PubKey()
	if err != nil {
		return err
	}
	nodePub := pub.SerializeCompressed()

	// If the node has the update time set, write it, else write 0.
	updateUnix := uint64(0)
	if node.LastUpdate.Unix() > 0 {
		updateUnix = uint64(node.LastUpdate.Unix())
	}

	byteOrder.PutUint64(scratch[:8], updateUnix)
	if _, err := b.Write(scratch[:8]); err != nil {
		return err
	}

	if _, err := b.Write(nodePub); err != nil {
		return err
	}

	// If we got a node announcement for this node, we will have the rest
	// of the data available. If not, we don't have more data to write.
	if !node.HaveNodeAnnouncement {
		// Write HaveNodeAnnouncement=0.
		byteOrder.PutUint16(scratch[:2], 0)
		if _, err := b.Write(scratch[:2]); err != nil {
			return err
		}

		return nodeBucket.Put(nodePub, b.Bytes())
	}

	// Write HaveNodeAnnouncement=1.
	byteOrder.PutUint16(scratch[:2], 1)
	if _, err := b.Write(scratch[:2]); err != nil {
		return err
	}

	if err := binary.Write(&b, byteOrder, node.Color.R); err != nil {
		return err
	}
	if err := binary.Write(&b, byteOrder, node.Color.G); err != nil {
		return err
	}
	if err := binary.Write(&b, byteOrder, node.Color.B); err != nil {
		return err
	}

	if err := wire.WriteVarString(&b, 0, node.Alias); err != nil {
		return err
	}

	if err := node.Features.Encode(&b); err != nil {
		return err
	}

	numAddresses := uint16(len(node.Addresses))
	byteOrder.PutUint16(scratch[:2], numAddresses)
	if _, err := b.Write(scratch[:2]); err != nil {
		return err
	}

	for _, address := range node.Addresses {
		if err := SerializeAddr(&b, address); err != nil {
			return err
		}
	}

	sigLen := len(node.AuthSigBytes)
	if sigLen > 80 {
		return fmt.Errorf("max sig len allowed is 80, had %v",
			sigLen)
	}

	err = wire.WriteVarBytes(&b, 0, node.AuthSigBytes)
	if err != nil {
		return err
	}

	if len(node.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes {
		return ErrTooManyExtraOpaqueBytes(len(node.ExtraOpaqueData))
	}
	err = wire.WriteVarBytes(&b, 0, node.ExtraOpaqueData)
	if err != nil {
		return err
	}

	if err := aliasBucket.Put(nodePub, []byte(node.Alias)); err != nil {
		return err
	}

	// With the alias bucket updated, we'll now update the index that
	// tracks the time series of node updates.
	var indexKey [8 + 33]byte
	byteOrder.PutUint64(indexKey[:8], updateUnix)
	copy(indexKey[8:], nodePub)

	// If there was already an old index entry for this node, then we'll
	// delete the old one before we write the new entry.
	if nodeBytes := nodeBucket.Get(nodePub); nodeBytes != nil {
		// Extract out the old update time so we can reconstruct the
		// prior index key to delete it from the index.
		oldUpdateTime := nodeBytes[:8]

		var oldIndexKey [8 + 33]byte
		copy(oldIndexKey[:8], oldUpdateTime)
		copy(oldIndexKey[8:], nodePub)

		if err := updateIndex.Delete(oldIndexKey[:]); err != nil {
			return err
		}
	}

	if err := updateIndex.Put(indexKey[:], nil); err != nil {
		return err
	}

	return nodeBucket.Put(nodePub, b.Bytes())
}
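
// Illustrative sketch (not part of the original file): putLightningNode above
// also maintains the node update index, whose keys are the node's last update
// timestamp followed by its compressed public key so that entries sort in
// timestamp order. The helper below only demonstrates that 8+33 byte layout.
func exampleNodeUpdateIndexKey(updateUnix uint64,
	nodePub [33]byte) [8 + 33]byte {

	var indexKey [8 + 33]byte

	// First 8 bytes: update time. Remaining 33 bytes: compressed pubkey.
	byteOrder.PutUint64(indexKey[:8], updateUnix)
	copy(indexKey[8:], nodePub[:])

	return indexKey
}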

func fetchLightningNode(nodeBucket kvdb.RBucket,
	nodePub []byte) (models.LightningNode, error) {

	nodeBytes := nodeBucket.Get(nodePub)
	if nodeBytes == nil {
		return models.LightningNode{}, ErrGraphNodeNotFound
	}

	nodeReader := bytes.NewReader(nodeBytes)

	return deserializeLightningNode(nodeReader)
}

func deserializeLightningNodeCacheable(r io.Reader) (route.Vertex,
	*lnwire.FeatureVector, error) {

	var (
		pubKey      route.Vertex
		features    = lnwire.EmptyFeatureVector()
		nodeScratch [8]byte
	)

	// Skip ahead:
	// - LastUpdate (8 bytes)
	if _, err := r.Read(nodeScratch[:]); err != nil {
		return pubKey, nil, err
	}

	if _, err := io.ReadFull(r, pubKey[:]); err != nil {
		return pubKey, nil, err
	}

	// Read the node announcement flag.
	if _, err := r.Read(nodeScratch[:2]); err != nil {
		return pubKey, nil, err
	}
	hasNodeAnn := byteOrder.Uint16(nodeScratch[:2])

	// The rest of the data is optional, and will only be there if we got a
	// node announcement for this node.
	if hasNodeAnn == 0 {
		return pubKey, features, nil
	}

	// We did get a node announcement for this node, so we'll have the rest
	// of the data available.
	var rgb uint8
	if err := binary.Read(r, byteOrder, &rgb); err != nil {
		return pubKey, nil, err
	}
	if err := binary.Read(r, byteOrder, &rgb); err != nil {
		return pubKey, nil, err
	}
	if err := binary.Read(r, byteOrder, &rgb); err != nil {
		return pubKey, nil, err
	}

	if _, err := wire.ReadVarString(r, 0); err != nil {
		return pubKey, nil, err
	}

	if err := features.Decode(r); err != nil {
		return pubKey, nil, err
	}

	return pubKey, features, nil
}

func deserializeLightningNode(r io.Reader) (models.LightningNode, error) {
	var (
		node    models.LightningNode
		scratch [8]byte
		err     error
	)

	// Always populate a feature vector, even if we don't have a node
	// announcement and short circuit below.
	node.Features = lnwire.EmptyFeatureVector()

	if _, err := r.Read(scratch[:]); err != nil {
		return models.LightningNode{}, err
	}

	unix := int64(byteOrder.Uint64(scratch[:]))
	node.LastUpdate = time.Unix(unix, 0)

	if _, err := io.ReadFull(r, node.PubKeyBytes[:]); err != nil {
		return models.LightningNode{}, err
	}

	if _, err := r.Read(scratch[:2]); err != nil {
		return models.LightningNode{}, err
	}

	hasNodeAnn := byteOrder.Uint16(scratch[:2])
	if hasNodeAnn == 1 {
		node.HaveNodeAnnouncement = true
	} else {
		node.HaveNodeAnnouncement = false
	}

	// The rest of the data is optional, and will only be there if we got a
	// node announcement for this node.
	if !node.HaveNodeAnnouncement {
		return node, nil
	}

	// We did get a node announcement for this node, so we'll have the rest
	// of the data available.
	if err := binary.Read(r, byteOrder, &node.Color.R); err != nil {
		return models.LightningNode{}, err
	}
	if err := binary.Read(r, byteOrder, &node.Color.G); err != nil {
		return models.LightningNode{}, err
	}
	if err := binary.Read(r, byteOrder, &node.Color.B); err != nil {
		return models.LightningNode{}, err
	}

	node.Alias, err = wire.ReadVarString(r, 0)
	if err != nil {
		return models.LightningNode{}, err
	}

	err = node.Features.Decode(r)
	if err != nil {
		return models.LightningNode{}, err
	}

	if _, err := r.Read(scratch[:2]); err != nil {
		return models.LightningNode{}, err
	}
	numAddresses := int(byteOrder.Uint16(scratch[:2]))

	var addresses []net.Addr
	for i := 0; i < numAddresses; i++ {
		address, err := DeserializeAddr(r)
		if err != nil {
			return models.LightningNode{}, err
		}
		addresses = append(addresses, address)
	}
	node.Addresses = addresses

	node.AuthSigBytes, err = wire.ReadVarBytes(r, 0, 80, "sig")
	if err != nil {
		return models.LightningNode{}, err
	}

	// We'll try and see if there are any opaque bytes left, if not, then
	// we'll ignore the EOF error and return the node as is.
	extraBytes, err := wire.ReadVarBytes(
		r, 0, MaxAllowedExtraOpaqueBytes, "blob",
	)
	switch {
	case errors.Is(err, io.ErrUnexpectedEOF):
	case errors.Is(err, io.EOF):
	case err != nil:
		return models.LightningNode{}, err
	}

	if len(extraBytes) > 0 {
		node.ExtraOpaqueData = extraBytes
	}

	return node, nil
}

func putChanEdgeInfo(edgeIndex kvdb.RwBucket,
	edgeInfo *models.ChannelEdgeInfo, chanID [8]byte) error {

	var b bytes.Buffer

	if _, err := b.Write(edgeInfo.NodeKey1Bytes[:]); err != nil {
		return err
	}
	if _, err := b.Write(edgeInfo.NodeKey2Bytes[:]); err != nil {
		return err
	}
	if _, err := b.Write(edgeInfo.BitcoinKey1Bytes[:]); err != nil {
		return err
	}
	if _, err := b.Write(edgeInfo.BitcoinKey2Bytes[:]); err != nil {
		return err
	}

	if err := wire.WriteVarBytes(&b, 0, edgeInfo.Features); err != nil {
		return err
	}

	authProof := edgeInfo.AuthProof
	var nodeSig1, nodeSig2, bitcoinSig1, bitcoinSig2 []byte
	if authProof != nil {
		nodeSig1 = authProof.NodeSig1Bytes
		nodeSig2 = authProof.NodeSig2Bytes
		bitcoinSig1 = authProof.BitcoinSig1Bytes
		bitcoinSig2 = authProof.BitcoinSig2Bytes
	}

	if err := wire.WriteVarBytes(&b, 0, nodeSig1); err != nil {
		return err
	}
	if err := wire.WriteVarBytes(&b, 0, nodeSig2); err != nil {
		return err
	}
	if err := wire.WriteVarBytes(&b, 0, bitcoinSig1); err != nil {
		return err
	}
	if err := wire.WriteVarBytes(&b, 0, bitcoinSig2); err != nil {
		return err
	}

	if err := WriteOutpoint(&b, &edgeInfo.ChannelPoint); err != nil {
		return err
	}
	err := binary.Write(&b, byteOrder, uint64(edgeInfo.Capacity))
	if err != nil {
		return err
	}
	if _, err := b.Write(chanID[:]); err != nil {
		return err
	}
	if _, err := b.Write(edgeInfo.ChainHash[:]); err != nil {
		return err
	}

	if len(edgeInfo.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes {
		return ErrTooManyExtraOpaqueBytes(len(edgeInfo.ExtraOpaqueData))
	}
	err = wire.WriteVarBytes(&b, 0, edgeInfo.ExtraOpaqueData)
	if err != nil {
		return err
	}

	return edgeIndex.Put(chanID[:], b.Bytes())
}

func fetchChanEdgeInfo(edgeIndex kvdb.RBucket,
	chanID []byte) (models.ChannelEdgeInfo, error) {

	edgeInfoBytes := edgeIndex.Get(chanID)
	if edgeInfoBytes == nil {
		return models.ChannelEdgeInfo{}, ErrEdgeNotFound
	}

	edgeInfoReader := bytes.NewReader(edgeInfoBytes)

	return deserializeChanEdgeInfo(edgeInfoReader)
}

func deserializeChanEdgeInfo(r io.Reader) (models.ChannelEdgeInfo, error) {
	var (
		err      error
		edgeInfo models.ChannelEdgeInfo
	)

	if _, err := io.ReadFull(r, edgeInfo.NodeKey1Bytes[:]); err != nil {
		return models.ChannelEdgeInfo{}, err
	}
	if _, err := io.ReadFull(r, edgeInfo.NodeKey2Bytes[:]); err != nil {
		return models.ChannelEdgeInfo{}, err
	}
	if _, err := io.ReadFull(r, edgeInfo.BitcoinKey1Bytes[:]); err != nil {
		return models.ChannelEdgeInfo{}, err
	}
	if _, err := io.ReadFull(r, edgeInfo.BitcoinKey2Bytes[:]); err != nil {
		return models.ChannelEdgeInfo{}, err
	}

	edgeInfo.Features, err = wire.ReadVarBytes(r, 0, 900, "features")
	if err != nil {
		return models.ChannelEdgeInfo{}, err
	}

	proof := &models.ChannelAuthProof{}

	proof.NodeSig1Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
	if err != nil {
		return models.ChannelEdgeInfo{}, err
	}
	proof.NodeSig2Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
	if err != nil {
		return models.ChannelEdgeInfo{}, err
	}
	proof.BitcoinSig1Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
	if err != nil {
		return models.ChannelEdgeInfo{}, err
	}
	proof.BitcoinSig2Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
	if err != nil {
		return models.ChannelEdgeInfo{}, err
	}

	if !proof.IsEmpty() {
		edgeInfo.AuthProof = proof
	}

	edgeInfo.ChannelPoint = wire.OutPoint{}
	if err := ReadOutpoint(r, &edgeInfo.ChannelPoint); err != nil {
		return models.ChannelEdgeInfo{}, err
	}
	if err := binary.Read(r, byteOrder, &edgeInfo.Capacity); err != nil {
		return models.ChannelEdgeInfo{}, err
	}
	if err := binary.Read(r, byteOrder, &edgeInfo.ChannelID); err != nil {
		return models.ChannelEdgeInfo{}, err
	}

	if _, err := io.ReadFull(r, edgeInfo.ChainHash[:]); err != nil {
		return models.ChannelEdgeInfo{}, err
	}

	// We'll try and see if there are any opaque bytes left, if not, then
	// we'll ignore the EOF error and return the edge as is.
	edgeInfo.ExtraOpaqueData, err = wire.ReadVarBytes(
		r, 0, MaxAllowedExtraOpaqueBytes, "blob",
	)
	switch {
	case errors.Is(err, io.ErrUnexpectedEOF):
	case errors.Is(err, io.EOF):
	case err != nil:
		return models.ChannelEdgeInfo{}, err
	}

	return edgeInfo, nil
}

func putChanEdgePolicy(edges kvdb.RwBucket, edge *models.ChannelEdgePolicy,
	from, to []byte) error {

	var edgeKey [33 + 8]byte
	copy(edgeKey[:], from)
	byteOrder.PutUint64(edgeKey[33:], edge.ChannelID)

	var b bytes.Buffer
	if err := serializeChanEdgePolicy(&b, edge, to); err != nil {
		return err
	}

	// Before we write out the new edge, we'll create a new entry in the
	// update index in order to keep it fresh.
	updateUnix := uint64(edge.LastUpdate.Unix())
	var indexKey [8 + 8]byte
	byteOrder.PutUint64(indexKey[:8], updateUnix)
	byteOrder.PutUint64(indexKey[8:], edge.ChannelID)

	updateIndex, err := edges.CreateBucketIfNotExists(edgeUpdateIndexBucket)
	if err != nil {
		return err
	}

	// If there was already an entry for this edge, then we'll need to
	// delete the old one to ensure we don't leave around any after-images.
	// An unknown policy value does not have an update time recorded, so
	// it also does not need to be removed.
	if edgeBytes := edges.Get(edgeKey[:]); edgeBytes != nil &&
		!bytes.Equal(edgeBytes, unknownPolicy) {

		// In order to delete the old entry, we'll need to obtain the
		// *prior* update time in order to delete it. To do this, we'll
		// need to deserialize the existing policy within the database
		// (now outdated by the new one), and delete its corresponding
		// entry within the update index. We'll ignore any
		// ErrEdgePolicyOptionalFieldNotFound or ErrParsingExtraTLVBytes
		// errors, as we only need the channel ID and update time to
		// delete the entry.
		//
		// TODO(halseth): get rid of these invalid policies in a
		// migration.
		// TODO(elle): complete the above TODO in migration from kvdb
		// to SQL.
		oldEdgePolicy, err := deserializeChanEdgePolicy(
			bytes.NewReader(edgeBytes),
		)
		if err != nil &&
			!errors.Is(err, ErrEdgePolicyOptionalFieldNotFound) &&
			!errors.Is(err, ErrParsingExtraTLVBytes) {

			return err
		}

		oldUpdateTime := uint64(oldEdgePolicy.LastUpdate.Unix())

		var oldIndexKey [8 + 8]byte
		byteOrder.PutUint64(oldIndexKey[:8], oldUpdateTime)
		byteOrder.PutUint64(oldIndexKey[8:], edge.ChannelID)

		if err := updateIndex.Delete(oldIndexKey[:]); err != nil {
			return err
		}
	}

	if err := updateIndex.Put(indexKey[:], nil); err != nil {
		return err
	}

	err = updateEdgePolicyDisabledIndex(
		edges, edge.ChannelID,
		edge.ChannelFlags&lnwire.ChanUpdateDirection > 0,
		edge.IsDisabled(),
	)
	if err != nil {
		return err
	}

	return edges.Put(edgeKey[:], b.Bytes())
}
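
// Illustrative sketch (not part of the original file): directed channel
// policies are keyed by the advertising node's compressed public key followed
// by the channel ID, the same 33+8 byte layout that putChanEdgePolicy above
// and fetchChanEdgePolicy below construct. The helper only shows that layout.
func exampleEdgePolicyKey(fromNode [33]byte, chanID uint64) [33 + 8]byte {
	var edgeKey [33 + 8]byte

	// The from-node pubkey identifies which side of the channel the
	// policy was advertised by; the trailing 8 bytes hold the channel ID.
	copy(edgeKey[:33], fromNode[:])
	byteOrder.PutUint64(edgeKey[33:], chanID)

	return edgeKey
}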

// updateEdgePolicyDisabledIndex is used to update the disabledEdgePolicyIndex
// bucket by either adding a new disabled ChannelEdgePolicy or removing an
// existing one.
// The direction represents the direction of the edge and disabled is used for
// deciding whether to remove or add an entry to the bucket.
// In general a channel is disabled if two entries for the same chanID exist
// in this bucket.
// Maintaining the bucket this way allows a fast retrieval of disabled
// channels, for example when pruning is needed.
func updateEdgePolicyDisabledIndex(edges kvdb.RwBucket, chanID uint64,
	direction bool, disabled bool) error {

	var disabledEdgeKey [8 + 1]byte
	byteOrder.PutUint64(disabledEdgeKey[0:], chanID)
	if direction {
		disabledEdgeKey[8] = 1
	}

	disabledEdgePolicyIndex, err := edges.CreateBucketIfNotExists(
		disabledEdgePolicyBucket,
	)
	if err != nil {
		return err
	}

	if disabled {
		return disabledEdgePolicyIndex.Put(disabledEdgeKey[:], []byte{})
	}

	return disabledEdgePolicyIndex.Delete(disabledEdgeKey[:])
}

// putChanEdgePolicyUnknown marks the edge policy as unknown
// in the edges bucket.
func putChanEdgePolicyUnknown(edges kvdb.RwBucket, channelID uint64,
	from []byte) error {

	var edgeKey [33 + 8]byte
	copy(edgeKey[:], from)
	byteOrder.PutUint64(edgeKey[33:], channelID)

	if edges.Get(edgeKey[:]) != nil {
		return fmt.Errorf("cannot write unknown policy for channel %v "+
			" when there is already a policy present", channelID)
	}

	return edges.Put(edgeKey[:], unknownPolicy)
}

func fetchChanEdgePolicy(edges kvdb.RBucket, chanID []byte,
	nodePub []byte) (*models.ChannelEdgePolicy, error) {

	var edgeKey [33 + 8]byte
	copy(edgeKey[:], nodePub)
	copy(edgeKey[33:], chanID)

	edgeBytes := edges.Get(edgeKey[:])
	if edgeBytes == nil {
		return nil, ErrEdgeNotFound
	}

	// No need to deserialize unknown policy.
	if bytes.Equal(edgeBytes, unknownPolicy) {
		return nil, nil
	}

	edgeReader := bytes.NewReader(edgeBytes)

	ep, err := deserializeChanEdgePolicy(edgeReader)
	switch {
	// If the db policy was missing an expected optional field, we return
	// nil as if the policy was unknown.
	case errors.Is(err, ErrEdgePolicyOptionalFieldNotFound):
		return nil, nil

	// If the policy contains invalid TLV bytes, we return nil as if
	// the policy was unknown.
	case errors.Is(err, ErrParsingExtraTLVBytes):
		return nil, nil

	case err != nil:
		return nil, err
	}

	return ep, nil
}

func fetchChanEdgePolicies(edgeIndex kvdb.RBucket, edges kvdb.RBucket,
	chanID []byte) (*models.ChannelEdgePolicy, *models.ChannelEdgePolicy,
	error) {

	edgeInfo := edgeIndex.Get(chanID)
	if edgeInfo == nil {
		return nil, nil, fmt.Errorf("%w: chanID=%x", ErrEdgeNotFound,
			chanID)
	}

	// The first node is contained within the first half of the edge
	// information. We only propagate the error here and below if it's
	// something other than edge non-existence.
	node1Pub := edgeInfo[:33]
	edge1, err := fetchChanEdgePolicy(edges, chanID, node1Pub)
	if err != nil {
		return nil, nil, fmt.Errorf("%w: node1Pub=%x", ErrEdgeNotFound,
			node1Pub)
	}

	// Similarly, the second node is contained within the latter
	// half of the edge information.
	node2Pub := edgeInfo[33:66]
	edge2, err := fetchChanEdgePolicy(edges, chanID, node2Pub)
	if err != nil {
		return nil, nil, fmt.Errorf("%w: node2Pub=%x", ErrEdgeNotFound,
			node2Pub)
	}

	return edge1, edge2, nil
}

func serializeChanEdgePolicy(w io.Writer, edge *models.ChannelEdgePolicy,
	to []byte) error {

	err := wire.WriteVarBytes(w, 0, edge.SigBytes)
	if err != nil {
		return err
	}

	if err := binary.Write(w, byteOrder, edge.ChannelID); err != nil {
		return err
	}

	var scratch [8]byte
	updateUnix := uint64(edge.LastUpdate.Unix())
	byteOrder.PutUint64(scratch[:], updateUnix)
	if _, err := w.Write(scratch[:]); err != nil {
		return err
	}

	if err := binary.Write(w, byteOrder, edge.MessageFlags); err != nil {
		return err
	}
	if err := binary.Write(w, byteOrder, edge.ChannelFlags); err != nil {
		return err
	}
	if err := binary.Write(w, byteOrder, edge.TimeLockDelta); err != nil {
		return err
	}
	if err := binary.Write(w, byteOrder, uint64(edge.MinHTLC)); err != nil {
		return err
	}
	err = binary.Write(w, byteOrder, uint64(edge.FeeBaseMSat))
	if err != nil {
		return err
	}
	err = binary.Write(
		w, byteOrder, uint64(edge.FeeProportionalMillionths),
	)
	if err != nil {
		return err
	}

	if _, err := w.Write(to); err != nil {
		return err
	}

	// If the max_htlc field is present, we write it. To be compatible with
	// older versions that weren't aware of this field, we write it as part
	// of the opaque data.
	// TODO(halseth): clean up when moving to TLV.
	var opaqueBuf bytes.Buffer
	if edge.MessageFlags.HasMaxHtlc() {
		err := binary.Write(&opaqueBuf, byteOrder, uint64(edge.MaxHTLC))
		if err != nil {
			return err
		}
	}

	// Validate that the ExtraOpaqueData is in fact a valid TLV stream.
	err = edge.ExtraOpaqueData.ValidateTLV()
	if err != nil {
		return fmt.Errorf("%w: %w", ErrParsingExtraTLVBytes, err)
	}

	if len(edge.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes {
		return ErrTooManyExtraOpaqueBytes(len(edge.ExtraOpaqueData))
	}
	if _, err := opaqueBuf.Write(edge.ExtraOpaqueData); err != nil {
		return err
	}

	if err := wire.WriteVarBytes(w, 0, opaqueBuf.Bytes()); err != nil {
		return err
	}

	return nil
}

func deserializeChanEdgePolicy(r io.Reader) (*models.ChannelEdgePolicy, error) {
	// Deserialize the policy. Note that in case an optional field is not
	// found or if the edge has invalid TLV data, then both an error and a
	// populated policy object are returned so that the caller can decide
	// if it still wants to use the edge or not.
	edge, err := deserializeChanEdgePolicyRaw(r)
	if err != nil &&
		!errors.Is(err, ErrEdgePolicyOptionalFieldNotFound) &&
		!errors.Is(err, ErrParsingExtraTLVBytes) {

		return nil, err
	}

	return edge, err
}

func deserializeChanEdgePolicyRaw(r io.Reader) (*models.ChannelEdgePolicy,
	error) {

	edge := &models.ChannelEdgePolicy{}

	var err error
	edge.SigBytes, err = wire.ReadVarBytes(r, 0, 80, "sig")
	if err != nil {
		return nil, err
	}

	if err := binary.Read(r, byteOrder, &edge.ChannelID); err != nil {
		return nil, err
	}

	var scratch [8]byte
	if _, err := r.Read(scratch[:]); err != nil {
		return nil, err
	}
	unix := int64(byteOrder.Uint64(scratch[:]))
	edge.LastUpdate = time.Unix(unix, 0)

	if err := binary.Read(r, byteOrder, &edge.MessageFlags); err != nil {
		return nil, err
	}
	if err := binary.Read(r, byteOrder, &edge.ChannelFlags); err != nil {
		return nil, err
	}
	if err := binary.Read(r, byteOrder, &edge.TimeLockDelta); err != nil {
		return nil, err
	}

	var n uint64
	if err := binary.Read(r, byteOrder, &n); err != nil {
		return nil, err
	}
	edge.MinHTLC = lnwire.MilliSatoshi(n)

	if err := binary.Read(r, byteOrder, &n); err != nil {
		return nil, err
	}
	edge.FeeBaseMSat = lnwire.MilliSatoshi(n)

	if err := binary.Read(r, byteOrder, &n); err != nil {
		return nil, err
	}
	edge.FeeProportionalMillionths = lnwire.MilliSatoshi(n)

	if _, err := r.Read(edge.ToNode[:]); err != nil {
		return nil, err
	}

	// We'll try and see if there are any opaque bytes left, if not, then
	// we'll ignore the EOF error and return the edge as is.
	edge.ExtraOpaqueData, err = wire.ReadVarBytes(
		r, 0, MaxAllowedExtraOpaqueBytes, "blob",
	)
	switch {
	case errors.Is(err, io.ErrUnexpectedEOF):
	case errors.Is(err, io.EOF):
	case err != nil:
		return nil, err
	}

	// See if optional fields are present.
	if edge.MessageFlags.HasMaxHtlc() {
		// The max_htlc field should be at the beginning of the opaque
		// bytes.
		opq := edge.ExtraOpaqueData

		// If the max_htlc field is not present, it might be old data
		// stored before this field was validated. We'll return the
		// edge along with an error.
		if len(opq) < 8 {
			return edge, ErrEdgePolicyOptionalFieldNotFound
		}

		maxHtlc := byteOrder.Uint64(opq[:8])
		edge.MaxHTLC = lnwire.MilliSatoshi(maxHtlc)

		// Exclude the parsed field from the rest of the opaque data.
		edge.ExtraOpaqueData = opq[8:]
	}

	// Attempt to extract the inbound fee from the opaque data. If we fail
	// to parse the TLV here, we return an error but also return the edge
	// so that the caller can still use it. This is for backwards
	// compatibility in case we have already persisted some policies that
	// have invalid TLV data.
	var inboundFee lnwire.Fee
	typeMap, err := edge.ExtraOpaqueData.ExtractRecords(&inboundFee)
	if err != nil {
		return edge, fmt.Errorf("%w: %w", ErrParsingExtraTLVBytes, err)
	}

	val, ok := typeMap[lnwire.FeeRecordType]
	if ok && val == nil {
		edge.InboundFee = fn.Some(inboundFee)
	}

	return edge, nil
}

// chanGraphNodeTx is an implementation of the NodeRTx interface backed by the
// KVStore and a kvdb.RTx.
type chanGraphNodeTx struct {
	tx   kvdb.RTx
	db   *KVStore
	node *models.LightningNode
}

// A compile-time constraint to ensure chanGraphNodeTx implements the NodeRTx
// interface.
var _ NodeRTx = (*chanGraphNodeTx)(nil)

func newChanGraphNodeTx(tx kvdb.RTx, db *KVStore,
	node *models.LightningNode) *chanGraphNodeTx {

	return &chanGraphNodeTx{
		tx:   tx,
		db:   db,
		node: node,
	}
}

// Node returns the raw information of the node.
//
// NOTE: This is a part of the NodeRTx interface.
func (c *chanGraphNodeTx) Node() *models.LightningNode {
	return c.node
}

// FetchNode fetches the node with the given pub key under the same transaction
// used to fetch the current node. The returned node is also a NodeRTx and any
// operations on that NodeRTx will also be done under the same transaction.
//
// NOTE: This is a part of the NodeRTx interface.
func (c *chanGraphNodeTx) FetchNode(nodePub route.Vertex) (NodeRTx, error) {
	node, err := c.db.FetchLightningNodeTx(c.tx, nodePub)
	if err != nil {
		return nil, err
	}

	return newChanGraphNodeTx(c.tx, c.db, node), nil
}

// ForEachChannel can be used to iterate over the node's channels under
// the same transaction used to fetch the node.
//
// NOTE: This is a part of the NodeRTx interface.
func (c *chanGraphNodeTx) ForEachChannel(f func(*models.ChannelEdgeInfo,
	*models.ChannelEdgePolicy, *models.ChannelEdgePolicy) error) error {

	return c.db.forEachNodeChannelTx(c.tx, c.node.PubKeyBytes,
		func(_ kvdb.RTx, info *models.ChannelEdgeInfo, policy1,
			policy2 *models.ChannelEdgePolicy) error {

			return f(info, policy1, policy2)
		},
	)
}
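
// Illustrative sketch (not part of the original file): when the max_htlc
// message flag is set, deserializeChanEdgePolicyRaw above expects the first 8
// bytes of a policy's opaque data to hold the max HTLC value, with the
// remainder treated as the policy's extra TLV data. The helper below only
// mirrors that split.
func exampleSplitMaxHtlc(opq []byte) (lnwire.MilliSatoshi, []byte, error) {
	// Policies stored before this field was validated may be missing it
	// entirely.
	if len(opq) < 8 {
		return 0, nil, ErrEdgePolicyOptionalFieldNotFound
	}

	maxHtlc := lnwire.MilliSatoshi(byteOrder.Uint64(opq[:8]))

	// Everything after the first 8 bytes is the remaining opaque (TLV)
	// data.
	return maxHtlc, opq[8:], nil
}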