lightningnetwork / lnd / build 15872517631

25 Jun 2025 09:22AM UTC coverage: 67.648% (-0.2%) from 67.8%

Pull Request #9939: [15] graph/db: SQL prune log
Merge e875183c4 into 33e6f2854 (github / web-flow)

0 of 386 new or added lines in 2 files covered. (0.0%)
83 existing lines in 18 files now uncovered.
134987 of 199542 relevant lines covered (67.65%)
21930.35 hits per line

Source File: /graph/db/kv_store.go (78.27% covered)
1
package graphdb
2

3
import (
4
        "bytes"
5
        "context"
6
        "crypto/sha256"
7
        "encoding/binary"
8
        "errors"
9
        "fmt"
10
        "io"
11
        "math"
12
        "net"
13
        "sort"
14
        "sync"
15
        "testing"
16
        "time"
17

18
        "github.com/btcsuite/btcd/btcec/v2"
19
        "github.com/btcsuite/btcd/chaincfg/chainhash"
20
        "github.com/btcsuite/btcd/txscript"
21
        "github.com/btcsuite/btcd/wire"
22
        "github.com/btcsuite/btcwallet/walletdb"
23
        "github.com/lightningnetwork/lnd/aliasmgr"
24
        "github.com/lightningnetwork/lnd/batch"
25
        "github.com/lightningnetwork/lnd/fn/v2"
26
        "github.com/lightningnetwork/lnd/graph/db/models"
27
        "github.com/lightningnetwork/lnd/input"
28
        "github.com/lightningnetwork/lnd/kvdb"
29
        "github.com/lightningnetwork/lnd/lnwire"
30
        "github.com/lightningnetwork/lnd/routing/route"
31
        "github.com/stretchr/testify/require"
32
)
33

34
var (
35
        // nodeBucket is a bucket which houses all the vertices or nodes within
36
        // the channel graph. This bucket has a single sub-bucket which adds an
37
        // additional index from pubkey -> alias. Within the top-level of this
38
        // bucket, the key space maps a node's compressed public key to the
39
        // serialized information for that node. Additionally, there's a
40
        // special key "source" which stores the pubkey of the source node. The
41
        // source node is used as the starting point for all graph queries and
42
        // traversals. The graph is formed as a star-graph with the source node
43
        // at the center.
44
        //
45
        // maps: pubKey -> nodeInfo
46
        // maps: source -> selfPubKey
47
        nodeBucket = []byte("graph-node")
48

49
        // nodeUpdateIndexBucket is a sub-bucket of the nodeBucket. This bucket
50
        // will be used to quickly look up the "freshness" of a node's last
51
        // update to the network. The bucket only contains keys, and no values,
52
        // its mapping is:
53
        //
54
        // maps: updateTime || nodeID -> nil
55
        nodeUpdateIndexBucket = []byte("graph-node-update-index")
56

57
        // sourceKey is a special key that resides within the nodeBucket. The
58
        // sourceKey maps a key to the public key of the "self node".
59
        sourceKey = []byte("source")
60

61
        // aliasIndexBucket is a sub-bucket that's nested within the main
62
        // nodeBucket. This bucket maps the public key of a node to its
63
        // current alias. This bucket is provided as it can be used within a
64
        // future UI layer to add an additional degree of confirmation.
65
        aliasIndexBucket = []byte("alias")
66

67
        // edgeBucket is a bucket which houses all of the edge or channel
68
        // information within the channel graph. This bucket essentially acts
69
        // as an adjacency list, which in conjunction with a range scan, can be
70
        // used to iterate over all the incoming and outgoing edges for a
71
        // particular node. Keys in the bucket use a prefix scheme which leads
72
        // with the node's public key and ends with the compact edge ID.
73
        // For each chanID, there will be two entries within the bucket, as the
74
        // graph is directed: nodes may have different policies w.r.t. fees
75
        // for their respective directions.
76
        //
77
        // maps: pubKey || chanID -> channel edge policy for node
78
        edgeBucket = []byte("graph-edge")
79

80
        // unknownPolicy is represented as an empty slice. It is
81
        // used as the value in edgeBucket for unknown channel edge policies.
82
        // Unknown policies are still stored in the database to enable efficient
83
        // lookup of incoming channel edges.
84
        unknownPolicy = []byte{}
85

86
        // chanStart is an array of all zero bytes which is used to perform
87
        // range scans within the edgeBucket to obtain all of the outgoing
88
        // edges for a particular node.
89
        chanStart [8]byte
90

91
        // edgeIndexBucket is an index which can be used to iterate all edges
92
        // in the bucket, grouping them according to their in/out nodes.
93
        // Additionally, the items in this bucket also contain the complete
94
        // edge information for a channel. The edge information includes the
95
        // capacity of the channel, the nodes that made the channel, etc. This
96
        // bucket resides within the edgeBucket above. Creation of an edge
97
        // proceeds in two phases: first the edge is added to the edge index,
98
        // afterwards the edgeBucket can be updated with the latest details of
99
        // the edge as they are announced on the network.
100
        //
101
        // maps: chanID -> pubKey1 || pubKey2 || restofEdgeInfo
102
        edgeIndexBucket = []byte("edge-index")
103

104
        // edgeUpdateIndexBucket is a sub-bucket of the main edgeBucket. This
105
        // bucket contains an index which allows us to gauge the "freshness" of
106
        // a channel's last updates.
107
        //
108
        // maps: updateTime || chanID -> nil
109
        edgeUpdateIndexBucket = []byte("edge-update-index")
110

111
        // channelPointBucket maps a channel's full outpoint (txid:index) to
112
        // its short 8-byte channel ID. This bucket resides within the
113
        // edgeBucket above, and can be used to quickly remove an edge due to
114
        // the outpoint being spent, or to query for existence of a channel.
115
        //
116
        // maps: outPoint -> chanID
117
        channelPointBucket = []byte("chan-index")
118

119
        // zombieBucket is a sub-bucket of the main edgeBucket bucket
120
        // responsible for maintaining an index of zombie channels. Each entry
121
        // exists within the bucket as follows:
122
        //
123
        // maps: chanID -> pubKey1 || pubKey2
124
        //
125
        // The chanID represents the channel ID of the edge that is marked as a
126
        // zombie and is used as the key, which maps to the public keys of the
127
        // edge's participants.
128
        zombieBucket = []byte("zombie-index")
129

130
        // disabledEdgePolicyBucket is a sub-bucket of the main edgeBucket
131
        // bucket responsible for maintaining an index of disabled edge
132
        // policies. Each entry exists within the bucket as follows:
133
        //
134
        // maps: <chanID><direction> -> []byte{}
135
        //
136
        // The chanID represents the channel ID of the edge and the direction is
137
        // one byte representing the direction of the edge. The main purpose of
138
        // this index is to allow pruning disabled channels in a fast way
139
        // without the need to iterate all over the graph.
140
        disabledEdgePolicyBucket = []byte("disabled-edge-policy-index")
141

142
        // graphMetaBucket is a top-level bucket which stores various metadata
143
        // related to the on-disk channel graph. Data stored in this bucket
144
        // includes the block to which the graph has been synced, the total
145
        // number of channels, etc.
146
        graphMetaBucket = []byte("graph-meta")
147

148
        // pruneLogBucket is a bucket within the graphMetaBucket that stores
149
        // a mapping from the block height to the hash for the blocks used to
150
        // prune the graph.
151
        // Once a new block is discovered, any channels that have been closed
152
        // (by spending the outpoint) can safely be removed from the graph, and
153
        // the block is added to the prune log. We need to keep such a log for
154
        // the case where a reorg happens, and we must "rewind" the state of the
155
        // graph by removing channels that were previously confirmed. In such a
156
        // case we'll remove all entries from the prune log with a block height
157
        // that no longer exists.
158
        pruneLogBucket = []byte("prune-log")
159

160
        // closedScidBucket is a top-level bucket that stores scids for
161
        // channels that we know to be closed. This is used so that we don't
162
        // need to perform expensive validation checks if we receive a channel
163
        // announcement for the channel again.
164
        //
165
        // maps: scid -> []byte{}
166
        closedScidBucket = []byte("closed-scid")
167
)
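// Illustrative sketch (not part of kv_store.go): the bucket comments above
// describe composite keys of the form pubKey || chanID for edgeBucket. A
// hypothetical helper assembling such a key could look as follows, mirroring
// the 33-byte compressed public key followed by the 8-byte big-endian channel
// ID that the store itself writes.
func makeEdgePolicyKey(nodePub [33]byte, chanID uint64) [33 + 8]byte {
        var key [33 + 8]byte
        copy(key[:33], nodePub[:])
        binary.BigEndian.PutUint64(key[33:], chanID)

        return key
}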
168

169
const (
170
        // MaxAllowedExtraOpaqueBytes is the largest amount of opaque bytes that
171
        // we'll permit to be written to disk. We limit this as otherwise, it
172
        // would be possible for a node to create a ton of updates and slowly
173
        // fill our disk, and also waste bandwidth due to relaying.
174
        MaxAllowedExtraOpaqueBytes = 10000
175
)
176

177
// KVStore is a persistent, on-disk graph representation of the Lightning
178
// Network. This struct can be used to implement path finding algorithms on top
179
// of, and also to update a node's view based on information received from the
180
// p2p network. Internally, the graph is stored using a modified adjacency list
181
// representation with some added object interaction possible with each
182
// serialized edge/node. The graph stored is directed, meaning that there are two
183
// edges stored for each channel: an inbound/outbound edge for each node pair.
184
// Nodes, edges, and edge information can all be added to the graph
185
// independently. Edge removal results in the deletion of all edge information
186
// for that edge.
187
type KVStore struct {
188
        db kvdb.Backend
189

190
        // cacheMu guards all caches (rejectCache and chanCache). If
191
        // this mutex will be acquired at the same time as the DB mutex then
192
        // the cacheMu MUST be acquired first to prevent deadlock.
193
        cacheMu     sync.RWMutex
194
        rejectCache *rejectCache
195
        chanCache   *channelCache
196

197
        chanScheduler batch.Scheduler[kvdb.RwTx]
198
        nodeScheduler batch.Scheduler[kvdb.RwTx]
199
}
200

201
// A compile-time assertion to ensure that the KVStore struct implements the
202
// V1Store interface.
203
var _ V1Store = (*KVStore)(nil)
204

205
// NewKVStore allocates a new KVStore backed by a DB instance. The
206
// returned instance has its own unique reject cache and channel cache.
207
func NewKVStore(db kvdb.Backend, options ...StoreOptionModifier) (*KVStore,
208
        error) {
172✔
209

172✔
210
        opts := DefaultOptions()
172✔
211
        for _, o := range options {
175✔
212
                o(opts)
3✔
213
        }
3✔
214

215
        if !opts.NoMigration {
344✔
216
                if err := initKVStore(db); err != nil {
172✔
217
                        return nil, err
×
218
                }
×
219
        }
220

221
        g := &KVStore{
172✔
222
                db:          db,
172✔
223
                rejectCache: newRejectCache(opts.RejectCacheSize),
172✔
224
                chanCache:   newChannelCache(opts.ChannelCacheSize),
172✔
225
        }
172✔
226
        g.chanScheduler = batch.NewTimeScheduler(
172✔
227
                batch.NewBoltBackend[kvdb.RwTx](db), &g.cacheMu,
172✔
228
                opts.BatchCommitInterval,
172✔
229
        )
172✔
230
        g.nodeScheduler = batch.NewTimeScheduler(
172✔
231
                batch.NewBoltBackend[kvdb.RwTx](db), nil,
172✔
232
                opts.BatchCommitInterval,
172✔
233
        )
172✔
234

172✔
235
        return g, nil
172✔
236
}
237

238
// channelMapKey is the key structure used for storing channel edge policies.
239
type channelMapKey struct {
240
        nodeKey route.Vertex
241
        chanID  [8]byte
242
}
243

244
// String returns a human-readable representation of the key.
245
func (c channelMapKey) String() string {
×
246
        return fmt.Sprintf("node=%v, chanID=%x", c.nodeKey, c.chanID)
×
247
}
×
248

249
// getChannelMap loads all channel edge policies from the database and stores
250
// them in a map.
251
func (c *KVStore) getChannelMap(edges kvdb.RBucket) (
252
        map[channelMapKey]*models.ChannelEdgePolicy, error) {
144✔
253

144✔
254
        // Create a map to store all channel edge policies.
144✔
255
        channelMap := make(map[channelMapKey]*models.ChannelEdgePolicy)
144✔
256

144✔
257
        err := kvdb.ForAll(edges, func(k, edgeBytes []byte) error {
1,706✔
258
                // Skip embedded buckets.
1,562✔
259
                if bytes.Equal(k, edgeIndexBucket) ||
1,562✔
260
                        bytes.Equal(k, edgeUpdateIndexBucket) ||
1,562✔
261
                        bytes.Equal(k, zombieBucket) ||
1,562✔
262
                        bytes.Equal(k, disabledEdgePolicyBucket) ||
1,562✔
263
                        bytes.Equal(k, channelPointBucket) {
2,134✔
264

572✔
265
                        return nil
572✔
266
                }
572✔
267

268
                // Validate key length.
269
                if len(k) != 33+8 {
993✔
270
                        return fmt.Errorf("invalid edge key %x encountered", k)
×
271
                }
×
272

273
                var key channelMapKey
993✔
274
                copy(key.nodeKey[:], k[:33])
993✔
275
                copy(key.chanID[:], k[33:])
993✔
276

993✔
277
                // No need to deserialize unknown policy.
993✔
278
                if bytes.Equal(edgeBytes, unknownPolicy) {
993✔
279
                        return nil
×
280
                }
×
281

282
                edgeReader := bytes.NewReader(edgeBytes)
993✔
283
                edge, err := deserializeChanEdgePolicyRaw(
993✔
284
                        edgeReader,
993✔
285
                )
993✔
286

993✔
287
                switch {
993✔
288
                // If the db policy was missing an expected optional field, we
289
                // return nil as if the policy was unknown.
290
                case errors.Is(err, ErrEdgePolicyOptionalFieldNotFound):
×
291
                        return nil
×
292

293
                // We don't want a single policy with bad TLV data to stop us
294
                // from loading the rest of the data, so we just skip this
295
                // policy. This is for backwards compatibility since we did not
296
                // use to validate TLV data in the past before persisting it.
297
                case errors.Is(err, ErrParsingExtraTLVBytes):
×
298
                        return nil
×
299

300
                case err != nil:
×
301
                        return err
×
302
                }
303

304
                channelMap[key] = edge
993✔
305

993✔
306
                return nil
993✔
307
        })
308
        if err != nil {
144✔
309
                return nil, err
×
310
        }
×
311

312
        return channelMap, nil
144✔
313
}
314

315
var graphTopLevelBuckets = [][]byte{
316
        nodeBucket,
317
        edgeBucket,
318
        graphMetaBucket,
319
        closedScidBucket,
320
}
321

322
// createChannelDB creates and initializes a fresh version of  In
323
// the case that the target path has not yet been created or doesn't yet exist,
324
// then the path is created. Additionally, all required top-level buckets used
325
// within the database are created.
326
func initKVStore(db kvdb.Backend) error {
172✔
327
        err := kvdb.Update(db, func(tx kvdb.RwTx) error {
344✔
328
                for _, tlb := range graphTopLevelBuckets {
851✔
329
                        if _, err := tx.CreateTopLevelBucket(tlb); err != nil {
679✔
330
                                return err
×
331
                        }
×
332
                }
333

334
                nodes := tx.ReadWriteBucket(nodeBucket)
172✔
335
                _, err := nodes.CreateBucketIfNotExists(aliasIndexBucket)
172✔
336
                if err != nil {
172✔
337
                        return err
×
338
                }
×
339
                _, err = nodes.CreateBucketIfNotExists(nodeUpdateIndexBucket)
172✔
340
                if err != nil {
172✔
341
                        return err
×
342
                }
×
343

344
                edges := tx.ReadWriteBucket(edgeBucket)
172✔
345
                _, err = edges.CreateBucketIfNotExists(edgeIndexBucket)
172✔
346
                if err != nil {
172✔
347
                        return err
×
348
                }
×
349
                _, err = edges.CreateBucketIfNotExists(edgeUpdateIndexBucket)
172✔
350
                if err != nil {
172✔
351
                        return err
×
352
                }
×
353
                _, err = edges.CreateBucketIfNotExists(channelPointBucket)
172✔
354
                if err != nil {
172✔
355
                        return err
×
356
                }
×
357
                _, err = edges.CreateBucketIfNotExists(zombieBucket)
172✔
358
                if err != nil {
172✔
359
                        return err
×
360
                }
×
361

362
                graphMeta := tx.ReadWriteBucket(graphMetaBucket)
172✔
363
                _, err = graphMeta.CreateBucketIfNotExists(pruneLogBucket)
172✔
364

172✔
365
                return err
172✔
366
        }, func() {})
172✔
367
        if err != nil {
172✔
368
                return fmt.Errorf("unable to create new channel graph: %w", err)
×
369
        }
×
370

371
        return nil
172✔
372
}
373

374
// AddrsForNode returns all known addresses for the target node public key that
375
// the graph DB is aware of. The returned boolean indicates if the given node is
376
// unknown to the graph DB or not.
377
//
378
// NOTE: this is part of the channeldb.AddrSource interface.
379
func (c *KVStore) AddrsForNode(ctx context.Context,
380
        nodePub *btcec.PublicKey) (bool, []net.Addr, error) {
6✔
381

6✔
382
        pubKey, err := route.NewVertexFromBytes(nodePub.SerializeCompressed())
6✔
383
        if err != nil {
6✔
384
                return false, nil, err
×
385
        }
×
386

387
        node, err := c.FetchLightningNode(ctx, pubKey)
6✔
388
        // We don't consider it an error if the graph is unaware of the node.
6✔
389
        switch {
6✔
390
        case err != nil && !errors.Is(err, ErrGraphNodeNotFound):
×
391
                return false, nil, err
×
392

393
        case errors.Is(err, ErrGraphNodeNotFound):
4✔
394
                return false, nil, nil
4✔
395
        }
396

397
        return true, node.Addresses, nil
5✔
398
}
399

400
// ForEachChannel iterates through all the channel edges stored within the
401
// graph and invokes the passed callback for each edge. The callback takes two
402
// edges since this is a directed graph: both the in/out edges are visited.
403
// If the callback returns an error, then the transaction is aborted and the
404
// iteration stops early.
405
//
406
// NOTE: If an edge can't be found, or wasn't advertised, then a nil pointer
407
// for that particular channel edge routing policy will be passed into the
408
// callback.
409
func (c *KVStore) ForEachChannel(cb func(*models.ChannelEdgeInfo,
410
        *models.ChannelEdgePolicy, *models.ChannelEdgePolicy) error) error {
7✔
411

7✔
412
        return c.db.View(func(tx kvdb.RTx) error {
14✔
413
                edges := tx.ReadBucket(edgeBucket)
7✔
414
                if edges == nil {
7✔
415
                        return ErrGraphNoEdgesFound
×
416
                }
×
417

418
                // First, load all edges in memory indexed by node and channel
419
                // id.
420
                channelMap, err := c.getChannelMap(edges)
7✔
421
                if err != nil {
7✔
422
                        return err
×
423
                }
×
424

425
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
7✔
426
                if edgeIndex == nil {
7✔
427
                        return ErrGraphNoEdgesFound
×
428
                }
×
429

430
                // Load edge index, recombine each channel with the policies
431
                // loaded above and invoke the callback.
432
                return kvdb.ForAll(
7✔
433
                        edgeIndex, func(k, edgeInfoBytes []byte) error {
109✔
434
                                var chanID [8]byte
102✔
435
                                copy(chanID[:], k)
102✔
436

102✔
437
                                edgeInfoReader := bytes.NewReader(edgeInfoBytes)
102✔
438
                                info, err := deserializeChanEdgeInfo(
102✔
439
                                        edgeInfoReader,
102✔
440
                                )
102✔
441
                                if err != nil {
102✔
442
                                        return err
×
443
                                }
×
444

445
                                policy1 := channelMap[channelMapKey{
102✔
446
                                        nodeKey: info.NodeKey1Bytes,
102✔
447
                                        chanID:  chanID,
102✔
448
                                }]
102✔
449

102✔
450
                                policy2 := channelMap[channelMapKey{
102✔
451
                                        nodeKey: info.NodeKey2Bytes,
102✔
452
                                        chanID:  chanID,
102✔
453
                                }]
102✔
454

102✔
455
                                return cb(&info, policy1, policy2)
102✔
456
                        },
457
                )
458
        }, func() {})
7✔
459
}
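// Illustrative sketch (not part of kv_store.go): a hypothetical helper that
// counts all channels known to the store via the ForEachChannel method above.
// Per the NOTE on ForEachChannel, either policy passed to the callback may be
// nil, so the count relies only on the edge info argument.
func countChannels(store *KVStore) (int, error) {
        var count int
        err := store.ForEachChannel(func(_ *models.ChannelEdgeInfo,
                _, _ *models.ChannelEdgePolicy) error {

                count++
                return nil
        })

        return count, err
}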
460

461
// ForEachChannelCacheable iterates through all the channel edges stored within
462
// the graph and invokes the passed callback for each edge. The callback takes
463
// two edges since this is a directed graph: both the in/out edges are
464
// visited. If the callback returns an error, then the transaction is aborted
465
// and the iteration stops early.
466
//
467
// NOTE: If an edge can't be found, or wasn't advertised, then a nil pointer
468
// for that particular channel edge routing policy will be passed into the
469
// callback.
470
//
471
// NOTE: this method is like ForEachChannel but fetches only the data required
472
// for the graph cache.
473
func (c *KVStore) ForEachChannelCacheable(cb func(*models.CachedEdgeInfo,
474
        *models.CachedEdgePolicy, *models.CachedEdgePolicy) error) error {
140✔
475

140✔
476
        return c.db.View(func(tx kvdb.RTx) error {
280✔
477
                edges := tx.ReadBucket(edgeBucket)
140✔
478
                if edges == nil {
140✔
479
                        return ErrGraphNoEdgesFound
×
480
                }
×
481

482
                // First, load all edges in memory indexed by node and channel
483
                // id.
484
                channelMap, err := c.getChannelMap(edges)
140✔
485
                if err != nil {
140✔
486
                        return err
×
487
                }
×
488

489
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
140✔
490
                if edgeIndex == nil {
140✔
491
                        return ErrGraphNoEdgesFound
×
492
                }
×
493

494
                // Load edge index, recombine each channel with the policies
495
                // loaded above and invoke the callback.
496
                return kvdb.ForAll(
140✔
497
                        edgeIndex, func(k, edgeInfoBytes []byte) error {
539✔
498
                                var chanID [8]byte
399✔
499
                                copy(chanID[:], k)
399✔
500

399✔
501
                                edgeInfoReader := bytes.NewReader(edgeInfoBytes)
399✔
502
                                info, err := deserializeChanEdgeInfo(
399✔
503
                                        edgeInfoReader,
399✔
504
                                )
399✔
505
                                if err != nil {
399✔
506
                                        return err
×
507
                                }
×
508

509
                                key1 := channelMapKey{
399✔
510
                                        nodeKey: info.NodeKey1Bytes,
399✔
511
                                        chanID:  chanID,
399✔
512
                                }
399✔
513
                                policy1 := channelMap[key1]
399✔
514

399✔
515
                                key2 := channelMapKey{
399✔
516
                                        nodeKey: info.NodeKey2Bytes,
399✔
517
                                        chanID:  chanID,
399✔
518
                                }
399✔
519
                                policy2 := channelMap[key2]
399✔
520

399✔
521
                                // We now create the cached edge policies, but
399✔
522
                                // only when the above policies are found in the
399✔
523
                                // `channelMap`.
399✔
524
                                var (
399✔
525
                                        cachedPolicy1 *models.CachedEdgePolicy
399✔
526
                                        cachedPolicy2 *models.CachedEdgePolicy
399✔
527
                                )
399✔
528

399✔
529
                                if policy1 != nil {
798✔
530
                                        cachedPolicy1 = models.NewCachedPolicy(
399✔
531
                                                policy1,
399✔
532
                                        )
399✔
533
                                }
399✔
534

535
                                if policy2 != nil {
798✔
536
                                        cachedPolicy2 = models.NewCachedPolicy(
399✔
537
                                                policy2,
399✔
538
                                        )
399✔
539
                                }
399✔
540

541
                                return cb(
399✔
542
                                        models.NewCachedEdge(&info),
399✔
543
                                        cachedPolicy1, cachedPolicy2,
399✔
544
                                )
399✔
545
                        },
546
                )
547
        }, func() {})
140✔
548
}
549

550
// forEachNodeDirectedChannel iterates through all channels of a given node,
551
// executing the passed callback on the directed edge representing the channel
552
// and its incoming policy. If the callback returns an error, then the iteration
553
// is halted with the error propagated back up to the caller. An optional read
554
// transaction may be provided. If none is provided, a new one will be created.
555
//
556
// Unknown policies are passed into the callback as nil values.
557
func (c *KVStore) forEachNodeDirectedChannel(tx kvdb.RTx,
558
        node route.Vertex, cb func(channel *DirectedChannel) error) error {
265✔
559

265✔
560
        // Fallback that uses the database.
265✔
561
        toNodeCallback := func() route.Vertex {
400✔
562
                return node
135✔
563
        }
135✔
564
        toNodeFeatures, err := c.fetchNodeFeatures(tx, node)
265✔
565
        if err != nil {
265✔
566
                return err
×
567
        }
×
568

569
        dbCallback := func(tx kvdb.RTx, e *models.ChannelEdgeInfo, p1,
265✔
570
                p2 *models.ChannelEdgePolicy) error {
954✔
571

689✔
572
                var cachedInPolicy *models.CachedEdgePolicy
689✔
573
                if p2 != nil {
1,375✔
574
                        cachedInPolicy = models.NewCachedPolicy(p2)
686✔
575
                        cachedInPolicy.ToNodePubKey = toNodeCallback
686✔
576
                        cachedInPolicy.ToNodeFeatures = toNodeFeatures
686✔
577
                }
686✔
578

579
                directedChannel := &DirectedChannel{
689✔
580
                        ChannelID:    e.ChannelID,
689✔
581
                        IsNode1:      node == e.NodeKey1Bytes,
689✔
582
                        OtherNode:    e.NodeKey2Bytes,
689✔
583
                        Capacity:     e.Capacity,
689✔
584
                        OutPolicySet: p1 != nil,
689✔
585
                        InPolicy:     cachedInPolicy,
689✔
586
                }
689✔
587

689✔
588
                if p1 != nil {
1,377✔
589
                        p1.InboundFee.WhenSome(func(fee lnwire.Fee) {
1,024✔
590
                                directedChannel.InboundFee = fee
336✔
591
                        })
336✔
592
                }
593

594
                if node == e.NodeKey2Bytes {
1,038✔
595
                        directedChannel.OtherNode = e.NodeKey1Bytes
349✔
596
                }
349✔
597

598
                return cb(directedChannel)
689✔
599
        }
600

601
        return nodeTraversal(tx, node[:], c.db, dbCallback)
265✔
602
}
603

604
// fetchNodeFeatures returns the features of a given node. If no features are
605
// known for the node, an empty feature vector is returned. An optional read
606
// transaction may be provided. If none is provided, a new one will be created.
607
func (c *KVStore) fetchNodeFeatures(tx kvdb.RTx,
608
        node route.Vertex) (*lnwire.FeatureVector, error) {
710✔
609

710✔
610
        // Fallback that uses the database.
710✔
611
        targetNode, err := c.FetchLightningNodeTx(tx, node)
710✔
612
        switch {
710✔
613
        // If the node exists and has features, return them directly.
614
        case err == nil:
699✔
615
                return targetNode.Features, nil
699✔
616

617
        // If we couldn't find a node announcement, populate a blank feature
618
        // vector.
619
        case errors.Is(err, ErrGraphNodeNotFound):
11✔
620
                return lnwire.EmptyFeatureVector(), nil
11✔
621

622
        // Otherwise, bubble the error up.
623
        default:
×
624
                return nil, err
×
625
        }
626
}
627

628
// ForEachNodeDirectedChannel iterates through all channels of a given node,
629
// executing the passed callback on the directed edge representing the channel
630
// and its incoming policy. If the callback returns an error, then the iteration
631
// is halted with the error propagated back up to the caller.
632
//
633
// Unknown policies are passed into the callback as nil values.
634
//
635
// NOTE: this is part of the graphdb.NodeTraverser interface.
636
func (c *KVStore) ForEachNodeDirectedChannel(nodePub route.Vertex,
637
        cb func(channel *DirectedChannel) error) error {
26✔
638

26✔
639
        return c.forEachNodeDirectedChannel(nil, nodePub, cb)
26✔
640
}
26✔
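// Illustrative sketch (not part of kv_store.go): a hypothetical helper that
// uses the ForEachNodeDirectedChannel method above to count how many of a
// node's channels have an outgoing policy set, based on the DirectedChannel
// fields handed to the callback.
func countOutgoingPolicies(store *KVStore, nodePub route.Vertex) (int, error) {
        var count int
        err := store.ForEachNodeDirectedChannel(nodePub,
                func(ch *DirectedChannel) error {
                        if ch.OutPolicySet {
                                count++
                        }

                        return nil
                },
        )

        return count, err
}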
641

642
// FetchNodeFeatures returns the features of the given node. If no features are
643
// known for the node, an empty feature vector is returned.
644
//
645
// NOTE: this is part of the graphdb.NodeTraverser interface.
646
func (c *KVStore) FetchNodeFeatures(nodePub route.Vertex) (
647
        *lnwire.FeatureVector, error) {
4✔
648

4✔
649
        return c.fetchNodeFeatures(nil, nodePub)
4✔
650
}
4✔
651

652
// ForEachNodeCached is similar to forEachNode, but it returns DirectedChannel
653
// data to the call-back.
654
//
655
// NOTE: The callback contents MUST not be modified.
656
func (c *KVStore) ForEachNodeCached(cb func(node route.Vertex,
657
        chans map[uint64]*DirectedChannel) error) error {
1✔
658

1✔
659
        // Otherwise call back to a version that uses the database directly.
1✔
660
        // We'll iterate over each node, then the set of channels for each
1✔
661
        // node, and construct a similar callback function signature as the
1✔
662
        // main function expects.
1✔
663
        return c.forEachNode(func(tx kvdb.RTx,
1✔
664
                node *models.LightningNode) error {
21✔
665

20✔
666
                channels := make(map[uint64]*DirectedChannel)
20✔
667

20✔
668
                err := c.forEachNodeChannelTx(tx, node.PubKeyBytes,
20✔
669
                        func(tx kvdb.RTx, e *models.ChannelEdgeInfo,
20✔
670
                                p1 *models.ChannelEdgePolicy,
20✔
671
                                p2 *models.ChannelEdgePolicy) error {
210✔
672

190✔
673
                                toNodeCallback := func() route.Vertex {
190✔
674
                                        return node.PubKeyBytes
×
675
                                }
×
676
                                toNodeFeatures, err := c.fetchNodeFeatures(
190✔
677
                                        tx, node.PubKeyBytes,
190✔
678
                                )
190✔
679
                                if err != nil {
190✔
680
                                        return err
×
681
                                }
×
682

683
                                var cachedInPolicy *models.CachedEdgePolicy
190✔
684
                                if p2 != nil {
380✔
685
                                        cachedInPolicy =
190✔
686
                                                models.NewCachedPolicy(p2)
190✔
687
                                        cachedInPolicy.ToNodePubKey =
190✔
688
                                                toNodeCallback
190✔
689
                                        cachedInPolicy.ToNodeFeatures =
190✔
690
                                                toNodeFeatures
190✔
691
                                }
190✔
692

693
                                directedChannel := &DirectedChannel{
190✔
694
                                        ChannelID: e.ChannelID,
190✔
695
                                        IsNode1: node.PubKeyBytes ==
190✔
696
                                                e.NodeKey1Bytes,
190✔
697
                                        OtherNode:    e.NodeKey2Bytes,
190✔
698
                                        Capacity:     e.Capacity,
190✔
699
                                        OutPolicySet: p1 != nil,
190✔
700
                                        InPolicy:     cachedInPolicy,
190✔
701
                                }
190✔
702

190✔
703
                                if node.PubKeyBytes == e.NodeKey2Bytes {
285✔
704
                                        directedChannel.OtherNode =
95✔
705
                                                e.NodeKey1Bytes
95✔
706
                                }
95✔
707

708
                                channels[e.ChannelID] = directedChannel
190✔
709

190✔
710
                                return nil
190✔
711
                        })
712
                if err != nil {
20✔
713
                        return err
×
714
                }
×
715

716
                return cb(node.PubKeyBytes, channels)
20✔
717
        })
718
}
719

720
// DisabledChannelIDs returns the channel ids of disabled channels.
721
// A channel is disabled when both of the associated ChannelEdgePolicies
722
// have their disabled bit on.
723
func (c *KVStore) DisabledChannelIDs() ([]uint64, error) {
6✔
724
        var disabledChanIDs []uint64
6✔
725
        var chanEdgeFound map[uint64]struct{}
6✔
726

6✔
727
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
12✔
728
                edges := tx.ReadBucket(edgeBucket)
6✔
729
                if edges == nil {
6✔
730
                        return ErrGraphNoEdgesFound
×
731
                }
×
732

733
                disabledEdgePolicyIndex := edges.NestedReadBucket(
6✔
734
                        disabledEdgePolicyBucket,
6✔
735
                )
6✔
736
                if disabledEdgePolicyIndex == nil {
7✔
737
                        return nil
1✔
738
                }
1✔
739

740
                // We iterate over all disabled policies and we add each channel
741
                // that has more than one disabled policy to the disabledChanIDs
742
                // array.
743
                return disabledEdgePolicyIndex.ForEach(
5✔
744
                        func(k, v []byte) error {
16✔
745
                                chanID := byteOrder.Uint64(k[:8])
11✔
746
                                _, edgeFound := chanEdgeFound[chanID]
11✔
747
                                if edgeFound {
15✔
748
                                        delete(chanEdgeFound, chanID)
4✔
749
                                        disabledChanIDs = append(
4✔
750
                                                disabledChanIDs, chanID,
4✔
751
                                        )
4✔
752

4✔
753
                                        return nil
4✔
754
                                }
4✔
755

756
                                chanEdgeFound[chanID] = struct{}{}
7✔
757

7✔
758
                                return nil
7✔
759
                        },
760
                )
761
        }, func() {
6✔
762
                disabledChanIDs = nil
6✔
763
                chanEdgeFound = make(map[uint64]struct{})
6✔
764
        })
6✔
765
        if err != nil {
6✔
766
                return nil, err
×
767
        }
×
768

769
        return disabledChanIDs, nil
6✔
770
}
771

772
// ForEachNode iterates through all the stored vertices/nodes in the graph,
773
// executing the passed callback with each node encountered. If the callback
774
// returns an error, then the transaction is aborted and the iteration stops
775
// early. Any operations performed on the NodeTx passed to the call-back are
776
// executed under the same read transaction and so, methods on the NodeTx object
777
// _MUST_ only be called from within the call-back.
778
func (c *KVStore) ForEachNode(cb func(tx NodeRTx) error) error {
131✔
779
        return c.forEachNode(func(tx kvdb.RTx,
131✔
780
                node *models.LightningNode) error {
1,292✔
781

1,161✔
782
                return cb(newChanGraphNodeTx(tx, c, node))
1,161✔
783
        })
1,161✔
784
}
785

786
// forEachNode iterates through all the stored vertices/nodes in the graph,
787
// executing the passed callback with each node encountered. If the callback
788
// returns an error, then the transaction is aborted and the iteration stops
789
// early.
790
//
791
// TODO(roasbeef): add iterator interface to allow for memory efficient graph
792
// traversal when graph gets mega.
793
func (c *KVStore) forEachNode(
794
        cb func(kvdb.RTx, *models.LightningNode) error) error {
132✔
795

132✔
796
        traversal := func(tx kvdb.RTx) error {
264✔
797
                // First grab the nodes bucket which stores the mapping from
132✔
798
                // pubKey to node information.
132✔
799
                nodes := tx.ReadBucket(nodeBucket)
132✔
800
                if nodes == nil {
132✔
801
                        return ErrGraphNotFound
×
802
                }
×
803

804
                return nodes.ForEach(func(pubKey, nodeBytes []byte) error {
1,574✔
805
                        // If this is the source key, then we skip this
1,442✔
806
                        // iteration as the value for this key is a pubKey
1,442✔
807
                        // rather than raw node information.
1,442✔
808
                        if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 {
1,706✔
809
                                return nil
264✔
810
                        }
264✔
811

812
                        nodeReader := bytes.NewReader(nodeBytes)
1,181✔
813
                        node, err := deserializeLightningNode(nodeReader)
1,181✔
814
                        if err != nil {
1,181✔
815
                                return err
×
816
                        }
×
817

818
                        // Execute the callback, the transaction will abort if
819
                        // this returns an error.
820
                        return cb(tx, &node)
1,181✔
821
                })
822
        }
823

824
        return kvdb.View(c.db, traversal, func() {})
264✔
825
}
826

827
// ForEachNodeCacheable iterates through all the stored vertices/nodes in the
828
// graph, executing the passed callback with each node encountered. If the
829
// callback returns an error, then the transaction is aborted and the iteration
830
// stops early.
831
func (c *KVStore) ForEachNodeCacheable(cb func(route.Vertex,
832
        *lnwire.FeatureVector) error) error {
141✔
833

141✔
834
        traversal := func(tx kvdb.RTx) error {
282✔
835
                // First grab the nodes bucket which stores the mapping from
141✔
836
                // pubKey to node information.
141✔
837
                nodes := tx.ReadBucket(nodeBucket)
141✔
838
                if nodes == nil {
141✔
839
                        return ErrGraphNotFound
×
840
                }
×
841

842
                return nodes.ForEach(func(pubKey, nodeBytes []byte) error {
540✔
843
                        // If this is the source key, then we skip this
399✔
844
                        // iteration as the value for this key is a pubKey
399✔
845
                        // rather than raw node information.
399✔
846
                        if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 {
678✔
847
                                return nil
279✔
848
                        }
279✔
849

850
                        nodeReader := bytes.NewReader(nodeBytes)
123✔
851
                        node, features, err := deserializeLightningNodeCacheable( //nolint:ll
123✔
852
                                nodeReader,
123✔
853
                        )
123✔
854
                        if err != nil {
123✔
855
                                return err
×
856
                        }
×
857

858
                        // Execute the callback, the transaction will abort if
859
                        // this returns an error.
860
                        return cb(node, features)
123✔
861
                })
862
        }
863

864
        return kvdb.View(c.db, traversal, func() {})
282✔
865
}
866

867
// SourceNode returns the source node of the graph. The source node is treated
868
// as the center node within a star-graph. This method may be used to kick off
869
// a path finding algorithm in order to explore the reachability of another
870
// node based off the source node.
871
func (c *KVStore) SourceNode(_ context.Context) (*models.LightningNode,
872
        error) {
240✔
873

240✔
874
        var source *models.LightningNode
240✔
875
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
480✔
876
                // First grab the nodes bucket which stores the mapping from
240✔
877
                // pubKey to node information.
240✔
878
                nodes := tx.ReadBucket(nodeBucket)
240✔
879
                if nodes == nil {
240✔
880
                        return ErrGraphNotFound
×
881
                }
×
882

883
                node, err := c.sourceNode(nodes)
240✔
884
                if err != nil {
241✔
885
                        return err
1✔
886
                }
1✔
887
                source = node
239✔
888

239✔
889
                return nil
239✔
890
        }, func() {
240✔
891
                source = nil
240✔
892
        })
240✔
893
        if err != nil {
241✔
894
                return nil, err
1✔
895
        }
1✔
896

897
        return source, nil
239✔
898
}
899

900
// sourceNode uses an existing database transaction and returns the source node
901
// of the graph. The source node is treated as the center node within a
902
// star-graph. This method may be used to kick off a path finding algorithm in
903
// order to explore the reachability of another node based off the source node.
904
func (c *KVStore) sourceNode(nodes kvdb.RBucket) (*models.LightningNode,
905
        error) {
498✔
906

498✔
907
        selfPub := nodes.Get(sourceKey)
498✔
908
        if selfPub == nil {
499✔
909
                return nil, ErrSourceNodeNotSet
1✔
910
        }
1✔
911

912
        // With the pubKey of the source node retrieved, we're able to
913
        // fetch the full node information.
914
        node, err := fetchLightningNode(nodes, selfPub)
497✔
915
        if err != nil {
497✔
916
                return nil, err
×
917
        }
×
918

919
        return &node, nil
497✔
920
}
921

922
// SetSourceNode sets the source node within the graph database. The source
923
// node is to be used as the center of a star-graph within path finding
924
// algorithms.
925
func (c *KVStore) SetSourceNode(_ context.Context,
926
        node *models.LightningNode) error {
117✔
927

117✔
928
        nodePubBytes := node.PubKeyBytes[:]
117✔
929

117✔
930
        return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
234✔
931
                // First grab the nodes bucket which stores the mapping from
117✔
932
                // pubKey to node information.
117✔
933
                nodes, err := tx.CreateTopLevelBucket(nodeBucket)
117✔
934
                if err != nil {
117✔
935
                        return err
×
936
                }
×
937

938
                // Next we create the mapping from source to the targeted
939
                // public key.
940
                if err := nodes.Put(sourceKey, nodePubBytes); err != nil {
117✔
941
                        return err
×
942
                }
×
943

944
                // Finally, we commit the information of the lightning node
945
                // itself.
946
                return addLightningNode(tx, node)
117✔
947
        }, func() {})
117✔
948
}
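// Illustrative sketch (not part of kv_store.go): a hypothetical helper that
// sets the source node via SetSourceNode above and reads it back through
// SourceNode, the pair of methods that anchor the star-graph view described
// in their comments. ctx, store and self are assumed to be supplied by the
// caller.
func setAndFetchSource(ctx context.Context, store *KVStore,
        self *models.LightningNode) (*models.LightningNode, error) {

        if err := store.SetSourceNode(ctx, self); err != nil {
                return nil, err
        }

        return store.SourceNode(ctx)
}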
949

950
// AddLightningNode adds a vertex/node to the graph database. If the node is not
951
// in the database from before, this will add a new, unconnected one to the
952
// graph. If it is present from before, this will update that node's
953
// information. Note that this method is expected to only be called to update an
954
// already present node from a node announcement, or to insert a node found in a
955
// channel update.
956
//
957
// TODO(roasbeef): also need sig of announcement.
958
func (c *KVStore) AddLightningNode(ctx context.Context,
959
        node *models.LightningNode, opts ...batch.SchedulerOption) error {
806✔
960

806✔
961
        r := &batch.Request[kvdb.RwTx]{
806✔
962
                Opts: batch.NewSchedulerOptions(opts...),
806✔
963
                Do: func(tx kvdb.RwTx) error {
1,612✔
964
                        return addLightningNode(tx, node)
806✔
965
                },
806✔
966
        }
967

968
        return c.nodeScheduler.Execute(ctx, r)
806✔
969
}
970

971
func addLightningNode(tx kvdb.RwTx, node *models.LightningNode) error {
1,001✔
972
        nodes, err := tx.CreateTopLevelBucket(nodeBucket)
1,001✔
973
        if err != nil {
1,001✔
974
                return err
×
975
        }
×
976

977
        aliases, err := nodes.CreateBucketIfNotExists(aliasIndexBucket)
1,001✔
978
        if err != nil {
1,001✔
979
                return err
×
980
        }
×
981

982
        updateIndex, err := nodes.CreateBucketIfNotExists(
1,001✔
983
                nodeUpdateIndexBucket,
1,001✔
984
        )
1,001✔
985
        if err != nil {
1,001✔
986
                return err
×
987
        }
×
988

989
        return putLightningNode(nodes, aliases, updateIndex, node)
1,001✔
990
}
991

992
// LookupAlias attempts to return the alias as advertised by the target node.
993
// TODO(roasbeef): currently assumes that aliases are unique...
994
func (c *KVStore) LookupAlias(_ context.Context,
995
        pub *btcec.PublicKey) (string, error) {
5✔
996

5✔
997
        var alias string
5✔
998

5✔
999
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
10✔
1000
                nodes := tx.ReadBucket(nodeBucket)
5✔
1001
                if nodes == nil {
5✔
1002
                        return ErrGraphNodesNotFound
×
1003
                }
×
1004

1005
                aliases := nodes.NestedReadBucket(aliasIndexBucket)
5✔
1006
                if aliases == nil {
5✔
1007
                        return ErrGraphNodesNotFound
×
1008
                }
×
1009

1010
                nodePub := pub.SerializeCompressed()
5✔
1011
                a := aliases.Get(nodePub)
5✔
1012
                if a == nil {
6✔
1013
                        return ErrNodeAliasNotFound
1✔
1014
                }
1✔
1015

1016
                // TODO(roasbeef): should actually be using the utf-8
1017
                // package...
1018
                alias = string(a)
4✔
1019

4✔
1020
                return nil
4✔
1021
        }, func() {
5✔
1022
                alias = ""
5✔
1023
        })
5✔
1024
        if err != nil {
6✔
1025
                return "", err
1✔
1026
        }
1✔
1027

1028
        return alias, nil
4✔
1029
}
1030

1031
// DeleteLightningNode starts a new database transaction to remove a vertex/node
1032
// from the database according to the node's public key.
1033
func (c *KVStore) DeleteLightningNode(_ context.Context,
1034
        nodePub route.Vertex) error {
4✔
1035

4✔
1036
        // TODO(roasbeef): ensure dangling edges are removed...
4✔
1037
        return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
8✔
1038
                nodes := tx.ReadWriteBucket(nodeBucket)
4✔
1039
                if nodes == nil {
4✔
1040
                        return ErrGraphNodeNotFound
×
1041
                }
×
1042

1043
                return c.deleteLightningNode(nodes, nodePub[:])
4✔
1044
        }, func() {})
4✔
1045
}
1046

1047
// deleteLightningNode uses an existing database transaction to remove a
1048
// vertex/node from the database according to the node's public key.
1049
func (c *KVStore) deleteLightningNode(nodes kvdb.RwBucket,
1050
        compressedPubKey []byte) error {
70✔
1051

70✔
1052
        aliases := nodes.NestedReadWriteBucket(aliasIndexBucket)
70✔
1053
        if aliases == nil {
70✔
1054
                return ErrGraphNodesNotFound
×
1055
        }
×
1056

1057
        if err := aliases.Delete(compressedPubKey); err != nil {
70✔
1058
                return err
×
1059
        }
×
1060

1061
        // Before we delete the node, we'll fetch its current state so we can
1062
        // determine when its last update was to clear out the node update
1063
        // index.
1064
        node, err := fetchLightningNode(nodes, compressedPubKey)
70✔
1065
        if err != nil {
71✔
1066
                return err
1✔
1067
        }
1✔
1068

1069
        if err := nodes.Delete(compressedPubKey); err != nil {
69✔
1070
                return err
×
1071
        }
×
1072

1073
        // Finally, we'll delete the index entry for the node within the
1074
        // nodeUpdateIndexBucket as this node is no longer active, so we don't
1075
        // need to track its last update.
1076
        nodeUpdateIndex := nodes.NestedReadWriteBucket(nodeUpdateIndexBucket)
69✔
1077
        if nodeUpdateIndex == nil {
69✔
1078
                return ErrGraphNodesNotFound
×
1079
        }
×
1080

1081
        // In order to delete the entry, we'll need to reconstruct the key for
1082
        // its last update.
1083
        updateUnix := uint64(node.LastUpdate.Unix())
69✔
1084
        var indexKey [8 + 33]byte
69✔
1085
        byteOrder.PutUint64(indexKey[:8], updateUnix)
69✔
1086
        copy(indexKey[8:], compressedPubKey)
69✔
1087

69✔
1088
        return nodeUpdateIndex.Delete(indexKey[:])
69✔
1089
}
1090

1091
// AddChannelEdge adds a new (undirected, blank) edge to the graph database. An
1092
// undirected edge between the two target nodes is created. The information stored
1093
// denotes the static attributes of the channel, such as the channelID, the keys
1094
// involved in creation of the channel, and the set of features that the channel
1095
// supports. The chanPoint and chanID are used to uniquely identify the edge
1096
// globally within the database.
1097
func (c *KVStore) AddChannelEdge(ctx context.Context,
1098
        edge *models.ChannelEdgeInfo, opts ...batch.SchedulerOption) error {
1,723✔
1099

1,723✔
1100
        var alreadyExists bool
1,723✔
1101
        r := &batch.Request[kvdb.RwTx]{
1,723✔
1102
                Opts: batch.NewSchedulerOptions(opts...),
1,723✔
1103
                Reset: func() {
3,446✔
1104
                        alreadyExists = false
1,723✔
1105
                },
1,723✔
1106
                Do: func(tx kvdb.RwTx) error {
1,723✔
1107
                        err := c.addChannelEdge(tx, edge)
1,723✔
1108

1,723✔
1109
                        // Silence ErrEdgeAlreadyExist so that the batch can
1,723✔
1110
                        // succeed, but propagate the error via local state.
1,723✔
1111
                        if errors.Is(err, ErrEdgeAlreadyExist) {
1,960✔
1112
                                alreadyExists = true
237✔
1113
                                return nil
237✔
1114
                        }
237✔
1115

1116
                        return err
1,486✔
1117
                },
1118
                OnCommit: func(err error) error {
1,723✔
1119
                        switch {
1,723✔
1120
                        case err != nil:
×
1121
                                return err
×
1122
                        case alreadyExists:
237✔
1123
                                return ErrEdgeAlreadyExist
237✔
1124
                        default:
1,486✔
1125
                                c.rejectCache.remove(edge.ChannelID)
1,486✔
1126
                                c.chanCache.remove(edge.ChannelID)
1,486✔
1127
                                return nil
1,486✔
1128
                        }
1129
                },
1130
        }
1131

1132
        return c.chanScheduler.Execute(ctx, r)
1,723✔
1133
}
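// Illustrative sketch (not part of kv_store.go): a hypothetical helper showing
// how a caller of AddChannelEdge above might treat ErrEdgeAlreadyExist, which
// the OnCommit hook surfaces after the batch commits, as a benign outcome.
func addEdgeIdempotent(ctx context.Context, store *KVStore,
        edge *models.ChannelEdgeInfo) error {

        err := store.AddChannelEdge(ctx, edge)
        if errors.Is(err, ErrEdgeAlreadyExist) {
                // The edge was already present; treat this as success.
                return nil
        }

        return err
}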
1134

1135
// addChannelEdge is the private form of AddChannelEdge that allows callers to
// utilize an existing db transaction.
func (c *KVStore) addChannelEdge(tx kvdb.RwTx,
        edge *models.ChannelEdgeInfo) error {

        // Construct the channel's primary key which is the 8-byte channel ID.
        var chanKey [8]byte
        binary.BigEndian.PutUint64(chanKey[:], edge.ChannelID)

        nodes, err := tx.CreateTopLevelBucket(nodeBucket)
        if err != nil {
                return err
        }
        edges, err := tx.CreateTopLevelBucket(edgeBucket)
        if err != nil {
                return err
        }
        edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket)
        if err != nil {
                return err
        }
        chanIndex, err := edges.CreateBucketIfNotExists(channelPointBucket)
        if err != nil {
                return err
        }

        // First, attempt to check if this edge has already been created. If
        // so, then we can exit early as this method is meant to be idempotent.
        if edgeInfo := edgeIndex.Get(chanKey[:]); edgeInfo != nil {
                return ErrEdgeAlreadyExist
        }

        // Before we insert the channel into the database, we'll ensure that
        // both nodes already exist in the channel graph. If either node
        // doesn't, then we'll insert a "shell" node that just includes its
        // public key, so subsequent validation and queries can work properly.
        _, node1Err := fetchLightningNode(nodes, edge.NodeKey1Bytes[:])
        switch {
        case errors.Is(node1Err, ErrGraphNodeNotFound):
                node1Shell := models.LightningNode{
                        PubKeyBytes:          edge.NodeKey1Bytes,
                        HaveNodeAnnouncement: false,
                }
                err := addLightningNode(tx, &node1Shell)
                if err != nil {
                        return fmt.Errorf("unable to create shell node "+
                                "for: %x: %w", edge.NodeKey1Bytes, err)
                }
        case node1Err != nil:
                return node1Err
        }

        _, node2Err := fetchLightningNode(nodes, edge.NodeKey2Bytes[:])
        switch {
        case errors.Is(node2Err, ErrGraphNodeNotFound):
                node2Shell := models.LightningNode{
                        PubKeyBytes:          edge.NodeKey2Bytes,
                        HaveNodeAnnouncement: false,
                }
                err := addLightningNode(tx, &node2Shell)
                if err != nil {
                        return fmt.Errorf("unable to create shell node "+
                                "for: %x: %w", edge.NodeKey2Bytes, err)
                }
        case node2Err != nil:
                return node2Err
        }

        // If the edge hasn't been created yet, then we'll first add it to the
        // edge index in order to associate the edge between two nodes and also
        // store the static components of the channel.
        if err := putChanEdgeInfo(edgeIndex, edge, chanKey); err != nil {
                return err
        }

        // Mark edge policies for both sides as unknown. This is to enable
        // efficient incoming channel lookup for a node.
        keys := []*[33]byte{
                &edge.NodeKey1Bytes,
                &edge.NodeKey2Bytes,
        }
        for _, key := range keys {
                err := putChanEdgePolicyUnknown(edges, edge.ChannelID, key[:])
                if err != nil {
                        return err
                }
        }

        // Finally we add it to the channel index which maps channel points
        // (outpoints) to the shorter channel ID's.
        var b bytes.Buffer
        if err := WriteOutpoint(&b, &edge.ChannelPoint); err != nil {
                return err
        }

        return chanIndex.Put(b.Bytes(), chanKey[:])
}

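// NOTE: The following helper is an illustrative sketch added for this review;
// it is not part of the original file. It only shows how the two keys written
// by addChannelEdge relate: the edge index is keyed by the big-endian 8-byte
// channel ID, while the channel-point index maps the serialized funding
// outpoint back to that same 8-byte key. It relies solely on WriteOutpoint,
// which is defined elsewhere in this package.
func exampleChannelKeys(edge *models.ChannelEdgeInfo) ([8]byte, []byte, error) {
        // The primary key of the edge index: the channel ID in big-endian.
        var chanKey [8]byte
        binary.BigEndian.PutUint64(chanKey[:], edge.ChannelID)

        // The key of the channel-point index: the serialized funding outpoint.
        var b bytes.Buffer
        if err := WriteOutpoint(&b, &edge.ChannelPoint); err != nil {
                return chanKey, nil, err
        }

        // chanIndex.Put(b.Bytes(), chanKey[:]) is what ties the two indexes
        // together in addChannelEdge above.
        return chanKey, b.Bytes(), nil
}
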
// HasChannelEdge returns true if the database knows of a channel edge with the
// passed channel ID, and false otherwise. If an edge with that ID is found
// within the graph, then two time stamps representing the last time the edge
// was updated for both directed edges are returned along with the boolean. If
// it is not found, then the zombie index is checked and its result is returned
// as the second boolean.
func (c *KVStore) HasChannelEdge(
        chanID uint64) (time.Time, time.Time, bool, bool, error) {

        var (
                upd1Time time.Time
                upd2Time time.Time
                exists   bool
                isZombie bool
        )

        // We'll query the cache with the shared lock held to allow multiple
        // readers to access values in the cache concurrently if they exist.
        c.cacheMu.RLock()
        if entry, ok := c.rejectCache.get(chanID); ok {
                c.cacheMu.RUnlock()
                upd1Time = time.Unix(entry.upd1Time, 0)
                upd2Time = time.Unix(entry.upd2Time, 0)
                exists, isZombie = entry.flags.unpack()

                return upd1Time, upd2Time, exists, isZombie, nil
        }
        c.cacheMu.RUnlock()

        c.cacheMu.Lock()
        defer c.cacheMu.Unlock()

        // The item was not found with the shared lock, so we'll acquire the
        // exclusive lock and check the cache again in case another method added
        // the entry to the cache while no lock was held.
        if entry, ok := c.rejectCache.get(chanID); ok {
                upd1Time = time.Unix(entry.upd1Time, 0)
                upd2Time = time.Unix(entry.upd2Time, 0)
                exists, isZombie = entry.flags.unpack()

                return upd1Time, upd2Time, exists, isZombie, nil
        }

        if err := kvdb.View(c.db, func(tx kvdb.RTx) error {
                edges := tx.ReadBucket(edgeBucket)
                if edges == nil {
                        return ErrGraphNoEdgesFound
                }
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
                if edgeIndex == nil {
                        return ErrGraphNoEdgesFound
                }

                var channelID [8]byte
                byteOrder.PutUint64(channelID[:], chanID)

                // If the edge doesn't exist, then we'll also check our zombie
                // index.
                if edgeIndex.Get(channelID[:]) == nil {
                        exists = false
                        zombieIndex := edges.NestedReadBucket(zombieBucket)
                        if zombieIndex != nil {
                                isZombie, _, _ = isZombieEdge(
                                        zombieIndex, chanID,
                                )
                        }

                        return nil
                }

                exists = true
                isZombie = false

                // If the channel has been found in the graph, then retrieve
                // the edge itself so we can return the last updated
                // timestamps.
                nodes := tx.ReadBucket(nodeBucket)
                if nodes == nil {
                        return ErrGraphNodeNotFound
                }

                e1, e2, err := fetchChanEdgePolicies(
                        edgeIndex, edges, channelID[:],
                )
                if err != nil {
                        return err
                }

                // As we may have only one of the edges populated, only set the
                // update time if the edge was found in the database.
                if e1 != nil {
                        upd1Time = e1.LastUpdate
                }
                if e2 != nil {
                        upd2Time = e2.LastUpdate
                }

                return nil
        }, func() {}); err != nil {
                return time.Time{}, time.Time{}, exists, isZombie, err
        }

        c.rejectCache.insert(chanID, rejectCacheEntry{
                upd1Time: upd1Time.Unix(),
                upd2Time: upd2Time.Unix(),
                flags:    packRejectFlags(exists, isZombie),
        })

        return upd1Time, upd2Time, exists, isZombie, nil
}

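// NOTE: Illustrative sketch only, not part of the original file. It shows how
// a caller might interpret the five return values of HasChannelEdge; the
// chanID argument is a placeholder.
func exampleHasChannelEdge(c *KVStore, chanID uint64) error {
        upd1, upd2, exists, isZombie, err := c.HasChannelEdge(chanID)
        if err != nil {
                return err
        }

        switch {
        case isZombie:
                // The channel was seen before, but is now marked as a zombie.
                fmt.Printf("channel %d is a zombie\n", chanID)

        case exists:
                // The channel is live; upd1/upd2 are the last update times of
                // the two directed edges (zero if a policy is missing).
                fmt.Printf("channel %d last updated at %v / %v\n",
                        chanID, upd1, upd2)

        default:
                fmt.Printf("channel %d is unknown\n", chanID)
        }

        return nil
}
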
// AddEdgeProof sets the proof of an existing edge in the graph database.
func (c *KVStore) AddEdgeProof(chanID lnwire.ShortChannelID,
        proof *models.ChannelAuthProof) error {

        // Construct the channel's primary key which is the 8-byte channel ID.
        var chanKey [8]byte
        binary.BigEndian.PutUint64(chanKey[:], chanID.ToUint64())

        return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
                edges := tx.ReadWriteBucket(edgeBucket)
                if edges == nil {
                        return ErrEdgeNotFound
                }

                edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
                if edgeIndex == nil {
                        return ErrEdgeNotFound
                }

                edge, err := fetchChanEdgeInfo(edgeIndex, chanKey[:])
                if err != nil {
                        return err
                }

                edge.AuthProof = proof

                return putChanEdgeInfo(edgeIndex, &edge, chanKey)
        }, func() {})
}

const (
        // pruneTipBytes is the total size of the value which stores a prune
        // entry of the graph in the prune log. The "prune tip" is the last
        // entry in the prune log, and indicates if the channel graph is in
        // sync with the current UTXO state. The structure of the value
        // is: blockHash, taking 32 bytes total.
        pruneTipBytes = 32
)

// PruneGraph prunes newly closed channels from the channel graph in response
// to a new block being solved on the network. Any transactions which spend the
// funding output of any known channels within the graph will be deleted.
// Additionally, the "prune tip", or the last block which has been used to
// prune the graph, is stored so callers can ensure the graph is fully in sync
// with the current UTXO state. A slice of channels that have been closed by
// the target block along with any pruned nodes are returned if the function
// succeeds without error.
func (c *KVStore) PruneGraph(spentOutputs []*wire.OutPoint,
        blockHash *chainhash.Hash, blockHeight uint32) (
        []*models.ChannelEdgeInfo, []route.Vertex, error) {

        c.cacheMu.Lock()
        defer c.cacheMu.Unlock()

        var (
                chansClosed []*models.ChannelEdgeInfo
                prunedNodes []route.Vertex
        )

        err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
                // First grab the edges bucket which houses the information
                // we'd like to delete.
                edges, err := tx.CreateTopLevelBucket(edgeBucket)
                if err != nil {
                        return err
                }

                // Next grab the two edge indexes which will also need to be
                // updated.
                edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket)
                if err != nil {
                        return err
                }
                chanIndex, err := edges.CreateBucketIfNotExists(
                        channelPointBucket,
                )
                if err != nil {
                        return err
                }
                nodes := tx.ReadWriteBucket(nodeBucket)
                if nodes == nil {
                        return ErrSourceNodeNotSet
                }
                zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
                if err != nil {
                        return err
                }

                // For each of the outpoints that have been spent within the
                // block, we attempt to delete them from the graph: if such an
                // outpoint was a channel, then that channel has now been
                // closed.
                for _, chanPoint := range spentOutputs {
                        // TODO(roasbeef): load channel bloom filter, continue
                        // if NOT in filter

                        var opBytes bytes.Buffer
                        err := WriteOutpoint(&opBytes, chanPoint)
                        if err != nil {
                                return err
                        }

                        // First attempt to see if the channel exists within
                        // the database; if not, then we can exit early.
                        chanID := chanIndex.Get(opBytes.Bytes())
                        if chanID == nil {
                                continue
                        }

                        // Attempt to delete the channel; an ErrEdgeNotFound
                        // will be returned if that outpoint isn't known to be
                        // a channel. If no error is returned, then a channel
                        // was successfully pruned.
                        edgeInfo, err := c.delChannelEdgeUnsafe(
                                edges, edgeIndex, chanIndex, zombieIndex,
                                chanID, false, false,
                        )
                        if err != nil && !errors.Is(err, ErrEdgeNotFound) {
                                return err
                        }

                        chansClosed = append(chansClosed, edgeInfo)
                }

                metaBucket, err := tx.CreateTopLevelBucket(graphMetaBucket)
                if err != nil {
                        return err
                }

                pruneBucket, err := metaBucket.CreateBucketIfNotExists(
                        pruneLogBucket,
                )
                if err != nil {
                        return err
                }

                // With the graph pruned, add a new entry to the prune log,
                // which can be used to check if the graph is fully synced with
                // the current UTXO state.
                var blockHeightBytes [4]byte
                byteOrder.PutUint32(blockHeightBytes[:], blockHeight)

                var newTip [pruneTipBytes]byte
                copy(newTip[:], blockHash[:])

                err = pruneBucket.Put(blockHeightBytes[:], newTip[:])
                if err != nil {
                        return err
                }

                // Now that the graph has been pruned, we'll also attempt to
                // prune any nodes that have had a channel closed within the
                // latest block.
                prunedNodes, err = c.pruneGraphNodes(nodes, edgeIndex)

                return err
        }, func() {
                chansClosed = nil
                prunedNodes = nil
        })
        if err != nil {
                return nil, nil, err
        }

        for _, channel := range chansClosed {
                c.rejectCache.remove(channel.ChannelID)
                c.chanCache.remove(channel.ChannelID)
        }

        return chansClosed, prunedNodes, nil
}

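// NOTE: Illustrative sketch only, not part of the original file. It shows the
// intended call pattern for PruneGraph when a new block is connected: pass the
// outpoints spent by that block together with its hash and height, then act on
// the channels and nodes that were removed. All arguments are placeholders.
func examplePruneOnBlock(c *KVStore, spent []*wire.OutPoint,
        hash *chainhash.Hash, height uint32) error {

        closed, prunedNodes, err := c.PruneGraph(spent, hash, height)
        if err != nil {
                return err
        }

        fmt.Printf("block %d closed %d channels and pruned %d nodes\n",
                height, len(closed), len(prunedNodes))

        return nil
}
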
// PruneGraphNodes is a garbage collection method which attempts to prune out
// any nodes from the channel graph that are currently unconnected. This
// ensures that we only maintain a graph of reachable nodes. In the event that
// a pruned node gains more channels, it will be re-added to the graph.
func (c *KVStore) PruneGraphNodes() ([]route.Vertex, error) {
        var prunedNodes []route.Vertex
        err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
                nodes := tx.ReadWriteBucket(nodeBucket)
                if nodes == nil {
                        return ErrGraphNodesNotFound
                }
                edges := tx.ReadWriteBucket(edgeBucket)
                if edges == nil {
                        return ErrGraphNotFound
                }
                edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
                if edgeIndex == nil {
                        return ErrGraphNoEdgesFound
                }

                var err error
                prunedNodes, err = c.pruneGraphNodes(nodes, edgeIndex)
                if err != nil {
                        return err
                }

                return nil
        }, func() {
                prunedNodes = nil
        })

        return prunedNodes, err
}

// pruneGraphNodes attempts to remove any nodes from the graph that have had a
// channel closed within the current block. If the node still has existing
// channels in the graph, this will act as a no-op.
func (c *KVStore) pruneGraphNodes(nodes kvdb.RwBucket,
        edgeIndex kvdb.RwBucket) ([]route.Vertex, error) {

        log.Trace("Pruning nodes from graph with no open channels")

        // We'll retrieve the graph's source node to ensure we don't remove it
        // even if it no longer has any open channels.
        sourceNode, err := c.sourceNode(nodes)
        if err != nil {
                return nil, err
        }

        // We'll use this map to keep count of the number of references to a
        // node in the graph. A node should only be removed once it has no more
        // references in the graph.
        nodeRefCounts := make(map[[33]byte]int)
        err = nodes.ForEach(func(pubKey, nodeBytes []byte) error {
                // If this is the source key, then we skip this
                // iteration as the value for this key is a pubKey
                // rather than raw node information.
                if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 {
                        return nil
                }

                var nodePub [33]byte
                copy(nodePub[:], pubKey)
                nodeRefCounts[nodePub] = 0

                return nil
        })
        if err != nil {
                return nil, err
        }

        // To ensure we never delete the source node, we'll start off by
        // bumping its ref count to 1.
        nodeRefCounts[sourceNode.PubKeyBytes] = 1

        // Next, we'll run through the edgeIndex which maps a channel ID to the
        // edge info. We'll use this scan to populate our reference count map
        // above.
        err = edgeIndex.ForEach(func(chanID, edgeInfoBytes []byte) error {
                // The first 66 bytes of the edge info contain the pubkeys of
                // the nodes that this edge attaches. We'll extract them, and
                // add them to the ref count map.
                var node1, node2 [33]byte
                copy(node1[:], edgeInfoBytes[:33])
                copy(node2[:], edgeInfoBytes[33:])

                // With the nodes extracted, we'll increase the ref count of
                // each of the nodes.
                nodeRefCounts[node1]++
                nodeRefCounts[node2]++

                return nil
        })
        if err != nil {
                return nil, err
        }

        // Finally, we'll make a second pass over the set of nodes, and delete
        // any nodes that have a ref count of zero.
        var pruned []route.Vertex
        for nodePubKey, refCount := range nodeRefCounts {
                // If the ref count of the node isn't zero, then we can safely
                // skip it as it still has edges to or from it within the
                // graph.
                if refCount != 0 {
                        continue
                }

                // If we reach this point, then there are no longer any edges
                // that connect this node, so we can delete it.
                err := c.deleteLightningNode(nodes, nodePubKey[:])
                if err != nil {
                        if errors.Is(err, ErrGraphNodeNotFound) ||
                                errors.Is(err, ErrGraphNodesNotFound) {

                                log.Warnf("Unable to prune node %x from the "+
                                        "graph: %v", nodePubKey, err)
                                continue
                        }

                        return nil, err
                }

                log.Infof("Pruned unconnected node %x from channel graph",
                        nodePubKey[:])

                pruned = append(pruned, nodePubKey)
        }

        if len(pruned) > 0 {
                log.Infof("Pruned %v unconnected nodes from the channel graph",
                        len(pruned))
        }

        return pruned, err
}

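// NOTE: Illustrative sketch only, not part of the original file. It restates
// the reference-counting idea used by pruneGraphNodes in isolation, with plain
// slices instead of kvdb buckets: seed every node with a count of zero, bump
// the count for both endpoints of every channel, and anything left at zero
// (other than the source node) is unconnected and safe to prune.
func exampleUnconnectedNodes(nodes [][33]byte, channels [][2][33]byte,
        source [33]byte) [][33]byte {

        refCounts := make(map[[33]byte]int, len(nodes))
        for _, node := range nodes {
                refCounts[node] = 0
        }

        // The source node is never pruned, so give it an artificial reference.
        refCounts[source] = 1

        for _, channel := range channels {
                refCounts[channel[0]]++
                refCounts[channel[1]]++
        }

        var unconnected [][33]byte
        for node, count := range refCounts {
                if count == 0 {
                        unconnected = append(unconnected, node)
                }
        }

        return unconnected
}
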
// DisconnectBlockAtHeight is used to indicate that the block specified
// by the passed height has been disconnected from the main chain. This
// will "rewind" the graph back to the height below, deleting channels
// that are no longer confirmed from the graph. The prune log will be
// set to the last prune height valid for the remaining chain.
// Channels that were removed from the graph resulting from the
// disconnected block are returned.
func (c *KVStore) DisconnectBlockAtHeight(height uint32) (
        []*models.ChannelEdgeInfo, error) {

        // Every channel having a ShortChannelID starting at 'height'
        // will no longer be confirmed.
        startShortChanID := lnwire.ShortChannelID{
                BlockHeight: height,
        }

        // Delete everything after this height from the db up until the
        // SCID alias range.
        endShortChanID := aliasmgr.StartingAlias

        // The block height will be the first 3 bytes of the channel IDs.
        var chanIDStart [8]byte
        byteOrder.PutUint64(chanIDStart[:], startShortChanID.ToUint64())
        var chanIDEnd [8]byte
        byteOrder.PutUint64(chanIDEnd[:], endShortChanID.ToUint64())

        c.cacheMu.Lock()
        defer c.cacheMu.Unlock()

        // Keep track of the channels that are removed from the graph.
        var removedChans []*models.ChannelEdgeInfo

        if err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
                edges, err := tx.CreateTopLevelBucket(edgeBucket)
                if err != nil {
                        return err
                }
                edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket)
                if err != nil {
                        return err
                }
                chanIndex, err := edges.CreateBucketIfNotExists(
                        channelPointBucket,
                )
                if err != nil {
                        return err
                }
                zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
                if err != nil {
                        return err
                }

                // Scan from chanIDStart to chanIDEnd, deleting every
                // found edge.
                // NOTE: we must delete the edges after the cursor loop, since
                // modifying the bucket while traversing is not safe.
                // NOTE: We use a < comparison in bytes.Compare instead of <=
                // so that the StartingAlias itself isn't deleted.
                var keys [][]byte
                cursor := edgeIndex.ReadWriteCursor()

                //nolint:ll
                for k, _ := cursor.Seek(chanIDStart[:]); k != nil &&
                        bytes.Compare(k, chanIDEnd[:]) < 0; k, _ = cursor.Next() {
                        keys = append(keys, k)
                }

                for _, k := range keys {
                        edgeInfo, err := c.delChannelEdgeUnsafe(
                                edges, edgeIndex, chanIndex, zombieIndex,
                                k, false, false,
                        )
                        if err != nil && !errors.Is(err, ErrEdgeNotFound) {
                                return err
                        }

                        removedChans = append(removedChans, edgeInfo)
                }

                // Delete all the entries in the prune log having a height
                // greater or equal to the block disconnected.
                metaBucket, err := tx.CreateTopLevelBucket(graphMetaBucket)
                if err != nil {
                        return err
                }

                pruneBucket, err := metaBucket.CreateBucketIfNotExists(
                        pruneLogBucket,
                )
                if err != nil {
                        return err
                }

                var pruneKeyStart [4]byte
                byteOrder.PutUint32(pruneKeyStart[:], height)

                var pruneKeyEnd [4]byte
                byteOrder.PutUint32(pruneKeyEnd[:], math.MaxUint32)

                // To avoid modifying the bucket while traversing, we delete
                // the keys in a second loop.
                var pruneKeys [][]byte
                pruneCursor := pruneBucket.ReadWriteCursor()
                //nolint:ll
                for k, _ := pruneCursor.Seek(pruneKeyStart[:]); k != nil &&
                        bytes.Compare(k, pruneKeyEnd[:]) <= 0; k, _ = pruneCursor.Next() {
                        pruneKeys = append(pruneKeys, k)
                }

                for _, k := range pruneKeys {
                        if err := pruneBucket.Delete(k); err != nil {
                                return err
                        }
                }

                return nil
        }, func() {
                removedChans = nil
        }); err != nil {
                return nil, err
        }

        for _, channel := range removedChans {
                c.rejectCache.remove(channel.ChannelID)
                c.chanCache.remove(channel.ChannelID)
        }

        return removedChans, nil
}

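// NOTE: Illustrative sketch only, not part of the original file. It shows how
// the 8-byte scan boundaries used by DisconnectBlockAtHeight are derived: a
// short channel ID with only the block height set marks the start of the
// range, while aliasmgr.StartingAlias caps it so alias SCIDs survive the
// rewind.
func exampleDisconnectRange(height uint32) ([8]byte, [8]byte) {
        start := lnwire.ShortChannelID{BlockHeight: height}
        end := aliasmgr.StartingAlias

        var startKey, endKey [8]byte
        byteOrder.PutUint64(startKey[:], start.ToUint64())
        byteOrder.PutUint64(endKey[:], end.ToUint64())

        return startKey, endKey
}
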
// PruneTip returns the block height and hash of the latest block that has been
// used to prune channels in the graph. Knowing the "prune tip" allows callers
// to tell if the graph is currently in sync with the current best known UTXO
// state.
func (c *KVStore) PruneTip() (*chainhash.Hash, uint32, error) {
        var (
                tipHash   chainhash.Hash
                tipHeight uint32
        )

        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
                graphMeta := tx.ReadBucket(graphMetaBucket)
                if graphMeta == nil {
                        return ErrGraphNotFound
                }
                pruneBucket := graphMeta.NestedReadBucket(pruneLogBucket)
                if pruneBucket == nil {
                        return ErrGraphNeverPruned
                }

                pruneCursor := pruneBucket.ReadCursor()

                // The prune key with the largest block height will be our
                // prune tip.
                k, v := pruneCursor.Last()
                if k == nil {
                        return ErrGraphNeverPruned
                }

                // Once we have the prune tip, the value will be the block hash,
                // and the key the block height.
                copy(tipHash[:], v)
                tipHeight = byteOrder.Uint32(k)

                return nil
        }, func() {})
        if err != nil {
                return nil, 0, err
        }

        return &tipHash, tipHeight, nil
}

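// NOTE: Illustrative sketch only, not part of the original file. It shows one
// way a caller might consume PruneTip: treat ErrGraphNeverPruned as "start
// from scratch" rather than as a hard failure.
func examplePruneTip(c *KVStore) error {
        hash, height, err := c.PruneTip()
        switch {
        case errors.Is(err, ErrGraphNeverPruned):
                fmt.Println("graph has never been pruned")
                return nil

        case err != nil:
                return err
        }

        fmt.Printf("graph pruned up to height %d (block %v)\n", height, hash)

        return nil
}
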
// DeleteChannelEdges removes edges with the given channel IDs from the
// database and marks them as zombies. This ensures that we're unable to re-add
// them to our database once again. If an edge does not exist within the
// database, then ErrEdgeNotFound will be returned. If strictZombiePruning is
// true, then when we mark these edges as zombies, we'll set up the keys such
// that we require the node that failed to send the fresh update to be the one
// that resurrects the channel from its zombie state. The markZombie bool
// denotes whether or not to mark the channel as a zombie.
func (c *KVStore) DeleteChannelEdges(strictZombiePruning, markZombie bool,
        chanIDs ...uint64) ([]*models.ChannelEdgeInfo, error) {

        // TODO(roasbeef): possibly delete from node bucket if node has no more
        // channels
        // TODO(roasbeef): don't delete both edges?

        c.cacheMu.Lock()
        defer c.cacheMu.Unlock()

        var infos []*models.ChannelEdgeInfo
        err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
                edges := tx.ReadWriteBucket(edgeBucket)
                if edges == nil {
                        return ErrEdgeNotFound
                }
                edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
                if edgeIndex == nil {
                        return ErrEdgeNotFound
                }
                chanIndex := edges.NestedReadWriteBucket(channelPointBucket)
                if chanIndex == nil {
                        return ErrEdgeNotFound
                }
                nodes := tx.ReadWriteBucket(nodeBucket)
                if nodes == nil {
                        return ErrGraphNodeNotFound
                }
                zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
                if err != nil {
                        return err
                }

                var rawChanID [8]byte
                for _, chanID := range chanIDs {
                        byteOrder.PutUint64(rawChanID[:], chanID)
                        edgeInfo, err := c.delChannelEdgeUnsafe(
                                edges, edgeIndex, chanIndex, zombieIndex,
                                rawChanID[:], markZombie, strictZombiePruning,
                        )
                        if err != nil {
                                return err
                        }

                        infos = append(infos, edgeInfo)
                }

                return nil
        }, func() {
                infos = nil
        })
        if err != nil {
                return nil, err
        }

        for _, chanID := range chanIDs {
                c.rejectCache.remove(chanID)
                c.chanCache.remove(chanID)
        }

        return infos, nil
}

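// NOTE: Illustrative sketch only, not part of the original file. It shows one
// way to call DeleteChannelEdges when a set of channels should be removed and
// remembered as zombies so stale gossip cannot re-add them.
func exampleDeleteAsZombies(c *KVStore, chanIDs ...uint64) error {
        const (
                strictZombiePruning = false
                markZombie          = true
        )

        removed, err := c.DeleteChannelEdges(
                strictZombiePruning, markZombie, chanIDs...,
        )
        if err != nil {
                return err
        }

        fmt.Printf("removed %d channel edges\n", len(removed))

        return nil
}
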
// ChannelID attempts to look up the 8-byte compact channel ID which maps to
// the passed channel point (outpoint). If the passed channel doesn't exist
// within the database, then ErrEdgeNotFound is returned.
func (c *KVStore) ChannelID(chanPoint *wire.OutPoint) (uint64, error) {
        var chanID uint64
        if err := kvdb.View(c.db, func(tx kvdb.RTx) error {
                var err error
                chanID, err = getChanID(tx, chanPoint)
                return err
        }, func() {
                chanID = 0
        }); err != nil {
                return 0, err
        }

        return chanID, nil
}

// getChanID returns the assigned channel ID for a given channel point.
func getChanID(tx kvdb.RTx, chanPoint *wire.OutPoint) (uint64, error) {
        var b bytes.Buffer
        if err := WriteOutpoint(&b, chanPoint); err != nil {
                return 0, err
        }

        edges := tx.ReadBucket(edgeBucket)
        if edges == nil {
                return 0, ErrGraphNoEdgesFound
        }
        chanIndex := edges.NestedReadBucket(channelPointBucket)
        if chanIndex == nil {
                return 0, ErrGraphNoEdgesFound
        }

        chanIDBytes := chanIndex.Get(b.Bytes())
        if chanIDBytes == nil {
                return 0, ErrEdgeNotFound
        }

        chanID := byteOrder.Uint64(chanIDBytes)

        return chanID, nil
}

// TODO(roasbeef): allow updates to use Batch?

// HighestChanID returns the "highest" known channel ID in the channel graph.
// This represents the "newest" channel from the PoV of the chain. This method
// can be used by peers to quickly determine if their graphs are in sync.
func (c *KVStore) HighestChanID(_ context.Context) (uint64, error) {
        var cid uint64

        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
                edges := tx.ReadBucket(edgeBucket)
                if edges == nil {
                        return ErrGraphNoEdgesFound
                }
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
                if edgeIndex == nil {
                        return ErrGraphNoEdgesFound
                }

                // In order to find the highest chan ID, we'll fetch a cursor
                // and use that to seek to the "end" of our known range.
                cidCursor := edgeIndex.ReadCursor()

                lastChanID, _ := cidCursor.Last()

                // If there's no key, then this means that we don't actually
                // know of any channels, so we'll return a predictable error.
                if lastChanID == nil {
                        return ErrGraphNoEdgesFound
                }

                // Otherwise, we'll deserialize the channel ID and return it
                // to the caller.
                cid = byteOrder.Uint64(lastChanID)

                return nil
        }, func() {
                cid = 0
        })
        if err != nil && !errors.Is(err, ErrGraphNoEdgesFound) {
                return 0, err
        }

        return cid, nil
}

// ChannelEdge represents the complete set of information for a channel edge in
// the known channel graph. This struct couples the core information of the
// edge with each of the known advertised edge policies.
type ChannelEdge struct {
        // Info contains all the static information describing the channel.
        Info *models.ChannelEdgeInfo

        // Policy1 points to the "first" edge policy of the channel containing
        // the dynamic information required to properly route through the edge.
        Policy1 *models.ChannelEdgePolicy

        // Policy2 points to the "second" edge policy of the channel containing
        // the dynamic information required to properly route through the edge.
        Policy2 *models.ChannelEdgePolicy

        // Node1 is "node 1" in the channel. This is the node that would have
        // produced Policy1 if it exists.
        Node1 *models.LightningNode

        // Node2 is "node 2" in the channel. This is the node that would have
        // produced Policy2 if it exists.
        Node2 *models.LightningNode
}

// ChanUpdatesInHorizon returns all the known channel edges which have at least
// one edge that has an update timestamp within the specified horizon.
func (c *KVStore) ChanUpdatesInHorizon(startTime,
        endTime time.Time) ([]ChannelEdge, error) {

        // To ensure we don't return duplicate ChannelEdges, we'll use an
        // additional map to keep track of the edges already seen to prevent
        // re-adding them.
        var edgesSeen map[uint64]struct{}
        var edgesToCache map[uint64]ChannelEdge
        var edgesInHorizon []ChannelEdge

        c.cacheMu.Lock()
        defer c.cacheMu.Unlock()

        var hits int
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
                edges := tx.ReadBucket(edgeBucket)
                if edges == nil {
                        return ErrGraphNoEdgesFound
                }
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
                if edgeIndex == nil {
                        return ErrGraphNoEdgesFound
                }
                edgeUpdateIndex := edges.NestedReadBucket(edgeUpdateIndexBucket)
                if edgeUpdateIndex == nil {
                        return ErrGraphNoEdgesFound
                }

                nodes := tx.ReadBucket(nodeBucket)
                if nodes == nil {
                        return ErrGraphNodesNotFound
                }

                // We'll now obtain a cursor to perform a range query within
                // the index to find all channels within the horizon.
                updateCursor := edgeUpdateIndex.ReadCursor()

                var startTimeBytes, endTimeBytes [8 + 8]byte
                byteOrder.PutUint64(
                        startTimeBytes[:8], uint64(startTime.Unix()),
                )
                byteOrder.PutUint64(
                        endTimeBytes[:8], uint64(endTime.Unix()),
                )

                // With our start and end times constructed, we'll step through
                // the index collecting the info and policy of each update of
                // each channel that has a last update within the time range.
                //
                //nolint:ll
                for indexKey, _ := updateCursor.Seek(startTimeBytes[:]); indexKey != nil &&
                        bytes.Compare(indexKey, endTimeBytes[:]) <= 0; indexKey, _ = updateCursor.Next() {
                        // We have a new eligible entry, so we'll slice off the
                        // chan ID so we can query it in the DB.
                        chanID := indexKey[8:]

                        // If we've already retrieved the info and policies for
                        // this edge, then we can skip it as we don't need to do
                        // so again.
                        chanIDInt := byteOrder.Uint64(chanID)
                        if _, ok := edgesSeen[chanIDInt]; ok {
                                continue
                        }

                        if channel, ok := c.chanCache.get(chanIDInt); ok {
                                hits++
                                edgesSeen[chanIDInt] = struct{}{}
                                edgesInHorizon = append(edgesInHorizon, channel)

                                continue
                        }

                        // First, we'll fetch the static edge information.
                        edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID)
                        if err != nil {
                                chanID := byteOrder.Uint64(chanID)
                                return fmt.Errorf("unable to fetch info for "+
                                        "edge with chan_id=%v: %v", chanID, err)
                        }

                        // With the static information obtained, we'll now
                        // fetch the dynamic policy info.
                        edge1, edge2, err := fetchChanEdgePolicies(
                                edgeIndex, edges, chanID,
                        )
                        if err != nil {
                                chanID := byteOrder.Uint64(chanID)
                                return fmt.Errorf("unable to fetch policies "+
                                        "for edge with chan_id=%v: %v", chanID,
                                        err)
                        }

                        node1, err := fetchLightningNode(
                                nodes, edgeInfo.NodeKey1Bytes[:],
                        )
                        if err != nil {
                                return err
                        }

                        node2, err := fetchLightningNode(
                                nodes, edgeInfo.NodeKey2Bytes[:],
                        )
                        if err != nil {
                                return err
                        }

                        // Finally, we'll collate this edge with the rest of
                        // edges to be returned.
                        edgesSeen[chanIDInt] = struct{}{}
                        channel := ChannelEdge{
                                Info:    &edgeInfo,
                                Policy1: edge1,
                                Policy2: edge2,
                                Node1:   &node1,
                                Node2:   &node2,
                        }
                        edgesInHorizon = append(edgesInHorizon, channel)
                        edgesToCache[chanIDInt] = channel
                }

                return nil
        }, func() {
                edgesSeen = make(map[uint64]struct{})
                edgesToCache = make(map[uint64]ChannelEdge)
                edgesInHorizon = nil
        })
        switch {
        case errors.Is(err, ErrGraphNoEdgesFound):
                fallthrough
        case errors.Is(err, ErrGraphNodesNotFound):
                break

        case err != nil:
                return nil, err
        }

        // Insert any edges loaded from disk into the cache.
        for chanid, channel := range edgesToCache {
                c.chanCache.insert(chanid, channel)
        }

        if len(edgesInHorizon) > 0 {
                log.Debugf("ChanUpdatesInHorizon hit percentage: %f (%d/%d)",
                        float64(hits)/float64(len(edgesInHorizon)), hits,
                        len(edgesInHorizon))
        } else {
                log.Debugf("ChanUpdatesInHorizon returned no edges in "+
                        "horizon (%s, %s)", startTime, endTime)
        }

        return edgesInHorizon, nil
}

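// NOTE: Illustrative sketch only, not part of the original file. It shows a
// typical gossip-sync style query: ask for every channel that saw an update
// within the last day. The one-day lookback window is a placeholder.
func exampleRecentChannelUpdates(c *KVStore) ([]ChannelEdge, error) {
        endTime := time.Now()
        startTime := endTime.Add(-24 * time.Hour)

        return c.ChanUpdatesInHorizon(startTime, endTime)
}
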
// NodeUpdatesInHorizon returns all the known lightning nodes which have an
// update timestamp within the passed range. This method can be used by two
// nodes to quickly determine if they have the same set of up to date node
// announcements.
func (c *KVStore) NodeUpdatesInHorizon(startTime,
        endTime time.Time) ([]models.LightningNode, error) {

        var nodesInHorizon []models.LightningNode

        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
                nodes := tx.ReadBucket(nodeBucket)
                if nodes == nil {
                        return ErrGraphNodesNotFound
                }

                nodeUpdateIndex := nodes.NestedReadBucket(nodeUpdateIndexBucket)
                if nodeUpdateIndex == nil {
                        return ErrGraphNodesNotFound
                }

                // We'll now obtain a cursor to perform a range query within
                // the index to find all node announcements within the horizon.
                updateCursor := nodeUpdateIndex.ReadCursor()

                var startTimeBytes, endTimeBytes [8 + 33]byte
                byteOrder.PutUint64(
                        startTimeBytes[:8], uint64(startTime.Unix()),
                )
                byteOrder.PutUint64(
                        endTimeBytes[:8], uint64(endTime.Unix()),
                )

                // With our start and end times constructed, we'll step through
                // the index collecting info for each node within the time
                // range.
                //
                //nolint:ll
                for indexKey, _ := updateCursor.Seek(startTimeBytes[:]); indexKey != nil &&
                        bytes.Compare(indexKey, endTimeBytes[:]) <= 0; indexKey, _ = updateCursor.Next() {
                        nodePub := indexKey[8:]
                        node, err := fetchLightningNode(nodes, nodePub)
                        if err != nil {
                                return err
                        }

                        nodesInHorizon = append(nodesInHorizon, node)
                }

                return nil
        }, func() {
                nodesInHorizon = nil
        })
        switch {
        case errors.Is(err, ErrGraphNoEdgesFound):
                fallthrough
        case errors.Is(err, ErrGraphNodesNotFound):
                break

        case err != nil:
                return nil, err
        }

        return nodesInHorizon, nil
}

// FilterKnownChanIDs takes a set of channel IDs and returns the subset of
// channel IDs that we don't know and that are not known zombies of the passed
// set. In other words, we perform a set difference of our set of chan IDs and
// the ones passed in. This method can be used by callers to determine the set
// of channels another peer knows of that we don't. The ChannelUpdateInfos for
// the known zombies are also returned.
func (c *KVStore) FilterKnownChanIDs(chansInfo []ChannelUpdateInfo) ([]uint64,
        []ChannelUpdateInfo, error) {

        var (
                newChanIDs   []uint64
                knownZombies []ChannelUpdateInfo
        )

        c.cacheMu.Lock()
        defer c.cacheMu.Unlock()

        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
                edges := tx.ReadBucket(edgeBucket)
                if edges == nil {
                        return ErrGraphNoEdgesFound
                }
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
                if edgeIndex == nil {
                        return ErrGraphNoEdgesFound
                }

                // Fetch the zombie index; it may not exist if no edges have
                // ever been marked as zombies. If the index has been
                // initialized, we will use it later to skip known zombie edges.
                zombieIndex := edges.NestedReadBucket(zombieBucket)

                // We'll run through the set of chanIDs and collate only the
                // set of channels that cannot be found within our db.
                var cidBytes [8]byte
                for _, info := range chansInfo {
                        scid := info.ShortChannelID.ToUint64()
                        byteOrder.PutUint64(cidBytes[:], scid)

                        // If the edge is already known, skip it.
                        if v := edgeIndex.Get(cidBytes[:]); v != nil {
                                continue
                        }

                        // If the edge is a known zombie, skip it.
                        if zombieIndex != nil {
                                isZombie, _, _ := isZombieEdge(
                                        zombieIndex, scid,
                                )

                                if isZombie {
                                        knownZombies = append(
                                                knownZombies, info,
                                        )

                                        continue
                                }
                        }

                        newChanIDs = append(newChanIDs, scid)
                }

                return nil
        }, func() {
                newChanIDs = nil
                knownZombies = nil
        })
        switch {
        // If we don't know of any edges yet, then we'll return the entire set
        // of chan IDs specified.
        case errors.Is(err, ErrGraphNoEdgesFound):
                ogChanIDs := make([]uint64, len(chansInfo))
                for i, info := range chansInfo {
                        ogChanIDs[i] = info.ShortChannelID.ToUint64()
                }

                return ogChanIDs, nil, nil

        case err != nil:
                return nil, nil, err
        }

        return newChanIDs, knownZombies, nil
}

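// NOTE: Illustrative sketch only, not part of the original file. It shows how
// a caller might feed SCIDs received from a remote peer through
// FilterKnownChanIDs to learn which channels are genuinely new and which are
// already-known zombies. It uses NewChannelUpdateInfo, defined further below.
func exampleFilterRemoteSCIDs(c *KVStore,
        remoteSCIDs []lnwire.ShortChannelID) ([]uint64, error) {

        chansInfo := make([]ChannelUpdateInfo, 0, len(remoteSCIDs))
        for _, scid := range remoteSCIDs {
                chansInfo = append(chansInfo, NewChannelUpdateInfo(
                        scid, time.Time{}, time.Time{},
                ))
        }

        newSCIDs, knownZombies, err := c.FilterKnownChanIDs(chansInfo)
        if err != nil {
                return nil, err
        }

        fmt.Printf("%d new channels, %d known zombies\n",
                len(newSCIDs), len(knownZombies))

        return newSCIDs, nil
}
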
// ChannelUpdateInfo couples the SCID of a channel with the timestamps of the
// latest received channel updates for the channel.
type ChannelUpdateInfo struct {
        // ShortChannelID is the SCID identifier of the channel.
        ShortChannelID lnwire.ShortChannelID

        // Node1UpdateTimestamp is the timestamp of the latest received update
        // from the node 1 channel peer. This will be set to zero time if no
        // update has yet been received from this node.
        Node1UpdateTimestamp time.Time

        // Node2UpdateTimestamp is the timestamp of the latest received update
        // from the node 2 channel peer. This will be set to zero time if no
        // update has yet been received from this node.
        Node2UpdateTimestamp time.Time
}

// NewChannelUpdateInfo is a constructor which makes sure we initialize the
// timestamps with the zero-seconds unix timestamp (which equals
// `January 1, 1970, 00:00:00 UTC`) in case the value is `time.Time{}`.
func NewChannelUpdateInfo(scid lnwire.ShortChannelID, node1Timestamp,
        node2Timestamp time.Time) ChannelUpdateInfo {

        chanInfo := ChannelUpdateInfo{
                ShortChannelID:       scid,
                Node1UpdateTimestamp: node1Timestamp,
                Node2UpdateTimestamp: node2Timestamp,
        }

        if node1Timestamp.IsZero() {
                chanInfo.Node1UpdateTimestamp = time.Unix(0, 0)
        }

        if node2Timestamp.IsZero() {
                chanInfo.Node2UpdateTimestamp = time.Unix(0, 0)
        }

        return chanInfo
}

// BlockChannelRange represents a range of channels for a given block height.
type BlockChannelRange struct {
        // Height is the height of the block all of the channels below were
        // included in.
        Height uint32

        // Channels is the list of channels identified by their short ID
        // representation known to us that were included in the block height
        // above. The list may include channel update timestamp information if
        // requested.
        Channels []ChannelUpdateInfo
}

// FilterChannelRange returns the channel IDs of all known channels which were
// mined in a block height within the passed range. The channel IDs are grouped
// by their common block height. This method can be used to quickly share with
// a peer the set of channels we know of within a particular range to catch
// them up after a period of time offline. If withTimestamps is true, then the
// timestamp info of the latest received channel update messages of the channel
// will be included in the response.
func (c *KVStore) FilterChannelRange(startHeight,
        endHeight uint32, withTimestamps bool) ([]BlockChannelRange, error) {

        startChanID := &lnwire.ShortChannelID{
                BlockHeight: startHeight,
        }

        endChanID := lnwire.ShortChannelID{
                BlockHeight: endHeight,
                TxIndex:     math.MaxUint32 & 0x00ffffff,
                TxPosition:  math.MaxUint16,
        }

        // As we need to perform a range scan, we'll convert the starting and
        // ending height to their corresponding values when encoded using short
        // channel IDs.
        var chanIDStart, chanIDEnd [8]byte
        byteOrder.PutUint64(chanIDStart[:], startChanID.ToUint64())
        byteOrder.PutUint64(chanIDEnd[:], endChanID.ToUint64())

        var channelsPerBlock map[uint32][]ChannelUpdateInfo
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
                edges := tx.ReadBucket(edgeBucket)
                if edges == nil {
                        return ErrGraphNoEdgesFound
                }
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
                if edgeIndex == nil {
                        return ErrGraphNoEdgesFound
                }

                cursor := edgeIndex.ReadCursor()

                // We'll now iterate through the database, and find each
                // channel ID that resides within the specified range.
                //
                //nolint:ll
                for k, v := cursor.Seek(chanIDStart[:]); k != nil &&
                        bytes.Compare(k, chanIDEnd[:]) <= 0; k, v = cursor.Next() {
                        // Don't send alias SCIDs during gossip sync.
                        edgeReader := bytes.NewReader(v)
                        edgeInfo, err := deserializeChanEdgeInfo(edgeReader)
                        if err != nil {
                                return err
                        }

                        if edgeInfo.AuthProof == nil {
                                continue
                        }

                        // This channel ID rests within the target range, so
                        // we'll add it to our returned set.
                        rawCid := byteOrder.Uint64(k)
                        cid := lnwire.NewShortChanIDFromInt(rawCid)

                        chanInfo := NewChannelUpdateInfo(
                                cid, time.Time{}, time.Time{},
                        )

                        if !withTimestamps {
                                channelsPerBlock[cid.BlockHeight] = append(
                                        channelsPerBlock[cid.BlockHeight],
                                        chanInfo,
                                )

                                continue
                        }

                        node1Key, node2Key := computeEdgePolicyKeys(&edgeInfo)

                        rawPolicy := edges.Get(node1Key)
                        if len(rawPolicy) != 0 {
                                r := bytes.NewReader(rawPolicy)

                                edge, err := deserializeChanEdgePolicyRaw(r)
                                if err != nil && !errors.Is(
                                        err, ErrEdgePolicyOptionalFieldNotFound,
                                ) && !errors.Is(err, ErrParsingExtraTLVBytes) {

                                        return err
                                }

                                chanInfo.Node1UpdateTimestamp = edge.LastUpdate
                        }

                        rawPolicy = edges.Get(node2Key)
                        if len(rawPolicy) != 0 {
                                r := bytes.NewReader(rawPolicy)

                                edge, err := deserializeChanEdgePolicyRaw(r)
                                if err != nil && !errors.Is(
                                        err, ErrEdgePolicyOptionalFieldNotFound,
                                ) && !errors.Is(err, ErrParsingExtraTLVBytes) {

                                        return err
                                }

                                chanInfo.Node2UpdateTimestamp = edge.LastUpdate
                        }

                        channelsPerBlock[cid.BlockHeight] = append(
                                channelsPerBlock[cid.BlockHeight], chanInfo,
                        )
                }

                return nil
14✔
2480
        }, func() {
14✔
2481
                channelsPerBlock = make(map[uint32][]ChannelUpdateInfo)
14✔
2482
        })
14✔
2483

2484
        switch {
14✔
2485
        // If we don't know of any channels yet, then there's nothing to
2486
        // filter, so we'll return an empty slice.
2487
        case errors.Is(err, ErrGraphNoEdgesFound) || len(channelsPerBlock) == 0:
6✔
2488
                return nil, nil
6✔
2489

2490
        case err != nil:
×
2491
                return nil, err
×
2492
        }
2493

2494
        // Return the channel ranges in ascending block height order.
2495
        blocks := make([]uint32, 0, len(channelsPerBlock))
11✔
2496
        for block := range channelsPerBlock {
36✔
2497
                blocks = append(blocks, block)
25✔
2498
        }
25✔
2499
        sort.Slice(blocks, func(i, j int) bool {
38✔
2500
                return blocks[i] < blocks[j]
27✔
2501
        })
27✔
2502

2503
        channelRanges := make([]BlockChannelRange, 0, len(channelsPerBlock))
11✔
2504
        for _, block := range blocks {
36✔
2505
                channelRanges = append(channelRanges, BlockChannelRange{
25✔
2506
                        Height:   block,
25✔
2507
                        Channels: channelsPerBlock[block],
25✔
2508
                })
25✔
2509
        }
25✔
2510

2511
        return channelRanges, nil
11✔
2512
}
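// Illustrative usage sketch (not part of the original file): callers typically
// walk the returned ranges, which are already sorted by block height. The
// store variable and the height bounds below are hypothetical:
//
//	ranges, err := store.FilterChannelRange(700_000, 700_100, true)
//	if err != nil {
//		// handle error
//	}
//	for _, blk := range ranges {
//		for _, ch := range blk.Channels {
//			_ = ch.ShortChannelID // compare update timestamps, reply to peer, etc.
//		}
//	}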
2513

2514
// FetchChanInfos returns the set of channel edges that correspond to the passed
2515
// channel IDs. If an edge in the query is unknown to the database, it will be
2516
// skipped and the result will contain only those edges that exist at the time
2517
// of the query. This can be used to respond to peer queries that are seeking to
2518
// fill in gaps in their view of the channel graph.
2519
func (c *KVStore) FetchChanInfos(chanIDs []uint64) ([]ChannelEdge, error) {
7✔
2520
        return c.fetchChanInfos(nil, chanIDs)
7✔
2521
}
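// Illustrative usage sketch (not part of the original file): unknown channel
// IDs are silently skipped, so the result may be shorter than the query. The
// store, scid1 and scid2 variables below are hypothetical:
//
//	edges, err := store.FetchChanInfos([]uint64{scid1, scid2})
//	if err != nil {
//		// handle error
//	}
//	// len(edges) <= 2; each entry bundles Info, Policy1/Policy2 and both nodes.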
7✔
2522

2523
// fetchChanInfos returns the set of channel edges that correspond to the passed
2524
// channel IDs. If an edge in the query is unknown to the database, it will be
2525
// skipped and the result will contain only those edges that exist at the time
2526
// of the query. This can be used to respond to peer queries that are seeking to
2527
// fill in gaps in their view of the channel graph.
2528
//
2529
// NOTE: An optional transaction may be provided. If none is provided, then a
2530
// new one will be created.
2531
func (c *KVStore) fetchChanInfos(tx kvdb.RTx, chanIDs []uint64) (
2532
        []ChannelEdge, error) {
7✔
2533
        // TODO(roasbeef): sort cids?
7✔
2534

7✔
2535
        var (
7✔
2536
                chanEdges []ChannelEdge
7✔
2537
                cidBytes  [8]byte
7✔
2538
        )
7✔
2539

7✔
2540
        fetchChanInfos := func(tx kvdb.RTx) error {
14✔
2541
                edges := tx.ReadBucket(edgeBucket)
7✔
2542
                if edges == nil {
7✔
2543
                        return ErrGraphNoEdgesFound
×
2544
                }
×
2545
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
7✔
2546
                if edgeIndex == nil {
7✔
2547
                        return ErrGraphNoEdgesFound
×
2548
                }
×
2549
                nodes := tx.ReadBucket(nodeBucket)
7✔
2550
                if nodes == nil {
7✔
2551
                        return ErrGraphNotFound
×
2552
                }
×
2553

2554
                for _, cid := range chanIDs {
21✔
2555
                        byteOrder.PutUint64(cidBytes[:], cid)
14✔
2556

14✔
2557
                        // First, we'll fetch the static edge information. If
14✔
2558
                        // the edge is unknown, we will skip the edge and
14✔
2559
                        // continue gathering all known edges.
14✔
2560
                        edgeInfo, err := fetchChanEdgeInfo(
14✔
2561
                                edgeIndex, cidBytes[:],
14✔
2562
                        )
14✔
2563
                        switch {
14✔
2564
                        case errors.Is(err, ErrEdgeNotFound):
3✔
2565
                                continue
3✔
2566
                        case err != nil:
×
2567
                                return err
×
2568
                        }
2569

2570
                        // With the static information obtained, we'll now
2571
                        // fetch the dynamic policy info.
2572
                        edge1, edge2, err := fetchChanEdgePolicies(
11✔
2573
                                edgeIndex, edges, cidBytes[:],
11✔
2574
                        )
11✔
2575
                        if err != nil {
11✔
2576
                                return err
×
2577
                        }
×
2578

2579
                        node1, err := fetchLightningNode(
11✔
2580
                                nodes, edgeInfo.NodeKey1Bytes[:],
11✔
2581
                        )
11✔
2582
                        if err != nil {
11✔
2583
                                return err
×
2584
                        }
×
2585

2586
                        node2, err := fetchLightningNode(
11✔
2587
                                nodes, edgeInfo.NodeKey2Bytes[:],
11✔
2588
                        )
11✔
2589
                        if err != nil {
11✔
2590
                                return err
×
2591
                        }
×
2592

2593
                        chanEdges = append(chanEdges, ChannelEdge{
11✔
2594
                                Info:    &edgeInfo,
11✔
2595
                                Policy1: edge1,
11✔
2596
                                Policy2: edge2,
11✔
2597
                                Node1:   &node1,
11✔
2598
                                Node2:   &node2,
11✔
2599
                        })
11✔
2600
                }
2601

2602
                return nil
7✔
2603
        }
2604

2605
        if tx == nil {
14✔
2606
                err := kvdb.View(c.db, fetchChanInfos, func() {
14✔
2607
                        chanEdges = nil
7✔
2608
                })
7✔
2609
                if err != nil {
7✔
2610
                        return nil, err
×
2611
                }
×
2612

2613
                return chanEdges, nil
7✔
2614
        }
2615

2616
        err := fetchChanInfos(tx)
×
2617
        if err != nil {
×
2618
                return nil, err
×
2619
        }
×
2620

2621
        return chanEdges, nil
×
2622
}
2623

2624
func delEdgeUpdateIndexEntry(edgesBucket kvdb.RwBucket, chanID uint64,
2625
        edge1, edge2 *models.ChannelEdgePolicy) error {
139✔
2626

139✔
2627
        // First, we'll fetch the edge update index bucket which currently
139✔
2628
        // stores an entry for the channel we're about to delete.
139✔
2629
        updateIndex := edgesBucket.NestedReadWriteBucket(edgeUpdateIndexBucket)
139✔
2630
        if updateIndex == nil {
139✔
2631
                // No edges in bucket, return early.
×
2632
                return nil
×
2633
        }
×
2634

2635
        // Now that we have the bucket, we'll attempt to construct a template
2636
        // for the index key: updateTime || chanid.
2637
        var indexKey [8 + 8]byte
139✔
2638
        byteOrder.PutUint64(indexKey[8:], chanID)
139✔
2639

139✔
2640
        // With the template constructed, we'll attempt to delete an entry that
139✔
2641
        // would have been created by both edges: we'll alternate the update
139✔
2642
        // times, as one may have overridden the other.
139✔
2643
        if edge1 != nil {
152✔
2644
                byteOrder.PutUint64(
13✔
2645
                        indexKey[:8], uint64(edge1.LastUpdate.Unix()),
13✔
2646
                )
13✔
2647
                if err := updateIndex.Delete(indexKey[:]); err != nil {
13✔
2648
                        return err
×
2649
                }
×
2650
        }
2651

2652
        // We'll also attempt to delete the entry that may have been created by
2653
        // the second edge.
2654
        if edge2 != nil {
154✔
2655
                byteOrder.PutUint64(
15✔
2656
                        indexKey[:8], uint64(edge2.LastUpdate.Unix()),
15✔
2657
                )
15✔
2658
                if err := updateIndex.Delete(indexKey[:]); err != nil {
15✔
2659
                        return err
×
2660
                }
×
2661
        }
2662

2663
        return nil
139✔
2664
}
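// Illustrative sketch (not part of the original file): the update index key
// deleted above follows the updateTime || chanID template, mirroring how the
// entries are created. The lastUpdate and chanID values are hypothetical:
//
//	var indexKey [8 + 8]byte
//	byteOrder.PutUint64(indexKey[:8], uint64(lastUpdate.Unix()))
//	byteOrder.PutUint64(indexKey[8:], chanID)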
2665

2666
// delChannelEdgeUnsafe deletes the edge with the given chanID from the graph
2667
// cache. It then goes on to delete any policy info and edge info for this
2668
// channel from the DB and finally, if isZombie is true, it will add an entry
2669
// for this channel in the zombie index.
2670
//
2671
// NOTE: this method MUST only be called if the cacheMu has already been
2672
// acquired.
2673
func (c *KVStore) delChannelEdgeUnsafe(edges, edgeIndex, chanIndex,
2674
        zombieIndex kvdb.RwBucket, chanID []byte, isZombie,
2675
        strictZombie bool) (*models.ChannelEdgeInfo, error) {
201✔
2676

201✔
2677
        edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID)
201✔
2678
        if err != nil {
263✔
2679
                return nil, err
62✔
2680
        }
62✔
2681

2682
        // We'll also remove the entry in the edge update index bucket before
2683
        // we delete the edges themselves so we can access their last update
2684
        // times.
2685
        cid := byteOrder.Uint64(chanID)
139✔
2686
        edge1, edge2, err := fetchChanEdgePolicies(edgeIndex, edges, chanID)
139✔
2687
        if err != nil {
139✔
2688
                return nil, err
×
2689
        }
×
2690
        err = delEdgeUpdateIndexEntry(edges, cid, edge1, edge2)
139✔
2691
        if err != nil {
139✔
2692
                return nil, err
×
2693
        }
×
2694

2695
        // The edge key is of the format pubKey || chanID. First we construct
2696
        // the latter half, populating the channel ID.
2697
        var edgeKey [33 + 8]byte
139✔
2698
        copy(edgeKey[33:], chanID)
139✔
2699

139✔
2700
        // With the latter half constructed, copy over the first public key to
139✔
2701
        // delete the edge in this direction, then the second to delete the
139✔
2702
        // edge in the opposite direction.
139✔
2703
        copy(edgeKey[:33], edgeInfo.NodeKey1Bytes[:])
139✔
2704
        if edges.Get(edgeKey[:]) != nil {
278✔
2705
                if err := edges.Delete(edgeKey[:]); err != nil {
139✔
2706
                        return nil, err
×
2707
                }
×
2708
        }
2709
        copy(edgeKey[:33], edgeInfo.NodeKey2Bytes[:])
139✔
2710
        if edges.Get(edgeKey[:]) != nil {
278✔
2711
                if err := edges.Delete(edgeKey[:]); err != nil {
139✔
2712
                        return nil, err
×
2713
                }
×
2714
        }
2715

2716
        // As part of deleting the edge we also remove all disabled entries
2717
        // from the edgePolicyDisabledIndex bucket. We do that for both
2718
        // directions.
2719
        err = updateEdgePolicyDisabledIndex(edges, cid, false, false)
139✔
2720
        if err != nil {
139✔
2721
                return nil, err
×
2722
        }
×
2723
        err = updateEdgePolicyDisabledIndex(edges, cid, true, false)
139✔
2724
        if err != nil {
139✔
2725
                return nil, err
×
2726
        }
×
2727

2728
        // With the edge data deleted, we can purge the information from the two
2729
        // edge indexes.
2730
        if err := edgeIndex.Delete(chanID); err != nil {
139✔
2731
                return nil, err
×
2732
        }
×
2733
        var b bytes.Buffer
139✔
2734
        if err := WriteOutpoint(&b, &edgeInfo.ChannelPoint); err != nil {
139✔
2735
                return nil, err
×
2736
        }
×
2737
        if err := chanIndex.Delete(b.Bytes()); err != nil {
139✔
2738
                return nil, err
×
2739
        }
×
2740

2741
        // Finally, we'll mark the edge as a zombie within our index if it's
2742
        // being removed due to the channel becoming a zombie. We do this to
2743
        // ensure we don't store unnecessary data for spent channels.
2744
        if !isZombie {
256✔
2745
                return &edgeInfo, nil
117✔
2746
        }
117✔
2747

2748
        nodeKey1, nodeKey2 := edgeInfo.NodeKey1Bytes, edgeInfo.NodeKey2Bytes
25✔
2749
        if strictZombie {
28✔
2750
                var e1UpdateTime, e2UpdateTime *time.Time
3✔
2751
                if edge1 != nil {
5✔
2752
                        e1UpdateTime = &edge1.LastUpdate
2✔
2753
                }
2✔
2754
                if edge2 != nil {
6✔
2755
                        e2UpdateTime = &edge2.LastUpdate
3✔
2756
                }
3✔
2757

2758
                nodeKey1, nodeKey2 = makeZombiePubkeys(
3✔
2759
                        &edgeInfo, e1UpdateTime, e2UpdateTime,
3✔
2760
                )
3✔
2761
        }
2762

2763
        return &edgeInfo, markEdgeZombie(
25✔
2764
                zombieIndex, byteOrder.Uint64(chanID), nodeKey1, nodeKey2,
25✔
2765
        )
25✔
2766
}
2767

2768
// makeZombiePubkeys derives the node pubkeys to store in the zombie index for a
2769
// particular pair of channel policies. The return values are one of:
2770
//  1. (pubkey1, pubkey2)
2771
//  2. (pubkey1, blank)
2772
//  3. (blank, pubkey2)
2773
//
2774
// A blank pubkey means that corresponding node will be unable to resurrect a
2775
// channel on its own. For example, node1 may continue to publish recent
2776
// updates, but node2 has fallen way behind. After marking an edge as a zombie,
2777
// we don't want another fresh update from node1 to resurrect, as the edge can
2778
// only become live once node2 finally sends something recent.
2779
//
2780
// In the case where we have neither update, we allow either party to resurrect
2781
// the channel. If the channel were to be marked zombie again, it would be
2782
// marked with the correct lagging channel since we received an update from only
2783
// one side.
2784
func makeZombiePubkeys(info *models.ChannelEdgeInfo,
2785
        e1, e2 *time.Time) ([33]byte, [33]byte) {
3✔
2786

3✔
2787
        switch {
3✔
2788
        // If we don't have either edge policy, we'll return both pubkeys so
2789
        // that the channel can be resurrected by either party.
UNCOV
2790
        case e1 == nil && e2 == nil:
×
UNCOV
2791
                return info.NodeKey1Bytes, info.NodeKey2Bytes
×
2792

2793
        // If we're missing edge1, or if both edges are present but edge1 is
2794
        // older, we'll return edge1's pubkey and a blank pubkey for edge2. This
2795
        // means that only an update from edge1 will be able to resurrect the
2796
        // channel.
2797
        case e1 == nil || (e2 != nil && e1.Before(*e2)):
1✔
2798
                return info.NodeKey1Bytes, [33]byte{}
1✔
2799

2800
        // Otherwise, we're missing edge2 or edge2 is the older side, so we
2801
        // return a blank pubkey for edge1. In this case, only an update from
2802
        // edge2 can resurect the channel.
2803
        default:
2✔
2804
                return [33]byte{}, info.NodeKey2Bytes
2✔
2805
        }
2806
}
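// Illustrative sketch (not part of the original file): with only node1's
// update known and node2 lagging, only node2 can resurrect the channel. The
// edgeInfo variable below is a hypothetical models.ChannelEdgeInfo:
//
//	t1 := time.Now()
//	pk1, pk2 := makeZombiePubkeys(&edgeInfo, &t1, nil)
//	// pk1 == [33]byte{}, pk2 == edgeInfo.NodeKey2Bytes, so only a fresh
//	// update from node2 will be considered for resurrection.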
2807

2808
// UpdateEdgePolicy updates the edge routing policy for a single directed edge
2809
// within the database for the referenced channel. The `flags` attribute within
2810
// the ChannelEdgePolicy determines which of the directed edges are being
2811
// updated. If the flag is 1, then the first node's information is being
2812
// updated, otherwise it's the second node's information. The node ordering is
2813
// determined by the lexicographical ordering of the identity public keys of the
2814
// nodes on either side of the channel.
2815
func (c *KVStore) UpdateEdgePolicy(ctx context.Context,
2816
        edge *models.ChannelEdgePolicy,
2817
        opts ...batch.SchedulerOption) (route.Vertex, route.Vertex, error) {
2,668✔
2818

2,668✔
2819
        var (
2,668✔
2820
                isUpdate1    bool
2,668✔
2821
                edgeNotFound bool
2,668✔
2822
                from, to     route.Vertex
2,668✔
2823
        )
2,668✔
2824

2,668✔
2825
        r := &batch.Request[kvdb.RwTx]{
2,668✔
2826
                Opts: batch.NewSchedulerOptions(opts...),
2,668✔
2827
                Reset: func() {
5,337✔
2828
                        isUpdate1 = false
2,669✔
2829
                        edgeNotFound = false
2,669✔
2830
                },
2,669✔
2831
                Do: func(tx kvdb.RwTx) error {
2,669✔
2832
                        var err error
2,669✔
2833
                        from, to, isUpdate1, err = updateEdgePolicy(tx, edge)
2,669✔
2834
                        if err != nil {
2,674✔
2835
                                log.Errorf("UpdateEdgePolicy faild: %v", err)
5✔
2836
                        }
5✔
2837

2838
                        // Silence ErrEdgeNotFound so that the batch can
2839
                        // succeed, but propagate the error via local state.
2840
                        if errors.Is(err, ErrEdgeNotFound) {
2,672✔
2841
                                edgeNotFound = true
3✔
2842
                                return nil
3✔
2843
                        }
3✔
2844

2845
                        return err
2,666✔
2846
                },
2847
                OnCommit: func(err error) error {
2,668✔
2848
                        switch {
2,668✔
2849
                        case err != nil:
1✔
2850
                                return err
1✔
2851
                        case edgeNotFound:
3✔
2852
                                return ErrEdgeNotFound
3✔
2853
                        default:
2,664✔
2854
                                c.updateEdgeCache(edge, isUpdate1)
2,664✔
2855
                                return nil
2,664✔
2856
                        }
2857
                },
2858
        }
2859

2860
        err := c.chanScheduler.Execute(ctx, r)
2,668✔
2861

2,668✔
2862
        return from, to, err
2,668✔
2863
}
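// Illustrative usage sketch (not part of the original file): a single policy
// update scheduled through the batch scheduler with default options. The
// store, ctx and policy variables below are hypothetical:
//
//	from, to, err := store.UpdateEdgePolicy(ctx, policy)
//	switch {
//	case errors.Is(err, ErrEdgeNotFound):
//		// The referenced channel is unknown; the batch itself still
//		// committed and the error is surfaced per request.
//	case err != nil:
//		// handle error
//	default:
//		_ = from // node the updated policy originates from
//		_ = to   // node on the other end of the channel
//	}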
2864

2865
func (c *KVStore) updateEdgeCache(e *models.ChannelEdgePolicy,
2866
        isUpdate1 bool) {
2,664✔
2867

2,664✔
2868
        // If an entry for this channel is found in reject cache, we'll modify
2,664✔
2869
        // the entry with the updated timestamp for the direction that was just
2,664✔
2870
        // written. If the edge doesn't exist, we'll load the cache entry lazily
2,664✔
2871
        // during the next query for this edge.
2,664✔
2872
        if entry, ok := c.rejectCache.get(e.ChannelID); ok {
2,672✔
2873
                if isUpdate1 {
14✔
2874
                        entry.upd1Time = e.LastUpdate.Unix()
6✔
2875
                } else {
11✔
2876
                        entry.upd2Time = e.LastUpdate.Unix()
5✔
2877
                }
5✔
2878
                c.rejectCache.insert(e.ChannelID, entry)
8✔
2879
        }
2880

2881
        // If an entry for this channel is found in channel cache, we'll modify
2882
        // the entry with the updated policy for the direction that was just
2883
        // written. If the edge doesn't exist, we'll defer loading the info and
2884
        // policies and lazily read from disk during the next query.
2885
        if channel, ok := c.chanCache.get(e.ChannelID); ok {
2,667✔
2886
                if isUpdate1 {
6✔
2887
                        channel.Policy1 = e
3✔
2888
                } else {
6✔
2889
                        channel.Policy2 = e
3✔
2890
                }
3✔
2891
                c.chanCache.insert(e.ChannelID, channel)
3✔
2892
        }
2893
}
2894

2895
// updateEdgePolicy attempts to update an edge's policy within the relevant
2896
// buckets using an existing database transaction. The returned boolean will be
2897
// true if the updated policy belongs to node1, and false if the policy belonged
2898
// to node2.
2899
func updateEdgePolicy(tx kvdb.RwTx, edge *models.ChannelEdgePolicy) (
2900
        route.Vertex, route.Vertex, bool, error) {
2,669✔
2901

2,669✔
2902
        var noVertex route.Vertex
2,669✔
2903

2,669✔
2904
        edges := tx.ReadWriteBucket(edgeBucket)
2,669✔
2905
        if edges == nil {
2,669✔
2906
                return noVertex, noVertex, false, ErrEdgeNotFound
×
2907
        }
×
2908
        edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
2,669✔
2909
        if edgeIndex == nil {
2,669✔
2910
                return noVertex, noVertex, false, ErrEdgeNotFound
×
2911
        }
×
2912

2913
        // Create the channelID key by converting the channel ID
2914
        // integer into a byte slice.
2915
        var chanID [8]byte
2,669✔
2916
        byteOrder.PutUint64(chanID[:], edge.ChannelID)
2,669✔
2917

2,669✔
2918
        // With the channel ID, we then fetch the value storing the two
2,669✔
2919
        // nodes which connect this channel edge.
2,669✔
2920
        nodeInfo := edgeIndex.Get(chanID[:])
2,669✔
2921
        if nodeInfo == nil {
2,672✔
2922
                return noVertex, noVertex, false, ErrEdgeNotFound
3✔
2923
        }
3✔
2924

2925
        // Depending on the flags value passed above, either the first
2926
        // or second edge policy is being updated.
2927
        var fromNode, toNode []byte
2,666✔
2928
        var isUpdate1 bool
2,666✔
2929
        if edge.ChannelFlags&lnwire.ChanUpdateDirection == 0 {
4,002✔
2930
                fromNode = nodeInfo[:33]
1,336✔
2931
                toNode = nodeInfo[33:66]
1,336✔
2932
                isUpdate1 = true
1,336✔
2933
        } else {
2,669✔
2934
                fromNode = nodeInfo[33:66]
1,333✔
2935
                toNode = nodeInfo[:33]
1,333✔
2936
                isUpdate1 = false
1,333✔
2937
        }
1,333✔
2938

2939
        // Finally, with the direction of the edge being updated
2940
        // identified, we update the on-disk edge representation.
2941
        err := putChanEdgePolicy(edges, edge, fromNode, toNode)
2,666✔
2942
        if err != nil {
2,668✔
2943
                return noVertex, noVertex, false, err
2✔
2944
        }
2✔
2945

2946
        var (
2,664✔
2947
                fromNodePubKey route.Vertex
2,664✔
2948
                toNodePubKey   route.Vertex
2,664✔
2949
        )
2,664✔
2950
        copy(fromNodePubKey[:], fromNode)
2,664✔
2951
        copy(toNodePubKey[:], toNode)
2,664✔
2952

2,664✔
2953
        return fromNodePubKey, toNodePubKey, isUpdate1, nil
2,664✔
2954
}
2955

2956
// isPublic determines whether the node is seen as public within the graph from
2957
// the source node's point of view. An existing database transaction can also be
2958
// specified.
2959
func (c *KVStore) isPublic(tx kvdb.RTx, nodePub route.Vertex,
2960
        sourcePubKey []byte) (bool, error) {
16✔
2961

16✔
2962
        // In order to determine whether this node is publicly advertised within
16✔
2963
        // the graph, we'll need to look at all of its edges and check whether
16✔
2964
        // they extend to any other node than the source node. errDone will be
16✔
2965
        // used to terminate the check early.
16✔
2966
        nodeIsPublic := false
16✔
2967
        errDone := errors.New("done")
16✔
2968
        err := c.forEachNodeChannelTx(tx, nodePub, func(tx kvdb.RTx,
16✔
2969
                info *models.ChannelEdgeInfo, _ *models.ChannelEdgePolicy,
16✔
2970
                _ *models.ChannelEdgePolicy) error {
29✔
2971

13✔
2972
                // If this edge doesn't extend to the source node, we'll
13✔
2973
                // terminate our search as we can now conclude that the node is
13✔
2974
                // publicly advertised within the graph due to the local node
13✔
2975
                // knowing of the current edge.
13✔
2976
                if !bytes.Equal(info.NodeKey1Bytes[:], sourcePubKey) &&
13✔
2977
                        !bytes.Equal(info.NodeKey2Bytes[:], sourcePubKey) {
19✔
2978

6✔
2979
                        nodeIsPublic = true
6✔
2980
                        return errDone
6✔
2981
                }
6✔
2982

2983
                // Since the edge _does_ extend to the source node, we'll also
2984
                // need to ensure that this is a public edge.
2985
                if info.AuthProof != nil {
19✔
2986
                        nodeIsPublic = true
9✔
2987
                        return errDone
9✔
2988
                }
9✔
2989

2990
                // Otherwise, we'll continue our search.
2991
                return nil
4✔
2992
        })
2993
        if err != nil && !errors.Is(err, errDone) {
16✔
2994
                return false, err
×
2995
        }
×
2996

2997
        return nodeIsPublic, nil
16✔
2998
}
2999

3000
// FetchLightningNodeTx attempts to look up a target node by its identity
3001
// public key. If the node isn't found in the database, then
3002
// ErrGraphNodeNotFound is returned. An optional transaction may be provided.
3003
// If none is provided, then a new one will be created.
3004
func (c *KVStore) FetchLightningNodeTx(tx kvdb.RTx, nodePub route.Vertex) (
3005
        *models.LightningNode, error) {
3,654✔
3006

3,654✔
3007
        return c.fetchLightningNode(tx, nodePub)
3,654✔
3008
}
3,654✔
3009

3010
// FetchLightningNode attempts to look up a target node by its identity public
3011
// key. If the node isn't found in the database, then ErrGraphNodeNotFound is
3012
// returned.
3013
func (c *KVStore) FetchLightningNode(_ context.Context,
3014
        nodePub route.Vertex) (*models.LightningNode, error) {
162✔
3015

162✔
3016
        return c.fetchLightningNode(nil, nodePub)
162✔
3017
}
162✔
3018

3019
// fetchLightningNode attempts to look up a target node by its identity public
3020
// key. If the node isn't found in the database, then ErrGraphNodeNotFound is
3021
// returned. An optional transaction may be provided. If none is provided, then
3022
// a new one will be created.
3023
func (c *KVStore) fetchLightningNode(tx kvdb.RTx,
3024
        nodePub route.Vertex) (*models.LightningNode, error) {
3,813✔
3025

3,813✔
3026
        var node *models.LightningNode
3,813✔
3027
        fetch := func(tx kvdb.RTx) error {
7,626✔
3028
                // First grab the nodes bucket which stores the mapping from
3,813✔
3029
                // pubKey to node information.
3,813✔
3030
                nodes := tx.ReadBucket(nodeBucket)
3,813✔
3031
                if nodes == nil {
3,813✔
3032
                        return ErrGraphNotFound
×
3033
                }
×
3034

3035
                // If a key for this serialized public key isn't found, then
3036
                // the target node doesn't exist within the database.
3037
                nodeBytes := nodes.Get(nodePub[:])
3,813✔
3038
                if nodeBytes == nil {
3,831✔
3039
                        return ErrGraphNodeNotFound
18✔
3040
                }
18✔
3041

3042
                // If the node is found, then we can deserialize the node
3043
                // information to return to the user.
3044
                nodeReader := bytes.NewReader(nodeBytes)
3,798✔
3045
                n, err := deserializeLightningNode(nodeReader)
3,798✔
3046
                if err != nil {
3,798✔
3047
                        return err
×
3048
                }
×
3049

3050
                node = &n
3,798✔
3051

3,798✔
3052
                return nil
3,798✔
3053
        }
3054

3055
        if tx == nil {
3,999✔
3056
                err := kvdb.View(
186✔
3057
                        c.db, fetch, func() {
372✔
3058
                                node = nil
186✔
3059
                        },
186✔
3060
                )
3061
                if err != nil {
193✔
3062
                        return nil, err
7✔
3063
                }
7✔
3064

3065
                return node, nil
182✔
3066
        }
3067

3068
        err := fetch(tx)
3,627✔
3069
        if err != nil {
3,638✔
3070
                return nil, err
11✔
3071
        }
11✔
3072

3073
        return node, nil
3,616✔
3074
}
3075

3076
// HasLightningNode determines if the graph has a vertex identified by the
3077
// target node identity public key. If the node exists in the database, a
3078
// timestamp of when the data for the node was last updated is returned along
3079
// with a true boolean. Otherwise, an empty time.Time is returned with a false
3080
// boolean.
3081
func (c *KVStore) HasLightningNode(_ context.Context,
3082
        nodePub [33]byte) (time.Time, bool, error) {
20✔
3083

20✔
3084
        var (
20✔
3085
                updateTime time.Time
20✔
3086
                exists     bool
20✔
3087
        )
20✔
3088

20✔
3089
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
40✔
3090
                // First grab the nodes bucket which stores the mapping from
20✔
3091
                // pubKey to node information.
20✔
3092
                nodes := tx.ReadBucket(nodeBucket)
20✔
3093
                if nodes == nil {
20✔
3094
                        return ErrGraphNotFound
×
3095
                }
×
3096

3097
                // If a key for this serialized public key isn't found, we can
3098
                // exit early.
3099
                nodeBytes := nodes.Get(nodePub[:])
20✔
3100
                if nodeBytes == nil {
26✔
3101
                        exists = false
6✔
3102
                        return nil
6✔
3103
                }
6✔
3104

3105
                // Otherwise we continue on to obtain the time stamp
3106
                // representing the last time the data for this node was
3107
                // updated.
3108
                nodeReader := bytes.NewReader(nodeBytes)
17✔
3109
                node, err := deserializeLightningNode(nodeReader)
17✔
3110
                if err != nil {
17✔
3111
                        return err
×
3112
                }
×
3113

3114
                exists = true
17✔
3115
                updateTime = node.LastUpdate
17✔
3116

17✔
3117
                return nil
17✔
3118
        }, func() {
20✔
3119
                updateTime = time.Time{}
20✔
3120
                exists = false
20✔
3121
        })
20✔
3122
        if err != nil {
20✔
3123
                return time.Time{}, exists, err
×
3124
        }
×
3125

3126
        return updateTime, exists, nil
20✔
3127
}
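// Illustrative usage sketch (not part of the original file): deciding whether
// an incoming node announcement is newer than what we already have. The
// store, ctx, pub and incomingTimestamp variables below are hypothetical:
//
//	lastUpdate, exists, err := store.HasLightningNode(ctx, pub)
//	if err != nil {
//		// handle error
//	}
//	if !exists || lastUpdate.Before(incomingTimestamp) {
//		// the announcement is new or fresher than our copy; process it
//	}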
3128

3129
// nodeTraversal is used to traverse all channels of a node given by its
3130
// public key and passes channel information into the specified callback.
3131
func nodeTraversal(tx kvdb.RTx, nodePub []byte, db kvdb.Backend,
3132
        cb func(kvdb.RTx, *models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
3133
                *models.ChannelEdgePolicy) error) error {
1,270✔
3134

1,270✔
3135
        traversal := func(tx kvdb.RTx) error {
2,540✔
3136
                edges := tx.ReadBucket(edgeBucket)
1,270✔
3137
                if edges == nil {
1,270✔
3138
                        return ErrGraphNotFound
×
3139
                }
×
3140
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
1,270✔
3141
                if edgeIndex == nil {
1,270✔
3142
                        return ErrGraphNoEdgesFound
×
3143
                }
×
3144

3145
                // In order to reach all the edges for this node, we take
3146
                // advantage of the construction of the key-space within the
3147
                // edge bucket. The keys are stored in the form: pubKey ||
3148
                // chanID. Therefore, starting from a chanID of zero, we can
3149
                // scan forward in the bucket, grabbing all the edges for the
3150
                // node. Once the prefix no longer matches, then we know we're
3151
                // done.
3152
                var nodeStart [33 + 8]byte
1,270✔
3153
                copy(nodeStart[:], nodePub)
1,270✔
3154
                copy(nodeStart[33:], chanStart[:])
1,270✔
3155

1,270✔
3156
                // Starting from the key pubKey || 0, we seek forward in the
1,270✔
3157
                // bucket until the retrieved key no longer has the public key
1,270✔
3158
                // as its prefix. This indicates that we've stepped over into
1,270✔
3159
                // another node's edges, so we can terminate our scan.
1,270✔
3160
                edgeCursor := edges.ReadCursor()
1,270✔
3161
                for nodeEdge, _ := edgeCursor.Seek(nodeStart[:]); bytes.HasPrefix(nodeEdge, nodePub); nodeEdge, _ = edgeCursor.Next() { //nolint:ll
5,115✔
3162
                        // If the prefix still matches, the channel id is
3,845✔
3163
                        // returned in nodeEdge. Channel id is used to lookup
3,845✔
3164
                        // the node at the other end of the channel and both
3,845✔
3165
                        // edge policies.
3,845✔
3166
                        chanID := nodeEdge[33:]
3,845✔
3167
                        edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID)
3,845✔
3168
                        if err != nil {
3,845✔
3169
                                return err
×
3170
                        }
×
3171

3172
                        outgoingPolicy, err := fetchChanEdgePolicy(
3,845✔
3173
                                edges, chanID, nodePub,
3,845✔
3174
                        )
3,845✔
3175
                        if err != nil {
3,845✔
3176
                                return err
×
3177
                        }
×
3178

3179
                        otherNode, err := edgeInfo.OtherNodeKeyBytes(nodePub)
3,845✔
3180
                        if err != nil {
3,845✔
3181
                                return err
×
3182
                        }
×
3183

3184
                        incomingPolicy, err := fetchChanEdgePolicy(
3,845✔
3185
                                edges, chanID, otherNode[:],
3,845✔
3186
                        )
3,845✔
3187
                        if err != nil {
3,845✔
3188
                                return err
×
3189
                        }
×
3190

3191
                        // Finally, we execute the callback.
3192
                        err = cb(tx, &edgeInfo, outgoingPolicy, incomingPolicy)
3,845✔
3193
                        if err != nil {
3,857✔
3194
                                return err
12✔
3195
                        }
12✔
3196
                }
3197

3198
                return nil
1,261✔
3199
        }
3200

3201
        // If no transaction was provided, then we'll create a new transaction
3202
        // to execute the transaction within.
3203
        if tx == nil {
1,302✔
3204
                return kvdb.View(db, traversal, func() {})
64✔
3205
        }
3206

3207
        // Otherwise, we re-use the existing transaction to execute the graph
3208
        // traversal.
3209
        return traversal(tx)
1,241✔
3210
}
3211

3212
// ForEachNodeChannel iterates through all channels of the given node,
3213
// executing the passed callback with an edge info structure and the policies
3214
// of each end of the channel. The first edge policy is the outgoing edge *to*
3215
// the connecting node, while the second is the incoming edge *from* the
3216
// connecting node. If the callback returns an error, then the iteration is
3217
// halted with the error propagated back up to the caller.
3218
//
3219
// Unknown policies are passed into the callback as nil values.
3220
func (c *KVStore) ForEachNodeChannel(nodePub route.Vertex,
3221
        cb func(*models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
3222
                *models.ChannelEdgePolicy) error) error {
9✔
3223

9✔
3224
        return nodeTraversal(nil, nodePub[:], c.db, func(_ kvdb.RTx,
9✔
3225
                info *models.ChannelEdgeInfo, policy,
9✔
3226
                policy2 *models.ChannelEdgePolicy) error {
22✔
3227

13✔
3228
                return cb(info, policy, policy2)
13✔
3229
        })
13✔
3230
}
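// Illustrative usage sketch (not part of the original file): counting a node's
// channels and noting how many policies are still unknown (nil). The store and
// nodePub variables below are hypothetical:
//
//	var total, missingPolicies int
//	err := store.ForEachNodeChannel(nodePub, func(info *models.ChannelEdgeInfo,
//		out, in *models.ChannelEdgePolicy) error {
//
//		total++
//		if out == nil || in == nil {
//			missingPolicies++
//		}
//		return nil
//	})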
3231

3232
// ForEachSourceNodeChannel iterates through all channels of the source node,
3233
// executing the passed callback on each. The callback is provided with the
3234
// channel's outpoint, whether we have a policy for the channel and the channel
3235
// peer's node information.
3236
func (c *KVStore) ForEachSourceNodeChannel(cb func(chanPoint wire.OutPoint,
3237
        havePolicy bool, otherNode *models.LightningNode) error) error {
4✔
3238

4✔
3239
        return kvdb.View(c.db, func(tx kvdb.RTx) error {
8✔
3240
                nodes := tx.ReadBucket(nodeBucket)
4✔
3241
                if nodes == nil {
4✔
3242
                        return ErrGraphNotFound
×
3243
                }
×
3244

3245
                node, err := c.sourceNode(nodes)
4✔
3246
                if err != nil {
4✔
3247
                        return err
×
3248
                }
×
3249

3250
                return nodeTraversal(
4✔
3251
                        tx, node.PubKeyBytes[:], c.db, func(tx kvdb.RTx,
4✔
3252
                                info *models.ChannelEdgeInfo,
4✔
3253
                                policy, _ *models.ChannelEdgePolicy) error {
9✔
3254

5✔
3255
                                peer, err := c.fetchOtherNode(
5✔
3256
                                        tx, info, node.PubKeyBytes[:],
5✔
3257
                                )
5✔
3258
                                if err != nil {
5✔
3259
                                        return err
×
3260
                                }
×
3261

3262
                                return cb(
5✔
3263
                                        info.ChannelPoint, policy != nil, peer,
5✔
3264
                                )
5✔
3265
                        },
3266
                )
3267
        }, func() {})
4✔
3268
}
3269

3270
// forEachNodeChannelTx iterates through all channels of the given node,
3271
// executing the passed callback with an edge info structure and the policies
3272
// of each end of the channel. The first edge policy is the outgoing edge *to*
3273
// the connecting node, while the second is the incoming edge *from* the
3274
// connecting node. If the callback returns an error, then the iteration is
3275
// halted with the error propagated back up to the caller.
3276
//
3277
// Unknown policies are passed into the callback as nil values.
3278
//
3279
// If the caller wishes to re-use an existing boltdb transaction, then it
3280
// should be passed as the first argument.  Otherwise, the first argument should
3281
// be nil and a fresh transaction will be created to execute the graph
3282
// traversal.
3283
func (c *KVStore) forEachNodeChannelTx(tx kvdb.RTx,
3284
        nodePub route.Vertex, cb func(kvdb.RTx, *models.ChannelEdgeInfo,
3285
                *models.ChannelEdgePolicy,
3286
                *models.ChannelEdgePolicy) error) error {
1,001✔
3287

1,001✔
3288
        return nodeTraversal(tx, nodePub[:], c.db, cb)
1,001✔
3289
}
1,001✔
3290

3291
// fetchOtherNode attempts to fetch the full LightningNode that's opposite of
3292
// the target node in the channel. This is useful when one knows the pubkey of
3293
// one of the nodes, and wishes to obtain the full LightningNode for the other
3294
// end of the channel.
3295
func (c *KVStore) fetchOtherNode(tx kvdb.RTx,
3296
        channel *models.ChannelEdgeInfo, thisNodeKey []byte) (
3297
        *models.LightningNode, error) {
5✔
3298

5✔
3299
        // Ensure that the node passed in is actually a member of the channel.
5✔
3300
        var targetNodeBytes [33]byte
5✔
3301
        switch {
5✔
3302
        case bytes.Equal(channel.NodeKey1Bytes[:], thisNodeKey):
5✔
3303
                targetNodeBytes = channel.NodeKey2Bytes
5✔
3304
        case bytes.Equal(channel.NodeKey2Bytes[:], thisNodeKey):
3✔
3305
                targetNodeBytes = channel.NodeKey1Bytes
3✔
3306
        default:
×
3307
                return nil, fmt.Errorf("node not participating in this channel")
×
3308
        }
3309

3310
        var targetNode *models.LightningNode
5✔
3311
        fetchNodeFunc := func(tx kvdb.RTx) error {
10✔
3312
                // First grab the nodes bucket which stores the mapping from
5✔
3313
                // pubKey to node information.
5✔
3314
                nodes := tx.ReadBucket(nodeBucket)
5✔
3315
                if nodes == nil {
5✔
3316
                        return ErrGraphNotFound
×
3317
                }
×
3318

3319
                node, err := fetchLightningNode(nodes, targetNodeBytes[:])
5✔
3320
                if err != nil {
5✔
3321
                        return err
×
3322
                }
×
3323

3324
                targetNode = &node
5✔
3325

5✔
3326
                return nil
5✔
3327
        }
3328

3329
        // If the transaction is nil, then we'll need to create a new one,
3330
        // otherwise we can use the existing db transaction.
3331
        var err error
5✔
3332
        if tx == nil {
5✔
3333
                err = kvdb.View(c.db, fetchNodeFunc, func() {
×
3334
                        targetNode = nil
×
3335
                })
×
3336
        } else {
5✔
3337
                err = fetchNodeFunc(tx)
5✔
3338
        }
5✔
3339

3340
        return targetNode, err
5✔
3341
}
3342

3343
// computeEdgePolicyKeys is a helper function that can be used to compute the
3344
// keys used to index the channel edge policy info for the two nodes of the
3345
// edge. The keys for node 1 and node 2 are returned respectively.
3346
func computeEdgePolicyKeys(info *models.ChannelEdgeInfo) ([]byte, []byte) {
25✔
3347
        var (
25✔
3348
                node1Key [33 + 8]byte
25✔
3349
                node2Key [33 + 8]byte
25✔
3350
        )
25✔
3351

25✔
3352
        copy(node1Key[:], info.NodeKey1Bytes[:])
25✔
3353
        copy(node2Key[:], info.NodeKey2Bytes[:])
25✔
3354

25✔
3355
        byteOrder.PutUint64(node1Key[33:], info.ChannelID)
25✔
3356
        byteOrder.PutUint64(node2Key[33:], info.ChannelID)
25✔
3357

25✔
3358
        return node1Key[:], node2Key[:]
25✔
3359
}
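// Illustrative sketch (not part of the original file): each policy key is the
// 33-byte node public key followed by the 8-byte channel ID, the same
// pubKey || chanID layout that nodeTraversal seeks over:
//
//	var key [33 + 8]byte
//	copy(key[:33], info.NodeKey1Bytes[:])
//	byteOrder.PutUint64(key[33:], info.ChannelID)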
25✔
3360

3361
// FetchChannelEdgesByOutpoint attempts to lookup the two directed edges for
3362
// the channel identified by the funding outpoint. If the channel can't be
3363
// found, then ErrEdgeNotFound is returned. A struct which houses the general
3364
// information for the channel itself is returned as well as two structs that
3365
// contain the routing policies for the channel in either direction.
3366
func (c *KVStore) FetchChannelEdgesByOutpoint(op *wire.OutPoint) (
3367
        *models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
3368
        *models.ChannelEdgePolicy, error) {
14✔
3369

14✔
3370
        var (
14✔
3371
                edgeInfo *models.ChannelEdgeInfo
14✔
3372
                policy1  *models.ChannelEdgePolicy
14✔
3373
                policy2  *models.ChannelEdgePolicy
14✔
3374
        )
14✔
3375

14✔
3376
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
28✔
3377
                // First, grab the node bucket. This will be used to populate
14✔
3378
                // the Node pointers in each edge read from disk.
14✔
3379
                nodes := tx.ReadBucket(nodeBucket)
14✔
3380
                if nodes == nil {
14✔
3381
                        return ErrGraphNotFound
×
3382
                }
×
3383

3384
                // Next, grab the edge bucket which stores the edges, and also
3385
                // the index itself so we can group the directed edges together
3386
                // logically.
3387
                edges := tx.ReadBucket(edgeBucket)
14✔
3388
                if edges == nil {
14✔
3389
                        return ErrGraphNoEdgesFound
×
3390
                }
×
3391
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
14✔
3392
                if edgeIndex == nil {
14✔
3393
                        return ErrGraphNoEdgesFound
×
3394
                }
×
3395

3396
                // If the channel's outpoint doesn't exist within the outpoint
3397
                // index, then the edge does not exist.
3398
                chanIndex := edges.NestedReadBucket(channelPointBucket)
14✔
3399
                if chanIndex == nil {
14✔
3400
                        return ErrGraphNoEdgesFound
×
3401
                }
×
3402
                var b bytes.Buffer
14✔
3403
                if err := WriteOutpoint(&b, op); err != nil {
14✔
3404
                        return err
×
3405
                }
×
3406
                chanID := chanIndex.Get(b.Bytes())
14✔
3407
                if chanID == nil {
27✔
3408
                        return fmt.Errorf("%w: op=%v", ErrEdgeNotFound, op)
13✔
3409
                }
13✔
3410

3411
                // If the channel is found to exist, then we'll first retrieve
3412
                // the general information for the channel.
3413
                edge, err := fetchChanEdgeInfo(edgeIndex, chanID)
4✔
3414
                if err != nil {
4✔
3415
                        return fmt.Errorf("%w: chanID=%x", err, chanID)
×
3416
                }
×
3417
                edgeInfo = &edge
4✔
3418

4✔
3419
                // Once we have the information about the channels' parameters,
4✔
3420
                // we'll fetch the routing policies for each of the directed
4✔
3421
                // edges.
4✔
3422
                e1, e2, err := fetchChanEdgePolicies(edgeIndex, edges, chanID)
4✔
3423
                if err != nil {
4✔
3424
                        return fmt.Errorf("failed to find policy: %w", err)
×
3425
                }
×
3426

3427
                policy1 = e1
4✔
3428
                policy2 = e2
4✔
3429

4✔
3430
                return nil
4✔
3431
        }, func() {
14✔
3432
                edgeInfo = nil
14✔
3433
                policy1 = nil
14✔
3434
                policy2 = nil
14✔
3435
        })
14✔
3436
        if err != nil {
27✔
3437
                return nil, nil, nil, err
13✔
3438
        }
13✔
3439

3440
        return edgeInfo, policy1, policy2, nil
4✔
3441
}
3442

3443
// FetchChannelEdgesByID attempts to lookup the two directed edges for the
3444
// channel identified by the channel ID. If the channel can't be found, then
3445
// ErrEdgeNotFound is returned. A struct which houses the general information
3446
// for the channel itself is returned as well as two structs that contain the
3447
// routing policies for the channel in either direction.
3448
//
3449
// ErrZombieEdge can be returned if the edge is currently marked as a zombie
3450
// within the database. In this case, the ChannelEdgePolicies will be nil, and
3451
// the ChannelEdgeInfo will only include the public keys of each node.
3452
func (c *KVStore) FetchChannelEdgesByID(chanID uint64) (
3453
        *models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
3454
        *models.ChannelEdgePolicy, error) {
2,686✔
3455

2,686✔
3456
        var (
2,686✔
3457
                edgeInfo  *models.ChannelEdgeInfo
2,686✔
3458
                policy1   *models.ChannelEdgePolicy
2,686✔
3459
                policy2   *models.ChannelEdgePolicy
2,686✔
3460
                channelID [8]byte
2,686✔
3461
        )
2,686✔
3462

2,686✔
3463
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
5,372✔
3464
                // First, grab the node bucket. This will be used to populate
2,686✔
3465
                // the Node pointers in each edge read from disk.
2,686✔
3466
                nodes := tx.ReadBucket(nodeBucket)
2,686✔
3467
                if nodes == nil {
2,686✔
3468
                        return ErrGraphNotFound
×
3469
                }
×
3470

3471
                // Next, grab the edge bucket which stores the edges, and also
3472
                // the index itself so we can group the directed edges together
3473
                // logically.
3474
                edges := tx.ReadBucket(edgeBucket)
2,686✔
3475
                if edges == nil {
2,686✔
3476
                        return ErrGraphNoEdgesFound
×
3477
                }
×
3478
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
2,686✔
3479
                if edgeIndex == nil {
2,686✔
3480
                        return ErrGraphNoEdgesFound
×
3481
                }
×
3482

3483
                byteOrder.PutUint64(channelID[:], chanID)
2,686✔
3484

2,686✔
3485
                // Now, attempt to fetch edge.
2,686✔
3486
                edge, err := fetchChanEdgeInfo(edgeIndex, channelID[:])
2,686✔
3487

2,686✔
3488
                // If it doesn't exist, we'll quickly check our zombie index to
2,686✔
3489
                // see if we've previously marked it as such.
2,686✔
3490
                if errors.Is(err, ErrEdgeNotFound) {
2,690✔
3491
                        // If the zombie index doesn't exist, or the edge is not
4✔
3492
                        // marked as a zombie within it, then we'll return the
4✔
3493
                        // original ErrEdgeNotFound error.
4✔
3494
                        zombieIndex := edges.NestedReadBucket(zombieBucket)
4✔
3495
                        if zombieIndex == nil {
4✔
3496
                                return ErrEdgeNotFound
×
3497
                        }
×
3498

3499
                        isZombie, pubKey1, pubKey2 := isZombieEdge(
4✔
3500
                                zombieIndex, chanID,
4✔
3501
                        )
4✔
3502
                        if !isZombie {
7✔
3503
                                return ErrEdgeNotFound
3✔
3504
                        }
3✔
3505

3506
                        // Otherwise, the edge is marked as a zombie, so we'll
3507
                        // populate the edge info with the public keys of each
3508
                        // party as this is the only information we have about
3509
                        // it and return an error signaling so.
3510
                        edgeInfo = &models.ChannelEdgeInfo{
4✔
3511
                                NodeKey1Bytes: pubKey1,
4✔
3512
                                NodeKey2Bytes: pubKey2,
4✔
3513
                        }
4✔
3514

4✔
3515
                        return ErrZombieEdge
4✔
3516
                }
3517

3518
                // Otherwise, we'll just return the error if any.
3519
                if err != nil {
2,685✔
3520
                        return err
×
3521
                }
×
3522

3523
                edgeInfo = &edge
2,685✔
3524

2,685✔
3525
                // Then we'll attempt to fetch the accompanying policies of this
2,685✔
3526
                // edge.
2,685✔
3527
                e1, e2, err := fetchChanEdgePolicies(
2,685✔
3528
                        edgeIndex, edges, channelID[:],
2,685✔
3529
                )
2,685✔
3530
                if err != nil {
2,685✔
3531
                        return err
×
3532
                }
×
3533

3534
                policy1 = e1
2,685✔
3535
                policy2 = e2
2,685✔
3536

2,685✔
3537
                return nil
2,685✔
3538
        }, func() {
2,686✔
3539
                edgeInfo = nil
2,686✔
3540
                policy1 = nil
2,686✔
3541
                policy2 = nil
2,686✔
3542
        })
2,686✔
3543
        if errors.Is(err, ErrZombieEdge) {
2,690✔
3544
                return edgeInfo, nil, nil, err
4✔
3545
        }
4✔
3546
        if err != nil {
2,688✔
3547
                return nil, nil, nil, err
3✔
3548
        }
3✔
3549

3550
        return edgeInfo, policy1, policy2, nil
2,685✔
3551
}
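// Illustrative usage sketch (not part of the original file): callers that care
// about zombie channels should check for ErrZombieEdge explicitly, since an
// edge info stub (only the node keys populated) is still returned alongside
// the error. The store and chanID variables below are hypothetical:
//
//	info, p1, p2, err := store.FetchChannelEdgesByID(chanID)
//	switch {
//	case errors.Is(err, ErrZombieEdge):
//		// info.NodeKey1Bytes / info.NodeKey2Bytes identify the parties.
//	case err != nil:
//		// unknown channel or other failure
//	default:
//		_ = info
//		_ = p1
//		_ = p2
//	}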
3552

3553
// IsPublicNode is a helper method that determines whether the node with the
3554
// given public key is seen as a public node in the graph from the graph's
3555
// source node's point of view.
3556
func (c *KVStore) IsPublicNode(pubKey [33]byte) (bool, error) {
16✔
3557
        var nodeIsPublic bool
16✔
3558
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
32✔
3559
                nodes := tx.ReadBucket(nodeBucket)
16✔
3560
                if nodes == nil {
16✔
3561
                        return ErrGraphNodesNotFound
×
3562
                }
×
3563
                ourPubKey := nodes.Get(sourceKey)
16✔
3564
                if ourPubKey == nil {
16✔
3565
                        return ErrSourceNodeNotSet
×
3566
                }
×
3567
                node, err := fetchLightningNode(nodes, pubKey[:])
16✔
3568
                if err != nil {
16✔
3569
                        return err
×
3570
                }
×
3571

3572
                nodeIsPublic, err = c.isPublic(tx, node.PubKeyBytes, ourPubKey)
16✔
3573

16✔
3574
                return err
16✔
3575
        }, func() {
16✔
3576
                nodeIsPublic = false
16✔
3577
        })
16✔
3578
        if err != nil {
16✔
3579
                return false, err
×
3580
        }
×
3581

3582
        return nodeIsPublic, nil
16✔
3583
}
3584

3585
// genMultiSigP2WSH generates the p2wsh'd multisig script for 2 of 2 pubkeys.
3586
func genMultiSigP2WSH(aPub, bPub []byte) ([]byte, error) {
49✔
3587
        witnessScript, err := input.GenMultiSigScript(aPub, bPub)
49✔
3588
        if err != nil {
49✔
3589
                return nil, err
×
3590
        }
×
3591

3592
        // With the witness script generated, we'll now turn it into a p2wsh
3593
        // script:
3594
        //  * OP_0 <sha256(script)>
3595
        bldr := txscript.NewScriptBuilder(
49✔
3596
                txscript.WithScriptAllocSize(input.P2WSHSize),
49✔
3597
        )
49✔
3598
        bldr.AddOp(txscript.OP_0)
49✔
3599
        scriptHash := sha256.Sum256(witnessScript)
49✔
3600
        bldr.AddData(scriptHash[:])
49✔
3601

49✔
3602
        return bldr.Script()
49✔
3603
}
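// Illustrative sketch (not part of the original file): the returned script is
// the standard P2WSH form, OP_0 followed by the 32-byte SHA-256 of the 2-of-2
// multisig witness script. The bitcoinKey1/bitcoinKey2 values are hypothetical:
//
//	pkScript, err := genMultiSigP2WSH(bitcoinKey1[:], bitcoinKey2[:])
//	// len(pkScript) == 34: one OP_0, one push opcode and the 32-byte hash.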
3604

3605
// EdgePoint couples the outpoint of a channel with the funding script that it
3606
// creates. The FilteredChainView will use this to watch for spends of this
3607
// edge point on chain. We require both of these values as depending on the
3608
// concrete implementation, either the pkScript, or the out point will be used.
3609
type EdgePoint struct {
3610
        // FundingPkScript is the p2wsh multi-sig script of the target channel.
3611
        FundingPkScript []byte
3612

3613
        // OutPoint is the outpoint of the target channel.
3614
        OutPoint wire.OutPoint
3615
}
3616

3617
// String returns a human-readable version of the target EdgePoint. We return
3618
// the outpoint directly as it is enough to uniquely identify the edge point.
3619
func (e *EdgePoint) String() string {
×
3620
        return e.OutPoint.String()
×
3621
}
×
3622

3623
// ChannelView returns the verifiable edge information for each active channel
3624
// within the known channel graph. The set of UTXOs (along with their scripts)
3625
// returned are the ones that need to be watched on chain to detect channel
3626
// closes on the resident blockchain.
3627
func (c *KVStore) ChannelView() ([]EdgePoint, error) {
25✔
3628
        var edgePoints []EdgePoint
25✔
3629
        if err := kvdb.View(c.db, func(tx kvdb.RTx) error {
50✔
3630
                // We're going to iterate over the entire channel index, so
25✔
3631
                // we'll need to fetch the edgeBucket to get to the index as
25✔
3632
                // it's a sub-bucket.
25✔
3633
                edges := tx.ReadBucket(edgeBucket)
25✔
3634
                if edges == nil {
25✔
3635
                        return ErrGraphNoEdgesFound
×
3636
                }
×
3637
                chanIndex := edges.NestedReadBucket(channelPointBucket)
25✔
3638
                if chanIndex == nil {
25✔
3639
                        return ErrGraphNoEdgesFound
×
3640
                }
×
3641
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
25✔
3642
                if edgeIndex == nil {
25✔
3643
                        return ErrGraphNoEdgesFound
×
3644
                }
×
3645

3646
                // Once we have the proper bucket, we'll range over each key
3647
                // (which is the channel point for the channel) and decode it,
3648
                // accumulating each entry.
3649
                return chanIndex.ForEach(
25✔
3650
                        func(chanPointBytes, chanID []byte) error {
70✔
3651
                                chanPointReader := bytes.NewReader(
45✔
3652
                                        chanPointBytes,
45✔
3653
                                )
45✔
3654

45✔
3655
                                var chanPoint wire.OutPoint
45✔
3656
                                err := ReadOutpoint(chanPointReader, &chanPoint)
45✔
3657
                                if err != nil {
45✔
3658
                                        return err
×
3659
                                }
×
3660

3661
                                edgeInfo, err := fetchChanEdgeInfo(
45✔
3662
                                        edgeIndex, chanID,
45✔
3663
                                )
45✔
3664
                                if err != nil {
45✔
3665
                                        return err
×
3666
                                }
×
3667

3668
                                pkScript, err := genMultiSigP2WSH(
45✔
3669
                                        edgeInfo.BitcoinKey1Bytes[:],
45✔
3670
                                        edgeInfo.BitcoinKey2Bytes[:],
45✔
3671
                                )
45✔
3672
                                if err != nil {
45✔
3673
                                        return err
×
3674
                                }
×
3675

3676
                                edgePoints = append(edgePoints, EdgePoint{
45✔
3677
                                        FundingPkScript: pkScript,
45✔
3678
                                        OutPoint:        chanPoint,
45✔
3679
                                })
45✔
3680

45✔
3681
                                return nil
45✔
3682
                        },
3683
                )
3684
        }, func() {
25✔
3685
                edgePoints = nil
25✔
3686
        }); err != nil {
25✔
3687
                return nil, err
×
3688
        }
×
3689

3690
        return edgePoints, nil
25✔
3691
}
3692
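
A typical consumer of ChannelView is a chain watcher that registers every returned outpoint/script pair for spend notifications. The sketch below is hypothetical: watchChannelPoints and its register callback are stand-ins, and store is assumed to be an open *KVStore.

// watchChannelPoints is a hypothetical consumer of ChannelView: it hands
// every funding outpoint and its pkScript to a caller-supplied registration
// function (for example, a FilteredChainView spend filter).
func watchChannelPoints(store *KVStore,
        register func(wire.OutPoint, []byte) error) error {

        edgePoints, err := store.ChannelView()
        if err != nil {
                return err
        }

        for _, ep := range edgePoints {
                // Each entry pairs a channel's funding outpoint with the
                // p2wsh script it creates on chain.
                err := register(ep.OutPoint, ep.FundingPkScript)
                if err != nil {
                        return err
                }
        }

        return nil
}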

3693
// MarkEdgeZombie attempts to mark a channel identified by its channel ID as a
3694
// zombie. This method is used on an ad-hoc basis, when channels need to be
3695
// marked as zombies outside the normal pruning cycle.
3696
func (c *KVStore) MarkEdgeZombie(chanID uint64,
3697
        pubKey1, pubKey2 [33]byte) error {
131✔
3698

131✔
3699
        c.cacheMu.Lock()
131✔
3700
        defer c.cacheMu.Unlock()
131✔
3701

131✔
3702
        err := kvdb.Batch(c.db, func(tx kvdb.RwTx) error {
262✔
3703
                edges := tx.ReadWriteBucket(edgeBucket)
131✔
3704
                if edges == nil {
131✔
3705
                        return ErrGraphNoEdgesFound
×
3706
                }
×
3707
                zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
131✔
3708
                if err != nil {
131✔
3709
                        return fmt.Errorf("unable to create zombie "+
×
3710
                                "bucket: %w", err)
×
3711
                }
×
3712

3713
                return markEdgeZombie(zombieIndex, chanID, pubKey1, pubKey2)
131✔
3714
        })
3715
        if err != nil {
131✔
3716
                return err
×
3717
        }
×
3718

3719
        c.rejectCache.remove(chanID)
131✔
3720
        c.chanCache.remove(chanID)
131✔
3721

131✔
3722
        return nil
131✔
3723
}
3724

3725
// markEdgeZombie marks an edge as a zombie within our zombie index. The public
3726
// keys should represent the node public keys of the two parties involved in the
3727
// edge.
3728
func markEdgeZombie(zombieIndex kvdb.RwBucket, chanID uint64, pubKey1,
3729
        pubKey2 [33]byte) error {
156✔
3730

156✔
3731
        var k [8]byte
156✔
3732
        byteOrder.PutUint64(k[:], chanID)
156✔
3733

156✔
3734
        var v [66]byte
156✔
3735
        copy(v[:33], pubKey1[:])
156✔
3736
        copy(v[33:], pubKey2[:])
156✔
3737

156✔
3738
        return zombieIndex.Put(k[:], v[:])
156✔
3739
}
156✔
3740
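
The entry written above is a fixed-size record: an 8-byte channel ID key and a 66-byte value holding the two 33-byte node keys back to back. The hypothetical helper below shows how such a raw key/value pair (for example, one yielded by a ForEach over the zombie bucket) can be decoded with the same package-level byteOrder.

// decodeZombieEntry is a hypothetical helper that interprets one raw
// key/value pair from the zombie index, recovering the channel ID and the
// two advertising node keys written by markEdgeZombie.
func decodeZombieEntry(k, v []byte) (uint64, [33]byte, [33]byte, error) {
        if len(k) != 8 || len(v) != 66 {
                return 0, [33]byte{}, [33]byte{}, fmt.Errorf("malformed "+
                        "zombie entry: key=%d bytes, value=%d bytes",
                        len(k), len(v))
        }

        chanID := byteOrder.Uint64(k)

        var pubKey1, pubKey2 [33]byte
        copy(pubKey1[:], v[:33])
        copy(pubKey2[:], v[33:])

        return chanID, pubKey1, pubKey2, nil
}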

3741
// MarkEdgeLive clears an edge from our zombie index, deeming it as live.
3742
func (c *KVStore) MarkEdgeLive(chanID uint64) error {
23✔
3743
        c.cacheMu.Lock()
23✔
3744
        defer c.cacheMu.Unlock()
23✔
3745

23✔
3746
        return c.markEdgeLiveUnsafe(nil, chanID)
23✔
3747
}
23✔
3748

3749
// markEdgeLiveUnsafe clears an edge from the zombie index. This method can be
3750
// called with an existing kvdb.RwTx or the argument can be set to nil in which
3751
// case a new transaction will be created.
3752
//
3753
// NOTE: this method MUST only be called if the cacheMu has already been
3754
// acquired.
3755
func (c *KVStore) markEdgeLiveUnsafe(tx kvdb.RwTx, chanID uint64) error {
23✔
3756
        dbFn := func(tx kvdb.RwTx) error {
46✔
3757
                edges := tx.ReadWriteBucket(edgeBucket)
23✔
3758
                if edges == nil {
23✔
3759
                        return ErrGraphNoEdgesFound
×
3760
                }
×
3761
                zombieIndex := edges.NestedReadWriteBucket(zombieBucket)
23✔
3762
                if zombieIndex == nil {
23✔
3763
                        return nil
×
3764
                }
×
3765

3766
                var k [8]byte
23✔
3767
                byteOrder.PutUint64(k[:], chanID)
23✔
3768

23✔
3769
                if len(zombieIndex.Get(k[:])) == 0 {
25✔
3770
                        return ErrZombieEdgeNotFound
2✔
3771
                }
2✔
3772

3773
                return zombieIndex.Delete(k[:])
21✔
3774
        }
3775

3776
        // If the transaction is nil, we'll create a new one. Otherwise, we use
3777
        // the existing transaction.
3778
        var err error
23✔
3779
        if tx == nil {
46✔
3780
                err = kvdb.Update(c.db, dbFn, func() {})
46✔
3781
        } else {
×
3782
                err = dbFn(tx)
×
3783
        }
×
3784
        if err != nil {
25✔
3785
                return err
2✔
3786
        }
2✔
3787

3788
        c.rejectCache.remove(chanID)
21✔
3789
        c.chanCache.remove(chanID)
21✔
3790

21✔
3791
        return nil
21✔
3792
}
3793

3794
// IsZombieEdge returns whether the edge is considered a zombie. If it is a
3795
// zombie, then the two node public keys corresponding to this edge are also
3796
// returned.
3797
func (c *KVStore) IsZombieEdge(chanID uint64) (bool, [33]byte, [33]byte) {
14✔
3798
        var (
14✔
3799
                isZombie         bool
14✔
3800
                pubKey1, pubKey2 [33]byte
14✔
3801
        )
14✔
3802

14✔
3803
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
28✔
3804
                edges := tx.ReadBucket(edgeBucket)
14✔
3805
                if edges == nil {
14✔
3806
                        return ErrGraphNoEdgesFound
×
3807
                }
×
3808
                zombieIndex := edges.NestedReadBucket(zombieBucket)
14✔
3809
                if zombieIndex == nil {
14✔
3810
                        return nil
×
3811
                }
×
3812

3813
                isZombie, pubKey1, pubKey2 = isZombieEdge(zombieIndex, chanID)
14✔
3814

14✔
3815
                return nil
14✔
3816
        }, func() {
14✔
3817
                isZombie = false
14✔
3818
                pubKey1 = [33]byte{}
14✔
3819
                pubKey2 = [33]byte{}
14✔
3820
        })
14✔
3821
        if err != nil {
14✔
3822
                return false, [33]byte{}, [33]byte{}
×
3823
        }
×
3824

3825
        return isZombie, pubKey1, pubKey2
14✔
3826
}
3827

3828
// isZombieEdge returns whether an entry exists for the given channel in the
3829
// zombie index. If an entry exists, then the two node public keys corresponding
3830
// to this edge are also returned.
3831
func isZombieEdge(zombieIndex kvdb.RBucket,
3832
        chanID uint64) (bool, [33]byte, [33]byte) {
198✔
3833

198✔
3834
        var k [8]byte
198✔
3835
        byteOrder.PutUint64(k[:], chanID)
198✔
3836

198✔
3837
        v := zombieIndex.Get(k[:])
198✔
3838
        if v == nil {
309✔
3839
                return false, [33]byte{}, [33]byte{}
111✔
3840
        }
111✔
3841

3842
        var pubKey1, pubKey2 [33]byte
90✔
3843
        copy(pubKey1[:], v[:33])
90✔
3844
        copy(pubKey2[:], v[33:])
90✔
3845

90✔
3846
        return true, pubKey1, pubKey2
90✔
3847
}
3848

3849
// NumZombies returns the current number of zombie channels in the graph.
3850
func (c *KVStore) NumZombies() (uint64, error) {
4✔
3851
        var numZombies uint64
4✔
3852
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
8✔
3853
                edges := tx.ReadBucket(edgeBucket)
4✔
3854
                if edges == nil {
4✔
3855
                        return nil
×
3856
                }
×
3857
                zombieIndex := edges.NestedReadBucket(zombieBucket)
4✔
3858
                if zombieIndex == nil {
4✔
3859
                        return nil
×
3860
                }
×
3861

3862
                return zombieIndex.ForEach(func(_, _ []byte) error {
6✔
3863
                        numZombies++
2✔
3864
                        return nil
2✔
3865
                })
2✔
3866
        }, func() {
4✔
3867
                numZombies = 0
4✔
3868
        })
4✔
3869
        if err != nil {
4✔
3870
                return 0, err
×
3871
        }
×
3872

3873
        return numZombies, nil
4✔
3874
}
3875
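
Taken together, MarkEdgeZombie, IsZombieEdge, NumZombies and MarkEdgeLive cover the full life cycle of a zombie entry. The following is a minimal, hypothetical walk-through of that flow, assuming store is an open *KVStore and the channel's two advertising node keys are known.

// zombieRoundTrip is a hypothetical walk-through of the zombie index API:
// mark a channel as a zombie, observe it, then resurrect it.
func zombieRoundTrip(store *KVStore, chanID uint64,
        pubKey1, pubKey2 [33]byte) error {

        if err := store.MarkEdgeZombie(chanID, pubKey1, pubKey2); err != nil {
                return err
        }

        isZombie, key1, key2 := store.IsZombieEdge(chanID)
        if !isZombie || key1 != pubKey1 || key2 != pubKey2 {
                return fmt.Errorf("channel %d not recorded as zombie", chanID)
        }

        numZombies, err := store.NumZombies()
        if err != nil {
                return err
        }
        if numZombies == 0 {
                return fmt.Errorf("expected at least one zombie entry")
        }

        // Clearing the entry makes the channel eligible for normal
        // processing again; calling MarkEdgeLive a second time would
        // return ErrZombieEdgeNotFound.
        return store.MarkEdgeLive(chanID)
}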

3876
// PutClosedScid stores a SCID for a closed channel in the database. This is so
3877
// that we can ignore channel announcements that we know to be closed without
3878
// having to validate them and fetch a block.
3879
func (c *KVStore) PutClosedScid(scid lnwire.ShortChannelID) error {
1✔
3880
        return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
2✔
3881
                closedScids, err := tx.CreateTopLevelBucket(closedScidBucket)
1✔
3882
                if err != nil {
1✔
3883
                        return err
×
3884
                }
×
3885

3886
                var k [8]byte
1✔
3887
                byteOrder.PutUint64(k[:], scid.ToUint64())
1✔
3888

1✔
3889
                return closedScids.Put(k[:], []byte{})
1✔
3890
        }, func() {})
1✔
3891
}
3892

3893
// IsClosedScid checks whether a channel identified by the passed in scid is
3894
// closed. This helps avoid having to perform expensive validation checks.
3895
// TODO: Add an LRU cache to cut down on disc reads.
3896
func (c *KVStore) IsClosedScid(scid lnwire.ShortChannelID) (bool, error) {
5✔
3897
        var isClosed bool
5✔
3898
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
10✔
3899
                closedScids := tx.ReadBucket(closedScidBucket)
5✔
3900
                if closedScids == nil {
5✔
3901
                        return ErrClosedScidsNotFound
×
3902
                }
×
3903

3904
                var k [8]byte
5✔
3905
                byteOrder.PutUint64(k[:], scid.ToUint64())
5✔
3906

5✔
3907
                if closedScids.Get(k[:]) != nil {
6✔
3908
                        isClosed = true
1✔
3909
                        return nil
1✔
3910
                }
1✔
3911

3912
                return nil
4✔
3913
        }, func() {
5✔
3914
                isClosed = false
5✔
3915
        })
5✔
3916
        if err != nil {
5✔
3917
                return false, err
×
3918
        }
×
3919

3920
        return isClosed, nil
5✔
3921
}
3922
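
The closed-SCID index is a simple presence set: an 8-byte big-endian serialization of the SCID mapped to an empty value. The sketch below is a hypothetical round trip of the two methods above, assuming store is an open *KVStore.

// rememberClosedChannel is a hypothetical helper that records a closed
// channel and then exercises the lookup path a gossip filter would use.
func rememberClosedChannel(store *KVStore, scid lnwire.ShortChannelID) error {
        if err := store.PutClosedScid(scid); err != nil {
                return err
        }

        closed, err := store.IsClosedScid(scid)
        if err != nil {
                return err
        }
        if !closed {
                return fmt.Errorf("scid %v should be marked as closed", scid)
        }

        // Announcements for this SCID can now be dropped without having to
        // validate them against the chain.
        return nil
}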

3923
// GraphSession will provide the callback with access to a NodeTraverser
3924
// instance which can be used to perform queries against the channel graph.
3925
func (c *KVStore) GraphSession(cb func(graph NodeTraverser) error) error {
54✔
3926
        return c.db.View(func(tx walletdb.ReadTx) error {
108✔
3927
                return cb(&nodeTraverserSession{
54✔
3928
                        db: c,
54✔
3929
                        tx: tx,
54✔
3930
                })
54✔
3931
        }, func() {})
108✔
3932
}
3933

3934
// nodeTraverserSession implements the NodeTraverser interface but with a
3935
// backing read-only transaction for a consistent view of the graph.
3936
type nodeTraverserSession struct {
3937
        tx kvdb.RTx
3938
        db *KVStore
3939
}
3940

3941
// ForEachNodeDirectedChannel calls the callback for every channel of the given
3942
// node.
3943
//
3944
// NOTE: Part of the NodeTraverser interface.
3945
func (c *nodeTraverserSession) ForEachNodeDirectedChannel(nodePub route.Vertex,
3946
        cb func(channel *DirectedChannel) error) error {
239✔
3947

239✔
3948
        return c.db.forEachNodeDirectedChannel(c.tx, nodePub, cb)
239✔
3949
}
239✔
3950

3951
// FetchNodeFeatures returns the features of the given node. If the node is
3952
// unknown, assume no additional features are supported.
3953
//
3954
// NOTE: Part of the NodeTraverser interface.
3955
func (c *nodeTraverserSession) FetchNodeFeatures(nodePub route.Vertex) (
3956
        *lnwire.FeatureVector, error) {
254✔
3957

254✔
3958
        return c.db.fetchNodeFeatures(c.tx, nodePub)
254✔
3959
}
254✔
3960
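
GraphSession is intended for read paths, such as pathfinding, that issue many lookups and want them all to observe one consistent snapshot. The hypothetical sketch below counts a node's channels and fetches its feature vector inside a single read transaction; store and nodePub are assumed inputs.

// snapshotNodeInfo is a hypothetical GraphSession caller: every query made
// through the provided NodeTraverser runs against the same read-only
// transaction, so the channel count and feature vector are mutually
// consistent.
func snapshotNodeInfo(store *KVStore, nodePub route.Vertex) (int,
        *lnwire.FeatureVector, error) {

        var (
                numChans int
                features *lnwire.FeatureVector
        )
        err := store.GraphSession(func(graph NodeTraverser) error {
                err := graph.ForEachNodeDirectedChannel(
                        nodePub, func(_ *DirectedChannel) error {
                                numChans++
                                return nil
                        },
                )
                if err != nil {
                        return err
                }

                features, err = graph.FetchNodeFeatures(nodePub)

                return err
        })
        if err != nil {
                return 0, nil, err
        }

        return numChans, features, nil
}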

3961
func putLightningNode(nodeBucket, aliasBucket, updateIndex kvdb.RwBucket,
3962
        node *models.LightningNode) error {
1,001✔
3963

1,001✔
3964
        var (
1,001✔
3965
                scratch [16]byte
1,001✔
3966
                b       bytes.Buffer
1,001✔
3967
        )
1,001✔
3968

1,001✔
3969
        pub, err := node.PubKey()
1,001✔
3970
        if err != nil {
1,001✔
3971
                return err
×
3972
        }
×
3973
        nodePub := pub.SerializeCompressed()
1,001✔
3974

1,001✔
3975
        // If the node has the update time set, write it, else write 0.
1,001✔
3976
        updateUnix := uint64(0)
1,001✔
3977
        if node.LastUpdate.Unix() > 0 {
1,867✔
3978
                updateUnix = uint64(node.LastUpdate.Unix())
866✔
3979
        }
866✔
3980

3981
        byteOrder.PutUint64(scratch[:8], updateUnix)
1,001✔
3982
        if _, err := b.Write(scratch[:8]); err != nil {
1,001✔
3983
                return err
×
3984
        }
×
3985

3986
        if _, err := b.Write(nodePub); err != nil {
1,001✔
3987
                return err
×
3988
        }
×
3989

3990
        // If we got a node announcement for this node, we will have the rest
3991
        // of the data available. If not, we don't have more data to write.
3992
        if !node.HaveNodeAnnouncement {
1,086✔
3993
                // Write HaveNodeAnnouncement=0.
85✔
3994
                byteOrder.PutUint16(scratch[:2], 0)
85✔
3995
                if _, err := b.Write(scratch[:2]); err != nil {
85✔
3996
                        return err
×
3997
                }
×
3998

3999
                return nodeBucket.Put(nodePub, b.Bytes())
85✔
4000
        }
4001

4002
        // Write HaveNodeAnnouncement=1.
4003
        byteOrder.PutUint16(scratch[:2], 1)
919✔
4004
        if _, err := b.Write(scratch[:2]); err != nil {
919✔
4005
                return err
×
4006
        }
×
4007

4008
        if err := binary.Write(&b, byteOrder, node.Color.R); err != nil {
919✔
4009
                return err
×
4010
        }
×
4011
        if err := binary.Write(&b, byteOrder, node.Color.G); err != nil {
919✔
4012
                return err
×
4013
        }
×
4014
        if err := binary.Write(&b, byteOrder, node.Color.B); err != nil {
919✔
4015
                return err
×
4016
        }
×
4017

4018
        if err := wire.WriteVarString(&b, 0, node.Alias); err != nil {
919✔
4019
                return err
×
4020
        }
×
4021

4022
        if err := node.Features.Encode(&b); err != nil {
919✔
4023
                return err
×
4024
        }
×
4025

4026
        numAddresses := uint16(len(node.Addresses))
919✔
4027
        byteOrder.PutUint16(scratch[:2], numAddresses)
919✔
4028
        if _, err := b.Write(scratch[:2]); err != nil {
919✔
4029
                return err
×
4030
        }
×
4031

4032
        for _, address := range node.Addresses {
2,075✔
4033
                if err := SerializeAddr(&b, address); err != nil {
1,156✔
4034
                        return err
×
4035
                }
×
4036
        }
4037

4038
        sigLen := len(node.AuthSigBytes)
919✔
4039
        if sigLen > 80 {
919✔
4040
                return fmt.Errorf("max sig len allowed is 80, had %v",
×
4041
                        sigLen)
×
4042
        }
×
4043

4044
        err = wire.WriteVarBytes(&b, 0, node.AuthSigBytes)
919✔
4045
        if err != nil {
919✔
4046
                return err
×
4047
        }
×
4048

4049
        if len(node.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes {
919✔
4050
                return ErrTooManyExtraOpaqueBytes(len(node.ExtraOpaqueData))
×
4051
        }
×
4052
        err = wire.WriteVarBytes(&b, 0, node.ExtraOpaqueData)
919✔
4053
        if err != nil {
919✔
4054
                return err
×
4055
        }
×
4056

4057
        if err := aliasBucket.Put(nodePub, []byte(node.Alias)); err != nil {
919✔
4058
                return err
×
4059
        }
×
4060

4061
        // With the alias bucket updated, we'll now update the index that
4062
        // tracks the time series of node updates.
4063
        var indexKey [8 + 33]byte
919✔
4064
        byteOrder.PutUint64(indexKey[:8], updateUnix)
919✔
4065
        copy(indexKey[8:], nodePub)
919✔
4066

919✔
4067
        // If there was already an old index entry for this node, then we'll
919✔
4068
        // delete the old one before we write the new entry.
919✔
4069
        if nodeBytes := nodeBucket.Get(nodePub); nodeBytes != nil {
1,029✔
4070
                // Extract out the old update time so we can reconstruct the
110✔
4071
                // prior index key to delete it from the index.
110✔
4072
                oldUpdateTime := nodeBytes[:8]
110✔
4073

110✔
4074
                var oldIndexKey [8 + 33]byte
110✔
4075
                copy(oldIndexKey[:8], oldUpdateTime)
110✔
4076
                copy(oldIndexKey[8:], nodePub)
110✔
4077

110✔
4078
                if err := updateIndex.Delete(oldIndexKey[:]); err != nil {
110✔
4079
                        return err
×
4080
                }
×
4081
        }
4082

4083
        if err := updateIndex.Put(indexKey[:], nil); err != nil {
919✔
4084
                return err
×
4085
        }
×
4086

4087
        return nodeBucket.Put(nodePub, b.Bytes())
919✔
4088
}
4089
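
Every node write above also maintains the node update index, whose key-only entries are the node's last update time followed by its pubkey; the key derived from the previously stored update time must be deleted first, otherwise the time-series scan would report the node twice. The hypothetical helper below spells out that key construction using the same package-level byteOrder.

// nodeUpdateIndexKey is a hypothetical helper that builds the 41-byte key
// (8-byte big-endian update time followed by the 33-byte compressed pubkey)
// used for entries in the node update index.
func nodeUpdateIndexKey(lastUpdate time.Time, nodePub [33]byte) [8 + 33]byte {
        var key [8 + 33]byte
        byteOrder.PutUint64(key[:8], uint64(lastUpdate.Unix()))
        copy(key[8:], nodePub[:])

        return key
}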

4090
func fetchLightningNode(nodeBucket kvdb.RBucket,
4091
        nodePub []byte) (models.LightningNode, error) {
3,626✔
4092

3,626✔
4093
        nodeBytes := nodeBucket.Get(nodePub)
3,626✔
4094
        if nodeBytes == nil {
3,711✔
4095
                return models.LightningNode{}, ErrGraphNodeNotFound
85✔
4096
        }
85✔
4097

4098
        nodeReader := bytes.NewReader(nodeBytes)
3,544✔
4099

3,544✔
4100
        return deserializeLightningNode(nodeReader)
3,544✔
4101
}
4102

4103
func deserializeLightningNodeCacheable(r io.Reader) (route.Vertex,
4104
        *lnwire.FeatureVector, error) {
123✔
4105

123✔
4106
        var (
123✔
4107
                pubKey      route.Vertex
123✔
4108
                features    = lnwire.EmptyFeatureVector()
123✔
4109
                nodeScratch [8]byte
123✔
4110
        )
123✔
4111

123✔
4112
        // Skip ahead:
123✔
4113
        // - LastUpdate (8 bytes)
123✔
4114
        if _, err := r.Read(nodeScratch[:]); err != nil {
123✔
4115
                return pubKey, nil, err
×
4116
        }
×
4117

4118
        if _, err := io.ReadFull(r, pubKey[:]); err != nil {
123✔
4119
                return pubKey, nil, err
×
4120
        }
×
4121

4122
        // Read the node announcement flag.
4123
        if _, err := r.Read(nodeScratch[:2]); err != nil {
123✔
4124
                return pubKey, nil, err
×
4125
        }
×
4126
        hasNodeAnn := byteOrder.Uint16(nodeScratch[:2])
123✔
4127

123✔
4128
        // The rest of the data is optional, and will only be there if we got a
123✔
4129
        // node announcement for this node.
123✔
4130
        if hasNodeAnn == 0 {
126✔
4131
                return pubKey, features, nil
3✔
4132
        }
3✔
4133

4134
        // We did get a node announcement for this node, so we'll have the rest
4135
        // of the data available.
4136
        var rgb uint8
123✔
4137
        if err := binary.Read(r, byteOrder, &rgb); err != nil {
123✔
4138
                return pubKey, nil, err
×
4139
        }
×
4140
        if err := binary.Read(r, byteOrder, &rgb); err != nil {
123✔
4141
                return pubKey, nil, err
×
4142
        }
×
4143
        if err := binary.Read(r, byteOrder, &rgb); err != nil {
123✔
4144
                return pubKey, nil, err
×
4145
        }
×
4146

4147
        if _, err := wire.ReadVarString(r, 0); err != nil {
123✔
4148
                return pubKey, nil, err
×
4149
        }
×
4150

4151
        if err := features.Decode(r); err != nil {
123✔
4152
                return pubKey, nil, err
×
4153
        }
×
4154

4155
        return pubKey, features, nil
123✔
4156
}
4157

4158
func deserializeLightningNode(r io.Reader) (models.LightningNode, error) {
8,531✔
4159
        var (
8,531✔
4160
                node    models.LightningNode
8,531✔
4161
                scratch [8]byte
8,531✔
4162
                err     error
8,531✔
4163
        )
8,531✔
4164

8,531✔
4165
        // Always populate a feature vector, even if we don't have a node
8,531✔
4166
        // announcement and short circuit below.
8,531✔
4167
        node.Features = lnwire.EmptyFeatureVector()
8,531✔
4168

8,531✔
4169
        if _, err := r.Read(scratch[:]); err != nil {
8,531✔
4170
                return models.LightningNode{}, err
×
4171
        }
×
4172

4173
        unix := int64(byteOrder.Uint64(scratch[:]))
8,531✔
4174
        node.LastUpdate = time.Unix(unix, 0)
8,531✔
4175

8,531✔
4176
        if _, err := io.ReadFull(r, node.PubKeyBytes[:]); err != nil {
8,531✔
4177
                return models.LightningNode{}, err
×
4178
        }
×
4179

4180
        if _, err := r.Read(scratch[:2]); err != nil {
8,531✔
4181
                return models.LightningNode{}, err
×
4182
        }
×
4183

4184
        hasNodeAnn := byteOrder.Uint16(scratch[:2])
8,531✔
4185
        if hasNodeAnn == 1 {
16,919✔
4186
                node.HaveNodeAnnouncement = true
8,388✔
4187
        } else {
8,534✔
4188
                node.HaveNodeAnnouncement = false
146✔
4189
        }
146✔
4190

4191
        // The rest of the data is optional, and will only be there if we got a
4192
        // node announcement for this node.
4193
        if !node.HaveNodeAnnouncement {
8,677✔
4194
                return node, nil
146✔
4195
        }
146✔
4196

4197
        // We did get a node announcement for this node, so we'll have the rest
4198
        // of the data available.
4199
        if err := binary.Read(r, byteOrder, &node.Color.R); err != nil {
8,388✔
4200
                return models.LightningNode{}, err
×
4201
        }
×
4202
        if err := binary.Read(r, byteOrder, &node.Color.G); err != nil {
8,388✔
4203
                return models.LightningNode{}, err
×
4204
        }
×
4205
        if err := binary.Read(r, byteOrder, &node.Color.B); err != nil {
8,388✔
4206
                return models.LightningNode{}, err
×
4207
        }
×
4208

4209
        node.Alias, err = wire.ReadVarString(r, 0)
8,388✔
4210
        if err != nil {
8,388✔
4211
                return models.LightningNode{}, err
×
4212
        }
×
4213

4214
        err = node.Features.Decode(r)
8,388✔
4215
        if err != nil {
8,388✔
4216
                return models.LightningNode{}, err
×
4217
        }
×
4218

4219
        if _, err := r.Read(scratch[:2]); err != nil {
8,388✔
4220
                return models.LightningNode{}, err
×
4221
        }
×
4222
        numAddresses := int(byteOrder.Uint16(scratch[:2]))
8,388✔
4223

8,388✔
4224
        var addresses []net.Addr
8,388✔
4225
        for i := 0; i < numAddresses; i++ {
19,036✔
4226
                address, err := DeserializeAddr(r)
10,648✔
4227
                if err != nil {
10,648✔
4228
                        return models.LightningNode{}, err
×
4229
                }
×
4230
                addresses = append(addresses, address)
10,648✔
4231
        }
4232
        node.Addresses = addresses
8,388✔
4233

8,388✔
4234
        node.AuthSigBytes, err = wire.ReadVarBytes(r, 0, 80, "sig")
8,388✔
4235
        if err != nil {
8,388✔
4236
                return models.LightningNode{}, err
×
4237
        }
×
4238

4239
        // We'll try and see if there are any opaque bytes left. If not, then
4240
        // we'll ignore the EOF error and return the node as is.
4241
        extraBytes, err := wire.ReadVarBytes(
8,388✔
4242
                r, 0, MaxAllowedExtraOpaqueBytes, "blob",
8,388✔
4243
        )
8,388✔
4244
        switch {
8,388✔
4245
        case errors.Is(err, io.ErrUnexpectedEOF):
×
4246
        case errors.Is(err, io.EOF):
×
4247
        case err != nil:
×
4248
                return models.LightningNode{}, err
×
4249
        }
4250

4251
        if len(extraBytes) > 0 {
8,398✔
4252
                node.ExtraOpaqueData = extraBytes
10✔
4253
        }
10✔
4254

4255
        return node, nil
8,388✔
4256
}
4257

4258
func putChanEdgeInfo(edgeIndex kvdb.RwBucket,
4259
        edgeInfo *models.ChannelEdgeInfo, chanID [8]byte) error {
1,488✔
4260

1,488✔
4261
        var b bytes.Buffer
1,488✔
4262

1,488✔
4263
        if _, err := b.Write(edgeInfo.NodeKey1Bytes[:]); err != nil {
1,488✔
4264
                return err
×
4265
        }
×
4266
        if _, err := b.Write(edgeInfo.NodeKey2Bytes[:]); err != nil {
1,488✔
4267
                return err
×
4268
        }
×
4269
        if _, err := b.Write(edgeInfo.BitcoinKey1Bytes[:]); err != nil {
1,488✔
4270
                return err
×
4271
        }
×
4272
        if _, err := b.Write(edgeInfo.BitcoinKey2Bytes[:]); err != nil {
1,488✔
4273
                return err
×
4274
        }
×
4275

4276
        if err := wire.WriteVarBytes(&b, 0, edgeInfo.Features); err != nil {
1,488✔
4277
                return err
×
4278
        }
×
4279

4280
        authProof := edgeInfo.AuthProof
1,488✔
4281
        var nodeSig1, nodeSig2, bitcoinSig1, bitcoinSig2 []byte
1,488✔
4282
        if authProof != nil {
2,892✔
4283
                nodeSig1 = authProof.NodeSig1Bytes
1,404✔
4284
                nodeSig2 = authProof.NodeSig2Bytes
1,404✔
4285
                bitcoinSig1 = authProof.BitcoinSig1Bytes
1,404✔
4286
                bitcoinSig2 = authProof.BitcoinSig2Bytes
1,404✔
4287
        }
1,404✔
4288

4289
        if err := wire.WriteVarBytes(&b, 0, nodeSig1); err != nil {
1,488✔
4290
                return err
×
4291
        }
×
4292
        if err := wire.WriteVarBytes(&b, 0, nodeSig2); err != nil {
1,488✔
4293
                return err
×
4294
        }
×
4295
        if err := wire.WriteVarBytes(&b, 0, bitcoinSig1); err != nil {
1,488✔
4296
                return err
×
4297
        }
×
4298
        if err := wire.WriteVarBytes(&b, 0, bitcoinSig2); err != nil {
1,488✔
4299
                return err
×
4300
        }
×
4301

4302
        if err := WriteOutpoint(&b, &edgeInfo.ChannelPoint); err != nil {
1,488✔
4303
                return err
×
4304
        }
×
4305
        err := binary.Write(&b, byteOrder, uint64(edgeInfo.Capacity))
1,488✔
4306
        if err != nil {
1,488✔
4307
                return err
×
4308
        }
×
4309
        if _, err := b.Write(chanID[:]); err != nil {
1,488✔
4310
                return err
×
4311
        }
×
4312
        if _, err := b.Write(edgeInfo.ChainHash[:]); err != nil {
1,488✔
4313
                return err
×
4314
        }
×
4315

4316
        if len(edgeInfo.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes {
1,488✔
4317
                return ErrTooManyExtraOpaqueBytes(len(edgeInfo.ExtraOpaqueData))
×
4318
        }
×
4319
        err = wire.WriteVarBytes(&b, 0, edgeInfo.ExtraOpaqueData)
1,488✔
4320
        if err != nil {
1,488✔
4321
                return err
×
4322
        }
×
4323

4324
        return edgeIndex.Put(chanID[:], b.Bytes())
1,488✔
4325
}
4326

4327
func fetchChanEdgeInfo(edgeIndex kvdb.RBucket,
4328
        chanID []byte) (models.ChannelEdgeInfo, error) {
6,800✔
4329

6,800✔
4330
        edgeInfoBytes := edgeIndex.Get(chanID)
6,800✔
4331
        if edgeInfoBytes == nil {
6,869✔
4332
                return models.ChannelEdgeInfo{}, ErrEdgeNotFound
69✔
4333
        }
69✔
4334

4335
        edgeInfoReader := bytes.NewReader(edgeInfoBytes)
6,734✔
4336

6,734✔
4337
        return deserializeChanEdgeInfo(edgeInfoReader)
6,734✔
4338
}
4339

4340
func deserializeChanEdgeInfo(r io.Reader) (models.ChannelEdgeInfo, error) {
7,273✔
4341
        var (
7,273✔
4342
                err      error
7,273✔
4343
                edgeInfo models.ChannelEdgeInfo
7,273✔
4344
        )
7,273✔
4345

7,273✔
4346
        if _, err := io.ReadFull(r, edgeInfo.NodeKey1Bytes[:]); err != nil {
7,273✔
4347
                return models.ChannelEdgeInfo{}, err
×
4348
        }
×
4349
        if _, err := io.ReadFull(r, edgeInfo.NodeKey2Bytes[:]); err != nil {
7,273✔
4350
                return models.ChannelEdgeInfo{}, err
×
4351
        }
×
4352
        if _, err := io.ReadFull(r, edgeInfo.BitcoinKey1Bytes[:]); err != nil {
7,273✔
4353
                return models.ChannelEdgeInfo{}, err
×
4354
        }
×
4355
        if _, err := io.ReadFull(r, edgeInfo.BitcoinKey2Bytes[:]); err != nil {
7,273✔
4356
                return models.ChannelEdgeInfo{}, err
×
4357
        }
×
4358

4359
        edgeInfo.Features, err = wire.ReadVarBytes(r, 0, 900, "features")
7,273✔
4360
        if err != nil {
7,273✔
4361
                return models.ChannelEdgeInfo{}, err
×
4362
        }
×
4363

4364
        proof := &models.ChannelAuthProof{}
7,273✔
4365

7,273✔
4366
        proof.NodeSig1Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
7,273✔
4367
        if err != nil {
7,273✔
4368
                return models.ChannelEdgeInfo{}, err
×
4369
        }
×
4370
        proof.NodeSig2Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
7,273✔
4371
        if err != nil {
7,273✔
4372
                return models.ChannelEdgeInfo{}, err
×
4373
        }
×
4374
        proof.BitcoinSig1Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
7,273✔
4375
        if err != nil {
7,273✔
4376
                return models.ChannelEdgeInfo{}, err
×
4377
        }
×
4378
        proof.BitcoinSig2Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
7,273✔
4379
        if err != nil {
7,273✔
4380
                return models.ChannelEdgeInfo{}, err
×
4381
        }
×
4382

4383
        if !proof.IsEmpty() {
11,443✔
4384
                edgeInfo.AuthProof = proof
4,170✔
4385
        }
4,170✔
4386

4387
        edgeInfo.ChannelPoint = wire.OutPoint{}
7,273✔
4388
        if err := ReadOutpoint(r, &edgeInfo.ChannelPoint); err != nil {
7,273✔
4389
                return models.ChannelEdgeInfo{}, err
×
4390
        }
×
4391
        if err := binary.Read(r, byteOrder, &edgeInfo.Capacity); err != nil {
7,273✔
4392
                return models.ChannelEdgeInfo{}, err
×
4393
        }
×
4394
        if err := binary.Read(r, byteOrder, &edgeInfo.ChannelID); err != nil {
7,273✔
4395
                return models.ChannelEdgeInfo{}, err
×
4396
        }
×
4397

4398
        if _, err := io.ReadFull(r, edgeInfo.ChainHash[:]); err != nil {
7,273✔
4399
                return models.ChannelEdgeInfo{}, err
×
4400
        }
×
4401

4402
        // We'll try and see if there are any opaque bytes left. If not, then
4403
        // we'll ignore the EOF error and return the edge as is.
4404
        edgeInfo.ExtraOpaqueData, err = wire.ReadVarBytes(
7,273✔
4405
                r, 0, MaxAllowedExtraOpaqueBytes, "blob",
7,273✔
4406
        )
7,273✔
4407
        switch {
7,273✔
4408
        case errors.Is(err, io.ErrUnexpectedEOF):
×
4409
        case errors.Is(err, io.EOF):
×
4410
        case err != nil:
×
4411
                return models.ChannelEdgeInfo{}, err
×
4412
        }
4413

4414
        return edgeInfo, nil
7,273✔
4415
}
4416

4417
func putChanEdgePolicy(edges kvdb.RwBucket, edge *models.ChannelEdgePolicy,
4418
        from, to []byte) error {
2,666✔
4419

2,666✔
4420
        var edgeKey [33 + 8]byte
2,666✔
4421
        copy(edgeKey[:], from)
2,666✔
4422
        byteOrder.PutUint64(edgeKey[33:], edge.ChannelID)
2,666✔
4423

2,666✔
4424
        var b bytes.Buffer
2,666✔
4425
        if err := serializeChanEdgePolicy(&b, edge, to); err != nil {
2,668✔
4426
                return err
2✔
4427
        }
2✔
4428

4429
        // Before we write out the new edge, we'll create a new entry in the
4430
        // update index in order to keep it fresh.
4431
        updateUnix := uint64(edge.LastUpdate.Unix())
2,664✔
4432
        var indexKey [8 + 8]byte
2,664✔
4433
        byteOrder.PutUint64(indexKey[:8], updateUnix)
2,664✔
4434
        byteOrder.PutUint64(indexKey[8:], edge.ChannelID)
2,664✔
4435

2,664✔
4436
        updateIndex, err := edges.CreateBucketIfNotExists(edgeUpdateIndexBucket)
2,664✔
4437
        if err != nil {
2,664✔
4438
                return err
×
4439
        }
×
4440

4441
        // If there was already an entry for this edge, then we'll need to
4442
        // delete the old one to ensure we don't leave around any after-images.
4443
        // An unknown policy value does not have an update time recorded, so
4444
        // it also does not need to be removed.
4445
        if edgeBytes := edges.Get(edgeKey[:]); edgeBytes != nil &&
2,664✔
4446
                !bytes.Equal(edgeBytes, unknownPolicy) {
2,690✔
4447

26✔
4448
                // In order to delete the old entry, we'll need to obtain the
26✔
4449
                // *prior* update time in order to delete it. To do this, we'll
26✔
4450
                // need to deserialize the existing policy within the database
26✔
4451
                // (now outdated by the new one), and delete its corresponding
26✔
4452
                // entry within the update index. We'll ignore any
26✔
4453
                // ErrEdgePolicyOptionalFieldNotFound or ErrParsingExtraTLVBytes
26✔
4454
                // errors, as we only need the channel ID and update time to
26✔
4455
                // delete the entry.
26✔
4456
                //
26✔
4457
                // TODO(halseth): get rid of these invalid policies in a
26✔
4458
                // migration.
26✔
4459
                // TODO(elle): complete the above TODO in migration from kvdb
26✔
4460
                // to SQL.
26✔
4461
                oldEdgePolicy, err := deserializeChanEdgePolicy(
26✔
4462
                        bytes.NewReader(edgeBytes),
26✔
4463
                )
26✔
4464
                if err != nil &&
26✔
4465
                        !errors.Is(err, ErrEdgePolicyOptionalFieldNotFound) &&
26✔
4466
                        !errors.Is(err, ErrParsingExtraTLVBytes) {
26✔
4467

×
4468
                        return err
×
4469
                }
×
4470

4471
                oldUpdateTime := uint64(oldEdgePolicy.LastUpdate.Unix())
26✔
4472

26✔
4473
                var oldIndexKey [8 + 8]byte
26✔
4474
                byteOrder.PutUint64(oldIndexKey[:8], oldUpdateTime)
26✔
4475
                byteOrder.PutUint64(oldIndexKey[8:], edge.ChannelID)
26✔
4476

26✔
4477
                if err := updateIndex.Delete(oldIndexKey[:]); err != nil {
26✔
4478
                        return err
×
4479
                }
×
4480
        }
4481

4482
        if err := updateIndex.Put(indexKey[:], nil); err != nil {
2,664✔
4483
                return err
×
4484
        }
×
4485

4486
        err = updateEdgePolicyDisabledIndex(
2,664✔
4487
                edges, edge.ChannelID,
2,664✔
4488
                edge.ChannelFlags&lnwire.ChanUpdateDirection > 0,
2,664✔
4489
                edge.IsDisabled(),
2,664✔
4490
        )
2,664✔
4491
        if err != nil {
2,664✔
4492
                return err
×
4493
        }
×
4494

4495
        return edges.Put(edgeKey[:], b.Bytes())
2,664✔
4496
}
4497
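
Each policy write touches two keys: the policy record itself, stored under the advertising node's 33-byte pubkey followed by the 8-byte channel ID, and a key-only entry in the edge update index keyed by update time and channel ID. The hypothetical helper below makes both constructions explicit with the same package-level byteOrder.

// chanEdgePolicyKeys is a hypothetical helper spelling out the two keys
// touched on every policy write: the policy record key in the edge bucket
// and its companion entry in the edge update index.
func chanEdgePolicyKeys(fromNode [33]byte, channelID uint64,
        lastUpdate time.Time) (edgeKey [33 + 8]byte, indexKey [8 + 8]byte) {

        // Policy record key: advertising node pubkey || channel ID.
        copy(edgeKey[:33], fromNode[:])
        byteOrder.PutUint64(edgeKey[33:], channelID)

        // Update index key: last update time || channel ID (key only, the
        // value is nil).
        byteOrder.PutUint64(indexKey[:8], uint64(lastUpdate.Unix()))
        byteOrder.PutUint64(indexKey[8:], channelID)

        return edgeKey, indexKey
}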

4498
// updateEdgePolicyDisabledIndex is used to update the disabledEdgePolicyIndex
4499
// bucket by either adding a new disabled ChannelEdgePolicy or removing an
4500
// existing one.
4501
// The direction represents the direction of the edge, and disabled is used to
4502
// decide whether to add or remove an entry from the bucket.
4503
// In general, a channel is disabled if two entries for the same chanID exist
4504
// in this bucket.
4505
// Maintaining the bucket this way allows fast retrieval of disabled
4506
// channels, for example when pruning is needed.
4507
func updateEdgePolicyDisabledIndex(edges kvdb.RwBucket, chanID uint64,
4508
        direction bool, disabled bool) error {
2,936✔
4509

2,936✔
4510
        var disabledEdgeKey [8 + 1]byte
2,936✔
4511
        byteOrder.PutUint64(disabledEdgeKey[0:], chanID)
2,936✔
4512
        if direction {
4,403✔
4513
                disabledEdgeKey[8] = 1
1,467✔
4514
        }
1,467✔
4515

4516
        disabledEdgePolicyIndex, err := edges.CreateBucketIfNotExists(
2,936✔
4517
                disabledEdgePolicyBucket,
2,936✔
4518
        )
2,936✔
4519
        if err != nil {
2,936✔
4520
                return err
×
4521
        }
×
4522

4523
        if disabled {
2,965✔
4524
                return disabledEdgePolicyIndex.Put(disabledEdgeKey[:], []byte{})
29✔
4525
        }
29✔
4526

4527
        return disabledEdgePolicyIndex.Delete(disabledEdgeKey[:])
2,910✔
4528
}
4529

4530
// putChanEdgePolicyUnknown marks the edge policy as unknown
4531
// in the edges bucket.
4532
func putChanEdgePolicyUnknown(edges kvdb.RwBucket, channelID uint64,
4533
        from []byte) error {
2,969✔
4534

2,969✔
4535
        var edgeKey [33 + 8]byte
2,969✔
4536
        copy(edgeKey[:], from)
2,969✔
4537
        byteOrder.PutUint64(edgeKey[33:], channelID)
2,969✔
4538

2,969✔
4539
        if edges.Get(edgeKey[:]) != nil {
2,969✔
4540
                return fmt.Errorf("cannot write unknown policy for channel %v "+
×
4541
                        " when there is already a policy present", channelID)
×
4542
        }
×
4543

4544
        return edges.Put(edgeKey[:], unknownPolicy)
2,969✔
4545
}
4546

4547
func fetchChanEdgePolicy(edges kvdb.RBucket, chanID []byte,
4548
        nodePub []byte) (*models.ChannelEdgePolicy, error) {
13,485✔
4549

13,485✔
4550
        var edgeKey [33 + 8]byte
13,485✔
4551
        copy(edgeKey[:], nodePub)
13,485✔
4552
        copy(edgeKey[33:], chanID)
13,485✔
4553

13,485✔
4554
        edgeBytes := edges.Get(edgeKey[:])
13,485✔
4555
        if edgeBytes == nil {
13,485✔
4556
                return nil, ErrEdgeNotFound
×
4557
        }
×
4558

4559
        // No need to deserialize unknown policy.
4560
        if bytes.Equal(edgeBytes, unknownPolicy) {
14,962✔
4561
                return nil, nil
1,477✔
4562
        }
1,477✔
4563

4564
        edgeReader := bytes.NewReader(edgeBytes)
12,011✔
4565

12,011✔
4566
        ep, err := deserializeChanEdgePolicy(edgeReader)
12,011✔
4567
        switch {
12,011✔
4568
        // If the db policy was missing an expected optional field, we return
4569
        // nil as if the policy was unknown.
4570
        case errors.Is(err, ErrEdgePolicyOptionalFieldNotFound):
2✔
4571
                return nil, nil
2✔
4572

4573
        // If the policy contains invalid TLV bytes, we return nil as if
4574
        // the policy was unknown.
4575
        case errors.Is(err, ErrParsingExtraTLVBytes):
×
4576
                return nil, nil
×
4577

4578
        case err != nil:
×
4579
                return nil, err
×
4580
        }
4581

4582
        return ep, nil
12,009✔
4583
}
4584

4585
func fetchChanEdgePolicies(edgeIndex kvdb.RBucket, edges kvdb.RBucket,
4586
        chanID []byte) (*models.ChannelEdgePolicy, *models.ChannelEdgePolicy,
4587
        error) {
2,902✔
4588

2,902✔
4589
        edgeInfo := edgeIndex.Get(chanID)
2,902✔
4590
        if edgeInfo == nil {
2,902✔
4591
                return nil, nil, fmt.Errorf("%w: chanID=%x", ErrEdgeNotFound,
×
4592
                        chanID)
×
4593
        }
×
4594

4595
        // The first node is contained within the first half of the edge
4596
        // information. We only propagate the error here and below if it's
4597
        // something other than edge non-existence.
4598
        node1Pub := edgeInfo[:33]
2,902✔
4599
        edge1, err := fetchChanEdgePolicy(edges, chanID, node1Pub)
2,902✔
4600
        if err != nil {
2,902✔
4601
                return nil, nil, fmt.Errorf("%w: node1Pub=%x", ErrEdgeNotFound,
×
4602
                        node1Pub)
×
4603
        }
×
4604

4605
        // Similarly, the second node is contained within the latter
4606
        // half of the edge information.
4607
        node2Pub := edgeInfo[33:66]
2,902✔
4608
        edge2, err := fetchChanEdgePolicy(edges, chanID, node2Pub)
2,902✔
4609
        if err != nil {
2,902✔
4610
                return nil, nil, fmt.Errorf("%w: node2Pub=%x", ErrEdgeNotFound,
×
4611
                        node2Pub)
×
4612
        }
×
4613

4614
        return edge1, edge2, nil
2,902✔
4615
}
4616

4617
func serializeChanEdgePolicy(w io.Writer, edge *models.ChannelEdgePolicy,
4618
        to []byte) error {
2,668✔
4619

2,668✔
4620
        err := wire.WriteVarBytes(w, 0, edge.SigBytes)
2,668✔
4621
        if err != nil {
2,668✔
4622
                return err
×
4623
        }
×
4624

4625
        if err := binary.Write(w, byteOrder, edge.ChannelID); err != nil {
2,668✔
4626
                return err
×
4627
        }
×
4628

4629
        var scratch [8]byte
2,668✔
4630
        updateUnix := uint64(edge.LastUpdate.Unix())
2,668✔
4631
        byteOrder.PutUint64(scratch[:], updateUnix)
2,668✔
4632
        if _, err := w.Write(scratch[:]); err != nil {
2,668✔
4633
                return err
×
4634
        }
×
4635

4636
        if err := binary.Write(w, byteOrder, edge.MessageFlags); err != nil {
2,668✔
4637
                return err
×
4638
        }
×
4639
        if err := binary.Write(w, byteOrder, edge.ChannelFlags); err != nil {
2,668✔
4640
                return err
×
4641
        }
×
4642
        if err := binary.Write(w, byteOrder, edge.TimeLockDelta); err != nil {
2,668✔
4643
                return err
×
4644
        }
×
4645
        if err := binary.Write(w, byteOrder, uint64(edge.MinHTLC)); err != nil {
2,668✔
4646
                return err
×
4647
        }
×
4648
        err = binary.Write(w, byteOrder, uint64(edge.FeeBaseMSat))
2,668✔
4649
        if err != nil {
2,668✔
4650
                return err
×
4651
        }
×
4652
        err = binary.Write(
2,668✔
4653
                w, byteOrder, uint64(edge.FeeProportionalMillionths),
2,668✔
4654
        )
2,668✔
4655
        if err != nil {
2,668✔
4656
                return err
×
4657
        }
×
4658

4659
        if _, err := w.Write(to); err != nil {
2,668✔
4660
                return err
×
4661
        }
×
4662

4663
        // If the max_htlc field is present, we write it. To be compatible with
4664
        // older versions that weren't aware of this field, we write it as part
4665
        // of the opaque data.
4666
        // TODO(halseth): clean up when moving to TLV.
4667
        var opaqueBuf bytes.Buffer
2,668✔
4668
        if edge.MessageFlags.HasMaxHtlc() {
4,952✔
4669
                err := binary.Write(&opaqueBuf, byteOrder, uint64(edge.MaxHTLC))
2,284✔
4670
                if err != nil {
2,284✔
4671
                        return err
×
4672
                }
×
4673
        }
4674

4675
        // Validate that the ExtraOpaqueData is in fact a valid TLV stream.
4676
        err = edge.ExtraOpaqueData.ValidateTLV()
2,668✔
4677
        if err != nil {
2,670✔
4678
                return fmt.Errorf("%w: %w", ErrParsingExtraTLVBytes, err)
2✔
4679
        }
2✔
4680

4681
        if len(edge.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes {
2,666✔
4682
                return ErrTooManyExtraOpaqueBytes(len(edge.ExtraOpaqueData))
×
4683
        }
×
4684
        if _, err := opaqueBuf.Write(edge.ExtraOpaqueData); err != nil {
2,666✔
4685
                return err
×
4686
        }
×
4687

4688
        if err := wire.WriteVarBytes(w, 0, opaqueBuf.Bytes()); err != nil {
2,666✔
4689
                return err
×
4690
        }
×
4691

4692
        return nil
2,666✔
4693
}
4694

4695
func deserializeChanEdgePolicy(r io.Reader) (*models.ChannelEdgePolicy, error) {
12,035✔
4696
        // Deserialize the policy. Note that in case an optional field is not
12,035✔
4697
        // found or if the edge has invalid TLV data, then both an error and a
12,035✔
4698
        // populated policy object are returned so that the caller can decide
12,035✔
4699
        // if it still wants to use the edge or not.
12,035✔
4700
        edge, err := deserializeChanEdgePolicyRaw(r)
12,035✔
4701
        if err != nil &&
12,035✔
4702
                !errors.Is(err, ErrEdgePolicyOptionalFieldNotFound) &&
12,035✔
4703
                !errors.Is(err, ErrParsingExtraTLVBytes) {
12,035✔
4704

×
4705
                return nil, err
×
4706
        }
×
4707

4708
        return edge, err
12,035✔
4709
}
4710

4711
func deserializeChanEdgePolicyRaw(r io.Reader) (*models.ChannelEdgePolicy,
4712
        error) {
13,042✔
4713

13,042✔
4714
        edge := &models.ChannelEdgePolicy{}
13,042✔
4715

13,042✔
4716
        var err error
13,042✔
4717
        edge.SigBytes, err = wire.ReadVarBytes(r, 0, 80, "sig")
13,042✔
4718
        if err != nil {
13,042✔
4719
                return nil, err
×
4720
        }
×
4721

4722
        if err := binary.Read(r, byteOrder, &edge.ChannelID); err != nil {
13,042✔
4723
                return nil, err
×
4724
        }
×
4725

4726
        var scratch [8]byte
13,042✔
4727
        if _, err := r.Read(scratch[:]); err != nil {
13,042✔
4728
                return nil, err
×
4729
        }
×
4730
        unix := int64(byteOrder.Uint64(scratch[:]))
13,042✔
4731
        edge.LastUpdate = time.Unix(unix, 0)
13,042✔
4732

13,042✔
4733
        if err := binary.Read(r, byteOrder, &edge.MessageFlags); err != nil {
13,042✔
4734
                return nil, err
×
4735
        }
×
4736
        if err := binary.Read(r, byteOrder, &edge.ChannelFlags); err != nil {
13,042✔
4737
                return nil, err
×
4738
        }
×
4739
        if err := binary.Read(r, byteOrder, &edge.TimeLockDelta); err != nil {
13,042✔
4740
                return nil, err
×
4741
        }
×
4742

4743
        var n uint64
13,042✔
4744
        if err := binary.Read(r, byteOrder, &n); err != nil {
13,042✔
4745
                return nil, err
×
4746
        }
×
4747
        edge.MinHTLC = lnwire.MilliSatoshi(n)
13,042✔
4748

13,042✔
4749
        if err := binary.Read(r, byteOrder, &n); err != nil {
13,042✔
4750
                return nil, err
×
4751
        }
×
4752
        edge.FeeBaseMSat = lnwire.MilliSatoshi(n)
13,042✔
4753

13,042✔
4754
        if err := binary.Read(r, byteOrder, &n); err != nil {
13,042✔
4755
                return nil, err
×
4756
        }
×
4757
        edge.FeeProportionalMillionths = lnwire.MilliSatoshi(n)
13,042✔
4758

13,042✔
4759
        if _, err := r.Read(edge.ToNode[:]); err != nil {
13,042✔
4760
                return nil, err
×
4761
        }
×
4762

4763
        // We'll try and see if there are any opaque bytes left. If not, then
4764
        // we'll ignore the EOF error and return the edge as is.
4765
        edge.ExtraOpaqueData, err = wire.ReadVarBytes(
13,042✔
4766
                r, 0, MaxAllowedExtraOpaqueBytes, "blob",
13,042✔
4767
        )
13,042✔
4768
        switch {
13,042✔
4769
        case errors.Is(err, io.ErrUnexpectedEOF):
×
4770
        case errors.Is(err, io.EOF):
4✔
4771
        case err != nil:
×
4772
                return nil, err
×
4773
        }
4774

4775
        // See if optional fields are present.
4776
        if edge.MessageFlags.HasMaxHtlc() {
25,140✔
4777
                // The max_htlc field should be at the beginning of the opaque
12,098✔
4778
                // bytes.
12,098✔
4779
                opq := edge.ExtraOpaqueData
12,098✔
4780

12,098✔
4781
                // If the max_htlc field is not present, it might be old data
12,098✔
4782
                // stored before this field was validated. We'll return the
12,098✔
4783
                // edge along with an error.
12,098✔
4784
                if len(opq) < 8 {
12,102✔
4785
                        return edge, ErrEdgePolicyOptionalFieldNotFound
4✔
4786
                }
4✔
4787

4788
                maxHtlc := byteOrder.Uint64(opq[:8])
12,094✔
4789
                edge.MaxHTLC = lnwire.MilliSatoshi(maxHtlc)
12,094✔
4790

12,094✔
4791
                // Exclude the parsed field from the rest of the opaque data.
12,094✔
4792
                edge.ExtraOpaqueData = opq[8:]
12,094✔
4793
        }
4794

4795
        // Attempt to extract the inbound fee from the opaque data. If we fail
4796
        // to parse the TLV here, we return an error but also return the edge
4797
        // so that the caller can still use it. This is for backwards
4798
        // compatibility in case we have already persisted some policies that
4799
        // have invalid TLV data.
4800
        var inboundFee lnwire.Fee
13,038✔
4801
        typeMap, err := edge.ExtraOpaqueData.ExtractRecords(&inboundFee)
13,038✔
4802
        if err != nil {
13,038✔
4803
                return edge, fmt.Errorf("%w: %w", ErrParsingExtraTLVBytes, err)
×
4804
        }
×
4805

4806
        val, ok := typeMap[lnwire.FeeRecordType]
13,038✔
4807
        if ok && val == nil {
14,722✔
4808
                edge.InboundFee = fn.Some(inboundFee)
1,684✔
4809
        }
1,684✔
4810

4811
        return edge, nil
13,038✔
4812
}
4813
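
The subtle part of the deserialization above is the opaque-data framing: when the max_htlc message flag is set, the first eight bytes of the stored blob are the max_htlc value and only the remainder is the policy's real extra TLV stream, from which the optional inbound fee is then extracted. The hypothetical helper below mirrors that framing in isolation.

// splitPolicyOpaqueData is a hypothetical helper mirroring the max_htlc
// framing used above: when the message flag is set, the first eight bytes
// of the stored opaque blob carry max_htlc and only the remainder is the
// policy's extra TLV stream.
func splitPolicyOpaqueData(hasMaxHtlc bool,
        opaque []byte) (lnwire.MilliSatoshi, []byte, error) {

        if !hasMaxHtlc {
                // Nothing is prepended; every byte is TLV data.
                return 0, opaque, nil
        }

        if len(opaque) < 8 {
                // Old data written before max_htlc was validated.
                return 0, nil, ErrEdgePolicyOptionalFieldNotFound
        }

        maxHtlc := lnwire.MilliSatoshi(byteOrder.Uint64(opaque[:8]))

        return maxHtlc, opaque[8:], nil
}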

4814
// chanGraphNodeTx is an implementation of the NodeRTx interface backed by the
4815
// KVStore and a kvdb.RTx.
4816
type chanGraphNodeTx struct {
4817
        tx   kvdb.RTx
4818
        db   *KVStore
4819
        node *models.LightningNode
4820
}
4821

4822
// A compile-time constraint to ensure chanGraphNodeTx implements the NodeRTx
4823
// interface.
4824
var _ NodeRTx = (*chanGraphNodeTx)(nil)
4825

4826
func newChanGraphNodeTx(tx kvdb.RTx, db *KVStore,
4827
        node *models.LightningNode) *chanGraphNodeTx {
4,105✔
4828

4,105✔
4829
        return &chanGraphNodeTx{
4,105✔
4830
                tx:   tx,
4,105✔
4831
                db:   db,
4,105✔
4832
                node: node,
4,105✔
4833
        }
4,105✔
4834
}
4,105✔
4835

4836
// Node returns the raw information of the node.
4837
//
4838
// NOTE: This is a part of the NodeRTx interface.
4839
func (c *chanGraphNodeTx) Node() *models.LightningNode {
5,022✔
4840
        return c.node
5,022✔
4841
}
5,022✔
4842

4843
// FetchNode fetches the node with the given pub key under the same transaction
4844
// used to fetch the current node. The returned node is also a NodeRTx and any
4845
// operations on that NodeRTx will also be done under the same transaction.
4846
//
4847
// NOTE: This is a part of the NodeRTx interface.
4848
func (c *chanGraphNodeTx) FetchNode(nodePub route.Vertex) (NodeRTx, error) {
2,944✔
4849
        node, err := c.db.FetchLightningNodeTx(c.tx, nodePub)
2,944✔
4850
        if err != nil {
2,944✔
4851
                return nil, err
×
4852
        }
×
4853

4854
        return newChanGraphNodeTx(c.tx, c.db, node), nil
2,944✔
4855
}
4856

4857
// ForEachChannel can be used to iterate over the node's channels under
4858
// the same transaction used to fetch the node.
4859
//
4860
// NOTE: This is a part of the NodeRTx interface.
4861
func (c *chanGraphNodeTx) ForEachChannel(f func(*models.ChannelEdgeInfo,
4862
        *models.ChannelEdgePolicy, *models.ChannelEdgePolicy) error) error {
965✔
4863

965✔
4864
        return c.db.forEachNodeChannelTx(c.tx, c.node.PubKeyBytes,
965✔
4865
                func(_ kvdb.RTx, info *models.ChannelEdgeInfo, policy1,
965✔
4866
                        policy2 *models.ChannelEdgePolicy) error {
3,909✔
4867

2,944✔
4868
                        return f(info, policy1, policy2)
2,944✔
4869
                },
2,944✔
4870
        )
4871
}
4872
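
The NodeRTx implementation above lets a caller hop from a node to its channel counterparties without leaving the original read transaction. The sketch below is hypothetical: forEachPeer and visit are stand-ins, and nodeTx is assumed to have been obtained from one of the KVStore iteration helpers.

// forEachPeer is a hypothetical NodeRTx consumer that visits every direct
// peer of the node, fetching each counterparty under the same read
// transaction.
func forEachPeer(nodeTx NodeRTx, visit func(*models.LightningNode) error) error {
        self := route.Vertex(nodeTx.Node().PubKeyBytes)

        return nodeTx.ForEachChannel(func(info *models.ChannelEdgeInfo,
                _, _ *models.ChannelEdgePolicy) error {

                // Pick whichever edge endpoint is not us.
                peer := route.Vertex(info.NodeKey1Bytes)
                if peer == self {
                        peer = route.Vertex(info.NodeKey2Bytes)
                }

                peerTx, err := nodeTx.FetchNode(peer)
                if err != nil {
                        return err
                }

                return visit(peerTx.Node())
        })
}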

4873
// MakeTestGraph creates a new instance of the ChannelGraph for testing
4874
// purposes.
4875
//
4876
// NOTE: this helper currently creates a ChannelGraph that is only ever backed
4877
// by the `KVStore` implementation of the `V1Store` interface.
4878
func MakeTestGraph(t testing.TB, opts ...ChanGraphOption) *ChannelGraph {
135✔
4879
        t.Helper()
135✔
4880

135✔
4881
        // Next, create KVStore for the first time.
135✔
4882
        backend, backendCleanup, err := kvdb.GetTestBackend(t.TempDir(), "cgr")
135✔
4883
        t.Cleanup(backendCleanup)
135✔
4884
        require.NoError(t, err)
135✔
4885
        t.Cleanup(func() {
270✔
4886
                require.NoError(t, backend.Close())
135✔
4887
        })
135✔
4888

4889
        graphStore, err := NewKVStore(backend)
135✔
4890
        require.NoError(t, err)
135✔
4891

135✔
4892
        graph, err := NewChannelGraph(graphStore, opts...)
135✔
4893
        require.NoError(t, err)
135✔
4894
        require.NoError(t, graph.Start())
135✔
4895
        t.Cleanup(func() {
270✔
4896
                require.NoError(t, graph.Stop())
135✔
4897
        })
135✔
4898

4899
        return graph
135✔
4900
}