• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

lightningnetwork / lnd / 19924300449

04 Dec 2025 09:35AM UTC coverage: 53.479% (-1.9%) from 55.404%
19924300449

Pull #10419

github

web-flow
Merge f811805c6 into 20473482d
Pull Request #10419: [docs] Document use-native-sql=true for SQL migration step 2

110496 of 206616 relevant lines covered (53.48%)

21221.61 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

77.54
/graph/db/kv_store.go
1
package graphdb
2

3
import (
4
        "bytes"
5
        "context"
6
        "crypto/sha256"
7
        "encoding/binary"
8
        "errors"
9
        "fmt"
10
        "image/color"
11
        "io"
12
        "iter"
13
        "math"
14
        "net"
15
        "sort"
16
        "sync"
17
        "time"
18

19
        "github.com/btcsuite/btcd/btcec/v2"
20
        "github.com/btcsuite/btcd/chaincfg/chainhash"
21
        "github.com/btcsuite/btcd/txscript"
22
        "github.com/btcsuite/btcd/wire"
23
        "github.com/btcsuite/btcwallet/walletdb"
24
        "github.com/lightningnetwork/lnd/aliasmgr"
25
        "github.com/lightningnetwork/lnd/batch"
26
        "github.com/lightningnetwork/lnd/fn/v2"
27
        "github.com/lightningnetwork/lnd/graph/db/models"
28
        "github.com/lightningnetwork/lnd/input"
29
        "github.com/lightningnetwork/lnd/kvdb"
30
        "github.com/lightningnetwork/lnd/lnwire"
31
        "github.com/lightningnetwork/lnd/routing/route"
32
)
33

34
var (
	// nodeBucket is a bucket which houses all the vertices or nodes within
	// the channel graph. This bucket has a single-sub bucket which adds an
	// additional index from pubkey -> alias. Within the top-level of this
	// bucket, the key space maps a node's compressed public key to the
	// serialized information for that node. Additionally, there's a
	// special key "source" which stores the pubkey of the source node. The
	// source node is used as the starting point for all graph/queries and
	// traversals. The graph is formed as a star-graph with the source node
	// at the center.
	//
	// maps: pubKey -> nodeInfo
	// maps: source -> selfPubKey
	nodeBucket = []byte("graph-node")

	// nodeUpdateIndexBucket is a sub-bucket of the nodeBucket. This bucket
	// will be used to quickly look up the "freshness" of a node's last
	// update to the network. The bucket only contains keys, and no values,
	// it's mapping:
	//
	// maps: updateTime || nodeID -> nil
	nodeUpdateIndexBucket = []byte("graph-node-update-index")

	// sourceKey is a special key that resides within the nodeBucket. The
	// sourceKey maps a key to the public key of the "self node".
	sourceKey = []byte("source")

	// aliasIndexBucket is a sub-bucket that's nested within the main
	// nodeBucket. This bucket maps the public key of a node to its
	// current alias. This bucket is provided as it can be used within a
	// future UI layer to add an additional degree of confirmation.
	aliasIndexBucket = []byte("alias")

	// edgeBucket is a bucket which houses all of the edge or channel
	// information within the channel graph. This bucket essentially acts
	// as an adjacency list, which in conjunction with a range scan, can be
	// used to iterate over all the incoming and outgoing edges for a
	// particular node. Keys in the bucket use a prefix scheme which leads
	// with the node's public key and ends with the compact edge ID.
	// For each chanID, there will be two entries within the bucket, as the
	// graph is directed: nodes may have different policies w.r.t to fees
	// for their respective directions.
	//
	// maps: pubKey || chanID -> channel edge policy for node
	edgeBucket = []byte("graph-edge")

	// unknownPolicy is represented as an empty slice. It is
	// used as the value in edgeBucket for unknown channel edge policies.
	// Unknown policies are still stored in the database to enable efficient
	// lookup of incoming channel edges.
	unknownPolicy = []byte{}

	// chanStart is an array of all zero bytes which is used to perform
	// range scans within the edgeBucket to obtain all of the outgoing
	// edges for a particular node.
	chanStart [8]byte

	// edgeIndexBucket is an index which can be used to iterate all edges
	// in the bucket, grouping them according to their in/out nodes.
	// Additionally, the items in this bucket also contain the complete
	// edge information for a channel. The edge information includes the
	// capacity of the channel, the nodes that made the channel, etc. This
	// bucket resides within the edgeBucket above. Creation of an edge
	// proceeds in two phases: first the edge is added to the edge index,
	// afterwards the edgeBucket can be updated with the latest details of
	// the edge as they are announced on the network.
	//
	// maps: chanID -> pubKey1 || pubKey2 || restofEdgeInfo
	edgeIndexBucket = []byte("edge-index")

	// edgeUpdateIndexBucket is a sub-bucket of the main edgeBucket. This
	// bucket contains an index which allows us to gauge the "freshness" of
	// a channel's last updates.
	//
	// maps: updateTime || chanID -> nil
	edgeUpdateIndexBucket = []byte("edge-update-index")

	// channelPointBucket maps a channel's full outpoint (txid:index) to
	// its short 8-byte channel ID. This bucket resides within the
	// edgeBucket above, and can be used to quickly remove an edge due to
	// the outpoint being spent, or to query for existence of a channel.
	//
	// maps: outPoint -> chanID
	channelPointBucket = []byte("chan-index")

	// zombieBucket is a sub-bucket of the main edgeBucket bucket
	// responsible for maintaining an index of zombie channels. Each entry
	// exists within the bucket as follows:
	//
	// maps: chanID -> pubKey1 || pubKey2
	//
	// The chanID represents the channel ID of the edge that is marked as a
	// zombie and is used as the key, which maps to the public keys of the
	// edge's participants.
	zombieBucket = []byte("zombie-index")

	// disabledEdgePolicyBucket is a sub-bucket of the main edgeBucket
	// bucket responsible for maintaining an index of disabled edge
	// policies. Each entry exists within the bucket as follows:
	//
	// maps: <chanID><direction> -> []byte{}
	//
	// The chanID represents the channel ID of the edge and the direction is
	// one byte representing the direction of the edge. The main purpose of
	// this index is to allow pruning disabled channels in a fast way
	// without the need to iterate all over the graph.
	disabledEdgePolicyBucket = []byte("disabled-edge-policy-index")

	// graphMetaBucket is a top-level bucket which stores various meta-data
	// related to the on-disk channel graph. Data stored in this bucket
	// includes the block to which the graph has been synced to, the total
	// number of channels, etc.
	graphMetaBucket = []byte("graph-meta")

	// pruneLogBucket is a bucket within the graphMetaBucket that stores
	// a mapping from the block height to the hash for the blocks used to
	// prune the graph.
	// Once a new block is discovered, any channels that have been closed
	// (by spending the outpoint) can safely be removed from the graph, and
	// the block is added to the prune log. We need to keep such a log for
	// the case where a reorg happens, and we must "rewind" the state of the
	// graph by removing channels that were previously confirmed. In such a
	// case we'll remove all entries from the prune log with a block height
	// that no longer exists.
	pruneLogBucket = []byte("prune-log")

	// closedScidBucket is a top-level bucket that stores scids for
	// channels that we know to be closed. This is used so that we don't
	// need to perform expensive validation checks if we receive a channel
	// announcement for the channel again.
	//
	// maps: scid -> []byte{}
	closedScidBucket = []byte("closed-scid")
)
168

169
const (
	// MaxAllowedExtraOpaqueBytes caps the number of opaque bytes that may
	// be written to disk for a single record. Without such a bound, a
	// remote node could produce a stream of oversized updates that slowly
	// fills our disk and wastes bandwidth through relaying.
	MaxAllowedExtraOpaqueBytes = 10000
)
176

177
// KVStore is a persistent, on-disk graph representation of the Lightning
178
// Network. This struct can be used to implement path finding algorithms on top
179
// of, and also to update a node's view based on information received from the
180
// p2p network. Internally, the graph is stored using a modified adjacency list
181
// representation with some added object interaction possible with each
182
// serialized edge/node. The graph is stored is directed, meaning that are two
183
// edges stored for each channel: an inbound/outbound edge for each node pair.
184
// Nodes, edges, and edge information can all be added to the graph
185
// independently. Edge removal results in the deletion of all edge information
186
// for that edge.
187
type KVStore struct {
188
        db kvdb.Backend
189

190
        // cacheMu guards all caches (rejectCache and chanCache). If
191
        // this mutex will be acquired at the same time as the DB mutex then
192
        // the cacheMu MUST be acquired first to prevent deadlock.
193
        cacheMu     sync.RWMutex
194
        rejectCache *rejectCache
195
        chanCache   *channelCache
196

197
        chanScheduler batch.Scheduler[kvdb.RwTx]
198
        nodeScheduler batch.Scheduler[kvdb.RwTx]
199
}
200

201
// A compile-time assertion to ensure that the KVStore struct implements the
202
// V1Store interface.
203
var _ V1Store = (*KVStore)(nil)
204

205
// NewKVStore allocates a new KVStore backed by a DB instance. The
206
// returned instance has its own unique reject cache and channel cache.
207
func NewKVStore(db kvdb.Backend, options ...StoreOptionModifier) (*KVStore,
208
        error) {
182✔
209

182✔
210
        opts := DefaultOptions()
182✔
211
        for _, o := range options {
182✔
212
                o(opts)
×
213
        }
×
214

215
        if !opts.NoMigration {
364✔
216
                if err := initKVStore(db); err != nil {
182✔
217
                        return nil, err
×
218
                }
×
219
        }
220

221
        g := &KVStore{
182✔
222
                db:          db,
182✔
223
                rejectCache: newRejectCache(opts.RejectCacheSize),
182✔
224
                chanCache:   newChannelCache(opts.ChannelCacheSize),
182✔
225
        }
182✔
226
        g.chanScheduler = batch.NewTimeScheduler(
182✔
227
                batch.NewBoltBackend[kvdb.RwTx](db), &g.cacheMu,
182✔
228
                opts.BatchCommitInterval,
182✔
229
        )
182✔
230
        g.nodeScheduler = batch.NewTimeScheduler(
182✔
231
                batch.NewBoltBackend[kvdb.RwTx](db), nil,
182✔
232
                opts.BatchCommitInterval,
182✔
233
        )
182✔
234

182✔
235
        return g, nil
182✔
236
}
237

238
// channelMapKey is the key structure used for storing channel edge policies.
239
type channelMapKey struct {
240
        nodeKey route.Vertex
241
        chanID  [8]byte
242
}
243

244
// String returns a human-readable representation of the key.
245
func (c channelMapKey) String() string {
×
246
        return fmt.Sprintf("node=%v, chanID=%x", c.nodeKey, c.chanID)
×
247
}
×
248

249
// getChannelMap loads all channel edge policies from the database and stores
250
// them in a map.
251
func getChannelMap(edges kvdb.RBucket) (
252
        map[channelMapKey]*models.ChannelEdgePolicy, error) {
157✔
253

157✔
254
        // Create a map to store all channel edge policies.
157✔
255
        channelMap := make(map[channelMapKey]*models.ChannelEdgePolicy)
157✔
256

157✔
257
        err := kvdb.ForAll(edges, func(k, edgeBytes []byte) error {
1,789✔
258
                // Skip embedded buckets.
1,632✔
259
                if bytes.Equal(k, edgeIndexBucket) ||
1,632✔
260
                        bytes.Equal(k, edgeUpdateIndexBucket) ||
1,632✔
261
                        bytes.Equal(k, zombieBucket) ||
1,632✔
262
                        bytes.Equal(k, disabledEdgePolicyBucket) ||
1,632✔
263
                        bytes.Equal(k, channelPointBucket) {
2,268✔
264

636✔
265
                        return nil
636✔
266
                }
636✔
267

268
                // Validate key length.
269
                if len(k) != 33+8 {
996✔
270
                        return fmt.Errorf("invalid edge key %x encountered", k)
×
271
                }
×
272

273
                var key channelMapKey
996✔
274
                copy(key.nodeKey[:], k[:33])
996✔
275
                copy(key.chanID[:], k[33:])
996✔
276

996✔
277
                // No need to deserialize unknown policy.
996✔
278
                if bytes.Equal(edgeBytes, unknownPolicy) {
996✔
279
                        return nil
×
280
                }
×
281

282
                edgeReader := bytes.NewReader(edgeBytes)
996✔
283
                edge, err := deserializeChanEdgePolicyRaw(
996✔
284
                        edgeReader,
996✔
285
                )
996✔
286

996✔
287
                switch {
996✔
288
                // If the db policy was missing an expected optional field, we
289
                // return nil as if the policy was unknown.
290
                case errors.Is(err, ErrEdgePolicyOptionalFieldNotFound):
×
291
                        return nil
×
292

293
                // We don't want a single policy with bad TLV data to stop us
294
                // from loading the rest of the data, so we just skip this
295
                // policy. This is for backwards compatibility since we did not
296
                // use to validate TLV data in the past before persisting it.
297
                case errors.Is(err, ErrParsingExtraTLVBytes):
×
298
                        return nil
×
299

300
                case err != nil:
×
301
                        return err
×
302
                }
303

304
                channelMap[key] = edge
996✔
305

996✔
306
                return nil
996✔
307
        })
308
        if err != nil {
157✔
309
                return nil, err
×
310
        }
×
311

312
        return channelMap, nil
157✔
313
}
314

315
var graphTopLevelBuckets = [][]byte{
316
        nodeBucket,
317
        edgeBucket,
318
        graphMetaBucket,
319
        closedScidBucket,
320
}
321

322
// createChannelDB creates and initializes a fresh version of  In
323
// the case that the target path has not yet been created or doesn't yet exist,
324
// then the path is created. Additionally, all required top-level buckets used
325
// within the database are created.
326
func initKVStore(db kvdb.Backend) error {
182✔
327
        err := kvdb.Update(db, func(tx kvdb.RwTx) error {
364✔
328
                for _, tlb := range graphTopLevelBuckets {
910✔
329
                        if _, err := tx.CreateTopLevelBucket(tlb); err != nil {
728✔
330
                                return err
×
331
                        }
×
332
                }
333

334
                nodes := tx.ReadWriteBucket(nodeBucket)
182✔
335
                _, err := nodes.CreateBucketIfNotExists(aliasIndexBucket)
182✔
336
                if err != nil {
182✔
337
                        return err
×
338
                }
×
339
                _, err = nodes.CreateBucketIfNotExists(nodeUpdateIndexBucket)
182✔
340
                if err != nil {
182✔
341
                        return err
×
342
                }
×
343

344
                edges := tx.ReadWriteBucket(edgeBucket)
182✔
345
                _, err = edges.CreateBucketIfNotExists(edgeIndexBucket)
182✔
346
                if err != nil {
182✔
347
                        return err
×
348
                }
×
349
                _, err = edges.CreateBucketIfNotExists(edgeUpdateIndexBucket)
182✔
350
                if err != nil {
182✔
351
                        return err
×
352
                }
×
353
                _, err = edges.CreateBucketIfNotExists(channelPointBucket)
182✔
354
                if err != nil {
182✔
355
                        return err
×
356
                }
×
357
                _, err = edges.CreateBucketIfNotExists(zombieBucket)
182✔
358
                if err != nil {
182✔
359
                        return err
×
360
                }
×
361

362
                graphMeta := tx.ReadWriteBucket(graphMetaBucket)
182✔
363
                _, err = graphMeta.CreateBucketIfNotExists(pruneLogBucket)
182✔
364

182✔
365
                return err
182✔
366
        }, func() {})
182✔
367
        if err != nil {
182✔
368
                return fmt.Errorf("unable to create new channel graph: %w", err)
×
369
        }
×
370

371
        return nil
182✔
372
}
373

374
// AddrsForNode returns all known addresses for the target node public key that
375
// the graph DB is aware of. The returned boolean indicates if the given node is
376
// unknown to the graph DB or not.
377
//
378
// NOTE: this is part of the channeldb.AddrSource interface.
379
func (c *KVStore) AddrsForNode(ctx context.Context,
380
        nodePub *btcec.PublicKey) (bool, []net.Addr, error) {
3✔
381

3✔
382
        pubKey, err := route.NewVertexFromBytes(nodePub.SerializeCompressed())
3✔
383
        if err != nil {
3✔
384
                return false, nil, err
×
385
        }
×
386

387
        node, err := c.FetchNode(ctx, pubKey)
3✔
388
        // We don't consider it an error if the graph is unaware of the node.
3✔
389
        switch {
3✔
390
        case err != nil && !errors.Is(err, ErrGraphNodeNotFound):
×
391
                return false, nil, err
×
392

393
        case errors.Is(err, ErrGraphNodeNotFound):
1✔
394
                return false, nil, nil
1✔
395
        }
396

397
        return true, node.Addresses, nil
2✔
398
}
399

400
// ForEachChannel iterates through all the channel edges stored within the
401
// graph and invokes the passed callback for each edge. The callback takes two
402
// edges as since this is a directed graph, both the in/out edges are visited.
403
// If the callback returns an error, then the transaction is aborted and the
404
// iteration stops early.
405
//
406
// NOTE: If an edge can't be found, or wasn't advertised, then a nil pointer
407
// for that particular channel edge routing policy will be passed into the
408
// callback.
409
func (c *KVStore) ForEachChannel(_ context.Context,
410
        cb func(*models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
411
                *models.ChannelEdgePolicy) error, reset func()) error {
7✔
412

7✔
413
        return forEachChannel(c.db, cb, reset)
7✔
414
}
7✔
415

416
// forEachChannel iterates through all the channel edges stored within the
417
// graph and invokes the passed callback for each edge. The callback takes two
418
// edges as since this is a directed graph, both the in/out edges are visited.
419
// If the callback returns an error, then the transaction is aborted and the
420
// iteration stops early.
421
//
422
// NOTE: If an edge can't be found, or wasn't advertised, then a nil pointer
423
// for that particular channel edge routing policy will be passed into the
424
// callback.
425
func forEachChannel(db kvdb.Backend, cb func(*models.ChannelEdgeInfo,
426
        *models.ChannelEdgePolicy, *models.ChannelEdgePolicy) error,
427
        reset func()) error {
7✔
428

7✔
429
        return db.View(func(tx kvdb.RTx) error {
14✔
430
                edges := tx.ReadBucket(edgeBucket)
7✔
431
                if edges == nil {
7✔
432
                        return ErrGraphNoEdgesFound
×
433
                }
×
434

435
                // First, load all edges in memory indexed by node and channel
436
                // id.
437
                channelMap, err := getChannelMap(edges)
7✔
438
                if err != nil {
7✔
439
                        return err
×
440
                }
×
441

442
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
7✔
443
                if edgeIndex == nil {
7✔
444
                        return ErrGraphNoEdgesFound
×
445
                }
×
446

447
                // Load edge index, recombine each channel with the policies
448
                // loaded above and invoke the callback.
449
                return kvdb.ForAll(
7✔
450
                        edgeIndex, func(k, edgeInfoBytes []byte) error {
109✔
451
                                var chanID [8]byte
102✔
452
                                copy(chanID[:], k)
102✔
453

102✔
454
                                edgeInfoReader := bytes.NewReader(edgeInfoBytes)
102✔
455
                                info, err := deserializeChanEdgeInfo(
102✔
456
                                        edgeInfoReader,
102✔
457
                                )
102✔
458
                                if err != nil {
102✔
459
                                        return err
×
460
                                }
×
461

462
                                policy1 := channelMap[channelMapKey{
102✔
463
                                        nodeKey: info.NodeKey1Bytes,
102✔
464
                                        chanID:  chanID,
102✔
465
                                }]
102✔
466

102✔
467
                                policy2 := channelMap[channelMapKey{
102✔
468
                                        nodeKey: info.NodeKey2Bytes,
102✔
469
                                        chanID:  chanID,
102✔
470
                                }]
102✔
471

102✔
472
                                return cb(info, policy1, policy2)
102✔
473
                        },
474
                )
475
        }, reset)
476
}
477

478
// ForEachChannelCacheable iterates through all the channel edges stored within
479
// the graph and invokes the passed callback for each edge. The callback takes
480
// two edges as since this is a directed graph, both the in/out edges are
481
// visited. If the callback returns an error, then the transaction is aborted
482
// and the iteration stops early.
483
//
484
// NOTE: If an edge can't be found, or wasn't advertised, then a nil pointer
485
// for that particular channel edge routing policy will be passed into the
486
// callback.
487
//
488
// NOTE: this method is like ForEachChannel but fetches only the data required
489
// for the graph cache.
490
func (c *KVStore) ForEachChannelCacheable(cb func(*models.CachedEdgeInfo,
491
        *models.CachedEdgePolicy, *models.CachedEdgePolicy) error,
492
        reset func()) error {
150✔
493

150✔
494
        return c.db.View(func(tx kvdb.RTx) error {
300✔
495
                edges := tx.ReadBucket(edgeBucket)
150✔
496
                if edges == nil {
150✔
497
                        return ErrGraphNoEdgesFound
×
498
                }
×
499

500
                // First, load all edges in memory indexed by node and channel
501
                // id.
502
                channelMap, err := getChannelMap(edges)
150✔
503
                if err != nil {
150✔
504
                        return err
×
505
                }
×
506

507
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
150✔
508
                if edgeIndex == nil {
150✔
509
                        return ErrGraphNoEdgesFound
×
510
                }
×
511

512
                // Load edge index, recombine each channel with the policies
513
                // loaded above and invoke the callback.
514
                return kvdb.ForAll(
150✔
515
                        edgeIndex, func(k, edgeInfoBytes []byte) error {
546✔
516
                                var chanID [8]byte
396✔
517
                                copy(chanID[:], k)
396✔
518

396✔
519
                                edgeInfoReader := bytes.NewReader(edgeInfoBytes)
396✔
520
                                info, err := deserializeChanEdgeInfo(
396✔
521
                                        edgeInfoReader,
396✔
522
                                )
396✔
523
                                if err != nil {
396✔
524
                                        return err
×
525
                                }
×
526

527
                                key1 := channelMapKey{
396✔
528
                                        nodeKey: info.NodeKey1Bytes,
396✔
529
                                        chanID:  chanID,
396✔
530
                                }
396✔
531
                                policy1 := channelMap[key1]
396✔
532

396✔
533
                                key2 := channelMapKey{
396✔
534
                                        nodeKey: info.NodeKey2Bytes,
396✔
535
                                        chanID:  chanID,
396✔
536
                                }
396✔
537
                                policy2 := channelMap[key2]
396✔
538

396✔
539
                                // We now create the cached edge policies, but
396✔
540
                                // only when the above policies are found in the
396✔
541
                                // `channelMap`.
396✔
542
                                var (
396✔
543
                                        cachedPolicy1 *models.CachedEdgePolicy
396✔
544
                                        cachedPolicy2 *models.CachedEdgePolicy
396✔
545
                                )
396✔
546

396✔
547
                                if policy1 != nil {
792✔
548
                                        cachedPolicy1 = models.NewCachedPolicy(
396✔
549
                                                policy1,
396✔
550
                                        )
396✔
551
                                }
396✔
552

553
                                if policy2 != nil {
792✔
554
                                        cachedPolicy2 = models.NewCachedPolicy(
396✔
555
                                                policy2,
396✔
556
                                        )
396✔
557
                                }
396✔
558

559
                                return cb(
396✔
560
                                        models.NewCachedEdge(info),
396✔
561
                                        cachedPolicy1, cachedPolicy2,
396✔
562
                                )
396✔
563
                        },
564
                )
565
        }, reset)
566
}
567

568
// forEachNodeDirectedChannel iterates through all channels of a given node,
569
// executing the passed callback on the directed edge representing the channel
570
// and its incoming policy. If the callback returns an error, then the iteration
571
// is halted with the error propagated back up to the caller. An optional read
572
// transaction may be provided. If none is provided, a new one will be created.
573
//
574
// Unknown policies are passed into the callback as nil values.
575
//
576
// NOTE: the reset param is only meaningful if the tx param is nil. If it is
577
// not nil, the caller is expected to have passed in a reset to the parent
578
// function's View/Update call which will then apply to the whole transaction.
579
func (c *KVStore) forEachNodeDirectedChannel(tx kvdb.RTx,
580
        node route.Vertex, cb func(channel *DirectedChannel) error,
581
        reset func()) error {
262✔
582

262✔
583
        // Fallback that uses the database.
262✔
584
        toNodeCallback := func() route.Vertex {
394✔
585
                return node
132✔
586
        }
132✔
587
        toNodeFeatures, err := c.fetchNodeFeatures(tx, node)
262✔
588
        if err != nil {
262✔
589
                return err
×
590
        }
×
591

592
        dbCallback := func(tx kvdb.RTx, e *models.ChannelEdgeInfo, p1,
262✔
593
                p2 *models.ChannelEdgePolicy) error {
948✔
594

686✔
595
                var cachedInPolicy *models.CachedEdgePolicy
686✔
596
                if p2 != nil {
1,369✔
597
                        cachedInPolicy = models.NewCachedPolicy(p2)
683✔
598
                        cachedInPolicy.ToNodePubKey = toNodeCallback
683✔
599
                        cachedInPolicy.ToNodeFeatures = toNodeFeatures
683✔
600
                }
683✔
601

602
                directedChannel := &DirectedChannel{
686✔
603
                        ChannelID:    e.ChannelID,
686✔
604
                        IsNode1:      node == e.NodeKey1Bytes,
686✔
605
                        OtherNode:    e.NodeKey2Bytes,
686✔
606
                        Capacity:     e.Capacity,
686✔
607
                        OutPolicySet: p1 != nil,
686✔
608
                        InPolicy:     cachedInPolicy,
686✔
609
                }
686✔
610

686✔
611
                if p1 != nil {
1,371✔
612
                        p1.InboundFee.WhenSome(func(fee lnwire.Fee) {
1,021✔
613
                                directedChannel.InboundFee = fee
336✔
614
                        })
336✔
615
                }
616

617
                if node == e.NodeKey2Bytes {
1,032✔
618
                        directedChannel.OtherNode = e.NodeKey1Bytes
346✔
619
                }
346✔
620

621
                return cb(directedChannel)
686✔
622
        }
623

624
        return nodeTraversal(tx, node[:], c.db, dbCallback, reset)
262✔
625
}
626

627
// fetchNodeFeatures returns the features of a given node. If no features are
628
// known for the node, an empty feature vector is returned. An optional read
629
// transaction may be provided. If none is provided, a new one will be created.
630
func (c *KVStore) fetchNodeFeatures(tx kvdb.RTx,
631
        node route.Vertex) (*lnwire.FeatureVector, error) {
3,651✔
632

3,651✔
633
        // Fallback that uses the database.
3,651✔
634
        targetNode, err := c.fetchNodeTx(tx, node)
3,651✔
635
        switch {
3,651✔
636
        // If the node exists and has features, return them directly.
637
        case err == nil:
3,640✔
638
                return targetNode.Features, nil
3,640✔
639

640
        // If we couldn't find a node announcement, populate a blank feature
641
        // vector.
642
        case errors.Is(err, ErrGraphNodeNotFound):
11✔
643
                return lnwire.EmptyFeatureVector(), nil
11✔
644

645
        // Otherwise, bubble the error up.
646
        default:
×
647
                return nil, err
×
648
        }
649
}
650

651
// ForEachNodeDirectedChannel iterates through all channels of a given node,
652
// executing the passed callback on the directed edge representing the channel
653
// and its incoming policy. If the callback returns an error, then the iteration
654
// is halted with the error propagated back up to the caller.
655
//
656
// Unknown policies are passed into the callback as nil values.
657
//
658
// NOTE: this is part of the graphdb.NodeTraverser interface.
659
func (c *KVStore) ForEachNodeDirectedChannel(nodePub route.Vertex,
660
        cb func(channel *DirectedChannel) error, reset func()) error {
23✔
661

23✔
662
        return c.forEachNodeDirectedChannel(nil, nodePub, cb, reset)
23✔
663
}
23✔
664

665
// FetchNodeFeatures returns the features of the given node. If no features are
// known for the node, an empty feature vector is returned.
//
// NOTE: this is part of the graphdb.NodeTraverser interface.
func (c *KVStore) FetchNodeFeatures(nodePub route.Vertex) (
	*lnwire.FeatureVector, error) {

	// Pass a nil transaction so that the helper opens its own read
	// transaction for the lookup.
	return c.fetchNodeFeatures(nil, nodePub)
}
1✔
674

675
// ForEachNodeCached is similar to forEachNode, but it returns DirectedChannel
// data to the call-back.
//
// NOTE: The callback contents MUST not be modified.
func (c *KVStore) ForEachNodeCached(ctx context.Context, withAddrs bool,
	cb func(ctx context.Context, node route.Vertex, addrs []net.Addr,
		chans map[uint64]*DirectedChannel) error, reset func()) error {

	// Otherwise call back to a version that uses the database directly.
	// We'll iterate over each node, then the set of channels for each
	// node, and construct a similar callback function signature as the
	// main function expects.
	return forEachNode(c.db, func(tx kvdb.RTx,
		node *models.Node) error {

		// Collect every channel of this node, keyed by channel ID.
		channels := make(map[uint64]*DirectedChannel)

		err := c.forEachNodeChannelTx(tx, node.PubKeyBytes,
			func(tx kvdb.RTx, e *models.ChannelEdgeInfo,
				p1 *models.ChannelEdgePolicy,
				p2 *models.ChannelEdgePolicy) error {

				toNodeCallback := func() route.Vertex {
					return node.PubKeyBytes
				}
				toNodeFeatures, err := c.fetchNodeFeatures(
					tx, node.PubKeyBytes,
				)
				if err != nil {
					return err
				}

				// Only build a cached incoming policy when one
				// is actually known; unknown policies stay nil.
				var cachedInPolicy *models.CachedEdgePolicy
				if p2 != nil {
					cachedInPolicy =
						models.NewCachedPolicy(p2)
					cachedInPolicy.ToNodePubKey =
						toNodeCallback
					cachedInPolicy.ToNodeFeatures =
						toNodeFeatures
				}

				directedChannel := &DirectedChannel{
					ChannelID: e.ChannelID,
					IsNode1: node.PubKeyBytes ==
						e.NodeKey1Bytes,
					OtherNode:    e.NodeKey2Bytes,
					Capacity:     e.Capacity,
					OutPolicySet: p1 != nil,
					InPolicy:     cachedInPolicy,
				}

				// OtherNode defaults to node 2; flip it to
				// node 1 when the node under iteration is
				// itself node 2 of the channel.
				if node.PubKeyBytes == e.NodeKey2Bytes {
					directedChannel.OtherNode =
						e.NodeKey1Bytes
				}

				channels[e.ChannelID] = directedChannel

				return nil
			}, reset,
		)
		if err != nil {
			return err
		}

		// Addresses are only surfaced when the caller asked for them.
		var addrs []net.Addr
		if withAddrs {
			addrs = node.Addresses
		}

		return cb(ctx, node.PubKeyBytes, addrs, channels)
	}, reset)
}
749

750
// DisabledChannelIDs returns the channel ids of disabled channels.
751
// A channel is disabled when two of the associated ChanelEdgePolicies
752
// have their disabled bit on.
753
func (c *KVStore) DisabledChannelIDs() ([]uint64, error) {
6✔
754
        var disabledChanIDs []uint64
6✔
755
        var chanEdgeFound map[uint64]struct{}
6✔
756

6✔
757
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
12✔
758
                edges := tx.ReadBucket(edgeBucket)
6✔
759
                if edges == nil {
6✔
760
                        return ErrGraphNoEdgesFound
×
761
                }
×
762

763
                disabledEdgePolicyIndex := edges.NestedReadBucket(
6✔
764
                        disabledEdgePolicyBucket,
6✔
765
                )
6✔
766
                if disabledEdgePolicyIndex == nil {
7✔
767
                        return nil
1✔
768
                }
1✔
769

770
                // We iterate over all disabled policies and we add each channel
771
                // that has more than one disabled policy to disabledChanIDs
772
                // array.
773
                return disabledEdgePolicyIndex.ForEach(
5✔
774
                        func(k, v []byte) error {
16✔
775
                                chanID := byteOrder.Uint64(k[:8])
11✔
776
                                _, edgeFound := chanEdgeFound[chanID]
11✔
777
                                if edgeFound {
15✔
778
                                        delete(chanEdgeFound, chanID)
4✔
779
                                        disabledChanIDs = append(
4✔
780
                                                disabledChanIDs, chanID,
4✔
781
                                        )
4✔
782

4✔
783
                                        return nil
4✔
784
                                }
4✔
785

786
                                chanEdgeFound[chanID] = struct{}{}
7✔
787

7✔
788
                                return nil
7✔
789
                        },
790
                )
791
        }, func() {
6✔
792
                disabledChanIDs = nil
6✔
793
                chanEdgeFound = make(map[uint64]struct{})
6✔
794
        })
6✔
795
        if err != nil {
6✔
796
                return nil, err
×
797
        }
×
798

799
        return disabledChanIDs, nil
6✔
800
}
801

802
// ForEachNode iterates through all the stored vertices/nodes in the graph,
803
// executing the passed callback with each node encountered. If the callback
804
// returns an error, then the transaction is aborted and the iteration stops
805
// early.
806
//
807
// NOTE: this is part of the V1Store interface.
808
func (c *KVStore) ForEachNode(_ context.Context,
809
        cb func(*models.Node) error, reset func()) error {
10✔
810

10✔
811
        return forEachNode(c.db, func(tx kvdb.RTx,
10✔
812
                node *models.Node) error {
203✔
813

193✔
814
                return cb(node)
193✔
815
        }, reset)
193✔
816
}
817

818
// forEachNode iterates through all the stored vertices/nodes in the graph,
// executing the passed callback with each node encountered. If the callback
// returns an error, then the transaction is aborted and the iteration stops
// early.
//
// TODO(roasbeef): add iterator interface to allow for memory efficient graph
// traversal when graph gets mega.
func forEachNode(db kvdb.Backend,
	cb func(kvdb.RTx, *models.Node) error, reset func()) error {

	traversal := func(tx kvdb.RTx) error {
		// First grab the nodes bucket which stores the mapping from
		// pubKey to node information.
		nodes := tx.ReadBucket(nodeBucket)
		if nodes == nil {
			return ErrGraphNotFound
		}

		return nodes.ForEach(func(pubKey, nodeBytes []byte) error {
			// If this is the source key, then we skip this
			// iteration as the value for this key is a pubKey
			// rather than raw node information. Keys that are not
			// 33 bytes long cannot be compressed public keys, so
			// they are skipped as well.
			if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 {
				return nil
			}

			nodeReader := bytes.NewReader(nodeBytes)
			node, err := deserializeLightningNode(nodeReader)
			if err != nil {
				return err
			}

			// Execute the callback, the transaction will abort if
			// this returns an error.
			return cb(tx, node)
		})
	}

	return kvdb.View(db, traversal, reset)
}
858

859
// ForEachNodeCacheable iterates through all the stored vertices/nodes in the
// graph, executing the passed callback with each node encountered. If the
// callback returns an error, then the transaction is aborted and the iteration
// stops early.
func (c *KVStore) ForEachNodeCacheable(_ context.Context,
	cb func(route.Vertex, *lnwire.FeatureVector) error,
	reset func()) error {

	traversal := func(tx kvdb.RTx) error {
		// First grab the nodes bucket which stores the mapping from
		// pubKey to node information.
		nodes := tx.ReadBucket(nodeBucket)
		if nodes == nil {
			return ErrGraphNotFound
		}

		return nodes.ForEach(func(pubKey, nodeBytes []byte) error {
			// If this is the source key, then we skip this
			// iteration as the value for this key is a pubKey
			// rather than raw node information. Keys that are not
			// 33 bytes long cannot be compressed public keys, so
			// they are skipped as well.
			if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 {
				return nil
			}

			nodeReader := bytes.NewReader(nodeBytes)
			node, features, err := deserializeLightningNodeCacheable( //nolint:ll
				nodeReader,
			)
			if err != nil {
				return err
			}

			// Execute the callback, the transaction will abort if
			// this returns an error.
			return cb(node, features)
		})
	}

	return kvdb.View(c.db, traversal, reset)
}
899

900
// SourceNode returns the source node of the graph. The source node is treated
// as the center node within a star-graph. This method may be used to kick off
// a path finding algorithm in order to explore the reachability of another
// node based off the source node.
func (c *KVStore) SourceNode(_ context.Context) (*models.Node, error) {
	// Delegate to the package-level helper, which opens its own read
	// transaction.
	return sourceNode(c.db)
}
240✔
907

908
// sourceNode fetches the source node of the graph. The source node is treated
909
// as the center node within a star-graph.
910
func sourceNode(db kvdb.Backend) (*models.Node, error) {
240✔
911
        var source *models.Node
240✔
912
        err := kvdb.View(db, func(tx kvdb.RTx) error {
480✔
913
                // First grab the nodes bucket which stores the mapping from
240✔
914
                // pubKey to node information.
240✔
915
                nodes := tx.ReadBucket(nodeBucket)
240✔
916
                if nodes == nil {
240✔
917
                        return ErrGraphNotFound
×
918
                }
×
919

920
                node, err := sourceNodeWithTx(nodes)
240✔
921
                if err != nil {
241✔
922
                        return err
1✔
923
                }
1✔
924
                source = node
239✔
925

239✔
926
                return nil
239✔
927
        }, func() {
240✔
928
                source = nil
240✔
929
        })
240✔
930
        if err != nil {
241✔
931
                return nil, err
1✔
932
        }
1✔
933

934
        return source, nil
239✔
935
}
936

937
// sourceNodeWithTx uses an existing database transaction and returns the source
938
// node of the graph. The source node is treated as the center node within a
939
// star-graph. This method may be used to kick off a path finding algorithm in
940
// order to explore the reachability of another node based off the source node.
941
func sourceNodeWithTx(nodes kvdb.RBucket) (*models.Node, error) {
504✔
942
        selfPub := nodes.Get(sourceKey)
504✔
943
        if selfPub == nil {
505✔
944
                return nil, ErrSourceNodeNotSet
1✔
945
        }
1✔
946

947
        // With the pubKey of the source node retrieved, we're able to
948
        // fetch the full node information.
949
        return fetchLightningNode(nodes, selfPub)
503✔
950
}
951

952
// SetSourceNode sets the source node within the graph database. The source
953
// node is to be used as the center of a star-graph within path finding
954
// algorithms.
955
func (c *KVStore) SetSourceNode(_ context.Context,
956
        node *models.Node) error {
116✔
957

116✔
958
        nodePubBytes := node.PubKeyBytes[:]
116✔
959

116✔
960
        return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
232✔
961
                // First grab the nodes bucket which stores the mapping from
116✔
962
                // pubKey to node information.
116✔
963
                nodes, err := tx.CreateTopLevelBucket(nodeBucket)
116✔
964
                if err != nil {
116✔
965
                        return err
×
966
                }
×
967

968
                // Next we create the mapping from source to the targeted
969
                // public key.
970
                if err := nodes.Put(sourceKey, nodePubBytes); err != nil {
116✔
971
                        return err
×
972
                }
×
973

974
                // Finally, we commit the information of the lightning node
975
                // itself.
976
                return addLightningNode(tx, node)
116✔
977
        }, func() {})
116✔
978
}
979

980
// AddNode adds a vertex/node to the graph database. If the node is not
// in the database from before, this will add a new, unconnected one to the
// graph. If it is present from before, this will update that node's
// information. Note that this method is expected to only be called to update an
// already present node from a node announcement, or to insert a node found in a
// channel update.
//
// TODO(roasbeef): also need sig of announcement.
func (c *KVStore) AddNode(ctx context.Context,
	node *models.Node, opts ...batch.SchedulerOption) error {

	// Wrap the write in a batch request so the node scheduler may combine
	// it with other pending node writes.
	r := &batch.Request[kvdb.RwTx]{
		Opts: batch.NewSchedulerOptions(opts...),
		Do: func(tx kvdb.RwTx) error {
			return addLightningNode(tx, node)
		},
	}

	return c.nodeScheduler.Execute(ctx, r)
}
1000

1001
func addLightningNode(tx kvdb.RwTx, node *models.Node) error {
1,169✔
1002
        nodes, err := tx.CreateTopLevelBucket(nodeBucket)
1,169✔
1003
        if err != nil {
1,169✔
1004
                return err
×
1005
        }
×
1006

1007
        aliases, err := nodes.CreateBucketIfNotExists(aliasIndexBucket)
1,169✔
1008
        if err != nil {
1,169✔
1009
                return err
×
1010
        }
×
1011

1012
        updateIndex, err := nodes.CreateBucketIfNotExists(
1,169✔
1013
                nodeUpdateIndexBucket,
1,169✔
1014
        )
1,169✔
1015
        if err != nil {
1,169✔
1016
                return err
×
1017
        }
×
1018

1019
        return putLightningNode(nodes, aliases, updateIndex, node)
1,169✔
1020
}
1021

1022
// LookupAlias attempts to return the alias as advertised by the target node.
1023
// TODO(roasbeef): currently assumes that aliases are unique...
1024
func (c *KVStore) LookupAlias(_ context.Context,
1025
        pub *btcec.PublicKey) (string, error) {
2✔
1026

2✔
1027
        var alias string
2✔
1028

2✔
1029
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
4✔
1030
                nodes := tx.ReadBucket(nodeBucket)
2✔
1031
                if nodes == nil {
2✔
1032
                        return ErrGraphNodesNotFound
×
1033
                }
×
1034

1035
                aliases := nodes.NestedReadBucket(aliasIndexBucket)
2✔
1036
                if aliases == nil {
2✔
1037
                        return ErrGraphNodesNotFound
×
1038
                }
×
1039

1040
                nodePub := pub.SerializeCompressed()
2✔
1041
                a := aliases.Get(nodePub)
2✔
1042
                if a == nil {
3✔
1043
                        return ErrNodeAliasNotFound
1✔
1044
                }
1✔
1045

1046
                // TODO(roasbeef): should actually be using the utf-8
1047
                // package...
1048
                alias = string(a)
1✔
1049

1✔
1050
                return nil
1✔
1051
        }, func() {
2✔
1052
                alias = ""
2✔
1053
        })
2✔
1054
        if err != nil {
3✔
1055
                return "", err
1✔
1056
        }
1✔
1057

1058
        return alias, nil
1✔
1059
}
1060

1061
// DeleteNode starts a new database transaction to remove a vertex/node
1062
// from the database according to the node's public key.
1063
func (c *KVStore) DeleteNode(_ context.Context,
1064
        nodePub route.Vertex) error {
4✔
1065

4✔
1066
        // TODO(roasbeef): ensure dangling edges are removed...
4✔
1067
        return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
8✔
1068
                nodes := tx.ReadWriteBucket(nodeBucket)
4✔
1069
                if nodes == nil {
4✔
1070
                        return ErrGraphNodeNotFound
×
1071
                }
×
1072

1073
                return c.deleteLightningNode(nodes, nodePub[:])
4✔
1074
        }, func() {})
4✔
1075
}
1076

1077
// deleteLightningNode uses an existing database transaction to remove a
// vertex/node from the database according to the node's public key.
func (c *KVStore) deleteLightningNode(nodes kvdb.RwBucket,
	compressedPubKey []byte) error {

	aliases := nodes.NestedReadWriteBucket(aliasIndexBucket)
	if aliases == nil {
		return ErrGraphNodesNotFound
	}

	// Remove the node's alias index entry first.
	if err := aliases.Delete(compressedPubKey); err != nil {
		return err
	}

	// Before we delete the node, we'll fetch its current state so we can
	// determine when its last update was to clear out the node update
	// index.
	node, err := fetchLightningNode(nodes, compressedPubKey)
	if err != nil {
		return err
	}

	if err := nodes.Delete(compressedPubKey); err != nil {
		return err
	}

	// Finally, we'll delete the index entry for the node within the
	// nodeUpdateIndexBucket as this node is no longer active, so we don't
	// need to track its last update.
	nodeUpdateIndex := nodes.NestedReadWriteBucket(nodeUpdateIndexBucket)
	if nodeUpdateIndex == nil {
		return ErrGraphNodesNotFound
	}

	// In order to delete the entry, we'll need to reconstruct the key for
	// its last update: the 8-byte last-update timestamp followed by the
	// 33-byte compressed public key.
	updateUnix := uint64(node.LastUpdate.Unix())
	var indexKey [8 + 33]byte
	byteOrder.PutUint64(indexKey[:8], updateUnix)
	copy(indexKey[8:], compressedPubKey)

	return nodeUpdateIndex.Delete(indexKey[:])
}
1120

1121
// AddChannelEdge adds a new (undirected, blank) edge to the graph database. An
// undirected edge from the two target nodes are created. The information stored
// denotes the static attributes of the channel, such as the channelID, the keys
// involved in creation of the channel, and the set of features that the channel
// supports. The chanPoint and chanID are used to uniquely identify the edge
// globally within the database.
func (c *KVStore) AddChannelEdge(ctx context.Context,
	edge *models.ChannelEdgeInfo, opts ...batch.SchedulerOption) error {

	// alreadyExists carries the ErrEdgeAlreadyExist outcome from Do to
	// OnCommit, since the error itself must be silenced inside the batch.
	var alreadyExists bool
	r := &batch.Request[kvdb.RwTx]{
		Opts: batch.NewSchedulerOptions(opts...),
		Reset: func() {
			alreadyExists = false
		},
		Do: func(tx kvdb.RwTx) error {
			err := c.addChannelEdge(tx, edge)

			// Silence ErrEdgeAlreadyExist so that the batch can
			// succeed, but propagate the error via local state.
			if errors.Is(err, ErrEdgeAlreadyExist) {
				alreadyExists = true
				return nil
			}

			return err
		},
		OnCommit: func(err error) error {
			switch {
			case err != nil:
				return err
			case alreadyExists:
				return ErrEdgeAlreadyExist
			default:
				// Invalidate any cached entries for this
				// channel now that it was written successfully.
				c.rejectCache.remove(edge.ChannelID)
				c.chanCache.remove(edge.ChannelID)
				return nil
			}
		},
	}

	return c.chanScheduler.Execute(ctx, r)
}
1164

1165
// addChannelEdge is the private form of AddChannelEdge that allows callers to
// utilize an existing db transaction.
func (c *KVStore) addChannelEdge(tx kvdb.RwTx,
	edge *models.ChannelEdgeInfo) error {

	// Construct the channel's primary key which is the 8-byte channel ID.
	var chanKey [8]byte
	binary.BigEndian.PutUint64(chanKey[:], edge.ChannelID)

	// Ensure all buckets touched below exist before any writes happen.
	nodes, err := tx.CreateTopLevelBucket(nodeBucket)
	if err != nil {
		return err
	}
	edges, err := tx.CreateTopLevelBucket(edgeBucket)
	if err != nil {
		return err
	}
	edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket)
	if err != nil {
		return err
	}
	chanIndex, err := edges.CreateBucketIfNotExists(channelPointBucket)
	if err != nil {
		return err
	}

	// First, attempt to check if this edge has already been created. If
	// so, then we can exit early as this method is meant to be idempotent.
	if edgeInfo := edgeIndex.Get(chanKey[:]); edgeInfo != nil {
		return ErrEdgeAlreadyExist
	}

	// Before we insert the channel into the database, we'll ensure that
	// both nodes already exist in the channel graph. If either node
	// doesn't, then we'll insert a "shell" node that just includes its
	// public key, so subsequent validation and queries can work properly.
	_, node1Err := fetchLightningNode(nodes, edge.NodeKey1Bytes[:])
	switch {
	case errors.Is(node1Err, ErrGraphNodeNotFound):
		err := addLightningNode(
			tx, models.NewV1ShellNode(edge.NodeKey1Bytes),
		)
		if err != nil {
			return fmt.Errorf("unable to create shell node "+
				"for: %x: %w", edge.NodeKey1Bytes, err)
		}
	case node1Err != nil:
		return node1Err
	}

	_, node2Err := fetchLightningNode(nodes, edge.NodeKey2Bytes[:])
	switch {
	case errors.Is(node2Err, ErrGraphNodeNotFound):
		err := addLightningNode(
			tx, models.NewV1ShellNode(edge.NodeKey2Bytes),
		)
		if err != nil {
			return fmt.Errorf("unable to create shell node "+
				"for: %x: %w", edge.NodeKey2Bytes, err)
		}
	case node2Err != nil:
		return node2Err
	}

	// If the edge hasn't been created yet, then we'll first add it to the
	// edge index in order to associate the edge between two nodes and also
	// store the static components of the channel.
	if err := putChanEdgeInfo(edgeIndex, edge, chanKey); err != nil {
		return err
	}

	// Mark edge policies for both sides as unknown. This is to enable
	// efficient incoming channel lookup for a node.
	keys := []*[33]byte{
		&edge.NodeKey1Bytes,
		&edge.NodeKey2Bytes,
	}
	for _, key := range keys {
		err := putChanEdgePolicyUnknown(edges, edge.ChannelID, key[:])
		if err != nil {
			return err
		}
	}

	// Finally we add it to the channel index which maps channel points
	// (outpoints) to the shorter channel ID's.
	var b bytes.Buffer
	if err := WriteOutpoint(&b, &edge.ChannelPoint); err != nil {
		return err
	}

	return chanIndex.Put(b.Bytes(), chanKey[:])
}
1258

1259
// HasChannelEdge returns true if the database knows of a channel edge with the
// passed channel ID, and false otherwise. If an edge with that ID is found
// within the graph, then two time stamps representing the last time the edge
// was updated for both directed edges are returned along with the boolean. If
// it is not found, then the zombie index is checked and its result is returned
// as the second boolean.
func (c *KVStore) HasChannelEdge(
	chanID uint64) (time.Time, time.Time, bool, bool, error) {

	var (
		upd1Time time.Time
		upd2Time time.Time
		exists   bool
		isZombie bool
	)

	// We'll query the cache with the shared lock held to allow multiple
	// readers to access values in the cache concurrently if they exist.
	c.cacheMu.RLock()
	if entry, ok := c.rejectCache.get(chanID); ok {
		c.cacheMu.RUnlock()
		upd1Time = time.Unix(entry.upd1Time, 0)
		upd2Time = time.Unix(entry.upd2Time, 0)
		exists, isZombie = entry.flags.unpack()

		return upd1Time, upd2Time, exists, isZombie, nil
	}
	c.cacheMu.RUnlock()

	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()

	// The item was not found with the shared lock, so we'll acquire the
	// exclusive lock and check the cache again in case another method added
	// the entry to the cache while no lock was held.
	if entry, ok := c.rejectCache.get(chanID); ok {
		upd1Time = time.Unix(entry.upd1Time, 0)
		upd2Time = time.Unix(entry.upd2Time, 0)
		exists, isZombie = entry.flags.unpack()

		return upd1Time, upd2Time, exists, isZombie, nil
	}

	if err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrGraphNoEdgesFound
		}

		var channelID [8]byte
		byteOrder.PutUint64(channelID[:], chanID)

		// If the edge doesn't exist, then we'll also check our zombie
		// index.
		if edgeIndex.Get(channelID[:]) == nil {
			exists = false
			zombieIndex := edges.NestedReadBucket(zombieBucket)
			if zombieIndex != nil {
				isZombie, _, _ = isZombieEdge(
					zombieIndex, chanID,
				)
			}

			return nil
		}

		exists = true
		isZombie = false

		// If the channel has been found in the graph, then retrieve
		// the edges itself so we can return the last updated
		// timestamps.
		nodes := tx.ReadBucket(nodeBucket)
		if nodes == nil {
			return ErrGraphNodeNotFound
		}

		e1, e2, err := fetchChanEdgePolicies(
			edgeIndex, edges, channelID[:],
		)
		if err != nil {
			return err
		}

		// As we may have only one of the edges populated, only set the
		// update time if the edge was found in the database.
		if e1 != nil {
			upd1Time = e1.LastUpdate
		}
		if e2 != nil {
			upd2Time = e2.LastUpdate
		}

		return nil
	}, func() {}); err != nil {
		return time.Time{}, time.Time{}, exists, isZombie, err
	}

	// Cache the database result (still holding the exclusive lock) so
	// subsequent lookups for this channel avoid the database entirely.
	c.rejectCache.insert(chanID, rejectCacheEntry{
		upd1Time: upd1Time.Unix(),
		upd2Time: upd2Time.Unix(),
		flags:    packRejectFlags(exists, isZombie),
	})

	return upd1Time, upd2Time, exists, isZombie, nil
}
1369

1370
// AddEdgeProof sets the proof of an existing edge in the graph database.
1371
func (c *KVStore) AddEdgeProof(chanID lnwire.ShortChannelID,
1372
        proof *models.ChannelAuthProof) error {
2✔
1373

2✔
1374
        // Construct the channel's primary key which is the 8-byte channel ID.
2✔
1375
        var chanKey [8]byte
2✔
1376
        binary.BigEndian.PutUint64(chanKey[:], chanID.ToUint64())
2✔
1377

2✔
1378
        return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
4✔
1379
                edges := tx.ReadWriteBucket(edgeBucket)
2✔
1380
                if edges == nil {
2✔
1381
                        return ErrEdgeNotFound
×
1382
                }
×
1383

1384
                edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
2✔
1385
                if edgeIndex == nil {
2✔
1386
                        return ErrEdgeNotFound
×
1387
                }
×
1388

1389
                edge, err := fetchChanEdgeInfo(edgeIndex, chanKey[:])
2✔
1390
                if err != nil {
2✔
1391
                        return err
×
1392
                }
×
1393

1394
                edge.AuthProof = proof
2✔
1395

2✔
1396
                return putChanEdgeInfo(edgeIndex, edge, chanKey)
2✔
1397
        }, func() {})
2✔
1398
}
1399

1400
const (
	// pruneTipBytes is the total size of the value which stores a prune
	// entry of the graph in the prune log. The "prune tip" is the last
	// entry in the prune log, and indicates if the channel graph is in
	// sync with the current UTXO state. The structure of the value
	// is: blockHash, taking 32 bytes total. The entry's key is the
	// 4-byte block height, so the cursor's last entry is always the tip.
	pruneTipBytes = 32
)
1408

1409
// PruneGraph prunes newly closed channels from the channel graph in response
1410
// to a new block being solved on the network. Any transactions which spend the
1411
// funding output of any known channels within he graph will be deleted.
1412
// Additionally, the "prune tip", or the last block which has been used to
1413
// prune the graph is stored so callers can ensure the graph is fully in sync
1414
// with the current UTXO state. A slice of channels that have been closed by
1415
// the target block along with any pruned nodes are returned if the function
1416
// succeeds without error.
1417
func (c *KVStore) PruneGraph(spentOutputs []*wire.OutPoint,
1418
        blockHash *chainhash.Hash, blockHeight uint32) (
1419
        []*models.ChannelEdgeInfo, []route.Vertex, error) {
240✔
1420

240✔
1421
        c.cacheMu.Lock()
240✔
1422
        defer c.cacheMu.Unlock()
240✔
1423

240✔
1424
        var (
240✔
1425
                chansClosed []*models.ChannelEdgeInfo
240✔
1426
                prunedNodes []route.Vertex
240✔
1427
        )
240✔
1428

240✔
1429
        err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
480✔
1430
                // First grab the edges bucket which houses the information
240✔
1431
                // we'd like to delete
240✔
1432
                edges, err := tx.CreateTopLevelBucket(edgeBucket)
240✔
1433
                if err != nil {
240✔
1434
                        return err
×
1435
                }
×
1436

1437
                // Next grab the two edge indexes which will also need to be
1438
                // updated.
1439
                edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket)
240✔
1440
                if err != nil {
240✔
1441
                        return err
×
1442
                }
×
1443
                chanIndex, err := edges.CreateBucketIfNotExists(
240✔
1444
                        channelPointBucket,
240✔
1445
                )
240✔
1446
                if err != nil {
240✔
1447
                        return err
×
1448
                }
×
1449
                nodes := tx.ReadWriteBucket(nodeBucket)
240✔
1450
                if nodes == nil {
240✔
1451
                        return ErrSourceNodeNotSet
×
1452
                }
×
1453
                zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
240✔
1454
                if err != nil {
240✔
1455
                        return err
×
1456
                }
×
1457

1458
                // For each of the outpoints that have been spent within the
1459
                // block, we attempt to delete them from the graph as if that
1460
                // outpoint was a channel, then it has now been closed.
1461
                for _, chanPoint := range spentOutputs {
369✔
1462
                        // TODO(roasbeef): load channel bloom filter, continue
129✔
1463
                        // if NOT if filter
129✔
1464

129✔
1465
                        var opBytes bytes.Buffer
129✔
1466
                        err := WriteOutpoint(&opBytes, chanPoint)
129✔
1467
                        if err != nil {
129✔
1468
                                return err
×
1469
                        }
×
1470

1471
                        // First attempt to see if the channel exists within
1472
                        // the database, if not, then we can exit early.
1473
                        chanID := chanIndex.Get(opBytes.Bytes())
129✔
1474
                        if chanID == nil {
241✔
1475
                                continue
112✔
1476
                        }
1477

1478
                        // Attempt to delete the channel, an ErrEdgeNotFound
1479
                        // will be returned if that outpoint isn't known to be
1480
                        // a channel. If no error is returned, then a channel
1481
                        // was successfully pruned.
1482
                        edgeInfo, err := c.delChannelEdgeUnsafe(
17✔
1483
                                edges, edgeIndex, chanIndex, zombieIndex,
17✔
1484
                                chanID, false, false,
17✔
1485
                        )
17✔
1486
                        if err != nil && !errors.Is(err, ErrEdgeNotFound) {
17✔
1487
                                return err
×
1488
                        }
×
1489

1490
                        chansClosed = append(chansClosed, edgeInfo)
17✔
1491
                }
1492

1493
                metaBucket, err := tx.CreateTopLevelBucket(graphMetaBucket)
240✔
1494
                if err != nil {
240✔
1495
                        return err
×
1496
                }
×
1497

1498
                pruneBucket, err := metaBucket.CreateBucketIfNotExists(
240✔
1499
                        pruneLogBucket,
240✔
1500
                )
240✔
1501
                if err != nil {
240✔
1502
                        return err
×
1503
                }
×
1504

1505
                // With the graph pruned, add a new entry to the prune log,
1506
                // which can be used to check if the graph is fully synced with
1507
                // the current UTXO state.
1508
                var blockHeightBytes [4]byte
240✔
1509
                byteOrder.PutUint32(blockHeightBytes[:], blockHeight)
240✔
1510

240✔
1511
                var newTip [pruneTipBytes]byte
240✔
1512
                copy(newTip[:], blockHash[:])
240✔
1513

240✔
1514
                err = pruneBucket.Put(blockHeightBytes[:], newTip[:])
240✔
1515
                if err != nil {
240✔
1516
                        return err
×
1517
                }
×
1518

1519
                // Now that the graph has been pruned, we'll also attempt to
1520
                // prune any nodes that have had a channel closed within the
1521
                // latest block.
1522
                prunedNodes, err = c.pruneGraphNodes(nodes, edgeIndex)
240✔
1523

240✔
1524
                return err
240✔
1525
        }, func() {
240✔
1526
                chansClosed = nil
240✔
1527
                prunedNodes = nil
240✔
1528
        })
240✔
1529
        if err != nil {
240✔
1530
                return nil, nil, err
×
1531
        }
×
1532

1533
        for _, channel := range chansClosed {
257✔
1534
                c.rejectCache.remove(channel.ChannelID)
17✔
1535
                c.chanCache.remove(channel.ChannelID)
17✔
1536
        }
17✔
1537

1538
        return chansClosed, prunedNodes, nil
240✔
1539
}
1540

1541
// PruneGraphNodes is a garbage collection method which attempts to prune out
1542
// any nodes from the channel graph that are currently unconnected. This ensure
1543
// that we only maintain a graph of reachable nodes. In the event that a pruned
1544
// node gains more channels, it will be re-added back to the graph.
1545
func (c *KVStore) PruneGraphNodes() ([]route.Vertex, error) {
23✔
1546
        var prunedNodes []route.Vertex
23✔
1547
        err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
46✔
1548
                nodes := tx.ReadWriteBucket(nodeBucket)
23✔
1549
                if nodes == nil {
23✔
1550
                        return ErrGraphNodesNotFound
×
1551
                }
×
1552
                edges := tx.ReadWriteBucket(edgeBucket)
23✔
1553
                if edges == nil {
23✔
1554
                        return ErrGraphNotFound
×
1555
                }
×
1556
                edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
23✔
1557
                if edgeIndex == nil {
23✔
1558
                        return ErrGraphNoEdgesFound
×
1559
                }
×
1560

1561
                var err error
23✔
1562
                prunedNodes, err = c.pruneGraphNodes(nodes, edgeIndex)
23✔
1563
                if err != nil {
23✔
1564
                        return err
×
1565
                }
×
1566

1567
                return nil
23✔
1568
        }, func() {
23✔
1569
                prunedNodes = nil
23✔
1570
        })
23✔
1571

1572
        return prunedNodes, err
23✔
1573
}
1574

1575
// pruneGraphNodes attempts to remove any nodes from the graph who have had a
// channel closed within the current block. If the node still has existing
// channels in the graph, this will act as a no-op. It returns the set of
// node public keys that were deleted. The source node is never pruned.
//
// NOTE: this must be called within a write transaction that owns both the
// nodes bucket and the edge index bucket.
func (c *KVStore) pruneGraphNodes(nodes kvdb.RwBucket,
	edgeIndex kvdb.RwBucket) ([]route.Vertex, error) {

	log.Trace("Pruning nodes from graph with no open channels")

	// We'll retrieve the graph's source node to ensure we don't remove it
	// even if it no longer has any open channels.
	sourceNode, err := sourceNodeWithTx(nodes)
	if err != nil {
		return nil, err
	}

	// We'll use this map to keep count the number of references to a node
	// in the graph. A node should only be removed once it has no more
	// references in the graph.
	nodeRefCounts := make(map[[33]byte]int)
	err = nodes.ForEach(func(pubKey, nodeBytes []byte) error {
		// If this is the source key, then we skip this
		// iteration as the value for this key is a pubKey
		// rather than raw node information. Any other key that
		// isn't exactly 33 bytes (a compressed pubkey) is a
		// sub-bucket or index entry, not a node record.
		if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 {
			return nil
		}

		// Seed the map with a zero count; nodes that remain at
		// zero after the edge scan below are unconnected.
		var nodePub [33]byte
		copy(nodePub[:], pubKey)
		nodeRefCounts[nodePub] = 0

		return nil
	})
	if err != nil {
		return nil, err
	}

	// To ensure we never delete the source node, we'll start off by
	// bumping its ref count to 1.
	nodeRefCounts[sourceNode.PubKeyBytes] = 1

	// Next, we'll run through the edgeIndex which maps a channel ID to the
	// edge info. We'll use this scan to populate our reference count map
	// above.
	err = edgeIndex.ForEach(func(chanID, edgeInfoBytes []byte) error {
		// The first 66 bytes of the edge info contain the pubkeys of
		// the nodes that this edge attaches. We'll extract them, and
		// add them to the ref count map.
		var node1, node2 [33]byte
		copy(node1[:], edgeInfoBytes[:33])
		copy(node2[:], edgeInfoBytes[33:])

		// With the nodes extracted, we'll increase the ref count of
		// each of the nodes.
		nodeRefCounts[node1]++
		nodeRefCounts[node2]++

		return nil
	})
	if err != nil {
		return nil, err
	}

	// Finally, we'll make a second pass over the set of nodes, and delete
	// any nodes that have a ref count of zero.
	var pruned []route.Vertex
	for nodePubKey, refCount := range nodeRefCounts {
		// If the ref count of the node isn't zero, then we can safely
		// skip it as it still has edges to or from it within the
		// graph.
		if refCount != 0 {
			continue
		}

		// If we reach this point, then there are no longer any edges
		// that connect this node, so we can delete it.
		err := c.deleteLightningNode(nodes, nodePubKey[:])
		if err != nil {
			// A node missing from the bucket is benign here (it
			// may never have been fully persisted), so log and
			// move on rather than aborting the whole prune.
			if errors.Is(err, ErrGraphNodeNotFound) ||
				errors.Is(err, ErrGraphNodesNotFound) {

				log.Warnf("Unable to prune node %x from the "+
					"graph: %v", nodePubKey, err)
				continue
			}

			return nil, err
		}

		log.Infof("Pruned unconnected node %x from channel graph",
			nodePubKey[:])

		pruned = append(pruned, nodePubKey)
	}

	if len(pruned) > 0 {
		log.Infof("Pruned %v unconnected nodes from the channel graph",
			len(pruned))
	}

	// NOTE: err is necessarily nil here; it is returned unchanged to
	// preserve the original control flow.
	return pruned, err
}
1677

1678
// DisconnectBlockAtHeight is used to indicate that the block specified
1679
// by the passed height has been disconnected from the main chain. This
1680
// will "rewind" the graph back to the height below, deleting channels
1681
// that are no longer confirmed from the graph. The prune log will be
1682
// set to the last prune height valid for the remaining chain.
1683
// Channels that were removed from the graph resulting from the
1684
// disconnected block are returned.
1685
func (c *KVStore) DisconnectBlockAtHeight(height uint32) (
1686
        []*models.ChannelEdgeInfo, error) {
154✔
1687

154✔
1688
        // Every channel having a ShortChannelID starting at 'height'
154✔
1689
        // will no longer be confirmed.
154✔
1690
        startShortChanID := lnwire.ShortChannelID{
154✔
1691
                BlockHeight: height,
154✔
1692
        }
154✔
1693

154✔
1694
        // Delete everything after this height from the db up until the
154✔
1695
        // SCID alias range.
154✔
1696
        endShortChanID := aliasmgr.StartingAlias
154✔
1697

154✔
1698
        // The block height will be the 3 first bytes of the channel IDs.
154✔
1699
        var chanIDStart [8]byte
154✔
1700
        byteOrder.PutUint64(chanIDStart[:], startShortChanID.ToUint64())
154✔
1701
        var chanIDEnd [8]byte
154✔
1702
        byteOrder.PutUint64(chanIDEnd[:], endShortChanID.ToUint64())
154✔
1703

154✔
1704
        c.cacheMu.Lock()
154✔
1705
        defer c.cacheMu.Unlock()
154✔
1706

154✔
1707
        // Keep track of the channels that are removed from the graph.
154✔
1708
        var removedChans []*models.ChannelEdgeInfo
154✔
1709

154✔
1710
        if err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
308✔
1711
                edges, err := tx.CreateTopLevelBucket(edgeBucket)
154✔
1712
                if err != nil {
154✔
1713
                        return err
×
1714
                }
×
1715
                edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket)
154✔
1716
                if err != nil {
154✔
1717
                        return err
×
1718
                }
×
1719
                chanIndex, err := edges.CreateBucketIfNotExists(
154✔
1720
                        channelPointBucket,
154✔
1721
                )
154✔
1722
                if err != nil {
154✔
1723
                        return err
×
1724
                }
×
1725
                zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
154✔
1726
                if err != nil {
154✔
1727
                        return err
×
1728
                }
×
1729

1730
                // Scan from chanIDStart to chanIDEnd, deleting every
1731
                // found edge.
1732
                // NOTE: we must delete the edges after the cursor loop, since
1733
                // modifying the bucket while traversing is not safe.
1734
                // NOTE: We use a < comparison in bytes.Compare instead of <=
1735
                // so that the StartingAlias itself isn't deleted.
1736
                var keys [][]byte
154✔
1737
                cursor := edgeIndex.ReadWriteCursor()
154✔
1738

154✔
1739
                //nolint:ll
154✔
1740
                for k, _ := cursor.Seek(chanIDStart[:]); k != nil &&
154✔
1741
                        bytes.Compare(k, chanIDEnd[:]) < 0; k, _ = cursor.Next() {
251✔
1742
                        keys = append(keys, k)
97✔
1743
                }
97✔
1744

1745
                for _, k := range keys {
251✔
1746
                        edgeInfo, err := c.delChannelEdgeUnsafe(
97✔
1747
                                edges, edgeIndex, chanIndex, zombieIndex,
97✔
1748
                                k, false, false,
97✔
1749
                        )
97✔
1750
                        if err != nil && !errors.Is(err, ErrEdgeNotFound) {
97✔
1751
                                return err
×
1752
                        }
×
1753

1754
                        removedChans = append(removedChans, edgeInfo)
97✔
1755
                }
1756

1757
                // Delete all the entries in the prune log having a height
1758
                // greater or equal to the block disconnected.
1759
                metaBucket, err := tx.CreateTopLevelBucket(graphMetaBucket)
154✔
1760
                if err != nil {
154✔
1761
                        return err
×
1762
                }
×
1763

1764
                pruneBucket, err := metaBucket.CreateBucketIfNotExists(
154✔
1765
                        pruneLogBucket,
154✔
1766
                )
154✔
1767
                if err != nil {
154✔
1768
                        return err
×
1769
                }
×
1770

1771
                var pruneKeyStart [4]byte
154✔
1772
                byteOrder.PutUint32(pruneKeyStart[:], height)
154✔
1773

154✔
1774
                var pruneKeyEnd [4]byte
154✔
1775
                byteOrder.PutUint32(pruneKeyEnd[:], math.MaxUint32)
154✔
1776

154✔
1777
                // To avoid modifying the bucket while traversing, we delete
154✔
1778
                // the keys in a second loop.
154✔
1779
                var pruneKeys [][]byte
154✔
1780
                pruneCursor := pruneBucket.ReadWriteCursor()
154✔
1781
                //nolint:ll
154✔
1782
                for k, _ := pruneCursor.Seek(pruneKeyStart[:]); k != nil &&
154✔
1783
                        bytes.Compare(k, pruneKeyEnd[:]) <= 0; k, _ = pruneCursor.Next() {
246✔
1784
                        pruneKeys = append(pruneKeys, k)
92✔
1785
                }
92✔
1786

1787
                for _, k := range pruneKeys {
246✔
1788
                        if err := pruneBucket.Delete(k); err != nil {
92✔
1789
                                return err
×
1790
                        }
×
1791
                }
1792

1793
                return nil
154✔
1794
        }, func() {
154✔
1795
                removedChans = nil
154✔
1796
        }); err != nil {
154✔
1797
                return nil, err
×
1798
        }
×
1799

1800
        for _, channel := range removedChans {
251✔
1801
                c.rejectCache.remove(channel.ChannelID)
97✔
1802
                c.chanCache.remove(channel.ChannelID)
97✔
1803
        }
97✔
1804

1805
        return removedChans, nil
154✔
1806
}
1807

1808
// PruneTip returns the block height and hash of the latest block that has been
1809
// used to prune channels in the graph. Knowing the "prune tip" allows callers
1810
// to tell if the graph is currently in sync with the current best known UTXO
1811
// state.
1812
func (c *KVStore) PruneTip() (*chainhash.Hash, uint32, error) {
53✔
1813
        var (
53✔
1814
                tipHash   chainhash.Hash
53✔
1815
                tipHeight uint32
53✔
1816
        )
53✔
1817

53✔
1818
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
106✔
1819
                graphMeta := tx.ReadBucket(graphMetaBucket)
53✔
1820
                if graphMeta == nil {
53✔
1821
                        return ErrGraphNotFound
×
1822
                }
×
1823
                pruneBucket := graphMeta.NestedReadBucket(pruneLogBucket)
53✔
1824
                if pruneBucket == nil {
53✔
1825
                        return ErrGraphNeverPruned
×
1826
                }
×
1827

1828
                pruneCursor := pruneBucket.ReadCursor()
53✔
1829

53✔
1830
                // The prune key with the largest block height will be our
53✔
1831
                // prune tip.
53✔
1832
                k, v := pruneCursor.Last()
53✔
1833
                if k == nil {
71✔
1834
                        return ErrGraphNeverPruned
18✔
1835
                }
18✔
1836

1837
                // Once we have the prune tip, the value will be the block hash,
1838
                // and the key the block height.
1839
                copy(tipHash[:], v)
35✔
1840
                tipHeight = byteOrder.Uint32(k)
35✔
1841

35✔
1842
                return nil
35✔
1843
        }, func() {})
53✔
1844
        if err != nil {
71✔
1845
                return nil, 0, err
18✔
1846
        }
18✔
1847

1848
        return &tipHash, tipHeight, nil
35✔
1849
}
1850

1851
// DeleteChannelEdges removes edges with the given channel IDs from the
// database and marks them as zombies. This ensures that we're unable to re-add
// it to our database once again. If an edge does not exist within the
// database, then ErrEdgeNotFound will be returned. If strictZombiePruning is
// true, then when we mark these edges as zombies, we'll set up the keys such
// that we require the node that failed to send the fresh update to be the one
// that resurrects the channel from its zombie state. The markZombie bool
// denotes whether or not to mark the channel as a zombie.
//
// NOTE: the whole batch is deleted within a single transaction, so either all
// of the given channel IDs are removed or none are.
func (c *KVStore) DeleteChannelEdges(strictZombiePruning, markZombie bool,
	chanIDs ...uint64) ([]*models.ChannelEdgeInfo, error) {

	// TODO(roasbeef): possibly delete from node bucket if node has no more
	// channels
	// TODO(roasbeef): don't delete both edges?

	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()

	var infos []*models.ChannelEdgeInfo
	err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
		edges := tx.ReadWriteBucket(edgeBucket)
		if edges == nil {
			return ErrEdgeNotFound
		}
		edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrEdgeNotFound
		}
		chanIndex := edges.NestedReadWriteBucket(channelPointBucket)
		if chanIndex == nil {
			return ErrEdgeNotFound
		}
		// The nodes bucket is only checked for existence here; a
		// missing bucket means the graph was never initialized.
		nodes := tx.ReadWriteBucket(nodeBucket)
		if nodes == nil {
			return ErrGraphNodeNotFound
		}
		zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
		if err != nil {
			return err
		}

		// Delete each requested channel in turn, reusing the same
		// 8-byte scratch key for the big-endian channel ID. Any
		// failure (including ErrEdgeNotFound) aborts the whole batch.
		var rawChanID [8]byte
		for _, chanID := range chanIDs {
			byteOrder.PutUint64(rawChanID[:], chanID)
			edgeInfo, err := c.delChannelEdgeUnsafe(
				edges, edgeIndex, chanIndex, zombieIndex,
				rawChanID[:], markZombie, strictZombiePruning,
			)
			if err != nil {
				return err
			}

			infos = append(infos, edgeInfo)
		}

		return nil
	}, func() {
		// Reset closure for transaction retries: drop partial results.
		infos = nil
	})
	if err != nil {
		return nil, err
	}

	// All deletions committed; evict every requested channel from the
	// in-memory caches.
	for _, chanID := range chanIDs {
		c.rejectCache.remove(chanID)
		c.chanCache.remove(chanID)
	}

	return infos, nil
}
1921

1922
// ChannelID attempt to lookup the 8-byte compact channel ID which maps to the
1923
// passed channel point (outpoint). If the passed channel doesn't exist within
1924
// the database, then ErrEdgeNotFound is returned.
1925
func (c *KVStore) ChannelID(chanPoint *wire.OutPoint) (uint64, error) {
1✔
1926
        var chanID uint64
1✔
1927
        if err := kvdb.View(c.db, func(tx kvdb.RTx) error {
2✔
1928
                var err error
1✔
1929
                chanID, err = getChanID(tx, chanPoint)
1✔
1930
                return err
1✔
1931
        }, func() {
2✔
1932
                chanID = 0
1✔
1933
        }); err != nil {
1✔
1934
                return 0, err
×
1935
        }
×
1936

1937
        return chanID, nil
1✔
1938
}
1939

1940
// getChanID returns the assigned channel ID for a given channel point.
1941
func getChanID(tx kvdb.RTx, chanPoint *wire.OutPoint) (uint64, error) {
1✔
1942
        var b bytes.Buffer
1✔
1943
        if err := WriteOutpoint(&b, chanPoint); err != nil {
1✔
1944
                return 0, err
×
1945
        }
×
1946

1947
        edges := tx.ReadBucket(edgeBucket)
1✔
1948
        if edges == nil {
1✔
1949
                return 0, ErrGraphNoEdgesFound
×
1950
        }
×
1951
        chanIndex := edges.NestedReadBucket(channelPointBucket)
1✔
1952
        if chanIndex == nil {
1✔
1953
                return 0, ErrGraphNoEdgesFound
×
1954
        }
×
1955

1956
        chanIDBytes := chanIndex.Get(b.Bytes())
1✔
1957
        if chanIDBytes == nil {
1✔
1958
                return 0, ErrEdgeNotFound
×
1959
        }
×
1960

1961
        chanID := byteOrder.Uint64(chanIDBytes)
1✔
1962

1✔
1963
        return chanID, nil
1✔
1964
}
1965

1966
// TODO(roasbeef): allow updates to use Batch?
1967

1968
// HighestChanID returns the "highest" known channel ID in the channel graph.
1969
// This represents the "newest" channel from the PoV of the chain. This method
1970
// can be used by peers to quickly determine if they're graphs are in sync.
1971
func (c *KVStore) HighestChanID(_ context.Context) (uint64, error) {
3✔
1972
        var cid uint64
3✔
1973

3✔
1974
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
6✔
1975
                edges := tx.ReadBucket(edgeBucket)
3✔
1976
                if edges == nil {
3✔
1977
                        return ErrGraphNoEdgesFound
×
1978
                }
×
1979
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
3✔
1980
                if edgeIndex == nil {
3✔
1981
                        return ErrGraphNoEdgesFound
×
1982
                }
×
1983

1984
                // In order to find the highest chan ID, we'll fetch a cursor
1985
                // and use that to seek to the "end" of our known rage.
1986
                cidCursor := edgeIndex.ReadCursor()
3✔
1987

3✔
1988
                lastChanID, _ := cidCursor.Last()
3✔
1989

3✔
1990
                // If there's no key, then this means that we don't actually
3✔
1991
                // know of any channels, so we'll return a predicable error.
3✔
1992
                if lastChanID == nil {
4✔
1993
                        return ErrGraphNoEdgesFound
1✔
1994
                }
1✔
1995

1996
                // Otherwise, we'll de serialize the channel ID and return it
1997
                // to the caller.
1998
                cid = byteOrder.Uint64(lastChanID)
2✔
1999

2✔
2000
                return nil
2✔
2001
        }, func() {
3✔
2002
                cid = 0
3✔
2003
        })
3✔
2004
        if err != nil && !errors.Is(err, ErrGraphNoEdgesFound) {
3✔
2005
                return 0, err
×
2006
        }
×
2007

2008
        return cid, nil
3✔
2009
}
2010

2011
// ChannelEdge represents the complete set of information for a channel edge in
// the known channel graph. This struct couples the core information of the
// edge as well as each of the known advertised edge policies. Either policy
// (and either node's record) may be nil if it has not yet been received.
type ChannelEdge struct {
	// Info contains all the static information describing the channel.
	Info *models.ChannelEdgeInfo

	// Policy1 points to the "first" edge policy of the channel containing
	// the dynamic information required to properly route through the edge.
	Policy1 *models.ChannelEdgePolicy

	// Policy2 points to the "second" edge policy of the channel containing
	// the dynamic information required to properly route through the edge.
	Policy2 *models.ChannelEdgePolicy

	// Node1 is "node 1" in the channel. This is the node that would have
	// produced Policy1 if it exists.
	Node1 *models.Node

	// Node2 is "node 2" in the channel. This is the node that would have
	// produced Policy2 if it exists.
	Node2 *models.Node
}
2034

2035
// updateChanCacheBatch updates the channel cache with multiple edges at once.
2036
// This method acquires the cache lock only once for the entire batch.
2037
func (c *KVStore) updateChanCacheBatch(edgesToCache map[uint64]ChannelEdge) {
180✔
2038
        if len(edgesToCache) == 0 {
313✔
2039
                return
133✔
2040
        }
133✔
2041

2042
        c.cacheMu.Lock()
47✔
2043
        defer c.cacheMu.Unlock()
47✔
2044

47✔
2045
        for cid, edge := range edgesToCache {
165✔
2046
                c.chanCache.insert(cid, edge)
118✔
2047
        }
118✔
2048
}
2049

2050
// isEmptyGraphError returns true if the error indicates the graph database
2051
// is empty (no edges or nodes exist). These errors are expected when the
2052
// graph is first created or has no data.
2053
func isEmptyGraphError(err error) bool {
×
2054
        return errors.Is(err, ErrGraphNoEdgesFound) ||
×
2055
                errors.Is(err, ErrGraphNodesNotFound)
×
2056
}
×
2057

2058
// chanUpdatesIterator holds the state for chunked channel update iteration.
// It is mutated across successive batch fetches so that iteration can resume
// where the previous chunk left off.
type chanUpdatesIterator struct {
	// batchSize is the amount of channel updates to read at a single time.
	batchSize int

	// startTime is the start time of the iteration request.
	startTime time.Time

	// endTime is the end time of the iteration request.
	endTime time.Time

	// edgesSeen is used to dedup edges.
	edgesSeen map[uint64]struct{}

	// edgesToCache houses all the edges that we read from the disk which
	// aren't yet cached. This is used to update the cache after a batch
	// chunk.
	edgesToCache map[uint64]ChannelEdge

	// lastSeenKey is the last index key seen. This is used to resume
	// iteration. A nil value means iteration has not started yet.
	lastSeenKey []byte

	// hits is the number of cache hits.
	hits int

	// total is the total number of edges requested.
	total int
}
2087

2088
// newChanUpdatesIterator makes a new chan updates iterator.
2089
func newChanUpdatesIterator(batchSize int,
2090
        startTime, endTime time.Time) *chanUpdatesIterator {
142✔
2091

142✔
2092
        return &chanUpdatesIterator{
142✔
2093
                batchSize:    batchSize,
142✔
2094
                startTime:    startTime,
142✔
2095
                endTime:      endTime,
142✔
2096
                edgesSeen:    make(map[uint64]struct{}),
142✔
2097
                edgesToCache: make(map[uint64]ChannelEdge),
142✔
2098
                lastSeenKey:  nil,
142✔
2099
        }
142✔
2100
}
142✔
2101

2102
// fetchNextChanUpdateBatch retrieves the next batch of channel edges within the
// horizon. Returns the batch, whether there are more edges, and any error.
//
// The batch is read in a single read transaction, resuming after
// state.lastSeenKey when a previous call already consumed part of the range.
// Results are deduped via state.edgesSeen, and edges that had to be read from
// disk (cache misses) are staged in state.edgesToCache so the caller can push
// them into the channel cache once the batch has been consumed.
func (c *KVStore) fetchNextChanUpdateBatch(
        state *chanUpdatesIterator) ([]ChannelEdge, bool, error) {

        var (
                batch   []ChannelEdge
                hasMore bool
        )
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
                edges := tx.ReadBucket(edgeBucket)
                if edges == nil {
                        return ErrGraphNoEdgesFound
                }

                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
                if edgeIndex == nil {
                        return ErrGraphNoEdgesFound
                }

                edgeUpdateIndex := edges.NestedReadBucket(edgeUpdateIndexBucket)
                if edgeUpdateIndex == nil {
                        return ErrGraphNoEdgesFound
                }
                nodes := tx.ReadBucket(nodeBucket)
                if nodes == nil {
                        return ErrGraphNodesNotFound
                }

                // With all the relevant buckets read, we'll now create a fresh
                // read cursor.
                updateCursor := edgeUpdateIndex.ReadCursor()

                // We'll now use the start and end time to create the keys that
                // we'll use to seek. Keys in this index are 16 bytes:
                // [8 bytes timestamp][8 bytes channel ID].
                var startTimeBytes, endTimeBytes [8 + 8]byte
                byteOrder.PutUint64(
                        startTimeBytes[:8], uint64(state.startTime.Unix()),
                )
                byteOrder.PutUint64(
                        endTimeBytes[:8], uint64(state.endTime.Unix()),
                )

                var indexKey []byte

                // If we left off earlier, then we'll use that key as the
                // starting point.
                switch {
                case state.lastSeenKey != nil:
                        // Seek to the last seen key, moving to the key right
                        // after it.
                        indexKey, _ = updateCursor.Seek(state.lastSeenKey)

                        if bytes.Equal(indexKey, state.lastSeenKey) {
                                indexKey, _ = updateCursor.Next()
                        }

                // Otherwise, we'll move to the very start of the time range.
                default:
                        indexKey, _ = updateCursor.Seek(startTimeBytes[:])
                }

                // TODO(roasbeef): iterate the channel graph cache instead w/ a
                // treap ordering?

                // Now we'll read items up to the batch size, exiting early if
                // we exceed the ending time.
                for len(batch) < state.batchSize && indexKey != nil {
                        // If we're at the end, then we'll break out now.
                        if bytes.Compare(indexKey, endTimeBytes[:]) > 0 {
                                break
                        }

                        // The channel ID is the trailing 8 bytes of the
                        // 16-byte index key.
                        chanID := indexKey[8:]
                        chanIDInt := byteOrder.Uint64(chanID)

                        // Record our position so a subsequent call can resume
                        // after this key. All keys here are the same length,
                        // so the buffer is allocated once and then reused.
                        if state.lastSeenKey == nil {
                                state.lastSeenKey = make([]byte, len(indexKey))
                        }
                        copy(state.lastSeenKey, indexKey)

                        // If we've seen this channel ID already, then we'll
                        // skip it.
                        if _, ok := state.edgesSeen[chanIDInt]; ok {
                                indexKey, _ = updateCursor.Next()
                                continue
                        }

                        // Before we read the edge info, we'll see if this
                        // element is already in the cache or not.
                        c.cacheMu.RLock()
                        if channel, ok := c.chanCache.get(chanIDInt); ok {
                                state.edgesSeen[chanIDInt] = struct{}{}

                                batch = append(batch, channel)

                                state.hits++
                                state.total++

                                indexKey, _ = updateCursor.Next()

                                c.cacheMu.RUnlock()

                                continue
                        }
                        c.cacheMu.RUnlock()

                        // The edge wasn't in the cache, so we'll fetch it along
                        // w/ the edge policies and nodes.
                        edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID)
                        if err != nil {
                                return fmt.Errorf("unable to fetch info "+
                                        "for edge with chan_id=%v: %v",
                                        chanIDInt, err)
                        }
                        edge1, edge2, err := fetchChanEdgePolicies(
                                edgeIndex, edges, chanID,
                        )
                        if err != nil {
                                return fmt.Errorf("unable to fetch "+
                                        "policies for edge with chan_id=%v: %v",
                                        chanIDInt, err)
                        }
                        node1, err := fetchLightningNode(
                                nodes, edgeInfo.NodeKey1Bytes[:],
                        )
                        if err != nil {
                                return err
                        }
                        node2, err := fetchLightningNode(
                                nodes, edgeInfo.NodeKey2Bytes[:],
                        )
                        if err != nil {
                                return err
                        }

                        // Now we have all the information we need to build the
                        // channel edge.
                        channel := ChannelEdge{
                                Info:    edgeInfo,
                                Policy1: edge1,
                                Policy2: edge2,
                                Node1:   node1,
                                Node2:   node2,
                        }

                        // Stage the freshly read edge for a later cache
                        // insert, and mark it seen so it is not re-yielded.
                        state.edgesSeen[chanIDInt] = struct{}{}
                        state.edgesToCache[chanIDInt] = channel

                        batch = append(batch, channel)

                        state.total++

                        // Advance the iterator to the next entry.
                        indexKey, _ = updateCursor.Next()
                }

                // If we haven't yet crossed the endTimeBytes, then we still
                // have more entries to deliver.
                if indexKey != nil &&
                        bytes.Compare(indexKey, endTimeBytes[:]) <= 0 {

                        hasMore = true
                }

                return nil
        }, func() {
                // NOTE(review): the reset closure clears the local results,
                // but state.edgesSeen/state.total mutated inside the tx
                // closure are not undone — under a driver-level tx retry
                // these could double-count. Confirm kvdb.View retry
                // semantics.
                batch = nil
                hasMore = false
        })
        if err != nil {
                return nil, false, err
        }

        return batch, hasMore, nil
}
2278

2279
// ChanUpdatesInHorizon returns all the known channel edges which have at least
// one edge that has an update timestamp within the specified horizon.
//
// The returned sequence is lazy: edges are read from disk in batches (sized
// via the iterator options), and newly read edges are inserted into the
// channel cache only after their batch has been fully yielded.
func (c *KVStore) ChanUpdatesInHorizon(startTime, endTime time.Time,
        opts ...IteratorOption) iter.Seq2[ChannelEdge, error] {

        // Apply any caller-provided iterator options over the defaults.
        cfg := defaultIteratorConfig()
        for _, opt := range opts {
                opt(cfg)
        }

        return func(yield func(ChannelEdge, error) bool) {
                iterState := newChanUpdatesIterator(
                        cfg.chanUpdateIterBatchSize, startTime, endTime,
                )

                for {
                        // At the top of the loop, we'll read the next batch
                        // chunk from disk. We'll also determine if we have any
                        // more entries after this or not.
                        batch, hasMore, err := c.fetchNextChanUpdateBatch(
                                iterState,
                        )
                        if err != nil {
                                // These errors just mean the graph is empty,
                                // which is OK.
                                if !isEmptyGraphError(err) {
                                        log.Errorf("ChanUpdatesInHorizon "+
                                                "batch error: %v", err)

                                        yield(ChannelEdge{}, err)

                                        return
                                }
                                // Continue with an empty batch; the exit
                                // condition below ends iteration.
                        }

                        // We'll now yield each edge that we just read. If yield
                        // returns false, then that means that we'll exit early.
                        for _, edge := range batch {
                                if !yield(edge, nil) {
                                        return
                                }
                        }

                        // Update cache after successful batch yield, then
                        // reset the staging map for the next batch.
                        c.updateChanCacheBatch(iterState.edgesToCache)
                        iterState.edgesToCache = make(map[uint64]ChannelEdge)

                        // If we're done, then we can just break out here now.
                        if !hasMore || len(batch) == 0 {
                                break
                        }
                }

                if iterState.total > 0 {
                        log.Tracef("ChanUpdatesInHorizon hit percentage: "+
                                "%.2f (%d/%d)", float64(iterState.hits)*100/
                                float64(iterState.total), iterState.hits,
                                iterState.total)
                } else {
                        log.Tracef("ChanUpdatesInHorizon returned no edges "+
                                "in horizon (%s, %s)", startTime, endTime)
                }
        }
}
2345

2346
// nodeUpdatesIterator maintains state for iterating through node updates.
//
// Iterator Lifecycle:
// 1. Initialize state with start/end time, batch size, and filtering options.
// 2. Fetch batch using pagination cursor (lastSeenKey).
// 3. Filter nodes if publicNodesOnly is set.
// 4. Update lastSeenKey to the last processed node's index key.
// 5. Repeat until we exceed endTime or no more nodes exist.
type nodeUpdatesIterator struct {
        // batchSize is the amount of node updates to read at a single time.
        batchSize int

        // startTime is the start time of the iteration request.
        startTime time.Time

        // endTime is the end time of the iteration request.
        endTime time.Time

        // lastSeenKey is the last index key seen. This is used to resume
        // iteration. Its layout matches the nodeUpdateIndex bucket key:
        // [8 bytes timestamp][33 bytes node pubkey].
        lastSeenKey []byte

        // publicNodesOnly filters to only return public nodes if true.
        publicNodesOnly bool

        // total tracks total nodes processed.
        total int
}
2374

2375
// newNodeUpdatesIterator makes a new node updates iterator.
2376
func newNodeUpdatesIterator(batchSize int, startTime, endTime time.Time,
2377
        publicNodesOnly bool) *nodeUpdatesIterator {
51✔
2378

51✔
2379
        return &nodeUpdatesIterator{
51✔
2380
                batchSize:       batchSize,
51✔
2381
                startTime:       startTime,
51✔
2382
                endTime:         endTime,
51✔
2383
                lastSeenKey:     nil,
51✔
2384
                publicNodesOnly: publicNodesOnly,
51✔
2385
        }
51✔
2386
}
51✔
2387

2388
// fetchNextNodeBatch fetches the next batch of node announcements using the
2389
// iterator state.
2390
func (c *KVStore) fetchNextNodeBatch(
2391
        state *nodeUpdatesIterator) ([]*models.Node, bool, error) {
110✔
2392

110✔
2393
        var (
110✔
2394
                nodeBatch []*models.Node
110✔
2395
                hasMore   bool
110✔
2396
        )
110✔
2397

110✔
2398
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
220✔
2399
                nodes := tx.ReadBucket(nodeBucket)
110✔
2400
                if nodes == nil {
110✔
2401
                        return ErrGraphNodesNotFound
×
2402
                }
×
2403
                ourPubKey := nodes.Get(sourceKey)
110✔
2404
                if ourPubKey == nil && state.publicNodesOnly {
110✔
2405
                        // If we're filtering for public nodes only but don't
×
2406
                        // have a source node set, we can't determine if nodes
×
2407
                        // are public. A node is considered public if it has at
×
2408
                        // least one channel with our node (the source node).
×
2409
                        return ErrSourceNodeNotSet
×
2410
                }
×
2411
                nodeUpdateIndex := nodes.NestedReadBucket(nodeUpdateIndexBucket)
110✔
2412
                if nodeUpdateIndex == nil {
110✔
2413
                        return ErrGraphNodesNotFound
×
2414
                }
×
2415

2416
                // We'll now obtain a cursor to perform a range query within the
2417
                // index to find all node announcements within the horizon. The
2418
                // nodeUpdateIndex key format is: [8 bytes timestamp][33 bytes
2419
                // node pubkey] This allows efficient range queries by time
2420
                // while maintaining a stable sort order for nodes with the same
2421
                // timestamp.
2422
                updateCursor := nodeUpdateIndex.ReadCursor()
110✔
2423

110✔
2424
                var startTimeBytes, endTimeBytes [8 + 33]byte
110✔
2425
                byteOrder.PutUint64(
110✔
2426
                        startTimeBytes[:8], uint64(state.startTime.Unix()),
110✔
2427
                )
110✔
2428
                byteOrder.PutUint64(
110✔
2429
                        endTimeBytes[:8], uint64(state.endTime.Unix()),
110✔
2430
                )
110✔
2431

110✔
2432
                // If we have a last seen key (existing iteration), then that'll
110✔
2433
                // be our starting point. Otherwise, we'll seek to the start
110✔
2434
                // time.
110✔
2435
                var indexKey []byte
110✔
2436
                if state.lastSeenKey != nil {
169✔
2437
                        indexKey, _ = updateCursor.Seek(state.lastSeenKey)
59✔
2438

59✔
2439
                        if bytes.Equal(indexKey, state.lastSeenKey) {
118✔
2440
                                indexKey, _ = updateCursor.Next()
59✔
2441
                        }
59✔
2442
                } else {
51✔
2443
                        indexKey, _ = updateCursor.Seek(startTimeBytes[:])
51✔
2444
                }
51✔
2445

2446
                // Now we'll read items up to the batch size, exiting early if
2447
                // we exceed the ending time.
2448
                var lastProcessedKey []byte
110✔
2449
                for len(nodeBatch) < state.batchSize && indexKey != nil {
690✔
2450
                        // Extract the timestamp from the index key (first 8
580✔
2451
                        // bytes). Only compare timestamps, not the full key
580✔
2452
                        // with pubkey.
580✔
2453
                        keyTimestamp := byteOrder.Uint64(indexKey[:8])
580✔
2454
                        endTimestamp := uint64(state.endTime.Unix())
580✔
2455
                        if keyTimestamp > endTimestamp {
594✔
2456
                                break
14✔
2457
                        }
2458

2459
                        nodePub := indexKey[8:]
566✔
2460
                        node, err := fetchLightningNode(nodes, nodePub)
566✔
2461
                        if err != nil {
566✔
2462
                                return err
×
2463
                        }
×
2464

2465
                        if state.publicNodesOnly {
566✔
2466
                                nodeIsPublic, err := c.isPublic(
×
2467
                                        tx, node.PubKeyBytes, ourPubKey,
×
2468
                                )
×
2469
                                if err != nil {
×
2470
                                        return err
×
2471
                                }
×
2472
                                if !nodeIsPublic {
×
2473
                                        indexKey, _ = updateCursor.Next()
×
2474
                                        continue
×
2475
                                }
2476
                        }
2477

2478
                        nodeBatch = append(nodeBatch, node)
566✔
2479
                        state.total++
566✔
2480

566✔
2481
                        // Remember the last key we actually processed. We'll
566✔
2482
                        // use this to update the last seen key below.
566✔
2483
                        if lastProcessedKey == nil {
659✔
2484
                                lastProcessedKey = make([]byte, len(indexKey))
93✔
2485
                        }
93✔
2486
                        copy(lastProcessedKey, indexKey)
566✔
2487

566✔
2488
                        // Advance the iterator to the next entry.
566✔
2489
                        indexKey, _ = updateCursor.Next()
566✔
2490
                }
2491

2492
                // If we haven't yet crossed the endTime, then we still
2493
                // have more entries to deliver.
2494
                if indexKey != nil {
195✔
2495
                        keyTimestamp := byteOrder.Uint64(indexKey[:8])
85✔
2496
                        endTimestamp := uint64(state.endTime.Unix())
85✔
2497
                        if keyTimestamp <= endTimestamp {
150✔
2498
                                hasMore = true
65✔
2499
                        }
65✔
2500
                }
2501

2502
                // Update the cursor to the last key we actually processed.
2503
                if lastProcessedKey != nil {
203✔
2504
                        if state.lastSeenKey == nil {
127✔
2505
                                state.lastSeenKey = make(
34✔
2506
                                        []byte, len(lastProcessedKey),
34✔
2507
                                )
34✔
2508
                        }
34✔
2509
                        copy(state.lastSeenKey, lastProcessedKey)
93✔
2510
                }
2511

2512
                return nil
110✔
2513
        }, func() {
110✔
2514
                nodeBatch = nil
110✔
2515
        })
110✔
2516
        switch {
110✔
2517
        case errors.Is(err, ErrGraphNoEdgesFound):
×
2518
                fallthrough
×
2519
        case errors.Is(err, ErrGraphNodesNotFound):
×
2520
                break
×
2521

2522
        case err != nil:
×
2523
                return nil, false, err
×
2524
        }
2525

2526
        return nodeBatch, hasMore, nil
110✔
2527
}
2528

2529
// NodeUpdatesInHorizon returns all the known lightning nodes which have an
// update timestamp within the passed range.
//
// The returned sequence is lazy: node announcements are read from disk in
// batches (sized via the iterator options) and yielded as each batch is
// loaded. On a read error, a zero-valued node is yielded together with the
// error and iteration stops.
func (c *KVStore) NodeUpdatesInHorizon(startTime,
        endTime time.Time,
        opts ...IteratorOption) iter.Seq2[*models.Node, error] {

        // Apply any caller-provided iterator options over the defaults.
        cfg := defaultIteratorConfig()
        for _, opt := range opts {
                opt(cfg)
        }

        return func(yield func(*models.Node, error) bool) {
                // Initialize iterator state.
                state := newNodeUpdatesIterator(
                        cfg.nodeUpdateIterBatchSize,
                        startTime, endTime,
                        cfg.iterPublicNodes,
                )

                for {
                        nodeAnns, hasMore, err := c.fetchNextNodeBatch(state)
                        if err != nil {
                                log.Errorf("unable to read node updates in "+
                                        "horizon: %v", err)

                                yield(&models.Node{}, err)

                                return
                        }

                        // Yield each node of the batch, stopping early if the
                        // consumer is done.
                        for _, node := range nodeAnns {
                                if !yield(node, nil) {
                                        return
                                }
                        }

                        // If we're done, then we can just break out here now.
                        if !hasMore || len(nodeAnns) == 0 {
                                break
                        }
                }
        }
}
2573

2574
// FilterKnownChanIDs takes a set of channel IDs and return the subset of chan
// ID's that we don't know and are not known zombies of the passed set. In other
// words, we perform a set difference of our set of chan ID's and the ones
// passed in. This method can be used by callers to determine the set of
// channels another peer knows of that we don't. The ChannelUpdateInfos for the
// known zombies is also returned.
func (c *KVStore) FilterKnownChanIDs(chansInfo []ChannelUpdateInfo) ([]uint64,
        []ChannelUpdateInfo, error) {

        var (
                newChanIDs   []uint64
                knownZombies []ChannelUpdateInfo
        )

        // NOTE(review): a write lock is taken even though this method only
        // reads — presumably to serialize with concurrent zombie-index
        // mutations; confirm before relaxing to RLock.
        c.cacheMu.Lock()
        defer c.cacheMu.Unlock()

        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
                edges := tx.ReadBucket(edgeBucket)
                if edges == nil {
                        return ErrGraphNoEdgesFound
                }
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
                if edgeIndex == nil {
                        return ErrGraphNoEdgesFound
                }

                // Fetch the zombie index, it may not exist if no edges have
                // ever been marked as zombies. If the index has been
                // initialized, we will use it later to skip known zombie edges.
                zombieIndex := edges.NestedReadBucket(zombieBucket)

                // We'll run through the set of chanIDs and collate only the
                // set of channel that are unable to be found within our db.
                var cidBytes [8]byte
                for _, info := range chansInfo {
                        scid := info.ShortChannelID.ToUint64()
                        byteOrder.PutUint64(cidBytes[:], scid)

                        // If the edge is already known, skip it.
                        if v := edgeIndex.Get(cidBytes[:]); v != nil {
                                continue
                        }

                        // If the edge is a known zombie, skip it (but report
                        // its info to the caller).
                        if zombieIndex != nil {
                                isZombie, _, _ := isZombieEdge(
                                        zombieIndex, scid,
                                )

                                if isZombie {
                                        knownZombies = append(
                                                knownZombies, info,
                                        )

                                        continue
                                }
                        }

                        newChanIDs = append(newChanIDs, scid)
                }

                return nil
        }, func() {
                // Reset partial results in case the transaction is retried.
                newChanIDs = nil
                knownZombies = nil
        })
        switch {
        // If we don't know of any edges yet, then we'll return the entire set
        // of chan IDs specified.
        case errors.Is(err, ErrGraphNoEdgesFound):
                ogChanIDs := make([]uint64, len(chansInfo))
                for i, info := range chansInfo {
                        ogChanIDs[i] = info.ShortChannelID.ToUint64()
                }

                return ogChanIDs, nil, nil

        case err != nil:
                return nil, nil, err
        }

        return newChanIDs, knownZombies, nil
}
2658

2659
// ChannelUpdateInfo couples the SCID of a channel with the timestamps of the
// latest received channel updates for the channel.
type ChannelUpdateInfo struct {
        // ShortChannelID is the SCID identifier of the channel.
        ShortChannelID lnwire.ShortChannelID

        // Node1UpdateTimestamp is the timestamp of the latest received update
        // from the node 1 channel peer. This will be set to zero time if no
        // update has yet been received from this node. (NewChannelUpdateInfo
        // normalizes the zero value to the unix epoch.)
        Node1UpdateTimestamp time.Time

        // Node2UpdateTimestamp is the timestamp of the latest received update
        // from the node 2 channel peer. This will be set to zero time if no
        // update has yet been received from this node. (NewChannelUpdateInfo
        // normalizes the zero value to the unix epoch.)
        Node2UpdateTimestamp time.Time
}
2675

2676
// NewChannelUpdateInfo is a constructor which makes sure we initialize the
2677
// timestamps with zero seconds unix timestamp which equals
2678
// `January 1, 1970, 00:00:00 UTC` in case the value is `time.Time{}`.
2679
func NewChannelUpdateInfo(scid lnwire.ShortChannelID, node1Timestamp,
2680
        node2Timestamp time.Time) ChannelUpdateInfo {
196✔
2681

196✔
2682
        chanInfo := ChannelUpdateInfo{
196✔
2683
                ShortChannelID:       scid,
196✔
2684
                Node1UpdateTimestamp: node1Timestamp,
196✔
2685
                Node2UpdateTimestamp: node2Timestamp,
196✔
2686
        }
196✔
2687

196✔
2688
        if node1Timestamp.IsZero() {
382✔
2689
                chanInfo.Node1UpdateTimestamp = time.Unix(0, 0)
186✔
2690
        }
186✔
2691

2692
        if node2Timestamp.IsZero() {
382✔
2693
                chanInfo.Node2UpdateTimestamp = time.Unix(0, 0)
186✔
2694
        }
186✔
2695

2696
        return chanInfo
196✔
2697
}
2698

2699
// BlockChannelRange represents a range of channels for a given block height.
// It is the per-block grouping returned by FilterChannelRange.
type BlockChannelRange struct {
        // Height is the height of the block all of the channels below were
        // included in.
        Height uint32

        // Channels is the list of channels identified by their short ID
        // representation known to us that were included in the block height
        // above. The list may include channel update timestamp information if
        // requested.
        Channels []ChannelUpdateInfo
}
2711

2712
// FilterChannelRange returns the channel ID's of all known channels which were
// mined in a block height within the passed range. The channel IDs are grouped
// by their common block height. This method can be used to quickly share with a
// peer the set of channels we know of within a particular range to catch them
// up after a period of time offline. If withTimestamps is true then the
// timestamp info of the latest received channel update messages of the channel
// will be included in the response.
func (c *KVStore) FilterChannelRange(startHeight,
	endHeight uint32, withTimestamps bool) ([]BlockChannelRange, error) {

	// The lowest possible SCID within the start block: only the block
	// height is set, tx index and position are zero.
	startChanID := &lnwire.ShortChannelID{
		BlockHeight: startHeight,
	}

	// The highest possible SCID within the end block: tx index and
	// position are saturated (the tx index is a 24-bit field, hence the
	// mask).
	endChanID := lnwire.ShortChannelID{
		BlockHeight: endHeight,
		TxIndex:     math.MaxUint32 & 0x00ffffff,
		TxPosition:  math.MaxUint16,
	}

	// As we need to perform a range scan, we'll convert the starting and
	// ending height to their corresponding values when encoded using short
	// channel ID's.
	var chanIDStart, chanIDEnd [8]byte
	byteOrder.PutUint64(chanIDStart[:], startChanID.ToUint64())
	byteOrder.PutUint64(chanIDEnd[:], endChanID.ToUint64())

	var channelsPerBlock map[uint32][]ChannelUpdateInfo
	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrGraphNoEdgesFound
		}

		cursor := edgeIndex.ReadCursor()

		// We'll now iterate through the database, and find each
		// channel ID that resides within the specified range.
		//
		//nolint:ll
		for k, v := cursor.Seek(chanIDStart[:]); k != nil &&
			bytes.Compare(k, chanIDEnd[:]) <= 0; k, v = cursor.Next() {
			// Don't send alias SCIDs during gossip sync.
			edgeReader := bytes.NewReader(v)
			edgeInfo, err := deserializeChanEdgeInfo(edgeReader)
			if err != nil {
				return err
			}

			// A nil AuthProof indicates an unannounced channel, so
			// it is excluded from the gossip response.
			if edgeInfo.AuthProof == nil {
				continue
			}

			// This channel ID rests within the target range, so
			// we'll add it to our returned set.
			rawCid := byteOrder.Uint64(k)
			cid := lnwire.NewShortChanIDFromInt(rawCid)

			// Start with epoch timestamps (zero time is normalized
			// by NewChannelUpdateInfo); they may be overwritten
			// below if withTimestamps is set.
			chanInfo := NewChannelUpdateInfo(
				cid, time.Time{}, time.Time{},
			)

			if !withTimestamps {
				channelsPerBlock[cid.BlockHeight] = append(
					channelsPerBlock[cid.BlockHeight],
					chanInfo,
				)

				continue
			}

			node1Key, node2Key := computeEdgePolicyKeys(edgeInfo)

			rawPolicy := edges.Get(node1Key)
			if len(rawPolicy) != 0 {
				r := bytes.NewReader(rawPolicy)

				// Deserialization errors for optional fields
				// or extra TLV bytes are tolerated here so that
				// a single malformed policy doesn't abort the
				// whole range query.
				edge, err := deserializeChanEdgePolicyRaw(r)
				if err != nil && !errors.Is(
					err, ErrEdgePolicyOptionalFieldNotFound,
				) && !errors.Is(err, ErrParsingExtraTLVBytes) {

					return err
				}

				chanInfo.Node1UpdateTimestamp = edge.LastUpdate
			}

			rawPolicy = edges.Get(node2Key)
			if len(rawPolicy) != 0 {
				r := bytes.NewReader(rawPolicy)

				edge, err := deserializeChanEdgePolicyRaw(r)
				if err != nil && !errors.Is(
					err, ErrEdgePolicyOptionalFieldNotFound,
				) && !errors.Is(err, ErrParsingExtraTLVBytes) {

					return err
				}

				chanInfo.Node2UpdateTimestamp = edge.LastUpdate
			}

			channelsPerBlock[cid.BlockHeight] = append(
				channelsPerBlock[cid.BlockHeight], chanInfo,
			)
		}

		return nil
	}, func() {
		// Reset closure: re-initialize the result map in case the
		// transaction is retried.
		channelsPerBlock = make(map[uint32][]ChannelUpdateInfo)
	})

	switch {
	// If we don't know of any channels yet, then there's nothing to
	// filter, so we'll return an empty slice.
	case errors.Is(err, ErrGraphNoEdgesFound) || len(channelsPerBlock) == 0:
		return nil, nil

	case err != nil:
		return nil, err
	}

	// Return the channel ranges in ascending block height order.
	blocks := make([]uint32, 0, len(channelsPerBlock))
	for block := range channelsPerBlock {
		blocks = append(blocks, block)
	}
	sort.Slice(blocks, func(i, j int) bool {
		return blocks[i] < blocks[j]
	})

	channelRanges := make([]BlockChannelRange, 0, len(channelsPerBlock))
	for _, block := range blocks {
		channelRanges = append(channelRanges, BlockChannelRange{
			Height:   block,
			Channels: channelsPerBlock[block],
		})
	}

	return channelRanges, nil
}
2858

2859
// FetchChanInfos returns the set of channel edges that correspond to the passed
2860
// channel ID's. If an edge is the query is unknown to the database, it will
2861
// skipped and the result will contain only those edges that exist at the time
2862
// of the query. This can be used to respond to peer queries that are seeking to
2863
// fill in gaps in their view of the channel graph.
2864
func (c *KVStore) FetchChanInfos(chanIDs []uint64) ([]ChannelEdge, error) {
4✔
2865
        return c.fetchChanInfos(nil, chanIDs)
4✔
2866
}
4✔
2867

2868
// fetchChanInfos returns the set of channel edges that correspond to the passed
// channel ID's. If an edge in the query is unknown to the database, it will be
// skipped and the result will contain only those edges that exist at the time
// of the query. This can be used to respond to peer queries that are seeking to
// fill in gaps in their view of the channel graph.
//
// NOTE: An optional transaction may be provided. If none is provided, then a
// new one will be created.
func (c *KVStore) fetchChanInfos(tx kvdb.RTx, chanIDs []uint64) (
	[]ChannelEdge, error) {
	// TODO(roasbeef): sort cids?

	var (
		chanEdges []ChannelEdge
		// cidBytes is a scratch buffer reused for each channel ID's
		// big-endian encoding.
		cidBytes [8]byte
	)

	fetchChanInfos := func(tx kvdb.RTx) error {
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrGraphNoEdgesFound
		}
		nodes := tx.ReadBucket(nodeBucket)
		if nodes == nil {
			return ErrGraphNotFound
		}

		for _, cid := range chanIDs {
			byteOrder.PutUint64(cidBytes[:], cid)

			// First, we'll fetch the static edge information. If
			// the edge is unknown, we will skip the edge and
			// continue gathering all known edges.
			edgeInfo, err := fetchChanEdgeInfo(
				edgeIndex, cidBytes[:],
			)
			switch {
			case errors.Is(err, ErrEdgeNotFound):
				continue
			case err != nil:
				return err
			}

			// With the static information obtained, we'll now
			// fetch the dynamic policy info.
			edge1, edge2, err := fetchChanEdgePolicies(
				edgeIndex, edges, cidBytes[:],
			)
			if err != nil {
				return err
			}

			// Resolve both endpoint nodes of the channel; these
			// are expected to be present for any known edge.
			node1, err := fetchLightningNode(
				nodes, edgeInfo.NodeKey1Bytes[:],
			)
			if err != nil {
				return err
			}

			node2, err := fetchLightningNode(
				nodes, edgeInfo.NodeKey2Bytes[:],
			)
			if err != nil {
				return err
			}

			chanEdges = append(chanEdges, ChannelEdge{
				Info:    edgeInfo,
				Policy1: edge1,
				Policy2: edge2,
				Node1:   node1,
				Node2:   node2,
			})
		}

		return nil
	}

	if tx == nil {
		// No transaction supplied: run inside a fresh view, with the
		// reset closure clearing any partial results on retry.
		err := kvdb.View(c.db, fetchChanInfos, func() {
			chanEdges = nil
		})
		if err != nil {
			return nil, err
		}

		return chanEdges, nil
	}

	err := fetchChanInfos(tx)
	if err != nil {
		return nil, err
	}

	return chanEdges, nil
}
2968

2969
func delEdgeUpdateIndexEntry(edgesBucket kvdb.RwBucket, chanID uint64,
2970
        edge1, edge2 *models.ChannelEdgePolicy) error {
137✔
2971

137✔
2972
        // First, we'll fetch the edge update index bucket which currently
137✔
2973
        // stores an entry for the channel we're about to delete.
137✔
2974
        updateIndex := edgesBucket.NestedReadWriteBucket(edgeUpdateIndexBucket)
137✔
2975
        if updateIndex == nil {
137✔
2976
                // No edges in bucket, return early.
×
2977
                return nil
×
2978
        }
×
2979

2980
        // Now that we have the bucket, we'll attempt to construct a template
2981
        // for the index key: updateTime || chanid.
2982
        var indexKey [8 + 8]byte
137✔
2983
        byteOrder.PutUint64(indexKey[8:], chanID)
137✔
2984

137✔
2985
        // With the template constructed, we'll attempt to delete an entry that
137✔
2986
        // would have been created by both edges: we'll alternate the update
137✔
2987
        // times, as one may had overridden the other.
137✔
2988
        if edge1 != nil {
147✔
2989
                byteOrder.PutUint64(
10✔
2990
                        indexKey[:8], uint64(edge1.LastUpdate.Unix()),
10✔
2991
                )
10✔
2992
                if err := updateIndex.Delete(indexKey[:]); err != nil {
10✔
2993
                        return err
×
2994
                }
×
2995
        }
2996

2997
        // We'll also attempt to delete the entry that may have been created by
2998
        // the second edge.
2999
        if edge2 != nil {
149✔
3000
                byteOrder.PutUint64(
12✔
3001
                        indexKey[:8], uint64(edge2.LastUpdate.Unix()),
12✔
3002
                )
12✔
3003
                if err := updateIndex.Delete(indexKey[:]); err != nil {
12✔
3004
                        return err
×
3005
                }
×
3006
        }
3007

3008
        return nil
137✔
3009
}
3010

3011
// delChannelEdgeUnsafe deletes the edge with the given chanID from the graph
// cache. It then goes on to delete any policy info and edge info for this
// channel from the DB and finally, if isZombie is true, it will add an entry
// for this channel in the zombie index. If strictZombie is also true, the
// zombie entry only allows the lagging party (per makeZombiePubkeys) to
// resurrect the channel.
//
// NOTE: this method MUST only be called if the cacheMu has already been
// acquired.
func (c *KVStore) delChannelEdgeUnsafe(edges, edgeIndex, chanIndex,
	zombieIndex kvdb.RwBucket, chanID []byte, isZombie,
	strictZombie bool) (*models.ChannelEdgeInfo, error) {

	edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID)
	if err != nil {
		return nil, err
	}

	// We'll also remove the entry in the edge update index bucket before
	// we delete the edges themselves so we can access their last update
	// times.
	cid := byteOrder.Uint64(chanID)
	edge1, edge2, err := fetchChanEdgePolicies(edgeIndex, edges, chanID)
	if err != nil {
		return nil, err
	}
	err = delEdgeUpdateIndexEntry(edges, cid, edge1, edge2)
	if err != nil {
		return nil, err
	}

	// The edge key is of the format pubKey || chanID. First we construct
	// the latter half, populating the channel ID.
	var edgeKey [33 + 8]byte
	copy(edgeKey[33:], chanID)

	// With the latter half constructed, copy over the first public key to
	// delete the edge in this direction, then the second to delete the
	// edge in the opposite direction.
	copy(edgeKey[:33], edgeInfo.NodeKey1Bytes[:])
	if edges.Get(edgeKey[:]) != nil {
		if err := edges.Delete(edgeKey[:]); err != nil {
			return nil, err
		}
	}
	copy(edgeKey[:33], edgeInfo.NodeKey2Bytes[:])
	if edges.Get(edgeKey[:]) != nil {
		if err := edges.Delete(edgeKey[:]); err != nil {
			return nil, err
		}
	}

	// As part of deleting the edge we also remove all disabled entries
	// from the edgePolicyDisabledIndex bucket. We do that for both
	// directions.
	err = updateEdgePolicyDisabledIndex(edges, cid, false, false)
	if err != nil {
		return nil, err
	}
	err = updateEdgePolicyDisabledIndex(edges, cid, true, false)
	if err != nil {
		return nil, err
	}

	// With the edge data deleted, we can purge the information from the two
	// edge indexes.
	if err := edgeIndex.Delete(chanID); err != nil {
		return nil, err
	}
	// The channel index is keyed by the serialized channel point.
	var b bytes.Buffer
	if err := WriteOutpoint(&b, &edgeInfo.ChannelPoint); err != nil {
		return nil, err
	}
	if err := chanIndex.Delete(b.Bytes()); err != nil {
		return nil, err
	}

	// Finally, we'll mark the edge as a zombie within our index if it's
	// being removed due to the channel becoming a zombie. We do this to
	// ensure we don't store unnecessary data for spent channels.
	if !isZombie {
		return edgeInfo, nil
	}

	nodeKey1, nodeKey2 := edgeInfo.NodeKey1Bytes, edgeInfo.NodeKey2Bytes
	if strictZombie {
		// Only pass along the update times of policies we actually
		// have; makeZombiePubkeys uses them to decide which party may
		// resurrect the channel.
		var e1UpdateTime, e2UpdateTime *time.Time
		if edge1 != nil {
			e1UpdateTime = &edge1.LastUpdate
		}
		if edge2 != nil {
			e2UpdateTime = &edge2.LastUpdate
		}

		nodeKey1, nodeKey2 = makeZombiePubkeys(
			edgeInfo.NodeKey1Bytes, edgeInfo.NodeKey2Bytes,
			e1UpdateTime, e2UpdateTime,
		)
	}

	return edgeInfo, markEdgeZombie(
		zombieIndex, byteOrder.Uint64(chanID), nodeKey1, nodeKey2,
	)
}
3113

3114
// makeZombiePubkeys derives the node pubkeys to store in the zombie index for a
3115
// particular pair of channel policies. The return values are one of:
3116
//  1. (pubkey1, pubkey2)
3117
//  2. (pubkey1, blank)
3118
//  3. (blank, pubkey2)
3119
//
3120
// A blank pubkey means that corresponding node will be unable to resurrect a
3121
// channel on its own. For example, node1 may continue to publish recent
3122
// updates, but node2 has fallen way behind. After marking an edge as a zombie,
3123
// we don't want another fresh update from node1 to resurrect, as the edge can
3124
// only become live once node2 finally sends something recent.
3125
//
3126
// In the case where we have neither update, we allow either party to resurrect
3127
// the channel. If the channel were to be marked zombie again, it would be
3128
// marked with the correct lagging channel since we received an update from only
3129
// one side.
3130
func makeZombiePubkeys(node1, node2 [33]byte, e1, e2 *time.Time) ([33]byte,
3131
        [33]byte) {
3✔
3132

3✔
3133
        switch {
3✔
3134
        // If we don't have either edge policy, we'll return both pubkeys so
3135
        // that the channel can be resurrected by either party.
3136
        case e1 == nil && e2 == nil:
×
3137
                return node1, node2
×
3138

3139
        // If we're missing edge1, or if both edges are present but edge1 is
3140
        // older, we'll return edge1's pubkey and a blank pubkey for edge2. This
3141
        // means that only an update from edge1 will be able to resurrect the
3142
        // channel.
3143
        case e1 == nil || (e2 != nil && e1.Before(*e2)):
1✔
3144
                return node1, [33]byte{}
1✔
3145

3146
        // Otherwise, we're missing edge2 or edge2 is the older side, so we
3147
        // return a blank pubkey for edge1. In this case, only an update from
3148
        // edge2 can resurect the channel.
3149
        default:
2✔
3150
                return [33]byte{}, node1
2✔
3151
        }
3152
}
3153

3154
// UpdateEdgePolicy updates the edge routing policy for a single directed edge
3155
// within the database for the referenced channel. The `flags` attribute within
3156
// the ChannelEdgePolicy determines which of the directed edges are being
3157
// updated. If the flag is 1, then the first node's information is being
3158
// updated, otherwise it's the second node's information. The node ordering is
3159
// determined by the lexicographical ordering of the identity public keys of the
3160
// nodes on either side of the channel.
3161
func (c *KVStore) UpdateEdgePolicy(ctx context.Context,
3162
        edge *models.ChannelEdgePolicy,
3163
        opts ...batch.SchedulerOption) (route.Vertex, route.Vertex, error) {
2,875✔
3164

2,875✔
3165
        var (
2,875✔
3166
                isUpdate1    bool
2,875✔
3167
                edgeNotFound bool
2,875✔
3168
                from, to     route.Vertex
2,875✔
3169
        )
2,875✔
3170

2,875✔
3171
        r := &batch.Request[kvdb.RwTx]{
2,875✔
3172
                Opts: batch.NewSchedulerOptions(opts...),
2,875✔
3173
                Reset: func() {
5,751✔
3174
                        isUpdate1 = false
2,876✔
3175
                        edgeNotFound = false
2,876✔
3176
                },
2,876✔
3177
                Do: func(tx kvdb.RwTx) error {
2,876✔
3178
                        // Validate that the ExtraOpaqueData is in fact a valid
2,876✔
3179
                        // TLV stream. This is done here instead of within
2,876✔
3180
                        // updateEdgePolicy so that updateEdgePolicy can be used
2,876✔
3181
                        // by unit tests to recreate the case where we already
2,876✔
3182
                        // have nodes persisted with invalid TLV data.
2,876✔
3183
                        err := edge.ExtraOpaqueData.ValidateTLV()
2,876✔
3184
                        if err != nil {
2,878✔
3185
                                return fmt.Errorf("%w: %w",
2✔
3186
                                        ErrParsingExtraTLVBytes, err)
2✔
3187
                        }
2✔
3188

3189
                        from, to, isUpdate1, err = updateEdgePolicy(tx, edge)
2,874✔
3190
                        if err != nil {
2,878✔
3191
                                log.Errorf("UpdateEdgePolicy faild: %v", err)
4✔
3192
                        }
4✔
3193

3194
                        // Silence ErrEdgeNotFound so that the batch can
3195
                        // succeed, but propagate the error via local state.
3196
                        if errors.Is(err, ErrEdgeNotFound) {
2,878✔
3197
                                edgeNotFound = true
4✔
3198
                                return nil
4✔
3199
                        }
4✔
3200

3201
                        return err
2,870✔
3202
                },
3203
                OnCommit: func(err error) error {
2,875✔
3204
                        switch {
2,875✔
3205
                        case err != nil:
1✔
3206
                                return err
1✔
3207
                        case edgeNotFound:
4✔
3208
                                return ErrEdgeNotFound
4✔
3209
                        default:
2,870✔
3210
                                c.updateEdgeCache(edge, isUpdate1)
2,870✔
3211
                                return nil
2,870✔
3212
                        }
3213
                },
3214
        }
3215

3216
        err := c.chanScheduler.Execute(ctx, r)
2,875✔
3217

2,875✔
3218
        return from, to, err
2,875✔
3219
}
3220

3221
func (c *KVStore) updateEdgeCache(e *models.ChannelEdgePolicy,
3222
        isUpdate1 bool) {
2,870✔
3223

2,870✔
3224
        // If an entry for this channel is found in reject cache, we'll modify
2,870✔
3225
        // the entry with the updated timestamp for the direction that was just
2,870✔
3226
        // written. If the edge doesn't exist, we'll load the cache entry lazily
2,870✔
3227
        // during the next query for this edge.
2,870✔
3228
        if entry, ok := c.rejectCache.get(e.ChannelID); ok {
2,875✔
3229
                if isUpdate1 {
8✔
3230
                        entry.upd1Time = e.LastUpdate.Unix()
3✔
3231
                } else {
5✔
3232
                        entry.upd2Time = e.LastUpdate.Unix()
2✔
3233
                }
2✔
3234
                c.rejectCache.insert(e.ChannelID, entry)
5✔
3235
        }
3236

3237
        // If an entry for this channel is found in channel cache, we'll modify
3238
        // the entry with the updated policy for the direction that was just
3239
        // written. If the edge doesn't exist, we'll defer loading the info and
3240
        // policies and lazily read from disk during the next query.
3241
        if channel, ok := c.chanCache.get(e.ChannelID); ok {
2,870✔
3242
                if isUpdate1 {
×
3243
                        channel.Policy1 = e
×
3244
                } else {
×
3245
                        channel.Policy2 = e
×
3246
                }
×
3247
                c.chanCache.insert(e.ChannelID, channel)
×
3248
        }
3249
}
3250

3251
// updateEdgePolicy attempts to update an edge's policy within the relevant
// buckets using an existing database transaction. The returned boolean will be
// true if the updated policy belongs to node1, and false if the policy belonged
// to node2. The two vertices returned are the from/to nodes of the updated
// direction.
func updateEdgePolicy(tx kvdb.RwTx, edge *models.ChannelEdgePolicy) (
	route.Vertex, route.Vertex, bool, error) {

	var noVertex route.Vertex

	edges := tx.ReadWriteBucket(edgeBucket)
	if edges == nil {
		return noVertex, noVertex, false, ErrEdgeNotFound
	}
	edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
	if edgeIndex == nil {
		return noVertex, noVertex, false, ErrEdgeNotFound
	}

	// Create the channelID key by converting the channel ID
	// integer into a byte slice.
	var chanID [8]byte
	byteOrder.PutUint64(chanID[:], edge.ChannelID)

	// With the channel ID, we then fetch the value storing the two
	// nodes which connect this channel edge. The value is laid out as
	// nodeKey1 (33 bytes) || nodeKey2 (33 bytes) || ...
	nodeInfo := edgeIndex.Get(chanID[:])
	if nodeInfo == nil {
		return noVertex, noVertex, false, ErrEdgeNotFound
	}

	// Depending on the flags value passed above, either the first
	// or second edge policy is being updated.
	var fromNode, toNode []byte
	var isUpdate1 bool
	if edge.ChannelFlags&lnwire.ChanUpdateDirection == 0 {
		fromNode = nodeInfo[:33]
		toNode = nodeInfo[33:66]
		isUpdate1 = true
	} else {
		fromNode = nodeInfo[33:66]
		toNode = nodeInfo[:33]
		isUpdate1 = false
	}

	// Finally, with the direction of the edge being updated
	// identified, we update the on-disk edge representation.
	err := putChanEdgePolicy(edges, edge, fromNode, toNode)
	if err != nil {
		return noVertex, noVertex, false, err
	}

	var (
		fromNodePubKey route.Vertex
		toNodePubKey   route.Vertex
	)
	copy(fromNodePubKey[:], fromNode)
	copy(toNodePubKey[:], toNode)

	return fromNodePubKey, toNodePubKey, isUpdate1, nil
}
3311

3312
// isPublic determines whether the node is seen as public within the graph from
// the source node's point of view. An existing database transaction can also be
// specified.
func (c *KVStore) isPublic(tx kvdb.RTx, nodePub route.Vertex,
	sourcePubKey []byte) (bool, error) {

	// In order to determine whether this node is publicly advertised within
	// the graph, we'll need to look at all of its edges and check whether
	// they extend to any other node than the source node. errDone will be
	// used to terminate the check early.
	nodeIsPublic := false
	errDone := errors.New("done")
	err := c.forEachNodeChannelTx(tx, nodePub, func(tx kvdb.RTx,
		info *models.ChannelEdgeInfo, _ *models.ChannelEdgePolicy,
		_ *models.ChannelEdgePolicy) error {

		// If this edge doesn't extend to the source node, we'll
		// terminate our search as we can now conclude that the node is
		// publicly advertised within the graph due to the local node
		// knowing of the current edge.
		if !bytes.Equal(info.NodeKey1Bytes[:], sourcePubKey) &&
			!bytes.Equal(info.NodeKey2Bytes[:], sourcePubKey) {

			nodeIsPublic = true
			return errDone
		}

		// Since the edge _does_ extend to the source node, we'll also
		// need to ensure that this is a public edge. A non-nil
		// AuthProof indicates the channel was publicly announced.
		if info.AuthProof != nil {
			nodeIsPublic = true
			return errDone
		}

		// Otherwise, we'll continue our search.
		return nil
	}, func() {
		// Reset closure: clear the result in case the transaction is
		// retried.
		nodeIsPublic = false
	})
	// errDone is our own early-exit sentinel, not a real failure.
	if err != nil && !errors.Is(err, errDone) {
		return false, err
	}

	return nodeIsPublic, nil
}
3357

3358
// fetchNodeTx attempts to look up a target node by its identity
3359
// public key. If the node isn't found in the database, then
3360
// ErrGraphNodeNotFound is returned. An optional transaction may be provided.
3361
// If none is provided, then a new one will be created.
3362
func (c *KVStore) fetchNodeTx(tx kvdb.RTx, nodePub route.Vertex) (*models.Node,
3363
        error) {
3,651✔
3364

3,651✔
3365
        return c.fetchLightningNode(tx, nodePub)
3,651✔
3366
}
3,651✔
3367

3368
// FetchNode attempts to look up a target node by its identity public
3369
// key. If the node isn't found in the database, then ErrGraphNodeNotFound is
3370
// returned.
3371
func (c *KVStore) FetchNode(_ context.Context,
3372
        nodePub route.Vertex) (*models.Node, error) {
159✔
3373

159✔
3374
        return c.fetchLightningNode(nil, nodePub)
159✔
3375
}
159✔
3376

3377
// fetchLightningNode attempts to look up a target node by its identity public
3378
// key. If the node isn't found in the database, then ErrGraphNodeNotFound is
3379
// returned. An optional transaction may be provided. If none is provided, then
3380
// a new one will be created.
3381
func (c *KVStore) fetchLightningNode(tx kvdb.RTx,
3382
        nodePub route.Vertex) (*models.Node, error) {
3,810✔
3383

3,810✔
3384
        var node *models.Node
3,810✔
3385
        fetch := func(tx kvdb.RTx) error {
7,620✔
3386
                // First grab the nodes bucket which stores the mapping from
3,810✔
3387
                // pubKey to node information.
3,810✔
3388
                nodes := tx.ReadBucket(nodeBucket)
3,810✔
3389
                if nodes == nil {
3,810✔
3390
                        return ErrGraphNotFound
×
3391
                }
×
3392

3393
                // If a key for this serialized public key isn't found, then
3394
                // the target node doesn't exist within the database.
3395
                nodeBytes := nodes.Get(nodePub[:])
3,810✔
3396
                if nodeBytes == nil {
3,825✔
3397
                        return ErrGraphNodeNotFound
15✔
3398
                }
15✔
3399

3400
                // If the node is found, then we can de deserialize the node
3401
                // information to return to the user.
3402
                nodeReader := bytes.NewReader(nodeBytes)
3,795✔
3403
                n, err := deserializeLightningNode(nodeReader)
3,795✔
3404
                if err != nil {
3,795✔
3405
                        return err
×
3406
                }
×
3407

3408
                node = n
3,795✔
3409

3,795✔
3410
                return nil
3,795✔
3411
        }
3412

3413
        if tx == nil {
3,993✔
3414
                err := kvdb.View(
183✔
3415
                        c.db, fetch, func() {
366✔
3416
                                node = nil
183✔
3417
                        },
183✔
3418
                )
3419
                if err != nil {
187✔
3420
                        return nil, err
4✔
3421
                }
4✔
3422

3423
                return node, nil
179✔
3424
        }
3425

3426
        err := fetch(tx)
3,627✔
3427
        if err != nil {
3,638✔
3428
                return nil, err
11✔
3429
        }
11✔
3430

3431
        return node, nil
3,616✔
3432
}
3433

3434
// HasLightningNode determines if the graph has a vertex identified by the
3435
// target node identity public key. If the node exists in the database, a
3436
// timestamp of when the data for the node was lasted updated is returned along
3437
// with a true boolean. Otherwise, an empty time.Time is returned with a false
3438
// boolean.
3439
func (c *KVStore) HasNode(_ context.Context,
3440
        nodePub [33]byte) (time.Time, bool, error) {
17✔
3441

17✔
3442
        var (
17✔
3443
                updateTime time.Time
17✔
3444
                exists     bool
17✔
3445
        )
17✔
3446

17✔
3447
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
34✔
3448
                // First grab the nodes bucket which stores the mapping from
17✔
3449
                // pubKey to node information.
17✔
3450
                nodes := tx.ReadBucket(nodeBucket)
17✔
3451
                if nodes == nil {
17✔
3452
                        return ErrGraphNotFound
×
3453
                }
×
3454

3455
                // If a key for this serialized public key isn't found, we can
3456
                // exit early.
3457
                nodeBytes := nodes.Get(nodePub[:])
17✔
3458
                if nodeBytes == nil {
20✔
3459
                        exists = false
3✔
3460
                        return nil
3✔
3461
                }
3✔
3462

3463
                // Otherwise we continue on to obtain the time stamp
3464
                // representing the last time the data for this node was
3465
                // updated.
3466
                nodeReader := bytes.NewReader(nodeBytes)
14✔
3467
                node, err := deserializeLightningNode(nodeReader)
14✔
3468
                if err != nil {
14✔
3469
                        return err
×
3470
                }
×
3471

3472
                exists = true
14✔
3473
                updateTime = node.LastUpdate
14✔
3474

14✔
3475
                return nil
14✔
3476
        }, func() {
17✔
3477
                updateTime = time.Time{}
17✔
3478
                exists = false
17✔
3479
        })
17✔
3480
        if err != nil {
17✔
3481
                return time.Time{}, exists, err
×
3482
        }
×
3483

3484
        return updateTime, exists, nil
17✔
3485
}
3486

3487
// nodeTraversal is used to traverse all channels of a node given by its
3488
// public key and passes channel information into the specified callback.
3489
//
3490
// NOTE: the reset param is only meaningful if the tx param is nil. If it is
3491
// not nil, the caller is expected to have passed in a reset to the parent
3492
// function's View/Update call which will then apply to the whole transaction.
3493
func nodeTraversal(tx kvdb.RTx, nodePub []byte, db kvdb.Backend,
3494
        cb func(kvdb.RTx, *models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
3495
                *models.ChannelEdgePolicy) error, reset func()) error {
1,268✔
3496

1,268✔
3497
        traversal := func(tx kvdb.RTx) error {
2,536✔
3498
                edges := tx.ReadBucket(edgeBucket)
1,268✔
3499
                if edges == nil {
1,268✔
3500
                        return ErrGraphNotFound
×
3501
                }
×
3502
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
1,268✔
3503
                if edgeIndex == nil {
1,268✔
3504
                        return ErrGraphNoEdgesFound
×
3505
                }
×
3506

3507
                // In order to reach all the edges for this node, we take
3508
                // advantage of the construction of the key-space within the
3509
                // edge bucket. The keys are stored in the form: pubKey ||
3510
                // chanID. Therefore, starting from a chanID of zero, we can
3511
                // scan forward in the bucket, grabbing all the edges for the
3512
                // node. Once the prefix no longer matches, then we know we're
3513
                // done.
3514
                var nodeStart [33 + 8]byte
1,268✔
3515
                copy(nodeStart[:], nodePub)
1,268✔
3516
                copy(nodeStart[33:], chanStart[:])
1,268✔
3517

1,268✔
3518
                // Starting from the key pubKey || 0, we seek forward in the
1,268✔
3519
                // bucket until the retrieved key no longer has the public key
1,268✔
3520
                // as its prefix. This indicates that we've stepped over into
1,268✔
3521
                // another node's edges, so we can terminate our scan.
1,268✔
3522
                edgeCursor := edges.ReadCursor()
1,268✔
3523
                for nodeEdge, _ := edgeCursor.Seek(nodeStart[:]); bytes.HasPrefix(nodeEdge, nodePub); nodeEdge, _ = edgeCursor.Next() { //nolint:ll
5,110✔
3524
                        // If the prefix still matches, the channel id is
3,842✔
3525
                        // returned in nodeEdge. Channel id is used to lookup
3,842✔
3526
                        // the node at the other end of the channel and both
3,842✔
3527
                        // edge policies.
3,842✔
3528
                        chanID := nodeEdge[33:]
3,842✔
3529
                        edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID)
3,842✔
3530
                        if err != nil {
3,842✔
3531
                                return err
×
3532
                        }
×
3533

3534
                        outgoingPolicy, err := fetchChanEdgePolicy(
3,842✔
3535
                                edges, chanID, nodePub,
3,842✔
3536
                        )
3,842✔
3537
                        if err != nil {
3,842✔
3538
                                return err
×
3539
                        }
×
3540

3541
                        otherNode, err := edgeInfo.OtherNodeKeyBytes(nodePub)
3,842✔
3542
                        if err != nil {
3,842✔
3543
                                return err
×
3544
                        }
×
3545

3546
                        incomingPolicy, err := fetchChanEdgePolicy(
3,842✔
3547
                                edges, chanID, otherNode[:],
3,842✔
3548
                        )
3,842✔
3549
                        if err != nil {
3,842✔
3550
                                return err
×
3551
                        }
×
3552

3553
                        // Finally, we execute the callback.
3554
                        err = cb(tx, edgeInfo, outgoingPolicy, incomingPolicy)
3,842✔
3555
                        if err != nil {
3,851✔
3556
                                return err
9✔
3557
                        }
9✔
3558
                }
3559

3560
                return nil
1,259✔
3561
        }
3562

3563
        // If no transaction was provided, then we'll create a new transaction
3564
        // to execute the transaction within.
3565
        if tx == nil {
1,297✔
3566
                return kvdb.View(db, traversal, reset)
29✔
3567
        }
29✔
3568

3569
        // Otherwise, we re-use the existing transaction to execute the graph
3570
        // traversal.
3571
        return traversal(tx)
1,239✔
3572
}
3573

3574
// ForEachNodeChannel iterates through all channels of the given node,
3575
// executing the passed callback with an edge info structure and the policies
3576
// of each end of the channel. The first edge policy is the outgoing edge *to*
3577
// the connecting node, while the second is the incoming edge *from* the
3578
// connecting node. If the callback returns an error, then the iteration is
3579
// halted with the error propagated back up to the caller.
3580
//
3581
// Unknown policies are passed into the callback as nil values.
3582
func (c *KVStore) ForEachNodeChannel(_ context.Context, nodePub route.Vertex,
3583
        cb func(*models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
3584
                *models.ChannelEdgePolicy) error, reset func()) error {
6✔
3585

6✔
3586
        return nodeTraversal(
6✔
3587
                nil, nodePub[:], c.db, func(_ kvdb.RTx,
6✔
3588
                        info *models.ChannelEdgeInfo, policy,
6✔
3589
                        policy2 *models.ChannelEdgePolicy) error {
16✔
3590

10✔
3591
                        return cb(info, policy, policy2)
10✔
3592
                }, reset,
10✔
3593
        )
3594
}
3595

3596
// ForEachSourceNodeChannel iterates through all channels of the source node,
3597
// executing the passed callback on each. The callback is provided with the
3598
// channel's outpoint, whether we have a policy for the channel and the channel
3599
// peer's node information.
3600
func (c *KVStore) ForEachSourceNodeChannel(_ context.Context,
3601
        cb func(chanPoint wire.OutPoint, havePolicy bool,
3602
                otherNode *models.Node) error, reset func()) error {
1✔
3603

1✔
3604
        return kvdb.View(c.db, func(tx kvdb.RTx) error {
2✔
3605
                nodes := tx.ReadBucket(nodeBucket)
1✔
3606
                if nodes == nil {
1✔
3607
                        return ErrGraphNotFound
×
3608
                }
×
3609

3610
                node, err := sourceNodeWithTx(nodes)
1✔
3611
                if err != nil {
1✔
3612
                        return err
×
3613
                }
×
3614

3615
                return nodeTraversal(
1✔
3616
                        tx, node.PubKeyBytes[:], c.db, func(tx kvdb.RTx,
1✔
3617
                                info *models.ChannelEdgeInfo,
1✔
3618
                                policy, _ *models.ChannelEdgePolicy) error {
3✔
3619

2✔
3620
                                peer, err := c.fetchOtherNode(
2✔
3621
                                        tx, info, node.PubKeyBytes[:],
2✔
3622
                                )
2✔
3623
                                if err != nil {
2✔
3624
                                        return err
×
3625
                                }
×
3626

3627
                                return cb(
2✔
3628
                                        info.ChannelPoint, policy != nil, peer,
2✔
3629
                                )
2✔
3630
                        }, reset,
3631
                )
3632
        }, reset)
3633
}
3634

3635
// forEachNodeChannelTx iterates through all channels of the given node,
3636
// executing the passed callback with an edge info structure and the policies
3637
// of each end of the channel. The first edge policy is the outgoing edge *to*
3638
// the connecting node, while the second is the incoming edge *from* the
3639
// connecting node. If the callback returns an error, then the iteration is
3640
// halted with the error propagated back up to the caller.
3641
//
3642
// Unknown policies are passed into the callback as nil values.
3643
//
3644
// If the caller wishes to re-use an existing boltdb transaction, then it
3645
// should be passed as the first argument.  Otherwise, the first argument should
3646
// be nil and a fresh transaction will be created to execute the graph
3647
// traversal.
3648
//
3649
// NOTE: the reset function is only meaningful if the tx param is nil.
3650
func (c *KVStore) forEachNodeChannelTx(tx kvdb.RTx,
3651
        nodePub route.Vertex, cb func(kvdb.RTx, *models.ChannelEdgeInfo,
3652
                *models.ChannelEdgePolicy, *models.ChannelEdgePolicy) error,
3653
        reset func()) error {
999✔
3654

999✔
3655
        return nodeTraversal(tx, nodePub[:], c.db, cb, reset)
999✔
3656
}
999✔
3657

3658
// fetchOtherNode attempts to fetch the full Node that's opposite of
3659
// the target node in the channel. This is useful when one knows the pubkey of
3660
// one of the nodes, and wishes to obtain the full Node for the other
3661
// end of the channel.
3662
func (c *KVStore) fetchOtherNode(tx kvdb.RTx,
3663
        channel *models.ChannelEdgeInfo, thisNodeKey []byte) (
3664
        *models.Node, error) {
2✔
3665

2✔
3666
        // Ensure that the node passed in is actually a member of the channel.
2✔
3667
        var targetNodeBytes [33]byte
2✔
3668
        switch {
2✔
3669
        case bytes.Equal(channel.NodeKey1Bytes[:], thisNodeKey):
×
3670
                targetNodeBytes = channel.NodeKey2Bytes
×
3671
        case bytes.Equal(channel.NodeKey2Bytes[:], thisNodeKey):
2✔
3672
                targetNodeBytes = channel.NodeKey1Bytes
2✔
3673
        default:
×
3674
                return nil, fmt.Errorf("node not participating in this channel")
×
3675
        }
3676

3677
        var targetNode *models.Node
2✔
3678
        fetchNodeFunc := func(tx kvdb.RTx) error {
4✔
3679
                // First grab the nodes bucket which stores the mapping from
2✔
3680
                // pubKey to node information.
2✔
3681
                nodes := tx.ReadBucket(nodeBucket)
2✔
3682
                if nodes == nil {
2✔
3683
                        return ErrGraphNotFound
×
3684
                }
×
3685

3686
                node, err := fetchLightningNode(nodes, targetNodeBytes[:])
2✔
3687
                if err != nil {
2✔
3688
                        return err
×
3689
                }
×
3690

3691
                targetNode = node
2✔
3692

2✔
3693
                return nil
2✔
3694
        }
3695

3696
        // If the transaction is nil, then we'll need to create a new one,
3697
        // otherwise we can use the existing db transaction.
3698
        var err error
2✔
3699
        if tx == nil {
2✔
3700
                err = kvdb.View(c.db, fetchNodeFunc, func() {
×
3701
                        targetNode = nil
×
3702
                })
×
3703
        } else {
2✔
3704
                err = fetchNodeFunc(tx)
2✔
3705
        }
2✔
3706

3707
        return targetNode, err
2✔
3708
}
3709

3710
// computeEdgePolicyKeys is a helper function that can be used to compute the
3711
// keys used to index the channel edge policy info for the two nodes of the
3712
// edge. The keys for node 1 and node 2 are returned respectively.
3713
func computeEdgePolicyKeys(info *models.ChannelEdgeInfo) ([]byte, []byte) {
22✔
3714
        var (
22✔
3715
                node1Key [33 + 8]byte
22✔
3716
                node2Key [33 + 8]byte
22✔
3717
        )
22✔
3718

22✔
3719
        copy(node1Key[:], info.NodeKey1Bytes[:])
22✔
3720
        copy(node2Key[:], info.NodeKey2Bytes[:])
22✔
3721

22✔
3722
        byteOrder.PutUint64(node1Key[33:], info.ChannelID)
22✔
3723
        byteOrder.PutUint64(node2Key[33:], info.ChannelID)
22✔
3724

22✔
3725
        return node1Key[:], node2Key[:]
22✔
3726
}
22✔
3727

3728
// FetchChannelEdgesByOutpoint attempts to lookup the two directed edges for
3729
// the channel identified by the funding outpoint. If the channel can't be
3730
// found, then ErrEdgeNotFound is returned. A struct which houses the general
3731
// information for the channel itself is returned as well as two structs that
3732
// contain the routing policies for the channel in either direction.
3733
func (c *KVStore) FetchChannelEdgesByOutpoint(op *wire.OutPoint) (
3734
        *models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
3735
        *models.ChannelEdgePolicy, error) {
11✔
3736

11✔
3737
        var (
11✔
3738
                edgeInfo *models.ChannelEdgeInfo
11✔
3739
                policy1  *models.ChannelEdgePolicy
11✔
3740
                policy2  *models.ChannelEdgePolicy
11✔
3741
        )
11✔
3742

11✔
3743
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
22✔
3744
                // First, grab the node bucket. This will be used to populate
11✔
3745
                // the Node pointers in each edge read from disk.
11✔
3746
                nodes := tx.ReadBucket(nodeBucket)
11✔
3747
                if nodes == nil {
11✔
3748
                        return ErrGraphNotFound
×
3749
                }
×
3750

3751
                // Next, grab the edge bucket which stores the edges, and also
3752
                // the index itself so we can group the directed edges together
3753
                // logically.
3754
                edges := tx.ReadBucket(edgeBucket)
11✔
3755
                if edges == nil {
11✔
3756
                        return ErrGraphNoEdgesFound
×
3757
                }
×
3758
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
11✔
3759
                if edgeIndex == nil {
11✔
3760
                        return ErrGraphNoEdgesFound
×
3761
                }
×
3762

3763
                // If the channel's outpoint doesn't exist within the outpoint
3764
                // index, then the edge does not exist.
3765
                chanIndex := edges.NestedReadBucket(channelPointBucket)
11✔
3766
                if chanIndex == nil {
11✔
3767
                        return ErrGraphNoEdgesFound
×
3768
                }
×
3769
                var b bytes.Buffer
11✔
3770
                if err := WriteOutpoint(&b, op); err != nil {
11✔
3771
                        return err
×
3772
                }
×
3773
                chanID := chanIndex.Get(b.Bytes())
11✔
3774
                if chanID == nil {
21✔
3775
                        return fmt.Errorf("%w: op=%v", ErrEdgeNotFound, op)
10✔
3776
                }
10✔
3777

3778
                // If the channel is found to exists, then we'll first retrieve
3779
                // the general information for the channel.
3780
                edge, err := fetchChanEdgeInfo(edgeIndex, chanID)
1✔
3781
                if err != nil {
1✔
3782
                        return fmt.Errorf("%w: chanID=%x", err, chanID)
×
3783
                }
×
3784
                edgeInfo = edge
1✔
3785

1✔
3786
                // Once we have the information about the channels' parameters,
1✔
3787
                // we'll fetch the routing policies for each for the directed
1✔
3788
                // edges.
1✔
3789
                e1, e2, err := fetchChanEdgePolicies(edgeIndex, edges, chanID)
1✔
3790
                if err != nil {
1✔
3791
                        return fmt.Errorf("failed to find policy: %w", err)
×
3792
                }
×
3793

3794
                policy1 = e1
1✔
3795
                policy2 = e2
1✔
3796

1✔
3797
                return nil
1✔
3798
        }, func() {
11✔
3799
                edgeInfo = nil
11✔
3800
                policy1 = nil
11✔
3801
                policy2 = nil
11✔
3802
        })
11✔
3803
        if err != nil {
21✔
3804
                return nil, nil, nil, err
10✔
3805
        }
10✔
3806

3807
        return edgeInfo, policy1, policy2, nil
1✔
3808
}
3809

3810
// FetchChannelEdgesByID attempts to lookup the two directed edges for the
3811
// channel identified by the channel ID. If the channel can't be found, then
3812
// ErrEdgeNotFound is returned. A struct which houses the general information
3813
// for the channel itself is returned as well as two structs that contain the
3814
// routing policies for the channel in either direction.
3815
//
3816
// ErrZombieEdge an be returned if the edge is currently marked as a zombie
3817
// within the database. In this case, the ChannelEdgePolicy's will be nil, and
3818
// the ChannelEdgeInfo will only include the public keys of each node.
3819
func (c *KVStore) FetchChannelEdgesByID(chanID uint64) (
3820
        *models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
3821
        *models.ChannelEdgePolicy, error) {
2,892✔
3822

2,892✔
3823
        var (
2,892✔
3824
                edgeInfo  *models.ChannelEdgeInfo
2,892✔
3825
                policy1   *models.ChannelEdgePolicy
2,892✔
3826
                policy2   *models.ChannelEdgePolicy
2,892✔
3827
                channelID [8]byte
2,892✔
3828
        )
2,892✔
3829

2,892✔
3830
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
5,784✔
3831
                // First, grab the node bucket. This will be used to populate
2,892✔
3832
                // the Node pointers in each edge read from disk.
2,892✔
3833
                nodes := tx.ReadBucket(nodeBucket)
2,892✔
3834
                if nodes == nil {
2,892✔
3835
                        return ErrGraphNotFound
×
3836
                }
×
3837

3838
                // Next, grab the edge bucket which stores the edges, and also
3839
                // the index itself so we can group the directed edges together
3840
                // logically.
3841
                edges := tx.ReadBucket(edgeBucket)
2,892✔
3842
                if edges == nil {
2,892✔
3843
                        return ErrGraphNoEdgesFound
×
3844
                }
×
3845
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
2,892✔
3846
                if edgeIndex == nil {
2,892✔
3847
                        return ErrGraphNoEdgesFound
×
3848
                }
×
3849

3850
                byteOrder.PutUint64(channelID[:], chanID)
2,892✔
3851

2,892✔
3852
                // Now, attempt to fetch edge.
2,892✔
3853
                edge, err := fetchChanEdgeInfo(edgeIndex, channelID[:])
2,892✔
3854

2,892✔
3855
                // If it doesn't exist, we'll quickly check our zombie index to
2,892✔
3856
                // see if we've previously marked it as so.
2,892✔
3857
                if errors.Is(err, ErrEdgeNotFound) {
2,893✔
3858
                        // If the zombie index doesn't exist, or the edge is not
1✔
3859
                        // marked as a zombie within it, then we'll return the
1✔
3860
                        // original ErrEdgeNotFound error.
1✔
3861
                        zombieIndex := edges.NestedReadBucket(zombieBucket)
1✔
3862
                        if zombieIndex == nil {
1✔
3863
                                return ErrEdgeNotFound
×
3864
                        }
×
3865

3866
                        isZombie, pubKey1, pubKey2 := isZombieEdge(
1✔
3867
                                zombieIndex, chanID,
1✔
3868
                        )
1✔
3869
                        if !isZombie {
1✔
3870
                                return ErrEdgeNotFound
×
3871
                        }
×
3872

3873
                        // Otherwise, the edge is marked as a zombie, so we'll
3874
                        // populate the edge info with the public keys of each
3875
                        // party as this is the only information we have about
3876
                        // it and return an error signaling so.
3877
                        edgeInfo = &models.ChannelEdgeInfo{
1✔
3878
                                NodeKey1Bytes: pubKey1,
1✔
3879
                                NodeKey2Bytes: pubKey2,
1✔
3880
                        }
1✔
3881

1✔
3882
                        return ErrZombieEdge
1✔
3883
                }
3884

3885
                // Otherwise, we'll just return the error if any.
3886
                if err != nil {
2,891✔
3887
                        return err
×
3888
                }
×
3889

3890
                edgeInfo = edge
2,891✔
3891

2,891✔
3892
                // Then we'll attempt to fetch the accompanying policies of this
2,891✔
3893
                // edge.
2,891✔
3894
                e1, e2, err := fetchChanEdgePolicies(
2,891✔
3895
                        edgeIndex, edges, channelID[:],
2,891✔
3896
                )
2,891✔
3897
                if err != nil {
2,891✔
3898
                        return err
×
3899
                }
×
3900

3901
                policy1 = e1
2,891✔
3902
                policy2 = e2
2,891✔
3903

2,891✔
3904
                return nil
2,891✔
3905
        }, func() {
2,892✔
3906
                edgeInfo = nil
2,892✔
3907
                policy1 = nil
2,892✔
3908
                policy2 = nil
2,892✔
3909
        })
2,892✔
3910
        if errors.Is(err, ErrZombieEdge) {
2,893✔
3911
                return edgeInfo, nil, nil, err
1✔
3912
        }
1✔
3913
        if err != nil {
2,891✔
3914
                return nil, nil, nil, err
×
3915
        }
×
3916

3917
        return edgeInfo, policy1, policy2, nil
2,891✔
3918
}
3919

3920
// IsPublicNode is a helper method that determines whether the node with the
3921
// given public key is seen as a public node in the graph from the graph's
3922
// source node's point of view.
3923
func (c *KVStore) IsPublicNode(pubKey [33]byte) (bool, error) {
13✔
3924
        var nodeIsPublic bool
13✔
3925
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
26✔
3926
                nodes := tx.ReadBucket(nodeBucket)
13✔
3927
                if nodes == nil {
13✔
3928
                        return ErrGraphNodesNotFound
×
3929
                }
×
3930
                ourPubKey := nodes.Get(sourceKey)
13✔
3931
                if ourPubKey == nil {
13✔
3932
                        return ErrSourceNodeNotSet
×
3933
                }
×
3934
                node, err := fetchLightningNode(nodes, pubKey[:])
13✔
3935
                if err != nil {
13✔
3936
                        return err
×
3937
                }
×
3938

3939
                nodeIsPublic, err = c.isPublic(tx, node.PubKeyBytes, ourPubKey)
13✔
3940

13✔
3941
                return err
13✔
3942
        }, func() {
13✔
3943
                nodeIsPublic = false
13✔
3944
        })
13✔
3945
        if err != nil {
13✔
3946
                return false, err
×
3947
        }
×
3948

3949
        return nodeIsPublic, nil
13✔
3950
}
3951

3952
// genMultiSigP2WSH generates the p2wsh'd multisig script for 2 of 2 pubkeys.
3953
func genMultiSigP2WSH(aPub, bPub []byte) ([]byte, error) {
46✔
3954
        witnessScript, err := input.GenMultiSigScript(aPub, bPub)
46✔
3955
        if err != nil {
46✔
3956
                return nil, err
×
3957
        }
×
3958

3959
        // With the witness script generated, we'll now turn it into a p2wsh
3960
        // script:
3961
        //  * OP_0 <sha256(script)>
3962
        bldr := txscript.NewScriptBuilder(
46✔
3963
                txscript.WithScriptAllocSize(input.P2WSHSize),
46✔
3964
        )
46✔
3965
        bldr.AddOp(txscript.OP_0)
46✔
3966
        scriptHash := sha256.Sum256(witnessScript)
46✔
3967
        bldr.AddData(scriptHash[:])
46✔
3968

46✔
3969
        return bldr.Script()
46✔
3970
}
3971

3972
// EdgePoint couples the outpoint of a channel with the funding script that it
3973
// creates. The FilteredChainView will use this to watch for spends of this
3974
// edge point on chain. We require both of these values as depending on the
3975
// concrete implementation, either the pkScript, or the out point will be used.
3976
type EdgePoint struct {
3977
        // FundingPkScript is the p2wsh multi-sig script of the target channel.
3978
        FundingPkScript []byte
3979

3980
        // OutPoint is the outpoint of the target channel.
3981
        OutPoint wire.OutPoint
3982
}
3983

3984
// String returns a human readable version of the target EdgePoint. We return
3985
// the outpoint directly as it is enough to uniquely identify the edge point.
3986
func (e *EdgePoint) String() string {
×
3987
        return e.OutPoint.String()
×
3988
}
×
3989

3990
// ChannelView returns the verifiable edge information for each active channel
3991
// within the known channel graph. The set of UTXO's (along with their scripts)
3992
// returned are the ones that need to be watched on chain to detect channel
3993
// closes on the resident blockchain.
3994
func (c *KVStore) ChannelView() ([]EdgePoint, error) {
22✔
3995
        var edgePoints []EdgePoint
22✔
3996
        if err := kvdb.View(c.db, func(tx kvdb.RTx) error {
44✔
3997
                // We're going to iterate over the entire channel index, so
22✔
3998
                // we'll need to fetch the edgeBucket to get to the index as
22✔
3999
                // it's a sub-bucket.
22✔
4000
                edges := tx.ReadBucket(edgeBucket)
22✔
4001
                if edges == nil {
22✔
4002
                        return ErrGraphNoEdgesFound
×
4003
                }
×
4004
                chanIndex := edges.NestedReadBucket(channelPointBucket)
22✔
4005
                if chanIndex == nil {
22✔
4006
                        return ErrGraphNoEdgesFound
×
4007
                }
×
4008
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
22✔
4009
                if edgeIndex == nil {
22✔
4010
                        return ErrGraphNoEdgesFound
×
4011
                }
×
4012

4013
                // Once we have the proper bucket, we'll range over each key
4014
                // (which is the channel point for the channel) and decode it,
4015
                // accumulating each entry.
4016
                return chanIndex.ForEach(
22✔
4017
                        func(chanPointBytes, chanID []byte) error {
64✔
4018
                                chanPointReader := bytes.NewReader(
42✔
4019
                                        chanPointBytes,
42✔
4020
                                )
42✔
4021

42✔
4022
                                var chanPoint wire.OutPoint
42✔
4023
                                err := ReadOutpoint(chanPointReader, &chanPoint)
42✔
4024
                                if err != nil {
42✔
4025
                                        return err
×
4026
                                }
×
4027

4028
                                edgeInfo, err := fetchChanEdgeInfo(
42✔
4029
                                        edgeIndex, chanID,
42✔
4030
                                )
42✔
4031
                                if err != nil {
42✔
4032
                                        return err
×
4033
                                }
×
4034

4035
                                pkScript, err := genMultiSigP2WSH(
42✔
4036
                                        edgeInfo.BitcoinKey1Bytes[:],
42✔
4037
                                        edgeInfo.BitcoinKey2Bytes[:],
42✔
4038
                                )
42✔
4039
                                if err != nil {
42✔
4040
                                        return err
×
4041
                                }
×
4042

4043
                                edgePoints = append(edgePoints, EdgePoint{
42✔
4044
                                        FundingPkScript: pkScript,
42✔
4045
                                        OutPoint:        chanPoint,
42✔
4046
                                })
42✔
4047

42✔
4048
                                return nil
42✔
4049
                        },
4050
                )
4051
        }, func() {
22✔
4052
                edgePoints = nil
22✔
4053
        }); err != nil {
22✔
4054
                return nil, err
×
4055
        }
×
4056

4057
        return edgePoints, nil
22✔
4058
}
4059

4060
// MarkEdgeZombie attempts to mark a channel identified by its channel ID as a
4061
// zombie. This method is used on an ad-hoc basis, when channels need to be
4062
// marked as zombies outside the normal pruning cycle.
4063
func (c *KVStore) MarkEdgeZombie(chanID uint64,
4064
        pubKey1, pubKey2 [33]byte) error {
123✔
4065

123✔
4066
        c.cacheMu.Lock()
123✔
4067
        defer c.cacheMu.Unlock()
123✔
4068

123✔
4069
        err := kvdb.Batch(c.db, func(tx kvdb.RwTx) error {
246✔
4070
                edges := tx.ReadWriteBucket(edgeBucket)
123✔
4071
                if edges == nil {
123✔
4072
                        return ErrGraphNoEdgesFound
×
4073
                }
×
4074
                zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
123✔
4075
                if err != nil {
123✔
4076
                        return fmt.Errorf("unable to create zombie "+
×
4077
                                "bucket: %w", err)
×
4078
                }
×
4079

4080
                return markEdgeZombie(zombieIndex, chanID, pubKey1, pubKey2)
123✔
4081
        })
4082
        if err != nil {
123✔
4083
                return err
×
4084
        }
×
4085

4086
        c.rejectCache.remove(chanID)
123✔
4087
        c.chanCache.remove(chanID)
123✔
4088

123✔
4089
        return nil
123✔
4090
}
4091

4092
// markEdgeZombie marks an edge as a zombie within our zombie index. The public
4093
// keys should represent the node public keys of the two parties involved in the
4094
// edge.
4095
func markEdgeZombie(zombieIndex kvdb.RwBucket, chanID uint64, pubKey1,
4096
        pubKey2 [33]byte) error {
145✔
4097

145✔
4098
        var k [8]byte
145✔
4099
        byteOrder.PutUint64(k[:], chanID)
145✔
4100

145✔
4101
        var v [66]byte
145✔
4102
        copy(v[:33], pubKey1[:])
145✔
4103
        copy(v[33:], pubKey2[:])
145✔
4104

145✔
4105
        return zombieIndex.Put(k[:], v[:])
145✔
4106
}
145✔
4107

4108
// MarkEdgeLive clears an edge from our zombie index, deeming it as live.
func (c *KVStore) MarkEdgeLive(chanID uint64) error {
	// Hold the cache mutex for the whole call, as required by the unsafe
	// variant below.
	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()

	// A nil tx instructs markEdgeLiveUnsafe to open its own write
	// transaction.
	return c.markEdgeLiveUnsafe(nil, chanID)
}
21✔
4115

4116
// markEdgeLiveUnsafe clears an edge from the zombie index. This method can be
4117
// called with an existing kvdb.RwTx or the argument can be set to nil in which
4118
// case a new transaction will be created.
4119
//
4120
// NOTE: this method MUST only be called if the cacheMu has already been
4121
// acquired.
4122
func (c *KVStore) markEdgeLiveUnsafe(tx kvdb.RwTx, chanID uint64) error {
21✔
4123
        dbFn := func(tx kvdb.RwTx) error {
42✔
4124
                edges := tx.ReadWriteBucket(edgeBucket)
21✔
4125
                if edges == nil {
21✔
4126
                        return ErrGraphNoEdgesFound
×
4127
                }
×
4128
                zombieIndex := edges.NestedReadWriteBucket(zombieBucket)
21✔
4129
                if zombieIndex == nil {
21✔
4130
                        return nil
×
4131
                }
×
4132

4133
                var k [8]byte
21✔
4134
                byteOrder.PutUint64(k[:], chanID)
21✔
4135

21✔
4136
                if len(zombieIndex.Get(k[:])) == 0 {
23✔
4137
                        return ErrZombieEdgeNotFound
2✔
4138
                }
2✔
4139

4140
                return zombieIndex.Delete(k[:])
19✔
4141
        }
4142

4143
        // If the transaction is nil, we'll create a new one. Otherwise, we use
4144
        // the existing transaction
4145
        var err error
21✔
4146
        if tx == nil {
42✔
4147
                err = kvdb.Update(c.db, dbFn, func() {})
42✔
4148
        } else {
×
4149
                err = dbFn(tx)
×
4150
        }
×
4151
        if err != nil {
23✔
4152
                return err
2✔
4153
        }
2✔
4154

4155
        c.rejectCache.remove(chanID)
19✔
4156
        c.chanCache.remove(chanID)
19✔
4157

19✔
4158
        return nil
19✔
4159
}
4160

4161
// IsZombieEdge returns whether the edge is considered zombie. If it is a
4162
// zombie, then the two node public keys corresponding to this edge are also
4163
// returned.
4164
func (c *KVStore) IsZombieEdge(chanID uint64) (bool, [33]byte, [33]byte,
4165
        error) {
14✔
4166

14✔
4167
        var (
14✔
4168
                isZombie         bool
14✔
4169
                pubKey1, pubKey2 [33]byte
14✔
4170
        )
14✔
4171

14✔
4172
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
28✔
4173
                edges := tx.ReadBucket(edgeBucket)
14✔
4174
                if edges == nil {
14✔
4175
                        return ErrGraphNoEdgesFound
×
4176
                }
×
4177
                zombieIndex := edges.NestedReadBucket(zombieBucket)
14✔
4178
                if zombieIndex == nil {
14✔
4179
                        return nil
×
4180
                }
×
4181

4182
                isZombie, pubKey1, pubKey2 = isZombieEdge(zombieIndex, chanID)
14✔
4183

14✔
4184
                return nil
14✔
4185
        }, func() {
14✔
4186
                isZombie = false
14✔
4187
                pubKey1 = [33]byte{}
14✔
4188
                pubKey2 = [33]byte{}
14✔
4189
        })
14✔
4190
        if err != nil {
14✔
4191
                return false, [33]byte{}, [33]byte{}, fmt.Errorf("%w: %w "+
×
4192
                        "(chanID=%d)", ErrCantCheckIfZombieEdgeStr, err, chanID)
×
4193
        }
×
4194

4195
        return isZombie, pubKey1, pubKey2, nil
14✔
4196
}
4197

4198
// isZombieEdge returns whether an entry exists for the given channel in the
4199
// zombie index. If an entry exists, then the two node public keys corresponding
4200
// to this edge are also returned.
4201
func isZombieEdge(zombieIndex kvdb.RBucket,
4202
        chanID uint64) (bool, [33]byte, [33]byte) {
201✔
4203

201✔
4204
        var k [8]byte
201✔
4205
        byteOrder.PutUint64(k[:], chanID)
201✔
4206

201✔
4207
        v := zombieIndex.Get(k[:])
201✔
4208
        if v == nil {
310✔
4209
                return false, [33]byte{}, [33]byte{}
109✔
4210
        }
109✔
4211

4212
        var pubKey1, pubKey2 [33]byte
92✔
4213
        copy(pubKey1[:], v[:33])
92✔
4214
        copy(pubKey2[:], v[33:])
92✔
4215

92✔
4216
        return true, pubKey1, pubKey2
92✔
4217
}
4218

4219
// NumZombies returns the current number of zombie channels in the graph.
4220
func (c *KVStore) NumZombies() (uint64, error) {
4✔
4221
        var numZombies uint64
4✔
4222
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
8✔
4223
                edges := tx.ReadBucket(edgeBucket)
4✔
4224
                if edges == nil {
4✔
4225
                        return nil
×
4226
                }
×
4227
                zombieIndex := edges.NestedReadBucket(zombieBucket)
4✔
4228
                if zombieIndex == nil {
4✔
4229
                        return nil
×
4230
                }
×
4231

4232
                return zombieIndex.ForEach(func(_, _ []byte) error {
6✔
4233
                        numZombies++
2✔
4234
                        return nil
2✔
4235
                })
2✔
4236
        }, func() {
4✔
4237
                numZombies = 0
4✔
4238
        })
4✔
4239
        if err != nil {
4✔
4240
                return 0, err
×
4241
        }
×
4242

4243
        return numZombies, nil
4✔
4244
}
4245

4246
// PutClosedScid stores a SCID for a closed channel in the database. This is so
4247
// that we can ignore channel announcements that we know to be closed without
4248
// having to validate them and fetch a block.
4249
func (c *KVStore) PutClosedScid(scid lnwire.ShortChannelID) error {
1✔
4250
        return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
2✔
4251
                closedScids, err := tx.CreateTopLevelBucket(closedScidBucket)
1✔
4252
                if err != nil {
1✔
4253
                        return err
×
4254
                }
×
4255

4256
                var k [8]byte
1✔
4257
                byteOrder.PutUint64(k[:], scid.ToUint64())
1✔
4258

1✔
4259
                return closedScids.Put(k[:], []byte{})
1✔
4260
        }, func() {})
1✔
4261
}
4262

4263
// IsClosedScid checks whether a channel identified by the passed in scid is
4264
// closed. This helps avoid having to perform expensive validation checks.
4265
// TODO: Add an LRU cache to cut down on disc reads.
4266
func (c *KVStore) IsClosedScid(scid lnwire.ShortChannelID) (bool, error) {
2✔
4267
        var isClosed bool
2✔
4268
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
4✔
4269
                closedScids := tx.ReadBucket(closedScidBucket)
2✔
4270
                if closedScids == nil {
2✔
4271
                        return ErrClosedScidsNotFound
×
4272
                }
×
4273

4274
                var k [8]byte
2✔
4275
                byteOrder.PutUint64(k[:], scid.ToUint64())
2✔
4276

2✔
4277
                if closedScids.Get(k[:]) != nil {
3✔
4278
                        isClosed = true
1✔
4279
                        return nil
1✔
4280
                }
1✔
4281

4282
                return nil
1✔
4283
        }, func() {
2✔
4284
                isClosed = false
2✔
4285
        })
2✔
4286
        if err != nil {
2✔
4287
                return false, err
×
4288
        }
×
4289

4290
        return isClosed, nil
2✔
4291
}
4292

4293
// GraphSession will provide the call-back with access to a NodeTraverser
// instance which can be used to perform queries against the channel graph.
func (c *KVStore) GraphSession(cb func(graph NodeTraverser) error,
	reset func()) error {

	// Run the callback under a single read transaction so every query
	// made through the traverser sees one consistent snapshot of the
	// graph.
	return c.db.View(func(tx walletdb.ReadTx) error {
		return cb(&nodeTraverserSession{
			db: c,
			tx: tx,
		})
	}, reset)
}
4305

4306
// nodeTraverserSession implements the NodeTraverser interface but with a
// backing read only transaction for a consistent view of the graph.
type nodeTraverserSession struct {
	tx kvdb.RTx // read transaction backing all queries in this session.
	db *KVStore // store the queries are delegated to.
}
4312

4313
// ForEachNodeDirectedChannel calls the callback for every channel of the given
// node.
//
// NOTE: Part of the NodeTraverser interface.
func (c *nodeTraverserSession) ForEachNodeDirectedChannel(nodePub route.Vertex,
	cb func(channel *DirectedChannel) error, _ func()) error {

	// Delegate to the store using the session's read transaction; a no-op
	// reset closure is passed since the session holds its own transaction.
	return c.db.forEachNodeDirectedChannel(c.tx, nodePub, cb, func() {})
}
4322

4323
// FetchNodeFeatures returns the features of the given node. If the node is
// unknown, assume no additional features are supported.
//
// NOTE: Part of the NodeTraverser interface.
func (c *nodeTraverserSession) FetchNodeFeatures(nodePub route.Vertex) (
	*lnwire.FeatureVector, error) {

	// Delegate to the store using the session's read transaction.
	return c.db.fetchNodeFeatures(c.tx, nodePub)
}
254✔
4332

4333
// putLightningNode serializes the given node and writes it under its
// compressed public key in nodeBucket. It also maintains two secondary
// indexes: the alias bucket (pubkey -> alias) and the update index keyed by
// (update-time || pubkey), deleting any stale update-index entry left over
// from a previous version of the node.
func putLightningNode(nodeBucket, aliasBucket, updateIndex kvdb.RwBucket,
	node *models.Node) error {

	var (
		scratch [16]byte
		b       bytes.Buffer
	)

	pub, err := node.PubKey()
	if err != nil {
		return err
	}
	nodePub := pub.SerializeCompressed()

	// If the node has the update time set, write it, else write 0.
	updateUnix := uint64(0)
	if node.LastUpdate.Unix() > 0 {
		updateUnix = uint64(node.LastUpdate.Unix())
	}

	byteOrder.PutUint64(scratch[:8], updateUnix)
	if _, err := b.Write(scratch[:8]); err != nil {
		return err
	}

	if _, err := b.Write(nodePub); err != nil {
		return err
	}

	// If we got a node announcement for this node, we will have the rest
	// of the data available. If not we don't have more data to write.
	if !node.HaveAnnouncement() {
		// Write HaveNodeAnnouncement=0.
		byteOrder.PutUint16(scratch[:2], 0)
		if _, err := b.Write(scratch[:2]); err != nil {
			return err
		}

		return nodeBucket.Put(nodePub, b.Bytes())
	}

	// Write HaveNodeAnnouncement=1.
	byteOrder.PutUint16(scratch[:2], 1)
	if _, err := b.Write(scratch[:2]); err != nil {
		return err
	}

	// The RGB color is written as three single bytes.
	nodeColor := node.Color.UnwrapOr(color.RGBA{})

	if err := binary.Write(&b, byteOrder, nodeColor.R); err != nil {
		return err
	}
	if err := binary.Write(&b, byteOrder, nodeColor.G); err != nil {
		return err
	}
	if err := binary.Write(&b, byteOrder, nodeColor.B); err != nil {
		return err
	}

	err = wire.WriteVarString(&b, 0, node.Alias.UnwrapOr(""))
	if err != nil {
		return err
	}

	if err := node.Features.Encode(&b); err != nil {
		return err
	}

	// Addresses are length-prefixed with a 16-bit count.
	numAddresses := uint16(len(node.Addresses))
	byteOrder.PutUint16(scratch[:2], numAddresses)
	if _, err := b.Write(scratch[:2]); err != nil {
		return err
	}

	for _, address := range node.Addresses {
		if err := SerializeAddr(&b, address); err != nil {
			return err
		}
	}

	sigLen := len(node.AuthSigBytes)
	if sigLen > 80 {
		return fmt.Errorf("max sig len allowed is 80, had %v",
			sigLen)
	}

	err = wire.WriteVarBytes(&b, 0, node.AuthSigBytes)
	if err != nil {
		return err
	}

	if len(node.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes {
		return ErrTooManyExtraOpaqueBytes(len(node.ExtraOpaqueData))
	}
	err = wire.WriteVarBytes(&b, 0, node.ExtraOpaqueData)
	if err != nil {
		return err
	}

	err = aliasBucket.Put(nodePub, []byte(node.Alias.UnwrapOr("")))
	if err != nil {
		return err
	}

	// With the alias bucket updated, we'll now update the index that
	// tracks the time series of node updates.
	var indexKey [8 + 33]byte
	byteOrder.PutUint64(indexKey[:8], updateUnix)
	copy(indexKey[8:], nodePub)

	// If there was already an old index entry for this node, then we'll
	// delete the old one before we write the new entry.
	if nodeBytes := nodeBucket.Get(nodePub); nodeBytes != nil {
		// Extract out the old update time so we can reconstruct the
		// prior index key to delete it from the index.
		oldUpdateTime := nodeBytes[:8]

		var oldIndexKey [8 + 33]byte
		copy(oldIndexKey[:8], oldUpdateTime)
		copy(oldIndexKey[8:], nodePub)

		if err := updateIndex.Delete(oldIndexKey[:]); err != nil {
			return err
		}
	}

	if err := updateIndex.Put(indexKey[:], nil); err != nil {
		return err
	}

	return nodeBucket.Put(nodePub, b.Bytes())
}
4465

4466
func fetchLightningNode(nodeBucket kvdb.RBucket,
4467
        nodePub []byte) (*models.Node, error) {
4,576✔
4468

4,576✔
4469
        nodeBytes := nodeBucket.Get(nodePub)
4,576✔
4470
        if nodeBytes == nil {
4,659✔
4471
                return nil, ErrGraphNodeNotFound
83✔
4472
        }
83✔
4473

4474
        nodeReader := bytes.NewReader(nodeBytes)
4,493✔
4475

4,493✔
4476
        return deserializeLightningNode(nodeReader)
4,493✔
4477
}
4478

4479
func deserializeLightningNodeCacheable(r io.Reader) (route.Vertex,
4480
        *lnwire.FeatureVector, error) {
120✔
4481

120✔
4482
        var (
120✔
4483
                pubKey      route.Vertex
120✔
4484
                features    = lnwire.EmptyFeatureVector()
120✔
4485
                nodeScratch [8]byte
120✔
4486
        )
120✔
4487

120✔
4488
        // Skip ahead:
120✔
4489
        // - LastUpdate (8 bytes)
120✔
4490
        if _, err := r.Read(nodeScratch[:]); err != nil {
120✔
4491
                return pubKey, nil, err
×
4492
        }
×
4493

4494
        if _, err := io.ReadFull(r, pubKey[:]); err != nil {
120✔
4495
                return pubKey, nil, err
×
4496
        }
×
4497

4498
        // Read the node announcement flag.
4499
        if _, err := r.Read(nodeScratch[:2]); err != nil {
120✔
4500
                return pubKey, nil, err
×
4501
        }
×
4502
        hasNodeAnn := byteOrder.Uint16(nodeScratch[:2])
120✔
4503

120✔
4504
        // The rest of the data is optional, and will only be there if we got a
120✔
4505
        // node announcement for this node.
120✔
4506
        if hasNodeAnn == 0 {
120✔
4507
                return pubKey, features, nil
×
4508
        }
×
4509

4510
        // We did get a node announcement for this node, so we'll have the rest
4511
        // of the data available.
4512
        var rgb uint8
120✔
4513
        if err := binary.Read(r, byteOrder, &rgb); err != nil {
120✔
4514
                return pubKey, nil, err
×
4515
        }
×
4516
        if err := binary.Read(r, byteOrder, &rgb); err != nil {
120✔
4517
                return pubKey, nil, err
×
4518
        }
×
4519
        if err := binary.Read(r, byteOrder, &rgb); err != nil {
120✔
4520
                return pubKey, nil, err
×
4521
        }
×
4522

4523
        if _, err := wire.ReadVarString(r, 0); err != nil {
120✔
4524
                return pubKey, nil, err
×
4525
        }
×
4526

4527
        if err := features.Decode(r); err != nil {
120✔
4528
                return pubKey, nil, err
×
4529
        }
×
4530

4531
        return pubKey, features, nil
120✔
4532
}
4533

4534
func deserializeLightningNode(r io.Reader) (*models.Node, error) {
9,481✔
4535
        var (
9,481✔
4536
                scratch [8]byte
9,481✔
4537
                err     error
9,481✔
4538
                pubKey  [33]byte
9,481✔
4539
        )
9,481✔
4540

9,481✔
4541
        if _, err := r.Read(scratch[:]); err != nil {
9,481✔
4542
                return nil, err
×
4543
        }
×
4544

4545
        unix := int64(byteOrder.Uint64(scratch[:]))
9,481✔
4546
        lastUpdate := time.Unix(unix, 0)
9,481✔
4547

9,481✔
4548
        if _, err := io.ReadFull(r, pubKey[:]); err != nil {
9,481✔
4549
                return nil, err
×
4550
        }
×
4551

4552
        node := models.NewV1ShellNode(pubKey)
9,481✔
4553
        node.LastUpdate = lastUpdate
9,481✔
4554

9,481✔
4555
        if _, err := r.Read(scratch[:2]); err != nil {
9,481✔
4556
                return nil, err
×
4557
        }
×
4558

4559
        hasNodeAnn := byteOrder.Uint16(scratch[:2])
9,481✔
4560
        // The rest of the data is optional, and will only be there if we got a
9,481✔
4561
        // node announcement for this node.
9,481✔
4562
        if hasNodeAnn == 0 {
9,627✔
4563
                return node, nil
146✔
4564
        }
146✔
4565

4566
        // We did get a node announcement for this node, so we'll have the rest
4567
        // of the data available.
4568
        var nodeColor color.RGBA
9,335✔
4569
        if err := binary.Read(r, byteOrder, &nodeColor.R); err != nil {
9,335✔
4570
                return nil, err
×
4571
        }
×
4572
        if err := binary.Read(r, byteOrder, &nodeColor.G); err != nil {
9,335✔
4573
                return nil, err
×
4574
        }
×
4575
        if err := binary.Read(r, byteOrder, &nodeColor.B); err != nil {
9,335✔
4576
                return nil, err
×
4577
        }
×
4578
        node.Color = fn.Some(nodeColor)
9,335✔
4579

9,335✔
4580
        alias, err := wire.ReadVarString(r, 0)
9,335✔
4581
        if err != nil {
9,335✔
4582
                return nil, err
×
4583
        }
×
4584
        node.Alias = fn.Some(alias)
9,335✔
4585

9,335✔
4586
        err = node.Features.Decode(r)
9,335✔
4587
        if err != nil {
9,335✔
4588
                return nil, err
×
4589
        }
×
4590

4591
        if _, err := r.Read(scratch[:2]); err != nil {
9,335✔
4592
                return nil, err
×
4593
        }
×
4594
        numAddresses := int(byteOrder.Uint16(scratch[:2]))
9,335✔
4595

9,335✔
4596
        var addresses []net.Addr
9,335✔
4597
        for i := 0; i < numAddresses; i++ {
21,878✔
4598
                address, err := DeserializeAddr(r)
12,543✔
4599
                if err != nil {
12,543✔
4600
                        return nil, err
×
4601
                }
×
4602
                addresses = append(addresses, address)
12,543✔
4603
        }
4604
        node.Addresses = addresses
9,335✔
4605

9,335✔
4606
        node.AuthSigBytes, err = wire.ReadVarBytes(r, 0, 80, "sig")
9,335✔
4607
        if err != nil {
9,335✔
4608
                return nil, err
×
4609
        }
×
4610

4611
        // We'll try and see if there are any opaque bytes left, if not, then
4612
        // we'll ignore the EOF error and return the node as is.
4613
        extraBytes, err := wire.ReadVarBytes(
9,335✔
4614
                r, 0, MaxAllowedExtraOpaqueBytes, "blob",
9,335✔
4615
        )
9,335✔
4616
        switch {
9,335✔
4617
        case errors.Is(err, io.ErrUnexpectedEOF):
×
4618
        case errors.Is(err, io.EOF):
×
4619
        case err != nil:
×
4620
                return nil, err
×
4621
        }
4622

4623
        if len(extraBytes) > 0 {
9,346✔
4624
                node.ExtraOpaqueData = extraBytes
11✔
4625
        }
11✔
4626

4627
        return node, nil
9,335✔
4628
}
4629

4630
// putChanEdgeInfo serializes the given channel edge info and stores it in the
// edge index, keyed by the 8-byte channel ID. The wire format is: the four
// 33-byte keys (node1, node2, bitcoin1, bitcoin2), var-bytes features, four
// var-bytes signatures, the channel outpoint, capacity, channel ID, chain
// hash, and finally var-bytes extra opaque data.
func putChanEdgeInfo(edgeIndex kvdb.RwBucket,
	edgeInfo *models.ChannelEdgeInfo, chanID [8]byte) error {

	var b bytes.Buffer

	if _, err := b.Write(edgeInfo.NodeKey1Bytes[:]); err != nil {
		return err
	}
	if _, err := b.Write(edgeInfo.NodeKey2Bytes[:]); err != nil {
		return err
	}
	if _, err := b.Write(edgeInfo.BitcoinKey1Bytes[:]); err != nil {
		return err
	}
	if _, err := b.Write(edgeInfo.BitcoinKey2Bytes[:]); err != nil {
		return err
	}

	// The feature vector is encoded to a temporary buffer first so it can
	// be written as a single length-prefixed byte string.
	var featureBuf bytes.Buffer
	if err := edgeInfo.Features.Encode(&featureBuf); err != nil {
		return fmt.Errorf("unable to encode features: %w", err)
	}

	if err := wire.WriteVarBytes(&b, 0, featureBuf.Bytes()); err != nil {
		return err
	}

	// When no auth proof is present, all four signatures are written as
	// empty byte strings.
	authProof := edgeInfo.AuthProof
	var nodeSig1, nodeSig2, bitcoinSig1, bitcoinSig2 []byte
	if authProof != nil {
		nodeSig1 = authProof.NodeSig1Bytes
		nodeSig2 = authProof.NodeSig2Bytes
		bitcoinSig1 = authProof.BitcoinSig1Bytes
		bitcoinSig2 = authProof.BitcoinSig2Bytes
	}

	if err := wire.WriteVarBytes(&b, 0, nodeSig1); err != nil {
		return err
	}
	if err := wire.WriteVarBytes(&b, 0, nodeSig2); err != nil {
		return err
	}
	if err := wire.WriteVarBytes(&b, 0, bitcoinSig1); err != nil {
		return err
	}
	if err := wire.WriteVarBytes(&b, 0, bitcoinSig2); err != nil {
		return err
	}

	if err := WriteOutpoint(&b, &edgeInfo.ChannelPoint); err != nil {
		return err
	}
	err := binary.Write(&b, byteOrder, uint64(edgeInfo.Capacity))
	if err != nil {
		return err
	}
	if _, err := b.Write(chanID[:]); err != nil {
		return err
	}
	if _, err := b.Write(edgeInfo.ChainHash[:]); err != nil {
		return err
	}

	if len(edgeInfo.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes {
		return ErrTooManyExtraOpaqueBytes(len(edgeInfo.ExtraOpaqueData))
	}
	err = wire.WriteVarBytes(&b, 0, edgeInfo.ExtraOpaqueData)
	if err != nil {
		return err
	}

	return edgeIndex.Put(chanID[:], b.Bytes())
}
4703

4704
func fetchChanEdgeInfo(edgeIndex kvdb.RBucket,
4705
        chanID []byte) (*models.ChannelEdgeInfo, error) {
7,106✔
4706

7,106✔
4707
        edgeInfoBytes := edgeIndex.Get(chanID)
7,106✔
4708
        if edgeInfoBytes == nil {
7,171✔
4709
                return nil, ErrEdgeNotFound
65✔
4710
        }
65✔
4711

4712
        edgeInfoReader := bytes.NewReader(edgeInfoBytes)
7,041✔
4713

7,041✔
4714
        return deserializeChanEdgeInfo(edgeInfoReader)
7,041✔
4715
}
4716

4717
// deserializeChanEdgeInfo decodes a channel edge info from the byte format
// written by putChanEdgeInfo: four 33-byte keys, var-bytes features, four
// var-bytes signatures, outpoint, capacity, channel ID, chain hash and
// optional extra opaque data.
func deserializeChanEdgeInfo(r io.Reader) (*models.ChannelEdgeInfo, error) {
	var (
		err      error
		edgeInfo models.ChannelEdgeInfo
	)

	if _, err := io.ReadFull(r, edgeInfo.NodeKey1Bytes[:]); err != nil {
		return nil, err
	}
	if _, err := io.ReadFull(r, edgeInfo.NodeKey2Bytes[:]); err != nil {
		return nil, err
	}
	if _, err := io.ReadFull(r, edgeInfo.BitcoinKey1Bytes[:]); err != nil {
		return nil, err
	}
	if _, err := io.ReadFull(r, edgeInfo.BitcoinKey2Bytes[:]); err != nil {
		return nil, err
	}

	featureBytes, err := wire.ReadVarBytes(r, 0, 900, "features")
	if err != nil {
		return nil, err
	}

	features := lnwire.NewRawFeatureVector()
	err = features.Decode(bytes.NewReader(featureBytes))
	if err != nil {
		return nil, fmt.Errorf("unable to decode "+
			"features: %w", err)
	}
	edgeInfo.Features = lnwire.NewFeatureVector(features, lnwire.Features)

	proof := &models.ChannelAuthProof{}

	proof.NodeSig1Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
	if err != nil {
		return nil, err
	}
	proof.NodeSig2Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
	if err != nil {
		return nil, err
	}
	proof.BitcoinSig1Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
	if err != nil {
		return nil, err
	}
	proof.BitcoinSig2Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
	if err != nil {
		return nil, err
	}

	// Only attach the proof when at least one signature was present; all
	// four are serialized as empty strings for unproven edges.
	if !proof.IsEmpty() {
		edgeInfo.AuthProof = proof
	}

	edgeInfo.ChannelPoint = wire.OutPoint{}
	if err := ReadOutpoint(r, &edgeInfo.ChannelPoint); err != nil {
		return nil, err
	}
	if err := binary.Read(r, byteOrder, &edgeInfo.Capacity); err != nil {
		return nil, err
	}
	if err := binary.Read(r, byteOrder, &edgeInfo.ChannelID); err != nil {
		return nil, err
	}

	if _, err := io.ReadFull(r, edgeInfo.ChainHash[:]); err != nil {
		return nil, err
	}

	// We'll try and see if there are any opaque bytes left, if not, then
	// we'll ignore the EOF error and return the edge as is.
	edgeInfo.ExtraOpaqueData, err = wire.ReadVarBytes(
		r, 0, MaxAllowedExtraOpaqueBytes, "blob",
	)
	switch {
	case errors.Is(err, io.ErrUnexpectedEOF):
	case errors.Is(err, io.EOF):
	case err != nil:
		return nil, err
	}

	return &edgeInfo, nil
}
4801

4802
// putChanEdgePolicy serializes the given channel edge policy and writes it
// to the edges bucket under a key composed of the "from" node's 33-byte
// pubkey followed by the big-endian channel ID. Alongside the policy itself
// it maintains two secondary indexes: the edge update index (keyed by
// last-update timestamp + channel ID), from which any stale entry for a
// previously stored policy is removed, and the disabled-edge policy index.
func putChanEdgePolicy(edges kvdb.RwBucket, edge *models.ChannelEdgePolicy,
	from, to []byte) error {

	// Key layout: 33-byte "from" node pubkey || 8-byte channel ID.
	var edgeKey [33 + 8]byte
	copy(edgeKey[:], from)
	byteOrder.PutUint64(edgeKey[33:], edge.ChannelID)

	var b bytes.Buffer
	if err := serializeChanEdgePolicy(&b, edge, to); err != nil {
		return err
	}

	// Before we write out the new edge, we'll create a new entry in the
	// update index in order to keep it fresh.
	updateUnix := uint64(edge.LastUpdate.Unix())
	var indexKey [8 + 8]byte
	byteOrder.PutUint64(indexKey[:8], updateUnix)
	byteOrder.PutUint64(indexKey[8:], edge.ChannelID)

	updateIndex, err := edges.CreateBucketIfNotExists(edgeUpdateIndexBucket)
	if err != nil {
		return err
	}

	// If there was already an entry for this edge, then we'll need to
	// delete the old one to ensure we don't leave around any after-images.
	// An unknown policy value does not have a update time recorded, so
	// it also does not need to be removed.
	if edgeBytes := edges.Get(edgeKey[:]); edgeBytes != nil &&
		!bytes.Equal(edgeBytes, unknownPolicy) {

		// In order to delete the old entry, we'll need to obtain the
		// *prior* update time in order to delete it. To do this, we'll
		// need to deserialize the existing policy within the database
		// (now outdated by the new one), and delete its corresponding
		// entry within the update index. We'll ignore any
		// ErrEdgePolicyOptionalFieldNotFound or ErrParsingExtraTLVBytes
		// errors, as we only need the channel ID and update time to
		// delete the entry.
		//
		// TODO(halseth): get rid of these invalid policies in a
		// migration.
		//
		// NOTE: the above TODO was completed in the SQL migration and
		// so such edge cases no longer need to be handled there.
		oldEdgePolicy, err := deserializeChanEdgePolicy(
			bytes.NewReader(edgeBytes),
		)
		if err != nil &&
			!errors.Is(err, ErrEdgePolicyOptionalFieldNotFound) &&
			!errors.Is(err, ErrParsingExtraTLVBytes) {

			return err
		}

		oldUpdateTime := uint64(oldEdgePolicy.LastUpdate.Unix())

		var oldIndexKey [8 + 8]byte
		byteOrder.PutUint64(oldIndexKey[:8], oldUpdateTime)
		byteOrder.PutUint64(oldIndexKey[8:], edge.ChannelID)

		if err := updateIndex.Delete(oldIndexKey[:]); err != nil {
			return err
		}
	}

	if err := updateIndex.Put(indexKey[:], nil); err != nil {
		return err
	}

	// Keep the disabled-edge index in sync with this policy's current
	// disabled bit and direction.
	err = updateEdgePolicyDisabledIndex(
		edges, edge.ChannelID,
		edge.ChannelFlags&lnwire.ChanUpdateDirection > 0,
		edge.IsDisabled(),
	)
	if err != nil {
		return err
	}

	return edges.Put(edgeKey[:], b.Bytes())
}
4883

4884
// updateEdgePolicyDisabledIndex is used to update the disabledEdgePolicyIndex
4885
// bucket by either add a new disabled ChannelEdgePolicy or remove an existing
4886
// one.
4887
// The direction represents the direction of the edge and disabled is used for
4888
// deciding whether to remove or add an entry to the bucket.
4889
// In general a channel is disabled if two entries for the same chanID exist
4890
// in this bucket.
4891
// Maintaining the bucket this way allows a fast retrieval of disabled
4892
// channels, for example when prune is needed.
4893
func updateEdgePolicyDisabledIndex(edges kvdb.RwBucket, chanID uint64,
4894
        direction bool, disabled bool) error {
3,144✔
4895

3,144✔
4896
        var disabledEdgeKey [8 + 1]byte
3,144✔
4897
        byteOrder.PutUint64(disabledEdgeKey[0:], chanID)
3,144✔
4898
        if direction {
4,712✔
4899
                disabledEdgeKey[8] = 1
1,568✔
4900
        }
1,568✔
4901

4902
        disabledEdgePolicyIndex, err := edges.CreateBucketIfNotExists(
3,144✔
4903
                disabledEdgePolicyBucket,
3,144✔
4904
        )
3,144✔
4905
        if err != nil {
3,144✔
4906
                return err
×
4907
        }
×
4908

4909
        if disabled {
3,170✔
4910
                return disabledEdgePolicyIndex.Put(disabledEdgeKey[:], []byte{})
26✔
4911
        }
26✔
4912

4913
        return disabledEdgePolicyIndex.Delete(disabledEdgeKey[:])
3,118✔
4914
}
4915

4916
// putChanEdgePolicyUnknown marks the edge policy as unknown
4917
// in the edges bucket.
4918
func putChanEdgePolicyUnknown(edges kvdb.RwBucket, channelID uint64,
4919
        from []byte) error {
3,174✔
4920

3,174✔
4921
        var edgeKey [33 + 8]byte
3,174✔
4922
        copy(edgeKey[:], from)
3,174✔
4923
        byteOrder.PutUint64(edgeKey[33:], channelID)
3,174✔
4924

3,174✔
4925
        if edges.Get(edgeKey[:]) != nil {
3,174✔
4926
                return fmt.Errorf("cannot write unknown policy for channel %v "+
×
4927
                        " when there is already a policy present", channelID)
×
4928
        }
×
4929

4930
        return edges.Put(edgeKey[:], unknownPolicy)
3,174✔
4931
}
4932

4933
func fetchChanEdgePolicy(edges kvdb.RBucket, chanID []byte,
4934
        nodePub []byte) (*models.ChannelEdgePolicy, error) {
14,078✔
4935

14,078✔
4936
        var edgeKey [33 + 8]byte
14,078✔
4937
        copy(edgeKey[:], nodePub)
14,078✔
4938
        copy(edgeKey[33:], chanID)
14,078✔
4939

14,078✔
4940
        edgeBytes := edges.Get(edgeKey[:])
14,078✔
4941
        if edgeBytes == nil {
14,078✔
4942
                return nil, ErrEdgeNotFound
×
4943
        }
×
4944

4945
        // No need to deserialize unknown policy.
4946
        if bytes.Equal(edgeBytes, unknownPolicy) {
15,670✔
4947
                return nil, nil
1,592✔
4948
        }
1,592✔
4949

4950
        edgeReader := bytes.NewReader(edgeBytes)
12,486✔
4951

12,486✔
4952
        ep, err := deserializeChanEdgePolicy(edgeReader)
12,486✔
4953
        switch {
12,486✔
4954
        // If the db policy was missing an expected optional field, we return
4955
        // nil as if the policy was unknown.
4956
        case errors.Is(err, ErrEdgePolicyOptionalFieldNotFound):
2✔
4957
                return nil, nil
2✔
4958

4959
        // If the policy contains invalid TLV bytes, we return nil as if
4960
        // the policy was unknown.
4961
        case errors.Is(err, ErrParsingExtraTLVBytes):
×
4962
                return nil, nil
×
4963

4964
        case err != nil:
×
4965
                return nil, err
×
4966
        }
4967

4968
        return ep, nil
12,484✔
4969
}
4970

4971
func fetchChanEdgePolicies(edgeIndex kvdb.RBucket, edges kvdb.RBucket,
4972
        chanID []byte) (*models.ChannelEdgePolicy, *models.ChannelEdgePolicy,
4973
        error) {
3,197✔
4974

3,197✔
4975
        edgeInfo := edgeIndex.Get(chanID)
3,197✔
4976
        if edgeInfo == nil {
3,197✔
4977
                return nil, nil, fmt.Errorf("%w: chanID=%x", ErrEdgeNotFound,
×
4978
                        chanID)
×
4979
        }
×
4980

4981
        // The first node is contained within the first half of the edge
4982
        // information. We only propagate the error here and below if it's
4983
        // something other than edge non-existence.
4984
        node1Pub := edgeInfo[:33]
3,197✔
4985
        edge1, err := fetchChanEdgePolicy(edges, chanID, node1Pub)
3,197✔
4986
        if err != nil {
3,197✔
4987
                return nil, nil, fmt.Errorf("%w: node1Pub=%x", ErrEdgeNotFound,
×
4988
                        node1Pub)
×
4989
        }
×
4990

4991
        // Similarly, the second node is contained within the latter
4992
        // half of the edge information.
4993
        node2Pub := edgeInfo[33:66]
3,197✔
4994
        edge2, err := fetchChanEdgePolicy(edges, chanID, node2Pub)
3,197✔
4995
        if err != nil {
3,197✔
4996
                return nil, nil, fmt.Errorf("%w: node2Pub=%x", ErrEdgeNotFound,
×
4997
                        node2Pub)
×
4998
        }
×
4999

5000
        return edge1, edge2, nil
3,197✔
5001
}
5002

5003
// serializeChanEdgePolicy writes the given edge policy to w using the fixed
// on-disk encoding: var-bytes signature, channel ID, last-update unix
// timestamp, message flags, channel flags, timelock delta, min HTLC, base
// fee, proportional fee, the "to" node pubkey, and finally a var-bytes blob
// holding the optional max_htlc (when the message flags indicate it)
// followed by any extra opaque TLV data.
func serializeChanEdgePolicy(w io.Writer, edge *models.ChannelEdgePolicy,
	to []byte) error {

	err := wire.WriteVarBytes(w, 0, edge.SigBytes)
	if err != nil {
		return err
	}

	if err := binary.Write(w, byteOrder, edge.ChannelID); err != nil {
		return err
	}

	// The timestamp is stored as an 8-byte big-endian unix time.
	var scratch [8]byte
	updateUnix := uint64(edge.LastUpdate.Unix())
	byteOrder.PutUint64(scratch[:], updateUnix)
	if _, err := w.Write(scratch[:]); err != nil {
		return err
	}

	if err := binary.Write(w, byteOrder, edge.MessageFlags); err != nil {
		return err
	}
	if err := binary.Write(w, byteOrder, edge.ChannelFlags); err != nil {
		return err
	}
	if err := binary.Write(w, byteOrder, edge.TimeLockDelta); err != nil {
		return err
	}
	if err := binary.Write(w, byteOrder, uint64(edge.MinHTLC)); err != nil {
		return err
	}
	err = binary.Write(w, byteOrder, uint64(edge.FeeBaseMSat))
	if err != nil {
		return err
	}
	err = binary.Write(
		w, byteOrder, uint64(edge.FeeProportionalMillionths),
	)
	if err != nil {
		return err
	}

	if _, err := w.Write(to); err != nil {
		return err
	}

	// If the max_htlc field is present, we write it. To be compatible with
	// older versions that wasn't aware of this field, we write it as part
	// of the opaque data.
	// TODO(halseth): clean up when moving to TLV.
	var opaqueBuf bytes.Buffer
	if edge.MessageFlags.HasMaxHtlc() {
		err := binary.Write(&opaqueBuf, byteOrder, uint64(edge.MaxHTLC))
		if err != nil {
			return err
		}
	}

	if len(edge.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes {
		return ErrTooManyExtraOpaqueBytes(len(edge.ExtraOpaqueData))
	}
	if _, err := opaqueBuf.Write(edge.ExtraOpaqueData); err != nil {
		return err
	}

	if err := wire.WriteVarBytes(w, 0, opaqueBuf.Bytes()); err != nil {
		return err
	}

	return nil
}
5074

5075
func deserializeChanEdgePolicy(r io.Reader) (*models.ChannelEdgePolicy, error) {
12,517✔
5076
        // Deserialize the policy. Note that in case an optional field is not
12,517✔
5077
        // found or if the edge has invalid TLV data, then both an error and a
12,517✔
5078
        // populated policy object are returned so that the caller can decide
12,517✔
5079
        // if it still wants to use the edge or not.
12,517✔
5080
        edge, err := deserializeChanEdgePolicyRaw(r)
12,517✔
5081
        if err != nil &&
12,517✔
5082
                !errors.Is(err, ErrEdgePolicyOptionalFieldNotFound) &&
12,517✔
5083
                !errors.Is(err, ErrParsingExtraTLVBytes) {
12,517✔
5084

×
5085
                return nil, err
×
5086
        }
×
5087

5088
        return edge, err
12,517✔
5089
}
5090

5091
func deserializeChanEdgePolicyRaw(r io.Reader) (*models.ChannelEdgePolicy,
5092
        error) {
13,530✔
5093

13,530✔
5094
        edge := &models.ChannelEdgePolicy{}
13,530✔
5095

13,530✔
5096
        var err error
13,530✔
5097
        edge.SigBytes, err = wire.ReadVarBytes(r, 0, 80, "sig")
13,530✔
5098
        if err != nil {
13,530✔
5099
                return nil, err
×
5100
        }
×
5101

5102
        if err := binary.Read(r, byteOrder, &edge.ChannelID); err != nil {
13,530✔
5103
                return nil, err
×
5104
        }
×
5105

5106
        var scratch [8]byte
13,530✔
5107
        if _, err := r.Read(scratch[:]); err != nil {
13,530✔
5108
                return nil, err
×
5109
        }
×
5110
        unix := int64(byteOrder.Uint64(scratch[:]))
13,530✔
5111
        edge.LastUpdate = time.Unix(unix, 0)
13,530✔
5112

13,530✔
5113
        if err := binary.Read(r, byteOrder, &edge.MessageFlags); err != nil {
13,530✔
5114
                return nil, err
×
5115
        }
×
5116
        if err := binary.Read(r, byteOrder, &edge.ChannelFlags); err != nil {
13,530✔
5117
                return nil, err
×
5118
        }
×
5119
        if err := binary.Read(r, byteOrder, &edge.TimeLockDelta); err != nil {
13,530✔
5120
                return nil, err
×
5121
        }
×
5122

5123
        var n uint64
13,530✔
5124
        if err := binary.Read(r, byteOrder, &n); err != nil {
13,530✔
5125
                return nil, err
×
5126
        }
×
5127
        edge.MinHTLC = lnwire.MilliSatoshi(n)
13,530✔
5128

13,530✔
5129
        if err := binary.Read(r, byteOrder, &n); err != nil {
13,530✔
5130
                return nil, err
×
5131
        }
×
5132
        edge.FeeBaseMSat = lnwire.MilliSatoshi(n)
13,530✔
5133

13,530✔
5134
        if err := binary.Read(r, byteOrder, &n); err != nil {
13,530✔
5135
                return nil, err
×
5136
        }
×
5137
        edge.FeeProportionalMillionths = lnwire.MilliSatoshi(n)
13,530✔
5138

13,530✔
5139
        if _, err := r.Read(edge.ToNode[:]); err != nil {
13,530✔
5140
                return nil, err
×
5141
        }
×
5142

5143
        // We'll try and see if there are any opaque bytes left, if not, then
5144
        // we'll ignore the EOF error and return the edge as is.
5145
        edge.ExtraOpaqueData, err = wire.ReadVarBytes(
13,530✔
5146
                r, 0, MaxAllowedExtraOpaqueBytes, "blob",
13,530✔
5147
        )
13,530✔
5148
        switch {
13,530✔
5149
        case errors.Is(err, io.ErrUnexpectedEOF):
×
5150
        case errors.Is(err, io.EOF):
4✔
5151
        case err != nil:
×
5152
                return nil, err
×
5153
        }
5154

5155
        // See if optional fields are present.
5156
        if edge.MessageFlags.HasMaxHtlc() {
26,105✔
5157
                // The max_htlc field should be at the beginning of the opaque
12,575✔
5158
                // bytes.
12,575✔
5159
                opq := edge.ExtraOpaqueData
12,575✔
5160

12,575✔
5161
                // If the max_htlc field is not present, it might be old data
12,575✔
5162
                // stored before this field was validated. We'll return the
12,575✔
5163
                // edge along with an error.
12,575✔
5164
                if len(opq) < 8 {
12,579✔
5165
                        return edge, ErrEdgePolicyOptionalFieldNotFound
4✔
5166
                }
4✔
5167

5168
                maxHtlc := byteOrder.Uint64(opq[:8])
12,571✔
5169
                edge.MaxHTLC = lnwire.MilliSatoshi(maxHtlc)
12,571✔
5170

12,571✔
5171
                // Exclude the parsed field from the rest of the opaque data.
12,571✔
5172
                edge.ExtraOpaqueData = opq[8:]
12,571✔
5173
        }
5174

5175
        // Attempt to extract the inbound fee from the opaque data. If we fail
5176
        // to parse the TLV here, we return an error we also return the edge
5177
        // so that the caller can still use it. This is for backwards
5178
        // compatibility in case we have already persisted some policies that
5179
        // have invalid TLV data.
5180
        var inboundFee lnwire.Fee
13,526✔
5181
        typeMap, err := edge.ExtraOpaqueData.ExtractRecords(&inboundFee)
13,526✔
5182
        if err != nil {
13,526✔
5183
                return edge, fmt.Errorf("%w: %w", ErrParsingExtraTLVBytes, err)
×
5184
        }
×
5185

5186
        val, ok := typeMap[lnwire.FeeRecordType]
13,526✔
5187
        if ok && val == nil {
15,219✔
5188
                edge.InboundFee = fn.Some(inboundFee)
1,693✔
5189
        }
1,693✔
5190

5191
        return edge, nil
13,526✔
5192
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc