lightningnetwork / lnd, build 16171981582

09 Jul 2025 02:22PM UTC coverage: 55.32% (-2.3%) from 57.62%

Pull Request #10059: .gemini: add styleguide.md
Merge b62793ada into ea32aac77

108496 of 196123 relevant lines covered (55.32%)
22301.4 hits per line

Source File

/graph/db/kv_store.go: 77.35% of lines covered

package graphdb

import (
        "bytes"
        "context"
        "crypto/sha256"
        "encoding/binary"
        "errors"
        "fmt"
        "io"
        "math"
        "net"
        "sort"
        "sync"
        "time"

        "github.com/btcsuite/btcd/btcec/v2"
        "github.com/btcsuite/btcd/chaincfg/chainhash"
        "github.com/btcsuite/btcd/txscript"
        "github.com/btcsuite/btcd/wire"
        "github.com/btcsuite/btcwallet/walletdb"
        "github.com/lightningnetwork/lnd/aliasmgr"
        "github.com/lightningnetwork/lnd/batch"
        "github.com/lightningnetwork/lnd/fn/v2"
        "github.com/lightningnetwork/lnd/graph/db/models"
        "github.com/lightningnetwork/lnd/input"
        "github.com/lightningnetwork/lnd/kvdb"
        "github.com/lightningnetwork/lnd/lnwire"
        "github.com/lightningnetwork/lnd/routing/route"
)

var (
        // nodeBucket is a bucket which houses all the vertices or nodes within
        // the channel graph. This bucket has a single sub-bucket which adds an
        // additional index from pubkey -> alias. Within the top-level of this
        // bucket, the key space maps a node's compressed public key to the
        // serialized information for that node. Additionally, there's a
        // special key "source" which stores the pubkey of the source node. The
        // source node is used as the starting point for all graph queries and
        // traversals. The graph is formed as a star-graph with the source node
        // at the center.
        //
        // maps: pubKey -> nodeInfo
        // maps: source -> selfPubKey
        nodeBucket = []byte("graph-node")

        // nodeUpdateIndexBucket is a sub-bucket of the nodeBucket. This bucket
        // will be used to quickly look up the "freshness" of a node's last
        // update to the network. The bucket only contains keys, and no values,
        // it's mapping:
        //
        // maps: updateTime || nodeID -> nil
        nodeUpdateIndexBucket = []byte("graph-node-update-index")

        // sourceKey is a special key that resides within the nodeBucket. The
        // sourceKey maps a key to the public key of the "self node".
        sourceKey = []byte("source")

        // aliasIndexBucket is a sub-bucket that's nested within the main
        // nodeBucket. This bucket maps the public key of a node to its
        // current alias. This bucket is provided as it can be used within a
        // future UI layer to add an additional degree of confirmation.
        aliasIndexBucket = []byte("alias")

        // edgeBucket is a bucket which houses all of the edge or channel
        // information within the channel graph. This bucket essentially acts
        // as an adjacency list, which in conjunction with a range scan, can be
        // used to iterate over all the incoming and outgoing edges for a
        // particular node. Keys in the bucket use a prefix scheme which leads
        // with the node's public key and ends with the compact edge ID.
        // For each chanID, there will be two entries within the bucket, as the
        // graph is directed: nodes may have different policies w.r.t. fees
        // for their respective directions.
        //
        // maps: pubKey || chanID -> channel edge policy for node
        edgeBucket = []byte("graph-edge")

        // unknownPolicy is represented as an empty slice. It is
        // used as the value in edgeBucket for unknown channel edge policies.
        // Unknown policies are still stored in the database to enable efficient
        // lookup of incoming channel edges.
        unknownPolicy = []byte{}

        // chanStart is an array of all zero bytes which is used to perform
        // range scans within the edgeBucket to obtain all of the outgoing
        // edges for a particular node.
        chanStart [8]byte

        // edgeIndexBucket is an index which can be used to iterate all edges
        // in the bucket, grouping them according to their in/out nodes.
        // Additionally, the items in this bucket also contain the complete
        // edge information for a channel. The edge information includes the
        // capacity of the channel, the nodes that made the channel, etc. This
        // bucket resides within the edgeBucket above. Creation of an edge
        // proceeds in two phases: first the edge is added to the edge index,
        // afterwards the edgeBucket can be updated with the latest details of
        // the edge as they are announced on the network.
        //
        // maps: chanID -> pubKey1 || pubKey2 || restofEdgeInfo
        edgeIndexBucket = []byte("edge-index")

        // edgeUpdateIndexBucket is a sub-bucket of the main edgeBucket. This
        // bucket contains an index which allows us to gauge the "freshness" of
        // a channel's last updates.
        //
        // maps: updateTime || chanID -> nil
        edgeUpdateIndexBucket = []byte("edge-update-index")

        // channelPointBucket maps a channel's full outpoint (txid:index) to
        // its short 8-byte channel ID. This bucket resides within the
        // edgeBucket above, and can be used to quickly remove an edge due to
        // the outpoint being spent, or to query for existence of a channel.
        //
        // maps: outPoint -> chanID
        channelPointBucket = []byte("chan-index")

        // zombieBucket is a sub-bucket of the main edgeBucket bucket
        // responsible for maintaining an index of zombie channels. Each entry
        // exists within the bucket as follows:
        //
        // maps: chanID -> pubKey1 || pubKey2
        //
        // The chanID represents the channel ID of the edge that is marked as a
        // zombie and is used as the key, which maps to the public keys of the
        // edge's participants.
        zombieBucket = []byte("zombie-index")

        // disabledEdgePolicyBucket is a sub-bucket of the main edgeBucket
        // bucket responsible for maintaining an index of disabled edge
        // policies. Each entry exists within the bucket as follows:
        //
        // maps: <chanID><direction> -> []byte{}
        //
        // The chanID represents the channel ID of the edge and the direction is
        // one byte representing the direction of the edge. The main purpose of
        // this index is to allow pruning disabled channels in a fast way
        // without the need to iterate all over the graph.
        disabledEdgePolicyBucket = []byte("disabled-edge-policy-index")

        // graphMetaBucket is a top-level bucket which stores various metadata
        // related to the on-disk channel graph. Data stored in this bucket
        // includes the block to which the graph has been synced, the total
        // number of channels, etc.
        graphMetaBucket = []byte("graph-meta")

        // pruneLogBucket is a bucket within the graphMetaBucket that stores
        // a mapping from the block height to the hash for the blocks used to
        // prune the graph.
        // Once a new block is discovered, any channels that have been closed
        // (by spending the outpoint) can safely be removed from the graph, and
        // the block is added to the prune log. We need to keep such a log for
        // the case where a reorg happens, and we must "rewind" the state of the
        // graph by removing channels that were previously confirmed. In such a
        // case we'll remove all entries from the prune log with a block height
        // that no longer exists.
        pruneLogBucket = []byte("prune-log")

        // closedScidBucket is a top-level bucket that stores scids for
        // channels that we know to be closed. This is used so that we don't
        // need to perform expensive validation checks if we receive a channel
        // announcement for the channel again.
        //
        // maps: scid -> []byte{}
        closedScidBucket = []byte("closed-scid")
)
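
// exampleEdgeKey is an illustrative sketch, not part of the original file. It
// shows how an edgeBucket key is laid out per the schema described above: the
// node's 33-byte compressed public key followed by the 8-byte big-endian
// channel ID. This prefix layout is what makes a range scan over a single
// node's outgoing edges possible.
func exampleEdgeKey(nodePub [33]byte, chanID uint64) []byte {
        key := make([]byte, 0, 33+8)
        key = append(key, nodePub[:]...)

        var cid [8]byte
        binary.BigEndian.PutUint64(cid[:], chanID)

        return append(key, cid[:]...)
}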

const (
        // MaxAllowedExtraOpaqueBytes is the largest amount of opaque bytes that
        // we'll permit to be written to disk. We limit this as otherwise, it
        // would be possible for a node to create a ton of updates and slowly
        // fill our disk, and also waste bandwidth due to relaying.
        MaxAllowedExtraOpaqueBytes = 10000
)

// KVStore is a persistent, on-disk graph representation of the Lightning
// Network. This struct can be used to implement path finding algorithms on top
// of, and also to update a node's view based on information received from the
// p2p network. Internally, the graph is stored using a modified adjacency list
// representation with some added object interaction possible with each
// serialized edge/node. The graph stored is directed, meaning that there are
// two edges stored for each channel: an inbound/outbound edge for each node
// pair. Nodes, edges, and edge information can all be added to the graph
// independently. Edge removal results in the deletion of all edge information
// for that edge.
type KVStore struct {
        db kvdb.Backend

        // cacheMu guards all caches (rejectCache and chanCache). If this mutex
        // is acquired at the same time as the DB mutex then cacheMu MUST be
        // acquired first to prevent deadlock.
        cacheMu     sync.RWMutex
        rejectCache *rejectCache
        chanCache   *channelCache

        chanScheduler batch.Scheduler[kvdb.RwTx]
        nodeScheduler batch.Scheduler[kvdb.RwTx]
}

// A compile-time assertion to ensure that the KVStore struct implements the
// V1Store interface.
var _ V1Store = (*KVStore)(nil)

// NewKVStore allocates a new KVStore backed by a DB instance. The
// returned instance has its own unique reject cache and channel cache.
func NewKVStore(db kvdb.Backend, options ...StoreOptionModifier) (*KVStore,
        error) {

        opts := DefaultOptions()
        for _, o := range options {
                o(opts)
        }

        if !opts.NoMigration {
                if err := initKVStore(db); err != nil {
                        return nil, err
                }
        }

        g := &KVStore{
                db:          db,
                rejectCache: newRejectCache(opts.RejectCacheSize),
                chanCache:   newChannelCache(opts.ChannelCacheSize),
        }
        g.chanScheduler = batch.NewTimeScheduler(
                batch.NewBoltBackend[kvdb.RwTx](db), &g.cacheMu,
                opts.BatchCommitInterval,
        )
        g.nodeScheduler = batch.NewTimeScheduler(
                batch.NewBoltBackend[kvdb.RwTx](db), nil,
                opts.BatchCommitInterval,
        )

        return g, nil
}
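
// exampleOpenGraphStore is an illustrative usage sketch, not part of the
// original file. It assumes a kvdb.Backend has already been opened elsewhere;
// constructing one is outside the scope of this file. A plain call uses
// DefaultOptions, and initKVStore creates any missing buckets unless the
// NoMigration option is set.
func exampleOpenGraphStore(backend kvdb.Backend) (*KVStore, error) {
        return NewKVStore(backend)
}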

// channelMapKey is the key structure used for storing channel edge policies.
type channelMapKey struct {
        nodeKey route.Vertex
        chanID  [8]byte
}

// String returns a human-readable representation of the key.
func (c channelMapKey) String() string {
        return fmt.Sprintf("node=%v, chanID=%x", c.nodeKey, c.chanID)
}

// getChannelMap loads all channel edge policies from the database and stores
// them in a map.
func getChannelMap(edges kvdb.RBucket) (
        map[channelMapKey]*models.ChannelEdgePolicy, error) {

        // Create a map to store all channel edge policies.
        channelMap := make(map[channelMapKey]*models.ChannelEdgePolicy)

        err := kvdb.ForAll(edges, func(k, edgeBytes []byte) error {
                // Skip embedded buckets.
                if bytes.Equal(k, edgeIndexBucket) ||
                        bytes.Equal(k, edgeUpdateIndexBucket) ||
                        bytes.Equal(k, zombieBucket) ||
                        bytes.Equal(k, disabledEdgePolicyBucket) ||
                        bytes.Equal(k, channelPointBucket) {

                        return nil
                }

                // Validate key length.
                if len(k) != 33+8 {
                        return fmt.Errorf("invalid edge key %x encountered", k)
                }

                var key channelMapKey
                copy(key.nodeKey[:], k[:33])
                copy(key.chanID[:], k[33:])

                // No need to deserialize unknown policy.
                if bytes.Equal(edgeBytes, unknownPolicy) {
                        return nil
                }

                edgeReader := bytes.NewReader(edgeBytes)
                edge, err := deserializeChanEdgePolicyRaw(
                        edgeReader,
                )

                switch {
                // If the db policy was missing an expected optional field, we
                // return nil as if the policy was unknown.
                case errors.Is(err, ErrEdgePolicyOptionalFieldNotFound):
                        return nil

                // We don't want a single policy with bad TLV data to stop us
                // from loading the rest of the data, so we just skip this
                // policy. This is for backwards compatibility since we did not
                // use to validate TLV data in the past before persisting it.
                case errors.Is(err, ErrParsingExtraTLVBytes):
                        return nil

                case err != nil:
                        return err
                }

                channelMap[key] = edge

                return nil
        })
        if err != nil {
                return nil, err
        }

        return channelMap, nil
}

var graphTopLevelBuckets = [][]byte{
        nodeBucket,
        edgeBucket,
        graphMetaBucket,
        closedScidBucket,
}

// initKVStore creates and initializes a fresh version of the channel graph
// database. In the case that the target path has not yet been created or
// doesn't yet exist, then the path is created. Additionally, all required
// top-level buckets used within the database are created.
func initKVStore(db kvdb.Backend) error {
        err := kvdb.Update(db, func(tx kvdb.RwTx) error {
                for _, tlb := range graphTopLevelBuckets {
                        if _, err := tx.CreateTopLevelBucket(tlb); err != nil {
                                return err
                        }
                }

                nodes := tx.ReadWriteBucket(nodeBucket)
                _, err := nodes.CreateBucketIfNotExists(aliasIndexBucket)
                if err != nil {
                        return err
                }
                _, err = nodes.CreateBucketIfNotExists(nodeUpdateIndexBucket)
                if err != nil {
                        return err
                }

                edges := tx.ReadWriteBucket(edgeBucket)
                _, err = edges.CreateBucketIfNotExists(edgeIndexBucket)
                if err != nil {
                        return err
                }
                _, err = edges.CreateBucketIfNotExists(edgeUpdateIndexBucket)
                if err != nil {
                        return err
                }
                _, err = edges.CreateBucketIfNotExists(channelPointBucket)
                if err != nil {
                        return err
                }
                _, err = edges.CreateBucketIfNotExists(zombieBucket)
                if err != nil {
                        return err
                }

                graphMeta := tx.ReadWriteBucket(graphMetaBucket)
                _, err = graphMeta.CreateBucketIfNotExists(pruneLogBucket)

                return err
        }, func() {})
        if err != nil {
                return fmt.Errorf("unable to create new channel graph: %w", err)
        }

        return nil
}

// AddrsForNode returns all known addresses for the target node public key that
// the graph DB is aware of. The returned boolean indicates if the given node is
// unknown to the graph DB or not.
//
// NOTE: this is part of the channeldb.AddrSource interface.
func (c *KVStore) AddrsForNode(ctx context.Context,
        nodePub *btcec.PublicKey) (bool, []net.Addr, error) {

        pubKey, err := route.NewVertexFromBytes(nodePub.SerializeCompressed())
        if err != nil {
                return false, nil, err
        }

        node, err := c.FetchLightningNode(ctx, pubKey)
        // We don't consider it an error if the graph is unaware of the node.
        switch {
        case err != nil && !errors.Is(err, ErrGraphNodeNotFound):
                return false, nil, err

        case errors.Is(err, ErrGraphNodeNotFound):
                return false, nil, nil
        }

        return true, node.Addresses, nil
}
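
// exampleAddrsForNode is an illustrative usage sketch, not part of the
// original file. It shows how the returned "known" boolean distinguishes a
// node that is simply absent from the graph from a real failure.
func exampleAddrsForNode(ctx context.Context, store *KVStore,
        nodePub *btcec.PublicKey) ([]net.Addr, error) {

        known, addrs, err := store.AddrsForNode(ctx, nodePub)
        if err != nil {
                return nil, err
        }
        if !known {
                // The node isn't in the graph; AddrsForNode does not treat
                // this as an error.
                return nil, nil
        }

        return addrs, nil
}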

// ForEachChannel iterates through all the channel edges stored within the
// graph and invokes the passed callback for each edge. The callback takes two
// edges since this is a directed graph: both the in/out edges are visited.
// If the callback returns an error, then the transaction is aborted and the
// iteration stops early.
//
// NOTE: If an edge can't be found, or wasn't advertised, then a nil pointer
// for that particular channel edge routing policy will be passed into the
// callback.
func (c *KVStore) ForEachChannel(cb func(*models.ChannelEdgeInfo,
        *models.ChannelEdgePolicy, *models.ChannelEdgePolicy) error) error {

        return forEachChannel(c.db, cb)
}

// forEachChannel iterates through all the channel edges stored within the
// graph and invokes the passed callback for each edge. The callback takes two
// edges since this is a directed graph: both the in/out edges are visited.
// If the callback returns an error, then the transaction is aborted and the
// iteration stops early.
//
// NOTE: If an edge can't be found, or wasn't advertised, then a nil pointer
// for that particular channel edge routing policy will be passed into the
// callback.
func forEachChannel(db kvdb.Backend, cb func(*models.ChannelEdgeInfo,
        *models.ChannelEdgePolicy, *models.ChannelEdgePolicy) error) error {

        return db.View(func(tx kvdb.RTx) error {
                edges := tx.ReadBucket(edgeBucket)
                if edges == nil {
                        return ErrGraphNoEdgesFound
                }

                // First, load all edges in memory indexed by node and channel
                // id.
                channelMap, err := getChannelMap(edges)
                if err != nil {
                        return err
                }

                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
                if edgeIndex == nil {
                        return ErrGraphNoEdgesFound
                }

                // Load edge index, recombine each channel with the policies
                // loaded above and invoke the callback.
                return kvdb.ForAll(
                        edgeIndex, func(k, edgeInfoBytes []byte) error {
                                var chanID [8]byte
                                copy(chanID[:], k)

                                edgeInfoReader := bytes.NewReader(edgeInfoBytes)
                                info, err := deserializeChanEdgeInfo(
                                        edgeInfoReader,
                                )
                                if err != nil {
                                        return err
                                }

                                policy1 := channelMap[channelMapKey{
                                        nodeKey: info.NodeKey1Bytes,
                                        chanID:  chanID,
                                }]

                                policy2 := channelMap[channelMapKey{
                                        nodeKey: info.NodeKey2Bytes,
                                        chanID:  chanID,
                                }]

                                return cb(&info, policy1, policy2)
                        },
                )
        }, func() {})
}
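
// exampleCountChannels is an illustrative usage sketch, not part of the
// original file. It counts every channel in the graph and how many of them
// have both directed policies known, relying on the documented behaviour that
// unknown policies are passed to the callback as nil.
func exampleCountChannels(store *KVStore) (int, int, error) {
        var total, bothPolicies int
        err := store.ForEachChannel(func(info *models.ChannelEdgeInfo,
                p1, p2 *models.ChannelEdgePolicy) error {

                total++
                if p1 != nil && p2 != nil {
                        bothPolicies++
                }

                return nil
        })

        return total, bothPolicies, err
}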

// ForEachChannelCacheable iterates through all the channel edges stored within
// the graph and invokes the passed callback for each edge. The callback takes
// two edges since this is a directed graph: both the in/out edges are
// visited. If the callback returns an error, then the transaction is aborted
// and the iteration stops early.
//
// NOTE: If an edge can't be found, or wasn't advertised, then a nil pointer
// for that particular channel edge routing policy will be passed into the
// callback.
//
// NOTE: this method is like ForEachChannel but fetches only the data required
// for the graph cache.
func (c *KVStore) ForEachChannelCacheable(cb func(*models.CachedEdgeInfo,
        *models.CachedEdgePolicy, *models.CachedEdgePolicy) error) error {

        return c.db.View(func(tx kvdb.RTx) error {
                edges := tx.ReadBucket(edgeBucket)
                if edges == nil {
                        return ErrGraphNoEdgesFound
                }

                // First, load all edges in memory indexed by node and channel
                // id.
                channelMap, err := getChannelMap(edges)
                if err != nil {
                        return err
                }

                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
                if edgeIndex == nil {
                        return ErrGraphNoEdgesFound
                }

                // Load edge index, recombine each channel with the policies
                // loaded above and invoke the callback.
                return kvdb.ForAll(
                        edgeIndex, func(k, edgeInfoBytes []byte) error {
                                var chanID [8]byte
                                copy(chanID[:], k)

                                edgeInfoReader := bytes.NewReader(edgeInfoBytes)
                                info, err := deserializeChanEdgeInfo(
                                        edgeInfoReader,
                                )
                                if err != nil {
                                        return err
                                }

                                key1 := channelMapKey{
                                        nodeKey: info.NodeKey1Bytes,
                                        chanID:  chanID,
                                }
                                policy1 := channelMap[key1]

                                key2 := channelMapKey{
                                        nodeKey: info.NodeKey2Bytes,
                                        chanID:  chanID,
                                }
                                policy2 := channelMap[key2]

                                // We now create the cached edge policies, but
                                // only when the above policies are found in the
                                // `channelMap`.
                                var (
                                        cachedPolicy1 *models.CachedEdgePolicy
                                        cachedPolicy2 *models.CachedEdgePolicy
                                )

                                if policy1 != nil {
                                        cachedPolicy1 = models.NewCachedPolicy(
                                                policy1,
                                        )
                                }

                                if policy2 != nil {
                                        cachedPolicy2 = models.NewCachedPolicy(
                                                policy2,
                                        )
                                }

                                return cb(
                                        models.NewCachedEdge(&info),
                                        cachedPolicy1, cachedPolicy2,
                                )
                        },
                )
        }, func() {})
}
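
// exampleWarmGraphCache is an illustrative usage sketch, not part of the
// original file. It walks the graph with ForEachChannelCacheable, the reduced
// variant intended for populating an in-memory graph cache, and simply counts
// how many channels carry at least one known cached policy.
func exampleWarmGraphCache(store *KVStore) (int, error) {
        var withPolicy int
        err := store.ForEachChannelCacheable(func(info *models.CachedEdgeInfo,
                p1, p2 *models.CachedEdgePolicy) error {

                if p1 != nil || p2 != nil {
                        withPolicy++
                }

                return nil
        })

        return withPolicy, err
}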

// forEachNodeDirectedChannel iterates through all channels of a given node,
// executing the passed callback on the directed edge representing the channel
// and its incoming policy. If the callback returns an error, then the iteration
// is halted with the error propagated back up to the caller. An optional read
// transaction may be provided. If none is provided, a new one will be created.
//
// Unknown policies are passed into the callback as nil values.
func (c *KVStore) forEachNodeDirectedChannel(tx kvdb.RTx,
        node route.Vertex, cb func(channel *DirectedChannel) error) error {

        // Fallback that uses the database.
        toNodeCallback := func() route.Vertex {
                return node
        }
        toNodeFeatures, err := c.fetchNodeFeatures(tx, node)
        if err != nil {
                return err
        }

        dbCallback := func(tx kvdb.RTx, e *models.ChannelEdgeInfo, p1,
                p2 *models.ChannelEdgePolicy) error {

                var cachedInPolicy *models.CachedEdgePolicy
                if p2 != nil {
                        cachedInPolicy = models.NewCachedPolicy(p2)
                        cachedInPolicy.ToNodePubKey = toNodeCallback
                        cachedInPolicy.ToNodeFeatures = toNodeFeatures
                }

                directedChannel := &DirectedChannel{
                        ChannelID:    e.ChannelID,
                        IsNode1:      node == e.NodeKey1Bytes,
                        OtherNode:    e.NodeKey2Bytes,
                        Capacity:     e.Capacity,
                        OutPolicySet: p1 != nil,
                        InPolicy:     cachedInPolicy,
                }

                if p1 != nil {
                        p1.InboundFee.WhenSome(func(fee lnwire.Fee) {
                                directedChannel.InboundFee = fee
                        })
                }

                if node == e.NodeKey2Bytes {
                        directedChannel.OtherNode = e.NodeKey1Bytes
                }

                return cb(directedChannel)
        }

        return nodeTraversal(tx, node[:], c.db, dbCallback)
}

// fetchNodeFeatures returns the features of a given node. If no features are
// known for the node, an empty feature vector is returned. An optional read
// transaction may be provided. If none is provided, a new one will be created.
func (c *KVStore) fetchNodeFeatures(tx kvdb.RTx,
        node route.Vertex) (*lnwire.FeatureVector, error) {

        // Fallback that uses the database.
        targetNode, err := c.FetchLightningNodeTx(tx, node)
        switch {
        // If the node exists and has features, return them directly.
        case err == nil:
                return targetNode.Features, nil

        // If we couldn't find a node announcement, populate a blank feature
        // vector.
        case errors.Is(err, ErrGraphNodeNotFound):
                return lnwire.EmptyFeatureVector(), nil

        // Otherwise, bubble the error up.
        default:
                return nil, err
        }
}

// ForEachNodeDirectedChannel iterates through all channels of a given node,
// executing the passed callback on the directed edge representing the channel
// and its incoming policy. If the callback returns an error, then the iteration
// is halted with the error propagated back up to the caller.
//
// Unknown policies are passed into the callback as nil values.
//
// NOTE: this is part of the graphdb.NodeTraverser interface.
func (c *KVStore) ForEachNodeDirectedChannel(nodePub route.Vertex,
        cb func(channel *DirectedChannel) error) error {

        return c.forEachNodeDirectedChannel(nil, nodePub, cb)
}

// FetchNodeFeatures returns the features of the given node. If no features are
// known for the node, an empty feature vector is returned.
//
// NOTE: this is part of the graphdb.NodeTraverser interface.
func (c *KVStore) FetchNodeFeatures(nodePub route.Vertex) (
        *lnwire.FeatureVector, error) {

        return c.fetchNodeFeatures(nil, nodePub)
}

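// exampleCountNodeChannels is an illustrative usage sketch, not part of the
// original file. It counts the channels connected to a node and how many of
// them have a known incoming policy, using ForEachNodeDirectedChannel, the
// same entry point the graphdb.NodeTraverser interface exposes to path
// finding.
func exampleCountNodeChannels(store *KVStore, nodePub route.Vertex) (int, int,
        error) {

        var numChans, withInPolicy int
        err := store.ForEachNodeDirectedChannel(nodePub,
                func(ch *DirectedChannel) error {
                        numChans++
                        if ch.InPolicy != nil {
                                withInPolicy++
                        }

                        return nil
                },
        )

        return numChans, withInPolicy, err
}
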
// ForEachNodeCached is similar to forEachNode, but it returns DirectedChannel
// data to the call-back.
//
// NOTE: The callback contents MUST not be modified.
func (c *KVStore) ForEachNodeCached(cb func(node route.Vertex,
        chans map[uint64]*DirectedChannel) error) error {

        // Otherwise call back to a version that uses the database directly.
        // We'll iterate over each node, then the set of channels for each
        // node, and construct a similar callback function signature as the
        // main function expects.
        return forEachNode(c.db, func(tx kvdb.RTx,
                node *models.LightningNode) error {

                channels := make(map[uint64]*DirectedChannel)

                err := c.forEachNodeChannelTx(tx, node.PubKeyBytes,
                        func(tx kvdb.RTx, e *models.ChannelEdgeInfo,
                                p1 *models.ChannelEdgePolicy,
                                p2 *models.ChannelEdgePolicy) error {

                                toNodeCallback := func() route.Vertex {
                                        return node.PubKeyBytes
                                }
                                toNodeFeatures, err := c.fetchNodeFeatures(
                                        tx, node.PubKeyBytes,
                                )
                                if err != nil {
                                        return err
                                }

                                var cachedInPolicy *models.CachedEdgePolicy
                                if p2 != nil {
                                        cachedInPolicy =
                                                models.NewCachedPolicy(p2)
                                        cachedInPolicy.ToNodePubKey =
                                                toNodeCallback
                                        cachedInPolicy.ToNodeFeatures =
                                                toNodeFeatures
                                }

                                directedChannel := &DirectedChannel{
                                        ChannelID: e.ChannelID,
                                        IsNode1: node.PubKeyBytes ==
                                                e.NodeKey1Bytes,
                                        OtherNode:    e.NodeKey2Bytes,
                                        Capacity:     e.Capacity,
                                        OutPolicySet: p1 != nil,
                                        InPolicy:     cachedInPolicy,
                                }

                                if node.PubKeyBytes == e.NodeKey2Bytes {
                                        directedChannel.OtherNode =
                                                e.NodeKey1Bytes
                                }

                                channels[e.ChannelID] = directedChannel

                                return nil
                        })
                if err != nil {
                        return err
                }

                return cb(node.PubKeyBytes, channels)
        })
}
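
// exampleChannelCountPerNode is an illustrative usage sketch, not part of the
// original file. It builds a per-node channel count using ForEachNodeCached,
// which hands the callback a ready-made map of DirectedChannel entries keyed
// by channel ID.
func exampleChannelCountPerNode(store *KVStore) (map[route.Vertex]int, error) {
        counts := make(map[route.Vertex]int)
        err := store.ForEachNodeCached(func(node route.Vertex,
                chans map[uint64]*DirectedChannel) error {

                counts[node] = len(chans)

                return nil
        })
        if err != nil {
                return nil, err
        }

        return counts, nil
}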

// DisabledChannelIDs returns the channel ids of disabled channels.
// A channel is disabled when two of the associated ChannelEdgePolicies
// have their disabled bit on.
func (c *KVStore) DisabledChannelIDs() ([]uint64, error) {
        var disabledChanIDs []uint64
        var chanEdgeFound map[uint64]struct{}

        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
                edges := tx.ReadBucket(edgeBucket)
                if edges == nil {
                        return ErrGraphNoEdgesFound
                }

                disabledEdgePolicyIndex := edges.NestedReadBucket(
                        disabledEdgePolicyBucket,
                )
                if disabledEdgePolicyIndex == nil {
                        return nil
                }

                // We iterate over all disabled policies and we add each channel
                // that has more than one disabled policy to disabledChanIDs
                // array.
                return disabledEdgePolicyIndex.ForEach(
                        func(k, v []byte) error {
                                chanID := byteOrder.Uint64(k[:8])
                                _, edgeFound := chanEdgeFound[chanID]
                                if edgeFound {
                                        delete(chanEdgeFound, chanID)
                                        disabledChanIDs = append(
                                                disabledChanIDs, chanID,
                                        )

                                        return nil
                                }

                                chanEdgeFound[chanID] = struct{}{}

                                return nil
                        },
                )
        }, func() {
                disabledChanIDs = nil
                chanEdgeFound = make(map[uint64]struct{})
        })
        if err != nil {
                return nil, err
        }

        return disabledChanIDs, nil
}

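// exampleLogDisabledChannels is an illustrative usage sketch, not part of the
// original file. A channel only shows up in DisabledChannelIDs once both of
// its directed policies have the disabled bit set, which is exactly what the
// disabled-edge-policy index above tracks.
func exampleLogDisabledChannels(store *KVStore) error {
        chanIDs, err := store.DisabledChannelIDs()
        if err != nil {
                return err
        }

        for _, cid := range chanIDs {
                fmt.Printf("channel %d is disabled in both directions\n", cid)
        }

        return nil
}
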
// ForEachNode iterates through all the stored vertices/nodes in the graph,
// executing the passed callback with each node encountered. If the callback
// returns an error, then the transaction is aborted and the iteration stops
// early. Any operations performed on the NodeTx passed to the call-back are
// executed under the same read transaction and so, methods on the NodeTx object
// _MUST_ only be called from within the call-back.
func (c *KVStore) ForEachNode(cb func(tx NodeRTx) error) error {
        return forEachNode(c.db, func(tx kvdb.RTx,
                node *models.LightningNode) error {

                return cb(newChanGraphNodeTx(tx, c, node))
        })
}

// forEachNode iterates through all the stored vertices/nodes in the graph,
// executing the passed callback with each node encountered. If the callback
// returns an error, then the transaction is aborted and the iteration stops
// early.
//
// TODO(roasbeef): add iterator interface to allow for memory efficient graph
// traversal when graph gets mega.
func forEachNode(db kvdb.Backend,
        cb func(kvdb.RTx, *models.LightningNode) error) error {

        traversal := func(tx kvdb.RTx) error {
                // First grab the nodes bucket which stores the mapping from
                // pubKey to node information.
                nodes := tx.ReadBucket(nodeBucket)
                if nodes == nil {
                        return ErrGraphNotFound
                }

                return nodes.ForEach(func(pubKey, nodeBytes []byte) error {
                        // If this is the source key, then we skip this
                        // iteration as the value for this key is a pubKey
                        // rather than raw node information.
                        if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 {
                                return nil
                        }

                        nodeReader := bytes.NewReader(nodeBytes)
                        node, err := deserializeLightningNode(nodeReader)
                        if err != nil {
                                return err
                        }

                        // Execute the callback, the transaction will abort if
                        // this returns an error.
                        return cb(tx, &node)
                })
        }

        return kvdb.View(db, traversal, func() {})
}

// ForEachNodeCacheable iterates through all the stored vertices/nodes in the
// graph, executing the passed callback with each node encountered. If the
// callback returns an error, then the transaction is aborted and the iteration
// stops early.
func (c *KVStore) ForEachNodeCacheable(cb func(route.Vertex,
        *lnwire.FeatureVector) error) error {

        traversal := func(tx kvdb.RTx) error {
                // First grab the nodes bucket which stores the mapping from
                // pubKey to node information.
                nodes := tx.ReadBucket(nodeBucket)
                if nodes == nil {
                        return ErrGraphNotFound
                }

                return nodes.ForEach(func(pubKey, nodeBytes []byte) error {
                        // If this is the source key, then we skip this
                        // iteration as the value for this key is a pubKey
                        // rather than raw node information.
                        if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 {
                                return nil
                        }

                        nodeReader := bytes.NewReader(nodeBytes)
                        node, features, err := deserializeLightningNodeCacheable( //nolint:ll
                                nodeReader,
                        )
                        if err != nil {
                                return err
                        }

                        // Execute the callback, the transaction will abort if
                        // this returns an error.
                        return cb(node, features)
                })
        }

        return kvdb.View(c.db, traversal, func() {})
}

// SourceNode returns the source node of the graph. The source node is treated
// as the center node within a star-graph. This method may be used to kick off
// a path finding algorithm in order to explore the reachability of another
// node based off the source node.
func (c *KVStore) SourceNode(_ context.Context) (*models.LightningNode, error) {
        return sourceNode(c.db)
}

// sourceNode fetches the source node of the graph. The source node is treated
// as the center node within a star-graph.
func sourceNode(db kvdb.Backend) (*models.LightningNode, error) {
        var source *models.LightningNode
        err := kvdb.View(db, func(tx kvdb.RTx) error {
                // First grab the nodes bucket which stores the mapping from
                // pubKey to node information.
                nodes := tx.ReadBucket(nodeBucket)
                if nodes == nil {
                        return ErrGraphNotFound
                }

                node, err := sourceNodeWithTx(nodes)
                if err != nil {
                        return err
                }
                source = node

                return nil
        }, func() {
                source = nil
        })
        if err != nil {
                return nil, err
        }

        return source, nil
}

// sourceNodeWithTx uses an existing database transaction and returns the source
// node of the graph. The source node is treated as the center node within a
// star-graph. This method may be used to kick off a path finding algorithm in
// order to explore the reachability of another node based off the source node.
func sourceNodeWithTx(nodes kvdb.RBucket) (*models.LightningNode, error) {
        selfPub := nodes.Get(sourceKey)
        if selfPub == nil {
                return nil, ErrSourceNodeNotSet
        }

        // With the pubKey of the source node retrieved, we're able to
        // fetch the full node information.
        node, err := fetchLightningNode(nodes, selfPub)
        if err != nil {
                return nil, err
        }

        return &node, nil
}

// SetSourceNode sets the source node within the graph database. The source
// node is to be used as the center of a star-graph within path finding
// algorithms.
func (c *KVStore) SetSourceNode(_ context.Context,
        node *models.LightningNode) error {

        nodePubBytes := node.PubKeyBytes[:]

        return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
                // First grab the nodes bucket which stores the mapping from
                // pubKey to node information.
                nodes, err := tx.CreateTopLevelBucket(nodeBucket)
                if err != nil {
                        return err
                }

                // Next we create the mapping from source to the targeted
                // public key.
                if err := nodes.Put(sourceKey, nodePubBytes); err != nil {
                        return err
                }

                // Finally, we commit the information of the lightning node
                // itself.
                return addLightningNode(tx, node)
        }, func() {})
}

// AddLightningNode adds a vertex/node to the graph database. If the node is not
// in the database from before, this will add a new, unconnected one to the
// graph. If it is present from before, this will update that node's
// information. Note that this method is expected to only be called to update an
// already present node from a node announcement, or to insert a node found in a
// channel update.
//
// TODO(roasbeef): also need sig of announcement.
func (c *KVStore) AddLightningNode(ctx context.Context,
        node *models.LightningNode, opts ...batch.SchedulerOption) error {

        r := &batch.Request[kvdb.RwTx]{
                Opts: batch.NewSchedulerOptions(opts...),
                Do: func(tx kvdb.RwTx) error {
                        return addLightningNode(tx, node)
                },
        }

        return c.nodeScheduler.Execute(ctx, r)
}

func addLightningNode(tx kvdb.RwTx, node *models.LightningNode) error {
        nodes, err := tx.CreateTopLevelBucket(nodeBucket)
        if err != nil {
                return err
        }

        aliases, err := nodes.CreateBucketIfNotExists(aliasIndexBucket)
        if err != nil {
                return err
        }

        updateIndex, err := nodes.CreateBucketIfNotExists(
                nodeUpdateIndexBucket,
        )
        if err != nil {
                return err
        }

        return putLightningNode(nodes, aliases, updateIndex, node)
}

// LookupAlias attempts to return the alias as advertised by the target node.
// TODO(roasbeef): currently assumes that aliases are unique...
func (c *KVStore) LookupAlias(_ context.Context,
        pub *btcec.PublicKey) (string, error) {

        var alias string

        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
                nodes := tx.ReadBucket(nodeBucket)
                if nodes == nil {
                        return ErrGraphNodesNotFound
                }

                aliases := nodes.NestedReadBucket(aliasIndexBucket)
                if aliases == nil {
                        return ErrGraphNodesNotFound
                }

                nodePub := pub.SerializeCompressed()
                a := aliases.Get(nodePub)
                if a == nil {
                        return ErrNodeAliasNotFound
                }

                // TODO(roasbeef): should actually be using the utf-8
                // package...
                alias = string(a)

                return nil
        }, func() {
                alias = ""
        })
        if err != nil {
                return "", err
        }

        return alias, nil
}

// DeleteLightningNode starts a new database transaction to remove a vertex/node
// from the database according to the node's public key.
func (c *KVStore) DeleteLightningNode(_ context.Context,
        nodePub route.Vertex) error {

        // TODO(roasbeef): ensure dangling edges are removed...
        return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
                nodes := tx.ReadWriteBucket(nodeBucket)
                if nodes == nil {
                        return ErrGraphNodeNotFound
                }

                return c.deleteLightningNode(nodes, nodePub[:])
        }, func() {})
}

// deleteLightningNode uses an existing database transaction to remove a
// vertex/node from the database according to the node's public key.
func (c *KVStore) deleteLightningNode(nodes kvdb.RwBucket,
        compressedPubKey []byte) error {

        aliases := nodes.NestedReadWriteBucket(aliasIndexBucket)
        if aliases == nil {
                return ErrGraphNodesNotFound
        }

        if err := aliases.Delete(compressedPubKey); err != nil {
                return err
        }

        // Before we delete the node, we'll fetch its current state so we can
        // determine when its last update was to clear out the node update
        // index.
        node, err := fetchLightningNode(nodes, compressedPubKey)
        if err != nil {
                return err
        }

        if err := nodes.Delete(compressedPubKey); err != nil {
                return err
        }

        // Finally, we'll delete the index entry for the node within the
        // nodeUpdateIndexBucket as this node is no longer active, so we don't
        // need to track its last update.
        nodeUpdateIndex := nodes.NestedReadWriteBucket(nodeUpdateIndexBucket)
        if nodeUpdateIndex == nil {
                return ErrGraphNodesNotFound
        }

        // In order to delete the entry, we'll need to reconstruct the key for
        // its last update.
        updateUnix := uint64(node.LastUpdate.Unix())
        var indexKey [8 + 33]byte
        byteOrder.PutUint64(indexKey[:8], updateUnix)
        copy(indexKey[8:], compressedPubKey)

        return nodeUpdateIndex.Delete(indexKey[:])
}

// AddChannelEdge adds a new (undirected, blank) edge to the graph database. An
// undirected edge from the two target nodes is created. The information stored
// denotes the static attributes of the channel, such as the channelID, the keys
// involved in creation of the channel, and the set of features that the channel
// supports. The chanPoint and chanID are used to uniquely identify the edge
// globally within the database.
func (c *KVStore) AddChannelEdge(ctx context.Context,
        edge *models.ChannelEdgeInfo, opts ...batch.SchedulerOption) error {

        var alreadyExists bool
        r := &batch.Request[kvdb.RwTx]{
                Opts: batch.NewSchedulerOptions(opts...),
                Reset: func() {
                        alreadyExists = false
                },
                Do: func(tx kvdb.RwTx) error {
                        err := c.addChannelEdge(tx, edge)

                        // Silence ErrEdgeAlreadyExist so that the batch can
                        // succeed, but propagate the error via local state.
                        if errors.Is(err, ErrEdgeAlreadyExist) {
                                alreadyExists = true
                                return nil
                        }

                        return err
                },
                OnCommit: func(err error) error {
                        switch {
                        case err != nil:
                                return err
                        case alreadyExists:
                                return ErrEdgeAlreadyExist
                        default:
                                c.rejectCache.remove(edge.ChannelID)
                                c.chanCache.remove(edge.ChannelID)
                                return nil
                        }
                },
        }

        return c.chanScheduler.Execute(ctx, r)
}

// addChannelEdge is the private form of AddChannelEdge that allows callers to
// utilize an existing db transaction.
func (c *KVStore) addChannelEdge(tx kvdb.RwTx,
        edge *models.ChannelEdgeInfo) error {

        // Construct the channel's primary key which is the 8-byte channel ID.
        var chanKey [8]byte
        binary.BigEndian.PutUint64(chanKey[:], edge.ChannelID)

        nodes, err := tx.CreateTopLevelBucket(nodeBucket)
        if err != nil {
                return err
        }
        edges, err := tx.CreateTopLevelBucket(edgeBucket)
        if err != nil {
                return err
        }
        edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket)
        if err != nil {
                return err
        }
        chanIndex, err := edges.CreateBucketIfNotExists(channelPointBucket)
        if err != nil {
                return err
        }

        // First, attempt to check if this edge has already been created. If
        // so, then we can exit early as this method is meant to be idempotent.
        if edgeInfo := edgeIndex.Get(chanKey[:]); edgeInfo != nil {
                return ErrEdgeAlreadyExist
        }

        // Before we insert the channel into the database, we'll ensure that
        // both nodes already exist in the channel graph. If either node
        // doesn't, then we'll insert a "shell" node that just includes its
        // public key, so subsequent validation and queries can work properly.
        _, node1Err := fetchLightningNode(nodes, edge.NodeKey1Bytes[:])
        switch {
        case errors.Is(node1Err, ErrGraphNodeNotFound):
                node1Shell := models.LightningNode{
                        PubKeyBytes:          edge.NodeKey1Bytes,
                        HaveNodeAnnouncement: false,
                }
                err := addLightningNode(tx, &node1Shell)
                if err != nil {
                        return fmt.Errorf("unable to create shell node "+
                                "for: %x: %w", edge.NodeKey1Bytes, err)
                }
        case node1Err != nil:
                return node1Err
        }

        _, node2Err := fetchLightningNode(nodes, edge.NodeKey2Bytes[:])
        switch {
        case errors.Is(node2Err, ErrGraphNodeNotFound):
                node2Shell := models.LightningNode{
                        PubKeyBytes:          edge.NodeKey2Bytes,
                        HaveNodeAnnouncement: false,
                }
                err := addLightningNode(tx, &node2Shell)
                if err != nil {
                        return fmt.Errorf("unable to create shell node "+
                                "for: %x: %w", edge.NodeKey2Bytes, err)
                }
        case node2Err != nil:
                return node2Err
        }

        // If the edge hasn't been created yet, then we'll first add it to the
        // edge index in order to associate the edge between two nodes and also
        // store the static components of the channel.
        if err := putChanEdgeInfo(edgeIndex, edge, chanKey); err != nil {
                return err
        }

        // Mark edge policies for both sides as unknown. This is to enable
        // efficient incoming channel lookup for a node.
        keys := []*[33]byte{
                &edge.NodeKey1Bytes,
                &edge.NodeKey2Bytes,
        }
        for _, key := range keys {
                err := putChanEdgePolicyUnknown(edges, edge.ChannelID, key[:])
                if err != nil {
                        return err
                }
        }

        // Finally we add it to the channel index which maps channel points
        // (outpoints) to the shorter channel ID's.
        var b bytes.Buffer
        if err := WriteOutpoint(&b, &edge.ChannelPoint); err != nil {
                return err
        }

        return chanIndex.Put(b.Bytes(), chanKey[:])
}
1247

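// Illustrative note (added sketch, not part of the original file): the
// primary key used throughout this store is simply the big-endian encoding
// of the 64-bit channel ID, and channelPointBucket maps the serialized
// funding outpoint back to that same key. Assuming an edge with ChannelID
// set, the key could be derived as:
//
//	var chanKey [8]byte
//	binary.BigEndian.PutUint64(chanKey[:], edge.ChannelID)
//
// so an outpoint lookup in channelPointBucket yields the same 8 bytes that
// index edgeIndexBucket.
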
// HasChannelEdge returns true if the database knows of a channel edge with the
// passed channel ID, and false otherwise. If an edge with that ID is found
// within the graph, then two timestamps representing the last time the edge
// was updated for both directed edges are returned along with the boolean. If
// it is not found, then the zombie index is checked and its result is returned
// as the second boolean.
func (c *KVStore) HasChannelEdge(
	chanID uint64) (time.Time, time.Time, bool, bool, error) {

	var (
		upd1Time time.Time
		upd2Time time.Time
		exists   bool
		isZombie bool
	)

	// We'll query the cache with the shared lock held to allow multiple
	// readers to access values in the cache concurrently if they exist.
	c.cacheMu.RLock()
	if entry, ok := c.rejectCache.get(chanID); ok {
		c.cacheMu.RUnlock()
		upd1Time = time.Unix(entry.upd1Time, 0)
		upd2Time = time.Unix(entry.upd2Time, 0)
		exists, isZombie = entry.flags.unpack()

		return upd1Time, upd2Time, exists, isZombie, nil
	}
	c.cacheMu.RUnlock()

	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()

	// The item was not found with the shared lock, so we'll acquire the
	// exclusive lock and check the cache again in case another method added
	// the entry to the cache while no lock was held.
	if entry, ok := c.rejectCache.get(chanID); ok {
		upd1Time = time.Unix(entry.upd1Time, 0)
		upd2Time = time.Unix(entry.upd2Time, 0)
		exists, isZombie = entry.flags.unpack()

		return upd1Time, upd2Time, exists, isZombie, nil
	}

	if err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrGraphNoEdgesFound
		}

		var channelID [8]byte
		byteOrder.PutUint64(channelID[:], chanID)

		// If the edge doesn't exist, then we'll also check our zombie
		// index.
		if edgeIndex.Get(channelID[:]) == nil {
			exists = false
			zombieIndex := edges.NestedReadBucket(zombieBucket)
			if zombieIndex != nil {
				isZombie, _, _ = isZombieEdge(
					zombieIndex, chanID,
				)
			}

			return nil
		}

		exists = true
		isZombie = false

		// If the channel has been found in the graph, then retrieve
		// the edges themselves so we can return the last updated
		// timestamps.
		nodes := tx.ReadBucket(nodeBucket)
		if nodes == nil {
			return ErrGraphNodeNotFound
		}

		e1, e2, err := fetchChanEdgePolicies(
			edgeIndex, edges, channelID[:],
		)
		if err != nil {
			return err
		}

		// As we may have only one of the edges populated, only set the
		// update time if the edge was found in the database.
		if e1 != nil {
			upd1Time = e1.LastUpdate
		}
		if e2 != nil {
			upd2Time = e2.LastUpdate
		}

		return nil
	}, func() {}); err != nil {
		return time.Time{}, time.Time{}, exists, isZombie, err
	}

	c.rejectCache.insert(chanID, rejectCacheEntry{
		upd1Time: upd1Time.Unix(),
		upd2Time: upd2Time.Unix(),
		flags:    packRejectFlags(exists, isZombie),
	})

	return upd1Time, upd2Time, exists, isZombie, nil
}

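// Example (hedged sketch, not part of the original file): a gossip-level
// caller might use HasChannelEdge to decide how to treat an incoming
// channel_update. The identifiers below are hypothetical.
//
//	upd1, upd2, exists, isZombie, err := store.HasChannelEdge(scid)
//	if err != nil {
//		return err
//	}
//	switch {
//	case !exists && isZombie:
//		// Known zombie: ignore the update.
//	case !exists:
//		// Unknown channel: request the announcement first.
//	case !remoteTimestamp.After(upd1) && !remoteTimestamp.After(upd2):
//		// Not newer than anything we already have for either side.
//	}
//
// The reject cache above keeps repeated checks for the same channel ID cheap.
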
// AddEdgeProof sets the proof of an existing edge in the graph database.
func (c *KVStore) AddEdgeProof(chanID lnwire.ShortChannelID,
	proof *models.ChannelAuthProof) error {

	// Construct the channel's primary key which is the 8-byte channel ID.
	var chanKey [8]byte
	binary.BigEndian.PutUint64(chanKey[:], chanID.ToUint64())

	return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
		edges := tx.ReadWriteBucket(edgeBucket)
		if edges == nil {
			return ErrEdgeNotFound
		}

		edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrEdgeNotFound
		}

		edge, err := fetchChanEdgeInfo(edgeIndex, chanKey[:])
		if err != nil {
			return err
		}

		edge.AuthProof = proof

		return putChanEdgeInfo(edgeIndex, &edge, chanKey)
	}, func() {})
}

const (
	// pruneTipBytes is the total size of the value which stores a prune
	// entry of the graph in the prune log. The "prune tip" is the last
	// entry in the prune log, and indicates if the channel graph is in
	// sync with the current UTXO state. The structure of the value
	// is: blockHash, taking 32 bytes total.
	pruneTipBytes = 32
)

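// Layout note (added sketch, not part of the original file): each prune log
// entry is keyed by the 4-byte big-endian block height and stores the 32-byte
// block hash as its value, roughly:
//
//	var key [4]byte
//	byteOrder.PutUint32(key[:], blockHeight)
//	var value [pruneTipBytes]byte
//	copy(value[:], blockHash[:])
//
// PruneTip below simply reads the entry with the highest key in this bucket.
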
// PruneGraph prunes newly closed channels from the channel graph in response
// to a new block being solved on the network. Any transactions which spend the
// funding output of any known channels within the graph will be deleted.
// Additionally, the "prune tip", or the last block which has been used to
// prune the graph, is stored so callers can ensure the graph is fully in sync
// with the current UTXO state. A slice of channels that have been closed by
// the target block along with any pruned nodes are returned if the function
// succeeds without error.
func (c *KVStore) PruneGraph(spentOutputs []*wire.OutPoint,
	blockHash *chainhash.Hash, blockHeight uint32) (
	[]*models.ChannelEdgeInfo, []route.Vertex, error) {

	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()

	var (
		chansClosed []*models.ChannelEdgeInfo
		prunedNodes []route.Vertex
	)

	err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
		// First grab the edges bucket which houses the information
		// we'd like to delete.
		edges, err := tx.CreateTopLevelBucket(edgeBucket)
		if err != nil {
			return err
		}

		// Next grab the two edge indexes which will also need to be
		// updated.
		edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket)
		if err != nil {
			return err
		}
		chanIndex, err := edges.CreateBucketIfNotExists(
			channelPointBucket,
		)
		if err != nil {
			return err
		}
		nodes := tx.ReadWriteBucket(nodeBucket)
		if nodes == nil {
			return ErrSourceNodeNotSet
		}
		zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
		if err != nil {
			return err
		}

		// For each of the outpoints that have been spent within the
		// block, we attempt to delete them from the graph as if that
		// outpoint was a channel, then it has now been closed.
		for _, chanPoint := range spentOutputs {
			// TODO(roasbeef): load channel bloom filter, continue
			// if NOT if filter

			var opBytes bytes.Buffer
			err := WriteOutpoint(&opBytes, chanPoint)
			if err != nil {
				return err
			}

			// First attempt to see if the channel exists within
			// the database, if not, then we can exit early.
			chanID := chanIndex.Get(opBytes.Bytes())
			if chanID == nil {
				continue
			}

			// Attempt to delete the channel, an ErrEdgeNotFound
			// will be returned if that outpoint isn't known to be
			// a channel. If no error is returned, then a channel
			// was successfully pruned.
			edgeInfo, err := c.delChannelEdgeUnsafe(
				edges, edgeIndex, chanIndex, zombieIndex,
				chanID, false, false,
			)
			if err != nil && !errors.Is(err, ErrEdgeNotFound) {
				return err
			}

			chansClosed = append(chansClosed, edgeInfo)
		}

		metaBucket, err := tx.CreateTopLevelBucket(graphMetaBucket)
		if err != nil {
			return err
		}

		pruneBucket, err := metaBucket.CreateBucketIfNotExists(
			pruneLogBucket,
		)
		if err != nil {
			return err
		}

		// With the graph pruned, add a new entry to the prune log,
		// which can be used to check if the graph is fully synced with
		// the current UTXO state.
		var blockHeightBytes [4]byte
		byteOrder.PutUint32(blockHeightBytes[:], blockHeight)

		var newTip [pruneTipBytes]byte
		copy(newTip[:], blockHash[:])

		err = pruneBucket.Put(blockHeightBytes[:], newTip[:])
		if err != nil {
			return err
		}

		// Now that the graph has been pruned, we'll also attempt to
		// prune any nodes that have had a channel closed within the
		// latest block.
		prunedNodes, err = c.pruneGraphNodes(nodes, edgeIndex)

		return err
	}, func() {
		chansClosed = nil
		prunedNodes = nil
	})
	if err != nil {
		return nil, nil, err
	}

	for _, channel := range chansClosed {
		c.rejectCache.remove(channel.ChannelID)
		c.chanCache.remove(channel.ChannelID)
	}

	return chansClosed, prunedNodes, nil
}

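// Example (hedged sketch, not part of the original file): a chain-watching
// caller would typically invoke PruneGraph once per connected block, passing
// every outpoint spent in that block. Identifiers are hypothetical.
//
//	closed, pruned, err := store.PruneGraph(
//		spentOutpoints, &blockHash, blockHeight,
//	)
//	if err != nil {
//		return err
//	}
//	log.Debugf("Block %d closed %d channels and pruned %d nodes",
//		blockHeight, len(closed), len(pruned))
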
// PruneGraphNodes is a garbage collection method which attempts to prune out
// any nodes from the channel graph that are currently unconnected. This
// ensures that we only maintain a graph of reachable nodes. In the event that
// a pruned node gains more channels, it will be re-added back to the graph.
func (c *KVStore) PruneGraphNodes() ([]route.Vertex, error) {
	var prunedNodes []route.Vertex
	err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
		nodes := tx.ReadWriteBucket(nodeBucket)
		if nodes == nil {
			return ErrGraphNodesNotFound
		}
		edges := tx.ReadWriteBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNotFound
		}
		edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrGraphNoEdgesFound
		}

		var err error
		prunedNodes, err = c.pruneGraphNodes(nodes, edgeIndex)
		if err != nil {
			return err
		}

		return nil
	}, func() {
		prunedNodes = nil
	})

	return prunedNodes, err
}

// pruneGraphNodes attempts to remove any nodes from the graph that have had a
// channel closed within the current block. If the node still has existing
// channels in the graph, this will act as a no-op.
func (c *KVStore) pruneGraphNodes(nodes kvdb.RwBucket,
	edgeIndex kvdb.RwBucket) ([]route.Vertex, error) {

	log.Trace("Pruning nodes from graph with no open channels")

	// We'll retrieve the graph's source node to ensure we don't remove it
	// even if it no longer has any open channels.
	sourceNode, err := sourceNodeWithTx(nodes)
	if err != nil {
		return nil, err
	}

	// We'll use this map to keep count of the number of references to a
	// node in the graph. A node should only be removed once it has no more
	// references in the graph.
	nodeRefCounts := make(map[[33]byte]int)
	err = nodes.ForEach(func(pubKey, nodeBytes []byte) error {
		// If this is the source key, then we skip this
		// iteration as the value for this key is a pubKey
		// rather than raw node information.
		if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 {
			return nil
		}

		var nodePub [33]byte
		copy(nodePub[:], pubKey)
		nodeRefCounts[nodePub] = 0

		return nil
	})
	if err != nil {
		return nil, err
	}

	// To ensure we never delete the source node, we'll start off by
	// bumping its ref count to 1.
	nodeRefCounts[sourceNode.PubKeyBytes] = 1

	// Next, we'll run through the edgeIndex which maps a channel ID to the
	// edge info. We'll use this scan to populate our reference count map
	// above.
	err = edgeIndex.ForEach(func(chanID, edgeInfoBytes []byte) error {
		// The first 66 bytes of the edge info contain the pubkeys of
		// the nodes that this edge attaches. We'll extract them, and
		// add them to the ref count map.
		var node1, node2 [33]byte
		copy(node1[:], edgeInfoBytes[:33])
		copy(node2[:], edgeInfoBytes[33:])

		// With the nodes extracted, we'll increase the ref count of
		// each of the nodes.
		nodeRefCounts[node1]++
		nodeRefCounts[node2]++

		return nil
	})
	if err != nil {
		return nil, err
	}

	// Finally, we'll make a second pass over the set of nodes, and delete
	// any nodes that have a ref count of zero.
	var pruned []route.Vertex
	for nodePubKey, refCount := range nodeRefCounts {
		// If the ref count of the node isn't zero, then we can safely
		// skip it as it still has edges to or from it within the
		// graph.
		if refCount != 0 {
			continue
		}

		// If we reach this point, then there are no longer any edges
		// that connect this node, so we can delete it.
		err := c.deleteLightningNode(nodes, nodePubKey[:])
		if err != nil {
			if errors.Is(err, ErrGraphNodeNotFound) ||
				errors.Is(err, ErrGraphNodesNotFound) {

				log.Warnf("Unable to prune node %x from the "+
					"graph: %v", nodePubKey, err)
				continue
			}

			return nil, err
		}

		log.Infof("Pruned unconnected node %x from channel graph",
			nodePubKey[:])

		pruned = append(pruned, nodePubKey)
	}

	if len(pruned) > 0 {
		log.Infof("Pruned %v unconnected nodes from the channel graph",
			len(pruned))
	}

	return pruned, err
}

// DisconnectBlockAtHeight is used to indicate that the block specified
// by the passed height has been disconnected from the main chain. This
// will "rewind" the graph back to the height below, deleting channels
// that are no longer confirmed from the graph. The prune log will be
// set to the last prune height valid for the remaining chain.
// Channels that were removed from the graph as a result of the
// disconnected block are returned.
func (c *KVStore) DisconnectBlockAtHeight(height uint32) (
	[]*models.ChannelEdgeInfo, error) {

	// Every channel having a ShortChannelID starting at 'height'
	// will no longer be confirmed.
	startShortChanID := lnwire.ShortChannelID{
		BlockHeight: height,
	}

	// Delete everything after this height from the db up until the
	// SCID alias range.
	endShortChanID := aliasmgr.StartingAlias

	// The block height will be the first 3 bytes of the channel IDs.
	var chanIDStart [8]byte
	byteOrder.PutUint64(chanIDStart[:], startShortChanID.ToUint64())
	var chanIDEnd [8]byte
	byteOrder.PutUint64(chanIDEnd[:], endShortChanID.ToUint64())

	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()

	// Keep track of the channels that are removed from the graph.
	var removedChans []*models.ChannelEdgeInfo

	if err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
		edges, err := tx.CreateTopLevelBucket(edgeBucket)
		if err != nil {
			return err
		}
		edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket)
		if err != nil {
			return err
		}
		chanIndex, err := edges.CreateBucketIfNotExists(
			channelPointBucket,
		)
		if err != nil {
			return err
		}
		zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
		if err != nil {
			return err
		}

		// Scan from chanIDStart to chanIDEnd, deleting every
		// found edge.
		// NOTE: we must delete the edges after the cursor loop, since
		// modifying the bucket while traversing is not safe.
		// NOTE: We use a < comparison in bytes.Compare instead of <=
		// so that the StartingAlias itself isn't deleted.
		var keys [][]byte
		cursor := edgeIndex.ReadWriteCursor()

		//nolint:ll
		for k, _ := cursor.Seek(chanIDStart[:]); k != nil &&
			bytes.Compare(k, chanIDEnd[:]) < 0; k, _ = cursor.Next() {
			keys = append(keys, k)
		}

		for _, k := range keys {
			edgeInfo, err := c.delChannelEdgeUnsafe(
				edges, edgeIndex, chanIndex, zombieIndex,
				k, false, false,
			)
			if err != nil && !errors.Is(err, ErrEdgeNotFound) {
				return err
			}

			removedChans = append(removedChans, edgeInfo)
		}

		// Delete all the entries in the prune log having a height
		// greater than or equal to the block disconnected.
		metaBucket, err := tx.CreateTopLevelBucket(graphMetaBucket)
		if err != nil {
			return err
		}

		pruneBucket, err := metaBucket.CreateBucketIfNotExists(
			pruneLogBucket,
		)
		if err != nil {
			return err
		}

		var pruneKeyStart [4]byte
		byteOrder.PutUint32(pruneKeyStart[:], height)

		var pruneKeyEnd [4]byte
		byteOrder.PutUint32(pruneKeyEnd[:], math.MaxUint32)

		// To avoid modifying the bucket while traversing, we delete
		// the keys in a second loop.
		var pruneKeys [][]byte
		pruneCursor := pruneBucket.ReadWriteCursor()
		//nolint:ll
		for k, _ := pruneCursor.Seek(pruneKeyStart[:]); k != nil &&
			bytes.Compare(k, pruneKeyEnd[:]) <= 0; k, _ = pruneCursor.Next() {
			pruneKeys = append(pruneKeys, k)
		}

		for _, k := range pruneKeys {
			if err := pruneBucket.Delete(k); err != nil {
				return err
			}
		}

		return nil
	}, func() {
		removedChans = nil
	}); err != nil {
		return nil, err
	}

	for _, channel := range removedChans {
		c.rejectCache.remove(channel.ChannelID)
		c.chanCache.remove(channel.ChannelID)
	}

	return removedChans, nil
}

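// Example (hedged sketch, not part of the original file): on a reorg, a
// caller might rewind the graph to the fork point before replaying the new
// chain. Identifiers are hypothetical.
//
//	removed, err := store.DisconnectBlockAtHeight(forkHeight + 1)
//	if err != nil {
//		return err
//	}
//	// Every channel confirmed at forkHeight+1 or later has been removed,
//	// and the prune log has been rewound below that height.
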
// PruneTip returns the block height and hash of the latest block that has been
// used to prune channels in the graph. Knowing the "prune tip" allows callers
// to tell if the graph is currently in sync with the current best known UTXO
// state.
func (c *KVStore) PruneTip() (*chainhash.Hash, uint32, error) {
	var (
		tipHash   chainhash.Hash
		tipHeight uint32
	)

	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		graphMeta := tx.ReadBucket(graphMetaBucket)
		if graphMeta == nil {
			return ErrGraphNotFound
		}
		pruneBucket := graphMeta.NestedReadBucket(pruneLogBucket)
		if pruneBucket == nil {
			return ErrGraphNeverPruned
		}

		pruneCursor := pruneBucket.ReadCursor()

		// The prune key with the largest block height will be our
		// prune tip.
		k, v := pruneCursor.Last()
		if k == nil {
			return ErrGraphNeverPruned
		}

		// Once we have the prune tip, the value will be the block hash,
		// and the key the block height.
		copy(tipHash[:], v)
		tipHeight = byteOrder.Uint32(k)

		return nil
	}, func() {})
	if err != nil {
		return nil, 0, err
	}

	return &tipHash, tipHeight, nil
}

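// Example (hedged sketch, not part of the original file): startup code could
// compare the prune tip with the current best block to decide whether the
// graph still needs to catch up. Identifiers are hypothetical.
//
//	_, tipHeight, err := store.PruneTip()
//	switch {
//	case errors.Is(err, ErrGraphNeverPruned):
//		// Fresh graph: prune from the wallet birthday onwards.
//	case err != nil:
//		return err
//	case tipHeight < bestHeight:
//		// Replay blocks in (tipHeight, bestHeight] through PruneGraph.
//	}
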
// DeleteChannelEdges removes edges with the given channel IDs from the
// database and marks them as zombies. This ensures that we're unable to re-add
// them to our database once again. If an edge does not exist within the
// database, then ErrEdgeNotFound will be returned. If strictZombiePruning is
// true, then when we mark these edges as zombies, we'll set up the keys such
// that we require the node that failed to send the fresh update to be the one
// that resurrects the channel from its zombie state. The markZombie bool
// denotes whether or not to mark the channel as a zombie.
func (c *KVStore) DeleteChannelEdges(strictZombiePruning, markZombie bool,
	chanIDs ...uint64) ([]*models.ChannelEdgeInfo, error) {

	// TODO(roasbeef): possibly delete from node bucket if node has no more
	// channels
	// TODO(roasbeef): don't delete both edges?

	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()

	var infos []*models.ChannelEdgeInfo
	err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
		edges := tx.ReadWriteBucket(edgeBucket)
		if edges == nil {
			return ErrEdgeNotFound
		}
		edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrEdgeNotFound
		}
		chanIndex := edges.NestedReadWriteBucket(channelPointBucket)
		if chanIndex == nil {
			return ErrEdgeNotFound
		}
		nodes := tx.ReadWriteBucket(nodeBucket)
		if nodes == nil {
			return ErrGraphNodeNotFound
		}
		zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
		if err != nil {
			return err
		}

		var rawChanID [8]byte
		for _, chanID := range chanIDs {
			byteOrder.PutUint64(rawChanID[:], chanID)
			edgeInfo, err := c.delChannelEdgeUnsafe(
				edges, edgeIndex, chanIndex, zombieIndex,
				rawChanID[:], markZombie, strictZombiePruning,
			)
			if err != nil {
				return err
			}

			infos = append(infos, edgeInfo)
		}

		return nil
	}, func() {
		infos = nil
	})
	if err != nil {
		return nil, err
	}

	for _, chanID := range chanIDs {
		c.rejectCache.remove(chanID)
		c.chanCache.remove(chanID)
	}

	return infos, nil
}

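// Example (hedged sketch, not part of the original file): when gossip for a
// channel has gone quiet, a caller might remove it and mark it as a zombie so
// stale re-announcements are rejected until a fresh update arrives. The
// channel ID below is hypothetical.
//
//	_, err := store.DeleteChannelEdges(
//		true, // strictZombiePruning
//		true, // markZombie
//		staleChanID,
//	)
//	if errors.Is(err, ErrEdgeNotFound) {
//		// Already gone; nothing to do.
//	}
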
// ChannelID attempts to look up the 8-byte compact channel ID which maps to
// the passed channel point (outpoint). If the passed channel doesn't exist
// within the database, then ErrEdgeNotFound is returned.
func (c *KVStore) ChannelID(chanPoint *wire.OutPoint) (uint64, error) {
	var chanID uint64
	if err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		var err error
		chanID, err = getChanID(tx, chanPoint)
		return err
	}, func() {
		chanID = 0
	}); err != nil {
		return 0, err
	}

	return chanID, nil
}

// getChanID returns the assigned channel ID for a given channel point.
func getChanID(tx kvdb.RTx, chanPoint *wire.OutPoint) (uint64, error) {
	var b bytes.Buffer
	if err := WriteOutpoint(&b, chanPoint); err != nil {
		return 0, err
	}

	edges := tx.ReadBucket(edgeBucket)
	if edges == nil {
		return 0, ErrGraphNoEdgesFound
	}
	chanIndex := edges.NestedReadBucket(channelPointBucket)
	if chanIndex == nil {
		return 0, ErrGraphNoEdgesFound
	}

	chanIDBytes := chanIndex.Get(b.Bytes())
	if chanIDBytes == nil {
		return 0, ErrEdgeNotFound
	}

	chanID := byteOrder.Uint64(chanIDBytes)

	return chanID, nil
}

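// Example (hedged sketch, not part of the original file): resolving a funding
// outpoint to its compact channel ID. The outpoint values are hypothetical.
//
//	op := wire.OutPoint{Hash: fundingTxid, Index: 0}
//	cid, err := store.ChannelID(&op)
//	if errors.Is(err, ErrEdgeNotFound) {
//		// The outpoint is not a known channel point.
//	}
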
// TODO(roasbeef): allow updates to use Batch?

// HighestChanID returns the "highest" known channel ID in the channel graph.
// This represents the "newest" channel from the PoV of the chain. This method
// can be used by peers to quickly determine if their graphs are in sync.
func (c *KVStore) HighestChanID(_ context.Context) (uint64, error) {
	var cid uint64

	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrGraphNoEdgesFound
		}

		// In order to find the highest chan ID, we'll fetch a cursor
		// and use that to seek to the "end" of our known range.
		cidCursor := edgeIndex.ReadCursor()

		lastChanID, _ := cidCursor.Last()

		// If there's no key, then this means that we don't actually
		// know of any channels, so we'll return a predictable error.
		if lastChanID == nil {
			return ErrGraphNoEdgesFound
		}

		// Otherwise, we'll deserialize the channel ID and return it
		// to the caller.
		cid = byteOrder.Uint64(lastChanID)

		return nil
	}, func() {
		cid = 0
	})
	if err != nil && !errors.Is(err, ErrGraphNoEdgesFound) {
		return 0, err
	}

	return cid, nil
}

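// Example (hedged sketch, not part of the original file): two peers can
// compare their highest known channel IDs as a cheap "am I behind?" check
// before starting a full gossip sync. Identifiers are hypothetical.
//
//	localCID, err := store.HighestChanID(ctx)
//	if err != nil {
//		return err
//	}
//	localHeight := lnwire.NewShortChanIDFromInt(localCID).BlockHeight
//	if localHeight < remoteBestHeight {
//		// The remote peer likely knows of newer channels.
//	}
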
// ChannelEdge represents the complete set of information for a channel edge in
// the known channel graph. This struct couples the core information of the
// edge with each of the known advertised edge policies.
type ChannelEdge struct {
	// Info contains all the static information describing the channel.
	Info *models.ChannelEdgeInfo

	// Policy1 points to the "first" edge policy of the channel containing
	// the dynamic information required to properly route through the edge.
	Policy1 *models.ChannelEdgePolicy

	// Policy2 points to the "second" edge policy of the channel containing
	// the dynamic information required to properly route through the edge.
	Policy2 *models.ChannelEdgePolicy

	// Node1 is "node 1" in the channel. This is the node that would have
	// produced Policy1 if it exists.
	Node1 *models.LightningNode

	// Node2 is "node 2" in the channel. This is the node that would have
	// produced Policy2 if it exists.
	Node2 *models.LightningNode
}

// ChanUpdatesInHorizon returns all the known channel edges for which at least
// one directed edge has an update timestamp within the specified horizon.
func (c *KVStore) ChanUpdatesInHorizon(startTime,
	endTime time.Time) ([]ChannelEdge, error) {

	// To ensure we don't return duplicate ChannelEdges, we'll use an
	// additional map to keep track of the edges already seen to prevent
	// re-adding them.
	var edgesSeen map[uint64]struct{}
	var edgesToCache map[uint64]ChannelEdge
	var edgesInHorizon []ChannelEdge

	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()

	var hits int
	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrGraphNoEdgesFound
		}
		edgeUpdateIndex := edges.NestedReadBucket(edgeUpdateIndexBucket)
		if edgeUpdateIndex == nil {
			return ErrGraphNoEdgesFound
		}

		nodes := tx.ReadBucket(nodeBucket)
		if nodes == nil {
			return ErrGraphNodesNotFound
		}

		// We'll now obtain a cursor to perform a range query within
		// the index to find all channels within the horizon.
		updateCursor := edgeUpdateIndex.ReadCursor()

		var startTimeBytes, endTimeBytes [8 + 8]byte
		byteOrder.PutUint64(
			startTimeBytes[:8], uint64(startTime.Unix()),
		)
		byteOrder.PutUint64(
			endTimeBytes[:8], uint64(endTime.Unix()),
		)

		// With our start and end times constructed, we'll step through
		// the index collecting the info and policy of each update of
		// each channel that has a last update within the time range.
		//
		//nolint:ll
		for indexKey, _ := updateCursor.Seek(startTimeBytes[:]); indexKey != nil &&
			bytes.Compare(indexKey, endTimeBytes[:]) <= 0; indexKey, _ = updateCursor.Next() {
			// We have a new eligible entry, so we'll slice off the
			// chan ID so we can query it in the DB.
			chanID := indexKey[8:]

			// If we've already retrieved the info and policies for
			// this edge, then we can skip it as we don't need to do
			// so again.
			chanIDInt := byteOrder.Uint64(chanID)
			if _, ok := edgesSeen[chanIDInt]; ok {
				continue
			}

			if channel, ok := c.chanCache.get(chanIDInt); ok {
				hits++
				edgesSeen[chanIDInt] = struct{}{}
				edgesInHorizon = append(edgesInHorizon, channel)

				continue
			}

			// First, we'll fetch the static edge information.
			edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID)
			if err != nil {
				chanID := byteOrder.Uint64(chanID)
				return fmt.Errorf("unable to fetch info for "+
					"edge with chan_id=%v: %v", chanID, err)
			}

			// With the static information obtained, we'll now
			// fetch the dynamic policy info.
			edge1, edge2, err := fetchChanEdgePolicies(
				edgeIndex, edges, chanID,
			)
			if err != nil {
				chanID := byteOrder.Uint64(chanID)
				return fmt.Errorf("unable to fetch policies "+
					"for edge with chan_id=%v: %v", chanID,
					err)
			}

			node1, err := fetchLightningNode(
				nodes, edgeInfo.NodeKey1Bytes[:],
			)
			if err != nil {
				return err
			}

			node2, err := fetchLightningNode(
				nodes, edgeInfo.NodeKey2Bytes[:],
			)
			if err != nil {
				return err
			}

			// Finally, we'll collate this edge with the rest of the
			// edges to be returned.
			edgesSeen[chanIDInt] = struct{}{}
			channel := ChannelEdge{
				Info:    &edgeInfo,
				Policy1: edge1,
				Policy2: edge2,
				Node1:   &node1,
				Node2:   &node2,
			}
			edgesInHorizon = append(edgesInHorizon, channel)
			edgesToCache[chanIDInt] = channel
		}

		return nil
	}, func() {
		edgesSeen = make(map[uint64]struct{})
		edgesToCache = make(map[uint64]ChannelEdge)
		edgesInHorizon = nil
	})
	switch {
	case errors.Is(err, ErrGraphNoEdgesFound):
		fallthrough
	case errors.Is(err, ErrGraphNodesNotFound):
		break

	case err != nil:
		return nil, err
	}

	// Insert any edges loaded from disk into the cache.
	for chanid, channel := range edgesToCache {
		c.chanCache.insert(chanid, channel)
	}

	if len(edgesInHorizon) > 0 {
		log.Debugf("ChanUpdatesInHorizon hit percentage: %.2f (%d/%d)",
			float64(hits)*100/float64(len(edgesInHorizon)), hits,
			len(edgesInHorizon))
	} else {
		log.Debugf("ChanUpdatesInHorizon returned no edges in "+
			"horizon (%s, %s)", startTime, endTime)
	}

	return edgesInHorizon, nil
}

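// Example (hedged sketch, not part of the original file): when serving a
// remote peer's gossip timestamp filter, a caller could fetch everything
// updated within the requested window. Identifiers are hypothetical.
//
//	start := time.Unix(int64(firstTimestamp), 0)
//	end := start.Add(time.Duration(timestampRange) * time.Second)
//	edges, err := store.ChanUpdatesInHorizon(start, end)
//	if err != nil {
//		return err
//	}
//	// Each ChannelEdge carries Info, both policies and both nodes, so the
//	// announcement and updates can be re-assembled without extra lookups.
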
// NodeUpdatesInHorizon returns all the known lightning nodes which have an
// update timestamp within the passed range. This method can be used by two
// nodes to quickly determine if they have the same set of up-to-date node
// announcements.
func (c *KVStore) NodeUpdatesInHorizon(startTime,
	endTime time.Time) ([]models.LightningNode, error) {

	var nodesInHorizon []models.LightningNode

	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		nodes := tx.ReadBucket(nodeBucket)
		if nodes == nil {
			return ErrGraphNodesNotFound
		}

		nodeUpdateIndex := nodes.NestedReadBucket(nodeUpdateIndexBucket)
		if nodeUpdateIndex == nil {
			return ErrGraphNodesNotFound
		}

		// We'll now obtain a cursor to perform a range query within
		// the index to find all node announcements within the horizon.
		updateCursor := nodeUpdateIndex.ReadCursor()

		var startTimeBytes, endTimeBytes [8 + 33]byte
		byteOrder.PutUint64(
			startTimeBytes[:8], uint64(startTime.Unix()),
		)
		byteOrder.PutUint64(
			endTimeBytes[:8], uint64(endTime.Unix()),
		)

		// With our start and end times constructed, we'll step through
		// the index collecting info for each node within the time
		// range.
		//
		//nolint:ll
		for indexKey, _ := updateCursor.Seek(startTimeBytes[:]); indexKey != nil &&
			bytes.Compare(indexKey, endTimeBytes[:]) <= 0; indexKey, _ = updateCursor.Next() {
			nodePub := indexKey[8:]
			node, err := fetchLightningNode(nodes, nodePub)
			if err != nil {
				return err
			}

			nodesInHorizon = append(nodesInHorizon, node)
		}

		return nil
	}, func() {
		nodesInHorizon = nil
	})
	switch {
	case errors.Is(err, ErrGraphNoEdgesFound):
		fallthrough
	case errors.Is(err, ErrGraphNodesNotFound):
		break

	case err != nil:
		return nil, err
	}

	return nodesInHorizon, nil
}

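// Example (hedged sketch, not part of the original file): the node variant is
// typically queried with the same horizon so fresh node announcements can be
// gossiped alongside the channel updates above.
//
//	nodes, err := store.NodeUpdatesInHorizon(start, end)
//	if err != nil {
//		return err
//	}
//	for _, node := range nodes {
//		// node.LastUpdate falls within [start, end].
//	}
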
// FilterKnownChanIDs takes a set of channel IDs and returns the subset of chan
// IDs that we don't know and are not known zombies of the passed set. In other
// words, we perform a set difference of our set of chan IDs and the ones
// passed in. This method can be used by callers to determine the set of
// channels another peer knows of that we don't. The ChannelUpdateInfos for the
// known zombies are also returned.
func (c *KVStore) FilterKnownChanIDs(chansInfo []ChannelUpdateInfo) ([]uint64,
	[]ChannelUpdateInfo, error) {

	var (
		newChanIDs   []uint64
		knownZombies []ChannelUpdateInfo
	)

	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()

	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrGraphNoEdgesFound
		}

		// Fetch the zombie index, it may not exist if no edges have
		// ever been marked as zombies. If the index has been
		// initialized, we will use it later to skip known zombie edges.
		zombieIndex := edges.NestedReadBucket(zombieBucket)

		// We'll run through the set of chanIDs and collate only the
		// set of channels that are unable to be found within our db.
		var cidBytes [8]byte
		for _, info := range chansInfo {
			scid := info.ShortChannelID.ToUint64()
			byteOrder.PutUint64(cidBytes[:], scid)

			// If the edge is already known, skip it.
			if v := edgeIndex.Get(cidBytes[:]); v != nil {
				continue
			}

			// If the edge is a known zombie, skip it.
			if zombieIndex != nil {
				isZombie, _, _ := isZombieEdge(
					zombieIndex, scid,
				)

				if isZombie {
					knownZombies = append(
						knownZombies, info,
					)

					continue
				}
			}

			newChanIDs = append(newChanIDs, scid)
		}

		return nil
	}, func() {
		newChanIDs = nil
		knownZombies = nil
	})
	switch {
	// If we don't know of any edges yet, then we'll return the entire set
	// of chan IDs specified.
	case errors.Is(err, ErrGraphNoEdgesFound):
		ogChanIDs := make([]uint64, len(chansInfo))
		for i, info := range chansInfo {
			ogChanIDs[i] = info.ShortChannelID.ToUint64()
		}

		return ogChanIDs, nil, nil

	case err != nil:
		return nil, nil, err
	}

	return newChanIDs, knownZombies, nil
}

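// Example (hedged sketch, not part of the original file): after receiving a
// peer's reply_channel_range, the advertised SCIDs can be reduced to the ones
// actually worth querying. Identifiers are hypothetical.
//
//	newSCIDs, zombies, err := store.FilterKnownChanIDs(remoteChans)
//	if err != nil {
//		return err
//	}
//	// newSCIDs warrant a query_short_channel_ids round trip; zombies are
//	// channels we deliberately forgot and may only want to resurrect if
//	// the peer can provide a newer update.
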
// ChannelUpdateInfo couples the SCID of a channel with the timestamps of the
// latest received channel updates for the channel.
type ChannelUpdateInfo struct {
	// ShortChannelID is the SCID identifier of the channel.
	ShortChannelID lnwire.ShortChannelID

	// Node1UpdateTimestamp is the timestamp of the latest received update
	// from the node 1 channel peer. This will be set to zero time if no
	// update has yet been received from this node.
	Node1UpdateTimestamp time.Time

	// Node2UpdateTimestamp is the timestamp of the latest received update
	// from the node 2 channel peer. This will be set to zero time if no
	// update has yet been received from this node.
	Node2UpdateTimestamp time.Time
}

// NewChannelUpdateInfo is a constructor which makes sure we initialize the
// timestamps with the zero-second unix timestamp (which equals
// `January 1, 1970, 00:00:00 UTC`) in case the value is `time.Time{}`.
func NewChannelUpdateInfo(scid lnwire.ShortChannelID, node1Timestamp,
	node2Timestamp time.Time) ChannelUpdateInfo {

	chanInfo := ChannelUpdateInfo{
		ShortChannelID:       scid,
		Node1UpdateTimestamp: node1Timestamp,
		Node2UpdateTimestamp: node2Timestamp,
	}

	if node1Timestamp.IsZero() {
		chanInfo.Node1UpdateTimestamp = time.Unix(0, 0)
	}

	if node2Timestamp.IsZero() {
		chanInfo.Node2UpdateTimestamp = time.Unix(0, 0)
	}

	return chanInfo
}

// BlockChannelRange represents a range of channels for a given block height.
type BlockChannelRange struct {
	// Height is the height of the block all of the channels below were
	// included in.
	Height uint32

	// Channels is the list of channels identified by their short ID
	// representation known to us that were included in the block height
	// above. The list may include channel update timestamp information if
	// requested.
	Channels []ChannelUpdateInfo
}

// FilterChannelRange returns the channel IDs of all known channels which were
// mined in a block height within the passed range. The channel IDs are grouped
// by their common block height. This method can be used to quickly share with
// a peer the set of channels we know of within a particular range to catch
// them up after a period of time offline. If withTimestamps is true then the
// timestamp info of the latest received channel update messages of the channel
// will be included in the response.
func (c *KVStore) FilterChannelRange(startHeight,
	endHeight uint32, withTimestamps bool) ([]BlockChannelRange, error) {

	startChanID := &lnwire.ShortChannelID{
		BlockHeight: startHeight,
	}

	endChanID := lnwire.ShortChannelID{
		BlockHeight: endHeight,
		TxIndex:     math.MaxUint32 & 0x00ffffff,
		TxPosition:  math.MaxUint16,
	}

	// As we need to perform a range scan, we'll convert the starting and
	// ending height to their corresponding values when encoded using short
	// channel IDs.
	var chanIDStart, chanIDEnd [8]byte
	byteOrder.PutUint64(chanIDStart[:], startChanID.ToUint64())
	byteOrder.PutUint64(chanIDEnd[:], endChanID.ToUint64())

	var channelsPerBlock map[uint32][]ChannelUpdateInfo
	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrGraphNoEdgesFound
		}

		cursor := edgeIndex.ReadCursor()

		// We'll now iterate through the database, and find each
		// channel ID that resides within the specified range.
		//
		//nolint:ll
		for k, v := cursor.Seek(chanIDStart[:]); k != nil &&
			bytes.Compare(k, chanIDEnd[:]) <= 0; k, v = cursor.Next() {
			// Don't send alias SCIDs during gossip sync.
			edgeReader := bytes.NewReader(v)
			edgeInfo, err := deserializeChanEdgeInfo(edgeReader)
			if err != nil {
				return err
			}

			if edgeInfo.AuthProof == nil {
				continue
			}

			// This channel ID rests within the target range, so
			// we'll add it to our returned set.
			rawCid := byteOrder.Uint64(k)
			cid := lnwire.NewShortChanIDFromInt(rawCid)

			chanInfo := NewChannelUpdateInfo(
				cid, time.Time{}, time.Time{},
			)

			if !withTimestamps {
				channelsPerBlock[cid.BlockHeight] = append(
					channelsPerBlock[cid.BlockHeight],
					chanInfo,
				)

				continue
			}

			node1Key, node2Key := computeEdgePolicyKeys(&edgeInfo)

			rawPolicy := edges.Get(node1Key)
			if len(rawPolicy) != 0 {
				r := bytes.NewReader(rawPolicy)

				edge, err := deserializeChanEdgePolicyRaw(r)
				if err != nil && !errors.Is(
					err, ErrEdgePolicyOptionalFieldNotFound,
				) && !errors.Is(err, ErrParsingExtraTLVBytes) {

					return err
				}

				chanInfo.Node1UpdateTimestamp = edge.LastUpdate
			}

			rawPolicy = edges.Get(node2Key)
			if len(rawPolicy) != 0 {
				r := bytes.NewReader(rawPolicy)

				edge, err := deserializeChanEdgePolicyRaw(r)
11✔
2479
                                if err != nil && !errors.Is(
11✔
2480
                                        err, ErrEdgePolicyOptionalFieldNotFound,
11✔
2481
                                ) && !errors.Is(err, ErrParsingExtraTLVBytes) {
11✔
2482

×
2483
                                        return err
×
2484
                                }
×
2485

2486
                                chanInfo.Node2UpdateTimestamp = edge.LastUpdate
11✔
2487
                        }
2488

2489
                        channelsPerBlock[cid.BlockHeight] = append(
22✔
2490
                                channelsPerBlock[cid.BlockHeight], chanInfo,
22✔
2491
                        )
22✔
2492
                }
2493

2494
                return nil
11✔
2495
        }, func() {
11✔
2496
                channelsPerBlock = make(map[uint32][]ChannelUpdateInfo)
11✔
2497
        })
11✔
2498

2499
        switch {
11✔
2500
        // If we don't know of any channels yet, then there's nothing to
2501
        // filter, so we'll return an empty slice.
2502
        case errors.Is(err, ErrGraphNoEdgesFound) || len(channelsPerBlock) == 0:
3✔
2503
                return nil, nil
3✔
2504

2505
        case err != nil:
×
2506
                return nil, err
×
2507
        }
2508

2509
        // Return the channel ranges in ascending block height order.
2510
        blocks := make([]uint32, 0, len(channelsPerBlock))
8✔
2511
        for block := range channelsPerBlock {
30✔
2512
                blocks = append(blocks, block)
22✔
2513
        }
22✔
2514
        sort.Slice(blocks, func(i, j int) bool {
35✔
2515
                return blocks[i] < blocks[j]
27✔
2516
        })
27✔
2517

2518
        channelRanges := make([]BlockChannelRange, 0, len(channelsPerBlock))
8✔
2519
        for _, block := range blocks {
30✔
2520
                channelRanges = append(channelRanges, BlockChannelRange{
22✔
2521
                        Height:   block,
22✔
2522
                        Channels: channelsPerBlock[block],
22✔
2523
                })
22✔
2524
        }
22✔
2525

2526
        return channelRanges, nil
8✔
2527
}
2528
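
// Illustrative sketch (not part of this file): how a gossip syncer might
// consume FilterChannelRange to answer a query_channel_range style request.
// The store variable and block heights are hypothetical.
func exampleFilterChannelRange(store *KVStore) error {
        // Ask for all announced channels mined between the two heights,
        // including the latest channel update timestamps for each direction.
        ranges, err := store.FilterChannelRange(700_000, 700_100, true)
        if err != nil {
                return err
        }

        // Results are grouped per block and returned in ascending block
        // height order.
        for _, blockRange := range ranges {
                for _, ch := range blockRange.Channels {
                        fmt.Printf("height=%d scid=%v node1=%v node2=%v\n",
                                blockRange.Height, ch.ShortChannelID,
                                ch.Node1UpdateTimestamp,
                                ch.Node2UpdateTimestamp)
                }
        }

        return nil
}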

2529
// FetchChanInfos returns the set of channel edges that correspond to the passed
2530
// channel ID's. If an edge in the query is unknown to the database, it will be
2531
// skipped and the result will contain only those edges that exist at the time
2532
// of the query. This can be used to respond to peer queries that are seeking to
2533
// fill in gaps in their view of the channel graph.
2534
func (c *KVStore) FetchChanInfos(chanIDs []uint64) ([]ChannelEdge, error) {
4✔
2535
        return c.fetchChanInfos(nil, chanIDs)
4✔
2536
}
4✔
2537
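
// Illustrative sketch (not part of this file): answering a peer's
// query_short_chan_ids style request with FetchChanInfos. Channel IDs that
// are unknown to us are simply absent from the result rather than producing
// an error. The scids slice is hypothetical.
func exampleFetchChanInfos(store *KVStore,
        scids []lnwire.ShortChannelID) error {

        chanIDs := make([]uint64, 0, len(scids))
        for _, scid := range scids {
                chanIDs = append(chanIDs, scid.ToUint64())
        }

        edges, err := store.FetchChanInfos(chanIDs)
        if err != nil {
                return err
        }

        for _, edge := range edges {
                fmt.Printf("chan=%d policy1=%v policy2=%v\n",
                        edge.Info.ChannelID, edge.Policy1 != nil,
                        edge.Policy2 != nil)
        }

        return nil
}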

2538
// fetchChanInfos returns the set of channel edges that correspond to the passed
2539
// channel ID's. If an edge in the query is unknown to the database, it will be
2540
// skipped and the result will contain only those edges that exist at the time
2541
// of the query. This can be used to respond to peer queries that are seeking to
2542
// fill in gaps in their view of the channel graph.
2543
//
2544
// NOTE: An optional transaction may be provided. If none is provided, then a
2545
// new one will be created.
2546
func (c *KVStore) fetchChanInfos(tx kvdb.RTx, chanIDs []uint64) (
2547
        []ChannelEdge, error) {
4✔
2548
        // TODO(roasbeef): sort cids?
4✔
2549

4✔
2550
        var (
4✔
2551
                chanEdges []ChannelEdge
4✔
2552
                cidBytes  [8]byte
4✔
2553
        )
4✔
2554

4✔
2555
        fetchChanInfos := func(tx kvdb.RTx) error {
8✔
2556
                edges := tx.ReadBucket(edgeBucket)
4✔
2557
                if edges == nil {
4✔
2558
                        return ErrGraphNoEdgesFound
×
2559
                }
×
2560
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
4✔
2561
                if edgeIndex == nil {
4✔
2562
                        return ErrGraphNoEdgesFound
×
2563
                }
×
2564
                nodes := tx.ReadBucket(nodeBucket)
4✔
2565
                if nodes == nil {
4✔
2566
                        return ErrGraphNotFound
×
2567
                }
×
2568

2569
                for _, cid := range chanIDs {
15✔
2570
                        byteOrder.PutUint64(cidBytes[:], cid)
11✔
2571

11✔
2572
                        // First, we'll fetch the static edge information. If
11✔
2573
                        // the edge is unknown, we will skip the edge and
11✔
2574
                        // continue gathering all known edges.
11✔
2575
                        edgeInfo, err := fetchChanEdgeInfo(
11✔
2576
                                edgeIndex, cidBytes[:],
11✔
2577
                        )
11✔
2578
                        switch {
11✔
2579
                        case errors.Is(err, ErrEdgeNotFound):
3✔
2580
                                continue
3✔
2581
                        case err != nil:
×
2582
                                return err
×
2583
                        }
2584

2585
                        // With the static information obtained, we'll now
2586
                        // fetch the dynamic policy info.
2587
                        edge1, edge2, err := fetchChanEdgePolicies(
8✔
2588
                                edgeIndex, edges, cidBytes[:],
8✔
2589
                        )
8✔
2590
                        if err != nil {
8✔
2591
                                return err
×
2592
                        }
×
2593

2594
                        node1, err := fetchLightningNode(
8✔
2595
                                nodes, edgeInfo.NodeKey1Bytes[:],
8✔
2596
                        )
8✔
2597
                        if err != nil {
8✔
2598
                                return err
×
2599
                        }
×
2600

2601
                        node2, err := fetchLightningNode(
8✔
2602
                                nodes, edgeInfo.NodeKey2Bytes[:],
8✔
2603
                        )
8✔
2604
                        if err != nil {
8✔
2605
                                return err
×
2606
                        }
×
2607

2608
                        chanEdges = append(chanEdges, ChannelEdge{
8✔
2609
                                Info:    &edgeInfo,
8✔
2610
                                Policy1: edge1,
8✔
2611
                                Policy2: edge2,
8✔
2612
                                Node1:   &node1,
8✔
2613
                                Node2:   &node2,
8✔
2614
                        })
8✔
2615
                }
2616

2617
                return nil
4✔
2618
        }
2619

2620
        if tx == nil {
8✔
2621
                err := kvdb.View(c.db, fetchChanInfos, func() {
8✔
2622
                        chanEdges = nil
4✔
2623
                })
4✔
2624
                if err != nil {
4✔
2625
                        return nil, err
×
2626
                }
×
2627

2628
                return chanEdges, nil
4✔
2629
        }
2630

2631
        err := fetchChanInfos(tx)
×
2632
        if err != nil {
×
2633
                return nil, err
×
2634
        }
×
2635

2636
        return chanEdges, nil
×
2637
}
2638

2639
func delEdgeUpdateIndexEntry(edgesBucket kvdb.RwBucket, chanID uint64,
2640
        edge1, edge2 *models.ChannelEdgePolicy) error {
139✔
2641

139✔
2642
        // First, we'll fetch the edge update index bucket which currently
139✔
2643
        // stores an entry for the channel we're about to delete.
139✔
2644
        updateIndex := edgesBucket.NestedReadWriteBucket(edgeUpdateIndexBucket)
139✔
2645
        if updateIndex == nil {
139✔
2646
                // No edges in bucket, return early.
×
2647
                return nil
×
2648
        }
×
2649

2650
        // Now that we have the bucket, we'll attempt to construct a template
2651
        // for the index key: updateTime || chanid.
2652
        var indexKey [8 + 8]byte
139✔
2653
        byteOrder.PutUint64(indexKey[8:], chanID)
139✔
2654

139✔
2655
        // With the template constructed, we'll attempt to delete an entry that
139✔
2656
        // would have been created by both edges: we'll alternate the update
139✔
2657
        // times, as one may have overridden the other.
139✔
2658
        if edge1 != nil {
149✔
2659
                byteOrder.PutUint64(
10✔
2660
                        indexKey[:8], uint64(edge1.LastUpdate.Unix()),
10✔
2661
                )
10✔
2662
                if err := updateIndex.Delete(indexKey[:]); err != nil {
10✔
2663
                        return err
×
2664
                }
×
2665
        }
2666

2667
        // We'll also attempt to delete the entry that may have been created by
2668
        // the second edge.
2669
        if edge2 != nil {
151✔
2670
                byteOrder.PutUint64(
12✔
2671
                        indexKey[:8], uint64(edge2.LastUpdate.Unix()),
12✔
2672
                )
12✔
2673
                if err := updateIndex.Delete(indexKey[:]); err != nil {
12✔
2674
                        return err
×
2675
                }
×
2676
        }
2677

2678
        return nil
139✔
2679
}
2680

2681
// delChannelEdgeUnsafe deletes the edge with the given chanID from the graph
2682
// cache. It then goes on to delete any policy info and edge info for this
2683
// channel from the DB and finally, if isZombie is true, it will add an entry
2684
// for this channel in the zombie index.
2685
//
2686
// NOTE: this method MUST only be called if the cacheMu has already been
2687
// acquired.
2688
func (c *KVStore) delChannelEdgeUnsafe(edges, edgeIndex, chanIndex,
2689
        zombieIndex kvdb.RwBucket, chanID []byte, isZombie,
2690
        strictZombie bool) (*models.ChannelEdgeInfo, error) {
197✔
2691

197✔
2692
        edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID)
197✔
2693
        if err != nil {
255✔
2694
                return nil, err
58✔
2695
        }
58✔
2696

2697
        // We'll also remove the entry in the edge update index bucket before
2698
        // we delete the edges themselves so we can access their last update
2699
        // times.
2700
        cid := byteOrder.Uint64(chanID)
139✔
2701
        edge1, edge2, err := fetchChanEdgePolicies(edgeIndex, edges, chanID)
139✔
2702
        if err != nil {
139✔
2703
                return nil, err
×
2704
        }
×
2705
        err = delEdgeUpdateIndexEntry(edges, cid, edge1, edge2)
139✔
2706
        if err != nil {
139✔
2707
                return nil, err
×
2708
        }
×
2709

2710
        // The edge key is of the format pubKey || chanID. First we construct
2711
        // the latter half, populating the channel ID.
2712
        var edgeKey [33 + 8]byte
139✔
2713
        copy(edgeKey[33:], chanID)
139✔
2714

139✔
2715
        // With the latter half constructed, copy over the first public key to
139✔
2716
        // delete the edge in this direction, then the second to delete the
139✔
2717
        // edge in the opposite direction.
139✔
2718
        copy(edgeKey[:33], edgeInfo.NodeKey1Bytes[:])
139✔
2719
        if edges.Get(edgeKey[:]) != nil {
278✔
2720
                if err := edges.Delete(edgeKey[:]); err != nil {
139✔
2721
                        return nil, err
×
2722
                }
×
2723
        }
2724
        copy(edgeKey[:33], edgeInfo.NodeKey2Bytes[:])
139✔
2725
        if edges.Get(edgeKey[:]) != nil {
278✔
2726
                if err := edges.Delete(edgeKey[:]); err != nil {
139✔
2727
                        return nil, err
×
2728
                }
×
2729
        }
2730

2731
        // As part of deleting the edge we also remove all disabled entries
2732
        // from the edgePolicyDisabledIndex bucket. We do that for both
2733
        // directions.
2734
        err = updateEdgePolicyDisabledIndex(edges, cid, false, false)
139✔
2735
        if err != nil {
139✔
2736
                return nil, err
×
2737
        }
×
2738
        err = updateEdgePolicyDisabledIndex(edges, cid, true, false)
139✔
2739
        if err != nil {
139✔
2740
                return nil, err
×
2741
        }
×
2742

2743
        // With the edge data deleted, we can purge the information from the two
2744
        // edge indexes.
2745
        if err := edgeIndex.Delete(chanID); err != nil {
139✔
2746
                return nil, err
×
2747
        }
×
2748
        var b bytes.Buffer
139✔
2749
        if err := WriteOutpoint(&b, &edgeInfo.ChannelPoint); err != nil {
139✔
2750
                return nil, err
×
2751
        }
×
2752
        if err := chanIndex.Delete(b.Bytes()); err != nil {
139✔
2753
                return nil, err
×
2754
        }
×
2755

2756
        // Finally, we'll mark the edge as a zombie within our index if it's
2757
        // being removed due to the channel becoming a zombie. We do this to
2758
        // ensure we don't store unnecessary data for spent channels.
2759
        if !isZombie {
255✔
2760
                return &edgeInfo, nil
116✔
2761
        }
116✔
2762

2763
        nodeKey1, nodeKey2 := edgeInfo.NodeKey1Bytes, edgeInfo.NodeKey2Bytes
23✔
2764
        if strictZombie {
26✔
2765
                var e1UpdateTime, e2UpdateTime *time.Time
3✔
2766
                if edge1 != nil {
5✔
2767
                        e1UpdateTime = &edge1.LastUpdate
2✔
2768
                }
2✔
2769
                if edge2 != nil {
6✔
2770
                        e2UpdateTime = &edge2.LastUpdate
3✔
2771
                }
3✔
2772

2773
                nodeKey1, nodeKey2 = makeZombiePubkeys(
3✔
2774
                        &edgeInfo, e1UpdateTime, e2UpdateTime,
3✔
2775
                )
3✔
2776
        }
2777

2778
        return &edgeInfo, markEdgeZombie(
23✔
2779
                zombieIndex, byteOrder.Uint64(chanID), nodeKey1, nodeKey2,
23✔
2780
        )
23✔
2781
}
2782

2783
// makeZombiePubkeys derives the node pubkeys to store in the zombie index for a
2784
// particular pair of channel policies. The return values are one of:
2785
//  1. (pubkey1, pubkey2)
2786
//  2. (pubkey1, blank)
2787
//  3. (blank, pubkey2)
2788
//
2789
// A blank pubkey means that corresponding node will be unable to resurrect a
2790
// channel on its own. For example, node1 may continue to publish recent
2791
// updates, but node2 has fallen way behind. After marking an edge as a zombie,
2792
// we don't want another fresh update from node1 to resurrect it, as the edge can
2793
// only become live once node2 finally sends something recent.
2794
//
2795
// In the case where we have neither update, we allow either party to resurrect
2796
// the channel. If the channel were to be marked zombie again, it would be
2797
// marked with the correct lagging channel since we received an update from only
2798
// one side.
2799
func makeZombiePubkeys(info *models.ChannelEdgeInfo,
2800
        e1, e2 *time.Time) ([33]byte, [33]byte) {
3✔
2801

3✔
2802
        switch {
3✔
2803
        // If we don't have either edge policy, we'll return both pubkeys so
2804
        // that the channel can be resurrected by either party.
2805
        case e1 == nil && e2 == nil:
×
2806
                return info.NodeKey1Bytes, info.NodeKey2Bytes
×
2807

2808
        // If we're missing edge1, or if both edges are present but edge1 is
2809
        // older, we'll return edge1's pubkey and a blank pubkey for edge2. This
2810
        // means that only an update from edge1 will be able to resurrect the
2811
        // channel.
2812
        case e1 == nil || (e2 != nil && e1.Before(*e2)):
1✔
2813
                return info.NodeKey1Bytes, [33]byte{}
1✔
2814

2815
        // Otherwise, we're missing edge2 or edge2 is the older side, so we
2816
        // return a blank pubkey for edge1. In this case, only an update from
2817
        // edge2 can resurrect the channel.
2818
        default:
2✔
2819
                return [33]byte{}, info.NodeKey2Bytes
2✔
2820
        }
2821
}
2822
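
// Illustrative sketch (not part of this file): the three possible outcomes of
// makeZombiePubkeys for a hypothetical edge. A blank key means that updates
// from that side alone cannot resurrect the zombie channel.
func exampleMakeZombiePubkeys(info *models.ChannelEdgeInfo) {
        older := time.Unix(1_000, 0)
        newer := time.Unix(2_000, 0)

        // Neither side has ever sent an update: either side may resurrect.
        k1, k2 := makeZombiePubkeys(info, nil, nil)
        fmt.Println(k1 == info.NodeKey1Bytes, k2 == info.NodeKey2Bytes)

        // Node 1 is the lagging side, so only node 1 can resurrect.
        k1, k2 = makeZombiePubkeys(info, &older, &newer)
        fmt.Println(k1 == info.NodeKey1Bytes, k2 == [33]byte{})

        // Node 2 is the lagging side, so only node 2 can resurrect.
        k1, k2 = makeZombiePubkeys(info, &newer, &older)
        fmt.Println(k1 == [33]byte{}, k2 == info.NodeKey2Bytes)
}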

2823
// UpdateEdgePolicy updates the edge routing policy for a single directed edge
2824
// within the database for the referenced channel. The `flags` attribute within
2825
// the ChannelEdgePolicy determines which of the directed edges are being
2826
// updated. If the flag is 0, then the first node's information is being
2827
// updated, otherwise it's the second node's information. The node ordering is
2828
// determined by the lexicographical ordering of the identity public keys of the
2829
// nodes on either side of the channel.
2830
func (c *KVStore) UpdateEdgePolicy(ctx context.Context,
2831
        edge *models.ChannelEdgePolicy,
2832
        opts ...batch.SchedulerOption) (route.Vertex, route.Vertex, error) {
2,672✔
2833

2,672✔
2834
        var (
2,672✔
2835
                isUpdate1    bool
2,672✔
2836
                edgeNotFound bool
2,672✔
2837
                from, to     route.Vertex
2,672✔
2838
        )
2,672✔
2839

2,672✔
2840
        r := &batch.Request[kvdb.RwTx]{
2,672✔
2841
                Opts: batch.NewSchedulerOptions(opts...),
2,672✔
2842
                Reset: func() {
5,345✔
2843
                        isUpdate1 = false
2,673✔
2844
                        edgeNotFound = false
2,673✔
2845
                },
2,673✔
2846
                Do: func(tx kvdb.RwTx) error {
2,673✔
2847
                        // Validate that the ExtraOpaqueData is in fact a valid
2,673✔
2848
                        // TLV stream. This is done here instead of within
2,673✔
2849
                        // updateEdgePolicy so that updateEdgePolicy can be used
2,673✔
2850
                        // by unit tests to recreate the case where we already
2,673✔
2851
                        // have nodes persisted with invalid TLV data.
2,673✔
2852
                        err := edge.ExtraOpaqueData.ValidateTLV()
2,673✔
2853
                        if err != nil {
2,675✔
2854
                                return fmt.Errorf("%w: %w",
2✔
2855
                                        ErrParsingExtraTLVBytes, err)
2✔
2856
                        }
2✔
2857

2858
                        from, to, isUpdate1, err = updateEdgePolicy(tx, edge)
2,671✔
2859
                        if err != nil {
2,675✔
2860
                                log.Errorf("UpdateEdgePolicy failed: %v", err)
4✔
2861
                        }
4✔
2862

2863
                        // Silence ErrEdgeNotFound so that the batch can
2864
                        // succeed, but propagate the error via local state.
2865
                        if errors.Is(err, ErrEdgeNotFound) {
2,675✔
2866
                                edgeNotFound = true
4✔
2867
                                return nil
4✔
2868
                        }
4✔
2869

2870
                        return err
2,667✔
2871
                },
2872
                OnCommit: func(err error) error {
2,672✔
2873
                        switch {
2,672✔
2874
                        case err != nil:
1✔
2875
                                return err
1✔
2876
                        case edgeNotFound:
4✔
2877
                                return ErrEdgeNotFound
4✔
2878
                        default:
2,667✔
2879
                                c.updateEdgeCache(edge, isUpdate1)
2,667✔
2880
                                return nil
2,667✔
2881
                        }
2882
                },
2883
        }
2884

2885
        err := c.chanScheduler.Execute(ctx, r)
2,672✔
2886

2,672✔
2887
        return from, to, err
2,672✔
2888
}
2889
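
// Illustrative sketch (not part of this file): applying a channel update
// received from gossip. The policy value is hypothetical; only the error
// handling relevant to this method is spelled out.
func exampleUpdateEdgePolicy(ctx context.Context, store *KVStore,
        policy *models.ChannelEdgePolicy) error {

        // The direction bit of ChannelFlags decides whether node1's or
        // node2's policy is written.
        from, to, err := store.UpdateEdgePolicy(ctx, policy)
        switch {
        case errors.Is(err, ErrEdgeNotFound):
                // The channel announcement hasn't been processed yet, so the
                // caller may want to cache the update and retry later.
                return err

        case err != nil:
                return err
        }

        fmt.Printf("updated policy from %x towards %x\n", from[:], to[:])

        return nil
}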

2890
func (c *KVStore) updateEdgeCache(e *models.ChannelEdgePolicy,
2891
        isUpdate1 bool) {
2,667✔
2892

2,667✔
2893
        // If an entry for this channel is found in the reject cache, we'll modify
2,667✔
2894
        // the entry with the updated timestamp for the direction that was just
2,667✔
2895
        // written. If the edge doesn't exist, we'll load the cache entry lazily
2,667✔
2896
        // during the next query for this edge.
2,667✔
2897
        if entry, ok := c.rejectCache.get(e.ChannelID); ok {
2,672✔
2898
                if isUpdate1 {
8✔
2899
                        entry.upd1Time = e.LastUpdate.Unix()
3✔
2900
                } else {
5✔
2901
                        entry.upd2Time = e.LastUpdate.Unix()
2✔
2902
                }
2✔
2903
                c.rejectCache.insert(e.ChannelID, entry)
5✔
2904
        }
2905

2906
        // If an entry for this channel is found in the channel cache, we'll modify
2907
        // the entry with the updated policy for the direction that was just
2908
        // written. If the edge doesn't exist, we'll defer loading the info and
2909
        // policies and lazily read from disk during the next query.
2910
        if channel, ok := c.chanCache.get(e.ChannelID); ok {
2,667✔
2911
                if isUpdate1 {
×
2912
                        channel.Policy1 = e
×
2913
                } else {
×
2914
                        channel.Policy2 = e
×
2915
                }
×
2916
                c.chanCache.insert(e.ChannelID, channel)
×
2917
        }
2918
}
2919

2920
// updateEdgePolicy attempts to update an edge's policy within the relevant
2921
// buckets using an existing database transaction. The returned boolean will be
2922
// true if the updated policy belongs to node1, and false if the policy belonged
2923
// to node2.
2924
func updateEdgePolicy(tx kvdb.RwTx, edge *models.ChannelEdgePolicy) (
2925
        route.Vertex, route.Vertex, bool, error) {
2,671✔
2926

2,671✔
2927
        var noVertex route.Vertex
2,671✔
2928

2,671✔
2929
        edges := tx.ReadWriteBucket(edgeBucket)
2,671✔
2930
        if edges == nil {
2,671✔
2931
                return noVertex, noVertex, false, ErrEdgeNotFound
×
2932
        }
×
2933
        edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
2,671✔
2934
        if edgeIndex == nil {
2,671✔
2935
                return noVertex, noVertex, false, ErrEdgeNotFound
×
2936
        }
×
2937

2938
        // Create the channelID key by converting the channel ID
2939
        // integer into a byte slice.
2940
        var chanID [8]byte
2,671✔
2941
        byteOrder.PutUint64(chanID[:], edge.ChannelID)
2,671✔
2942

2,671✔
2943
        // With the channel ID, we then fetch the value storing the two
2,671✔
2944
        // nodes which connect this channel edge.
2,671✔
2945
        nodeInfo := edgeIndex.Get(chanID[:])
2,671✔
2946
        if nodeInfo == nil {
2,675✔
2947
                return noVertex, noVertex, false, ErrEdgeNotFound
4✔
2948
        }
4✔
2949

2950
        // Depending on the flags value passed above, either the first
2951
        // or second edge policy is being updated.
2952
        var fromNode, toNode []byte
2,667✔
2953
        var isUpdate1 bool
2,667✔
2954
        if edge.ChannelFlags&lnwire.ChanUpdateDirection == 0 {
4,004✔
2955
                fromNode = nodeInfo[:33]
1,337✔
2956
                toNode = nodeInfo[33:66]
1,337✔
2957
                isUpdate1 = true
1,337✔
2958
        } else {
2,667✔
2959
                fromNode = nodeInfo[33:66]
1,330✔
2960
                toNode = nodeInfo[:33]
1,330✔
2961
                isUpdate1 = false
1,330✔
2962
        }
1,330✔
2963

2964
        // Finally, with the direction of the edge being updated
2965
        // identified, we update the on-disk edge representation.
2966
        err := putChanEdgePolicy(edges, edge, fromNode, toNode)
2,667✔
2967
        if err != nil {
2,667✔
2968
                return noVertex, noVertex, false, err
×
2969
        }
×
2970

2971
        var (
2,667✔
2972
                fromNodePubKey route.Vertex
2,667✔
2973
                toNodePubKey   route.Vertex
2,667✔
2974
        )
2,667✔
2975
        copy(fromNodePubKey[:], fromNode)
2,667✔
2976
        copy(toNodePubKey[:], toNode)
2,667✔
2977

2,667✔
2978
        return fromNodePubKey, toNodePubKey, isUpdate1, nil
2,667✔
2979
}
2980

2981
// isPublic determines whether the node is seen as public within the graph from
2982
// the source node's point of view. An existing database transaction can also be
2983
// specified.
2984
func (c *KVStore) isPublic(tx kvdb.RTx, nodePub route.Vertex,
2985
        sourcePubKey []byte) (bool, error) {
13✔
2986

13✔
2987
        // In order to determine whether this node is publicly advertised within
13✔
2988
        // the graph, we'll need to look at all of its edges and check whether
13✔
2989
        // they extend to any other node than the source node. errDone will be
13✔
2990
        // used to terminate the check early.
13✔
2991
        nodeIsPublic := false
13✔
2992
        errDone := errors.New("done")
13✔
2993
        err := c.forEachNodeChannelTx(tx, nodePub, func(tx kvdb.RTx,
13✔
2994
                info *models.ChannelEdgeInfo, _ *models.ChannelEdgePolicy,
13✔
2995
                _ *models.ChannelEdgePolicy) error {
23✔
2996

10✔
2997
                // If this edge doesn't extend to the source node, we'll
10✔
2998
                // terminate our search as we can now conclude that the node is
10✔
2999
                // publicly advertised within the graph due to the local node
10✔
3000
                // knowing of the current edge.
10✔
3001
                if !bytes.Equal(info.NodeKey1Bytes[:], sourcePubKey) &&
10✔
3002
                        !bytes.Equal(info.NodeKey2Bytes[:], sourcePubKey) {
13✔
3003

3✔
3004
                        nodeIsPublic = true
3✔
3005
                        return errDone
3✔
3006
                }
3✔
3007

3008
                // Since the edge _does_ extend to the source node, we'll also
3009
                // need to ensure that this is a public edge.
3010
                if info.AuthProof != nil {
13✔
3011
                        nodeIsPublic = true
6✔
3012
                        return errDone
6✔
3013
                }
6✔
3014

3015
                // Otherwise, we'll continue our search.
3016
                return nil
1✔
3017
        })
3018
        if err != nil && !errors.Is(err, errDone) {
13✔
3019
                return false, err
×
3020
        }
×
3021

3022
        return nodeIsPublic, nil
13✔
3023
}
3024

3025
// FetchLightningNodeTx attempts to look up a target node by its identity
3026
// public key. If the node isn't found in the database, then
3027
// ErrGraphNodeNotFound is returned. An optional transaction may be provided.
3028
// If none is provided, then a new one will be created.
3029
func (c *KVStore) FetchLightningNodeTx(tx kvdb.RTx, nodePub route.Vertex) (
3030
        *models.LightningNode, error) {
3,652✔
3031

3,652✔
3032
        return c.fetchLightningNode(tx, nodePub)
3,652✔
3033
}
3,652✔
3034

3035
// FetchLightningNode attempts to look up a target node by its identity public
3036
// key. If the node isn't found in the database, then ErrGraphNodeNotFound is
3037
// returned.
3038
func (c *KVStore) FetchLightningNode(_ context.Context,
3039
        nodePub route.Vertex) (*models.LightningNode, error) {
159✔
3040

159✔
3041
        return c.fetchLightningNode(nil, nodePub)
159✔
3042
}
159✔
3043

3044
// fetchLightningNode attempts to look up a target node by its identity public
3045
// key. If the node isn't found in the database, then ErrGraphNodeNotFound is
3046
// returned. An optional transaction may be provided. If none is provided, then
3047
// a new one will be created.
3048
func (c *KVStore) fetchLightningNode(tx kvdb.RTx,
3049
        nodePub route.Vertex) (*models.LightningNode, error) {
3,811✔
3050

3,811✔
3051
        var node *models.LightningNode
3,811✔
3052
        fetch := func(tx kvdb.RTx) error {
7,622✔
3053
                // First grab the nodes bucket which stores the mapping from
3,811✔
3054
                // pubKey to node information.
3,811✔
3055
                nodes := tx.ReadBucket(nodeBucket)
3,811✔
3056
                if nodes == nil {
3,811✔
3057
                        return ErrGraphNotFound
×
3058
                }
×
3059

3060
                // If a key for this serialized public key isn't found, then
3061
                // the target node doesn't exist within the database.
3062
                nodeBytes := nodes.Get(nodePub[:])
3,811✔
3063
                if nodeBytes == nil {
3,826✔
3064
                        return ErrGraphNodeNotFound
15✔
3065
                }
15✔
3066

3067
                // If the node is found, then we can deserialize the node
3068
                // information to return to the user.
3069
                nodeReader := bytes.NewReader(nodeBytes)
3,796✔
3070
                n, err := deserializeLightningNode(nodeReader)
3,796✔
3071
                if err != nil {
3,796✔
3072
                        return err
×
3073
                }
×
3074

3075
                node = &n
3,796✔
3076

3,796✔
3077
                return nil
3,796✔
3078
        }
3079

3080
        if tx == nil {
3,994✔
3081
                err := kvdb.View(
183✔
3082
                        c.db, fetch, func() {
366✔
3083
                                node = nil
183✔
3084
                        },
183✔
3085
                )
3086
                if err != nil {
187✔
3087
                        return nil, err
4✔
3088
                }
4✔
3089

3090
                return node, nil
179✔
3091
        }
3092

3093
        err := fetch(tx)
3,628✔
3094
        if err != nil {
3,639✔
3095
                return nil, err
11✔
3096
        }
11✔
3097

3098
        return node, nil
3,617✔
3099
}
3100

3101
// HasLightningNode determines if the graph has a vertex identified by the
3102
// target node identity public key. If the node exists in the database, a
3103
// timestamp of when the data for the node was last updated is returned along
3104
// with a true boolean. Otherwise, an empty time.Time is returned with a false
3105
// boolean.
3106
func (c *KVStore) HasLightningNode(_ context.Context,
3107
        nodePub [33]byte) (time.Time, bool, error) {
17✔
3108

17✔
3109
        var (
17✔
3110
                updateTime time.Time
17✔
3111
                exists     bool
17✔
3112
        )
17✔
3113

17✔
3114
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
34✔
3115
                // First grab the nodes bucket which stores the mapping from
17✔
3116
                // pubKey to node information.
17✔
3117
                nodes := tx.ReadBucket(nodeBucket)
17✔
3118
                if nodes == nil {
17✔
3119
                        return ErrGraphNotFound
×
3120
                }
×
3121

3122
                // If a key for this serialized public key isn't found, we can
3123
                // exit early.
3124
                nodeBytes := nodes.Get(nodePub[:])
17✔
3125
                if nodeBytes == nil {
20✔
3126
                        exists = false
3✔
3127
                        return nil
3✔
3128
                }
3✔
3129

3130
                // Otherwise we continue on to obtain the time stamp
3131
                // representing the last time the data for this node was
3132
                // updated.
3133
                nodeReader := bytes.NewReader(nodeBytes)
14✔
3134
                node, err := deserializeLightningNode(nodeReader)
14✔
3135
                if err != nil {
14✔
3136
                        return err
×
3137
                }
×
3138

3139
                exists = true
14✔
3140
                updateTime = node.LastUpdate
14✔
3141

14✔
3142
                return nil
14✔
3143
        }, func() {
17✔
3144
                updateTime = time.Time{}
17✔
3145
                exists = false
17✔
3146
        })
17✔
3147
        if err != nil {
17✔
3148
                return time.Time{}, exists, err
×
3149
        }
×
3150

3151
        return updateTime, exists, nil
17✔
3152
}
3153
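
// Illustrative sketch (not part of this file): deciding whether a freshly
// received node announcement is newer than what we already have on disk. The
// msgTimestamp value is hypothetical.
func exampleHasLightningNode(ctx context.Context, store *KVStore,
        nodePub [33]byte, msgTimestamp time.Time) (bool, error) {

        lastUpdate, exists, err := store.HasLightningNode(ctx, nodePub)
        if err != nil {
                return false, err
        }

        // Accept the announcement if we've never seen the node, or if the
        // message carries a newer timestamp than the stored one.
        return !exists || msgTimestamp.After(lastUpdate), nil
}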

3154
// nodeTraversal is used to traverse all channels of a node given by its
3155
// public key and passes channel information into the specified callback.
3156
func nodeTraversal(tx kvdb.RTx, nodePub []byte, db kvdb.Backend,
3157
        cb func(kvdb.RTx, *models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
3158
                *models.ChannelEdgePolicy) error) error {
1,268✔
3159

1,268✔
3160
        traversal := func(tx kvdb.RTx) error {
2,536✔
3161
                edges := tx.ReadBucket(edgeBucket)
1,268✔
3162
                if edges == nil {
1,268✔
3163
                        return ErrGraphNotFound
×
3164
                }
×
3165
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
1,268✔
3166
                if edgeIndex == nil {
1,268✔
3167
                        return ErrGraphNoEdgesFound
×
3168
                }
×
3169

3170
                // In order to reach all the edges for this node, we take
3171
                // advantage of the construction of the key-space within the
3172
                // edge bucket. The keys are stored in the form: pubKey ||
3173
                // chanID. Therefore, starting from a chanID of zero, we can
3174
                // scan forward in the bucket, grabbing all the edges for the
3175
                // node. Once the prefix no longer matches, then we know we're
3176
                // done.
3177
                var nodeStart [33 + 8]byte
1,268✔
3178
                copy(nodeStart[:], nodePub)
1,268✔
3179
                copy(nodeStart[33:], chanStart[:])
1,268✔
3180

1,268✔
3181
                // Starting from the key pubKey || 0, we seek forward in the
1,268✔
3182
                // bucket until the retrieved key no longer has the public key
1,268✔
3183
                // as its prefix. This indicates that we've stepped over into
1,268✔
3184
                // another node's edges, so we can terminate our scan.
1,268✔
3185
                edgeCursor := edges.ReadCursor()
1,268✔
3186
                for nodeEdge, _ := edgeCursor.Seek(nodeStart[:]); bytes.HasPrefix(nodeEdge, nodePub); nodeEdge, _ = edgeCursor.Next() { //nolint:ll
5,111✔
3187
                        // If the prefix still matches, the channel id is
3,843✔
3188
                        // returned in nodeEdge. Channel id is used to lookup
3,843✔
3189
                        // the node at the other end of the channel and both
3,843✔
3190
                        // edge policies.
3,843✔
3191
                        chanID := nodeEdge[33:]
3,843✔
3192
                        edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID)
3,843✔
3193
                        if err != nil {
3,843✔
3194
                                return err
×
3195
                        }
×
3196

3197
                        outgoingPolicy, err := fetchChanEdgePolicy(
3,843✔
3198
                                edges, chanID, nodePub,
3,843✔
3199
                        )
3,843✔
3200
                        if err != nil {
3,843✔
3201
                                return err
×
3202
                        }
×
3203

3204
                        otherNode, err := edgeInfo.OtherNodeKeyBytes(nodePub)
3,843✔
3205
                        if err != nil {
3,843✔
3206
                                return err
×
3207
                        }
×
3208

3209
                        incomingPolicy, err := fetchChanEdgePolicy(
3,843✔
3210
                                edges, chanID, otherNode[:],
3,843✔
3211
                        )
3,843✔
3212
                        if err != nil {
3,843✔
3213
                                return err
×
3214
                        }
×
3215

3216
                        // Finally, we execute the callback.
3217
                        err = cb(tx, &edgeInfo, outgoingPolicy, incomingPolicy)
3,843✔
3218
                        if err != nil {
3,852✔
3219
                                return err
9✔
3220
                        }
9✔
3221
                }
3222

3223
                return nil
1,259✔
3224
        }
3225

3226
        // If no transaction was provided, then we'll create a new transaction
3227
        // to execute the transaction within.
3228
        if tx == nil {
1,297✔
3229
                return kvdb.View(db, traversal, func() {})
58✔
3230
        }
3231

3232
        // Otherwise, we re-use the existing transaction to execute the graph
3233
        // traversal.
3234
        return traversal(tx)
1,239✔
3235
}
3236

3237
// ForEachNodeChannel iterates through all channels of the given node,
3238
// executing the passed callback with an edge info structure and the policies
3239
// of each end of the channel. The first edge policy is the outgoing edge *to*
3240
// the connecting node, while the second is the incoming edge *from* the
3241
// connecting node. If the callback returns an error, then the iteration is
3242
// halted with the error propagated back up to the caller.
3243
//
3244
// Unknown policies are passed into the callback as nil values.
3245
func (c *KVStore) ForEachNodeChannel(nodePub route.Vertex,
3246
        cb func(*models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
3247
                *models.ChannelEdgePolicy) error) error {
6✔
3248

6✔
3249
        return nodeTraversal(nil, nodePub[:], c.db, func(_ kvdb.RTx,
6✔
3250
                info *models.ChannelEdgeInfo, policy,
6✔
3251
                policy2 *models.ChannelEdgePolicy) error {
16✔
3252

10✔
3253
                return cb(info, policy, policy2)
10✔
3254
        })
10✔
3255
}
3256
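
// Illustrative sketch (not part of this file): counting a node's channels and
// noting which are missing an outgoing policy. The nodePub value is
// hypothetical.
func exampleForEachNodeChannel(store *KVStore, nodePub route.Vertex) error {
        var total, missingPolicy int

        err := store.ForEachNodeChannel(nodePub, func(
                info *models.ChannelEdgeInfo,
                outPolicy, inPolicy *models.ChannelEdgePolicy) error {

                total++

                // Unknown policies are passed into the callback as nil.
                if outPolicy == nil {
                        missingPolicy++
                }

                return nil
        })
        if err != nil {
                return err
        }

        fmt.Printf("%d channels, %d without an outgoing policy\n",
                total, missingPolicy)

        return nil
}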

3257
// ForEachSourceNodeChannel iterates through all channels of the source node,
3258
// executing the passed callback on each. The callback is provided with the
3259
// channel's outpoint, whether we have a policy for the channel and the channel
3260
// peer's node information.
3261
func (c *KVStore) ForEachSourceNodeChannel(cb func(chanPoint wire.OutPoint,
3262
        havePolicy bool, otherNode *models.LightningNode) error) error {
1✔
3263

1✔
3264
        return kvdb.View(c.db, func(tx kvdb.RTx) error {
2✔
3265
                nodes := tx.ReadBucket(nodeBucket)
1✔
3266
                if nodes == nil {
1✔
3267
                        return ErrGraphNotFound
×
3268
                }
×
3269

3270
                node, err := sourceNodeWithTx(nodes)
1✔
3271
                if err != nil {
1✔
3272
                        return err
×
3273
                }
×
3274

3275
                return nodeTraversal(
1✔
3276
                        tx, node.PubKeyBytes[:], c.db, func(tx kvdb.RTx,
1✔
3277
                                info *models.ChannelEdgeInfo,
1✔
3278
                                policy, _ *models.ChannelEdgePolicy) error {
3✔
3279

2✔
3280
                                peer, err := c.fetchOtherNode(
2✔
3281
                                        tx, info, node.PubKeyBytes[:],
2✔
3282
                                )
2✔
3283
                                if err != nil {
2✔
3284
                                        return err
×
3285
                                }
×
3286

3287
                                return cb(
2✔
3288
                                        info.ChannelPoint, policy != nil, peer,
2✔
3289
                                )
2✔
3290
                        },
3291
                )
3292
        }, func() {})
1✔
3293
}
3294

3295
// forEachNodeChannelTx iterates through all channels of the given node,
3296
// executing the passed callback with an edge info structure and the policies
3297
// of each end of the channel. The first edge policy is the outgoing edge *to*
3298
// the connecting node, while the second is the incoming edge *from* the
3299
// connecting node. If the callback returns an error, then the iteration is
3300
// halted with the error propagated back up to the caller.
3301
//
3302
// Unknown policies are passed into the callback as nil values.
3303
//
3304
// If the caller wishes to re-use an existing boltdb transaction, then it
3305
// should be passed as the first argument.  Otherwise, the first argument should
3306
// be nil and a fresh transaction will be created to execute the graph
3307
// traversal.
3308
func (c *KVStore) forEachNodeChannelTx(tx kvdb.RTx,
3309
        nodePub route.Vertex, cb func(kvdb.RTx, *models.ChannelEdgeInfo,
3310
                *models.ChannelEdgePolicy,
3311
                *models.ChannelEdgePolicy) error) error {
998✔
3312

998✔
3313
        return nodeTraversal(tx, nodePub[:], c.db, cb)
998✔
3314
}
998✔
3315

3316
// fetchOtherNode attempts to fetch the full LightningNode that's opposite of
3317
// the target node in the channel. This is useful when one knows the pubkey of
3318
// one of the nodes, and wishes to obtain the full LightningNode for the other
3319
// end of the channel.
3320
func (c *KVStore) fetchOtherNode(tx kvdb.RTx,
3321
        channel *models.ChannelEdgeInfo, thisNodeKey []byte) (
3322
        *models.LightningNode, error) {
2✔
3323

2✔
3324
        // Ensure that the node passed in is actually a member of the channel.
2✔
3325
        var targetNodeBytes [33]byte
2✔
3326
        switch {
2✔
3327
        case bytes.Equal(channel.NodeKey1Bytes[:], thisNodeKey):
2✔
3328
                targetNodeBytes = channel.NodeKey2Bytes
2✔
3329
        case bytes.Equal(channel.NodeKey2Bytes[:], thisNodeKey):
×
3330
                targetNodeBytes = channel.NodeKey1Bytes
×
3331
        default:
×
3332
                return nil, fmt.Errorf("node not participating in this channel")
×
3333
        }
3334

3335
        var targetNode *models.LightningNode
2✔
3336
        fetchNodeFunc := func(tx kvdb.RTx) error {
4✔
3337
                // First grab the nodes bucket which stores the mapping from
2✔
3338
                // pubKey to node information.
2✔
3339
                nodes := tx.ReadBucket(nodeBucket)
2✔
3340
                if nodes == nil {
2✔
3341
                        return ErrGraphNotFound
×
3342
                }
×
3343

3344
                node, err := fetchLightningNode(nodes, targetNodeBytes[:])
2✔
3345
                if err != nil {
2✔
3346
                        return err
×
3347
                }
×
3348

3349
                targetNode = &node
2✔
3350

2✔
3351
                return nil
2✔
3352
        }
3353

3354
        // If the transaction is nil, then we'll need to create a new one,
3355
        // otherwise we can use the existing db transaction.
3356
        var err error
2✔
3357
        if tx == nil {
2✔
3358
                err = kvdb.View(c.db, fetchNodeFunc, func() {
×
3359
                        targetNode = nil
×
3360
                })
×
3361
        } else {
2✔
3362
                err = fetchNodeFunc(tx)
2✔
3363
        }
2✔
3364

3365
        return targetNode, err
2✔
3366
}
3367

3368
// computeEdgePolicyKeys is a helper function that can be used to compute the
3369
// keys used to index the channel edge policy info for the two nodes of the
3370
// edge. The keys for node 1 and node 2 are returned respectively.
3371
func computeEdgePolicyKeys(info *models.ChannelEdgeInfo) ([]byte, []byte) {
22✔
3372
        var (
22✔
3373
                node1Key [33 + 8]byte
22✔
3374
                node2Key [33 + 8]byte
22✔
3375
        )
22✔
3376

22✔
3377
        copy(node1Key[:], info.NodeKey1Bytes[:])
22✔
3378
        copy(node2Key[:], info.NodeKey2Bytes[:])
22✔
3379

22✔
3380
        byteOrder.PutUint64(node1Key[33:], info.ChannelID)
22✔
3381
        byteOrder.PutUint64(node2Key[33:], info.ChannelID)
22✔
3382

22✔
3383
        return node1Key[:], node2Key[:]
22✔
3384
}
22✔
3385

3386
// FetchChannelEdgesByOutpoint attempts to lookup the two directed edges for
3387
// the channel identified by the funding outpoint. If the channel can't be
3388
// found, then ErrEdgeNotFound is returned. A struct which houses the general
3389
// information for the channel itself is returned as well as two structs that
3390
// contain the routing policies for the channel in either direction.
3391
func (c *KVStore) FetchChannelEdgesByOutpoint(op *wire.OutPoint) (
3392
        *models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
3393
        *models.ChannelEdgePolicy, error) {
11✔
3394

11✔
3395
        var (
11✔
3396
                edgeInfo *models.ChannelEdgeInfo
11✔
3397
                policy1  *models.ChannelEdgePolicy
11✔
3398
                policy2  *models.ChannelEdgePolicy
11✔
3399
        )
11✔
3400

11✔
3401
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
22✔
3402
                // First, grab the node bucket. This will be used to populate
11✔
3403
                // the Node pointers in each edge read from disk.
11✔
3404
                nodes := tx.ReadBucket(nodeBucket)
11✔
3405
                if nodes == nil {
11✔
3406
                        return ErrGraphNotFound
×
3407
                }
×
3408

3409
                // Next, grab the edge bucket which stores the edges, and also
3410
                // the index itself so we can group the directed edges together
3411
                // logically.
3412
                edges := tx.ReadBucket(edgeBucket)
11✔
3413
                if edges == nil {
11✔
3414
                        return ErrGraphNoEdgesFound
×
3415
                }
×
3416
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
11✔
3417
                if edgeIndex == nil {
11✔
3418
                        return ErrGraphNoEdgesFound
×
3419
                }
×
3420

3421
                // If the channel's outpoint doesn't exist within the outpoint
3422
                // index, then the edge does not exist.
3423
                chanIndex := edges.NestedReadBucket(channelPointBucket)
11✔
3424
                if chanIndex == nil {
11✔
3425
                        return ErrGraphNoEdgesFound
×
3426
                }
×
3427
                var b bytes.Buffer
11✔
3428
                if err := WriteOutpoint(&b, op); err != nil {
11✔
3429
                        return err
×
3430
                }
×
3431
                chanID := chanIndex.Get(b.Bytes())
11✔
3432
                if chanID == nil {
21✔
3433
                        return fmt.Errorf("%w: op=%v", ErrEdgeNotFound, op)
10✔
3434
                }
10✔
3435

3436
                // If the channel is found to exist, then we'll first retrieve
3437
                // the general information for the channel.
3438
                edge, err := fetchChanEdgeInfo(edgeIndex, chanID)
1✔
3439
                if err != nil {
1✔
3440
                        return fmt.Errorf("%w: chanID=%x", err, chanID)
×
3441
                }
×
3442
                edgeInfo = &edge
1✔
3443

1✔
3444
                // Once we have the information about the channel's parameters,
1✔
3445
                // we'll fetch the routing policies for each for the directed
1✔
3446
                // edges.
1✔
3447
                e1, e2, err := fetchChanEdgePolicies(edgeIndex, edges, chanID)
1✔
3448
                if err != nil {
1✔
3449
                        return fmt.Errorf("failed to find policy: %w", err)
×
3450
                }
×
3451

3452
                policy1 = e1
1✔
3453
                policy2 = e2
1✔
3454

1✔
3455
                return nil
1✔
3456
        }, func() {
11✔
3457
                edgeInfo = nil
11✔
3458
                policy1 = nil
11✔
3459
                policy2 = nil
11✔
3460
        })
11✔
3461
        if err != nil {
21✔
3462
                return nil, nil, nil, err
10✔
3463
        }
10✔
3464

3465
        return edgeInfo, policy1, policy2, nil
1✔
3466
}
3467

3468
// FetchChannelEdgesByID attempts to lookup the two directed edges for the
3469
// channel identified by the channel ID. If the channel can't be found, then
3470
// ErrEdgeNotFound is returned. A struct which houses the general information
3471
// for the channel itself is returned as well as two structs that contain the
3472
// routing policies for the channel in either direction.
3473
//
3474
// ErrZombieEdge can be returned if the edge is currently marked as a zombie
3475
// within the database. In this case, the ChannelEdgePolicy's will be nil, and
3476
// the ChannelEdgeInfo will only include the public keys of each node.
3477
func (c *KVStore) FetchChannelEdgesByID(chanID uint64) (
3478
        *models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
3479
        *models.ChannelEdgePolicy, error) {
2,689✔
3480

2,689✔
3481
        var (
2,689✔
3482
                edgeInfo  *models.ChannelEdgeInfo
2,689✔
3483
                policy1   *models.ChannelEdgePolicy
2,689✔
3484
                policy2   *models.ChannelEdgePolicy
2,689✔
3485
                channelID [8]byte
2,689✔
3486
        )
2,689✔
3487

2,689✔
3488
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
5,378✔
3489
                // First, grab the node bucket. This will be used to populate
2,689✔
3490
                // the Node pointers in each edge read from disk.
2,689✔
3491
                nodes := tx.ReadBucket(nodeBucket)
2,689✔
3492
                if nodes == nil {
2,689✔
3493
                        return ErrGraphNotFound
×
3494
                }
×
3495

3496
                // Next, grab the edge bucket which stores the edges, and also
3497
                // the index itself so we can group the directed edges together
3498
                // logically.
3499
                edges := tx.ReadBucket(edgeBucket)
2,689✔
3500
                if edges == nil {
2,689✔
3501
                        return ErrGraphNoEdgesFound
×
3502
                }
×
3503
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
2,689✔
3504
                if edgeIndex == nil {
2,689✔
3505
                        return ErrGraphNoEdgesFound
×
3506
                }
×
3507

3508
                byteOrder.PutUint64(channelID[:], chanID)
2,689✔
3509

2,689✔
3510
                // Now, attempt to fetch edge.
2,689✔
3511
                edge, err := fetchChanEdgeInfo(edgeIndex, channelID[:])
2,689✔
3512

2,689✔
3513
                // If it doesn't exist, we'll quickly check our zombie index to
2,689✔
3514
                // see if we've previously marked it as such.
2,689✔
3515
                if errors.Is(err, ErrEdgeNotFound) {
2,691✔
3516
                        // If the zombie index doesn't exist, or the edge is not
2✔
3517
                        // marked as a zombie within it, then we'll return the
2✔
3518
                        // original ErrEdgeNotFound error.
2✔
3519
                        zombieIndex := edges.NestedReadBucket(zombieBucket)
2✔
3520
                        if zombieIndex == nil {
2✔
3521
                                return ErrEdgeNotFound
×
3522
                        }
×
3523

3524
                        isZombie, pubKey1, pubKey2 := isZombieEdge(
2✔
3525
                                zombieIndex, chanID,
2✔
3526
                        )
2✔
3527
                        if !isZombie {
2✔
3528
                                return ErrEdgeNotFound
×
3529
                        }
×
3530

3531
                        // Otherwise, the edge is marked as a zombie, so we'll
3532
                        // populate the edge info with the public keys of each
3533
                        // party as this is the only information we have about
3534
                        // it and return an error signaling so.
3535
                        edgeInfo = &models.ChannelEdgeInfo{
2✔
3536
                                NodeKey1Bytes: pubKey1,
2✔
3537
                                NodeKey2Bytes: pubKey2,
2✔
3538
                        }
2✔
3539

2✔
3540
                        return ErrZombieEdge
2✔
3541
                }
3542

3543
                // Otherwise, we'll just return the error if any.
3544
                if err != nil {
2,687✔
3545
                        return err
×
3546
                }
×
3547

3548
                edgeInfo = &edge
2,687✔
3549

2,687✔
3550
                // Then we'll attempt to fetch the accompanying policies of this
2,687✔
3551
                // edge.
2,687✔
3552
                e1, e2, err := fetchChanEdgePolicies(
2,687✔
3553
                        edgeIndex, edges, channelID[:],
2,687✔
3554
                )
2,687✔
3555
                if err != nil {
2,687✔
3556
                        return err
×
3557
                }
×
3558

3559
                policy1 = e1
2,687✔
3560
                policy2 = e2
2,687✔
3561

2,687✔
3562
                return nil
2,687✔
3563
        }, func() {
2,689✔
3564
                edgeInfo = nil
2,689✔
3565
                policy1 = nil
2,689✔
3566
                policy2 = nil
2,689✔
3567
        })
2,689✔
3568
        if errors.Is(err, ErrZombieEdge) {
2,691✔
3569
                return edgeInfo, nil, nil, err
2✔
3570
        }
2✔
3571
        if err != nil {
2,687✔
3572
                return nil, nil, nil, err
×
3573
        }
×
3574

3575
        return edgeInfo, policy1, policy2, nil
2,687✔
3576
}
3577
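
// Illustrative sketch (not part of this file): handling the zombie case when
// looking up a channel by ID. On ErrZombieEdge only the node public keys in
// the returned ChannelEdgeInfo are populated.
func exampleFetchChannelEdgesByID(store *KVStore, chanID uint64) error {
        info, policy1, policy2, err := store.FetchChannelEdgesByID(chanID)
        switch {
        case errors.Is(err, ErrZombieEdge):
                // The channel was marked as a zombie: info carries the two
                // node keys, but both policies are nil.
                fmt.Printf("zombie channel between %x and %x\n",
                        info.NodeKey1Bytes[:], info.NodeKey2Bytes[:])

                return nil

        case err != nil:
                return err
        }

        fmt.Printf("chan=%d policy1=%v policy2=%v\n",
                info.ChannelID, policy1 != nil, policy2 != nil)

        return nil
}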

3578
// IsPublicNode is a helper method that determines whether the node with the
3579
// given public key is seen as a public node in the graph from the graph's
3580
// source node's point of view.
3581
func (c *KVStore) IsPublicNode(pubKey [33]byte) (bool, error) {
13✔
3582
        var nodeIsPublic bool
13✔
3583
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
26✔
3584
                nodes := tx.ReadBucket(nodeBucket)
13✔
3585
                if nodes == nil {
13✔
3586
                        return ErrGraphNodesNotFound
×
3587
                }
×
3588
                ourPubKey := nodes.Get(sourceKey)
13✔
3589
                if ourPubKey == nil {
13✔
3590
                        return ErrSourceNodeNotSet
×
3591
                }
×
3592
                node, err := fetchLightningNode(nodes, pubKey[:])
13✔
3593
                if err != nil {
13✔
3594
                        return err
×
3595
                }
×
3596

3597
                nodeIsPublic, err = c.isPublic(tx, node.PubKeyBytes, ourPubKey)
13✔
3598

13✔
3599
                return err
13✔
3600
        }, func() {
13✔
3601
                nodeIsPublic = false
13✔
3602
        })
13✔
3603
        if err != nil {
13✔
3604
                return false, err
×
3605
        }
×
3606

3607
        return nodeIsPublic, nil
13✔
3608
}
3609

3610
// genMultiSigP2WSH generates the p2wsh'd multisig script for 2 of 2 pubkeys.
3611
func genMultiSigP2WSH(aPub, bPub []byte) ([]byte, error) {
46✔
3612
        witnessScript, err := input.GenMultiSigScript(aPub, bPub)
46✔
3613
        if err != nil {
46✔
3614
                return nil, err
×
3615
        }
×
3616

3617
        // With the witness script generated, we'll now turn it into a p2wsh
3618
        // script:
3619
        //  * OP_0 <sha256(script)>
3620
        bldr := txscript.NewScriptBuilder(
46✔
3621
                txscript.WithScriptAllocSize(input.P2WSHSize),
46✔
3622
        )
46✔
3623
        bldr.AddOp(txscript.OP_0)
46✔
3624
        scriptHash := sha256.Sum256(witnessScript)
46✔
3625
        bldr.AddData(scriptHash[:])
46✔
3626

46✔
3627
        return bldr.Script()
46✔
3628
}
3629
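As a minimal illustration (not part of the source file) of the shape of the script the function above produces: a P2WSH output script is simply OP_0 followed by a 32-byte push of the SHA-256 of the witness script. The sketch below builds that layout with the standard library only; the witness script bytes are hypothetical.

// Sketch only: hand-rolled P2WSH output script, equivalent in shape to the
// ScriptBuilder output above (OP_0, then a 32-byte data push of the hash).
package main

import (
	"crypto/sha256"
	"fmt"
)

func p2wshPkScript(witnessScript []byte) []byte {
	hash := sha256.Sum256(witnessScript)

	// 0x00 is OP_0 and 0x20 is the opcode for a 32-byte data push.
	pkScript := make([]byte, 0, 34)
	pkScript = append(pkScript, 0x00, 0x20)
	pkScript = append(pkScript, hash[:]...)

	return pkScript
}

func main() {
	// Hypothetical witness script bytes, for illustration only.
	fmt.Printf("%x\n", p2wshPkScript([]byte{0x51, 0x21}))
}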

3630
// EdgePoint couples the outpoint of a channel with the funding script that it
3631
// creates. The FilteredChainView will use this to watch for spends of this
3632
// edge point on chain. We require both of these values as depending on the
3633
// concrete implementation, either the pkScript, or the out point will be used.
3634
type EdgePoint struct {
3635
        // FundingPkScript is the p2wsh multi-sig script of the target channel.
3636
        FundingPkScript []byte
3637

3638
        // OutPoint is the outpoint of the target channel.
3639
        OutPoint wire.OutPoint
3640
}
3641

3642
// String returns a human readable version of the target EdgePoint. We return
3643
// the outpoint directly as it is enough to uniquely identify the edge point.
3644
func (e *EdgePoint) String() string {
×
3645
        return e.OutPoint.String()
×
3646
}
×
3647

3648
// ChannelView returns the verifiable edge information for each active channel
3649
// within the known channel graph. The set of UTXOs (along with their scripts)
3650
// returned are the ones that need to be watched on chain to detect channel
3651
// closes on the resident blockchain.
3652
func (c *KVStore) ChannelView() ([]EdgePoint, error) {
22✔
3653
        var edgePoints []EdgePoint
22✔
3654
        if err := kvdb.View(c.db, func(tx kvdb.RTx) error {
44✔
3655
                // We're going to iterate over the entire channel index, so
22✔
3656
                // we'll need to fetch the edgeBucket to get to the index as
22✔
3657
                // it's a sub-bucket.
22✔
3658
                edges := tx.ReadBucket(edgeBucket)
22✔
3659
                if edges == nil {
22✔
3660
                        return ErrGraphNoEdgesFound
×
3661
                }
×
3662
                chanIndex := edges.NestedReadBucket(channelPointBucket)
22✔
3663
                if chanIndex == nil {
22✔
3664
                        return ErrGraphNoEdgesFound
×
3665
                }
×
3666
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
22✔
3667
                if edgeIndex == nil {
22✔
3668
                        return ErrGraphNoEdgesFound
×
3669
                }
×
3670

3671
                // Once we have the proper bucket, we'll range over each key
3672
                // (which is the channel point for the channel) and decode it,
3673
                // accumulating each entry.
3674
                return chanIndex.ForEach(
22✔
3675
                        func(chanPointBytes, chanID []byte) error {
64✔
3676
                                chanPointReader := bytes.NewReader(
42✔
3677
                                        chanPointBytes,
42✔
3678
                                )
42✔
3679

42✔
3680
                                var chanPoint wire.OutPoint
42✔
3681
                                err := ReadOutpoint(chanPointReader, &chanPoint)
42✔
3682
                                if err != nil {
42✔
3683
                                        return err
×
3684
                                }
×
3685

3686
                                edgeInfo, err := fetchChanEdgeInfo(
42✔
3687
                                        edgeIndex, chanID,
42✔
3688
                                )
42✔
3689
                                if err != nil {
42✔
3690
                                        return err
×
3691
                                }
×
3692

3693
                                pkScript, err := genMultiSigP2WSH(
42✔
3694
                                        edgeInfo.BitcoinKey1Bytes[:],
42✔
3695
                                        edgeInfo.BitcoinKey2Bytes[:],
42✔
3696
                                )
42✔
3697
                                if err != nil {
42✔
3698
                                        return err
×
3699
                                }
×
3700

3701
                                edgePoints = append(edgePoints, EdgePoint{
42✔
3702
                                        FundingPkScript: pkScript,
42✔
3703
                                        OutPoint:        chanPoint,
42✔
3704
                                })
42✔
3705

42✔
3706
                                return nil
42✔
3707
                        },
3708
                )
3709
        }, func() {
22✔
3710
                edgePoints = nil
22✔
3711
        }); err != nil {
22✔
3712
                return nil, err
×
3713
        }
×
3714

3715
        return edgePoints, nil
22✔
3716
}
3717

3718
// MarkEdgeZombie attempts to mark a channel identified by its channel ID as a
3719
// zombie. This method is used on an ad-hoc basis, when channels need to be
3720
// marked as zombies outside the normal pruning cycle.
3721
func (c *KVStore) MarkEdgeZombie(chanID uint64,
3722
        pubKey1, pubKey2 [33]byte) error {
128✔
3723

128✔
3724
        c.cacheMu.Lock()
128✔
3725
        defer c.cacheMu.Unlock()
128✔
3726

128✔
3727
        err := kvdb.Batch(c.db, func(tx kvdb.RwTx) error {
256✔
3728
                edges := tx.ReadWriteBucket(edgeBucket)
128✔
3729
                if edges == nil {
128✔
3730
                        return ErrGraphNoEdgesFound
×
3731
                }
×
3732
                zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
128✔
3733
                if err != nil {
128✔
3734
                        return fmt.Errorf("unable to create zombie "+
×
3735
                                "bucket: %w", err)
×
3736
                }
×
3737

3738
                return markEdgeZombie(zombieIndex, chanID, pubKey1, pubKey2)
128✔
3739
        })
3740
        if err != nil {
128✔
3741
                return err
×
3742
        }
×
3743

3744
        c.rejectCache.remove(chanID)
128✔
3745
        c.chanCache.remove(chanID)
128✔
3746

128✔
3747
        return nil
128✔
3748
}
3749

3750
// markEdgeZombie marks an edge as a zombie within our zombie index. The public
3751
// keys should represent the node public keys of the two parties involved in the
3752
// edge.
3753
func markEdgeZombie(zombieIndex kvdb.RwBucket, chanID uint64, pubKey1,
3754
        pubKey2 [33]byte) error {
151✔
3755

151✔
3756
        var k [8]byte
151✔
3757
        byteOrder.PutUint64(k[:], chanID)
151✔
3758

151✔
3759
        var v [66]byte
151✔
3760
        copy(v[:33], pubKey1[:])
151✔
3761
        copy(v[33:], pubKey2[:])
151✔
3762

151✔
3763
        return zombieIndex.Put(k[:], v[:])
151✔
3764
}
151✔
3765
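To make the on-disk layout of a zombie entry concrete, the standalone sketch below (not part of the source) mirrors the encoding used by markEdgeZombie and the decoding used by isZombieEdge: an 8-byte big-endian channel ID key, and a 66-byte value holding the two compressed node public keys.

// Sketch only: zombie-index entry layout. Assumes the store's byteOrder is
// big-endian, as elsewhere in this file.
package main

import (
	"encoding/binary"
	"fmt"
)

func encodeZombieEntry(chanID uint64, pub1, pub2 [33]byte) ([8]byte, [66]byte) {
	var k [8]byte
	binary.BigEndian.PutUint64(k[:], chanID)

	var v [66]byte
	copy(v[:33], pub1[:])
	copy(v[33:], pub2[:])

	return k, v
}

func decodeZombieEntry(v [66]byte) (pub1, pub2 [33]byte) {
	copy(pub1[:], v[:33])
	copy(pub2[:], v[33:])

	return pub1, pub2
}

func main() {
	var a, b [33]byte
	a[0], b[0] = 0x02, 0x03 // hypothetical compressed-key prefixes

	k, v := encodeZombieEntry(123456, a, b)
	p1, p2 := decodeZombieEntry(v)
	fmt.Printf("key=%x pub1[0]=%02x pub2[0]=%02x\n", k, p1[0], p2[0])
}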

3766
// MarkEdgeLive clears an edge from our zombie index, deeming it as live.
3767
func (c *KVStore) MarkEdgeLive(chanID uint64) error {
24✔
3768
        c.cacheMu.Lock()
24✔
3769
        defer c.cacheMu.Unlock()
24✔
3770

24✔
3771
        return c.markEdgeLiveUnsafe(nil, chanID)
24✔
3772
}
24✔
3773

3774
// markEdgeLiveUnsafe clears an edge from the zombie index. This method can be
3775
// called with an existing kvdb.RwTx or the argument can be set to nil in which
3776
// case a new transaction will be created.
3777
//
3778
// NOTE: this method MUST only be called if the cacheMu has already been
3779
// acquired.
3780
func (c *KVStore) markEdgeLiveUnsafe(tx kvdb.RwTx, chanID uint64) error {
24✔
3781
        dbFn := func(tx kvdb.RwTx) error {
48✔
3782
                edges := tx.ReadWriteBucket(edgeBucket)
24✔
3783
                if edges == nil {
24✔
3784
                        return ErrGraphNoEdgesFound
×
3785
                }
×
3786
                zombieIndex := edges.NestedReadWriteBucket(zombieBucket)
24✔
3787
                if zombieIndex == nil {
24✔
3788
                        return nil
×
3789
                }
×
3790

3791
                var k [8]byte
24✔
3792
                byteOrder.PutUint64(k[:], chanID)
24✔
3793

24✔
3794
                if len(zombieIndex.Get(k[:])) == 0 {
25✔
3795
                        return ErrZombieEdgeNotFound
1✔
3796
                }
1✔
3797

3798
                return zombieIndex.Delete(k[:])
23✔
3799
        }
3800

3801
        // If the transaction is nil, we'll create a new one. Otherwise, we use
3802
        // the existing transaction.
3803
        var err error
24✔
3804
        if tx == nil {
48✔
3805
                err = kvdb.Update(c.db, dbFn, func() {})
48✔
3806
        } else {
×
3807
                err = dbFn(tx)
×
3808
        }
×
3809
        if err != nil {
25✔
3810
                return err
1✔
3811
        }
1✔
3812

3813
        c.rejectCache.remove(chanID)
23✔
3814
        c.chanCache.remove(chanID)
23✔
3815

23✔
3816
        return nil
23✔
3817
}
3818
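The nil-transaction handling above is a small reusable pattern: wrap the mutation in a closure, then either run it inside the caller's open transaction or inside a fresh one. The sketch below uses hypothetical stand-in types (not lnd's API) to show the same shape in isolation.

// Sketch only: "use the caller's tx if supplied, otherwise open one" pattern.
package main

import "fmt"

// Tx and Store are hypothetical stand-ins for a kvdb transaction and the
// backing database.
type Tx struct{ fresh bool }

type Store struct{}

// Update opens a fresh transaction, runs fn and commits on success.
func (s *Store) Update(fn func(*Tx) error) error {
	return fn(&Tx{fresh: true})
}

func runInTx(db *Store, tx *Tx, fn func(*Tx) error) error {
	if tx != nil {
		// The caller already holds a transaction: run inside it so the
		// change commits or aborts together with the caller's work.
		return fn(tx)
	}

	// No transaction supplied: open one just for this change.
	return db.Update(fn)
}

func main() {
	db := &Store{}
	_ = runInTx(db, nil, func(tx *Tx) error {
		fmt.Println("fresh tx:", tx.fresh)
		return nil
	})
}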

3819
// IsZombieEdge returns whether the edge is considered zombie. If it is a
3820
// zombie, then the two node public keys corresponding to this edge are also
3821
// returned.
3822
func (c *KVStore) IsZombieEdge(chanID uint64) (bool, [33]byte, [33]byte,
3823
        error) {
14✔
3824

14✔
3825
        var (
14✔
3826
                isZombie         bool
14✔
3827
                pubKey1, pubKey2 [33]byte
14✔
3828
        )
14✔
3829

14✔
3830
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
28✔
3831
                edges := tx.ReadBucket(edgeBucket)
14✔
3832
                if edges == nil {
14✔
3833
                        return ErrGraphNoEdgesFound
×
3834
                }
×
3835
                zombieIndex := edges.NestedReadBucket(zombieBucket)
14✔
3836
                if zombieIndex == nil {
14✔
3837
                        return nil
×
3838
                }
×
3839

3840
                isZombie, pubKey1, pubKey2 = isZombieEdge(zombieIndex, chanID)
14✔
3841

14✔
3842
                return nil
14✔
3843
        }, func() {
14✔
3844
                isZombie = false
14✔
3845
                pubKey1 = [33]byte{}
14✔
3846
                pubKey2 = [33]byte{}
14✔
3847
        })
14✔
3848
        if err != nil {
14✔
3849
                return false, [33]byte{}, [33]byte{}, fmt.Errorf("%w: %w "+
×
3850
                        "(chanID=%d)", ErrCantCheckIfZombieEdgeStr, err, chanID)
×
3851
        }
×
3852

3853
        return isZombie, pubKey1, pubKey2, nil
14✔
3854
}
3855

3856
// isZombieEdge returns whether an entry exists for the given channel in the
3857
// zombie index. If an entry exists, then the two node public keys corresponding
3858
// to this edge are also returned.
3859
func isZombieEdge(zombieIndex kvdb.RBucket,
3860
        chanID uint64) (bool, [33]byte, [33]byte) {
193✔
3861

193✔
3862
        var k [8]byte
193✔
3863
        byteOrder.PutUint64(k[:], chanID)
193✔
3864

193✔
3865
        v := zombieIndex.Get(k[:])
193✔
3866
        if v == nil {
297✔
3867
                return false, [33]byte{}, [33]byte{}
104✔
3868
        }
104✔
3869

3870
        var pubKey1, pubKey2 [33]byte
89✔
3871
        copy(pubKey1[:], v[:33])
89✔
3872
        copy(pubKey2[:], v[33:])
89✔
3873

89✔
3874
        return true, pubKey1, pubKey2
89✔
3875
}
3876

3877
// NumZombies returns the current number of zombie channels in the graph.
3878
func (c *KVStore) NumZombies() (uint64, error) {
4✔
3879
        var numZombies uint64
4✔
3880
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
8✔
3881
                edges := tx.ReadBucket(edgeBucket)
4✔
3882
                if edges == nil {
4✔
3883
                        return nil
×
3884
                }
×
3885
                zombieIndex := edges.NestedReadBucket(zombieBucket)
4✔
3886
                if zombieIndex == nil {
4✔
3887
                        return nil
×
3888
                }
×
3889

3890
                return zombieIndex.ForEach(func(_, _ []byte) error {
6✔
3891
                        numZombies++
2✔
3892
                        return nil
2✔
3893
                })
2✔
3894
        }, func() {
4✔
3895
                numZombies = 0
4✔
3896
        })
4✔
3897
        if err != nil {
4✔
3898
                return 0, err
×
3899
        }
×
3900

3901
        return numZombies, nil
4✔
3902
}
3903

3904
// PutClosedScid stores a SCID for a closed channel in the database. This is so
3905
// that we can ignore channel announcements that we know to be closed without
3906
// having to validate them and fetch a block.
3907
func (c *KVStore) PutClosedScid(scid lnwire.ShortChannelID) error {
1✔
3908
        return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
2✔
3909
                closedScids, err := tx.CreateTopLevelBucket(closedScidBucket)
1✔
3910
                if err != nil {
1✔
3911
                        return err
×
3912
                }
×
3913

3914
                var k [8]byte
1✔
3915
                byteOrder.PutUint64(k[:], scid.ToUint64())
1✔
3916

1✔
3917
                return closedScids.Put(k[:], []byte{})
1✔
3918
        }, func() {})
1✔
3919
}
3920

3921
// IsClosedScid checks whether a channel identified by the passed in scid is
3922
// closed. This helps avoid having to perform expensive validation checks.
3923
// TODO: Add an LRU cache to cut down on disc reads.
3924
func (c *KVStore) IsClosedScid(scid lnwire.ShortChannelID) (bool, error) {
2✔
3925
        var isClosed bool
2✔
3926
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
4✔
3927
                closedScids := tx.ReadBucket(closedScidBucket)
2✔
3928
                if closedScids == nil {
2✔
3929
                        return ErrClosedScidsNotFound
×
3930
                }
×
3931

3932
                var k [8]byte
2✔
3933
                byteOrder.PutUint64(k[:], scid.ToUint64())
2✔
3934

2✔
3935
                if closedScids.Get(k[:]) != nil {
3✔
3936
                        isClosed = true
1✔
3937
                        return nil
1✔
3938
                }
1✔
3939

3940
                return nil
1✔
3941
        }, func() {
2✔
3942
                isClosed = false
2✔
3943
        })
2✔
3944
        if err != nil {
2✔
3945
                return false, err
×
3946
        }
×
3947

3948
        return isClosed, nil
2✔
3949
}
3950
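The closed-SCID bucket stores only keys: the big-endian encoding of the short channel ID, with an empty value. As a standalone sketch (assuming big-endian byte order and the standard SCID packing of block height, transaction index and output index):

// Sketch only: the 8-byte key written by PutClosedScid and looked up by
// IsClosedScid.
package main

import (
	"encoding/binary"
	"fmt"
)

func closedScidKey(scid uint64) [8]byte {
	var k [8]byte
	binary.BigEndian.PutUint64(k[:], scid)

	return k
}

func main() {
	// Hypothetical SCID: block height 700000, tx index 42, output 1,
	// packed the same way lnwire.ShortChannelID.ToUint64 does.
	scid := uint64(700000)<<40 | uint64(42)<<16 | uint64(1)
	fmt.Printf("key=%x\n", closedScidKey(scid))
}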

3951
// GraphSession will provide the call-back with access to a NodeTraverser
3952
// instance which can be used to perform queries against the channel graph.
3953
func (c *KVStore) GraphSession(cb func(graph NodeTraverser) error) error {
54✔
3954
        return c.db.View(func(tx walletdb.ReadTx) error {
108✔
3955
                return cb(&nodeTraverserSession{
54✔
3956
                        db: c,
54✔
3957
                        tx: tx,
54✔
3958
                })
54✔
3959
        }, func() {})
108✔
3960
}
3961

3962
// nodeTraverserSession implements the NodeTraverser interface but with a
3963
// backing read only transaction for a consistent view of the graph.
3964
type nodeTraverserSession struct {
3965
        tx kvdb.RTx
3966
        db *KVStore
3967
}
3968

3969
// ForEachNodeDirectedChannel calls the callback for every channel of the given
3970
// node.
3971
//
3972
// NOTE: Part of the NodeTraverser interface.
3973
func (c *nodeTraverserSession) ForEachNodeDirectedChannel(nodePub route.Vertex,
3974
        cb func(channel *DirectedChannel) error) error {
240✔
3975

240✔
3976
        return c.db.forEachNodeDirectedChannel(c.tx, nodePub, cb)
240✔
3977
}
240✔
3978

3979
// FetchNodeFeatures returns the features of the given node. If the node is
3980
// unknown, assume no additional features are supported.
3981
//
3982
// NOTE: Part of the NodeTraverser interface.
3983
func (c *nodeTraverserSession) FetchNodeFeatures(nodePub route.Vertex) (
3984
        *lnwire.FeatureVector, error) {
254✔
3985

254✔
3986
        return c.db.fetchNodeFeatures(c.tx, nodePub)
254✔
3987
}
254✔
3988
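A usage sketch for the session types above (assuming store is an already-opened *KVStore and selfNode is a route.Vertex of interest): every query made inside the callback runs against the same read transaction, so the results form a consistent snapshot of the graph.

// Sketch only: queries inside the callback share one read transaction.
err := store.GraphSession(func(graph NodeTraverser) error {
	features, err := graph.FetchNodeFeatures(selfNode)
	if err != nil {
		return err
	}
	_ = features // e.g. gate routing decisions on supported features.

	return graph.ForEachNodeDirectedChannel(selfNode,
		func(channel *DirectedChannel) error {
			// Inspect each channel reachable from selfNode here.
			return nil
		},
	)
})
if err != nil {
	// Handle the session or callback error.
}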

3989
func putLightningNode(nodeBucket, aliasBucket, updateIndex kvdb.RwBucket,
3990
        node *models.LightningNode) error {
907✔
3991

907✔
3992
        var (
907✔
3993
                scratch [16]byte
907✔
3994
                b       bytes.Buffer
907✔
3995
        )
907✔
3996

907✔
3997
        pub, err := node.PubKey()
907✔
3998
        if err != nil {
907✔
3999
                return err
×
4000
        }
×
4001
        nodePub := pub.SerializeCompressed()
907✔
4002

907✔
4003
        // If the node has the update time set, write it, else write 0.
907✔
4004
        updateUnix := uint64(0)
907✔
4005
        if node.LastUpdate.Unix() > 0 {
1,679✔
4006
                updateUnix = uint64(node.LastUpdate.Unix())
772✔
4007
        }
772✔
4008

4009
        byteOrder.PutUint64(scratch[:8], updateUnix)
907✔
4010
        if _, err := b.Write(scratch[:8]); err != nil {
907✔
4011
                return err
×
4012
        }
×
4013

4014
        if _, err := b.Write(nodePub); err != nil {
907✔
4015
                return err
×
4016
        }
×
4017

4018
        // If we got a node announcement for this node, we will have the rest
4019
        // of the data available. If not, we don't have more data to write.
4020
        if !node.HaveNodeAnnouncement {
989✔
4021
                // Write HaveNodeAnnouncement=0.
82✔
4022
                byteOrder.PutUint16(scratch[:2], 0)
82✔
4023
                if _, err := b.Write(scratch[:2]); err != nil {
82✔
4024
                        return err
×
4025
                }
×
4026

4027
                return nodeBucket.Put(nodePub, b.Bytes())
82✔
4028
        }
4029

4030
        // Write HaveNodeAnnouncement=1.
4031
        byteOrder.PutUint16(scratch[:2], 1)
825✔
4032
        if _, err := b.Write(scratch[:2]); err != nil {
825✔
4033
                return err
×
4034
        }
×
4035

4036
        if err := binary.Write(&b, byteOrder, node.Color.R); err != nil {
825✔
4037
                return err
×
4038
        }
×
4039
        if err := binary.Write(&b, byteOrder, node.Color.G); err != nil {
825✔
4040
                return err
×
4041
        }
×
4042
        if err := binary.Write(&b, byteOrder, node.Color.B); err != nil {
825✔
4043
                return err
×
4044
        }
×
4045

4046
        if err := wire.WriteVarString(&b, 0, node.Alias); err != nil {
825✔
4047
                return err
×
4048
        }
×
4049

4050
        if err := node.Features.Encode(&b); err != nil {
825✔
4051
                return err
×
4052
        }
×
4053

4054
        numAddresses := uint16(len(node.Addresses))
825✔
4055
        byteOrder.PutUint16(scratch[:2], numAddresses)
825✔
4056
        if _, err := b.Write(scratch[:2]); err != nil {
825✔
4057
                return err
×
4058
        }
×
4059

4060
        for _, address := range node.Addresses {
1,887✔
4061
                if err := SerializeAddr(&b, address); err != nil {
1,062✔
4062
                        return err
×
4063
                }
×
4064
        }
4065

4066
        sigLen := len(node.AuthSigBytes)
825✔
4067
        if sigLen > 80 {
825✔
4068
                return fmt.Errorf("max sig len allowed is 80, had %v",
×
4069
                        sigLen)
×
4070
        }
×
4071

4072
        err = wire.WriteVarBytes(&b, 0, node.AuthSigBytes)
825✔
4073
        if err != nil {
825✔
4074
                return err
×
4075
        }
×
4076

4077
        if len(node.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes {
825✔
4078
                return ErrTooManyExtraOpaqueBytes(len(node.ExtraOpaqueData))
×
4079
        }
×
4080
        err = wire.WriteVarBytes(&b, 0, node.ExtraOpaqueData)
825✔
4081
        if err != nil {
825✔
4082
                return err
×
4083
        }
×
4084

4085
        if err := aliasBucket.Put(nodePub, []byte(node.Alias)); err != nil {
825✔
4086
                return err
×
4087
        }
×
4088

4089
        // With the alias bucket updated, we'll now update the index that
4090
        // tracks the time series of node updates.
4091
        var indexKey [8 + 33]byte
825✔
4092
        byteOrder.PutUint64(indexKey[:8], updateUnix)
825✔
4093
        copy(indexKey[8:], nodePub)
825✔
4094

825✔
4095
        // If there was already an old index entry for this node, then we'll
825✔
4096
        // delete the old one before we write the new entry.
825✔
4097
        if nodeBytes := nodeBucket.Get(nodePub); nodeBytes != nil {
841✔
4098
                // Extract the old update time so we can reconstruct the
16✔
4099
                // prior index key to delete it from the index.
16✔
4100
                oldUpdateTime := nodeBytes[:8]
16✔
4101

16✔
4102
                var oldIndexKey [8 + 33]byte
16✔
4103
                copy(oldIndexKey[:8], oldUpdateTime)
16✔
4104
                copy(oldIndexKey[8:], nodePub)
16✔
4105

16✔
4106
                if err := updateIndex.Delete(oldIndexKey[:]); err != nil {
16✔
4107
                        return err
×
4108
                }
×
4109
        }
4110

4111
        if err := updateIndex.Put(indexKey[:], nil); err != nil {
825✔
4112
                return err
×
4113
        }
×
4114

4115
        return nodeBucket.Put(nodePub, b.Bytes())
825✔
4116
}
4117
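The node update index maintained above is keyed by the 8-byte big-endian last-update timestamp followed by the 33-byte compressed public key, which is what makes time-ordered scans of node updates cheap. A standalone sketch of the key construction (matching indexKey and oldIndexKey above):

// Sketch only: the 41-byte node update index key layout.
package main

import (
	"encoding/binary"
	"fmt"
	"time"
)

func nodeUpdateIndexKey(lastUpdate time.Time, nodePub [33]byte) [8 + 33]byte {
	var key [8 + 33]byte
	binary.BigEndian.PutUint64(key[:8], uint64(lastUpdate.Unix()))
	copy(key[8:], nodePub[:])

	return key
}

func main() {
	var pub [33]byte
	pub[0] = 0x02 // hypothetical compressed-key prefix

	key := nodeUpdateIndexKey(time.Unix(1700000000, 0), pub)
	fmt.Printf("index key: %x\n", key)
}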

4118
func fetchLightningNode(nodeBucket kvdb.RBucket,
4119
        nodePub []byte) (models.LightningNode, error) {
3,635✔
4120

3,635✔
4121
        nodeBytes := nodeBucket.Get(nodePub)
3,635✔
4122
        if nodeBytes == nil {
3,717✔
4123
                return models.LightningNode{}, ErrGraphNodeNotFound
82✔
4124
        }
82✔
4125

4126
        nodeReader := bytes.NewReader(nodeBytes)
3,553✔
4127

3,553✔
4128
        return deserializeLightningNode(nodeReader)
3,553✔
4129
}
4130

4131
func deserializeLightningNodeCacheable(r io.Reader) (route.Vertex,
4132
        *lnwire.FeatureVector, error) {
120✔
4133

120✔
4134
        var (
120✔
4135
                pubKey      route.Vertex
120✔
4136
                features    = lnwire.EmptyFeatureVector()
120✔
4137
                nodeScratch [8]byte
120✔
4138
        )
120✔
4139

120✔
4140
        // Skip ahead:
120✔
4141
        // - LastUpdate (8 bytes)
120✔
4142
        if _, err := r.Read(nodeScratch[:]); err != nil {
120✔
4143
                return pubKey, nil, err
×
4144
        }
×
4145

4146
        if _, err := io.ReadFull(r, pubKey[:]); err != nil {
120✔
4147
                return pubKey, nil, err
×
4148
        }
×
4149

4150
        // Read the node announcement flag.
4151
        if _, err := r.Read(nodeScratch[:2]); err != nil {
120✔
4152
                return pubKey, nil, err
×
4153
        }
×
4154
        hasNodeAnn := byteOrder.Uint16(nodeScratch[:2])
120✔
4155

120✔
4156
        // The rest of the data is optional, and will only be there if we got a
120✔
4157
        // node announcement for this node.
120✔
4158
        if hasNodeAnn == 0 {
120✔
4159
                return pubKey, features, nil
×
4160
        }
×
4161

4162
        // We did get a node announcement for this node, so we'll have the rest
4163
        // of the data available.
4164
        var rgb uint8
120✔
4165
        if err := binary.Read(r, byteOrder, &rgb); err != nil {
120✔
4166
                return pubKey, nil, err
×
4167
        }
×
4168
        if err := binary.Read(r, byteOrder, &rgb); err != nil {
120✔
4169
                return pubKey, nil, err
×
4170
        }
×
4171
        if err := binary.Read(r, byteOrder, &rgb); err != nil {
120✔
4172
                return pubKey, nil, err
×
4173
        }
×
4174

4175
        if _, err := wire.ReadVarString(r, 0); err != nil {
120✔
4176
                return pubKey, nil, err
×
4177
        }
×
4178

4179
        if err := features.Decode(r); err != nil {
120✔
4180
                return pubKey, nil, err
×
4181
        }
×
4182

4183
        return pubKey, features, nil
120✔
4184
}
4185

4186
func deserializeLightningNode(r io.Reader) (models.LightningNode, error) {
8,541✔
4187
        var (
8,541✔
4188
                node    models.LightningNode
8,541✔
4189
                scratch [8]byte
8,541✔
4190
                err     error
8,541✔
4191
        )
8,541✔
4192

8,541✔
4193
        // Always populate a feature vector, even if we don't have a node
8,541✔
4194
        // announcement and short circuit below.
8,541✔
4195
        node.Features = lnwire.EmptyFeatureVector()
8,541✔
4196

8,541✔
4197
        if _, err := r.Read(scratch[:]); err != nil {
8,541✔
4198
                return models.LightningNode{}, err
×
4199
        }
×
4200

4201
        unix := int64(byteOrder.Uint64(scratch[:]))
8,541✔
4202
        node.LastUpdate = time.Unix(unix, 0)
8,541✔
4203

8,541✔
4204
        if _, err := io.ReadFull(r, node.PubKeyBytes[:]); err != nil {
8,541✔
4205
                return models.LightningNode{}, err
×
4206
        }
×
4207

4208
        if _, err := r.Read(scratch[:2]); err != nil {
8,541✔
4209
                return models.LightningNode{}, err
×
4210
        }
×
4211

4212
        hasNodeAnn := byteOrder.Uint16(scratch[:2])
8,541✔
4213
        if hasNodeAnn == 1 {
16,935✔
4214
                node.HaveNodeAnnouncement = true
8,394✔
4215
        } else {
8,541✔
4216
                node.HaveNodeAnnouncement = false
147✔
4217
        }
147✔
4218

4219
        // The rest of the data is optional, and will only be there if we got a
4220
        // node announcement for this node.
4221
        if !node.HaveNodeAnnouncement {
8,688✔
4222
                return node, nil
147✔
4223
        }
147✔
4224

4225
        // We did get a node announcement for this node, so we'll have the rest
4226
        // of the data available.
4227
        if err := binary.Read(r, byteOrder, &node.Color.R); err != nil {
8,394✔
4228
                return models.LightningNode{}, err
×
4229
        }
×
4230
        if err := binary.Read(r, byteOrder, &node.Color.G); err != nil {
8,394✔
4231
                return models.LightningNode{}, err
×
4232
        }
×
4233
        if err := binary.Read(r, byteOrder, &node.Color.B); err != nil {
8,394✔
4234
                return models.LightningNode{}, err
×
4235
        }
×
4236

4237
        node.Alias, err = wire.ReadVarString(r, 0)
8,394✔
4238
        if err != nil {
8,394✔
4239
                return models.LightningNode{}, err
×
4240
        }
×
4241

4242
        err = node.Features.Decode(r)
8,394✔
4243
        if err != nil {
8,394✔
4244
                return models.LightningNode{}, err
×
4245
        }
×
4246

4247
        if _, err := r.Read(scratch[:2]); err != nil {
8,394✔
4248
                return models.LightningNode{}, err
×
4249
        }
×
4250
        numAddresses := int(byteOrder.Uint16(scratch[:2]))
8,394✔
4251

8,394✔
4252
        var addresses []net.Addr
8,394✔
4253
        for i := 0; i < numAddresses; i++ {
19,054✔
4254
                address, err := DeserializeAddr(r)
10,660✔
4255
                if err != nil {
10,660✔
4256
                        return models.LightningNode{}, err
×
4257
                }
×
4258
                addresses = append(addresses, address)
10,660✔
4259
        }
4260
        node.Addresses = addresses
8,394✔
4261

8,394✔
4262
        node.AuthSigBytes, err = wire.ReadVarBytes(r, 0, 80, "sig")
8,394✔
4263
        if err != nil {
8,394✔
4264
                return models.LightningNode{}, err
×
4265
        }
×
4266

4267
        // We'll try and see if there are any opaque bytes left, if not, then
4268
        // we'll ignore the EOF error and return the node as is.
4269
        extraBytes, err := wire.ReadVarBytes(
8,394✔
4270
                r, 0, MaxAllowedExtraOpaqueBytes, "blob",
8,394✔
4271
        )
8,394✔
4272
        switch {
8,394✔
4273
        case errors.Is(err, io.ErrUnexpectedEOF):
×
4274
        case errors.Is(err, io.EOF):
×
4275
        case err != nil:
×
4276
                return models.LightningNode{}, err
×
4277
        }
4278

4279
        if len(extraBytes) > 0 {
8,404✔
4280
                node.ExtraOpaqueData = extraBytes
10✔
4281
        }
10✔
4282

4283
        return node, nil
8,394✔
4284
}
4285

4286
func putChanEdgeInfo(edgeIndex kvdb.RwBucket,
4287
        edgeInfo *models.ChannelEdgeInfo, chanID [8]byte) error {
1,489✔
4288

1,489✔
4289
        var b bytes.Buffer
1,489✔
4290

1,489✔
4291
        if _, err := b.Write(edgeInfo.NodeKey1Bytes[:]); err != nil {
1,489✔
4292
                return err
×
4293
        }
×
4294
        if _, err := b.Write(edgeInfo.NodeKey2Bytes[:]); err != nil {
1,489✔
4295
                return err
×
4296
        }
×
4297
        if _, err := b.Write(edgeInfo.BitcoinKey1Bytes[:]); err != nil {
1,489✔
4298
                return err
×
4299
        }
×
4300
        if _, err := b.Write(edgeInfo.BitcoinKey2Bytes[:]); err != nil {
1,489✔
4301
                return err
×
4302
        }
×
4303

4304
        var featureBuf bytes.Buffer
1,489✔
4305
        if err := edgeInfo.Features.Encode(&featureBuf); err != nil {
1,489✔
4306
                return fmt.Errorf("unable to encode features: %w", err)
×
4307
        }
×
4308

4309
        if err := wire.WriteVarBytes(&b, 0, featureBuf.Bytes()); err != nil {
1,489✔
4310
                return err
×
4311
        }
×
4312

4313
        authProof := edgeInfo.AuthProof
1,489✔
4314
        var nodeSig1, nodeSig2, bitcoinSig1, bitcoinSig2 []byte
1,489✔
4315
        if authProof != nil {
2,894✔
4316
                nodeSig1 = authProof.NodeSig1Bytes
1,405✔
4317
                nodeSig2 = authProof.NodeSig2Bytes
1,405✔
4318
                bitcoinSig1 = authProof.BitcoinSig1Bytes
1,405✔
4319
                bitcoinSig2 = authProof.BitcoinSig2Bytes
1,405✔
4320
        }
1,405✔
4321

4322
        if err := wire.WriteVarBytes(&b, 0, nodeSig1); err != nil {
1,489✔
4323
                return err
×
4324
        }
×
4325
        if err := wire.WriteVarBytes(&b, 0, nodeSig2); err != nil {
1,489✔
4326
                return err
×
4327
        }
×
4328
        if err := wire.WriteVarBytes(&b, 0, bitcoinSig1); err != nil {
1,489✔
4329
                return err
×
4330
        }
×
4331
        if err := wire.WriteVarBytes(&b, 0, bitcoinSig2); err != nil {
1,489✔
4332
                return err
×
4333
        }
×
4334

4335
        if err := WriteOutpoint(&b, &edgeInfo.ChannelPoint); err != nil {
1,489✔
4336
                return err
×
4337
        }
×
4338
        err := binary.Write(&b, byteOrder, uint64(edgeInfo.Capacity))
1,489✔
4339
        if err != nil {
1,489✔
4340
                return err
×
4341
        }
×
4342
        if _, err := b.Write(chanID[:]); err != nil {
1,489✔
4343
                return err
×
4344
        }
×
4345
        if _, err := b.Write(edgeInfo.ChainHash[:]); err != nil {
1,489✔
4346
                return err
×
4347
        }
×
4348

4349
        if len(edgeInfo.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes {
1,489✔
4350
                return ErrTooManyExtraOpaqueBytes(len(edgeInfo.ExtraOpaqueData))
×
4351
        }
×
4352
        err = wire.WriteVarBytes(&b, 0, edgeInfo.ExtraOpaqueData)
1,489✔
4353
        if err != nil {
1,489✔
4354
                return err
×
4355
        }
×
4356

4357
        return edgeIndex.Put(chanID[:], b.Bytes())
1,489✔
4358
}
4359

4360
func fetchChanEdgeInfo(edgeIndex kvdb.RBucket,
4361
        chanID []byte) (models.ChannelEdgeInfo, error) {
6,803✔
4362

6,803✔
4363
        edgeInfoBytes := edgeIndex.Get(chanID)
6,803✔
4364
        if edgeInfoBytes == nil {
6,866✔
4365
                return models.ChannelEdgeInfo{}, ErrEdgeNotFound
63✔
4366
        }
63✔
4367

4368
        edgeInfoReader := bytes.NewReader(edgeInfoBytes)
6,740✔
4369

6,740✔
4370
        return deserializeChanEdgeInfo(edgeInfoReader)
6,740✔
4371
}
4372

4373
func deserializeChanEdgeInfo(r io.Reader) (models.ChannelEdgeInfo, error) {
7,282✔
4374
        var (
7,282✔
4375
                err      error
7,282✔
4376
                edgeInfo models.ChannelEdgeInfo
7,282✔
4377
        )
7,282✔
4378

7,282✔
4379
        if _, err := io.ReadFull(r, edgeInfo.NodeKey1Bytes[:]); err != nil {
7,282✔
4380
                return models.ChannelEdgeInfo{}, err
×
4381
        }
×
4382
        if _, err := io.ReadFull(r, edgeInfo.NodeKey2Bytes[:]); err != nil {
7,282✔
4383
                return models.ChannelEdgeInfo{}, err
×
4384
        }
×
4385
        if _, err := io.ReadFull(r, edgeInfo.BitcoinKey1Bytes[:]); err != nil {
7,282✔
4386
                return models.ChannelEdgeInfo{}, err
×
4387
        }
×
4388
        if _, err := io.ReadFull(r, edgeInfo.BitcoinKey2Bytes[:]); err != nil {
7,282✔
4389
                return models.ChannelEdgeInfo{}, err
×
4390
        }
×
4391

4392
        featureBytes, err := wire.ReadVarBytes(r, 0, 900, "features")
7,282✔
4393
        if err != nil {
7,282✔
4394
                return models.ChannelEdgeInfo{}, err
×
4395
        }
×
4396

4397
        features := lnwire.NewRawFeatureVector()
7,282✔
4398
        err = features.Decode(bytes.NewReader(featureBytes))
7,282✔
4399
        if err != nil {
7,282✔
4400
                return models.ChannelEdgeInfo{}, fmt.Errorf("unable to decode "+
×
4401
                        "features: %w", err)
×
4402
        }
×
4403
        edgeInfo.Features = lnwire.NewFeatureVector(features, lnwire.Features)
7,282✔
4404

7,282✔
4405
        proof := &models.ChannelAuthProof{}
7,282✔
4406

7,282✔
4407
        proof.NodeSig1Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
7,282✔
4408
        if err != nil {
7,282✔
4409
                return models.ChannelEdgeInfo{}, err
×
4410
        }
×
4411
        proof.NodeSig2Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
7,282✔
4412
        if err != nil {
7,282✔
4413
                return models.ChannelEdgeInfo{}, err
×
4414
        }
×
4415
        proof.BitcoinSig1Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
7,282✔
4416
        if err != nil {
7,282✔
4417
                return models.ChannelEdgeInfo{}, err
×
4418
        }
×
4419
        proof.BitcoinSig2Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
7,282✔
4420
        if err != nil {
7,282✔
4421
                return models.ChannelEdgeInfo{}, err
×
4422
        }
×
4423

4424
        if !proof.IsEmpty() {
11,461✔
4425
                edgeInfo.AuthProof = proof
4,179✔
4426
        }
4,179✔
4427

4428
        edgeInfo.ChannelPoint = wire.OutPoint{}
7,282✔
4429
        if err := ReadOutpoint(r, &edgeInfo.ChannelPoint); err != nil {
7,282✔
4430
                return models.ChannelEdgeInfo{}, err
×
4431
        }
×
4432
        if err := binary.Read(r, byteOrder, &edgeInfo.Capacity); err != nil {
7,282✔
4433
                return models.ChannelEdgeInfo{}, err
×
4434
        }
×
4435
        if err := binary.Read(r, byteOrder, &edgeInfo.ChannelID); err != nil {
7,282✔
4436
                return models.ChannelEdgeInfo{}, err
×
4437
        }
×
4438

4439
        if _, err := io.ReadFull(r, edgeInfo.ChainHash[:]); err != nil {
7,282✔
4440
                return models.ChannelEdgeInfo{}, err
×
4441
        }
×
4442

4443
        // We'll try and see if there are any opaque bytes left, if not, then
4444
        // we'll ignore the EOF error and return the edge as is.
4445
        edgeInfo.ExtraOpaqueData, err = wire.ReadVarBytes(
7,282✔
4446
                r, 0, MaxAllowedExtraOpaqueBytes, "blob",
7,282✔
4447
        )
7,282✔
4448
        switch {
7,282✔
4449
        case errors.Is(err, io.ErrUnexpectedEOF):
×
4450
        case errors.Is(err, io.EOF):
×
4451
        case err != nil:
×
4452
                return models.ChannelEdgeInfo{}, err
×
4453
        }
4454

4455
        return edgeInfo, nil
7,282✔
4456
}
4457

4458
func putChanEdgePolicy(edges kvdb.RwBucket, edge *models.ChannelEdgePolicy,
4459
        from, to []byte) error {
2,667✔
4460

2,667✔
4461
        var edgeKey [33 + 8]byte
2,667✔
4462
        copy(edgeKey[:], from)
2,667✔
4463
        byteOrder.PutUint64(edgeKey[33:], edge.ChannelID)
2,667✔
4464

2,667✔
4465
        var b bytes.Buffer
2,667✔
4466
        if err := serializeChanEdgePolicy(&b, edge, to); err != nil {
2,667✔
4467
                return err
×
4468
        }
×
4469

4470
        // Before we write out the new edge, we'll create a new entry in the
4471
        // update index in order to keep it fresh.
4472
        updateUnix := uint64(edge.LastUpdate.Unix())
2,667✔
4473
        var indexKey [8 + 8]byte
2,667✔
4474
        byteOrder.PutUint64(indexKey[:8], updateUnix)
2,667✔
4475
        byteOrder.PutUint64(indexKey[8:], edge.ChannelID)
2,667✔
4476

2,667✔
4477
        updateIndex, err := edges.CreateBucketIfNotExists(edgeUpdateIndexBucket)
2,667✔
4478
        if err != nil {
2,667✔
4479
                return err
×
4480
        }
×
4481

4482
        // If there was already an entry for this edge, then we'll need to
4483
        // delete the old one to ensure we don't leave around any after-images.
4484
        // An unknown policy value does not have an update time recorded, so
4485
        // it also does not need to be removed.
4486
        if edgeBytes := edges.Get(edgeKey[:]); edgeBytes != nil &&
2,667✔
4487
                !bytes.Equal(edgeBytes, unknownPolicy) {
2,694✔
4488

27✔
4489
                // In order to delete the old entry, we'll need to obtain the
27✔
4490
                // *prior* update time in order to delete it. To do this, we'll
27✔
4491
                // need to deserialize the existing policy within the database
27✔
4492
                // (now outdated by the new one), and delete its corresponding
27✔
4493
                // entry within the update index. We'll ignore any
27✔
4494
                // ErrEdgePolicyOptionalFieldNotFound or ErrParsingExtraTLVBytes
27✔
4495
                // errors, as we only need the channel ID and update time to
27✔
4496
                // delete the entry.
27✔
4497
                //
27✔
4498
                // TODO(halseth): get rid of these invalid policies in a
27✔
4499
                // migration.
27✔
4500
                //
27✔
4501
                // NOTE: the above TODO was completed in the SQL migration and
27✔
4502
                // so such edge cases no longer need to be handled there.
27✔
4503
                oldEdgePolicy, err := deserializeChanEdgePolicy(
27✔
4504
                        bytes.NewReader(edgeBytes),
27✔
4505
                )
27✔
4506
                if err != nil &&
27✔
4507
                        !errors.Is(err, ErrEdgePolicyOptionalFieldNotFound) &&
27✔
4508
                        !errors.Is(err, ErrParsingExtraTLVBytes) {
27✔
4509

×
4510
                        return err
×
4511
                }
×
4512

4513
                oldUpdateTime := uint64(oldEdgePolicy.LastUpdate.Unix())
27✔
4514

27✔
4515
                var oldIndexKey [8 + 8]byte
27✔
4516
                byteOrder.PutUint64(oldIndexKey[:8], oldUpdateTime)
27✔
4517
                byteOrder.PutUint64(oldIndexKey[8:], edge.ChannelID)
27✔
4518

27✔
4519
                if err := updateIndex.Delete(oldIndexKey[:]); err != nil {
27✔
4520
                        return err
×
4521
                }
×
4522
        }
4523

4524
        if err := updateIndex.Put(indexKey[:], nil); err != nil {
2,667✔
4525
                return err
×
4526
        }
×
4527

4528
        err = updateEdgePolicyDisabledIndex(
2,667✔
4529
                edges, edge.ChannelID,
2,667✔
4530
                edge.ChannelFlags&lnwire.ChanUpdateDirection > 0,
2,667✔
4531
                edge.IsDisabled(),
2,667✔
4532
        )
2,667✔
4533
        if err != nil {
2,667✔
4534
                return err
×
4535
        }
×
4536

4537
        return edges.Put(edgeKey[:], b.Bytes())
2,667✔
4538
}
4539
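Two key layouts drive the function above: policies live in the edges bucket under the advertising node's public key followed by the channel ID, while the edge update index is keyed by the last-update timestamp followed by the channel ID. A standalone sketch of both (big-endian, matching edgeKey and indexKey above):

// Sketch only: the edge policy key and the edge update index key.
package main

import (
	"encoding/binary"
	"fmt"
)

func edgePolicyKey(fromNode [33]byte, chanID uint64) [33 + 8]byte {
	var key [33 + 8]byte
	copy(key[:33], fromNode[:])
	binary.BigEndian.PutUint64(key[33:], chanID)

	return key
}

func edgeUpdateIndexKey(updateUnix, chanID uint64) [8 + 8]byte {
	var key [8 + 8]byte
	binary.BigEndian.PutUint64(key[:8], updateUnix)
	binary.BigEndian.PutUint64(key[8:], chanID)

	return key
}

func main() {
	var from [33]byte
	from[0] = 0x03 // hypothetical compressed-key prefix

	fmt.Printf("policy key: %x\n", edgePolicyKey(from, 42))
	fmt.Printf("update index key: %x\n", edgeUpdateIndexKey(1700000000, 42))
}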

4540
// updateEdgePolicyDisabledIndex is used to update the disabledEdgePolicyIndex
4541
// bucket by either add a new disabled ChannelEdgePolicy or remove an existing
4542
// one.
4543
// The direction represents the direction of the edge and disabled is used for
4544
// deciding whether to remove or add an entry to the bucket.
4545
// In general a channel is disabled if two entries for the same chanID exist
4546
// in this bucket.
4547
// Maintaining the bucket this way allows a fast retrieval of disabled
4548
// channels, for example when prune is needed.
4549
func updateEdgePolicyDisabledIndex(edges kvdb.RwBucket, chanID uint64,
4550
        direction bool, disabled bool) error {
2,945✔
4551

2,945✔
4552
        var disabledEdgeKey [8 + 1]byte
2,945✔
4553
        byteOrder.PutUint64(disabledEdgeKey[0:], chanID)
2,945✔
4554
        if direction {
4,414✔
4555
                disabledEdgeKey[8] = 1
1,469✔
4556
        }
1,469✔
4557

4558
        disabledEdgePolicyIndex, err := edges.CreateBucketIfNotExists(
2,945✔
4559
                disabledEdgePolicyBucket,
2,945✔
4560
        )
2,945✔
4561
        if err != nil {
2,945✔
4562
                return err
×
4563
        }
×
4564

4565
        if disabled {
2,971✔
4566
                return disabledEdgePolicyIndex.Put(disabledEdgeKey[:], []byte{})
26✔
4567
        }
26✔
4568

4569
        return disabledEdgePolicyIndex.Delete(disabledEdgeKey[:])
2,919✔
4570
}
4571
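The disabled-policy index key is the channel ID followed by a single direction byte, so a channel that is disabled in both directions contributes two entries. A standalone sketch of the key construction (matching disabledEdgeKey above):

// Sketch only: the 9-byte key used by the disabled-policy index. The trailing
// byte is 1 for the second direction and 0 for the first.
package main

import (
	"encoding/binary"
	"fmt"
)

func disabledEdgeKey(chanID uint64, direction bool) [9]byte {
	var key [9]byte
	binary.BigEndian.PutUint64(key[:8], chanID)
	if direction {
		key[8] = 1
	}

	return key
}

func main() {
	fmt.Printf("dir0: %x\n", disabledEdgeKey(42, false))
	fmt.Printf("dir1: %x\n", disabledEdgeKey(42, true))
}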

4572
// putChanEdgePolicyUnknown marks the edge policy as unknown
4573
// in the edges bucket.
4574
func putChanEdgePolicyUnknown(edges kvdb.RwBucket, channelID uint64,
4575
        from []byte) error {
2,974✔
4576

2,974✔
4577
        var edgeKey [33 + 8]byte
2,974✔
4578
        copy(edgeKey[:], from)
2,974✔
4579
        byteOrder.PutUint64(edgeKey[33:], channelID)
2,974✔
4580

2,974✔
4581
        if edges.Get(edgeKey[:]) != nil {
2,974✔
4582
                return fmt.Errorf("cannot write unknown policy for channel %v "+
×
4583
                        " when there is already a policy present", channelID)
×
4584
        }
×
4585

4586
        return edges.Put(edgeKey[:], unknownPolicy)
2,974✔
4587
}
4588

4589
func fetchChanEdgePolicy(edges kvdb.RBucket, chanID []byte,
4590
        nodePub []byte) (*models.ChannelEdgePolicy, error) {
13,488✔
4591

13,488✔
4592
        var edgeKey [33 + 8]byte
13,488✔
4593
        copy(edgeKey[:], nodePub)
13,488✔
4594
        copy(edgeKey[33:], chanID)
13,488✔
4595

13,488✔
4596
        edgeBytes := edges.Get(edgeKey[:])
13,488✔
4597
        if edgeBytes == nil {
13,488✔
4598
                return nil, ErrEdgeNotFound
×
4599
        }
×
4600

4601
        // No need to deserialize unknown policy.
4602
        if bytes.Equal(edgeBytes, unknownPolicy) {
14,946✔
4603
                return nil, nil
1,458✔
4604
        }
1,458✔
4605

4606
        edgeReader := bytes.NewReader(edgeBytes)
12,030✔
4607

12,030✔
4608
        ep, err := deserializeChanEdgePolicy(edgeReader)
12,030✔
4609
        switch {
12,030✔
4610
        // If the db policy was missing an expected optional field, we return
4611
        // nil as if the policy was unknown.
4612
        case errors.Is(err, ErrEdgePolicyOptionalFieldNotFound):
2✔
4613
                return nil, nil
2✔
4614

4615
        // If the policy contains invalid TLV bytes, we return nil as if
4616
        // the policy was unknown.
4617
        case errors.Is(err, ErrParsingExtraTLVBytes):
×
4618
                return nil, nil
×
4619

4620
        case err != nil:
×
4621
                return nil, err
×
4622
        }
4623

4624
        return ep, nil
12,028✔
4625
}
4626

4627
func fetchChanEdgePolicies(edgeIndex kvdb.RBucket, edges kvdb.RBucket,
4628
        chanID []byte) (*models.ChannelEdgePolicy, *models.ChannelEdgePolicy,
4629
        error) {
2,901✔
4630

2,901✔
4631
        edgeInfo := edgeIndex.Get(chanID)
2,901✔
4632
        if edgeInfo == nil {
2,901✔
4633
                return nil, nil, fmt.Errorf("%w: chanID=%x", ErrEdgeNotFound,
×
4634
                        chanID)
×
4635
        }
×
4636

4637
        // The first node is contained within the first half of the edge
4638
        // information. We only propagate the error here and below if it's
4639
        // something other than edge non-existence.
4640
        node1Pub := edgeInfo[:33]
2,901✔
4641
        edge1, err := fetchChanEdgePolicy(edges, chanID, node1Pub)
2,901✔
4642
        if err != nil {
2,901✔
4643
                return nil, nil, fmt.Errorf("%w: node1Pub=%x", ErrEdgeNotFound,
×
4644
                        node1Pub)
×
4645
        }
×
4646

4647
        // Similarly, the second node is contained within the latter
4648
        // half of the edge information.
4649
        node2Pub := edgeInfo[33:66]
2,901✔
4650
        edge2, err := fetchChanEdgePolicy(edges, chanID, node2Pub)
2,901✔
4651
        if err != nil {
2,901✔
4652
                return nil, nil, fmt.Errorf("%w: node2Pub=%x", ErrEdgeNotFound,
×
4653
                        node2Pub)
×
4654
        }
×
4655

4656
        return edge1, edge2, nil
2,901✔
4657
}
4658

4659
func serializeChanEdgePolicy(w io.Writer, edge *models.ChannelEdgePolicy,
4660
        to []byte) error {
2,669✔
4661

2,669✔
4662
        err := wire.WriteVarBytes(w, 0, edge.SigBytes)
2,669✔
4663
        if err != nil {
2,669✔
4664
                return err
×
4665
        }
×
4666

4667
        if err := binary.Write(w, byteOrder, edge.ChannelID); err != nil {
2,669✔
4668
                return err
×
4669
        }
×
4670

4671
        var scratch [8]byte
2,669✔
4672
        updateUnix := uint64(edge.LastUpdate.Unix())
2,669✔
4673
        byteOrder.PutUint64(scratch[:], updateUnix)
2,669✔
4674
        if _, err := w.Write(scratch[:]); err != nil {
2,669✔
4675
                return err
×
4676
        }
×
4677

4678
        if err := binary.Write(w, byteOrder, edge.MessageFlags); err != nil {
2,669✔
4679
                return err
×
4680
        }
×
4681
        if err := binary.Write(w, byteOrder, edge.ChannelFlags); err != nil {
2,669✔
4682
                return err
×
4683
        }
×
4684
        if err := binary.Write(w, byteOrder, edge.TimeLockDelta); err != nil {
2,669✔
4685
                return err
×
4686
        }
×
4687
        if err := binary.Write(w, byteOrder, uint64(edge.MinHTLC)); err != nil {
2,669✔
4688
                return err
×
4689
        }
×
4690
        err = binary.Write(w, byteOrder, uint64(edge.FeeBaseMSat))
2,669✔
4691
        if err != nil {
2,669✔
4692
                return err
×
4693
        }
×
4694
        err = binary.Write(
2,669✔
4695
                w, byteOrder, uint64(edge.FeeProportionalMillionths),
2,669✔
4696
        )
2,669✔
4697
        if err != nil {
2,669✔
4698
                return err
×
4699
        }
×
4700

4701
        if _, err := w.Write(to); err != nil {
2,669✔
4702
                return err
×
4703
        }
×
4704

4705
        // If the max_htlc field is present, we write it. To be compatible with
4706
        // older versions that weren't aware of this field, we write it as part
4707
        // of the opaque data.
4708
        // TODO(halseth): clean up when moving to TLV.
4709
        var opaqueBuf bytes.Buffer
2,669✔
4710
        if edge.MessageFlags.HasMaxHtlc() {
4,954✔
4711
                err := binary.Write(&opaqueBuf, byteOrder, uint64(edge.MaxHTLC))
2,285✔
4712
                if err != nil {
2,285✔
4713
                        return err
×
4714
                }
×
4715
        }
4716

4717
        if len(edge.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes {
2,669✔
4718
                return ErrTooManyExtraOpaqueBytes(len(edge.ExtraOpaqueData))
×
4719
        }
×
4720
        if _, err := opaqueBuf.Write(edge.ExtraOpaqueData); err != nil {
2,669✔
4721
                return err
×
4722
        }
×
4723

4724
        if err := wire.WriteVarBytes(w, 0, opaqueBuf.Bytes()); err != nil {
2,669✔
4725
                return err
×
4726
        }
×
4727

4728
        return nil
2,669✔
4729
}
4730
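The optional max_htlc handling above keeps the serialization backward compatible: when the message flag is set, the value is written as the first 8 bytes of the opaque blob, and deserializeChanEdgePolicyRaw later strips those bytes back out before treating the remainder as extra TLV data. A standalone sketch of that round trip:

// Sketch only: the opaque-data layout used for the optional max_htlc field.
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

func encodeOpaque(hasMaxHtlc bool, maxHtlcMsat uint64, extra []byte) []byte {
	var opq []byte
	if hasMaxHtlc {
		var scratch [8]byte
		binary.BigEndian.PutUint64(scratch[:], maxHtlcMsat)
		opq = append(opq, scratch[:]...)
	}

	return append(opq, extra...)
}

func decodeOpaque(hasMaxHtlc bool, opq []byte) (uint64, []byte, error) {
	if !hasMaxHtlc {
		return 0, opq, nil
	}
	if len(opq) < 8 {
		return 0, nil, errors.New("max_htlc field not found")
	}

	return binary.BigEndian.Uint64(opq[:8]), opq[8:], nil
}

func main() {
	opq := encodeOpaque(true, 100_000_000, []byte{0xfd})
	maxHtlc, rest, _ := decodeOpaque(true, opq)
	fmt.Println(maxHtlc, rest)
}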

4731
func deserializeChanEdgePolicy(r io.Reader) (*models.ChannelEdgePolicy, error) {
12,058✔
4732
        // Deserialize the policy. Note that in case an optional field is not
12,058✔
4733
        // found or if the edge has invalid TLV data, then both an error and a
12,058✔
4734
        // populated policy object are returned so that the caller can decide
12,058✔
4735
        // if it still wants to use the edge or not.
12,058✔
4736
        edge, err := deserializeChanEdgePolicyRaw(r)
12,058✔
4737
        if err != nil &&
12,058✔
4738
                !errors.Is(err, ErrEdgePolicyOptionalFieldNotFound) &&
12,058✔
4739
                !errors.Is(err, ErrParsingExtraTLVBytes) {
12,058✔
4740

×
4741
                return nil, err
×
4742
        }
×
4743

4744
        return edge, err
12,058✔
4745
}
4746

4747
func deserializeChanEdgePolicyRaw(r io.Reader) (*models.ChannelEdgePolicy,
4748
        error) {
13,071✔
4749

13,071✔
4750
        edge := &models.ChannelEdgePolicy{}
13,071✔
4751

13,071✔
4752
        var err error
13,071✔
4753
        edge.SigBytes, err = wire.ReadVarBytes(r, 0, 80, "sig")
13,071✔
4754
        if err != nil {
13,071✔
4755
                return nil, err
×
4756
        }
×
4757

4758
        if err := binary.Read(r, byteOrder, &edge.ChannelID); err != nil {
13,071✔
4759
                return nil, err
×
4760
        }
×
4761

4762
        var scratch [8]byte
13,071✔
4763
        if _, err := r.Read(scratch[:]); err != nil {
13,071✔
4764
                return nil, err
×
4765
        }
×
4766
        unix := int64(byteOrder.Uint64(scratch[:]))
13,071✔
4767
        edge.LastUpdate = time.Unix(unix, 0)
13,071✔
4768

13,071✔
4769
        if err := binary.Read(r, byteOrder, &edge.MessageFlags); err != nil {
13,071✔
4770
                return nil, err
×
4771
        }
×
4772
        if err := binary.Read(r, byteOrder, &edge.ChannelFlags); err != nil {
13,071✔
4773
                return nil, err
×
4774
        }
×
4775
        if err := binary.Read(r, byteOrder, &edge.TimeLockDelta); err != nil {
13,071✔
4776
                return nil, err
×
4777
        }
×
4778

4779
        var n uint64
13,071✔
4780
        if err := binary.Read(r, byteOrder, &n); err != nil {
13,071✔
4781
                return nil, err
×
4782
        }
×
4783
        edge.MinHTLC = lnwire.MilliSatoshi(n)
13,071✔
4784

13,071✔
4785
        if err := binary.Read(r, byteOrder, &n); err != nil {
13,071✔
4786
                return nil, err
×
4787
        }
×
4788
        edge.FeeBaseMSat = lnwire.MilliSatoshi(n)
13,071✔
4789

13,071✔
4790
        if err := binary.Read(r, byteOrder, &n); err != nil {
13,071✔
4791
                return nil, err
×
4792
        }
×
4793
        edge.FeeProportionalMillionths = lnwire.MilliSatoshi(n)
13,071✔
4794

13,071✔
4795
        if _, err := r.Read(edge.ToNode[:]); err != nil {
13,071✔
4796
                return nil, err
×
4797
        }
×
4798

4799
        // We'll try and see if there are any opaque bytes left, if not, then
4800
        // we'll ignore the EOF error and return the edge as is.
4801
        edge.ExtraOpaqueData, err = wire.ReadVarBytes(
13,071✔
4802
                r, 0, MaxAllowedExtraOpaqueBytes, "blob",
13,071✔
4803
        )
13,071✔
4804
        switch {
13,071✔
4805
        case errors.Is(err, io.ErrUnexpectedEOF):
×
4806
        case errors.Is(err, io.EOF):
4✔
4807
        case err != nil:
×
4808
                return nil, err
×
4809
        }
4810

4811
        // See if optional fields are present.
4812
        if edge.MessageFlags.HasMaxHtlc() {
25,195✔
4813
                // The max_htlc field should be at the beginning of the opaque
12,124✔
4814
                // bytes.
12,124✔
4815
                opq := edge.ExtraOpaqueData
12,124✔
4816

12,124✔
4817
                // If the max_htlc field is not present, it might be old data
12,124✔
4818
                // stored before this field was validated. We'll return the
12,124✔
4819
                // edge along with an error.
12,124✔
4820
                if len(opq) < 8 {
12,128✔
4821
                        return edge, ErrEdgePolicyOptionalFieldNotFound
4✔
4822
                }
4✔
4823

4824
                maxHtlc := byteOrder.Uint64(opq[:8])
12,120✔
4825
                edge.MaxHTLC = lnwire.MilliSatoshi(maxHtlc)
12,120✔
4826

12,120✔
4827
                // Exclude the parsed field from the rest of the opaque data.
12,120✔
4828
                edge.ExtraOpaqueData = opq[8:]
12,120✔
4829
        }
4830

4831
        // Attempt to extract the inbound fee from the opaque data. If we fail
4832
        // to parse the TLV here, we return an error but also return the edge
4833
        // so that the caller can still use it. This is for backwards
4834
        // compatibility in case we have already persisted some policies that
4835
        // have invalid TLV data.
4836
        var inboundFee lnwire.Fee
13,067✔
4837
        typeMap, err := edge.ExtraOpaqueData.ExtractRecords(&inboundFee)
13,067✔
4838
        if err != nil {
13,067✔
4839
                return edge, fmt.Errorf("%w: %w", ErrParsingExtraTLVBytes, err)
×
4840
        }
×
4841

4842
        val, ok := typeMap[lnwire.FeeRecordType]
13,067✔
4843
        if ok && val == nil {
14,753✔
4844
                edge.InboundFee = fn.Some(inboundFee)
1,686✔
4845
        }
1,686✔
4846

4847
        return edge, nil
13,067✔
4848
}
4849

4850
// chanGraphNodeTx is an implementation of the NodeRTx interface backed by the
4851
// KVStore and a kvdb.RTx.
4852
type chanGraphNodeTx struct {
4853
        tx   kvdb.RTx
4854
        db   *KVStore
4855
        node *models.LightningNode
4856
}
4857

4858
// A compile-time constraint to ensure chanGraphNodeTx implements the NodeRTx
4859
// interface.
4860
var _ NodeRTx = (*chanGraphNodeTx)(nil)
4861

4862
func newChanGraphNodeTx(tx kvdb.RTx, db *KVStore,
4863
        node *models.LightningNode) *chanGraphNodeTx {
4,102✔
4864

4,102✔
4865
        return &chanGraphNodeTx{
4,102✔
4866
                tx:   tx,
4,102✔
4867
                db:   db,
4,102✔
4868
                node: node,
4,102✔
4869
        }
4,102✔
4870
}
4,102✔
4871

4872
// Node returns the raw information of the node.
4873
//
4874
// NOTE: This is a part of the NodeRTx interface.
4875
func (c *chanGraphNodeTx) Node() *models.LightningNode {
5,019✔
4876
        return c.node
5,019✔
4877
}
5,019✔
4878

4879
// FetchNode fetches the node with the given pub key under the same transaction
4880
// used to fetch the current node. The returned node is also a NodeRTx and any
4881
// operations on that NodeRTx will also be done under the same transaction.
4882
//
4883
// NOTE: This is a part of the NodeRTx interface.
4884
func (c *chanGraphNodeTx) FetchNode(nodePub route.Vertex) (NodeRTx, error) {
2,944✔
4885
        node, err := c.db.FetchLightningNodeTx(c.tx, nodePub)
2,944✔
4886
        if err != nil {
2,944✔
4887
                return nil, err
×
4888
        }
×
4889

4890
        return newChanGraphNodeTx(c.tx, c.db, node), nil
2,944✔
4891
}
4892

4893
// ForEachChannel can be used to iterate over the node's channels under
4894
// the same transaction used to fetch the node.
4895
//
4896
// NOTE: This is a part of the NodeRTx interface.
4897
func (c *chanGraphNodeTx) ForEachChannel(f func(*models.ChannelEdgeInfo,
4898
        *models.ChannelEdgePolicy, *models.ChannelEdgePolicy) error) error {
965✔
4899

965✔
4900
        return c.db.forEachNodeChannelTx(c.tx, c.node.PubKeyBytes,
965✔
4901
                func(_ kvdb.RTx, info *models.ChannelEdgeInfo, policy1,
965✔
4902
                        policy2 *models.ChannelEdgePolicy) error {
3,909✔
4903

2,944✔
4904
                        return f(info, policy1, policy2)
2,944✔
4905
                },
2,944✔
4906
        )
4907
}
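As a usage sketch for the transaction-scoped node view above (nodeTx is assumed to be a NodeRTx already obtained from the store): the node's channels and its peers can be walked without opening any additional transactions.

// Sketch only: a one-hop walk under a single read transaction. For
// illustration we always fetch the channel's second node key; real code
// would pick whichever key is not the starting node.
err := nodeTx.ForEachChannel(func(info *models.ChannelEdgeInfo,
	_, _ *models.ChannelEdgePolicy) error {

	peer, err := nodeTx.FetchNode(info.NodeKey2Bytes)
	if err != nil {
		return err
	}

	fmt.Printf("peer alias: %s\n", peer.Node().Alias)

	return nil
})
if err != nil {
	// Handle the traversal error.
}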