lightningnetwork / lnd / 16777740336

06 Aug 2025 01:04PM UTC coverage: 54.85% (-12.1%) from 66.954%

Merge 429aa830c into e512770f1
Pull Request #10135: docs: move v0.19.3 items to correct file

108702 of 198181 relevant lines covered (54.85%)

22045.97 hits per line

Source File

/graph/db/kv_store.go (77.31% covered)
1
package graphdb
2

3
import (
4
        "bytes"
5
        "context"
6
        "crypto/sha256"
7
        "encoding/binary"
8
        "errors"
9
        "fmt"
10
        "io"
11
        "math"
12
        "net"
13
        "sort"
14
        "sync"
15
        "time"
16

17
        "github.com/btcsuite/btcd/btcec/v2"
18
        "github.com/btcsuite/btcd/chaincfg/chainhash"
19
        "github.com/btcsuite/btcd/txscript"
20
        "github.com/btcsuite/btcd/wire"
21
        "github.com/btcsuite/btcwallet/walletdb"
22
        "github.com/lightningnetwork/lnd/aliasmgr"
23
        "github.com/lightningnetwork/lnd/batch"
24
        "github.com/lightningnetwork/lnd/fn/v2"
25
        "github.com/lightningnetwork/lnd/graph/db/models"
26
        "github.com/lightningnetwork/lnd/input"
27
        "github.com/lightningnetwork/lnd/kvdb"
28
        "github.com/lightningnetwork/lnd/lnwire"
29
        "github.com/lightningnetwork/lnd/routing/route"
30
)
31

32
var (
33
        // nodeBucket is a bucket which houses all the vertices or nodes within
34
        // the channel graph. This bucket has a single sub-bucket which adds an
35
        // additional index from pubkey -> alias. Within the top-level of this
36
        // bucket, the key space maps a node's compressed public key to the
37
        // serialized information for that node. Additionally, there's a
38
        // special key "source" which stores the pubkey of the source node. The
39
        // source node is used as the starting point for all graph queries and
40
        // traversals. The graph is formed as a star-graph with the source node
41
        // at the center.
42
        //
43
        // maps: pubKey -> nodeInfo
44
        // maps: source -> selfPubKey
45
        nodeBucket = []byte("graph-node")
46

47
        // nodeUpdateIndexBucket is a sub-bucket of the nodeBucket. This bucket
48
        // will be used to quickly look up the "freshness" of a node's last
49
        // update to the network. The bucket only contains keys, and no values,
50
        // it's mapping:
51
        //
52
        // maps: updateTime || nodeID -> nil
53
        nodeUpdateIndexBucket = []byte("graph-node-update-index")
54

55
        // sourceKey is a special key that resides within the nodeBucket. The
56
        // sourceKey maps a key to the public key of the "self node".
57
        sourceKey = []byte("source")
58

59
        // aliasIndexBucket is a sub-bucket that's nested within the main
60
        // nodeBucket. This bucket maps the public key of a node to its
61
        // current alias. This bucket is provided as it can be used within a
62
        // future UI layer to add an additional degree of confirmation.
63
        aliasIndexBucket = []byte("alias")
64

65
        // edgeBucket is a bucket which houses all of the edge or channel
66
        // information within the channel graph. This bucket essentially acts
67
        // as an adjacency list, which in conjunction with a range scan, can be
68
        // used to iterate over all the incoming and outgoing edges for a
69
        // particular node. Keys in the bucket use a prefix scheme which leads
70
        // with the node's public key and ends with the compact edge ID. (An
        // illustrative key-construction sketch follows this var block.)
71
        // For each chanID, there will be two entries within the bucket, as the
72
        // graph is directed: nodes may have different policies w.r.t to fees
73
        // for their respective directions.
74
        //
75
        // maps: pubKey || chanID -> channel edge policy for node
76
        edgeBucket = []byte("graph-edge")
77

78
        // unknownPolicy is represented as an empty slice. It is
79
        // used as the value in edgeBucket for unknown channel edge policies.
80
        // Unknown policies are still stored in the database to enable efficient
81
        // lookup of incoming channel edges.
82
        unknownPolicy = []byte{}
83

84
        // chanStart is an array of all zero bytes which is used to perform
85
        // range scans within the edgeBucket to obtain all of the outgoing
86
        // edges for a particular node.
87
        chanStart [8]byte
88

89
        // edgeIndexBucket is an index which can be used to iterate all edges
90
        // in the bucket, grouping them according to their in/out nodes.
91
        // Additionally, the items in this bucket also contain the complete
92
        // edge information for a channel. The edge information includes the
93
        // capacity of the channel, the nodes that made the channel, etc. This
94
        // bucket resides within the edgeBucket above. Creation of an edge
95
        // proceeds in two phases: first the edge is added to the edge index,
96
        // afterwards the edgeBucket can be updated with the latest details of
97
        // the edge as they are announced on the network.
98
        //
99
        // maps: chanID -> pubKey1 || pubKey2 || restofEdgeInfo
100
        edgeIndexBucket = []byte("edge-index")
101

102
        // edgeUpdateIndexBucket is a sub-bucket of the main edgeBucket. This
103
        // bucket contains an index which allows us to gauge the "freshness" of
104
        // a channel's last updates.
105
        //
106
        // maps: updateTime || chanID -> nil
107
        edgeUpdateIndexBucket = []byte("edge-update-index")
108

109
        // channelPointBucket maps a channel's full outpoint (txid:index) to
110
        // its short 8-byte channel ID. This bucket resides within the
111
        // edgeBucket above, and can be used to quickly remove an edge due to
112
        // the outpoint being spent, or to query for existence of a channel.
113
        //
114
        // maps: outPoint -> chanID
115
        channelPointBucket = []byte("chan-index")
116

117
        // zombieBucket is a sub-bucket of the main edgeBucket bucket
118
        // responsible for maintaining an index of zombie channels. Each entry
119
        // exists within the bucket as follows:
120
        //
121
        // maps: chanID -> pubKey1 || pubKey2
122
        //
123
        // The chanID represents the channel ID of the edge that is marked as a
124
        // zombie and is used as the key, which maps to the public keys of the
125
        // edge's participants.
126
        zombieBucket = []byte("zombie-index")
127

128
        // disabledEdgePolicyBucket is a sub-bucket of the main edgeBucket
129
        // bucket responsible for maintaining an index of disabled edge
130
        // policies. Each entry exists within the bucket as follows:
131
        //
132
        // maps: <chanID><direction> -> []byte{}
133
        //
134
        // The chanID represents the channel ID of the edge and the direction is
135
        // one byte representing the direction of the edge. The main purpose of
136
        // this index is to allow pruning disabled channels in a fast way
137
        // without the need to iterate all over the graph.
138
        disabledEdgePolicyBucket = []byte("disabled-edge-policy-index")
139

140
        // graphMetaBucket is a top-level bucket which stores various metadata
141
        // related to the on-disk channel graph. Data stored in this bucket
142
        // includes the block to which the graph has been synced, the total
143
        // number of channels, etc.
144
        graphMetaBucket = []byte("graph-meta")
145

146
        // pruneLogBucket is a bucket within the graphMetaBucket that stores
147
        // a mapping from the block height to the hash for the blocks used to
148
        // prune the graph.
149
        // Once a new block is discovered, any channels that have been closed
150
        // (by spending the outpoint) can safely be removed from the graph, and
151
        // the block is added to the prune log. We need to keep such a log for
152
        // the case where a reorg happens, and we must "rewind" the state of the
153
        // graph by removing channels that were previously confirmed. In such a
154
        // case we'll remove all entries from the prune log with a block height
155
        // that no longer exists.
156
        pruneLogBucket = []byte("prune-log")
157

158
        // closedScidBucket is a top-level bucket that stores scids for
159
        // channels that we know to be closed. This is used so that we don't
160
        // need to perform expensive validation checks if we receive a channel
161
        // announcement for the channel again.
162
        //
163
        // maps: scid -> []byte{}
164
        closedScidBucket = []byte("closed-scid")
165
)
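
// NOTE: The helper below is an illustrative sketch only and is not part of the
// original file. Under the key layout documented in the var block above, it
// shows how an edgeBucket key is composed from a node's compressed public key
// and a channel ID, and how chanStart serves as the lower bound of a range
// scan over a node's outgoing edges. The helper name is hypothetical.
func exampleEdgeKey(nodePub route.Vertex, chanID uint64) ([]byte, []byte) {
        // maps: pubKey || chanID -> channel edge policy for node.
        var edgeKey [33 + 8]byte
        copy(edgeKey[:33], nodePub[:])
        binary.BigEndian.PutUint64(edgeKey[33:], chanID)

        // A range scan over a node's outgoing edges starts at
        // pubKey || chanStart (an all-zero channel ID) and continues for as
        // long as the 33-byte pubkey prefix still matches.
        scanStart := append(nodePub[:], chanStart[:]...)

        return edgeKey[:], scanStart
}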
166

167
const (
168
        // MaxAllowedExtraOpaqueBytes is the largest amount of opaque bytes that
169
        // we'll permit to be written to disk. We limit this as otherwise, it
170
        // would be possible for a node to create a ton of updates and slowly
171
        // fill our disk, and also waste bandwidth due to relaying.
172
        MaxAllowedExtraOpaqueBytes = 10000
173
)
174

175
// KVStore is a persistent, on-disk graph representation of the Lightning
176
// Network. This struct can be used to implement path finding algorithms on top
177
// of, and also to update a node's view based on information received from the
178
// p2p network. Internally, the graph is stored using a modified adjacency list
179
// representation with some added object interaction possible with each
180
// serialized edge/node. The stored graph is directed, meaning there are two
181
// edges stored for each channel: an inbound/outbound edge for each node pair.
182
// Nodes, edges, and edge information can all be added to the graph
183
// independently. Edge removal results in the deletion of all edge information
184
// for that edge.
185
type KVStore struct {
186
        db kvdb.Backend
187

188
        // cacheMu guards all caches (rejectCache and chanCache). If this
189
        // mutex is to be acquired at the same time as the DB mutex, then
190
        // cacheMu MUST be acquired first to prevent deadlock. (A lock-order
        // sketch follows this struct definition.)
191
        cacheMu     sync.RWMutex
192
        rejectCache *rejectCache
193
        chanCache   *channelCache
194

195
        chanScheduler batch.Scheduler[kvdb.RwTx]
196
        nodeScheduler batch.Scheduler[kvdb.RwTx]
197
}
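
// NOTE: The method below is an illustrative sketch only and is not part of the
// original file. It demonstrates the lock ordering documented on cacheMu: when
// the cache mutex and a DB transaction are both needed, cacheMu is acquired
// before entering the DB transaction. The method name and body are
// hypothetical.
func (c *KVStore) exampleEvictUnderLock(chanID uint64) error {
        c.cacheMu.Lock()
        defer c.cacheMu.Unlock()

        // The DB transaction is only entered once cacheMu is already held,
        // matching the documented acquisition order.
        return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
                c.rejectCache.remove(chanID)
                c.chanCache.remove(chanID)

                return nil
        }, func() {})
}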
198

199
// A compile-time assertion to ensure that the KVStore struct implements the
200
// V1Store interface.
201
var _ V1Store = (*KVStore)(nil)
202

203
// NewKVStore allocates a new KVStore backed by a DB instance. The
204
// returned instance has its own unique reject cache and channel cache.
205
func NewKVStore(db kvdb.Backend, options ...StoreOptionModifier) (*KVStore,
206
        error) {
170✔
207

170✔
208
        opts := DefaultOptions()
170✔
209
        for _, o := range options {
170✔
210
                o(opts)
×
211
        }
×
212

213
        if !opts.NoMigration {
340✔
214
                if err := initKVStore(db); err != nil {
170✔
215
                        return nil, err
×
216
                }
×
217
        }
218

219
        g := &KVStore{
170✔
220
                db:          db,
170✔
221
                rejectCache: newRejectCache(opts.RejectCacheSize),
170✔
222
                chanCache:   newChannelCache(opts.ChannelCacheSize),
170✔
223
        }
170✔
224
        g.chanScheduler = batch.NewTimeScheduler(
170✔
225
                batch.NewBoltBackend[kvdb.RwTx](db), &g.cacheMu,
170✔
226
                opts.BatchCommitInterval,
170✔
227
        )
170✔
228
        g.nodeScheduler = batch.NewTimeScheduler(
170✔
229
                batch.NewBoltBackend[kvdb.RwTx](db), nil,
170✔
230
                opts.BatchCommitInterval,
170✔
231
        )
170✔
232

170✔
233
        return g, nil
170✔
234
}
235

236
// channelMapKey is the key structure used for storing channel edge policies.
237
type channelMapKey struct {
238
        nodeKey route.Vertex
239
        chanID  [8]byte
240
}
241

242
// String returns a human-readable representation of the key.
243
func (c channelMapKey) String() string {
×
244
        return fmt.Sprintf("node=%v, chanID=%x", c.nodeKey, c.chanID)
×
245
}
×
246

247
// getChannelMap loads all channel edge policies from the database and stores
248
// them in a map.
249
func getChannelMap(edges kvdb.RBucket) (
250
        map[channelMapKey]*models.ChannelEdgePolicy, error) {
145✔
251

145✔
252
        // Create a map to store all channel edge policies.
145✔
253
        channelMap := make(map[channelMapKey]*models.ChannelEdgePolicy)
145✔
254

145✔
255
        err := kvdb.ForAll(edges, func(k, edgeBytes []byte) error {
1,729✔
256
                // Skip embedded buckets.
1,584✔
257
                if bytes.Equal(k, edgeIndexBucket) ||
1,584✔
258
                        bytes.Equal(k, edgeUpdateIndexBucket) ||
1,584✔
259
                        bytes.Equal(k, zombieBucket) ||
1,584✔
260
                        bytes.Equal(k, disabledEdgePolicyBucket) ||
1,584✔
261
                        bytes.Equal(k, channelPointBucket) {
2,172✔
262

588✔
263
                        return nil
588✔
264
                }
588✔
265

266
                // Validate key length.
267
                if len(k) != 33+8 {
996✔
268
                        return fmt.Errorf("invalid edge key %x encountered", k)
×
269
                }
×
270

271
                var key channelMapKey
996✔
272
                copy(key.nodeKey[:], k[:33])
996✔
273
                copy(key.chanID[:], k[33:])
996✔
274

996✔
275
                // No need to deserialize unknown policy.
996✔
276
                if bytes.Equal(edgeBytes, unknownPolicy) {
996✔
277
                        return nil
×
278
                }
×
279

280
                edgeReader := bytes.NewReader(edgeBytes)
996✔
281
                edge, err := deserializeChanEdgePolicyRaw(
996✔
282
                        edgeReader,
996✔
283
                )
996✔
284

996✔
285
                switch {
996✔
286
                // If the db policy was missing an expected optional field, we
287
                // return nil as if the policy was unknown.
288
                case errors.Is(err, ErrEdgePolicyOptionalFieldNotFound):
×
289
                        return nil
×
290

291
                // We don't want a single policy with bad TLV data to stop us
292
                // from loading the rest of the data, so we just skip this
293
                // policy. This is for backwards compatibility since we did not
294
                // use to validate TLV data in the past before persisting it.
295
                case errors.Is(err, ErrParsingExtraTLVBytes):
×
296
                        return nil
×
297

298
                case err != nil:
×
299
                        return err
×
300
                }
301

302
                channelMap[key] = edge
996✔
303

996✔
304
                return nil
996✔
305
        })
306
        if err != nil {
145✔
307
                return nil, err
×
308
        }
×
309

310
        return channelMap, nil
145✔
311
}
312

313
var graphTopLevelBuckets = [][]byte{
314
        nodeBucket,
315
        edgeBucket,
316
        graphMetaBucket,
317
        closedScidBucket,
318
}
319

320
// initKVStore creates and initializes a fresh version of the channel graph
321
// store. In the case that the target path has not yet been created or doesn't
322
// yet exist, then the path is created. Additionally, all required top-level
323
// buckets used within the database are created.
324
func initKVStore(db kvdb.Backend) error {
170✔
325
        err := kvdb.Update(db, func(tx kvdb.RwTx) error {
340✔
326
                for _, tlb := range graphTopLevelBuckets {
850✔
327
                        if _, err := tx.CreateTopLevelBucket(tlb); err != nil {
680✔
328
                                return err
×
329
                        }
×
330
                }
331

332
                nodes := tx.ReadWriteBucket(nodeBucket)
170✔
333
                _, err := nodes.CreateBucketIfNotExists(aliasIndexBucket)
170✔
334
                if err != nil {
170✔
335
                        return err
×
336
                }
×
337
                _, err = nodes.CreateBucketIfNotExists(nodeUpdateIndexBucket)
170✔
338
                if err != nil {
170✔
339
                        return err
×
340
                }
×
341

342
                edges := tx.ReadWriteBucket(edgeBucket)
170✔
343
                _, err = edges.CreateBucketIfNotExists(edgeIndexBucket)
170✔
344
                if err != nil {
170✔
345
                        return err
×
346
                }
×
347
                _, err = edges.CreateBucketIfNotExists(edgeUpdateIndexBucket)
170✔
348
                if err != nil {
170✔
349
                        return err
×
350
                }
×
351
                _, err = edges.CreateBucketIfNotExists(channelPointBucket)
170✔
352
                if err != nil {
170✔
353
                        return err
×
354
                }
×
355
                _, err = edges.CreateBucketIfNotExists(zombieBucket)
170✔
356
                if err != nil {
170✔
357
                        return err
×
358
                }
×
359

360
                graphMeta := tx.ReadWriteBucket(graphMetaBucket)
170✔
361
                _, err = graphMeta.CreateBucketIfNotExists(pruneLogBucket)
170✔
362

170✔
363
                return err
170✔
364
        }, func() {})
170✔
365
        if err != nil {
170✔
366
                return fmt.Errorf("unable to create new channel graph: %w", err)
×
367
        }
×
368

369
        return nil
170✔
370
}
371

372
// AddrsForNode returns all known addresses for the target node public key that
373
// the graph DB is aware of. The returned boolean indicates if the given node is
374
// unknown to the graph DB or not.
375
//
376
// NOTE: this is part of the channeldb.AddrSource interface.
377
func (c *KVStore) AddrsForNode(ctx context.Context,
378
        nodePub *btcec.PublicKey) (bool, []net.Addr, error) {
3✔
379

3✔
380
        pubKey, err := route.NewVertexFromBytes(nodePub.SerializeCompressed())
3✔
381
        if err != nil {
3✔
382
                return false, nil, err
×
383
        }
×
384

385
        node, err := c.FetchLightningNode(ctx, pubKey)
3✔
386
        // We don't consider it an error if the graph is unaware of the node.
3✔
387
        switch {
3✔
388
        case err != nil && !errors.Is(err, ErrGraphNodeNotFound):
×
389
                return false, nil, err
×
390

391
        case errors.Is(err, ErrGraphNodeNotFound):
1✔
392
                return false, nil, nil
1✔
393
        }
394

395
        return true, node.Addresses, nil
2✔
396
}
397

398
// ForEachChannel iterates through all the channel edges stored within the
399
// graph and invokes the passed callback for each edge. The callback takes two
400
// edges as, since this is a directed graph, both the in/out edges are visited.
401
// If the callback returns an error, then the transaction is aborted and the
402
// iteration stops early.
403
//
404
// NOTE: If an edge can't be found, or wasn't advertised, then a nil pointer
405
// for that particular channel edge routing policy will be passed into the
406
// callback.
407
func (c *KVStore) ForEachChannel(_ context.Context,
408
        cb func(*models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
409
                *models.ChannelEdgePolicy) error, reset func()) error {
7✔
410

7✔
411
        return forEachChannel(c.db, cb, reset)
7✔
412
}
7✔
413

414
// forEachChannel iterates through all the channel edges stored within the
415
// graph and invokes the passed callback for each edge. The callback takes two
416
// edges as, since this is a directed graph, both the in/out edges are visited.
417
// If the callback returns an error, then the transaction is aborted and the
418
// iteration stops early.
419
//
420
// NOTE: If an edge can't be found, or wasn't advertised, then a nil pointer
421
// for that particular channel edge routing policy will be passed into the
422
// callback.
423
func forEachChannel(db kvdb.Backend, cb func(*models.ChannelEdgeInfo,
424
        *models.ChannelEdgePolicy, *models.ChannelEdgePolicy) error,
425
        reset func()) error {
7✔
426

7✔
427
        return db.View(func(tx kvdb.RTx) error {
14✔
428
                edges := tx.ReadBucket(edgeBucket)
7✔
429
                if edges == nil {
7✔
430
                        return ErrGraphNoEdgesFound
×
431
                }
×
432

433
                // First, load all edges in memory indexed by node and channel
434
                // id.
435
                channelMap, err := getChannelMap(edges)
7✔
436
                if err != nil {
7✔
437
                        return err
×
438
                }
×
439

440
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
7✔
441
                if edgeIndex == nil {
7✔
442
                        return ErrGraphNoEdgesFound
×
443
                }
×
444

445
                // Load edge index, recombine each channel with the policies
446
                // loaded above and invoke the callback.
447
                return kvdb.ForAll(
7✔
448
                        edgeIndex, func(k, edgeInfoBytes []byte) error {
109✔
449
                                var chanID [8]byte
102✔
450
                                copy(chanID[:], k)
102✔
451

102✔
452
                                edgeInfoReader := bytes.NewReader(edgeInfoBytes)
102✔
453
                                info, err := deserializeChanEdgeInfo(
102✔
454
                                        edgeInfoReader,
102✔
455
                                )
102✔
456
                                if err != nil {
102✔
457
                                        return err
×
458
                                }
×
459

460
                                policy1 := channelMap[channelMapKey{
102✔
461
                                        nodeKey: info.NodeKey1Bytes,
102✔
462
                                        chanID:  chanID,
102✔
463
                                }]
102✔
464

102✔
465
                                policy2 := channelMap[channelMapKey{
102✔
466
                                        nodeKey: info.NodeKey2Bytes,
102✔
467
                                        chanID:  chanID,
102✔
468
                                }]
102✔
469

102✔
470
                                return cb(&info, policy1, policy2)
102✔
471
                        },
472
                )
473
        }, reset)
474
}
475
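
// NOTE: The function below is an illustrative usage sketch only and is not
// part of the original file. It counts channels via ForEachChannel and shows
// how the nil-policy convention described above is handled. The function name
// is hypothetical and assumes an already-initialized *KVStore.
func exampleCountChannels(ctx context.Context, store *KVStore) (int, error) {
        var numChans int

        err := store.ForEachChannel(ctx, func(info *models.ChannelEdgeInfo,
                p1, p2 *models.ChannelEdgePolicy) error {

                // Per the NOTE above, a nil policy simply means that this
                // direction of the channel hasn't been advertised yet; only
                // count channels for which both directions are known.
                if p1 != nil && p2 != nil {
                        numChans++
                }

                return nil
        }, func() {
                // The reset closure runs if the underlying read transaction
                // is retried, so any accumulated state must be cleared.
                numChans = 0
        })

        return numChans, err
}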

476
// ForEachChannelCacheable iterates through all the channel edges stored within
477
// the graph and invokes the passed callback for each edge. The callback takes
478
// two edges as, since this is a directed graph, both the in/out edges are
479
// visited. If the callback returns an error, then the transaction is aborted
480
// and the iteration stops early.
481
//
482
// NOTE: If an edge can't be found, or wasn't advertised, then a nil pointer
483
// for that particular channel edge routing policy will be passed into the
484
// callback.
485
//
486
// NOTE: this method is like ForEachChannel but fetches only the data required
487
// for the graph cache.
488
func (c *KVStore) ForEachChannelCacheable(cb func(*models.CachedEdgeInfo,
489
        *models.CachedEdgePolicy, *models.CachedEdgePolicy) error,
490
        reset func()) error {
138✔
491

138✔
492
        return c.db.View(func(tx kvdb.RTx) error {
276✔
493
                edges := tx.ReadBucket(edgeBucket)
138✔
494
                if edges == nil {
138✔
495
                        return ErrGraphNoEdgesFound
×
496
                }
×
497

498
                // First, load all edges in memory indexed by node and channel
499
                // id.
500
                channelMap, err := getChannelMap(edges)
138✔
501
                if err != nil {
138✔
502
                        return err
×
503
                }
×
504

505
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
138✔
506
                if edgeIndex == nil {
138✔
507
                        return ErrGraphNoEdgesFound
×
508
                }
×
509

510
                // Load edge index, recombine each channel with the policies
511
                // loaded above and invoke the callback.
512
                return kvdb.ForAll(
138✔
513
                        edgeIndex, func(k, edgeInfoBytes []byte) error {
534✔
514
                                var chanID [8]byte
396✔
515
                                copy(chanID[:], k)
396✔
516

396✔
517
                                edgeInfoReader := bytes.NewReader(edgeInfoBytes)
396✔
518
                                info, err := deserializeChanEdgeInfo(
396✔
519
                                        edgeInfoReader,
396✔
520
                                )
396✔
521
                                if err != nil {
396✔
522
                                        return err
×
523
                                }
×
524

525
                                key1 := channelMapKey{
396✔
526
                                        nodeKey: info.NodeKey1Bytes,
396✔
527
                                        chanID:  chanID,
396✔
528
                                }
396✔
529
                                policy1 := channelMap[key1]
396✔
530

396✔
531
                                key2 := channelMapKey{
396✔
532
                                        nodeKey: info.NodeKey2Bytes,
396✔
533
                                        chanID:  chanID,
396✔
534
                                }
396✔
535
                                policy2 := channelMap[key2]
396✔
536

396✔
537
                                // We now create the cached edge policies, but
396✔
538
                                // only when the above policies are found in the
396✔
539
                                // `channelMap`.
396✔
540
                                var (
396✔
541
                                        cachedPolicy1 *models.CachedEdgePolicy
396✔
542
                                        cachedPolicy2 *models.CachedEdgePolicy
396✔
543
                                )
396✔
544

396✔
545
                                if policy1 != nil {
792✔
546
                                        cachedPolicy1 = models.NewCachedPolicy(
396✔
547
                                                policy1,
396✔
548
                                        )
396✔
549
                                }
396✔
550

551
                                if policy2 != nil {
792✔
552
                                        cachedPolicy2 = models.NewCachedPolicy(
396✔
553
                                                policy2,
396✔
554
                                        )
396✔
555
                                }
396✔
556

557
                                return cb(
396✔
558
                                        models.NewCachedEdge(&info),
396✔
559
                                        cachedPolicy1, cachedPolicy2,
396✔
560
                                )
396✔
561
                        },
562
                )
563
        }, reset)
564
}
565

566
// forEachNodeDirectedChannel iterates through all channels of a given node,
567
// executing the passed callback on the directed edge representing the channel
568
// and its incoming policy. If the callback returns an error, then the iteration
569
// is halted with the error propagated back up to the caller. An optional read
570
// transaction may be provided. If none is provided, a new one will be created.
571
//
572
// Unknown policies are passed into the callback as nil values.
573
//
574
// NOTE: the reset param is only meaningful if the tx param is nil. If it is
575
// not nil, the caller is expected to have passed in a reset to the parent
576
// function's View/Update call which will then apply to the whole transaction.
577
func (c *KVStore) forEachNodeDirectedChannel(tx kvdb.RTx,
578
        node route.Vertex, cb func(channel *DirectedChannel) error,
579
        reset func()) error {
262✔
580

262✔
581
        // Fallback that uses the database.
262✔
582
        toNodeCallback := func() route.Vertex {
394✔
583
                return node
132✔
584
        }
132✔
585
        toNodeFeatures, err := c.fetchNodeFeatures(tx, node)
262✔
586
        if err != nil {
262✔
587
                return err
×
588
        }
×
589

590
        dbCallback := func(tx kvdb.RTx, e *models.ChannelEdgeInfo, p1,
262✔
591
                p2 *models.ChannelEdgePolicy) error {
948✔
592

686✔
593
                var cachedInPolicy *models.CachedEdgePolicy
686✔
594
                if p2 != nil {
1,369✔
595
                        cachedInPolicy = models.NewCachedPolicy(p2)
683✔
596
                        cachedInPolicy.ToNodePubKey = toNodeCallback
683✔
597
                        cachedInPolicy.ToNodeFeatures = toNodeFeatures
683✔
598
                }
683✔
599

600
                directedChannel := &DirectedChannel{
686✔
601
                        ChannelID:    e.ChannelID,
686✔
602
                        IsNode1:      node == e.NodeKey1Bytes,
686✔
603
                        OtherNode:    e.NodeKey2Bytes,
686✔
604
                        Capacity:     e.Capacity,
686✔
605
                        OutPolicySet: p1 != nil,
686✔
606
                        InPolicy:     cachedInPolicy,
686✔
607
                }
686✔
608

686✔
609
                if p1 != nil {
1,371✔
610
                        p1.InboundFee.WhenSome(func(fee lnwire.Fee) {
1,021✔
611
                                directedChannel.InboundFee = fee
336✔
612
                        })
336✔
613
                }
614

615
                if node == e.NodeKey2Bytes {
1,032✔
616
                        directedChannel.OtherNode = e.NodeKey1Bytes
346✔
617
                }
346✔
618

619
                return cb(directedChannel)
686✔
620
        }
621

622
        return nodeTraversal(tx, node[:], c.db, dbCallback, reset)
262✔
623
}
624

625
// fetchNodeFeatures returns the features of a given node. If no features are
626
// known for the node, an empty feature vector is returned. An optional read
627
// transaction may be provided. If none is provided, a new one will be created.
628
func (c *KVStore) fetchNodeFeatures(tx kvdb.RTx,
629
        node route.Vertex) (*lnwire.FeatureVector, error) {
3,651✔
630

3,651✔
631
        // Fallback that uses the database.
3,651✔
632
        targetNode, err := c.FetchLightningNodeTx(tx, node)
3,651✔
633
        switch {
3,651✔
634
        // If the node exists and has features, return them directly.
635
        case err == nil:
3,640✔
636
                return targetNode.Features, nil
3,640✔
637

638
        // If we couldn't find a node announcement, populate a blank feature
639
        // vector.
640
        case errors.Is(err, ErrGraphNodeNotFound):
11✔
641
                return lnwire.EmptyFeatureVector(), nil
11✔
642

643
        // Otherwise, bubble the error up.
644
        default:
×
645
                return nil, err
×
646
        }
647
}
648

649
// ForEachNodeDirectedChannel iterates through all channels of a given node,
650
// executing the passed callback on the directed edge representing the channel
651
// and its incoming policy. If the callback returns an error, then the iteration
652
// is halted with the error propagated back up to the caller.
653
//
654
// Unknown policies are passed into the callback as nil values.
655
//
656
// NOTE: this is part of the graphdb.NodeTraverser interface.
657
func (c *KVStore) ForEachNodeDirectedChannel(nodePub route.Vertex,
658
        cb func(channel *DirectedChannel) error, reset func()) error {
23✔
659

23✔
660
        return c.forEachNodeDirectedChannel(nil, nodePub, cb, reset)
23✔
661
}
23✔
662
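
// NOTE: The function below is an illustrative usage sketch only and is not
// part of the original file. It collects the counterparties a node has
// channels with, using the DirectedChannel passed to the callback. The
// function name is hypothetical and assumes an already-initialized *KVStore.
func examplePeersOf(store *KVStore,
        node route.Vertex) ([]route.Vertex, error) {

        var peers []route.Vertex

        err := store.ForEachNodeDirectedChannel(node, func(
                ch *DirectedChannel) error {

                // OtherNode is already normalized to the counterparty,
                // regardless of which side of the channel `node` is on.
                peers = append(peers, ch.OtherNode)

                return nil
        }, func() {
                // Clear partial results if the read transaction is retried.
                peers = nil
        })

        return peers, err
}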

663
// FetchNodeFeatures returns the features of the given node. If no features are
664
// known for the node, an empty feature vector is returned.
665
//
666
// NOTE: this is part of the graphdb.NodeTraverser interface.
667
func (c *KVStore) FetchNodeFeatures(nodePub route.Vertex) (
668
        *lnwire.FeatureVector, error) {
1✔
669

1✔
670
        return c.fetchNodeFeatures(nil, nodePub)
1✔
671
}
1✔
672

673
// ForEachNodeCached is similar to forEachNode, but it returns DirectedChannel
674
// data to the call-back.
675
//
676
// NOTE: The callback contents MUST not be modified.
677
func (c *KVStore) ForEachNodeCached(_ context.Context,
678
        cb func(node route.Vertex, chans map[uint64]*DirectedChannel) error,
679
        reset func()) error {
120✔
680

120✔
681
        // Otherwise call back to a version that uses the database directly.
120✔
682
        // We'll iterate over each node, then the set of channels for each
120✔
683
        // node, and construct a similar callback function signature as the
120✔
684
        // main function expects.
120✔
685
        return forEachNode(c.db, func(tx kvdb.RTx,
120✔
686
                node *models.LightningNode) error {
1,106✔
687

986✔
688
                channels := make(map[uint64]*DirectedChannel)
986✔
689

986✔
690
                err := c.forEachNodeChannelTx(tx, node.PubKeyBytes,
986✔
691
                        func(tx kvdb.RTx, e *models.ChannelEdgeInfo,
986✔
692
                                p1 *models.ChannelEdgePolicy,
986✔
693
                                p2 *models.ChannelEdgePolicy) error {
4,120✔
694

3,134✔
695
                                toNodeCallback := func() route.Vertex {
3,134✔
696
                                        return node.PubKeyBytes
×
697
                                }
×
698
                                toNodeFeatures, err := c.fetchNodeFeatures(
3,134✔
699
                                        tx, node.PubKeyBytes,
3,134✔
700
                                )
3,134✔
701
                                if err != nil {
3,134✔
702
                                        return err
×
703
                                }
×
704

705
                                var cachedInPolicy *models.CachedEdgePolicy
3,134✔
706
                                if p2 != nil {
6,268✔
707
                                        cachedInPolicy =
3,134✔
708
                                                models.NewCachedPolicy(p2)
3,134✔
709
                                        cachedInPolicy.ToNodePubKey =
3,134✔
710
                                                toNodeCallback
3,134✔
711
                                        cachedInPolicy.ToNodeFeatures =
3,134✔
712
                                                toNodeFeatures
3,134✔
713
                                }
3,134✔
714

715
                                directedChannel := &DirectedChannel{
3,134✔
716
                                        ChannelID: e.ChannelID,
3,134✔
717
                                        IsNode1: node.PubKeyBytes ==
3,134✔
718
                                                e.NodeKey1Bytes,
3,134✔
719
                                        OtherNode:    e.NodeKey2Bytes,
3,134✔
720
                                        Capacity:     e.Capacity,
3,134✔
721
                                        OutPolicySet: p1 != nil,
3,134✔
722
                                        InPolicy:     cachedInPolicy,
3,134✔
723
                                }
3,134✔
724

3,134✔
725
                                if node.PubKeyBytes == e.NodeKey2Bytes {
4,701✔
726
                                        directedChannel.OtherNode =
1,567✔
727
                                                e.NodeKey1Bytes
1,567✔
728
                                }
1,567✔
729

730
                                channels[e.ChannelID] = directedChannel
3,134✔
731

3,134✔
732
                                return nil
3,134✔
733
                        }, reset,
734
                )
735
                if err != nil {
986✔
736
                        return err
×
737
                }
×
738

739
                return cb(node.PubKeyBytes, channels)
986✔
740
        }, reset)
1,952✔
741
}
966✔
742
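
// NOTE: The function below is an illustrative usage sketch only and is not
// part of the original file. It tallies the number of channels per node using
// ForEachNodeCached. The function name is hypothetical and assumes an
// already-initialized *KVStore.
func exampleChannelCounts(ctx context.Context,
        store *KVStore) (map[route.Vertex]int, error) {

        counts := make(map[route.Vertex]int)

        err := store.ForEachNodeCached(ctx, func(node route.Vertex,
                chans map[uint64]*DirectedChannel) error {

                counts[node] = len(chans)

                return nil
        }, func() {
                // Discard partial results if the read transaction is retried.
                counts = make(map[route.Vertex]int)
        })

        return counts, err
}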

966✔
743
// DisabledChannelIDs returns the channel ids of disabled channels.
744
// A channel is disabled when two of the associated ChannelEdgePolicies
986✔
745
// have their disabled bit on.
746
func (c *KVStore) DisabledChannelIDs() ([]uint64, error) {
747
        var disabledChanIDs []uint64
748
        var chanEdgeFound map[uint64]struct{}
749

750
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
751
                edges := tx.ReadBucket(edgeBucket)
6✔
752
                if edges == nil {
6✔
753
                        return ErrGraphNoEdgesFound
6✔
754
                }
6✔
755

12✔
756
                disabledEdgePolicyIndex := edges.NestedReadBucket(
6✔
757
                        disabledEdgePolicyBucket,
6✔
758
                )
×
759
                if disabledEdgePolicyIndex == nil {
×
760
                        return nil
761
                }
6✔
762

6✔
763
                // We iterate over all disabled policies and we add each channel
6✔
764
                // that has more than one disabled policy to disabledChanIDs
7✔
765
                // array.
1✔
766
                return disabledEdgePolicyIndex.ForEach(
1✔
767
                        func(k, v []byte) error {
768
                                chanID := byteOrder.Uint64(k[:8])
769
                                _, edgeFound := chanEdgeFound[chanID]
770
                                if edgeFound {
771
                                        delete(chanEdgeFound, chanID)
5✔
772
                                        disabledChanIDs = append(
16✔
773
                                                disabledChanIDs, chanID,
11✔
774
                                        )
11✔
775

15✔
776
                                        return nil
4✔
777
                                }
4✔
778

4✔
779
                                chanEdgeFound[chanID] = struct{}{}
4✔
780

4✔
781
                                return nil
4✔
782
                        },
4✔
783
                )
784
        }, func() {
7✔
785
                disabledChanIDs = nil
7✔
786
                chanEdgeFound = make(map[uint64]struct{})
7✔
787
        })
788
        if err != nil {
789
                return nil, err
6✔
790
        }
6✔
791

6✔
792
        return disabledChanIDs, nil
6✔
793
}
6✔
794

×
795
// ForEachNode iterates through all the stored vertices/nodes in the graph,
×
796
// executing the passed callback with each node encountered. If the callback
797
// returns an error, then the transaction is aborted and the iteration stops
6✔
798
// early. Any operations performed on the NodeTx passed to the call-back are
799
// executed under the same read transaction and so, methods on the NodeTx object
800
// _MUST_ only be called from within the call-back.
801
func (c *KVStore) ForEachNode(_ context.Context,
802
        cb func(tx NodeRTx) error, reset func()) error {
803

804
        return forEachNode(c.db, func(tx kvdb.RTx,
805
                node *models.LightningNode) error {
806

807
                return cb(newChanGraphNodeTx(tx, c, node))
10✔
808
        }, reset)
10✔
809
}
10✔
810

203✔
811
// forEachNode iterates through all the stored vertices/nodes in the graph,
193✔
812
// executing the passed callback with each node encountered. If the callback
193✔
813
// returns an error, then the transaction is aborted and the iteration stops
193✔
814
// early.
815
//
816
// TODO(roasbeef): add iterator interface to allow for memory efficient graph
817
// traversal when graph gets mega.
818
func forEachNode(db kvdb.Backend,
819
        cb func(kvdb.RTx, *models.LightningNode) error, reset func()) error {
820

821
        traversal := func(tx kvdb.RTx) error {
822
                // First grab the nodes bucket which stores the mapping from
823
                // pubKey to node information.
824
                nodes := tx.ReadBucket(nodeBucket)
130✔
825
                if nodes == nil {
130✔
826
                        return ErrGraphNotFound
260✔
827
                }
130✔
828

130✔
829
                return nodes.ForEach(func(pubKey, nodeBytes []byte) error {
130✔
830
                        // If this is the source key, then we skip this
130✔
831
                        // iteration as the value for this key is a pubKey
×
832
                        // rather than raw node information.
×
833
                        if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 {
834
                                return nil
1,572✔
835
                        }
1,442✔
836

1,442✔
837
                        nodeReader := bytes.NewReader(nodeBytes)
1,442✔
838
                        node, err := deserializeLightningNode(nodeReader)
1,705✔
839
                        if err != nil {
263✔
840
                                return err
263✔
841
                        }
842

1,179✔
843
                        // Execute the callback, the transaction will abort if
1,179✔
844
                        // this returns an error.
1,179✔
845
                        return cb(tx, &node)
×
846
                })
×
847
        }
848

849
        return kvdb.View(db, traversal, reset)
850
}
1,179✔
851

852
// ForEachNodeCacheable iterates through all the stored vertices/nodes in the
853
// graph, executing the passed callback with each node encountered. If the
854
// callback returns an error, then the transaction is aborted and the iteration
130✔
855
// stops early.
856
func (c *KVStore) ForEachNodeCacheable(_ context.Context,
857
        cb func(route.Vertex, *lnwire.FeatureVector) error,
858
        reset func()) error {
859

860
        traversal := func(tx kvdb.RTx) error {
861
                // First grab the nodes bucket which stores the mapping from
862
                // pubKey to node information.
863
                nodes := tx.ReadBucket(nodeBucket)
139✔
864
                if nodes == nil {
139✔
865
                        return ErrGraphNotFound
278✔
866
                }
139✔
867

139✔
868
                return nodes.ForEach(func(pubKey, nodeBytes []byte) error {
139✔
869
                        // If this is the source key, then we skip this
139✔
870
                        // iteration as the value for this key is a pubKey
×
871
                        // rather than raw node information.
×
872
                        if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 {
873
                                return nil
537✔
874
                        }
398✔
875

398✔
876
                        nodeReader := bytes.NewReader(nodeBytes)
398✔
877
                        node, features, err := deserializeLightningNodeCacheable( //nolint:ll
676✔
878
                                nodeReader,
278✔
879
                        )
278✔
880
                        if err != nil {
881
                                return err
120✔
882
                        }
120✔
883

120✔
884
                        // Execute the callback, the transaction will abort if
120✔
885
                        // this returns an error.
120✔
886
                        return cb(node, features)
×
887
                })
×
888
        }
889

890
        return kvdb.View(c.db, traversal, reset)
891
}
120✔
892

893
// SourceNode returns the source node of the graph. The source node is treated
894
// as the center node within a star-graph. This method may be used to kick off
895
// a path finding algorithm in order to explore the reachability of another
139✔
896
// node based off the source node.
897
func (c *KVStore) SourceNode(_ context.Context) (*models.LightningNode, error) {
898
        return sourceNode(c.db)
899
}
900

901
// sourceNode fetches the source node of the graph. The source node is treated
902
// as the center node within a star-graph.
238✔
903
func sourceNode(db kvdb.Backend) (*models.LightningNode, error) {
238✔
904
        var source *models.LightningNode
238✔
905
        err := kvdb.View(db, func(tx kvdb.RTx) error {
906
                // First grab the nodes bucket which stores the mapping from
907
                // pubKey to node information.
908
                nodes := tx.ReadBucket(nodeBucket)
238✔
909
                if nodes == nil {
238✔
910
                        return ErrGraphNotFound
476✔
911
                }
238✔
912

238✔
913
                node, err := sourceNodeWithTx(nodes)
238✔
914
                if err != nil {
238✔
915
                        return err
×
916
                }
×
917
                source = node
918

238✔
919
                return nil
239✔
920
        }, func() {
1✔
921
                source = nil
1✔
922
        })
237✔
923
        if err != nil {
237✔
924
                return nil, err
237✔
925
        }
238✔
926

238✔
927
        return source, nil
238✔
928
}
239✔
929

1✔
930
// sourceNodeWithTx uses an existing database transaction and returns the source
1✔
931
// node of the graph. The source node is treated as the center node within a
932
// star-graph. This method may be used to kick off a path finding algorithm in
237✔
933
// order to explore the reachability of another node based off the source node.
934
func sourceNodeWithTx(nodes kvdb.RBucket) (*models.LightningNode, error) {
935
        selfPub := nodes.Get(sourceKey)
936
        if selfPub == nil {
937
                return nil, ErrSourceNodeNotSet
938
        }
939

493✔
940
        // With the pubKey of the source node retrieved, we're able to
493✔
941
        // fetch the full node information.
494✔
942
        node, err := fetchLightningNode(nodes, selfPub)
1✔
943
        if err != nil {
1✔
944
                return nil, err
945
        }
946

947
        return &node, nil
492✔
948
}
492✔
949

×
950
// SetSourceNode sets the source node within the graph database. The source
×
951
// node is to be used as the center of a star-graph within path finding
952
// algorithms.
492✔
953
func (c *KVStore) SetSourceNode(_ context.Context,
954
        node *models.LightningNode) error {
955

956
        nodePubBytes := node.PubKeyBytes[:]
957

958
        return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
959
                // First grab the nodes bucket which stores the mapping from
114✔
960
                // pubKey to node information.
114✔
961
                nodes, err := tx.CreateTopLevelBucket(nodeBucket)
114✔
962
                if err != nil {
114✔
963
                        return err
228✔
964
                }
114✔
965

114✔
966
                // Next we create the mapping from source to the targeted
114✔
967
                // public key.
114✔
968
                if err := nodes.Put(sourceKey, nodePubBytes); err != nil {
×
969
                        return err
×
970
                }
971

972
                // Finally, we commit the information of the lightning node
973
                // itself.
114✔
974
                return addLightningNode(tx, node)
×
975
        }, func() {})
×
976
}
977

978
// AddLightningNode adds a vertex/node to the graph database. If the node is not
979
// in the database from before, this will add a new, unconnected one to the
114✔
980
// graph. If it is present from before, this will update that node's
114✔
981
// information. Note that this method is expected to only be called to update an
982
// already present node from a node announcement, or to insert a node found in a
983
// channel update.
984
//
985
// TODO(roasbeef): also need sig of announcement.
986
func (c *KVStore) AddLightningNode(ctx context.Context,
987
        node *models.LightningNode, opts ...batch.SchedulerOption) error {
988

989
        r := &batch.Request[kvdb.RwTx]{
990
                Opts: batch.NewSchedulerOptions(opts...),
991
                Do: func(tx kvdb.RwTx) error {
992
                        return addLightningNode(tx, node)
712✔
993
                },
712✔
994
        }
712✔
995

712✔
996
        return c.nodeScheduler.Execute(ctx, r)
1,424✔
997
}
712✔
998

712✔
999
func addLightningNode(tx kvdb.RwTx, node *models.LightningNode) error {
1000
        nodes, err := tx.CreateTopLevelBucket(nodeBucket)
1001
        if err != nil {
712✔
1002
                return err
1003
        }
1004

906✔
1005
        aliases, err := nodes.CreateBucketIfNotExists(aliasIndexBucket)
906✔
1006
        if err != nil {
906✔
1007
                return err
×
1008
        }
×
1009

1010
        updateIndex, err := nodes.CreateBucketIfNotExists(
906✔
1011
                nodeUpdateIndexBucket,
906✔
1012
        )
×
1013
        if err != nil {
×
1014
                return err
1015
        }
906✔
1016

906✔
1017
        return putLightningNode(nodes, aliases, updateIndex, node)
906✔
1018
}
906✔
1019

×
1020
// LookupAlias attempts to return the alias as advertised by the target node.
×
1021
// TODO(roasbeef): currently assumes that aliases are unique...
1022
func (c *KVStore) LookupAlias(_ context.Context,
906✔
1023
        pub *btcec.PublicKey) (string, error) {
1024

1025
        var alias string
1026

1027
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
1028
                nodes := tx.ReadBucket(nodeBucket)
2✔
1029
                if nodes == nil {
2✔
1030
                        return ErrGraphNodesNotFound
2✔
1031
                }
2✔
1032

4✔
1033
                aliases := nodes.NestedReadBucket(aliasIndexBucket)
2✔
1034
                if aliases == nil {
2✔
1035
                        return ErrGraphNodesNotFound
×
1036
                }
×
1037

1038
                nodePub := pub.SerializeCompressed()
2✔
1039
                a := aliases.Get(nodePub)
2✔
1040
                if a == nil {
×
1041
                        return ErrNodeAliasNotFound
×
1042
                }
1043

2✔
1044
                // TODO(roasbeef): should actually be using the utf-8
2✔
1045
                // package...
3✔
1046
                alias = string(a)
1✔
1047

1✔
1048
                return nil
1049
        }, func() {
1050
                alias = ""
1051
        })
1✔
1052
        if err != nil {
1✔
1053
                return "", err
1✔
1054
        }
2✔
1055

2✔
1056
        return alias, nil
2✔
1057
}
3✔
1058

1✔
1059
// DeleteLightningNode starts a new database transaction to remove a vertex/node
1✔
1060
// from the database according to the node's public key.
1061
func (c *KVStore) DeleteLightningNode(_ context.Context,
1✔
1062
        nodePub route.Vertex) error {
1063

1064
        // TODO(roasbeef): ensure dangling edges are removed...
1065
        return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
1066
                nodes := tx.ReadWriteBucket(nodeBucket)
1067
                if nodes == nil {
4✔
1068
                        return ErrGraphNodeNotFound
4✔
1069
                }
4✔
1070

8✔
1071
                return c.deleteLightningNode(nodes, nodePub[:])
4✔
1072
        }, func() {})
4✔
1073
}
×
1074

×
1075
// deleteLightningNode uses an existing database transaction to remove a
1076
// vertex/node from the database according to the node's public key.
4✔
1077
func (c *KVStore) deleteLightningNode(nodes kvdb.RwBucket,
4✔
1078
        compressedPubKey []byte) error {
1079

1080
        aliases := nodes.NestedReadWriteBucket(aliasIndexBucket)
1081
        if aliases == nil {
1082
                return ErrGraphNodesNotFound
1083
        }
65✔
1084

65✔
1085
        if err := aliases.Delete(compressedPubKey); err != nil {
65✔
1086
                return err
65✔
1087
        }
×
1088

×
1089
        // Before we delete the node, we'll fetch its current state so we can
1090
        // determine when its last update was to clear out the node update
65✔
1091
        // index.
×
1092
        node, err := fetchLightningNode(nodes, compressedPubKey)
×
1093
        if err != nil {
1094
                return err
1095
        }
1096

1097
        if err := nodes.Delete(compressedPubKey); err != nil {
65✔
1098
                return err
66✔
1099
        }
1✔
1100

1✔
1101
        // Finally, we'll delete the index entry for the node within the
1102
        // nodeUpdateIndexBucket as this node is no longer active, so we don't
64✔
1103
        // need to track its last update.
×
1104
        nodeUpdateIndex := nodes.NestedReadWriteBucket(nodeUpdateIndexBucket)
×
1105
        if nodeUpdateIndex == nil {
1106
                return ErrGraphNodesNotFound
1107
        }
1108

1109
        // In order to delete the entry, we'll need to reconstruct the key for
64✔
1110
        // its last update.
64✔
1111
        updateUnix := uint64(node.LastUpdate.Unix())
×
1112
        var indexKey [8 + 33]byte
×
1113
        byteOrder.PutUint64(indexKey[:8], updateUnix)
1114
        copy(indexKey[8:], compressedPubKey)
1115

1116
        return nodeUpdateIndex.Delete(indexKey[:])
64✔
1117
}
64✔
1118

64✔
1119
// AddChannelEdge adds a new (undirected, blank) edge to the graph database. An
64✔
1120
// undirected edge from the two target nodes is created. The information stored
64✔
1121
// denotes the static attributes of the channel, such as the channelID, the keys
64✔
1122
// involved in creation of the channel, and the set of features that the channel
1123
// supports. The chanPoint and chanID are used to uniquely identify the edge
1124
// globally within the database.
1125
func (c *KVStore) AddChannelEdge(ctx context.Context,
1126
        edge *models.ChannelEdgeInfo, opts ...batch.SchedulerOption) error {
1127

1128
        var alreadyExists bool
1129
        r := &batch.Request[kvdb.RwTx]{
1130
                Opts: batch.NewSchedulerOptions(opts...),
1131
                Reset: func() {
1,723✔
1132
                        alreadyExists = false
1,723✔
1133
                },
1,723✔
1134
                Do: func(tx kvdb.RwTx) error {
1,723✔
1135
                        err := c.addChannelEdge(tx, edge)
1,723✔
1136

3,446✔
1137
                        // Silence ErrEdgeAlreadyExist so that the batch can
1,723✔
1138
                        // succeed, but propagate the error via local state.
1,723✔
1139
                        if errors.Is(err, ErrEdgeAlreadyExist) {
1,723✔
1140
                                alreadyExists = true
1,723✔
1141
                                return nil
1,723✔
1142
                        }
1,723✔
1143

1,723✔
1144
                        return err
1,960✔
1145
                },
237✔
1146
                OnCommit: func(err error) error {
237✔
1147
                        switch {
237✔
1148
                        case err != nil:
1149
                                return err
1,486✔
1150
                        case alreadyExists:
1151
                                return ErrEdgeAlreadyExist
1,723✔
1152
                        default:
1,723✔
1153
                                c.rejectCache.remove(edge.ChannelID)
×
1154
                                c.chanCache.remove(edge.ChannelID)
×
1155
                                return nil
237✔
1156
                        }
237✔
1157
                },
1,486✔
1158
        }
1,486✔
1159

1,486✔
1160
        return c.chanScheduler.Execute(ctx, r)
1,486✔
1161
}
1162

1163
// addChannelEdge is the private form of AddChannelEdge that allows callers to
// utilize an existing db transaction.
func (c *KVStore) addChannelEdge(tx kvdb.RwTx,
        edge *models.ChannelEdgeInfo) error {

        // Construct the channel's primary key which is the 8-byte channel ID.
        var chanKey [8]byte
        binary.BigEndian.PutUint64(chanKey[:], edge.ChannelID)

        nodes, err := tx.CreateTopLevelBucket(nodeBucket)
        if err != nil {
                return err
        }
        edges, err := tx.CreateTopLevelBucket(edgeBucket)
        if err != nil {
                return err
        }
        edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket)
        if err != nil {
                return err
        }
        chanIndex, err := edges.CreateBucketIfNotExists(channelPointBucket)
        if err != nil {
                return err
        }

        // First, attempt to check if this edge has already been created. If
        // so, then we can exit early as this method is meant to be idempotent.
        if edgeInfo := edgeIndex.Get(chanKey[:]); edgeInfo != nil {
                return ErrEdgeAlreadyExist
        }

        // Before we insert the channel into the database, we'll ensure that
        // both nodes already exist in the channel graph. If either node
        // doesn't, then we'll insert a "shell" node that just includes its
        // public key, so subsequent validation and queries can work properly.
        _, node1Err := fetchLightningNode(nodes, edge.NodeKey1Bytes[:])
        switch {
        case errors.Is(node1Err, ErrGraphNodeNotFound):
                node1Shell := models.LightningNode{
                        PubKeyBytes:          edge.NodeKey1Bytes,
                        HaveNodeAnnouncement: false,
                }
                err := addLightningNode(tx, &node1Shell)
                if err != nil {
                        return fmt.Errorf("unable to create shell node "+
                                "for: %x: %w", edge.NodeKey1Bytes, err)
                }
        case node1Err != nil:
                return node1Err
        }

        _, node2Err := fetchLightningNode(nodes, edge.NodeKey2Bytes[:])
        switch {
        case errors.Is(node2Err, ErrGraphNodeNotFound):
                node2Shell := models.LightningNode{
                        PubKeyBytes:          edge.NodeKey2Bytes,
                        HaveNodeAnnouncement: false,
                }
                err := addLightningNode(tx, &node2Shell)
                if err != nil {
                        return fmt.Errorf("unable to create shell node "+
                                "for: %x: %w", edge.NodeKey2Bytes, err)
                }
        case node2Err != nil:
                return node2Err
        }

        // If the edge hasn't been created yet, then we'll first add it to the
        // edge index in order to associate the edge between two nodes and also
        // store the static components of the channel.
        if err := putChanEdgeInfo(edgeIndex, edge, chanKey); err != nil {
                return err
        }

        // Mark edge policies for both sides as unknown. This is to enable
        // efficient incoming channel lookup for a node.
        keys := []*[33]byte{
                &edge.NodeKey1Bytes,
                &edge.NodeKey2Bytes,
        }
        for _, key := range keys {
                err := putChanEdgePolicyUnknown(edges, edge.ChannelID, key[:])
                if err != nil {
                        return err
                }
        }

        // Finally we add it to the channel index which maps channel points
        // (outpoints) to the shorter channel ID's.
        var b bytes.Buffer
        if err := WriteOutpoint(&b, &edge.ChannelPoint); err != nil {
                return err
        }

        return chanIndex.Put(b.Bytes(), chanKey[:])
}

// HasChannelEdge returns true if the database knows of a channel edge with the
// passed channel ID, and false otherwise. If an edge with that ID is found
// within the graph, then two timestamps representing the last time the edge
// was updated for both directed edges are returned along with the boolean. If
// it is not found, then the zombie index is checked and its result is returned
// as the second boolean.
func (c *KVStore) HasChannelEdge(
        chanID uint64) (time.Time, time.Time, bool, bool, error) {

        var (
                upd1Time time.Time
                upd2Time time.Time
                exists   bool
                isZombie bool
        )

        // We'll query the cache with the shared lock held to allow multiple
        // readers to access values in the cache concurrently if they exist.
        c.cacheMu.RLock()
        if entry, ok := c.rejectCache.get(chanID); ok {
                c.cacheMu.RUnlock()
                upd1Time = time.Unix(entry.upd1Time, 0)
                upd2Time = time.Unix(entry.upd2Time, 0)
                exists, isZombie = entry.flags.unpack()

                return upd1Time, upd2Time, exists, isZombie, nil
        }
        c.cacheMu.RUnlock()

        c.cacheMu.Lock()
        defer c.cacheMu.Unlock()

        // The item was not found with the shared lock, so we'll acquire the
        // exclusive lock and check the cache again in case another method added
        // the entry to the cache while no lock was held.
        if entry, ok := c.rejectCache.get(chanID); ok {
                upd1Time = time.Unix(entry.upd1Time, 0)
                upd2Time = time.Unix(entry.upd2Time, 0)
                exists, isZombie = entry.flags.unpack()

                return upd1Time, upd2Time, exists, isZombie, nil
        }

        if err := kvdb.View(c.db, func(tx kvdb.RTx) error {
                edges := tx.ReadBucket(edgeBucket)
                if edges == nil {
                        return ErrGraphNoEdgesFound
                }
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
                if edgeIndex == nil {
                        return ErrGraphNoEdgesFound
                }

                var channelID [8]byte
                byteOrder.PutUint64(channelID[:], chanID)

                // If the edge doesn't exist, then we'll also check our zombie
                // index.
                if edgeIndex.Get(channelID[:]) == nil {
                        exists = false
                        zombieIndex := edges.NestedReadBucket(zombieBucket)
                        if zombieIndex != nil {
                                isZombie, _, _ = isZombieEdge(
                                        zombieIndex, chanID,
                                )
                        }

                        return nil
                }

                exists = true
                isZombie = false

                // If the channel has been found in the graph, then retrieve
                // the edges themselves so we can return the last updated
                // timestamps.
                nodes := tx.ReadBucket(nodeBucket)
                if nodes == nil {
                        return ErrGraphNodeNotFound
                }

                e1, e2, err := fetchChanEdgePolicies(
                        edgeIndex, edges, channelID[:],
                )
                if err != nil {
                        return err
                }

                // As we may have only one of the edges populated, only set the
                // update time if the edge was found in the database.
                if e1 != nil {
                        upd1Time = e1.LastUpdate
                }
                if e2 != nil {
                        upd2Time = e2.LastUpdate
                }

                return nil
        }, func() {}); err != nil {
                return time.Time{}, time.Time{}, exists, isZombie, err
        }

        c.rejectCache.insert(chanID, rejectCacheEntry{
                upd1Time: upd1Time.Unix(),
                upd2Time: upd2Time.Unix(),
                flags:    packRejectFlags(exists, isZombie),
        })

        return upd1Time, upd2Time, exists, isZombie, nil
}

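// Illustrative usage sketch (not part of the original kv_store.go): a
// hypothetical helper showing how callers typically interpret HasChannelEdge's
// return values. The two timestamps are only meaningful when the edge exists
// and is not a zombie.
func edgeFreshness(store *KVStore, chanID uint64) (time.Time, bool, error) {
        upd1, upd2, exists, isZombie, err := store.HasChannelEdge(chanID)
        if err != nil {
                return time.Time{}, false, err
        }
        if !exists || isZombie {
                return time.Time{}, false, nil
        }

        // Report the most recent of the two per-direction update times.
        newest := upd1
        if upd2.After(newest) {
                newest = upd2
        }

        return newest, true, nil
}
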
// AddEdgeProof sets the proof of an existing edge in the graph database.
func (c *KVStore) AddEdgeProof(chanID lnwire.ShortChannelID,
        proof *models.ChannelAuthProof) error {

        // Construct the channel's primary key which is the 8-byte channel ID.
        var chanKey [8]byte
        binary.BigEndian.PutUint64(chanKey[:], chanID.ToUint64())

        return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
                edges := tx.ReadWriteBucket(edgeBucket)
                if edges == nil {
                        return ErrEdgeNotFound
                }

                edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
                if edgeIndex == nil {
                        return ErrEdgeNotFound
                }

                edge, err := fetchChanEdgeInfo(edgeIndex, chanKey[:])
                if err != nil {
                        return err
                }

                edge.AuthProof = proof

                return putChanEdgeInfo(edgeIndex, &edge, chanKey)
        }, func() {})
}

const (
        // pruneTipBytes is the total size of the value which stores a prune
        // entry of the graph in the prune log. The "prune tip" is the last
        // entry in the prune log, and indicates if the channel graph is in
        // sync with the current UTXO state. The structure of the value
        // is: blockHash, taking 32 bytes total.
        pruneTipBytes = 32
)

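// Illustrative sketch (not part of the original kv_store.go): a hypothetical
// helper that spells out the prune-log entry layout described above, under the
// assumption that entries are written exactly as PruneGraph does below: a
// 4-byte big-endian block height key mapping to the 32-byte block hash value.
func encodePruneEntry(height uint32, hash chainhash.Hash) ([4]byte,
        [pruneTipBytes]byte) {

        var key [4]byte
        byteOrder.PutUint32(key[:], height)

        var value [pruneTipBytes]byte
        copy(value[:], hash[:])

        return key, value
}
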
// PruneGraph prunes newly closed channels from the channel graph in response
// to a new block being solved on the network. Any transactions which spend the
// funding output of any known channels within the graph will be deleted.
// Additionally, the "prune tip", or the last block which has been used to
// prune the graph is stored so callers can ensure the graph is fully in sync
// with the current UTXO state. A slice of channels that have been closed by
// the target block along with any pruned nodes are returned if the function
// succeeds without error.
func (c *KVStore) PruneGraph(spentOutputs []*wire.OutPoint,
        blockHash *chainhash.Hash, blockHeight uint32) (
        []*models.ChannelEdgeInfo, []route.Vertex, error) {

        c.cacheMu.Lock()
        defer c.cacheMu.Unlock()

        var (
                chansClosed []*models.ChannelEdgeInfo
                prunedNodes []route.Vertex
        )

        err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
                // First grab the edges bucket which houses the information
                // we'd like to delete.
                edges, err := tx.CreateTopLevelBucket(edgeBucket)
                if err != nil {
                        return err
                }

                // Next grab the two edge indexes which will also need to be
                // updated.
                edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket)
                if err != nil {
                        return err
                }
                chanIndex, err := edges.CreateBucketIfNotExists(
                        channelPointBucket,
                )
                if err != nil {
                        return err
                }
                nodes := tx.ReadWriteBucket(nodeBucket)
                if nodes == nil {
                        return ErrSourceNodeNotSet
                }
                zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
                if err != nil {
                        return err
                }

                // For each of the outpoints that have been spent within the
                // block, we attempt to delete them from the graph: if an
                // outpoint was a channel's funding output, that channel has
                // now been closed.
                for _, chanPoint := range spentOutputs {
                        // TODO(roasbeef): load channel bloom filter, continue
                        // if NOT in filter

                        var opBytes bytes.Buffer
                        err := WriteOutpoint(&opBytes, chanPoint)
                        if err != nil {
                                return err
                        }

                        // First attempt to see if the channel exists within
                        // the database, if not, then we can exit early.
                        chanID := chanIndex.Get(opBytes.Bytes())
                        if chanID == nil {
                                continue
                        }

                        // Attempt to delete the channel; an ErrEdgeNotFound
                        // will be returned if that outpoint isn't known to be
                        // a channel. If no error is returned, then a channel
                        // was successfully pruned.
                        edgeInfo, err := c.delChannelEdgeUnsafe(
                                edges, edgeIndex, chanIndex, zombieIndex,
                                chanID, false, false,
                        )
                        if err != nil && !errors.Is(err, ErrEdgeNotFound) {
                                return err
                        }

                        chansClosed = append(chansClosed, edgeInfo)
                }

                metaBucket, err := tx.CreateTopLevelBucket(graphMetaBucket)
                if err != nil {
                        return err
                }

                pruneBucket, err := metaBucket.CreateBucketIfNotExists(
                        pruneLogBucket,
                )
                if err != nil {
                        return err
                }

                // With the graph pruned, add a new entry to the prune log,
                // which can be used to check if the graph is fully synced with
                // the current UTXO state.
                var blockHeightBytes [4]byte
                byteOrder.PutUint32(blockHeightBytes[:], blockHeight)

                var newTip [pruneTipBytes]byte
                copy(newTip[:], blockHash[:])

                err = pruneBucket.Put(blockHeightBytes[:], newTip[:])
                if err != nil {
                        return err
                }

                // Now that the graph has been pruned, we'll also attempt to
                // prune any nodes that have had a channel closed within the
                // latest block.
                prunedNodes, err = c.pruneGraphNodes(nodes, edgeIndex)

                return err
        }, func() {
                chansClosed = nil
                prunedNodes = nil
        })
        if err != nil {
                return nil, nil, err
        }

        for _, channel := range chansClosed {
                c.rejectCache.remove(channel.ChannelID)
                c.chanCache.remove(channel.ChannelID)
        }

        return chansClosed, prunedNodes, nil
}

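// Illustrative usage sketch (not part of the original kv_store.go): a
// hypothetical helper showing how a caller might feed a connected block's
// spent outpoints into PruneGraph and log the channels and nodes that were
// removed as a result.
func pruneBlock(store *KVStore, spent []*wire.OutPoint, hash *chainhash.Hash,
        height uint32) error {

        closed, removedNodes, err := store.PruneGraph(spent, hash, height)
        if err != nil {
                return err
        }

        log.Debugf("Block %v (height=%d) closed %d channels and pruned %d "+
                "nodes", hash, height, len(closed), len(removedNodes))

        return nil
}
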
// PruneGraphNodes is a garbage collection method which attempts to prune out
// any nodes from the channel graph that are currently unconnected. This
// ensures that we only maintain a graph of reachable nodes. In the event that
// a pruned node gains more channels, it will be re-added to the graph.
func (c *KVStore) PruneGraphNodes() ([]route.Vertex, error) {
        var prunedNodes []route.Vertex
        err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
                nodes := tx.ReadWriteBucket(nodeBucket)
                if nodes == nil {
                        return ErrGraphNodesNotFound
                }
                edges := tx.ReadWriteBucket(edgeBucket)
                if edges == nil {
                        return ErrGraphNotFound
                }
                edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
                if edgeIndex == nil {
                        return ErrGraphNoEdgesFound
                }

                var err error
                prunedNodes, err = c.pruneGraphNodes(nodes, edgeIndex)
                if err != nil {
                        return err
                }

                return nil
        }, func() {
                prunedNodes = nil
        })

        return prunedNodes, err
}

// pruneGraphNodes attempts to remove any nodes from the graph that have had a
// channel closed within the current block. If the node still has existing
// channels in the graph, this will act as a no-op.
func (c *KVStore) pruneGraphNodes(nodes kvdb.RwBucket,
        edgeIndex kvdb.RwBucket) ([]route.Vertex, error) {

        log.Trace("Pruning nodes from graph with no open channels")

        // We'll retrieve the graph's source node to ensure we don't remove it
        // even if it no longer has any open channels.
        sourceNode, err := sourceNodeWithTx(nodes)
        if err != nil {
                return nil, err
        }

        // We'll use this map to keep count of the number of references to a
        // node in the graph. A node should only be removed once it has no more
        // references in the graph.
        nodeRefCounts := make(map[[33]byte]int)
        err = nodes.ForEach(func(pubKey, nodeBytes []byte) error {
                // If this is the source key, then we skip this
                // iteration as the value for this key is a pubKey
                // rather than raw node information.
                if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 {
                        return nil
                }

                var nodePub [33]byte
                copy(nodePub[:], pubKey)
                nodeRefCounts[nodePub] = 0

                return nil
        })
        if err != nil {
                return nil, err
        }

        // To ensure we never delete the source node, we'll start off by
        // bumping its ref count to 1.
        nodeRefCounts[sourceNode.PubKeyBytes] = 1

        // Next, we'll run through the edgeIndex which maps a channel ID to the
        // edge info. We'll use this scan to populate our reference count map
        // above.
        err = edgeIndex.ForEach(func(chanID, edgeInfoBytes []byte) error {
                // The first 66 bytes of the edge info contain the pubkeys of
                // the nodes that this edge attaches. We'll extract them, and
                // add them to the ref count map.
                var node1, node2 [33]byte
                copy(node1[:], edgeInfoBytes[:33])
                copy(node2[:], edgeInfoBytes[33:])

                // With the nodes extracted, we'll increase the ref count of
                // each of the nodes.
                nodeRefCounts[node1]++
                nodeRefCounts[node2]++

                return nil
        })
        if err != nil {
                return nil, err
        }

        // Finally, we'll make a second pass over the set of nodes, and delete
        // any nodes that have a ref count of zero.
        var pruned []route.Vertex
        for nodePubKey, refCount := range nodeRefCounts {
                // If the ref count of the node isn't zero, then we can safely
                // skip it as it still has edges to or from it within the
                // graph.
                if refCount != 0 {
                        continue
                }

                // If we reach this point, then there are no longer any edges
                // that connect this node, so we can delete it.
                err := c.deleteLightningNode(nodes, nodePubKey[:])
                if err != nil {
                        if errors.Is(err, ErrGraphNodeNotFound) ||
                                errors.Is(err, ErrGraphNodesNotFound) {

                                log.Warnf("Unable to prune node %x from the "+
                                        "graph: %v", nodePubKey, err)
                                continue
                        }

                        return nil, err
                }

                log.Infof("Pruned unconnected node %x from channel graph",
                        nodePubKey[:])

                pruned = append(pruned, nodePubKey)
        }

        if len(pruned) > 0 {
                log.Infof("Pruned %v unconnected nodes from the channel graph",
                        len(pruned))
        }

        return pruned, err
}

// DisconnectBlockAtHeight is used to indicate that the block specified
// by the passed height has been disconnected from the main chain. This
// will "rewind" the graph back to the height below, deleting channels
// that are no longer confirmed from the graph. The prune log will be
// set to the last prune height valid for the remaining chain.
// Channels that were removed from the graph resulting from the
// disconnected block are returned.
func (c *KVStore) DisconnectBlockAtHeight(height uint32) (
        []*models.ChannelEdgeInfo, error) {

        // Every channel having a ShortChannelID starting at 'height'
        // will no longer be confirmed.
        startShortChanID := lnwire.ShortChannelID{
                BlockHeight: height,
        }

        // Delete everything after this height from the db up until the
        // SCID alias range.
        endShortChanID := aliasmgr.StartingAlias

        // The block height will be the first 3 bytes of the channel IDs.
        var chanIDStart [8]byte
        byteOrder.PutUint64(chanIDStart[:], startShortChanID.ToUint64())
        var chanIDEnd [8]byte
        byteOrder.PutUint64(chanIDEnd[:], endShortChanID.ToUint64())

        c.cacheMu.Lock()
        defer c.cacheMu.Unlock()

        // Keep track of the channels that are removed from the graph.
        var removedChans []*models.ChannelEdgeInfo

        if err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
                edges, err := tx.CreateTopLevelBucket(edgeBucket)
                if err != nil {
                        return err
                }
                edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket)
                if err != nil {
                        return err
                }
                chanIndex, err := edges.CreateBucketIfNotExists(
                        channelPointBucket,
                )
                if err != nil {
                        return err
                }
                zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
                if err != nil {
                        return err
                }

                // Scan from chanIDStart to chanIDEnd, deleting every
                // found edge.
                // NOTE: we must delete the edges after the cursor loop, since
                // modifying the bucket while traversing is not safe.
                // NOTE: We use a < comparison in bytes.Compare instead of <=
                // so that the StartingAlias itself isn't deleted.
                var keys [][]byte
                cursor := edgeIndex.ReadWriteCursor()

                //nolint:ll
                for k, _ := cursor.Seek(chanIDStart[:]); k != nil &&
                        bytes.Compare(k, chanIDEnd[:]) < 0; k, _ = cursor.Next() {
                        keys = append(keys, k)
                }

                for _, k := range keys {
                        edgeInfo, err := c.delChannelEdgeUnsafe(
                                edges, edgeIndex, chanIndex, zombieIndex,
                                k, false, false,
                        )
                        if err != nil && !errors.Is(err, ErrEdgeNotFound) {
                                return err
                        }

                        removedChans = append(removedChans, edgeInfo)
                }

                // Delete all the entries in the prune log having a height
                // greater than or equal to the block disconnected.
                metaBucket, err := tx.CreateTopLevelBucket(graphMetaBucket)
                if err != nil {
                        return err
                }

                pruneBucket, err := metaBucket.CreateBucketIfNotExists(
                        pruneLogBucket,
                )
                if err != nil {
                        return err
                }

                var pruneKeyStart [4]byte
                byteOrder.PutUint32(pruneKeyStart[:], height)

                var pruneKeyEnd [4]byte
                byteOrder.PutUint32(pruneKeyEnd[:], math.MaxUint32)

                // To avoid modifying the bucket while traversing, we delete
                // the keys in a second loop.
                var pruneKeys [][]byte
                pruneCursor := pruneBucket.ReadWriteCursor()
                //nolint:ll
                for k, _ := pruneCursor.Seek(pruneKeyStart[:]); k != nil &&
                        bytes.Compare(k, pruneKeyEnd[:]) <= 0; k, _ = pruneCursor.Next() {
                        pruneKeys = append(pruneKeys, k)
                }

                for _, k := range pruneKeys {
                        if err := pruneBucket.Delete(k); err != nil {
                                return err
                        }
                }

                return nil
        }, func() {
                removedChans = nil
        }); err != nil {
                return nil, err
        }

        for _, channel := range removedChans {
                c.rejectCache.remove(channel.ChannelID)
                c.chanCache.remove(channel.ChannelID)
        }

        return removedChans, nil
}

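// Illustrative sketch (not part of the original kv_store.go): a hypothetical
// helper showing how the rewind range above is keyed. The block height
// occupies the three most significant bytes of the 8-byte short channel ID,
// so every channel confirmed at or after `height` sorts at or after this
// prefix in the edge index.
func scidRangeStartForHeight(height uint32) [8]byte {
        scid := lnwire.ShortChannelID{BlockHeight: height}

        var start [8]byte
        byteOrder.PutUint64(start[:], scid.ToUint64())

        return start
}
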
// PruneTip returns the block height and hash of the latest block that has been
// used to prune channels in the graph. Knowing the "prune tip" allows callers
// to tell if the graph is currently in sync with the current best known UTXO
// state.
func (c *KVStore) PruneTip() (*chainhash.Hash, uint32, error) {
        var (
                tipHash   chainhash.Hash
                tipHeight uint32
        )

        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
                graphMeta := tx.ReadBucket(graphMetaBucket)
                if graphMeta == nil {
                        return ErrGraphNotFound
                }
                pruneBucket := graphMeta.NestedReadBucket(pruneLogBucket)
                if pruneBucket == nil {
                        return ErrGraphNeverPruned
                }

                pruneCursor := pruneBucket.ReadCursor()

                // The prune key with the largest block height will be our
                // prune tip.
                k, v := pruneCursor.Last()
                if k == nil {
                        return ErrGraphNeverPruned
                }

                // Once we have the prune tip, the value will be the block hash,
                // and the key the block height.
                copy(tipHash[:], v)
                tipHeight = byteOrder.Uint32(k)

                return nil
        }, func() {})
        if err != nil {
                return nil, 0, err
        }

        return &tipHash, tipHeight, nil
}

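// Illustrative usage sketch (not part of the original kv_store.go): a
// hypothetical helper showing how a caller might compare the prune tip
// against the current best block to decide whether the graph still needs
// pruning. A graph that has never been pruned is simply reported as not
// synced.
func isGraphSynced(store *KVStore, bestHash *chainhash.Hash,
        bestHeight uint32) (bool, error) {

        tipHash, tipHeight, err := store.PruneTip()
        switch {
        case errors.Is(err, ErrGraphNeverPruned):
                return false, nil
        case err != nil:
                return false, err
        }

        return tipHeight == bestHeight && *tipHash == *bestHash, nil
}
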
// DeleteChannelEdges removes edges with the given channel IDs from the
// database and marks them as zombies. This ensures that we're unable to re-add
// them to our database once again. If an edge does not exist within the
// database, then ErrEdgeNotFound will be returned. If strictZombiePruning is
// true, then when we mark these edges as zombies, we'll set up the keys such
// that we require the node that failed to send the fresh update to be the one
// that resurrects the channel from its zombie state. The markZombie bool
// denotes whether or not to mark the channel as a zombie.
func (c *KVStore) DeleteChannelEdges(strictZombiePruning, markZombie bool,
        chanIDs ...uint64) ([]*models.ChannelEdgeInfo, error) {

        // TODO(roasbeef): possibly delete from node bucket if node has no more
        // channels
        // TODO(roasbeef): don't delete both edges?

        c.cacheMu.Lock()
        defer c.cacheMu.Unlock()

        var infos []*models.ChannelEdgeInfo
        err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
                edges := tx.ReadWriteBucket(edgeBucket)
                if edges == nil {
                        return ErrEdgeNotFound
                }
                edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
                if edgeIndex == nil {
                        return ErrEdgeNotFound
                }
                chanIndex := edges.NestedReadWriteBucket(channelPointBucket)
                if chanIndex == nil {
                        return ErrEdgeNotFound
                }
                nodes := tx.ReadWriteBucket(nodeBucket)
                if nodes == nil {
                        return ErrGraphNodeNotFound
                }
                zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
                if err != nil {
                        return err
                }

                var rawChanID [8]byte
                for _, chanID := range chanIDs {
                        byteOrder.PutUint64(rawChanID[:], chanID)
                        edgeInfo, err := c.delChannelEdgeUnsafe(
                                edges, edgeIndex, chanIndex, zombieIndex,
                                rawChanID[:], markZombie, strictZombiePruning,
                        )
                        if err != nil {
                                return err
                        }

                        infos = append(infos, edgeInfo)
                }

                return nil
        }, func() {
                infos = nil
        })
        if err != nil {
                return nil, err
        }

        for _, chanID := range chanIDs {
                c.rejectCache.remove(chanID)
                c.chanCache.remove(chanID)
        }

        return infos, nil
}

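// Illustrative usage sketch (not part of the original kv_store.go): a
// hypothetical helper showing a common call pattern for removing a single
// edge and recording it in the zombie index so it cannot simply be re-added
// without a fresh update.
func markChannelZombie(store *KVStore, chanID uint64) error {
        const (
                strictPruning = false
                markZombie    = true
        )

        _, err := store.DeleteChannelEdges(strictPruning, markZombie, chanID)

        return err
}
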
// ChannelID attempts to look up the 8-byte compact channel ID which maps to
// the passed channel point (outpoint). If the passed channel doesn't exist
// within the database, then ErrEdgeNotFound is returned.
func (c *KVStore) ChannelID(chanPoint *wire.OutPoint) (uint64, error) {
        var chanID uint64
        if err := kvdb.View(c.db, func(tx kvdb.RTx) error {
                var err error
                chanID, err = getChanID(tx, chanPoint)
                return err
        }, func() {
                chanID = 0
        }); err != nil {
                return 0, err
        }

        return chanID, nil
}

// getChanID returns the assigned channel ID for a given channel point.
func getChanID(tx kvdb.RTx, chanPoint *wire.OutPoint) (uint64, error) {
        var b bytes.Buffer
        if err := WriteOutpoint(&b, chanPoint); err != nil {
                return 0, err
        }

        edges := tx.ReadBucket(edgeBucket)
        if edges == nil {
                return 0, ErrGraphNoEdgesFound
        }
        chanIndex := edges.NestedReadBucket(channelPointBucket)
        if chanIndex == nil {
                return 0, ErrGraphNoEdgesFound
        }

        chanIDBytes := chanIndex.Get(b.Bytes())
        if chanIDBytes == nil {
                return 0, ErrEdgeNotFound
        }

        chanID := byteOrder.Uint64(chanIDBytes)

        return chanID, nil
}

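// Illustrative usage sketch (not part of the original kv_store.go): a
// hypothetical helper that resolves a funding outpoint to its short channel
// ID via ChannelID and distinguishes "unknown channel" from other failures.
func lookupSCID(store *KVStore, chanPoint *wire.OutPoint) (
        lnwire.ShortChannelID, bool, error) {

        cid, err := store.ChannelID(chanPoint)
        switch {
        case errors.Is(err, ErrEdgeNotFound):
                return lnwire.ShortChannelID{}, false, nil
        case err != nil:
                return lnwire.ShortChannelID{}, false, err
        }

        return lnwire.NewShortChanIDFromInt(cid), true, nil
}
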
// TODO(roasbeef): allow updates to use Batch?

// HighestChanID returns the "highest" known channel ID in the channel graph.
// This represents the "newest" channel from the PoV of the chain. This method
// can be used by peers to quickly determine if their graphs are in sync.
func (c *KVStore) HighestChanID(_ context.Context) (uint64, error) {
        var cid uint64

        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
                edges := tx.ReadBucket(edgeBucket)
                if edges == nil {
                        return ErrGraphNoEdgesFound
                }
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
                if edgeIndex == nil {
                        return ErrGraphNoEdgesFound
                }

                // In order to find the highest chan ID, we'll fetch a cursor
                // and use that to seek to the "end" of our known range.
                cidCursor := edgeIndex.ReadCursor()

                lastChanID, _ := cidCursor.Last()

                // If there's no key, then this means that we don't actually
                // know of any channels, so we'll return a predictable error.
                if lastChanID == nil {
                        return ErrGraphNoEdgesFound
                }

                // Otherwise, we'll deserialize the channel ID and return it
                // to the caller.
                cid = byteOrder.Uint64(lastChanID)

                return nil
        }, func() {
                cid = 0
        })
        if err != nil && !errors.Is(err, ErrGraphNoEdgesFound) {
                return 0, err
        }

        return cid, nil
}

// ChannelEdge represents the complete set of information for a channel edge in
// the known channel graph. This struct couples the core information of the
// edge as well as each of the known advertised edge policies.
type ChannelEdge struct {
        // Info contains all the static information describing the channel.
        Info *models.ChannelEdgeInfo

        // Policy1 points to the "first" edge policy of the channel containing
        // the dynamic information required to properly route through the edge.
        Policy1 *models.ChannelEdgePolicy

        // Policy2 points to the "second" edge policy of the channel containing
        // the dynamic information required to properly route through the edge.
        Policy2 *models.ChannelEdgePolicy

        // Node1 is "node 1" in the channel. This is the node that would have
        // produced Policy1 if it exists.
        Node1 *models.LightningNode

        // Node2 is "node 2" in the channel. This is the node that would have
        // produced Policy2 if it exists.
        Node2 *models.LightningNode
}

// ChanUpdatesInHorizon returns all the known channel edges which have at least
// one edge that has an update timestamp within the specified horizon.
func (c *KVStore) ChanUpdatesInHorizon(startTime,
        endTime time.Time) ([]ChannelEdge, error) {

        // To ensure we don't return duplicate ChannelEdges, we'll use an
        // additional map to keep track of the edges already seen to prevent
        // re-adding them.
        var edgesSeen map[uint64]struct{}
        var edgesToCache map[uint64]ChannelEdge
        var edgesInHorizon []ChannelEdge

        c.cacheMu.Lock()
        defer c.cacheMu.Unlock()

        var hits int
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
                edges := tx.ReadBucket(edgeBucket)
                if edges == nil {
                        return ErrGraphNoEdgesFound
                }
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
                if edgeIndex == nil {
                        return ErrGraphNoEdgesFound
                }
                edgeUpdateIndex := edges.NestedReadBucket(edgeUpdateIndexBucket)
                if edgeUpdateIndex == nil {
                        return ErrGraphNoEdgesFound
                }

                nodes := tx.ReadBucket(nodeBucket)
                if nodes == nil {
                        return ErrGraphNodesNotFound
                }

                // We'll now obtain a cursor to perform a range query within
                // the index to find all channels within the horizon.
                updateCursor := edgeUpdateIndex.ReadCursor()

                var startTimeBytes, endTimeBytes [8 + 8]byte
                byteOrder.PutUint64(
                        startTimeBytes[:8], uint64(startTime.Unix()),
                )
                byteOrder.PutUint64(
                        endTimeBytes[:8], uint64(endTime.Unix()),
                )

                // With our start and end times constructed, we'll step through
                // the index collecting the info and policy of each update of
                // each channel that has a last update within the time range.
                //
                //nolint:ll
                for indexKey, _ := updateCursor.Seek(startTimeBytes[:]); indexKey != nil &&
                        bytes.Compare(indexKey, endTimeBytes[:]) <= 0; indexKey, _ = updateCursor.Next() {
                        // We have a new eligible entry, so we'll slice off the
                        // chan ID so we can query it in the DB.
                        chanID := indexKey[8:]

                        // If we've already retrieved the info and policies for
                        // this edge, then we can skip it as we don't need to do
                        // so again.
                        chanIDInt := byteOrder.Uint64(chanID)
                        if _, ok := edgesSeen[chanIDInt]; ok {
                                continue
                        }

                        if channel, ok := c.chanCache.get(chanIDInt); ok {
                                hits++
                                edgesSeen[chanIDInt] = struct{}{}
                                edgesInHorizon = append(edgesInHorizon, channel)

                                continue
                        }

                        // First, we'll fetch the static edge information.
                        edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID)
                        if err != nil {
                                chanID := byteOrder.Uint64(chanID)
                                return fmt.Errorf("unable to fetch info for "+
                                        "edge with chan_id=%v: %v", chanID, err)
                        }

                        // With the static information obtained, we'll now
                        // fetch the dynamic policy info.
                        edge1, edge2, err := fetchChanEdgePolicies(
                                edgeIndex, edges, chanID,
                        )
                        if err != nil {
                                chanID := byteOrder.Uint64(chanID)
                                return fmt.Errorf("unable to fetch policies "+
                                        "for edge with chan_id=%v: %v", chanID,
                                        err)
                        }

                        node1, err := fetchLightningNode(
                                nodes, edgeInfo.NodeKey1Bytes[:],
                        )
                        if err != nil {
                                return err
                        }

                        node2, err := fetchLightningNode(
                                nodes, edgeInfo.NodeKey2Bytes[:],
                        )
                        if err != nil {
                                return err
                        }

                        // Finally, we'll collate this edge with the rest of
                        // the edges to be returned.
                        edgesSeen[chanIDInt] = struct{}{}
                        channel := ChannelEdge{
                                Info:    &edgeInfo,
                                Policy1: edge1,
                                Policy2: edge2,
                                Node1:   &node1,
                                Node2:   &node2,
                        }
                        edgesInHorizon = append(edgesInHorizon, channel)
                        edgesToCache[chanIDInt] = channel
                }

                return nil
        }, func() {
                edgesSeen = make(map[uint64]struct{})
                edgesToCache = make(map[uint64]ChannelEdge)
                edgesInHorizon = nil
        })
        switch {
        case errors.Is(err, ErrGraphNoEdgesFound):
                fallthrough
        case errors.Is(err, ErrGraphNodesNotFound):
                break

        case err != nil:
                return nil, err
        }

        // Insert any edges loaded from disk into the cache.
        for chanid, channel := range edgesToCache {
                c.chanCache.insert(chanid, channel)
        }

        if len(edgesInHorizon) > 0 {
                log.Debugf("ChanUpdatesInHorizon hit percentage: %.2f (%d/%d)",
                        float64(hits)*100/float64(len(edgesInHorizon)), hits,
                        len(edgesInHorizon))
        } else {
                log.Debugf("ChanUpdatesInHorizon returned no edges in "+
                        "horizon (%s, %s)", startTime, endTime)
        }

        return edgesInHorizon, nil
}

// NodeUpdatesInHorizon returns all the known lightning nodes which have an
// update timestamp within the passed range. This method can be used by two
// nodes to quickly determine if they have the same set of up to date node
// announcements.
func (c *KVStore) NodeUpdatesInHorizon(startTime,
        endTime time.Time) ([]models.LightningNode, error) {

        var nodesInHorizon []models.LightningNode

        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
                nodes := tx.ReadBucket(nodeBucket)
                if nodes == nil {
                        return ErrGraphNodesNotFound
                }

                nodeUpdateIndex := nodes.NestedReadBucket(nodeUpdateIndexBucket)
                if nodeUpdateIndex == nil {
                        return ErrGraphNodesNotFound
                }

                // We'll now obtain a cursor to perform a range query within
                // the index to find all node announcements within the horizon.
                updateCursor := nodeUpdateIndex.ReadCursor()

                var startTimeBytes, endTimeBytes [8 + 33]byte
                byteOrder.PutUint64(
                        startTimeBytes[:8], uint64(startTime.Unix()),
                )
                byteOrder.PutUint64(
                        endTimeBytes[:8], uint64(endTime.Unix()),
                )

                // With our start and end times constructed, we'll step through
                // the index collecting info for each node within the time
                // range.
                //
                //nolint:ll
                for indexKey, _ := updateCursor.Seek(startTimeBytes[:]); indexKey != nil &&
                        bytes.Compare(indexKey, endTimeBytes[:]) <= 0; indexKey, _ = updateCursor.Next() {
                        nodePub := indexKey[8:]
                        node, err := fetchLightningNode(nodes, nodePub)
                        if err != nil {
                                return err
                        }

                        nodesInHorizon = append(nodesInHorizon, node)
                }

                return nil
        }, func() {
                nodesInHorizon = nil
        })
        switch {
        case errors.Is(err, ErrGraphNoEdgesFound):
                fallthrough
        case errors.Is(err, ErrGraphNodesNotFound):
                break

        case err != nil:
                return nil, err
        }

        return nodesInHorizon, nil
}

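// Illustrative usage sketch (not part of the original kv_store.go): a
// hypothetical helper showing a typical horizon query, returning all node
// announcements updated within the last `window` of wall-clock time.
func recentNodeAnnouncements(store *KVStore,
        window time.Duration) ([]models.LightningNode, error) {

        endTime := time.Now()
        startTime := endTime.Add(-window)

        return store.NodeUpdatesInHorizon(startTime, endTime)
}
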
// FilterKnownChanIDs takes a set of channel IDs and returns the subset of chan
// IDs from the passed set that we don't know of and that are not known
// zombies. In other words, we perform a set difference of our set of chan IDs
// and the ones passed in. This method can be used by callers to determine the
// set of channels another peer knows of that we don't. The ChannelUpdateInfos
// for the known zombies are also returned.
func (c *KVStore) FilterKnownChanIDs(chansInfo []ChannelUpdateInfo) ([]uint64,
        []ChannelUpdateInfo, error) {

        var (
                newChanIDs   []uint64
                knownZombies []ChannelUpdateInfo
        )

        c.cacheMu.Lock()
        defer c.cacheMu.Unlock()

        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
                edges := tx.ReadBucket(edgeBucket)
                if edges == nil {
                        return ErrGraphNoEdgesFound
                }
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
                if edgeIndex == nil {
                        return ErrGraphNoEdgesFound
                }

                // Fetch the zombie index, it may not exist if no edges have
                // ever been marked as zombies. If the index has been
                // initialized, we will use it later to skip known zombie edges.
                zombieIndex := edges.NestedReadBucket(zombieBucket)

                // We'll run through the set of chanIDs and collate only the
                // set of channels that are unable to be found within our db.
                var cidBytes [8]byte
                for _, info := range chansInfo {
                        scid := info.ShortChannelID.ToUint64()
                        byteOrder.PutUint64(cidBytes[:], scid)

                        // If the edge is already known, skip it.
                        if v := edgeIndex.Get(cidBytes[:]); v != nil {
                                continue
                        }

                        // If the edge is a known zombie, skip it.
                        if zombieIndex != nil {
                                isZombie, _, _ := isZombieEdge(
                                        zombieIndex, scid,
                                )

                                if isZombie {
                                        knownZombies = append(
                                                knownZombies, info,
                                        )

                                        continue
                                }
                        }

                        newChanIDs = append(newChanIDs, scid)
                }

                return nil
        }, func() {
                newChanIDs = nil
                knownZombies = nil
        })
        switch {
        // If we don't know of any edges yet, then we'll return the entire set
        // of chan IDs specified.
        case errors.Is(err, ErrGraphNoEdgesFound):
                ogChanIDs := make([]uint64, len(chansInfo))
                for i, info := range chansInfo {
                        ogChanIDs[i] = info.ShortChannelID.ToUint64()
                }

                return ogChanIDs, nil, nil

        case err != nil:
                return nil, nil, err
        }

        return newChanIDs, knownZombies, nil
}

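// Illustrative usage sketch (not part of the original kv_store.go): a
// hypothetical helper showing the set-difference use case described above.
// Given the SCIDs a peer advertises, it returns only those we have never seen
// and do not consider zombies.
func newChannelsFromPeer(store *KVStore,
        peerChans []ChannelUpdateInfo) ([]uint64, error) {

        newSCIDs, zombies, err := store.FilterKnownChanIDs(peerChans)
        if err != nil {
                return nil, err
        }

        log.Debugf("Peer advertised %d channels: %d new, %d known zombies",
                len(peerChans), len(newSCIDs), len(zombies))

        return newSCIDs, nil
}
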
// ChannelUpdateInfo couples the SCID of a channel with the timestamps of the
// latest received channel updates for the channel.
type ChannelUpdateInfo struct {
        // ShortChannelID is the SCID identifier of the channel.
        ShortChannelID lnwire.ShortChannelID

        // Node1UpdateTimestamp is the timestamp of the latest received update
        // from the node 1 channel peer. This will be set to zero time if no
        // update has yet been received from this node.
        Node1UpdateTimestamp time.Time

        // Node2UpdateTimestamp is the timestamp of the latest received update
        // from the node 2 channel peer. This will be set to zero time if no
        // update has yet been received from this node.
        Node2UpdateTimestamp time.Time
}

// NewChannelUpdateInfo is a constructor which makes sure we initialize the
// timestamps with the zero-seconds unix timestamp, which equals
// `January 1, 1970, 00:00:00 UTC`, in case the value is `time.Time{}`.
func NewChannelUpdateInfo(scid lnwire.ShortChannelID, node1Timestamp,
        node2Timestamp time.Time) ChannelUpdateInfo {

        chanInfo := ChannelUpdateInfo{
                ShortChannelID:       scid,
                Node1UpdateTimestamp: node1Timestamp,
                Node2UpdateTimestamp: node2Timestamp,
        }

        if node1Timestamp.IsZero() {
                chanInfo.Node1UpdateTimestamp = time.Unix(0, 0)
        }

        if node2Timestamp.IsZero() {
                chanInfo.Node2UpdateTimestamp = time.Unix(0, 0)
        }

        return chanInfo
}

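// Illustrative sketch (not part of the original kv_store.go): with the epoch
// normalization above, a channel with no received updates still carries
// comparable, non-zero-value timestamps.
func emptyUpdateInfo(scid lnwire.ShortChannelID) ChannelUpdateInfo {
        info := NewChannelUpdateInfo(scid, time.Time{}, time.Time{})

        // Both timestamps are now time.Unix(0, 0) rather than the zero
        // time.Time value.
        return info
}
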
// BlockChannelRange represents a range of channels for a given block height.
type BlockChannelRange struct {
        // Height is the height of the block all of the channels below were
        // included in.
        Height uint32

        // Channels is the list of channels identified by their short ID
        // representation known to us that were included in the block height
        // above. The list may include channel update timestamp information if
        // requested.
        Channels []ChannelUpdateInfo
}

// FilterChannelRange returns the channel ID's of all known channels which were
// mined in a block height within the passed range. The channel IDs are grouped
// by their common block height. This method can be used to quickly share with a
// peer the set of channels we know of within a particular range to catch them
// up after a period of time offline. If withTimestamps is true then the
// timestamp info of the latest received channel update messages of the channel
// will be included in the response.
func (c *KVStore) FilterChannelRange(startHeight,
        endHeight uint32, withTimestamps bool) ([]BlockChannelRange, error) {

        startChanID := &lnwire.ShortChannelID{
                BlockHeight: startHeight,
        }

        endChanID := lnwire.ShortChannelID{
                BlockHeight: endHeight,
                TxIndex:     math.MaxUint32 & 0x00ffffff,
                TxPosition:  math.MaxUint16,
        }

        // As we need to perform a range scan, we'll convert the starting and
        // ending height to their corresponding values when encoded using short
        // channel ID's.
        var chanIDStart, chanIDEnd [8]byte
        byteOrder.PutUint64(chanIDStart[:], startChanID.ToUint64())
        byteOrder.PutUint64(chanIDEnd[:], endChanID.ToUint64())

        var channelsPerBlock map[uint32][]ChannelUpdateInfo
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
                edges := tx.ReadBucket(edgeBucket)
                if edges == nil {
                        return ErrGraphNoEdgesFound
                }
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
                if edgeIndex == nil {
                        return ErrGraphNoEdgesFound
                }

                cursor := edgeIndex.ReadCursor()

                // We'll now iterate through the database, and find each
                // channel ID that resides within the specified range.
                //
                //nolint:ll
                for k, v := cursor.Seek(chanIDStart[:]); k != nil &&
                        bytes.Compare(k, chanIDEnd[:]) <= 0; k, v = cursor.Next() {
                        // Don't send alias SCIDs during gossip sync.
                        edgeReader := bytes.NewReader(v)
                        edgeInfo, err := deserializeChanEdgeInfo(edgeReader)
                        if err != nil {
                                return err
                        }

                        if edgeInfo.AuthProof == nil {
                                continue
                        }

                        // This channel ID rests within the target range, so
                        // we'll add it to our returned set.
                        rawCid := byteOrder.Uint64(k)
                        cid := lnwire.NewShortChanIDFromInt(rawCid)

                        chanInfo := NewChannelUpdateInfo(
                                cid, time.Time{}, time.Time{},
                        )

                        if !withTimestamps {
                                channelsPerBlock[cid.BlockHeight] = append(
                                        channelsPerBlock[cid.BlockHeight],
                                        chanInfo,
                                )

                                continue
                        }

                        node1Key, node2Key := computeEdgePolicyKeys(&edgeInfo)

                        rawPolicy := edges.Get(node1Key)
                        if len(rawPolicy) != 0 {
                                r := bytes.NewReader(rawPolicy)

                                edge, err := deserializeChanEdgePolicyRaw(r)
                                if err != nil && !errors.Is(
22✔
2478
                                        err, ErrEdgePolicyOptionalFieldNotFound,
28✔
2479
                                ) && !errors.Is(err, ErrParsingExtraTLVBytes) {
6✔
2480

6✔
2481
                                        return err
6✔
2482
                                }
6✔
2483

6✔
2484
                                chanInfo.Node1UpdateTimestamp = edge.LastUpdate
6✔
2485
                        }
×
2486

×
2487
                        rawPolicy = edges.Get(node2Key)
×
2488
                        if len(rawPolicy) != 0 {
2489
                                r := bytes.NewReader(rawPolicy)
6✔
2490

2491
                                edge, err := deserializeChanEdgePolicyRaw(r)
2492
                                if err != nil && !errors.Is(
22✔
2493
                                        err, ErrEdgePolicyOptionalFieldNotFound,
33✔
2494
                                ) && !errors.Is(err, ErrParsingExtraTLVBytes) {
11✔
2495

11✔
2496
                                        return err
11✔
2497
                                }
11✔
2498

11✔
2499
                                chanInfo.Node2UpdateTimestamp = edge.LastUpdate
11✔
2500
                        }
×
2501

×
2502
                        channelsPerBlock[cid.BlockHeight] = append(
×
2503
                                channelsPerBlock[cid.BlockHeight], chanInfo,
2504
                        )
11✔
2505
                }
2506

2507
                return nil
22✔
2508
        }, func() {
22✔
2509
                channelsPerBlock = make(map[uint32][]ChannelUpdateInfo)
22✔
2510
        })
2511

2512
        switch {
11✔
2513
        // If we don't know of any channels yet, then there's nothing to
11✔
2514
        // filter, so we'll return an empty slice.
11✔
2515
        case errors.Is(err, ErrGraphNoEdgesFound) || len(channelsPerBlock) == 0:
11✔
2516
                return nil, nil
2517

11✔
2518
        case err != nil:
2519
                return nil, err
2520
        }
3✔
2521

3✔
2522
        // Return the channel ranges in ascending block height order.
2523
        blocks := make([]uint32, 0, len(channelsPerBlock))
×
2524
        for block := range channelsPerBlock {
×
2525
                blocks = append(blocks, block)
2526
        }
2527
        sort.Slice(blocks, func(i, j int) bool {
2528
                return blocks[i] < blocks[j]
8✔
2529
        })
30✔
2530

22✔
2531
        channelRanges := make([]BlockChannelRange, 0, len(channelsPerBlock))
22✔
2532
        for _, block := range blocks {
22✔
2533
                channelRanges = append(channelRanges, BlockChannelRange{
14✔
2534
                        Height:   block,
14✔
2535
                        Channels: channelsPerBlock[block],
2536
                })
8✔
2537
        }
30✔
2538

22✔
2539
        return channelRanges, nil
22✔
2540
}
22✔
2541

22✔
2542
// FetchChanInfos returns the set of channel edges that correspond to the passed
22✔
2543
// channel IDs. If an edge in the query is unknown to the database, it will be
2544
// skipped and the result will contain only those edges that exist at the time
8✔
2545
// of the query. This can be used to respond to peer queries that are seeking to
2546
// fill in gaps in their view of the channel graph.
2547
func (c *KVStore) FetchChanInfos(chanIDs []uint64) ([]ChannelEdge, error) {
2548
        return c.fetchChanInfos(nil, chanIDs)
2549
}
2550
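// exampleFetchChanInfos is an illustrative sketch added by the editor (the
// helper name is hypothetical): unknown channel IDs are skipped rather than
// treated as errors, so the result may be shorter than the query.
func exampleFetchChanInfos(store *KVStore, chanIDs []uint64) (int, error) {
        edges, err := store.FetchChanInfos(chanIDs)
        if err != nil {
                return 0, err
        }

        // len(edges) <= len(chanIDs): only edges known at query time remain.
        return len(edges), nil
}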

2551
// fetchChanInfos returns the set of channel edges that correspond to the passed
2552
// channel IDs. If an edge in the query is unknown to the database, it will be
4✔
2553
// skipped and the result will contain only those edges that exist at the time
4✔
2554
// of the query. This can be used to respond to peer queries that are seeking to
4✔
2555
// fill in gaps in their view of the channel graph.
2556
//
2557
// NOTE: An optional transaction may be provided. If none is provided, then a
2558
// new one will be created.
2559
func (c *KVStore) fetchChanInfos(tx kvdb.RTx, chanIDs []uint64) (
2560
        []ChannelEdge, error) {
2561
        // TODO(roasbeef): sort cids?
2562

2563
        var (
2564
                chanEdges []ChannelEdge
2565
                cidBytes  [8]byte
4✔
2566
        )
4✔
2567

4✔
2568
        fetchChanInfos := func(tx kvdb.RTx) error {
4✔
2569
                edges := tx.ReadBucket(edgeBucket)
4✔
2570
                if edges == nil {
4✔
2571
                        return ErrGraphNoEdgesFound
4✔
2572
                }
4✔
2573
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
8✔
2574
                if edgeIndex == nil {
4✔
2575
                        return ErrGraphNoEdgesFound
4✔
2576
                }
×
2577
                nodes := tx.ReadBucket(nodeBucket)
×
2578
                if nodes == nil {
4✔
2579
                        return ErrGraphNotFound
4✔
2580
                }
×
2581

×
2582
                for _, cid := range chanIDs {
4✔
2583
                        byteOrder.PutUint64(cidBytes[:], cid)
4✔
2584

×
2585
                        // First, we'll fetch the static edge information. If
×
2586
                        // the edge is unknown, we will skip the edge and
2587
                        // continue gathering all known edges.
15✔
2588
                        edgeInfo, err := fetchChanEdgeInfo(
11✔
2589
                                edgeIndex, cidBytes[:],
11✔
2590
                        )
11✔
2591
                        switch {
11✔
2592
                        case errors.Is(err, ErrEdgeNotFound):
11✔
2593
                                continue
11✔
2594
                        case err != nil:
11✔
2595
                                return err
11✔
2596
                        }
11✔
2597

3✔
2598
                        // With the static information obtained, we'll now
3✔
2599
                        // fetch the dynamic policy info.
×
2600
                        edge1, edge2, err := fetchChanEdgePolicies(
×
2601
                                edgeIndex, edges, cidBytes[:],
2602
                        )
2603
                        if err != nil {
2604
                                return err
2605
                        }
8✔
2606

8✔
2607
                        node1, err := fetchLightningNode(
8✔
2608
                                nodes, edgeInfo.NodeKey1Bytes[:],
8✔
2609
                        )
×
2610
                        if err != nil {
×
2611
                                return err
2612
                        }
8✔
2613

8✔
2614
                        node2, err := fetchLightningNode(
8✔
2615
                                nodes, edgeInfo.NodeKey2Bytes[:],
8✔
2616
                        )
×
2617
                        if err != nil {
×
2618
                                return err
2619
                        }
8✔
2620

8✔
2621
                        chanEdges = append(chanEdges, ChannelEdge{
8✔
2622
                                Info:    &edgeInfo,
8✔
2623
                                Policy1: edge1,
×
2624
                                Policy2: edge2,
×
2625
                                Node1:   &node1,
2626
                                Node2:   &node2,
8✔
2627
                        })
8✔
2628
                }
8✔
2629

8✔
2630
                return nil
8✔
2631
        }
8✔
2632

8✔
2633
        if tx == nil {
2634
                err := kvdb.View(c.db, fetchChanInfos, func() {
2635
                        chanEdges = nil
4✔
2636
                })
2637
                if err != nil {
2638
                        return nil, err
8✔
2639
                }
8✔
2640

4✔
2641
                return chanEdges, nil
4✔
2642
        }
4✔
2643

×
2644
        err := fetchChanInfos(tx)
×
2645
        if err != nil {
2646
                return nil, err
4✔
2647
        }
2648

2649
        return chanEdges, nil
×
2650
}
×
2651

×
2652
func delEdgeUpdateIndexEntry(edgesBucket kvdb.RwBucket, chanID uint64,
×
2653
        edge1, edge2 *models.ChannelEdgePolicy) error {
2654

×
2655
        // First, we'll fetch the edge update index bucket which currently
2656
        // stores an entry for the channel we're about to delete.
2657
        updateIndex := edgesBucket.NestedReadWriteBucket(edgeUpdateIndexBucket)
2658
        if updateIndex == nil {
139✔
2659
                // No edges in bucket, return early.
139✔
2660
                return nil
139✔
2661
        }
139✔
2662

139✔
2663
        // Now that we have the bucket, we'll attempt to construct a template
139✔
2664
        // for the index key: updateTime || chanid.
×
2665
        var indexKey [8 + 8]byte
×
2666
        byteOrder.PutUint64(indexKey[8:], chanID)
×
2667

2668
        // With the template constructed, we'll attempt to delete an entry that
2669
        // would have been created by both edges: we'll alternate the update
2670
        // times, as one may have overridden the other.
139✔
2671
        if edge1 != nil {
139✔
2672
                byteOrder.PutUint64(
139✔
2673
                        indexKey[:8], uint64(edge1.LastUpdate.Unix()),
139✔
2674
                )
139✔
2675
                if err := updateIndex.Delete(indexKey[:]); err != nil {
139✔
2676
                        return err
149✔
2677
                }
10✔
2678
        }
10✔
2679

10✔
2680
        // We'll also attempt to delete the entry that may have been created by
10✔
2681
        // the second edge.
×
2682
        if edge2 != nil {
×
2683
                byteOrder.PutUint64(
2684
                        indexKey[:8], uint64(edge2.LastUpdate.Unix()),
2685
                )
2686
                if err := updateIndex.Delete(indexKey[:]); err != nil {
2687
                        return err
151✔
2688
                }
12✔
2689
        }
12✔
2690

12✔
2691
        return nil
12✔
2692
}
×
2693
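// exampleEdgeUpdateIndexKey is an illustrative sketch added by the editor (the
// helper name is hypothetical): entries in the edge update index are keyed by
// the 8-byte encoded update time followed by the 8-byte channel ID, matching
// the template built in delEdgeUpdateIndexEntry above.
func exampleEdgeUpdateIndexKey(lastUpdate time.Time, chanID uint64) [16]byte {
        var indexKey [8 + 8]byte
        byteOrder.PutUint64(indexKey[:8], uint64(lastUpdate.Unix()))
        byteOrder.PutUint64(indexKey[8:], chanID)

        return indexKey
}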

×
2694
// delChannelEdgeUnsafe deletes the edge with the given chanID from the graph
2695
// cache. It then goes on to delete any policy info and edge info for this
2696
// channel from the DB and finally, if isZombie is true, it will add an entry
139✔
2697
// for this channel in the zombie index.
2698
//
2699
// NOTE: this method MUST only be called if the cacheMu has already been
2700
// acquired.
2701
func (c *KVStore) delChannelEdgeUnsafe(edges, edgeIndex, chanIndex,
2702
        zombieIndex kvdb.RwBucket, chanID []byte, isZombie,
2703
        strictZombie bool) (*models.ChannelEdgeInfo, error) {
2704

2705
        edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID)
2706
        if err != nil {
2707
                return nil, err
2708
        }
197✔
2709

197✔
2710
        // We'll also remove the entry in the edge update index bucket before
197✔
2711
        // we delete the edges themselves so we can access their last update
255✔
2712
        // times.
58✔
2713
        cid := byteOrder.Uint64(chanID)
58✔
2714
        edge1, edge2, err := fetchChanEdgePolicies(edgeIndex, edges, chanID)
2715
        if err != nil {
2716
                return nil, err
2717
        }
2718
        err = delEdgeUpdateIndexEntry(edges, cid, edge1, edge2)
139✔
2719
        if err != nil {
139✔
2720
                return nil, err
139✔
2721
        }
×
2722

×
2723
        // The edge key is of the format pubKey || chanID. First we construct
139✔
2724
        // the latter half, populating the channel ID.
139✔
2725
        var edgeKey [33 + 8]byte
×
2726
        copy(edgeKey[33:], chanID)
×
2727

2728
        // With the latter half constructed, copy over the first public key to
2729
        // delete the edge in this direction, then the second to delete the
2730
        // edge in the opposite direction.
139✔
2731
        copy(edgeKey[:33], edgeInfo.NodeKey1Bytes[:])
139✔
2732
        if edges.Get(edgeKey[:]) != nil {
139✔
2733
                if err := edges.Delete(edgeKey[:]); err != nil {
139✔
2734
                        return nil, err
139✔
2735
                }
139✔
2736
        }
139✔
2737
        copy(edgeKey[:33], edgeInfo.NodeKey2Bytes[:])
278✔
2738
        if edges.Get(edgeKey[:]) != nil {
139✔
2739
                if err := edges.Delete(edgeKey[:]); err != nil {
×
2740
                        return nil, err
×
2741
                }
2742
        }
139✔
2743

278✔
2744
        // As part of deleting the edge we also remove all disabled entries
139✔
2745
        // from the edgePolicyDisabledIndex bucket. We do that for both
×
2746
        // directions.
×
2747
        err = updateEdgePolicyDisabledIndex(edges, cid, false, false)
2748
        if err != nil {
2749
                return nil, err
2750
        }
2751
        err = updateEdgePolicyDisabledIndex(edges, cid, true, false)
2752
        if err != nil {
139✔
2753
                return nil, err
139✔
2754
        }
×
2755

×
2756
        // With the edge data deleted, we can purge the information from the two
139✔
2757
        // edge indexes.
139✔
2758
        if err := edgeIndex.Delete(chanID); err != nil {
×
2759
                return nil, err
×
2760
        }
2761
        var b bytes.Buffer
2762
        if err := WriteOutpoint(&b, &edgeInfo.ChannelPoint); err != nil {
2763
                return nil, err
139✔
2764
        }
×
2765
        if err := chanIndex.Delete(b.Bytes()); err != nil {
×
2766
                return nil, err
139✔
2767
        }
139✔
2768

×
2769
        // Finally, we'll mark the edge as a zombie within our index if it's
×
2770
        // being removed due to the channel becoming a zombie. We do this to
139✔
2771
        // ensure we don't store unnecessary data for spent channels.
×
2772
        if !isZombie {
×
2773
                return &edgeInfo, nil
2774
        }
2775

2776
        nodeKey1, nodeKey2 := edgeInfo.NodeKey1Bytes, edgeInfo.NodeKey2Bytes
2777
        if strictZombie {
254✔
2778
                var e1UpdateTime, e2UpdateTime *time.Time
115✔
2779
                if edge1 != nil {
115✔
2780
                        e1UpdateTime = &edge1.LastUpdate
2781
                }
24✔
2782
                if edge2 != nil {
28✔
2783
                        e2UpdateTime = &edge2.LastUpdate
4✔
2784
                }
6✔
2785

2✔
2786
                nodeKey1, nodeKey2 = makeZombiePubkeys(
2✔
2787
                        &edgeInfo, e1UpdateTime, e2UpdateTime,
7✔
2788
                )
3✔
2789
        }
3✔
2790

2791
        return &edgeInfo, markEdgeZombie(
4✔
2792
                zombieIndex, byteOrder.Uint64(chanID), nodeKey1, nodeKey2,
4✔
2793
        )
4✔
2794
}
2795

2796
// makeZombiePubkeys derives the node pubkeys to store in the zombie index for a
24✔
2797
// particular pair of channel policies. The return values are one of:
24✔
2798
//  1. (pubkey1, pubkey2)
24✔
2799
//  2. (pubkey1, blank)
2800
//  3. (blank, pubkey2)
2801
//
2802
// A blank pubkey means that corresponding node will be unable to resurrect a
2803
// channel on its own. For example, node1 may continue to publish recent
2804
// updates, but node2 has fallen way behind. After marking an edge as a zombie,
2805
// we don't want another fresh update from node1 to resurrect, as the edge can
2806
// only become live once node2 finally sends something recent.
2807
//
2808
// In the case where we have neither update, we allow either party to resurrect
2809
// the channel. If the channel were to be marked zombie again, it would be
2810
// marked with the correct lagging channel since we received an update from only
2811
// one side.
2812
func makeZombiePubkeys(info *models.ChannelEdgeInfo,
2813
        e1, e2 *time.Time) ([33]byte, [33]byte) {
2814

2815
        switch {
2816
        // If we don't have either edge policy, we'll return both pubkeys so
2817
        // that the channel can be resurrected by either party.
2818
        case e1 == nil && e2 == nil:
4✔
2819
                return info.NodeKey1Bytes, info.NodeKey2Bytes
4✔
2820

4✔
2821
        // If we're missing edge1, or if both edges are present but edge1 is
2822
        // older, we'll return edge1's pubkey and a blank pubkey for edge2. This
2823
        // means that only an update from edge1 will be able to resurrect the
1✔
2824
        // channel.
1✔
2825
        case e1 == nil || (e2 != nil && e1.Before(*e2)):
2826
                return info.NodeKey1Bytes, [33]byte{}
2827

2828
        // Otherwise, we're missing edge2 or edge2 is the older side, so we
2829
        // return a blank pubkey for edge1. In this case, only an update from
2830
        // edge2 can resurrect the channel.
1✔
2831
        default:
1✔
2832
                return [33]byte{}, info.NodeKey2Bytes
2833
        }
2834
}
2835
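// exampleMakeZombiePubkeys is an illustrative sketch added by the editor (the
// helper name is hypothetical): when we only ever heard an update from node 1,
// the zombie entry blanks node 1's key so that only a fresh update from node 2
// can resurrect the channel, mirroring case 3 described above.
func exampleMakeZombiePubkeys(info *models.ChannelEdgeInfo) bool {
        e1 := time.Now()
        key1, key2 := makeZombiePubkeys(info, &e1, nil)

        return key1 == [33]byte{} && key2 == info.NodeKey2Bytes
}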

2836
// UpdateEdgePolicy updates the edge routing policy for a single directed edge
2✔
2837
// within the database for the referenced channel. The `flags` attribute within
2✔
2838
// the ChannelEdgePolicy determines which of the directed edges are being
2839
// updated. If the flag is 1, then the first node's information is being
2840
// updated, otherwise it's the second node's information. The node ordering is
2841
// determined by the lexicographical ordering of the identity public keys of the
2842
// nodes on either side of the channel.
2843
func (c *KVStore) UpdateEdgePolicy(ctx context.Context,
2844
        edge *models.ChannelEdgePolicy,
2845
        opts ...batch.SchedulerOption) (route.Vertex, route.Vertex, error) {
2846

2847
        var (
2848
                isUpdate1    bool
2849
                edgeNotFound bool
2850
                from, to     route.Vertex
2,672✔
2851
        )
2,672✔
2852

2,672✔
2853
        r := &batch.Request[kvdb.RwTx]{
2,672✔
2854
                Opts: batch.NewSchedulerOptions(opts...),
2,672✔
2855
                Reset: func() {
2,672✔
2856
                        isUpdate1 = false
2,672✔
2857
                        edgeNotFound = false
2,672✔
2858
                },
2,672✔
2859
                Do: func(tx kvdb.RwTx) error {
2,672✔
2860
                        // Validate that the ExtraOpaqueData is in fact a valid
5,345✔
2861
                        // TLV stream. This is done here instead of within
2,673✔
2862
                        // updateEdgePolicy so that updateEdgePolicy can be used
2,673✔
2863
                        // by unit tests to recreate the case where we already
2,673✔
2864
                        // have nodes persisted with invalid TLV data.
2,673✔
2865
                        err := edge.ExtraOpaqueData.ValidateTLV()
2,673✔
2866
                        if err != nil {
2,673✔
2867
                                return fmt.Errorf("%w: %w",
2,673✔
2868
                                        ErrParsingExtraTLVBytes, err)
2,673✔
2869
                        }
2,673✔
2870

2,673✔
2871
                        from, to, isUpdate1, err = updateEdgePolicy(tx, edge)
2,675✔
2872
                        if err != nil {
2✔
2873
                                log.Errorf("UpdateEdgePolicy failed: %v", err)
2✔
2874
                        }
2✔
2875

2876
                        // Silence ErrEdgeNotFound so that the batch can
2,671✔
2877
                        // succeed, but propagate the error via local state.
2,675✔
2878
                        if errors.Is(err, ErrEdgeNotFound) {
4✔
2879
                                edgeNotFound = true
4✔
2880
                                return nil
2881
                        }
2882

2883
                        return err
2,675✔
2884
                },
4✔
2885
                OnCommit: func(err error) error {
4✔
2886
                        switch {
4✔
2887
                        case err != nil:
2888
                                return err
2,667✔
2889
                        case edgeNotFound:
2890
                                return ErrEdgeNotFound
2,672✔
2891
                        default:
2,672✔
2892
                                c.updateEdgeCache(edge, isUpdate1)
1✔
2893
                                return nil
1✔
2894
                        }
4✔
2895
                },
4✔
2896
        }
2,667✔
2897

2,667✔
2898
        err := c.chanScheduler.Execute(ctx, r)
2,667✔
2899

2900
        return from, to, err
2901
}
2902

2903
func (c *KVStore) updateEdgeCache(e *models.ChannelEdgePolicy,
2,672✔
2904
        isUpdate1 bool) {
2,672✔
2905

2,672✔
2906
        // If an entry for this channel is found in reject cache, we'll modify
2907
        // the entry with the updated timestamp for the direction that was just
2908
        // written. If the edge doesn't exist, we'll load the cache entry lazily
2909
        // during the next query for this edge.
2,667✔
2910
        if entry, ok := c.rejectCache.get(e.ChannelID); ok {
2,667✔
2911
                if isUpdate1 {
2,667✔
2912
                        entry.upd1Time = e.LastUpdate.Unix()
2,667✔
2913
                } else {
2,667✔
2914
                        entry.upd2Time = e.LastUpdate.Unix()
2,667✔
2915
                }
2,672✔
2916
                c.rejectCache.insert(e.ChannelID, entry)
8✔
2917
        }
3✔
2918

5✔
2919
        // If an entry for this channel is found in channel cache, we'll modify
2✔
2920
        // the entry with the updated policy for the direction that was just
2✔
2921
        // written. If the edge doesn't exist, we'll defer loading the info and
5✔
2922
        // policies and lazily read from disk during the next query.
2923
        if channel, ok := c.chanCache.get(e.ChannelID); ok {
2924
                if isUpdate1 {
2925
                        channel.Policy1 = e
2926
                } else {
2927
                        channel.Policy2 = e
2928
                }
2,667✔
2929
                c.chanCache.insert(e.ChannelID, channel)
×
2930
        }
×
2931
}
×
2932

×
2933
// updateEdgePolicy attempts to update an edge's policy within the relevant
×
2934
// buckets using an existing database transaction. The returned boolean will be
×
2935
// true if the updated policy belongs to node1, and false if the policy belonged
2936
// to node2.
2937
func updateEdgePolicy(tx kvdb.RwTx, edge *models.ChannelEdgePolicy) (
2938
        route.Vertex, route.Vertex, bool, error) {
2939

2940
        var noVertex route.Vertex
2941

2942
        edges := tx.ReadWriteBucket(edgeBucket)
2943
        if edges == nil {
2,671✔
2944
                return noVertex, noVertex, false, ErrEdgeNotFound
2,671✔
2945
        }
2,671✔
2946
        edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
2,671✔
2947
        if edgeIndex == nil {
2,671✔
2948
                return noVertex, noVertex, false, ErrEdgeNotFound
2,671✔
2949
        }
×
2950

×
2951
        // Create the channelID key by converting the channel ID
2,671✔
2952
        // integer into a byte slice.
2,671✔
2953
        var chanID [8]byte
×
2954
        byteOrder.PutUint64(chanID[:], edge.ChannelID)
×
2955

2956
        // With the channel ID, we then fetch the value storing the two
2957
        // nodes which connect this channel edge.
2958
        nodeInfo := edgeIndex.Get(chanID[:])
2,671✔
2959
        if nodeInfo == nil {
2,671✔
2960
                return noVertex, noVertex, false, ErrEdgeNotFound
2,671✔
2961
        }
2,671✔
2962

2,671✔
2963
        // Depending on the flags value passed above, either the first
2,671✔
2964
        // or second edge policy is being updated.
2,675✔
2965
        var fromNode, toNode []byte
4✔
2966
        var isUpdate1 bool
4✔
2967
        if edge.ChannelFlags&lnwire.ChanUpdateDirection == 0 {
2968
                fromNode = nodeInfo[:33]
2969
                toNode = nodeInfo[33:66]
2970
                isUpdate1 = true
2,667✔
2971
        } else {
2,667✔
2972
                fromNode = nodeInfo[33:66]
4,004✔
2973
                toNode = nodeInfo[:33]
1,337✔
2974
                isUpdate1 = false
1,337✔
2975
        }
1,337✔
2976

2,667✔
2977
        // Finally, with the direction of the edge being updated
1,330✔
2978
        // identified, we update the on-disk edge representation.
1,330✔
2979
        err := putChanEdgePolicy(edges, edge, fromNode, toNode)
1,330✔
2980
        if err != nil {
1,330✔
2981
                return noVertex, noVertex, false, err
2982
        }
2983

2984
        var (
2,667✔
2985
                fromNodePubKey route.Vertex
2,667✔
2986
                toNodePubKey   route.Vertex
×
2987
        )
×
2988
        copy(fromNodePubKey[:], fromNode)
2989
        copy(toNodePubKey[:], toNode)
2,667✔
2990

2,667✔
2991
        return fromNodePubKey, toNodePubKey, isUpdate1, nil
2,667✔
2992
}
2,667✔
2993
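// exampleDirectionBit is an illustrative sketch added by the editor (the
// helper name is hypothetical): the ChanUpdateDirection bit of ChannelFlags is
// what selects which of the two stored policies an update belongs to, matching
// the branch in updateEdgePolicy above.
func exampleDirectionBit(edge *models.ChannelEdgePolicy) bool {
        // A cleared direction bit means the update describes node 1's policy.
        return edge.ChannelFlags&lnwire.ChanUpdateDirection == 0
}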

2,667✔
2994
// isPublic determines whether the node is seen as public within the graph from
2,667✔
2995
// the source node's point of view. An existing database transaction can also be
2,667✔
2996
// specified.
2,667✔
2997
func (c *KVStore) isPublic(tx kvdb.RTx, nodePub route.Vertex,
2998
        sourcePubKey []byte) (bool, error) {
2999

3000
        // In order to determine whether this node is publicly advertised within
3001
        // the graph, we'll need to look at all of its edges and check whether
3002
        // they extend to any other node than the source node. errDone will be
3003
        // used to terminate the check early.
13✔
3004
        nodeIsPublic := false
13✔
3005
        errDone := errors.New("done")
13✔
3006
        err := c.forEachNodeChannelTx(tx, nodePub, func(tx kvdb.RTx,
13✔
3007
                info *models.ChannelEdgeInfo, _ *models.ChannelEdgePolicy,
13✔
3008
                _ *models.ChannelEdgePolicy) error {
13✔
3009

13✔
3010
                // If this edge doesn't extend to the source node, we'll
13✔
3011
                // terminate our search as we can now conclude that the node is
13✔
3012
                // publicly advertised within the graph due to the local node
13✔
3013
                // knowing of the current edge.
23✔
3014
                if !bytes.Equal(info.NodeKey1Bytes[:], sourcePubKey) &&
10✔
3015
                        !bytes.Equal(info.NodeKey2Bytes[:], sourcePubKey) {
10✔
3016

10✔
3017
                        nodeIsPublic = true
10✔
3018
                        return errDone
10✔
3019
                }
10✔
3020

13✔
3021
                // Since the edge _does_ extend to the source node, we'll also
3✔
3022
                // need to ensure that this is a public edge.
3✔
3023
                if info.AuthProof != nil {
3✔
3024
                        nodeIsPublic = true
3✔
3025
                        return errDone
3026
                }
3027

3028
                // Otherwise, we'll continue our search.
13✔
3029
                return nil
6✔
3030
        }, func() {
6✔
3031
                nodeIsPublic = false
6✔
3032
        })
3033
        if err != nil && !errors.Is(err, errDone) {
3034
                return false, err
1✔
3035
        }
×
3036

×
3037
        return nodeIsPublic, nil
×
3038
}
13✔
3039

×
3040
// FetchLightningNodeTx attempts to look up a target node by its identity
×
3041
// public key. If the node isn't found in the database, then
3042
// ErrGraphNodeNotFound is returned. An optional transaction may be provided.
13✔
3043
// If none is provided, then a new one will be created.
3044
func (c *KVStore) FetchLightningNodeTx(tx kvdb.RTx, nodePub route.Vertex) (
3045
        *models.LightningNode, error) {
3046

3047
        return c.fetchLightningNode(tx, nodePub)
3048
}
3049

3050
// FetchLightningNode attempts to look up a target node by its identity public
3,651✔
3051
// key. If the node isn't found in the database, then ErrGraphNodeNotFound is
3,651✔
3052
// returned.
3,651✔
3053
func (c *KVStore) FetchLightningNode(_ context.Context,
3,651✔
3054
        nodePub route.Vertex) (*models.LightningNode, error) {
3055

3056
        return c.fetchLightningNode(nil, nodePub)
3057
}
3058

3059
// fetchLightningNode attempts to look up a target node by its identity public
159✔
3060
// key. If the node isn't found in the database, then ErrGraphNodeNotFound is
159✔
3061
// returned. An optional transaction may be provided. If none is provided, then
159✔
3062
// a new one will be created.
159✔
3063
func (c *KVStore) fetchLightningNode(tx kvdb.RTx,
3064
        nodePub route.Vertex) (*models.LightningNode, error) {
3065

3066
        var node *models.LightningNode
3067
        fetch := func(tx kvdb.RTx) error {
3068
                // First grab the nodes bucket which stores the mapping from
3069
                // pubKey to node information.
3,810✔
3070
                nodes := tx.ReadBucket(nodeBucket)
3,810✔
3071
                if nodes == nil {
3,810✔
3072
                        return ErrGraphNotFound
7,620✔
3073
                }
3,810✔
3074

3,810✔
3075
                // If a key for this serialized public key isn't found, then
3,810✔
3076
                // the target node doesn't exist within the database.
3,810✔
3077
                nodeBytes := nodes.Get(nodePub[:])
×
3078
                if nodeBytes == nil {
×
3079
                        return ErrGraphNodeNotFound
3080
                }
3081

3082
                // If the node is found, then we can deserialize the node
3,810✔
3083
                // information to return to the user.
3,825✔
3084
                nodeReader := bytes.NewReader(nodeBytes)
15✔
3085
                n, err := deserializeLightningNode(nodeReader)
15✔
3086
                if err != nil {
3087
                        return err
3088
                }
3089

3,795✔
3090
                node = &n
3,795✔
3091

3,795✔
3092
                return nil
×
3093
        }
×
3094

3095
        if tx == nil {
3,795✔
3096
                err := kvdb.View(
3,795✔
3097
                        c.db, fetch, func() {
3,795✔
3098
                                node = nil
3099
                        },
3100
                )
3,993✔
3101
                if err != nil {
183✔
3102
                        return nil, err
366✔
3103
                }
183✔
3104

183✔
3105
                return node, nil
3106
        }
187✔
3107

4✔
3108
        err := fetch(tx)
4✔
3109
        if err != nil {
3110
                return nil, err
179✔
3111
        }
3112

3113
        return node, nil
3,627✔
3114
}
3,638✔
3115

11✔
3116
// HasLightningNode determines if the graph has a vertex identified by the
11✔
3117
// target node identity public key. If the node exists in the database, a
3118
// timestamp of when the data for the node was last updated is returned along
3,616✔
3119
// with a true boolean. Otherwise, an empty time.Time is returned with a false
3120
// boolean.
3121
func (c *KVStore) HasLightningNode(_ context.Context,
3122
        nodePub [33]byte) (time.Time, bool, error) {
3123

3124
        var (
3125
                updateTime time.Time
3126
                exists     bool
3127
        )
17✔
3128

17✔
3129
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
17✔
3130
                // First grab the nodes bucket which stores the mapping from
17✔
3131
                // pubKey to node information.
17✔
3132
                nodes := tx.ReadBucket(nodeBucket)
17✔
3133
                if nodes == nil {
17✔
3134
                        return ErrGraphNotFound
34✔
3135
                }
17✔
3136

17✔
3137
                // If a key for this serialized public key isn't found, we can
17✔
3138
                // exit early.
17✔
3139
                nodeBytes := nodes.Get(nodePub[:])
×
3140
                if nodeBytes == nil {
×
3141
                        exists = false
3142
                        return nil
3143
                }
3144

17✔
3145
                // Otherwise we continue on to obtain the time stamp
20✔
3146
                // representing the last time the data for this node was
3✔
3147
                // updated.
3✔
3148
                nodeReader := bytes.NewReader(nodeBytes)
3✔
3149
                node, err := deserializeLightningNode(nodeReader)
3150
                if err != nil {
3151
                        return err
3152
                }
3153

14✔
3154
                exists = true
14✔
3155
                updateTime = node.LastUpdate
14✔
3156

×
3157
                return nil
×
3158
        }, func() {
3159
                updateTime = time.Time{}
14✔
3160
                exists = false
14✔
3161
        })
14✔
3162
        if err != nil {
14✔
3163
                return time.Time{}, exists, err
17✔
3164
        }
17✔
3165

17✔
3166
        return updateTime, exists, nil
17✔
3167
}
17✔
3168

×
3169
// nodeTraversal is used to traverse all channels of a node given by its
×
3170
// public key and passes channel information into the specified callback.
3171
//
17✔
3172
// NOTE: the reset param is only meaningful if the tx param is nil. If it is
3173
// not nil, the caller is expected to have passed in a reset to the parent
3174
// function's View/Update call which will then apply to the whole transaction.
3175
func nodeTraversal(tx kvdb.RTx, nodePub []byte, db kvdb.Backend,
3176
        cb func(kvdb.RTx, *models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
3177
                *models.ChannelEdgePolicy) error, reset func()) error {
3178

3179
        traversal := func(tx kvdb.RTx) error {
3180
                edges := tx.ReadBucket(edgeBucket)
3181
                if edges == nil {
3182
                        return ErrGraphNotFound
1,268✔
3183
                }
1,268✔
3184
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
2,536✔
3185
                if edgeIndex == nil {
1,268✔
3186
                        return ErrGraphNoEdgesFound
1,268✔
3187
                }
×
3188

×
3189
                // In order to reach all the edges for this node, we take
1,268✔
3190
                // advantage of the construction of the key-space within the
1,268✔
3191
                // edge bucket. The keys are stored in the form: pubKey ||
×
3192
                // chanID. Therefore, starting from a chanID of zero, we can
×
3193
                // scan forward in the bucket, grabbing all the edges for the
3194
                // node. Once the prefix no longer matches, then we know we're
3195
                // done.
3196
                var nodeStart [33 + 8]byte
3197
                copy(nodeStart[:], nodePub)
3198
                copy(nodeStart[33:], chanStart[:])
3199

3200
                // Starting from the key pubKey || 0, we seek forward in the
3201
                // bucket until the retrieved key no longer has the public key
1,268✔
3202
                // as its prefix. This indicates that we've stepped over into
1,268✔
3203
                // another node's edges, so we can terminate our scan.
1,268✔
3204
                edgeCursor := edges.ReadCursor()
1,268✔
3205
                for nodeEdge, _ := edgeCursor.Seek(nodeStart[:]); bytes.HasPrefix(nodeEdge, nodePub); nodeEdge, _ = edgeCursor.Next() { //nolint:ll
1,268✔
3206
                        // If the prefix still matches, the channel id is
1,268✔
3207
                        // returned in nodeEdge. Channel id is used to lookup
1,268✔
3208
                        // the node at the other end of the channel and both
1,268✔
3209
                        // edge policies.
1,268✔
3210
                        chanID := nodeEdge[33:]
5,110✔
3211
                        edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID)
3,842✔
3212
                        if err != nil {
3,842✔
3213
                                return err
3,842✔
3214
                        }
3,842✔
3215

3,842✔
3216
                        outgoingPolicy, err := fetchChanEdgePolicy(
3,842✔
3217
                                edges, chanID, nodePub,
3,842✔
3218
                        )
×
3219
                        if err != nil {
×
3220
                                return err
3221
                        }
3,842✔
3222

3,842✔
3223
                        otherNode, err := edgeInfo.OtherNodeKeyBytes(nodePub)
3,842✔
3224
                        if err != nil {
3,842✔
3225
                                return err
×
3226
                        }
×
3227

3228
                        incomingPolicy, err := fetchChanEdgePolicy(
3,842✔
3229
                                edges, chanID, otherNode[:],
3,842✔
3230
                        )
×
3231
                        if err != nil {
×
3232
                                return err
3233
                        }
3,842✔
3234

3,842✔
3235
                        // Finally, we execute the callback.
3,842✔
3236
                        err = cb(tx, &edgeInfo, outgoingPolicy, incomingPolicy)
3,842✔
3237
                        if err != nil {
×
3238
                                return err
×
3239
                        }
3240
                }
3241

3,842✔
3242
                return nil
3,851✔
3243
        }
9✔
3244

9✔
3245
        // If no transaction was provided, then we'll create a new transaction
3246
        // to execute the transaction within.
3247
        if tx == nil {
1,259✔
3248
                return kvdb.View(db, traversal, reset)
3249
        }
3250

3251
        // Otherwise, we re-use the existing transaction to execute the graph
3252
        // traversal.
1,297✔
3253
        return traversal(tx)
29✔
3254
}
29✔
3255
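// exampleNodeEdgeKey is an illustrative sketch added by the editor (the helper
// name is hypothetical): edge entries are keyed as pubKey || chanID, so
// seeking to pubKey || 0 and scanning while the 33-byte prefix still matches
// visits every channel of a node, which is the scheme nodeTraversal relies on
// above.
func exampleNodeEdgeKey(nodePub [33]byte, chanID uint64) [33 + 8]byte {
        var key [33 + 8]byte
        copy(key[:33], nodePub[:])
        byteOrder.PutUint64(key[33:], chanID)

        return key
}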

3256
// ForEachNodeChannel iterates through all channels of the given node,
3257
// executing the passed callback with an edge info structure and the policies
3258
// of each end of the channel. The first edge policy is the outgoing edge *to*
1,239✔
3259
// the connecting node, while the second is the incoming edge *from* the
3260
// connecting node. If the callback returns an error, then the iteration is
3261
// halted with the error propagated back up to the caller.
3262
//
3263
// Unknown policies are passed into the callback as nil values.
3264
func (c *KVStore) ForEachNodeChannel(_ context.Context, nodePub route.Vertex,
3265
        cb func(*models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
3266
                *models.ChannelEdgePolicy) error, reset func()) error {
3267

3268
        return nodeTraversal(
3269
                nil, nodePub[:], c.db, func(_ kvdb.RTx,
3270
                        info *models.ChannelEdgeInfo, policy,
3271
                        policy2 *models.ChannelEdgePolicy) error {
6✔
3272

6✔
3273
                        return cb(info, policy, policy2)
6✔
3274
                }, reset,
6✔
3275
        )
6✔
3276
}
16✔
3277

10✔
3278
// ForEachSourceNodeChannel iterates through all channels of the source node,
10✔
3279
// executing the passed callback on each. The callback is provided with the
10✔
3280
// channel's outpoint, whether we have a policy for the channel and the channel
3281
// peer's node information.
3282
func (c *KVStore) ForEachSourceNodeChannel(_ context.Context,
3283
        cb func(chanPoint wire.OutPoint, havePolicy bool,
3284
                otherNode *models.LightningNode) error, reset func()) error {
3285

3286
        return kvdb.View(c.db, func(tx kvdb.RTx) error {
3287
                nodes := tx.ReadBucket(nodeBucket)
3288
                if nodes == nil {
3289
                        return ErrGraphNotFound
1✔
3290
                }
1✔
3291

2✔
3292
                node, err := sourceNodeWithTx(nodes)
1✔
3293
                if err != nil {
1✔
3294
                        return err
×
3295
                }
×
3296

3297
                return nodeTraversal(
1✔
3298
                        tx, node.PubKeyBytes[:], c.db, func(tx kvdb.RTx,
1✔
3299
                                info *models.ChannelEdgeInfo,
×
3300
                                policy, _ *models.ChannelEdgePolicy) error {
×
3301

3302
                                peer, err := c.fetchOtherNode(
1✔
3303
                                        tx, info, node.PubKeyBytes[:],
1✔
3304
                                )
1✔
3305
                                if err != nil {
3✔
3306
                                        return err
2✔
3307
                                }
2✔
3308

2✔
3309
                                return cb(
2✔
3310
                                        info.ChannelPoint, policy != nil, peer,
2✔
3311
                                )
×
3312
                        }, reset,
×
3313
                )
3314
        }, reset)
2✔
3315
}
2✔
3316

2✔
3317
// forEachNodeChannelTx iterates through all channels of the given node,
3318
// executing the passed callback with an edge info structure and the policies
3319
// of each end of the channel. The first edge policy is the outgoing edge *to*
3320
// the connecting node, while the second is the incoming edge *from* the
3321
// connecting node. If the callback returns an error, then the iteration is
3322
// halted with the error propagated back up to the caller.
3323
//
3324
// Unknown policies are passed into the callback as nil values.
3325
//
3326
// If the caller wishes to re-use an existing boltdb transaction, then it
3327
// should be passed as the first argument.  Otherwise, the first argument should
3328
// be nil and a fresh transaction will be created to execute the graph
3329
// traversal.
3330
//
3331
// NOTE: the reset function is only meaningful if the tx param is nil.
3332
func (c *KVStore) forEachNodeChannelTx(tx kvdb.RTx,
3333
        nodePub route.Vertex, cb func(kvdb.RTx, *models.ChannelEdgeInfo,
3334
                *models.ChannelEdgePolicy, *models.ChannelEdgePolicy) error,
3335
        reset func()) error {
3336

3337
        return nodeTraversal(tx, nodePub[:], c.db, cb, reset)
3338
}
3339

3340
// fetchOtherNode attempts to fetch the full LightningNode that's opposite of
999✔
3341
// the target node in the channel. This is useful when one knows the pubkey of
999✔
3342
// one of the nodes, and wishes to obtain the full LightningNode for the other
999✔
3343
// end of the channel.
999✔
3344
func (c *KVStore) fetchOtherNode(tx kvdb.RTx,
3345
        channel *models.ChannelEdgeInfo, thisNodeKey []byte) (
3346
        *models.LightningNode, error) {
3347

3348
        // Ensure that the node passed in is actually a member of the channel.
3349
        var targetNodeBytes [33]byte
3350
        switch {
3351
        case bytes.Equal(channel.NodeKey1Bytes[:], thisNodeKey):
2✔
3352
                targetNodeBytes = channel.NodeKey2Bytes
2✔
3353
        case bytes.Equal(channel.NodeKey2Bytes[:], thisNodeKey):
2✔
3354
                targetNodeBytes = channel.NodeKey1Bytes
2✔
3355
        default:
2✔
3356
                return nil, fmt.Errorf("node not participating in this channel")
1✔
3357
        }
1✔
3358

1✔
3359
        var targetNode *models.LightningNode
1✔
3360
        fetchNodeFunc := func(tx kvdb.RTx) error {
×
3361
                // First grab the nodes bucket which stores the mapping from
×
3362
                // pubKey to node information.
3363
                nodes := tx.ReadBucket(nodeBucket)
3364
                if nodes == nil {
2✔
3365
                        return ErrGraphNotFound
4✔
3366
                }
2✔
3367

2✔
3368
                node, err := fetchLightningNode(nodes, targetNodeBytes[:])
2✔
3369
                if err != nil {
2✔
3370
                        return err
×
3371
                }
×
3372

3373
                targetNode = &node
2✔
3374

2✔
3375
                return nil
×
3376
        }
×
3377

3378
        // If the transaction is nil, then we'll need to create a new one,
2✔
3379
        // otherwise we can use the existing db transaction.
2✔
3380
        var err error
2✔
3381
        if tx == nil {
3382
                err = kvdb.View(c.db, fetchNodeFunc, func() {
3383
                        targetNode = nil
3384
                })
3385
        } else {
2✔
3386
                err = fetchNodeFunc(tx)
2✔
3387
        }
×
3388

×
3389
        return targetNode, err
×
3390
}
2✔
3391

2✔
3392
// computeEdgePolicyKeys is a helper function that can be used to compute the
2✔
3393
// keys used to index the channel edge policy info for the two nodes of the
3394
// edge. The keys for node 1 and node 2 are returned respectively.
2✔
3395
func computeEdgePolicyKeys(info *models.ChannelEdgeInfo) ([]byte, []byte) {
3396
        var (
3397
                node1Key [33 + 8]byte
3398
                node2Key [33 + 8]byte
3399
        )
3400

22✔
3401
        copy(node1Key[:], info.NodeKey1Bytes[:])
22✔
3402
        copy(node2Key[:], info.NodeKey2Bytes[:])
22✔
3403

22✔
3404
        byteOrder.PutUint64(node1Key[33:], info.ChannelID)
22✔
3405
        byteOrder.PutUint64(node2Key[33:], info.ChannelID)
22✔
3406

22✔
3407
        return node1Key[:], node2Key[:]
22✔
3408
}
22✔
3409

22✔
3410
// FetchChannelEdgesByOutpoint attempts to lookup the two directed edges for
22✔
3411
// the channel identified by the funding outpoint. If the channel can't be
22✔
3412
// found, then ErrEdgeNotFound is returned. A struct which houses the general
22✔
3413
// information for the channel itself is returned as well as two structs that
22✔
3414
// contain the routing policies for the channel in either direction.
3415
func (c *KVStore) FetchChannelEdgesByOutpoint(op *wire.OutPoint) (
3416
        *models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
3417
        *models.ChannelEdgePolicy, error) {
3418

3419
        var (
3420
                edgeInfo *models.ChannelEdgeInfo
3421
                policy1  *models.ChannelEdgePolicy
3422
                policy2  *models.ChannelEdgePolicy
11✔
3423
        )
11✔
3424

11✔
3425
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
11✔
3426
                // First, grab the node bucket. This will be used to populate
11✔
3427
                // the Node pointers in each edge read from disk.
11✔
3428
                nodes := tx.ReadBucket(nodeBucket)
11✔
3429
                if nodes == nil {
11✔
3430
                        return ErrGraphNotFound
22✔
3431
                }
11✔
3432

11✔
3433
                // Next, grab the edge bucket which stores the edges, and also
11✔
3434
                // the index itself so we can group the directed edges together
11✔
3435
                // logically.
×
3436
                edges := tx.ReadBucket(edgeBucket)
×
3437
                if edges == nil {
3438
                        return ErrGraphNoEdgesFound
3439
                }
3440
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
3441
                if edgeIndex == nil {
11✔
3442
                        return ErrGraphNoEdgesFound
11✔
3443
                }
×
3444

×
3445
                // If the channel's outpoint doesn't exist within the outpoint
11✔
3446
                // index, then the edge does not exist.
11✔
3447
                chanIndex := edges.NestedReadBucket(channelPointBucket)
×
3448
                if chanIndex == nil {
×
3449
                        return ErrGraphNoEdgesFound
3450
                }
3451
                var b bytes.Buffer
3452
                if err := WriteOutpoint(&b, op); err != nil {
11✔
3453
                        return err
11✔
3454
                }
×
3455
                chanID := chanIndex.Get(b.Bytes())
×
3456
                if chanID == nil {
11✔
3457
                        return fmt.Errorf("%w: op=%v", ErrEdgeNotFound, op)
11✔
3458
                }
×
3459

×
3460
                // If the channel is found to exist, then we'll first retrieve
11✔
3461
                // the general information for the channel.
21✔
3462
                edge, err := fetchChanEdgeInfo(edgeIndex, chanID)
10✔
3463
                if err != nil {
10✔
3464
                        return fmt.Errorf("%w: chanID=%x", err, chanID)
3465
                }
3466
                edgeInfo = &edge
3467

1✔
3468
                // Once we have the information about the channels' parameters,
1✔
3469
                // we'll fetch the routing policies for each for the directed
×
3470
                // edges.
×
3471
                e1, e2, err := fetchChanEdgePolicies(edgeIndex, edges, chanID)
1✔
3472
                if err != nil {
1✔
3473
                        return fmt.Errorf("failed to find policy: %w", err)
1✔
3474
                }
1✔
3475

1✔
3476
                policy1 = e1
1✔
3477
                policy2 = e2
1✔
3478

×
3479
                return nil
×
3480
        }, func() {
3481
                edgeInfo = nil
1✔
3482
                policy1 = nil
1✔
3483
                policy2 = nil
1✔
3484
        })
1✔
3485
        if err != nil {
11✔
3486
                return nil, nil, nil, err
11✔
3487
        }
11✔
3488

11✔
3489
        return edgeInfo, policy1, policy2, nil
11✔
3490
}
21✔
3491

10✔
3492
// FetchChannelEdgesByID attempts to lookup the two directed edges for the
10✔
3493
// channel identified by the channel ID. If the channel can't be found, then
3494
// ErrEdgeNotFound is returned. A struct which houses the general information
1✔
3495
// for the channel itself is returned as well as two structs that contain the
3496
// routing policies for the channel in either direction.
3497
//
3498
// ErrZombieEdge can be returned if the edge is currently marked as a zombie
3499
// within the database. In this case, the ChannelEdgePolicy's will be nil, and
3500
// the ChannelEdgeInfo will only include the public keys of each node.
3501
func (c *KVStore) FetchChannelEdgesByID(chanID uint64) (
3502
        *models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
3503
        *models.ChannelEdgePolicy, error) {
3504

3505
        var (
3506
                edgeInfo  *models.ChannelEdgeInfo
3507
                policy1   *models.ChannelEdgePolicy
3508
                policy2   *models.ChannelEdgePolicy
2,689✔
3509
                channelID [8]byte
2,689✔
3510
        )
2,689✔
3511

2,689✔
3512
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
2,689✔
3513
                // First, grab the node bucket. This will be used to populate
2,689✔
3514
                // the Node pointers in each edge read from disk.
2,689✔
3515
                nodes := tx.ReadBucket(nodeBucket)
2,689✔
3516
                if nodes == nil {
2,689✔
3517
                        return ErrGraphNotFound
5,378✔
3518
                }
2,689✔
3519

2,689✔
3520
		// Next, grab the edge bucket which stores the edges, and also
		// the index itself so we can group the directed edges together
		// logically.
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrGraphNoEdgesFound
		}

		byteOrder.PutUint64(channelID[:], chanID)

		// Now, attempt to fetch the edge.
		edge, err := fetchChanEdgeInfo(edgeIndex, channelID[:])

		// If it doesn't exist, we'll quickly check our zombie index to
		// see if we've previously marked it as such.
		if errors.Is(err, ErrEdgeNotFound) {
			// If the zombie index doesn't exist, or the edge is not
			// marked as a zombie within it, then we'll return the
			// original ErrEdgeNotFound error.
			zombieIndex := edges.NestedReadBucket(zombieBucket)
			if zombieIndex == nil {
				return ErrEdgeNotFound
			}

			isZombie, pubKey1, pubKey2 := isZombieEdge(
				zombieIndex, chanID,
			)
			if !isZombie {
				return ErrEdgeNotFound
			}

			// Otherwise, the edge is marked as a zombie, so we'll
			// populate the edge info with the public keys of each
			// party as this is the only information we have about
			// it and return an error signaling so.
			edgeInfo = &models.ChannelEdgeInfo{
				NodeKey1Bytes: pubKey1,
				NodeKey2Bytes: pubKey2,
			}

			return ErrZombieEdge
		}

		// Otherwise, we'll just return the error if any.
		if err != nil {
			return err
		}

		edgeInfo = &edge

		// Then we'll attempt to fetch the accompanying policies of this
		// edge.
		e1, e2, err := fetchChanEdgePolicies(
			edgeIndex, edges, channelID[:],
		)
		if err != nil {
			return err
		}

		policy1 = e1
		policy2 = e2

		return nil
	}, func() {
		edgeInfo = nil
		policy1 = nil
		policy2 = nil
	})
	if errors.Is(err, ErrZombieEdge) {
		return edgeInfo, nil, nil, err
	}
	if err != nil {
		return nil, nil, nil, err
	}

	return edgeInfo, policy1, policy2, nil
}

// IsPublicNode is a helper method that determines whether the node with the
// given public key is seen as a public node in the graph from the graph's
// source node's point of view.
func (c *KVStore) IsPublicNode(pubKey [33]byte) (bool, error) {
	var nodeIsPublic bool
	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		nodes := tx.ReadBucket(nodeBucket)
		if nodes == nil {
			return ErrGraphNodesNotFound
		}
		ourPubKey := nodes.Get(sourceKey)
		if ourPubKey == nil {
			return ErrSourceNodeNotSet
		}
		node, err := fetchLightningNode(nodes, pubKey[:])
		if err != nil {
			return err
		}

		nodeIsPublic, err = c.isPublic(tx, node.PubKeyBytes, ourPubKey)

		return err
	}, func() {
		nodeIsPublic = false
	})
	if err != nil {
		return false, err
	}

	return nodeIsPublic, nil
}

// genMultiSigP2WSH generates the p2wsh'd multisig script for 2 of 2 pubkeys.
func genMultiSigP2WSH(aPub, bPub []byte) ([]byte, error) {
	witnessScript, err := input.GenMultiSigScript(aPub, bPub)
	if err != nil {
		return nil, err
	}

	// With the witness script generated, we'll now turn it into a p2wsh
	// script:
	//  * OP_0 <sha256(script)>
	bldr := txscript.NewScriptBuilder(
		txscript.WithScriptAllocSize(input.P2WSHSize),
	)
	bldr.AddOp(txscript.OP_0)
	scriptHash := sha256.Sum256(witnessScript)
	bldr.AddData(scriptHash[:])

	return bldr.Script()
}

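// exampleFundingScript is a hypothetical editorial sketch (not part of the
// upstream file) illustrating the shape of the script produced by
// genMultiSigP2WSH above: a 34-byte v0 witness program of the form
// OP_0 <32-byte sha256(witnessScript)>.
func exampleFundingScript(key1, key2 [33]byte) ([]byte, error) {
	pkScript, err := genMultiSigP2WSH(key1[:], key2[:])
	if err != nil {
		return nil, err
	}

	// Sanity check the expected layout: OP_0 followed by a 32-byte data
	// push of the script hash.
	if len(pkScript) != 34 || pkScript[0] != txscript.OP_0 {
		return nil, fmt.Errorf("unexpected funding script: %x",
			pkScript)
	}

	return pkScript, nil
}
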
// EdgePoint couples the outpoint of a channel with the funding script that it
// creates. The FilteredChainView will use this to watch for spends of this
// edge point on chain. We require both of these values as, depending on the
// concrete implementation, either the pkScript or the outpoint will be used.
type EdgePoint struct {
	// FundingPkScript is the p2wsh multi-sig script of the target channel.
	FundingPkScript []byte

	// OutPoint is the outpoint of the target channel.
	OutPoint wire.OutPoint
}

// String returns a human-readable version of the target EdgePoint. We return
// the outpoint directly as it is enough to uniquely identify the edge point.
func (e *EdgePoint) String() string {
	return e.OutPoint.String()
}

// ChannelView returns the verifiable edge information for each active channel
// within the known channel graph. The set of UTXOs (along with their scripts)
// returned are the ones that need to be watched on chain to detect channel
// closes on the resident blockchain.
func (c *KVStore) ChannelView() ([]EdgePoint, error) {
	var edgePoints []EdgePoint
	if err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		// We're going to iterate over the entire channel index, so
		// we'll need to fetch the edgeBucket to get to the index as
		// it's a sub-bucket.
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		chanIndex := edges.NestedReadBucket(channelPointBucket)
		if chanIndex == nil {
			return ErrGraphNoEdgesFound
		}
		edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrGraphNoEdgesFound
		}

		// Once we have the proper bucket, we'll range over each key
		// (which is the channel point for the channel) and decode it,
		// accumulating each entry.
		return chanIndex.ForEach(
			func(chanPointBytes, chanID []byte) error {
				chanPointReader := bytes.NewReader(
					chanPointBytes,
				)

				var chanPoint wire.OutPoint
				err := ReadOutpoint(chanPointReader, &chanPoint)
				if err != nil {
					return err
				}

				edgeInfo, err := fetchChanEdgeInfo(
					edgeIndex, chanID,
				)
				if err != nil {
					return err
				}

				pkScript, err := genMultiSigP2WSH(
					edgeInfo.BitcoinKey1Bytes[:],
					edgeInfo.BitcoinKey2Bytes[:],
				)
				if err != nil {
					return err
				}

				edgePoints = append(edgePoints, EdgePoint{
					FundingPkScript: pkScript,
					OutPoint:        chanPoint,
				})

				return nil
			},
		)
	}, func() {
		edgePoints = nil
	}); err != nil {
		return nil, err
	}

	return edgePoints, nil
}

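// exampleWatchChannelView is a hypothetical editorial sketch (not part of the
// upstream file) of how a caller might consume ChannelView: each EdgePoint
// carries both the funding outpoint and its pkScript, so either one can be
// handed to a filtered chain view for spend detection.
func exampleWatchChannelView(g *KVStore) error {
	edgePoints, err := g.ChannelView()
	if err != nil {
		return err
	}

	for _, edgePoint := range edgePoints {
		// A real caller would register these with its chain watcher;
		// here we only print the pair for illustration.
		fmt.Printf("watching %v (script=%x)\n",
			edgePoint.OutPoint, edgePoint.FundingPkScript)
	}

	return nil
}
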
// MarkEdgeZombie attempts to mark a channel identified by its channel ID as a
// zombie. This method is used on an ad-hoc basis, when channels need to be
// marked as zombies outside the normal pruning cycle.
func (c *KVStore) MarkEdgeZombie(chanID uint64,
	pubKey1, pubKey2 [33]byte) error {

	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()

	err := kvdb.Batch(c.db, func(tx kvdb.RwTx) error {
		edges := tx.ReadWriteBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
		if err != nil {
			return fmt.Errorf("unable to create zombie "+
				"bucket: %w", err)
		}

		return markEdgeZombie(zombieIndex, chanID, pubKey1, pubKey2)
	})
	if err != nil {
		return err
	}

	c.rejectCache.remove(chanID)
	c.chanCache.remove(chanID)

	return nil
}

// markEdgeZombie marks an edge as a zombie within our zombie index. The public
// keys should represent the node public keys of the two parties involved in
// the edge.
func markEdgeZombie(zombieIndex kvdb.RwBucket, chanID uint64, pubKey1,
	pubKey2 [33]byte) error {

	var k [8]byte
	byteOrder.PutUint64(k[:], chanID)

	var v [66]byte
	copy(v[:33], pubKey1[:])
	copy(v[33:], pubKey2[:])

	return zombieIndex.Put(k[:], v[:])
}

// MarkEdgeLive clears an edge from our zombie index, deeming it as live.
func (c *KVStore) MarkEdgeLive(chanID uint64) error {
	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()

	return c.markEdgeLiveUnsafe(nil, chanID)
}

// markEdgeLiveUnsafe clears an edge from the zombie index. This method can be
// called with an existing kvdb.RwTx or the argument can be set to nil in which
// case a new transaction will be created.
//
// NOTE: this method MUST only be called if the cacheMu has already been
// acquired.
func (c *KVStore) markEdgeLiveUnsafe(tx kvdb.RwTx, chanID uint64) error {
	dbFn := func(tx kvdb.RwTx) error {
		edges := tx.ReadWriteBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		zombieIndex := edges.NestedReadWriteBucket(zombieBucket)
		if zombieIndex == nil {
			return nil
		}

		var k [8]byte
		byteOrder.PutUint64(k[:], chanID)

		if len(zombieIndex.Get(k[:])) == 0 {
			return ErrZombieEdgeNotFound
		}

		return zombieIndex.Delete(k[:])
	}

	// If the transaction is nil, we'll create a new one. Otherwise, we use
	// the existing transaction.
	var err error
	if tx == nil {
		err = kvdb.Update(c.db, dbFn, func() {})
	} else {
		err = dbFn(tx)
	}
	if err != nil {
		return err
	}

	c.rejectCache.remove(chanID)
	c.chanCache.remove(chanID)

	return nil
}

// IsZombieEdge returns whether the edge is considered a zombie. If it is a
// zombie, then the two node public keys corresponding to this edge are also
// returned.
func (c *KVStore) IsZombieEdge(chanID uint64) (bool, [33]byte, [33]byte,
	error) {

	var (
		isZombie         bool
		pubKey1, pubKey2 [33]byte
	)

	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		zombieIndex := edges.NestedReadBucket(zombieBucket)
		if zombieIndex == nil {
			return nil
		}

		isZombie, pubKey1, pubKey2 = isZombieEdge(zombieIndex, chanID)

		return nil
	}, func() {
		isZombie = false
		pubKey1 = [33]byte{}
		pubKey2 = [33]byte{}
	})
	if err != nil {
		return false, [33]byte{}, [33]byte{}, fmt.Errorf("%w: %w "+
			"(chanID=%d)", ErrCantCheckIfZombieEdgeStr, err, chanID)
	}

	return isZombie, pubKey1, pubKey2, nil
}

// isZombieEdge returns whether an entry exists for the given channel in the
// zombie index. If an entry exists, then the two node public keys corresponding
// to this edge are also returned.
func isZombieEdge(zombieIndex kvdb.RBucket,
	chanID uint64) (bool, [33]byte, [33]byte) {

	var k [8]byte
	byteOrder.PutUint64(k[:], chanID)

	v := zombieIndex.Get(k[:])
	if v == nil {
		return false, [33]byte{}, [33]byte{}
	}

	var pubKey1, pubKey2 [33]byte
	copy(pubKey1[:], v[:33])
	copy(pubKey2[:], v[33:])

	return true, pubKey1, pubKey2
}

// NumZombies returns the current number of zombie channels in the graph.
func (c *KVStore) NumZombies() (uint64, error) {
	var numZombies uint64
	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return nil
		}
		zombieIndex := edges.NestedReadBucket(zombieBucket)
		if zombieIndex == nil {
			return nil
		}

		return zombieIndex.ForEach(func(_, _ []byte) error {
			numZombies++
			return nil
		})
	}, func() {
		numZombies = 0
	})
	if err != nil {
		return 0, err
	}

	return numZombies, nil
}

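// exampleZombieLifecycle is a hypothetical editorial sketch (not part of the
// upstream file) of the zombie-index round trip exposed above: an edge is
// marked as a zombie, the stored node keys are read back via IsZombieEdge,
// and the edge is then resurrected with MarkEdgeLive.
func exampleZombieLifecycle(g *KVStore, chanID uint64,
	pub1, pub2 [33]byte) error {

	if err := g.MarkEdgeZombie(chanID, pub1, pub2); err != nil {
		return err
	}

	isZombie, key1, key2, err := g.IsZombieEdge(chanID)
	if err != nil {
		return err
	}
	if !isZombie || key1 != pub1 || key2 != pub2 {
		return fmt.Errorf("unexpected zombie entry for %v", chanID)
	}

	// Clearing the entry deems the edge live again. A second call would
	// fail with ErrZombieEdgeNotFound since the entry is gone.
	return g.MarkEdgeLive(chanID)
}
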
// PutClosedScid stores a SCID for a closed channel in the database. This is so
// that we can ignore channel announcements that we know to be closed without
// having to validate them and fetch a block.
func (c *KVStore) PutClosedScid(scid lnwire.ShortChannelID) error {
	return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
		closedScids, err := tx.CreateTopLevelBucket(closedScidBucket)
		if err != nil {
			return err
		}

		var k [8]byte
		byteOrder.PutUint64(k[:], scid.ToUint64())

		return closedScids.Put(k[:], []byte{})
	}, func() {})
}

// IsClosedScid checks whether a channel identified by the passed-in scid is
// closed. This helps avoid having to perform expensive validation checks.
// TODO: Add an LRU cache to cut down on disk reads.
func (c *KVStore) IsClosedScid(scid lnwire.ShortChannelID) (bool, error) {
	var isClosed bool
	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		closedScids := tx.ReadBucket(closedScidBucket)
		if closedScids == nil {
			return ErrClosedScidsNotFound
		}

		var k [8]byte
		byteOrder.PutUint64(k[:], scid.ToUint64())

		if closedScids.Get(k[:]) != nil {
			isClosed = true
			return nil
		}

		return nil
	}, func() {
		isClosed = false
	})
	if err != nil {
		return false, err
	}

	return isClosed, nil
}

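// exampleRecordClosedScid is a hypothetical editorial sketch (not part of the
// upstream file) showing the closed-SCID round trip: once a short channel ID
// has been recorded with PutClosedScid, later announcements for it can be
// skipped cheaply via IsClosedScid.
func exampleRecordClosedScid(g *KVStore, scid lnwire.ShortChannelID) error {
	if err := g.PutClosedScid(scid); err != nil {
		return err
	}

	closed, err := g.IsClosedScid(scid)
	if err != nil {
		return err
	}
	if !closed {
		return fmt.Errorf("scid %v not recorded as closed", scid)
	}

	return nil
}
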
// GraphSession will provide the callback with access to a NodeTraverser
// instance which can be used to perform queries against the channel graph.
func (c *KVStore) GraphSession(cb func(graph NodeTraverser) error,
	reset func()) error {

	return c.db.View(func(tx walletdb.ReadTx) error {
		return cb(&nodeTraverserSession{
			db: c,
			tx: tx,
		})
	}, reset)
}

// nodeTraverserSession implements the NodeTraverser interface but with a
// backing read-only transaction for a consistent view of the graph.
type nodeTraverserSession struct {
	tx kvdb.RTx
	db *KVStore
}

// ForEachNodeDirectedChannel calls the callback for every channel of the given
// node.
//
// NOTE: Part of the NodeTraverser interface.
func (c *nodeTraverserSession) ForEachNodeDirectedChannel(nodePub route.Vertex,
	cb func(channel *DirectedChannel) error, _ func()) error {

	return c.db.forEachNodeDirectedChannel(c.tx, nodePub, cb, func() {})
}

// FetchNodeFeatures returns the features of the given node. If the node is
// unknown, assume no additional features are supported.
//
// NOTE: Part of the NodeTraverser interface.
func (c *nodeTraverserSession) FetchNodeFeatures(nodePub route.Vertex) (
	*lnwire.FeatureVector, error) {

	return c.db.fetchNodeFeatures(c.tx, nodePub)
}

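// exampleGraphSession is a hypothetical editorial sketch (not part of the
// upstream file) of using GraphSession: every query made through the provided
// NodeTraverser runs against a single read transaction and therefore sees a
// consistent snapshot of the graph.
func exampleGraphSession(g *KVStore, self route.Vertex) error {
	return g.GraphSession(func(graph NodeTraverser) error {
		features, err := graph.FetchNodeFeatures(self)
		if err != nil {
			return err
		}

		var numChans int
		err = graph.ForEachNodeDirectedChannel(
			self, func(_ *DirectedChannel) error {
				numChans++
				return nil
			}, func() {},
		)
		if err != nil {
			return err
		}

		fmt.Printf("node has %d channels, features: %v\n",
			numChans, features)

		return nil
	}, func() {})
}
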
func putLightningNode(nodeBucket, aliasBucket, updateIndex kvdb.RwBucket,
	node *models.LightningNode) error {

	var (
		scratch [16]byte
		b       bytes.Buffer
	)

	pub, err := node.PubKey()
	if err != nil {
		return err
	}
	nodePub := pub.SerializeCompressed()

	// If the node has the update time set, write it, else write 0.
	updateUnix := uint64(0)
	if node.LastUpdate.Unix() > 0 {
		updateUnix = uint64(node.LastUpdate.Unix())
	}

	byteOrder.PutUint64(scratch[:8], updateUnix)
	if _, err := b.Write(scratch[:8]); err != nil {
		return err
	}

	if _, err := b.Write(nodePub); err != nil {
		return err
	}

	// If we got a node announcement for this node, we will have the rest
	// of the data available. If not, we don't have more data to write.
	if !node.HaveNodeAnnouncement {
		// Write HaveNodeAnnouncement=0.
		byteOrder.PutUint16(scratch[:2], 0)
		if _, err := b.Write(scratch[:2]); err != nil {
			return err
		}

		return nodeBucket.Put(nodePub, b.Bytes())
	}

	// Write HaveNodeAnnouncement=1.
	byteOrder.PutUint16(scratch[:2], 1)
	if _, err := b.Write(scratch[:2]); err != nil {
		return err
	}

	if err := binary.Write(&b, byteOrder, node.Color.R); err != nil {
		return err
	}
	if err := binary.Write(&b, byteOrder, node.Color.G); err != nil {
		return err
	}
	if err := binary.Write(&b, byteOrder, node.Color.B); err != nil {
		return err
	}

	if err := wire.WriteVarString(&b, 0, node.Alias); err != nil {
		return err
	}

	if err := node.Features.Encode(&b); err != nil {
		return err
	}

	numAddresses := uint16(len(node.Addresses))
	byteOrder.PutUint16(scratch[:2], numAddresses)
	if _, err := b.Write(scratch[:2]); err != nil {
		return err
	}

	for _, address := range node.Addresses {
		if err := SerializeAddr(&b, address); err != nil {
			return err
		}
	}

	sigLen := len(node.AuthSigBytes)
	if sigLen > 80 {
		return fmt.Errorf("max sig len allowed is 80, had %v",
			sigLen)
	}

	err = wire.WriteVarBytes(&b, 0, node.AuthSigBytes)
	if err != nil {
		return err
	}

	if len(node.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes {
		return ErrTooManyExtraOpaqueBytes(len(node.ExtraOpaqueData))
	}
	err = wire.WriteVarBytes(&b, 0, node.ExtraOpaqueData)
	if err != nil {
		return err
	}

	if err := aliasBucket.Put(nodePub, []byte(node.Alias)); err != nil {
		return err
	}

	// With the alias bucket updated, we'll now update the index that
	// tracks the time series of node updates.
	var indexKey [8 + 33]byte
	byteOrder.PutUint64(indexKey[:8], updateUnix)
	copy(indexKey[8:], nodePub)

	// If there was already an old index entry for this node, then we'll
	// delete the old one before we write the new entry.
	if nodeBytes := nodeBucket.Get(nodePub); nodeBytes != nil {
		// Extract out the old update time so we can reconstruct the
		// prior index key to delete it from the index.
		oldUpdateTime := nodeBytes[:8]

		var oldIndexKey [8 + 33]byte
		copy(oldIndexKey[:8], oldUpdateTime)
		copy(oldIndexKey[8:], nodePub)

		if err := updateIndex.Delete(oldIndexKey[:]); err != nil {
			return err
		}
	}

	if err := updateIndex.Put(indexKey[:], nil); err != nil {
		return err
	}

	return nodeBucket.Put(nodePub, b.Bytes())
}

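// For reference, an editorial summary (not part of the upstream file) of the
// serialization emitted by putLightningNode and consumed by
// deserializeLightningNode below; it mirrors the code and is a simple
// concatenation:
//
//	lastUpdate (8 bytes) || pubKey (33 bytes) || haveNodeAnn (2 bytes)
//
// and, only when haveNodeAnn == 1:
//
//	color R/G/B (3 bytes) || alias (varstring) || features ||
//	numAddresses (2 bytes) || addresses || authSig (varbytes) ||
//	extraOpaqueData (varbytes)
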
func fetchLightningNode(nodeBucket kvdb.RBucket,
	nodePub []byte) (models.LightningNode, error) {

	nodeBytes := nodeBucket.Get(nodePub)
	if nodeBytes == nil {
		return models.LightningNode{}, ErrGraphNodeNotFound
	}

	nodeReader := bytes.NewReader(nodeBytes)

	return deserializeLightningNode(nodeReader)
}

func deserializeLightningNodeCacheable(r io.Reader) (route.Vertex,
	*lnwire.FeatureVector, error) {

	var (
		pubKey      route.Vertex
		features    = lnwire.EmptyFeatureVector()
		nodeScratch [8]byte
	)

	// Skip ahead:
	// - LastUpdate (8 bytes)
	if _, err := r.Read(nodeScratch[:]); err != nil {
		return pubKey, nil, err
	}

	if _, err := io.ReadFull(r, pubKey[:]); err != nil {
		return pubKey, nil, err
	}

	// Read the node announcement flag.
	if _, err := r.Read(nodeScratch[:2]); err != nil {
		return pubKey, nil, err
	}
	hasNodeAnn := byteOrder.Uint16(nodeScratch[:2])

	// The rest of the data is optional, and will only be there if we got a
	// node announcement for this node.
	if hasNodeAnn == 0 {
		return pubKey, features, nil
	}

	// We did get a node announcement for this node, so we'll have the rest
	// of the data available.
	var rgb uint8
	if err := binary.Read(r, byteOrder, &rgb); err != nil {
		return pubKey, nil, err
	}
	if err := binary.Read(r, byteOrder, &rgb); err != nil {
		return pubKey, nil, err
	}
	if err := binary.Read(r, byteOrder, &rgb); err != nil {
		return pubKey, nil, err
	}

	if _, err := wire.ReadVarString(r, 0); err != nil {
		return pubKey, nil, err
	}

	if err := features.Decode(r); err != nil {
		return pubKey, nil, err
	}

	return pubKey, features, nil
}

func deserializeLightningNode(r io.Reader) (models.LightningNode, error) {
	var (
		node    models.LightningNode
		scratch [8]byte
		err     error
	)

	// Always populate a feature vector, even if we don't have a node
	// announcement and short circuit below.
	node.Features = lnwire.EmptyFeatureVector()

	if _, err := r.Read(scratch[:]); err != nil {
		return models.LightningNode{}, err
	}

	unix := int64(byteOrder.Uint64(scratch[:]))
	node.LastUpdate = time.Unix(unix, 0)

	if _, err := io.ReadFull(r, node.PubKeyBytes[:]); err != nil {
		return models.LightningNode{}, err
	}

	if _, err := r.Read(scratch[:2]); err != nil {
		return models.LightningNode{}, err
	}

	hasNodeAnn := byteOrder.Uint16(scratch[:2])
	if hasNodeAnn == 1 {
		node.HaveNodeAnnouncement = true
	} else {
		node.HaveNodeAnnouncement = false
	}

	// The rest of the data is optional, and will only be there if we got a
	// node announcement for this node.
	if !node.HaveNodeAnnouncement {
		return node, nil
	}

	// We did get a node announcement for this node, so we'll have the rest
	// of the data available.
	if err := binary.Read(r, byteOrder, &node.Color.R); err != nil {
		return models.LightningNode{}, err
	}
	if err := binary.Read(r, byteOrder, &node.Color.G); err != nil {
		return models.LightningNode{}, err
	}
	if err := binary.Read(r, byteOrder, &node.Color.B); err != nil {
		return models.LightningNode{}, err
	}

	node.Alias, err = wire.ReadVarString(r, 0)
	if err != nil {
		return models.LightningNode{}, err
	}

	err = node.Features.Decode(r)
	if err != nil {
		return models.LightningNode{}, err
	}

	if _, err := r.Read(scratch[:2]); err != nil {
		return models.LightningNode{}, err
	}
	numAddresses := int(byteOrder.Uint16(scratch[:2]))

	var addresses []net.Addr
	for i := 0; i < numAddresses; i++ {
		address, err := DeserializeAddr(r)
		if err != nil {
			return models.LightningNode{}, err
		}
		addresses = append(addresses, address)
	}
	node.Addresses = addresses

	node.AuthSigBytes, err = wire.ReadVarBytes(r, 0, 80, "sig")
	if err != nil {
		return models.LightningNode{}, err
	}

	// We'll try and see if there are any opaque bytes left, if not, then
	// we'll ignore the EOF error and return the node as is.
	extraBytes, err := wire.ReadVarBytes(
		r, 0, MaxAllowedExtraOpaqueBytes, "blob",
	)
	switch {
	case errors.Is(err, io.ErrUnexpectedEOF):
	case errors.Is(err, io.EOF):
	case err != nil:
		return models.LightningNode{}, err
	}

	if len(extraBytes) > 0 {
		node.ExtraOpaqueData = extraBytes
	}

	return node, nil
}

func putChanEdgeInfo(edgeIndex kvdb.RwBucket,
	edgeInfo *models.ChannelEdgeInfo, chanID [8]byte) error {

	var b bytes.Buffer

	if _, err := b.Write(edgeInfo.NodeKey1Bytes[:]); err != nil {
		return err
	}
	if _, err := b.Write(edgeInfo.NodeKey2Bytes[:]); err != nil {
		return err
	}
	if _, err := b.Write(edgeInfo.BitcoinKey1Bytes[:]); err != nil {
		return err
	}
	if _, err := b.Write(edgeInfo.BitcoinKey2Bytes[:]); err != nil {
		return err
	}

	var featureBuf bytes.Buffer
	if err := edgeInfo.Features.Encode(&featureBuf); err != nil {
		return fmt.Errorf("unable to encode features: %w", err)
	}

	if err := wire.WriteVarBytes(&b, 0, featureBuf.Bytes()); err != nil {
		return err
	}

	authProof := edgeInfo.AuthProof
	var nodeSig1, nodeSig2, bitcoinSig1, bitcoinSig2 []byte
	if authProof != nil {
		nodeSig1 = authProof.NodeSig1Bytes
		nodeSig2 = authProof.NodeSig2Bytes
		bitcoinSig1 = authProof.BitcoinSig1Bytes
		bitcoinSig2 = authProof.BitcoinSig2Bytes
	}

	if err := wire.WriteVarBytes(&b, 0, nodeSig1); err != nil {
		return err
	}
	if err := wire.WriteVarBytes(&b, 0, nodeSig2); err != nil {
		return err
	}
	if err := wire.WriteVarBytes(&b, 0, bitcoinSig1); err != nil {
		return err
	}
	if err := wire.WriteVarBytes(&b, 0, bitcoinSig2); err != nil {
		return err
	}

	if err := WriteOutpoint(&b, &edgeInfo.ChannelPoint); err != nil {
		return err
	}
	err := binary.Write(&b, byteOrder, uint64(edgeInfo.Capacity))
	if err != nil {
		return err
	}
	if _, err := b.Write(chanID[:]); err != nil {
		return err
	}
	if _, err := b.Write(edgeInfo.ChainHash[:]); err != nil {
		return err
	}

	if len(edgeInfo.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes {
		return ErrTooManyExtraOpaqueBytes(len(edgeInfo.ExtraOpaqueData))
	}
	err = wire.WriteVarBytes(&b, 0, edgeInfo.ExtraOpaqueData)
	if err != nil {
		return err
	}

	return edgeIndex.Put(chanID[:], b.Bytes())
}

func fetchChanEdgeInfo(edgeIndex kvdb.RBucket,
	chanID []byte) (models.ChannelEdgeInfo, error) {

	edgeInfoBytes := edgeIndex.Get(chanID)
	if edgeInfoBytes == nil {
		return models.ChannelEdgeInfo{}, ErrEdgeNotFound
	}

	edgeInfoReader := bytes.NewReader(edgeInfoBytes)

	return deserializeChanEdgeInfo(edgeInfoReader)
}

func deserializeChanEdgeInfo(r io.Reader) (models.ChannelEdgeInfo, error) {
	var (
		err      error
		edgeInfo models.ChannelEdgeInfo
	)

	if _, err := io.ReadFull(r, edgeInfo.NodeKey1Bytes[:]); err != nil {
		return models.ChannelEdgeInfo{}, err
	}
	if _, err := io.ReadFull(r, edgeInfo.NodeKey2Bytes[:]); err != nil {
		return models.ChannelEdgeInfo{}, err
	}
	if _, err := io.ReadFull(r, edgeInfo.BitcoinKey1Bytes[:]); err != nil {
		return models.ChannelEdgeInfo{}, err
	}
	if _, err := io.ReadFull(r, edgeInfo.BitcoinKey2Bytes[:]); err != nil {
		return models.ChannelEdgeInfo{}, err
	}

	featureBytes, err := wire.ReadVarBytes(r, 0, 900, "features")
	if err != nil {
		return models.ChannelEdgeInfo{}, err
	}

	features := lnwire.NewRawFeatureVector()
	err = features.Decode(bytes.NewReader(featureBytes))
	if err != nil {
		return models.ChannelEdgeInfo{}, fmt.Errorf("unable to decode "+
			"features: %w", err)
	}
	edgeInfo.Features = lnwire.NewFeatureVector(features, lnwire.Features)

	proof := &models.ChannelAuthProof{}

	proof.NodeSig1Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
	if err != nil {
		return models.ChannelEdgeInfo{}, err
	}
	proof.NodeSig2Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
	if err != nil {
		return models.ChannelEdgeInfo{}, err
	}
	proof.BitcoinSig1Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
	if err != nil {
		return models.ChannelEdgeInfo{}, err
	}
	proof.BitcoinSig2Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
	if err != nil {
		return models.ChannelEdgeInfo{}, err
	}

	if !proof.IsEmpty() {
		edgeInfo.AuthProof = proof
	}

	edgeInfo.ChannelPoint = wire.OutPoint{}
	if err := ReadOutpoint(r, &edgeInfo.ChannelPoint); err != nil {
		return models.ChannelEdgeInfo{}, err
	}
	if err := binary.Read(r, byteOrder, &edgeInfo.Capacity); err != nil {
		return models.ChannelEdgeInfo{}, err
	}
	if err := binary.Read(r, byteOrder, &edgeInfo.ChannelID); err != nil {
		return models.ChannelEdgeInfo{}, err
	}

	if _, err := io.ReadFull(r, edgeInfo.ChainHash[:]); err != nil {
		return models.ChannelEdgeInfo{}, err
	}

	// We'll try and see if there are any opaque bytes left, if not, then
	// we'll ignore the EOF error and return the edge as is.
	edgeInfo.ExtraOpaqueData, err = wire.ReadVarBytes(
		r, 0, MaxAllowedExtraOpaqueBytes, "blob",
	)
	switch {
	case errors.Is(err, io.ErrUnexpectedEOF):
	case errors.Is(err, io.EOF):
	case err != nil:
		return models.ChannelEdgeInfo{}, err
	}

	return edgeInfo, nil
}

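// For reference, an editorial summary (not part of the upstream file) of the
// channel edge info record written by putChanEdgeInfo and read back by
// deserializeChanEdgeInfo above:
//
//	nodeKey1 (33) || nodeKey2 (33) || bitcoinKey1 (33) || bitcoinKey2 (33) ||
//	features (varbytes) || nodeSig1 || nodeSig2 || bitcoinSig1 ||
//	bitcoinSig2 (each varbytes) || channelPoint (outpoint) || capacity (8) ||
//	channelID (8) || chainHash (32) || extraOpaqueData (varbytes)
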
func putChanEdgePolicy(edges kvdb.RwBucket, edge *models.ChannelEdgePolicy,
	from, to []byte) error {

	var edgeKey [33 + 8]byte
	copy(edgeKey[:], from)
	byteOrder.PutUint64(edgeKey[33:], edge.ChannelID)

	var b bytes.Buffer
	if err := serializeChanEdgePolicy(&b, edge, to); err != nil {
		return err
	}

	// Before we write out the new edge, we'll create a new entry in the
	// update index in order to keep it fresh.
	updateUnix := uint64(edge.LastUpdate.Unix())
	var indexKey [8 + 8]byte
	byteOrder.PutUint64(indexKey[:8], updateUnix)
	byteOrder.PutUint64(indexKey[8:], edge.ChannelID)

	updateIndex, err := edges.CreateBucketIfNotExists(edgeUpdateIndexBucket)
	if err != nil {
		return err
	}

	// If there was already an entry for this edge, then we'll need to
	// delete the old one to ensure we don't leave around any after-images.
	// An unknown policy value does not have an update time recorded, so
	// it also does not need to be removed.
	if edgeBytes := edges.Get(edgeKey[:]); edgeBytes != nil &&
		!bytes.Equal(edgeBytes, unknownPolicy) {

		// In order to delete the old entry, we'll need to obtain the
		// *prior* update time in order to delete it. To do this, we'll
		// need to deserialize the existing policy within the database
		// (now outdated by the new one), and delete its corresponding
		// entry within the update index. We'll ignore any
		// ErrEdgePolicyOptionalFieldNotFound or ErrParsingExtraTLVBytes
		// errors, as we only need the channel ID and update time to
		// delete the entry.
		//
		// TODO(halseth): get rid of these invalid policies in a
		// migration.
		//
		// NOTE: the above TODO was completed in the SQL migration and
		// so such edge cases no longer need to be handled there.
		oldEdgePolicy, err := deserializeChanEdgePolicy(
			bytes.NewReader(edgeBytes),
		)
		if err != nil &&
			!errors.Is(err, ErrEdgePolicyOptionalFieldNotFound) &&
			!errors.Is(err, ErrParsingExtraTLVBytes) {

			return err
		}

		oldUpdateTime := uint64(oldEdgePolicy.LastUpdate.Unix())

		var oldIndexKey [8 + 8]byte
		byteOrder.PutUint64(oldIndexKey[:8], oldUpdateTime)
		byteOrder.PutUint64(oldIndexKey[8:], edge.ChannelID)

		if err := updateIndex.Delete(oldIndexKey[:]); err != nil {
			return err
		}
	}

	if err := updateIndex.Put(indexKey[:], nil); err != nil {
		return err
	}

	err = updateEdgePolicyDisabledIndex(
		edges, edge.ChannelID,
		edge.ChannelFlags&lnwire.ChanUpdateDirection > 0,
		edge.IsDisabled(),
	)
	if err != nil {
		return err
	}

	return edges.Put(edgeKey[:], b.Bytes())
}

// updateEdgePolicyDisabledIndex is used to update the disabledEdgePolicyIndex
// bucket by either adding a new disabled ChannelEdgePolicy or removing an
// existing one.
// The direction represents the direction of the edge and disabled is used for
// deciding whether to remove or add an entry to the bucket.
// In general a channel is disabled if two entries for the same chanID exist
// in this bucket.
// Maintaining the bucket this way allows a fast retrieval of disabled
// channels, for example when a prune is needed.
func updateEdgePolicyDisabledIndex(edges kvdb.RwBucket, chanID uint64,
	direction bool, disabled bool) error {

	var disabledEdgeKey [8 + 1]byte
	byteOrder.PutUint64(disabledEdgeKey[0:], chanID)
	if direction {
		disabledEdgeKey[8] = 1
	}

	disabledEdgePolicyIndex, err := edges.CreateBucketIfNotExists(
		disabledEdgePolicyBucket,
	)
	if err != nil {
		return err
	}

	if disabled {
		return disabledEdgePolicyIndex.Put(disabledEdgeKey[:], []byte{})
	}

	return disabledEdgePolicyIndex.Delete(disabledEdgeKey[:])
}

// putChanEdgePolicyUnknown marks the edge policy as unknown
// in the edges bucket.
func putChanEdgePolicyUnknown(edges kvdb.RwBucket, channelID uint64,
	from []byte) error {

	var edgeKey [33 + 8]byte
	copy(edgeKey[:], from)
	byteOrder.PutUint64(edgeKey[33:], channelID)

	if edges.Get(edgeKey[:]) != nil {
		return fmt.Errorf("cannot write unknown policy for channel %v "+
			" when there is already a policy present", channelID)
	}

	return edges.Put(edgeKey[:], unknownPolicy)
}

func fetchChanEdgePolicy(edges kvdb.RBucket, chanID []byte,
	nodePub []byte) (*models.ChannelEdgePolicy, error) {

	var edgeKey [33 + 8]byte
	copy(edgeKey[:], nodePub)
	copy(edgeKey[33:], chanID)

	edgeBytes := edges.Get(edgeKey[:])
	if edgeBytes == nil {
		return nil, ErrEdgeNotFound
	}

	// No need to deserialize unknown policy.
	if bytes.Equal(edgeBytes, unknownPolicy) {
		return nil, nil
	}

	edgeReader := bytes.NewReader(edgeBytes)

	ep, err := deserializeChanEdgePolicy(edgeReader)
	switch {
	// If the db policy was missing an expected optional field, we return
	// nil as if the policy was unknown.
	case errors.Is(err, ErrEdgePolicyOptionalFieldNotFound):
		return nil, nil

	// If the policy contains invalid TLV bytes, we return nil as if
	// the policy was unknown.
	case errors.Is(err, ErrParsingExtraTLVBytes):
		return nil, nil

	case err != nil:
		return nil, err
	}

	return ep, nil
}

func fetchChanEdgePolicies(edgeIndex kvdb.RBucket, edges kvdb.RBucket,
	chanID []byte) (*models.ChannelEdgePolicy, *models.ChannelEdgePolicy,
	error) {

	edgeInfo := edgeIndex.Get(chanID)
	if edgeInfo == nil {
		return nil, nil, fmt.Errorf("%w: chanID=%x", ErrEdgeNotFound,
			chanID)
	}

	// The first node is contained within the first half of the edge
	// information. We only propagate the error here and below if it's
	// something other than edge non-existence.
	node1Pub := edgeInfo[:33]
	edge1, err := fetchChanEdgePolicy(edges, chanID, node1Pub)
	if err != nil {
		return nil, nil, fmt.Errorf("%w: node1Pub=%x", ErrEdgeNotFound,
			node1Pub)
	}

	// Similarly, the second node is contained within the latter
	// half of the edge information.
	node2Pub := edgeInfo[33:66]
	edge2, err := fetchChanEdgePolicy(edges, chanID, node2Pub)
	if err != nil {
		return nil, nil, fmt.Errorf("%w: node2Pub=%x", ErrEdgeNotFound,
			node2Pub)
	}

	return edge1, edge2, nil
}

func serializeChanEdgePolicy(w io.Writer, edge *models.ChannelEdgePolicy,
	to []byte) error {

	err := wire.WriteVarBytes(w, 0, edge.SigBytes)
	if err != nil {
		return err
	}

	if err := binary.Write(w, byteOrder, edge.ChannelID); err != nil {
		return err
	}

	var scratch [8]byte
	updateUnix := uint64(edge.LastUpdate.Unix())
	byteOrder.PutUint64(scratch[:], updateUnix)
	if _, err := w.Write(scratch[:]); err != nil {
		return err
	}

	if err := binary.Write(w, byteOrder, edge.MessageFlags); err != nil {
		return err
	}
	if err := binary.Write(w, byteOrder, edge.ChannelFlags); err != nil {
		return err
	}
	if err := binary.Write(w, byteOrder, edge.TimeLockDelta); err != nil {
		return err
	}
	if err := binary.Write(w, byteOrder, uint64(edge.MinHTLC)); err != nil {
		return err
	}
	err = binary.Write(w, byteOrder, uint64(edge.FeeBaseMSat))
	if err != nil {
		return err
	}
	err = binary.Write(
		w, byteOrder, uint64(edge.FeeProportionalMillionths),
	)
	if err != nil {
		return err
	}

	if _, err := w.Write(to); err != nil {
		return err
	}

	// If the max_htlc field is present, we write it. To be compatible with
	// older versions that weren't aware of this field, we write it as part
	// of the opaque data.
	// TODO(halseth): clean up when moving to TLV.
	var opaqueBuf bytes.Buffer
	if edge.MessageFlags.HasMaxHtlc() {
		err := binary.Write(&opaqueBuf, byteOrder, uint64(edge.MaxHTLC))
		if err != nil {
			return err
		}
	}

	if len(edge.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes {
		return ErrTooManyExtraOpaqueBytes(len(edge.ExtraOpaqueData))
	}
	if _, err := opaqueBuf.Write(edge.ExtraOpaqueData); err != nil {
		return err
	}

	if err := wire.WriteVarBytes(w, 0, opaqueBuf.Bytes()); err != nil {
		return err
	}

	return nil
}

func deserializeChanEdgePolicy(r io.Reader) (*models.ChannelEdgePolicy, error) {
	// Deserialize the policy. Note that in case an optional field is not
	// found or if the edge has invalid TLV data, then both an error and a
	// populated policy object are returned so that the caller can decide
	// if it still wants to use the edge or not.
	edge, err := deserializeChanEdgePolicyRaw(r)
	if err != nil &&
		!errors.Is(err, ErrEdgePolicyOptionalFieldNotFound) &&
		!errors.Is(err, ErrParsingExtraTLVBytes) {

		return nil, err
	}

	return edge, err
}

func deserializeChanEdgePolicyRaw(r io.Reader) (*models.ChannelEdgePolicy,
	error) {

	edge := &models.ChannelEdgePolicy{}

	var err error
	edge.SigBytes, err = wire.ReadVarBytes(r, 0, 80, "sig")
	if err != nil {
		return nil, err
	}

	if err := binary.Read(r, byteOrder, &edge.ChannelID); err != nil {
		return nil, err
	}

	var scratch [8]byte
	if _, err := r.Read(scratch[:]); err != nil {
		return nil, err
	}
	unix := int64(byteOrder.Uint64(scratch[:]))
	edge.LastUpdate = time.Unix(unix, 0)

	if err := binary.Read(r, byteOrder, &edge.MessageFlags); err != nil {
		return nil, err
	}
	if err := binary.Read(r, byteOrder, &edge.ChannelFlags); err != nil {
		return nil, err
	}
	if err := binary.Read(r, byteOrder, &edge.TimeLockDelta); err != nil {
		return nil, err
	}

	var n uint64
	if err := binary.Read(r, byteOrder, &n); err != nil {
		return nil, err
	}
	edge.MinHTLC = lnwire.MilliSatoshi(n)

	if err := binary.Read(r, byteOrder, &n); err != nil {
		return nil, err
	}
	edge.FeeBaseMSat = lnwire.MilliSatoshi(n)

	if err := binary.Read(r, byteOrder, &n); err != nil {
		return nil, err
	}
	edge.FeeProportionalMillionths = lnwire.MilliSatoshi(n)

	if _, err := r.Read(edge.ToNode[:]); err != nil {
		return nil, err
	}

	// We'll try and see if there are any opaque bytes left, if not, then
	// we'll ignore the EOF error and return the edge as is.
	edge.ExtraOpaqueData, err = wire.ReadVarBytes(
		r, 0, MaxAllowedExtraOpaqueBytes, "blob",
	)
	switch {
	case errors.Is(err, io.ErrUnexpectedEOF):
	case errors.Is(err, io.EOF):
	case err != nil:
		return nil, err
	}

	// See if optional fields are present.
	if edge.MessageFlags.HasMaxHtlc() {
		// The max_htlc field should be at the beginning of the opaque
		// bytes.
		opq := edge.ExtraOpaqueData

		// If the max_htlc field is not present, it might be old data
		// stored before this field was validated. We'll return the
		// edge along with an error.
		if len(opq) < 8 {
			return edge, ErrEdgePolicyOptionalFieldNotFound
		}

		maxHtlc := byteOrder.Uint64(opq[:8])
		edge.MaxHTLC = lnwire.MilliSatoshi(maxHtlc)

		// Exclude the parsed field from the rest of the opaque data.
		edge.ExtraOpaqueData = opq[8:]
	}

	// Attempt to extract the inbound fee from the opaque data. If we fail
	// to parse the TLV here, we return an error but also return the edge
	// so that the caller can still use it. This is for backwards
	// compatibility in case we have already persisted some policies that
	// have invalid TLV data.
	var inboundFee lnwire.Fee
	typeMap, err := edge.ExtraOpaqueData.ExtractRecords(&inboundFee)
	if err != nil {
		return edge, fmt.Errorf("%w: %w", ErrParsingExtraTLVBytes, err)
	}

	val, ok := typeMap[lnwire.FeeRecordType]
	if ok && val == nil {
		edge.InboundFee = fn.Some(inboundFee)
	}

	return edge, nil
}

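// For reference, an editorial summary (not part of the upstream file) of the
// channel edge policy record written by serializeChanEdgePolicy and read back
// by deserializeChanEdgePolicyRaw above:
//
//	sig (varbytes) || channelID (8) || lastUpdate (8) || messageFlags (1) ||
//	channelFlags (1) || timeLockDelta (2) || minHTLC (8) ||
//	feeBaseMSat (8) || feeProportionalMillionths (8) || toNode (33) ||
//	opaque (varbytes)
//
// where the opaque blob starts with maxHTLC (8) when the max_htlc message
// flag is set, followed by any extra TLV data (e.g. the inbound fee record).
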
// chanGraphNodeTx is an implementation of the NodeRTx interface backed by the
// KVStore and a kvdb.RTx.
type chanGraphNodeTx struct {
	tx   kvdb.RTx
	db   *KVStore
	node *models.LightningNode
}

// A compile-time constraint to ensure chanGraphNodeTx implements the NodeRTx
// interface.
var _ NodeRTx = (*chanGraphNodeTx)(nil)

func newChanGraphNodeTx(tx kvdb.RTx, db *KVStore,
	node *models.LightningNode) *chanGraphNodeTx {

	return &chanGraphNodeTx{
		tx:   tx,
		db:   db,
		node: node,
	}
}

// Node returns the raw information of the node.
//
// NOTE: This is a part of the NodeRTx interface.
func (c *chanGraphNodeTx) Node() *models.LightningNode {
	return c.node
}

// FetchNode fetches the node with the given pub key under the same transaction
// used to fetch the current node. The returned node is also a NodeRTx and any
// operations on that NodeRTx will also be done under the same transaction.
//
// NOTE: This is a part of the NodeRTx interface.
func (c *chanGraphNodeTx) FetchNode(nodePub route.Vertex) (NodeRTx, error) {
	node, err := c.db.FetchLightningNodeTx(c.tx, nodePub)
	if err != nil {
		return nil, err
	}

	return newChanGraphNodeTx(c.tx, c.db, node), nil
}

// ForEachChannel can be used to iterate over the node's channels under
// the same transaction used to fetch the node.
//
// NOTE: This is a part of the NodeRTx interface.
func (c *chanGraphNodeTx) ForEachChannel(f func(*models.ChannelEdgeInfo,
	*models.ChannelEdgePolicy, *models.ChannelEdgePolicy) error) error {

	return c.db.forEachNodeChannelTx(
		c.tx, c.node.PubKeyBytes,
		func(_ kvdb.RTx, info *models.ChannelEdgeInfo, policy1,
			policy2 *models.ChannelEdgePolicy) error {

			return f(info, policy1, policy2)
		},
		// NOTE: We don't need to reset anything here as the caller is
		// expected to pass in the reset function to the ForEachNode
		// method that constructed the chanGraphNodeTx.
		func() {},
	)
}