• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

lightningnetwork / lnd / 16217691934

11 Jul 2025 10:19AM UTC coverage: 67.331% (+0.006%) from 67.325%
16217691934

Pull #10068

github

web-flow
Merge d6378e50e into 6b326152d
Pull Request #10068: multi: let all V1Store `ForEach*` methods take a `reset` call-back

137 of 188 new or added lines in 18 files covered. (72.87%)

66 existing lines in 18 files now uncovered.

135368 of 201048 relevant lines covered (67.33%)

21713.13 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

77.97
/graph/db/kv_store.go
1
package graphdb
2

3
import (
4
        "bytes"
5
        "context"
6
        "crypto/sha256"
7
        "encoding/binary"
8
        "errors"
9
        "fmt"
10
        "io"
11
        "math"
12
        "net"
13
        "sort"
14
        "sync"
15
        "time"
16

17
        "github.com/btcsuite/btcd/btcec/v2"
18
        "github.com/btcsuite/btcd/chaincfg/chainhash"
19
        "github.com/btcsuite/btcd/txscript"
20
        "github.com/btcsuite/btcd/wire"
21
        "github.com/btcsuite/btcwallet/walletdb"
22
        "github.com/lightningnetwork/lnd/aliasmgr"
23
        "github.com/lightningnetwork/lnd/batch"
24
        "github.com/lightningnetwork/lnd/fn/v2"
25
        "github.com/lightningnetwork/lnd/graph/db/models"
26
        "github.com/lightningnetwork/lnd/input"
27
        "github.com/lightningnetwork/lnd/kvdb"
28
        "github.com/lightningnetwork/lnd/lnwire"
29
        "github.com/lightningnetwork/lnd/routing/route"
30
)
31

var (
        // nodeBucket is a bucket which houses all the vertices or nodes within
        // the channel graph. This bucket has a single sub-bucket which adds an
        // additional index from pubkey -> alias. Within the top-level of this
        // bucket, the key space maps a node's compressed public key to the
        // serialized information for that node. Additionally, there's a
        // special key "source" which stores the pubkey of the source node. The
        // source node is used as the starting point for all graph queries and
        // traversals. The graph is formed as a star-graph with the source node
        // at the center.
        //
        // maps: pubKey -> nodeInfo
        // maps: source -> selfPubKey
        nodeBucket = []byte("graph-node")

        // nodeUpdateIndexBucket is a sub-bucket of the nodeBucket. This bucket
        // will be used to quickly look up the "freshness" of a node's last
        // update to the network. The bucket only contains keys, and no values,
        // it's mapping:
        //
        // maps: updateTime || nodeID -> nil
        nodeUpdateIndexBucket = []byte("graph-node-update-index")

        // sourceKey is a special key that resides within the nodeBucket. The
        // sourceKey maps a key to the public key of the "self node".
        sourceKey = []byte("source")

        // aliasIndexBucket is a sub-bucket that's nested within the main
        // nodeBucket. This bucket maps the public key of a node to its
        // current alias. This bucket is provided as it can be used within a
        // future UI layer to add an additional degree of confirmation.
        aliasIndexBucket = []byte("alias")

        // edgeBucket is a bucket which houses all of the edge or channel
        // information within the channel graph. This bucket essentially acts
        // as an adjacency list, which in conjunction with a range scan, can be
        // used to iterate over all the incoming and outgoing edges for a
        // particular node. Keys in the bucket use a prefix scheme which leads
        // with the node's public key and ends with the compact edge ID.
        // For each chanID, there will be two entries within the bucket, as the
        // graph is directed: nodes may have different policies w.r.t to fees
        // for their respective directions.
        //
        // maps: pubKey || chanID -> channel edge policy for node
        edgeBucket = []byte("graph-edge")

        // unknownPolicy is represented as an empty slice. It is
        // used as the value in edgeBucket for unknown channel edge policies.
        // Unknown policies are still stored in the database to enable efficient
        // lookup of incoming channel edges.
        unknownPolicy = []byte{}

        // chanStart is an array of all zero bytes which is used to perform
        // range scans within the edgeBucket to obtain all of the outgoing
        // edges for a particular node.
        chanStart [8]byte

        // edgeIndexBucket is an index which can be used to iterate all edges
        // in the bucket, grouping them according to their in/out nodes.
        // Additionally, the items in this bucket also contain the complete
        // edge information for a channel. The edge information includes the
        // capacity of the channel, the nodes that made the channel, etc. This
        // bucket resides within the edgeBucket above. Creation of an edge
        // proceeds in two phases: first the edge is added to the edge index,
        // afterwards the edgeBucket can be updated with the latest details of
        // the edge as they are announced on the network.
        //
        // maps: chanID -> pubKey1 || pubKey2 || restofEdgeInfo
        edgeIndexBucket = []byte("edge-index")

        // edgeUpdateIndexBucket is a sub-bucket of the main edgeBucket. This
        // bucket contains an index which allows us to gauge the "freshness" of
        // a channel's last updates.
        //
        // maps: updateTime || chanID -> nil
        edgeUpdateIndexBucket = []byte("edge-update-index")

        // channelPointBucket maps a channel's full outpoint (txid:index) to
        // its short 8-byte channel ID. This bucket resides within the
        // edgeBucket above, and can be used to quickly remove an edge due to
        // the outpoint being spent, or to query for existence of a channel.
        //
        // maps: outPoint -> chanID
        channelPointBucket = []byte("chan-index")

        // zombieBucket is a sub-bucket of the main edgeBucket bucket
        // responsible for maintaining an index of zombie channels. Each entry
        // exists within the bucket as follows:
        //
        // maps: chanID -> pubKey1 || pubKey2
        //
        // The chanID represents the channel ID of the edge that is marked as a
        // zombie and is used as the key, which maps to the public keys of the
        // edge's participants.
        zombieBucket = []byte("zombie-index")

        // disabledEdgePolicyBucket is a sub-bucket of the main edgeBucket
        // bucket responsible for maintaining an index of disabled edge
        // policies. Each entry exists within the bucket as follows:
        //
        // maps: <chanID><direction> -> []byte{}
        //
        // The chanID represents the channel ID of the edge and the direction is
        // one byte representing the direction of the edge. The main purpose of
        // this index is to allow pruning disabled channels in a fast way
        // without the need to iterate all over the graph.
        disabledEdgePolicyBucket = []byte("disabled-edge-policy-index")

        // graphMetaBucket is a top-level bucket which stores various metadata
        // related to the on-disk channel graph. Data stored in this bucket
        // includes the block to which the graph has been synced to, the total
        // number of channels, etc.
        graphMetaBucket = []byte("graph-meta")

        // pruneLogBucket is a bucket within the graphMetaBucket that stores
        // a mapping from the block height to the hash for the blocks used to
        // prune the graph.
        // Once a new block is discovered, any channels that have been closed
        // (by spending the outpoint) can safely be removed from the graph, and
        // the block is added to the prune log. We need to keep such a log for
        // the case where a reorg happens, and we must "rewind" the state of the
        // graph by removing channels that were previously confirmed. In such a
        // case we'll remove all entries from the prune log with a block height
        // that no longer exists.
        pruneLogBucket = []byte("prune-log")

        // closedScidBucket is a top-level bucket that stores scids for
        // channels that we know to be closed. This is used so that we don't
        // need to perform expensive validation checks if we receive a channel
        // announcement for the channel again.
        //
        // maps: scid -> []byte{}
        closedScidBucket = []byte("closed-scid")
)
166

const (
        // MaxAllowedExtraOpaqueBytes is the largest amount of opaque bytes that
        // we'll permit to be written to disk. We limit this as otherwise, it
        // would be possible for a node to create a ton of updates and slowly
        // fill our disk, and also waste bandwidth due to relaying.
        MaxAllowedExtraOpaqueBytes = 10000
)
174

// KVStore is a persistent, on-disk graph representation of the Lightning
// Network. This struct can be used to implement path finding algorithms on top
// of, and also to update a node's view based on information received from the
// p2p network. Internally, the graph is stored using a modified adjacency list
// representation with some added object interaction possible with each
// serialized edge/node. The stored graph is directed, meaning that there are
// two edges stored for each channel: an inbound/outbound edge for each node
// pair. Nodes, edges, and edge information can all be added to the graph
// independently. Edge removal results in the deletion of all edge information
// for that edge.
type KVStore struct {
        // db is the backing key-value database.
        db kvdb.Backend

        // cacheMu guards all caches (rejectCache and chanCache). If
        // this mutex will be acquired at the same time as the DB mutex then
        // the cacheMu MUST be acquired first to prevent deadlock.
        cacheMu     sync.RWMutex
        rejectCache *rejectCache
        chanCache   *channelCache

        // chanScheduler and nodeScheduler batch concurrent channel and node
        // writes respectively (see NewKVStore for their construction).
        chanScheduler batch.Scheduler[kvdb.RwTx]
        nodeScheduler batch.Scheduler[kvdb.RwTx]
}

// A compile-time assertion to ensure that the KVStore struct implements the
// V1Store interface.
var _ V1Store = (*KVStore)(nil)
202

203
// NewKVStore allocates a new KVStore backed by a DB instance. The
204
// returned instance has its own unique reject cache and channel cache.
205
func NewKVStore(db kvdb.Backend, options ...StoreOptionModifier) (*KVStore,
206
        error) {
173✔
207

173✔
208
        opts := DefaultOptions()
173✔
209
        for _, o := range options {
176✔
210
                o(opts)
3✔
211
        }
3✔
212

213
        if !opts.NoMigration {
346✔
214
                if err := initKVStore(db); err != nil {
173✔
215
                        return nil, err
×
216
                }
×
217
        }
218

219
        g := &KVStore{
173✔
220
                db:          db,
173✔
221
                rejectCache: newRejectCache(opts.RejectCacheSize),
173✔
222
                chanCache:   newChannelCache(opts.ChannelCacheSize),
173✔
223
        }
173✔
224
        g.chanScheduler = batch.NewTimeScheduler(
173✔
225
                batch.NewBoltBackend[kvdb.RwTx](db), &g.cacheMu,
173✔
226
                opts.BatchCommitInterval,
173✔
227
        )
173✔
228
        g.nodeScheduler = batch.NewTimeScheduler(
173✔
229
                batch.NewBoltBackend[kvdb.RwTx](db), nil,
173✔
230
                opts.BatchCommitInterval,
173✔
231
        )
173✔
232

173✔
233
        return g, nil
173✔
234
}
235

// channelMapKey is the key structure used for storing channel edge policies.
type channelMapKey struct {
        // nodeKey is the 33-byte compressed public key of the node the policy
        // belongs to.
        nodeKey route.Vertex

        // chanID is the 8-byte channel ID, as it appears in the edge bucket
        // keys.
        chanID [8]byte
}
241

// String returns a human-readable representation of the key, rendering the
// channel ID as hex.
func (c channelMapKey) String() string {
        return fmt.Sprintf("node=%v, chanID=%x", c.nodeKey, c.chanID)
}
246

247
// getChannelMap loads all channel edge policies from the database and stores
248
// them in a map.
249
func getChannelMap(edges kvdb.RBucket) (
250
        map[channelMapKey]*models.ChannelEdgePolicy, error) {
148✔
251

148✔
252
        // Create a map to store all channel edge policies.
148✔
253
        channelMap := make(map[channelMapKey]*models.ChannelEdgePolicy)
148✔
254

148✔
255
        err := kvdb.ForAll(edges, func(k, edgeBytes []byte) error {
1,735✔
256
                // Skip embedded buckets.
1,587✔
257
                if bytes.Equal(k, edgeIndexBucket) ||
1,587✔
258
                        bytes.Equal(k, edgeUpdateIndexBucket) ||
1,587✔
259
                        bytes.Equal(k, zombieBucket) ||
1,587✔
260
                        bytes.Equal(k, disabledEdgePolicyBucket) ||
1,587✔
261
                        bytes.Equal(k, channelPointBucket) {
2,178✔
262

591✔
263
                        return nil
591✔
264
                }
591✔
265

266
                // Validate key length.
267
                if len(k) != 33+8 {
999✔
268
                        return fmt.Errorf("invalid edge key %x encountered", k)
×
269
                }
×
270

271
                var key channelMapKey
999✔
272
                copy(key.nodeKey[:], k[:33])
999✔
273
                copy(key.chanID[:], k[33:])
999✔
274

999✔
275
                // No need to deserialize unknown policy.
999✔
276
                if bytes.Equal(edgeBytes, unknownPolicy) {
999✔
277
                        return nil
×
278
                }
×
279

280
                edgeReader := bytes.NewReader(edgeBytes)
999✔
281
                edge, err := deserializeChanEdgePolicyRaw(
999✔
282
                        edgeReader,
999✔
283
                )
999✔
284

999✔
285
                switch {
999✔
286
                // If the db policy was missing an expected optional field, we
287
                // return nil as if the policy was unknown.
288
                case errors.Is(err, ErrEdgePolicyOptionalFieldNotFound):
×
289
                        return nil
×
290

291
                // We don't want a single policy with bad TLV data to stop us
292
                // from loading the rest of the data, so we just skip this
293
                // policy. This is for backwards compatibility since we did not
294
                // use to validate TLV data in the past before persisting it.
295
                case errors.Is(err, ErrParsingExtraTLVBytes):
×
296
                        return nil
×
297

298
                case err != nil:
×
299
                        return err
×
300
                }
301

302
                channelMap[key] = edge
999✔
303

999✔
304
                return nil
999✔
305
        })
306
        if err != nil {
148✔
307
                return nil, err
×
308
        }
×
309

310
        return channelMap, nil
148✔
311
}
312

// graphTopLevelBuckets is the set of top-level buckets that together make up
// the on-disk channel graph. initKVStore creates each of these if it does not
// already exist.
var graphTopLevelBuckets = [][]byte{
        nodeBucket,
        edgeBucket,
        graphMetaBucket,
        closedScidBucket,
}
319

// initKVStore creates and initializes a fresh version of the graph KV store.
// In the case that the target path has not yet been created or doesn't yet
// exist, then the path is created. Additionally, all required top-level
// buckets used within the database are created.
func initKVStore(db kvdb.Backend) error {
        err := kvdb.Update(db, func(tx kvdb.RwTx) error {
                // First, ensure each of the top-level buckets exists.
                for _, tlb := range graphTopLevelBuckets {
                        if _, err := tx.CreateTopLevelBucket(tlb); err != nil {
                                return err
                        }
                }

                // Next, create the sub-buckets nested under the node bucket.
                nodes := tx.ReadWriteBucket(nodeBucket)
                _, err := nodes.CreateBucketIfNotExists(aliasIndexBucket)
                if err != nil {
                        return err
                }
                _, err = nodes.CreateBucketIfNotExists(nodeUpdateIndexBucket)
                if err != nil {
                        return err
                }

                // Then the sub-buckets nested under the edge bucket.
                edges := tx.ReadWriteBucket(edgeBucket)
                _, err = edges.CreateBucketIfNotExists(edgeIndexBucket)
                if err != nil {
                        return err
                }
                _, err = edges.CreateBucketIfNotExists(edgeUpdateIndexBucket)
                if err != nil {
                        return err
                }
                _, err = edges.CreateBucketIfNotExists(channelPointBucket)
                if err != nil {
                        return err
                }
                _, err = edges.CreateBucketIfNotExists(zombieBucket)
                if err != nil {
                        return err
                }

                // Finally, the prune log under the graph meta bucket.
                graphMeta := tx.ReadWriteBucket(graphMetaBucket)
                _, err = graphMeta.CreateBucketIfNotExists(pruneLogBucket)

                return err
        }, func() {})
        if err != nil {
                return fmt.Errorf("unable to create new channel graph: %w", err)
        }

        return nil
}
371

372
// AddrsForNode returns all known addresses for the target node public key that
373
// the graph DB is aware of. The returned boolean indicates if the given node is
374
// unknown to the graph DB or not.
375
//
376
// NOTE: this is part of the channeldb.AddrSource interface.
377
func (c *KVStore) AddrsForNode(ctx context.Context,
378
        nodePub *btcec.PublicKey) (bool, []net.Addr, error) {
6✔
379

6✔
380
        pubKey, err := route.NewVertexFromBytes(nodePub.SerializeCompressed())
6✔
381
        if err != nil {
6✔
382
                return false, nil, err
×
383
        }
×
384

385
        node, err := c.FetchLightningNode(ctx, pubKey)
6✔
386
        // We don't consider it an error if the graph is unaware of the node.
6✔
387
        switch {
6✔
388
        case err != nil && !errors.Is(err, ErrGraphNodeNotFound):
×
389
                return false, nil, err
×
390

391
        case errors.Is(err, ErrGraphNodeNotFound):
4✔
392
                return false, nil, nil
4✔
393
        }
394

395
        return true, node.Addresses, nil
5✔
396
}
397

// ForEachChannel iterates through all the channel edges stored within the
// graph and invokes the passed callback for each edge. The callback takes two
// edges as since this is a directed graph, both the in/out edges are visited.
// If the callback returns an error, then the transaction is aborted and the
// iteration stops early.
//
// NOTE: If an edge can't be found, or wasn't advertised, then a nil pointer
// for that particular channel edge routing policy will be passed into the
// callback.
func (c *KVStore) ForEachChannel(_ context.Context,
        cb func(*models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
                *models.ChannelEdgePolicy) error, reset func()) error {

        // Delegate to the package-level helper that operates directly on the
        // backing database.
        return forEachChannel(c.db, cb, reset)
}
413

414
// forEachChannel iterates through all the channel edges stored within the
415
// graph and invokes the passed callback for each edge. The callback takes two
416
// edges as since this is a directed graph, both the in/out edges are visited.
417
// If the callback returns an error, then the transaction is aborted and the
418
// iteration stops early.
419
//
420
// NOTE: If an edge can't be found, or wasn't advertised, then a nil pointer
421
// for that particular channel edge routing policy will be passed into the
422
// callback.
423
func forEachChannel(db kvdb.Backend, cb func(*models.ChannelEdgeInfo,
424
        *models.ChannelEdgePolicy, *models.ChannelEdgePolicy) error,
425
        reset func()) error {
10✔
426

10✔
427
        return db.View(func(tx kvdb.RTx) error {
20✔
428
                edges := tx.ReadBucket(edgeBucket)
10✔
429
                if edges == nil {
10✔
430
                        return ErrGraphNoEdgesFound
×
431
                }
×
432

433
                // First, load all edges in memory indexed by node and channel
434
                // id.
435
                channelMap, err := getChannelMap(edges)
10✔
436
                if err != nil {
10✔
437
                        return err
×
438
                }
×
439

440
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
10✔
441
                if edgeIndex == nil {
10✔
442
                        return ErrGraphNoEdgesFound
×
443
                }
×
444

445
                // Load edge index, recombine each channel with the policies
446
                // loaded above and invoke the callback.
447
                return kvdb.ForAll(
10✔
448
                        edgeIndex, func(k, edgeInfoBytes []byte) error {
115✔
449
                                var chanID [8]byte
105✔
450
                                copy(chanID[:], k)
105✔
451

105✔
452
                                edgeInfoReader := bytes.NewReader(edgeInfoBytes)
105✔
453
                                info, err := deserializeChanEdgeInfo(
105✔
454
                                        edgeInfoReader,
105✔
455
                                )
105✔
456
                                if err != nil {
105✔
457
                                        return err
×
458
                                }
×
459

460
                                policy1 := channelMap[channelMapKey{
105✔
461
                                        nodeKey: info.NodeKey1Bytes,
105✔
462
                                        chanID:  chanID,
105✔
463
                                }]
105✔
464

105✔
465
                                policy2 := channelMap[channelMapKey{
105✔
466
                                        nodeKey: info.NodeKey2Bytes,
105✔
467
                                        chanID:  chanID,
105✔
468
                                }]
105✔
469

105✔
470
                                return cb(&info, policy1, policy2)
105✔
471
                        },
472
                )
473
        }, reset)
474
}
475

476
// ForEachChannelCacheable iterates through all the channel edges stored within
477
// the graph and invokes the passed callback for each edge. The callback takes
478
// two edges as since this is a directed graph, both the in/out edges are
479
// visited. If the callback returns an error, then the transaction is aborted
480
// and the iteration stops early.
481
//
482
// NOTE: If an edge can't be found, or wasn't advertised, then a nil pointer
483
// for that particular channel edge routing policy will be passed into the
484
// callback.
485
//
486
// NOTE: this method is like ForEachChannel but fetches only the data required
487
// for the graph cache.
488
func (c *KVStore) ForEachChannelCacheable(cb func(*models.CachedEdgeInfo,
489
        *models.CachedEdgePolicy, *models.CachedEdgePolicy) error,
490
        reset func()) error {
141✔
491

141✔
492
        return c.db.View(func(tx kvdb.RTx) error {
282✔
493
                edges := tx.ReadBucket(edgeBucket)
141✔
494
                if edges == nil {
141✔
495
                        return ErrGraphNoEdgesFound
×
496
                }
×
497

498
                // First, load all edges in memory indexed by node and channel
499
                // id.
500
                channelMap, err := getChannelMap(edges)
141✔
501
                if err != nil {
141✔
502
                        return err
×
503
                }
×
504

505
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
141✔
506
                if edgeIndex == nil {
141✔
507
                        return ErrGraphNoEdgesFound
×
508
                }
×
509

510
                // Load edge index, recombine each channel with the policies
511
                // loaded above and invoke the callback.
512
                return kvdb.ForAll(
141✔
513
                        edgeIndex, func(k, edgeInfoBytes []byte) error {
540✔
514
                                var chanID [8]byte
399✔
515
                                copy(chanID[:], k)
399✔
516

399✔
517
                                edgeInfoReader := bytes.NewReader(edgeInfoBytes)
399✔
518
                                info, err := deserializeChanEdgeInfo(
399✔
519
                                        edgeInfoReader,
399✔
520
                                )
399✔
521
                                if err != nil {
399✔
522
                                        return err
×
523
                                }
×
524

525
                                key1 := channelMapKey{
399✔
526
                                        nodeKey: info.NodeKey1Bytes,
399✔
527
                                        chanID:  chanID,
399✔
528
                                }
399✔
529
                                policy1 := channelMap[key1]
399✔
530

399✔
531
                                key2 := channelMapKey{
399✔
532
                                        nodeKey: info.NodeKey2Bytes,
399✔
533
                                        chanID:  chanID,
399✔
534
                                }
399✔
535
                                policy2 := channelMap[key2]
399✔
536

399✔
537
                                // We now create the cached edge policies, but
399✔
538
                                // only when the above policies are found in the
399✔
539
                                // `channelMap`.
399✔
540
                                var (
399✔
541
                                        cachedPolicy1 *models.CachedEdgePolicy
399✔
542
                                        cachedPolicy2 *models.CachedEdgePolicy
399✔
543
                                )
399✔
544

399✔
545
                                if policy1 != nil {
798✔
546
                                        cachedPolicy1 = models.NewCachedPolicy(
399✔
547
                                                policy1,
399✔
548
                                        )
399✔
549
                                }
399✔
550

551
                                if policy2 != nil {
798✔
552
                                        cachedPolicy2 = models.NewCachedPolicy(
399✔
553
                                                policy2,
399✔
554
                                        )
399✔
555
                                }
399✔
556

557
                                return cb(
399✔
558
                                        models.NewCachedEdge(&info),
399✔
559
                                        cachedPolicy1, cachedPolicy2,
399✔
560
                                )
399✔
561
                        },
562
                )
563
        }, reset)
564
}
565

// forEachNodeDirectedChannel iterates through all channels of a given node,
// executing the passed callback on the directed edge representing the channel
// and its incoming policy. If the callback returns an error, then the iteration
// is halted with the error propagated back up to the caller. An optional read
// transaction may be provided. If none is provided, a new one will be created.
//
// Unknown policies are passed into the callback as nil values.
//
// NOTE: the reset param is only meaningful if the tx param is nil.
func (c *KVStore) forEachNodeDirectedChannel(tx kvdb.RTx,
        node route.Vertex, cb func(channel *DirectedChannel) error,
        reset func()) error {

        // Fallback that uses the database.
        toNodeCallback := func() route.Vertex {
                return node
        }
        // Fetch the queried node's features once; they are shared by every
        // cached in-policy built below.
        toNodeFeatures, err := c.fetchNodeFeatures(tx, node)
        if err != nil {
                return err
        }

        // dbCallback adapts the (info, p1, p2) tuple produced by the node
        // traversal into a DirectedChannel for the caller's callback. Judging
        // by the field assignments below, p1 is treated as the queried node's
        // outgoing policy and p2 as the incoming one.
        dbCallback := func(tx kvdb.RTx, e *models.ChannelEdgeInfo, p1,
                p2 *models.ChannelEdgePolicy) error {

                var cachedInPolicy *models.CachedEdgePolicy
                if p2 != nil {
                        cachedInPolicy = models.NewCachedPolicy(p2)
                        cachedInPolicy.ToNodePubKey = toNodeCallback
                        cachedInPolicy.ToNodeFeatures = toNodeFeatures
                }

                // OtherNode defaults to node 2 and is flipped below when the
                // queried node itself is node 2.
                directedChannel := &DirectedChannel{
                        ChannelID:    e.ChannelID,
                        IsNode1:      node == e.NodeKey1Bytes,
                        OtherNode:    e.NodeKey2Bytes,
                        Capacity:     e.Capacity,
                        OutPolicySet: p1 != nil,
                        InPolicy:     cachedInPolicy,
                }

                // Copy the inbound fee over only when the outgoing policy
                // carries one.
                if p1 != nil {
                        p1.InboundFee.WhenSome(func(fee lnwire.Fee) {
                                directedChannel.InboundFee = fee
                        })
                }

                if node == e.NodeKey2Bytes {
                        directedChannel.OtherNode = e.NodeKey1Bytes
                }

                return cb(directedChannel)
        }

        return nodeTraversal(tx, node[:], c.db, dbCallback, reset)
}
622

623
// fetchNodeFeatures returns the features of a given node. If no features are
624
// known for the node, an empty feature vector is returned. An optional read
625
// transaction may be provided. If none is provided, a new one will be created.
626
func (c *KVStore) fetchNodeFeatures(tx kvdb.RTx,
627
        node route.Vertex) (*lnwire.FeatureVector, error) {
710✔
628

710✔
629
        // Fallback that uses the database.
710✔
630
        targetNode, err := c.FetchLightningNodeTx(tx, node)
710✔
631
        switch {
710✔
632
        // If the node exists and has features, return them directly.
633
        case err == nil:
699✔
634
                return targetNode.Features, nil
699✔
635

636
        // If we couldn't find a node announcement, populate a blank feature
637
        // vector.
638
        case errors.Is(err, ErrGraphNodeNotFound):
11✔
639
                return lnwire.EmptyFeatureVector(), nil
11✔
640

641
        // Otherwise, bubble the error up.
642
        default:
×
643
                return nil, err
×
644
        }
645
}
646

// ForEachNodeDirectedChannel iterates through all channels of a given node,
// executing the passed callback on the directed edge representing the channel
// and its incoming policy. If the callback returns an error, then the iteration
// is halted with the error propagated back up to the caller.
//
// Unknown policies are passed into the callback as nil values.
//
// NOTE: this is part of the graphdb.NodeTraverser interface.
func (c *KVStore) ForEachNodeDirectedChannel(nodePub route.Vertex,
        cb func(channel *DirectedChannel) error, reset func()) error {

        // Delegate to the internal helper with no explicit read transaction,
        // letting it create its own.
        return c.forEachNodeDirectedChannel(nil, nodePub, cb, reset)
}
660

661
// FetchNodeFeatures returns the features of the given node. If no features are
// known for the node, an empty feature vector is returned.
//
// NOTE: this is part of the graphdb.NodeTraverser interface.
func (c *KVStore) FetchNodeFeatures(nodePub route.Vertex) (
	*lnwire.FeatureVector, error) {

	// A nil transaction causes the helper to open its own read
	// transaction.
	return c.fetchNodeFeatures(nil, nodePub)
}
4✔
670

671
// ForEachNodeCached is similar to forEachNode, but it returns DirectedChannel
// data to the call-back.
//
// NOTE: The callback contents MUST not be modified.
func (c *KVStore) ForEachNodeCached(_ context.Context,
	cb func(node route.Vertex, chans map[uint64]*DirectedChannel) error,
	reset func()) error {

	// Otherwise call back to a version that uses the database directly.
	// We'll iterate over each node, then the set of channels for each
	// node, and construct a similar callback function signature as the
	// main function expects.
	return forEachNode(c.db, func(tx kvdb.RTx,
		node *models.LightningNode) error {

		channels := make(map[uint64]*DirectedChannel)

		err := c.forEachNodeChannelTx(tx, node.PubKeyBytes,
			func(tx kvdb.RTx, e *models.ChannelEdgeInfo,
				p1 *models.ChannelEdgePolicy,
				p2 *models.ChannelEdgePolicy) error {

				// The incoming policy's ToNodePubKey points
				// back at this node; it is resolved lazily via
				// this closure.
				toNodeCallback := func() route.Vertex {
					return node.PubKeyBytes
				}
				toNodeFeatures, err := c.fetchNodeFeatures(
					tx, node.PubKeyBytes,
				)
				if err != nil {
					return err
				}

				// Only build the cached incoming policy when
				// one is actually known; otherwise it stays
				// nil.
				var cachedInPolicy *models.CachedEdgePolicy
				if p2 != nil {
					cachedInPolicy =
						models.NewCachedPolicy(p2)
					cachedInPolicy.ToNodePubKey =
						toNodeCallback
					cachedInPolicy.ToNodeFeatures =
						toNodeFeatures
				}

				directedChannel := &DirectedChannel{
					ChannelID: e.ChannelID,
					IsNode1: node.PubKeyBytes ==
						e.NodeKey1Bytes,
					OtherNode:    e.NodeKey2Bytes,
					Capacity:     e.Capacity,
					OutPolicySet: p1 != nil,
					InPolicy:     cachedInPolicy,
				}

				// If this node is node 2 of the channel, then
				// the "other" node is node 1.
				if node.PubKeyBytes == e.NodeKey2Bytes {
					directedChannel.OtherNode =
						e.NodeKey1Bytes
				}

				channels[e.ChannelID] = directedChannel

				return nil
			}, reset,
		)
		if err != nil {
			return err
		}

		return cb(node.PubKeyBytes, channels)
	}, reset)
}
740

741
// DisabledChannelIDs returns the channel ids of disabled channels.
// A channel is disabled when two of the associated ChannelEdgePolicies
// have their disabled bit on.
func (c *KVStore) DisabledChannelIDs() ([]uint64, error) {
	var disabledChanIDs []uint64
	var chanEdgeFound map[uint64]struct{}

	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}

		disabledEdgePolicyIndex := edges.NestedReadBucket(
			disabledEdgePolicyBucket,
		)
		// No disabled-policy index means no disabled channels.
		if disabledEdgePolicyIndex == nil {
			return nil
		}

		// We iterate over all disabled policies and we add each channel
		// that has more than one disabled policy to disabledChanIDs
		// array. Each index key starts with the 8-byte channel ID, so
		// seeing the same channel ID twice means both directions are
		// disabled.
		return disabledEdgePolicyIndex.ForEach(
			func(k, v []byte) error {
				chanID := byteOrder.Uint64(k[:8])
				_, edgeFound := chanEdgeFound[chanID]
				if edgeFound {
					delete(chanEdgeFound, chanID)
					disabledChanIDs = append(
						disabledChanIDs, chanID,
					)

					return nil
				}

				chanEdgeFound[chanID] = struct{}{}

				return nil
			},
		)
	}, func() {
		// Reset accumulated state in case the transaction is retried.
		disabledChanIDs = nil
		chanEdgeFound = make(map[uint64]struct{})
	})
	if err != nil {
		return nil, err
	}

	return disabledChanIDs, nil
}
792

793
// ForEachNode iterates through all the stored vertices/nodes in the graph,
// executing the passed callback with each node encountered. If the callback
// returns an error, then the transaction is aborted and the iteration stops
// early. Any operations performed on the NodeTx passed to the call-back are
// executed under the same read transaction and so, methods on the NodeTx object
// _MUST_ only be called from within the call-back.
func (c *KVStore) ForEachNode(_ context.Context,
	cb func(tx NodeRTx) error, reset func()) error {

	return forEachNode(c.db, func(tx kvdb.RTx,
		node *models.LightningNode) error {

		// Wrap the node and the shared read transaction so the
		// callback can perform further graph reads under the same tx.
		return cb(newChanGraphNodeTx(tx, c, node))
	}, reset)
}
808

809
// forEachNode iterates through all the stored vertices/nodes in the graph,
810
// executing the passed callback with each node encountered. If the callback
811
// returns an error, then the transaction is aborted and the iteration stops
812
// early.
813
//
814
// TODO(roasbeef): add iterator interface to allow for memory efficient graph
815
// traversal when graph gets mega.
816
func forEachNode(db kvdb.Backend,
817
        cb func(kvdb.RTx, *models.LightningNode) error, reset func()) error {
132✔
818

132✔
819
        traversal := func(tx kvdb.RTx) error {
264✔
820
                // First grab the nodes bucket which stores the mapping from
132✔
821
                // pubKey to node information.
132✔
822
                nodes := tx.ReadBucket(nodeBucket)
132✔
823
                if nodes == nil {
132✔
824
                        return ErrGraphNotFound
×
825
                }
×
826

827
                return nodes.ForEach(func(pubKey, nodeBytes []byte) error {
1,574✔
828
                        // If this is the source key, then we skip this
1,442✔
829
                        // iteration as the value for this key is a pubKey
1,442✔
830
                        // rather than raw node information.
1,442✔
831
                        if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 {
1,706✔
832
                                return nil
264✔
833
                        }
264✔
834

835
                        nodeReader := bytes.NewReader(nodeBytes)
1,181✔
836
                        node, err := deserializeLightningNode(nodeReader)
1,181✔
837
                        if err != nil {
1,181✔
838
                                return err
×
839
                        }
×
840

841
                        // Execute the callback, the transaction will abort if
842
                        // this returns an error.
843
                        return cb(tx, &node)
1,181✔
844
                })
845
        }
846

847
        return kvdb.View(db, traversal, reset)
132✔
848
}
849

850
// ForEachNodeCacheable iterates through all the stored vertices/nodes in the
851
// graph, executing the passed callback with each node encountered. If the
852
// callback returns an error, then the transaction is aborted and the iteration
853
// stops early.
854
func (c *KVStore) ForEachNodeCacheable(_ context.Context,
855
        cb func(route.Vertex, *lnwire.FeatureVector) error,
856
        reset func()) error {
142✔
857

142✔
858
        traversal := func(tx kvdb.RTx) error {
284✔
859
                // First grab the nodes bucket which stores the mapping from
142✔
860
                // pubKey to node information.
142✔
861
                nodes := tx.ReadBucket(nodeBucket)
142✔
862
                if nodes == nil {
142✔
863
                        return ErrGraphNotFound
×
864
                }
×
865

866
                return nodes.ForEach(func(pubKey, nodeBytes []byte) error {
543✔
867
                        // If this is the source key, then we skip this
401✔
868
                        // iteration as the value for this key is a pubKey
401✔
869
                        // rather than raw node information.
401✔
870
                        if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 {
682✔
871
                                return nil
281✔
872
                        }
281✔
873

874
                        nodeReader := bytes.NewReader(nodeBytes)
123✔
875
                        node, features, err := deserializeLightningNodeCacheable( //nolint:ll
123✔
876
                                nodeReader,
123✔
877
                        )
123✔
878
                        if err != nil {
123✔
879
                                return err
×
880
                        }
×
881

882
                        // Execute the callback, the transaction will abort if
883
                        // this returns an error.
884
                        return cb(node, features)
123✔
885
                })
886
        }
887

888
        return kvdb.View(c.db, traversal, reset)
142✔
889
}
890

891
// SourceNode returns the source node of the graph. The source node is treated
// as the center node within a star-graph. This method may be used to kick off
// a path finding algorithm in order to explore the reachability of another
// node based off the source node.
func (c *KVStore) SourceNode(_ context.Context) (*models.LightningNode, error) {
	// Delegate to the package-level helper that opens its own read
	// transaction.
	return sourceNode(c.db)
}
241✔
898

899
// sourceNode fetches the source node of the graph. The source node is treated
900
// as the center node within a star-graph.
901
func sourceNode(db kvdb.Backend) (*models.LightningNode, error) {
241✔
902
        var source *models.LightningNode
241✔
903
        err := kvdb.View(db, func(tx kvdb.RTx) error {
482✔
904
                // First grab the nodes bucket which stores the mapping from
241✔
905
                // pubKey to node information.
241✔
906
                nodes := tx.ReadBucket(nodeBucket)
241✔
907
                if nodes == nil {
241✔
908
                        return ErrGraphNotFound
×
909
                }
×
910

911
                node, err := sourceNodeWithTx(nodes)
241✔
912
                if err != nil {
245✔
913
                        return err
4✔
914
                }
4✔
915
                source = node
240✔
916

240✔
917
                return nil
240✔
918
        }, func() {
241✔
919
                source = nil
241✔
920
        })
241✔
921
        if err != nil {
245✔
922
                return nil, err
4✔
923
        }
4✔
924

925
        return source, nil
240✔
926
}
927

928
// sourceNodeWithTx uses an existing database transaction and returns the source
929
// node of the graph. The source node is treated as the center node within a
930
// star-graph. This method may be used to kick off a path finding algorithm in
931
// order to explore the reachability of another node based off the source node.
932
func sourceNodeWithTx(nodes kvdb.RBucket) (*models.LightningNode, error) {
506✔
933
        selfPub := nodes.Get(sourceKey)
506✔
934
        if selfPub == nil {
510✔
935
                return nil, ErrSourceNodeNotSet
4✔
936
        }
4✔
937

938
        // With the pubKey of the source node retrieved, we're able to
939
        // fetch the full node information.
940
        node, err := fetchLightningNode(nodes, selfPub)
505✔
941
        if err != nil {
505✔
942
                return nil, err
×
943
        }
×
944

945
        return &node, nil
505✔
946
}
947

948
// SetSourceNode sets the source node within the graph database. The source
949
// node is to be used as the center of a star-graph within path finding
950
// algorithms.
951
func (c *KVStore) SetSourceNode(_ context.Context,
952
        node *models.LightningNode) error {
117✔
953

117✔
954
        nodePubBytes := node.PubKeyBytes[:]
117✔
955

117✔
956
        return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
234✔
957
                // First grab the nodes bucket which stores the mapping from
117✔
958
                // pubKey to node information.
117✔
959
                nodes, err := tx.CreateTopLevelBucket(nodeBucket)
117✔
960
                if err != nil {
117✔
961
                        return err
×
962
                }
×
963

964
                // Next we create the mapping from source to the targeted
965
                // public key.
966
                if err := nodes.Put(sourceKey, nodePubBytes); err != nil {
117✔
967
                        return err
×
968
                }
×
969

970
                // Finally, we commit the information of the lightning node
971
                // itself.
972
                return addLightningNode(tx, node)
117✔
973
        }, func() {})
117✔
974
}
975

976
// AddLightningNode adds a vertex/node to the graph database. If the node is not
// in the database from before, this will add a new, unconnected one to the
// graph. If it is present from before, this will update that node's
// information. Note that this method is expected to only be called to update an
// already present node from a node announcement, or to insert a node found in a
// channel update.
//
// TODO(roasbeef): also need sig of announcement.
func (c *KVStore) AddLightningNode(ctx context.Context,
	node *models.LightningNode, opts ...batch.SchedulerOption) error {

	// Submit the write through the node batch scheduler so the write may
	// be executed together with other pending node updates.
	r := &batch.Request[kvdb.RwTx]{
		Opts: batch.NewSchedulerOptions(opts...),
		Do: func(tx kvdb.RwTx) error {
			return addLightningNode(tx, node)
		},
	}

	return c.nodeScheduler.Execute(ctx, r)
}
996

997
func addLightningNode(tx kvdb.RwTx, node *models.LightningNode) error {
911✔
998
        nodes, err := tx.CreateTopLevelBucket(nodeBucket)
911✔
999
        if err != nil {
911✔
1000
                return err
×
1001
        }
×
1002

1003
        aliases, err := nodes.CreateBucketIfNotExists(aliasIndexBucket)
911✔
1004
        if err != nil {
911✔
1005
                return err
×
1006
        }
×
1007

1008
        updateIndex, err := nodes.CreateBucketIfNotExists(
911✔
1009
                nodeUpdateIndexBucket,
911✔
1010
        )
911✔
1011
        if err != nil {
911✔
1012
                return err
×
1013
        }
×
1014

1015
        return putLightningNode(nodes, aliases, updateIndex, node)
911✔
1016
}
1017

1018
// LookupAlias attempts to return the alias as advertised by the target node.
1019
// TODO(roasbeef): currently assumes that aliases are unique...
1020
func (c *KVStore) LookupAlias(_ context.Context,
1021
        pub *btcec.PublicKey) (string, error) {
5✔
1022

5✔
1023
        var alias string
5✔
1024

5✔
1025
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
10✔
1026
                nodes := tx.ReadBucket(nodeBucket)
5✔
1027
                if nodes == nil {
5✔
1028
                        return ErrGraphNodesNotFound
×
1029
                }
×
1030

1031
                aliases := nodes.NestedReadBucket(aliasIndexBucket)
5✔
1032
                if aliases == nil {
5✔
1033
                        return ErrGraphNodesNotFound
×
1034
                }
×
1035

1036
                nodePub := pub.SerializeCompressed()
5✔
1037
                a := aliases.Get(nodePub)
5✔
1038
                if a == nil {
6✔
1039
                        return ErrNodeAliasNotFound
1✔
1040
                }
1✔
1041

1042
                // TODO(roasbeef): should actually be using the utf-8
1043
                // package...
1044
                alias = string(a)
4✔
1045

4✔
1046
                return nil
4✔
1047
        }, func() {
5✔
1048
                alias = ""
5✔
1049
        })
5✔
1050
        if err != nil {
6✔
1051
                return "", err
1✔
1052
        }
1✔
1053

1054
        return alias, nil
4✔
1055
}
1056

1057
// DeleteLightningNode starts a new database transaction to remove a vertex/node
// from the database according to the node's public key.
func (c *KVStore) DeleteLightningNode(_ context.Context,
	nodePub route.Vertex) error {

	// TODO(roasbeef): ensure dangling edges are removed...
	return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
		nodes := tx.ReadWriteBucket(nodeBucket)
		if nodes == nil {
			return ErrGraphNodeNotFound
		}

		// Delegate the actual deletion to the helper that shares this
		// write transaction.
		return c.deleteLightningNode(nodes, nodePub[:])
	}, func() {})
}
1072

1073
// deleteLightningNode uses an existing database transaction to remove a
// vertex/node from the database according to the node's public key.
func (c *KVStore) deleteLightningNode(nodes kvdb.RwBucket,
	compressedPubKey []byte) error {

	aliases := nodes.NestedReadWriteBucket(aliasIndexBucket)
	if aliases == nil {
		return ErrGraphNodesNotFound
	}

	// Remove the pubkey -> alias index entry first.
	if err := aliases.Delete(compressedPubKey); err != nil {
		return err
	}

	// Before we delete the node, we'll fetch its current state so we can
	// determine when its last update was to clear out the node update
	// index.
	node, err := fetchLightningNode(nodes, compressedPubKey)
	if err != nil {
		return err
	}

	if err := nodes.Delete(compressedPubKey); err != nil {
		return err
	}

	// Finally, we'll delete the index entry for the node within the
	// nodeUpdateIndexBucket as this node is no longer active, so we don't
	// need to track its last update.
	nodeUpdateIndex := nodes.NestedReadWriteBucket(nodeUpdateIndexBucket)
	if nodeUpdateIndex == nil {
		return ErrGraphNodesNotFound
	}

	// In order to delete the entry, we'll need to reconstruct the key for
	// its last update: the 8-byte update timestamp followed by the 33-byte
	// compressed pubkey.
	updateUnix := uint64(node.LastUpdate.Unix())
	var indexKey [8 + 33]byte
	byteOrder.PutUint64(indexKey[:8], updateUnix)
	copy(indexKey[8:], compressedPubKey)

	return nodeUpdateIndex.Delete(indexKey[:])
}
1116

1117
// AddChannelEdge adds a new (undirected, blank) edge to the graph database. An
// undirected edge from the two target nodes are created. The information stored
// denotes the static attributes of the channel, such as the channelID, the keys
// involved in creation of the channel, and the set of features that the channel
// supports. The chanPoint and chanID are used to uniquely identify the edge
// globally within the database.
//
// Returns ErrEdgeAlreadyExist if the edge is already present.
func (c *KVStore) AddChannelEdge(ctx context.Context,
	edge *models.ChannelEdgeInfo, opts ...batch.SchedulerOption) error {

	var alreadyExists bool
	r := &batch.Request[kvdb.RwTx]{
		Opts: batch.NewSchedulerOptions(opts...),
		Reset: func() {
			alreadyExists = false
		},
		Do: func(tx kvdb.RwTx) error {
			err := c.addChannelEdge(tx, edge)

			// Silence ErrEdgeAlreadyExist so that the batch can
			// succeed, but propagate the error via local state.
			if errors.Is(err, ErrEdgeAlreadyExist) {
				alreadyExists = true
				return nil
			}

			return err
		},
		OnCommit: func(err error) error {
			switch {
			case err != nil:
				return err
			case alreadyExists:
				return ErrEdgeAlreadyExist
			default:
				// On a successful insert, invalidate any
				// cached entries for this channel.
				c.rejectCache.remove(edge.ChannelID)
				c.chanCache.remove(edge.ChannelID)
				return nil
			}
		},
	}

	return c.chanScheduler.Execute(ctx, r)
}
1160

1161
// addChannelEdge is the private form of AddChannelEdge that allows callers to
// utilize an existing db transaction. It returns ErrEdgeAlreadyExist if an
// edge with the same channel ID has already been stored.
func (c *KVStore) addChannelEdge(tx kvdb.RwTx,
	edge *models.ChannelEdgeInfo) error {

	// Construct the channel's primary key which is the 8-byte channel ID.
	var chanKey [8]byte
	binary.BigEndian.PutUint64(chanKey[:], edge.ChannelID)

	// Ensure all the buckets we write to below exist.
	nodes, err := tx.CreateTopLevelBucket(nodeBucket)
	if err != nil {
		return err
	}
	edges, err := tx.CreateTopLevelBucket(edgeBucket)
	if err != nil {
		return err
	}
	edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket)
	if err != nil {
		return err
	}
	chanIndex, err := edges.CreateBucketIfNotExists(channelPointBucket)
	if err != nil {
		return err
	}

	// First, attempt to check if this edge has already been created. If
	// so, then we can exit early as this method is meant to be idempotent.
	if edgeInfo := edgeIndex.Get(chanKey[:]); edgeInfo != nil {
		return ErrEdgeAlreadyExist
	}

	// Before we insert the channel into the database, we'll ensure that
	// both nodes already exist in the channel graph. If either node
	// doesn't, then we'll insert a "shell" node that just includes its
	// public key, so subsequent validation and queries can work properly.
	_, node1Err := fetchLightningNode(nodes, edge.NodeKey1Bytes[:])
	switch {
	case errors.Is(node1Err, ErrGraphNodeNotFound):
		node1Shell := models.LightningNode{
			PubKeyBytes:          edge.NodeKey1Bytes,
			HaveNodeAnnouncement: false,
		}
		err := addLightningNode(tx, &node1Shell)
		if err != nil {
			return fmt.Errorf("unable to create shell node "+
				"for: %x: %w", edge.NodeKey1Bytes, err)
		}
	case node1Err != nil:
		return node1Err
	}

	_, node2Err := fetchLightningNode(nodes, edge.NodeKey2Bytes[:])
	switch {
	case errors.Is(node2Err, ErrGraphNodeNotFound):
		node2Shell := models.LightningNode{
			PubKeyBytes:          edge.NodeKey2Bytes,
			HaveNodeAnnouncement: false,
		}
		err := addLightningNode(tx, &node2Shell)
		if err != nil {
			return fmt.Errorf("unable to create shell node "+
				"for: %x: %w", edge.NodeKey2Bytes, err)
		}
	case node2Err != nil:
		return node2Err
	}

	// If the edge hasn't been created yet, then we'll first add it to the
	// edge index in order to associate the edge between two nodes and also
	// store the static components of the channel.
	if err := putChanEdgeInfo(edgeIndex, edge, chanKey); err != nil {
		return err
	}

	// Mark edge policies for both sides as unknown. This is to enable
	// efficient incoming channel lookup for a node.
	keys := []*[33]byte{
		&edge.NodeKey1Bytes,
		&edge.NodeKey2Bytes,
	}
	for _, key := range keys {
		err := putChanEdgePolicyUnknown(edges, edge.ChannelID, key[:])
		if err != nil {
			return err
		}
	}

	// Finally we add it to the channel index which maps channel points
	// (outpoints) to the shorter channel ID's.
	var b bytes.Buffer
	if err := WriteOutpoint(&b, &edge.ChannelPoint); err != nil {
		return err
	}

	return chanIndex.Put(b.Bytes(), chanKey[:])
}
1258

1259
// HasChannelEdge returns true if the database knows of a channel edge with the
// passed channel ID, and false otherwise. If an edge with that ID is found
// within the graph, then two time stamps representing the last time the edge
// was updated for both directed edges are returned along with the boolean. If
// it is not found, then the zombie index is checked and its result is returned
// as the second boolean.
func (c *KVStore) HasChannelEdge(
	chanID uint64) (time.Time, time.Time, bool, bool, error) {

	var (
		upd1Time time.Time
		upd2Time time.Time
		exists   bool
		isZombie bool
	)

	// We'll query the cache with the shared lock held to allow multiple
	// readers to access values in the cache concurrently if they exist.
	c.cacheMu.RLock()
	if entry, ok := c.rejectCache.get(chanID); ok {
		c.cacheMu.RUnlock()
		upd1Time = time.Unix(entry.upd1Time, 0)
		upd2Time = time.Unix(entry.upd2Time, 0)
		exists, isZombie = entry.flags.unpack()

		return upd1Time, upd2Time, exists, isZombie, nil
	}
	c.cacheMu.RUnlock()

	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()

	// The item was not found with the shared lock, so we'll acquire the
	// exclusive lock and check the cache again in case another method added
	// the entry to the cache while no lock was held.
	if entry, ok := c.rejectCache.get(chanID); ok {
		upd1Time = time.Unix(entry.upd1Time, 0)
		upd2Time = time.Unix(entry.upd2Time, 0)
		exists, isZombie = entry.flags.unpack()

		return upd1Time, upd2Time, exists, isZombie, nil
	}

	// Cache miss: fall back to the database under a fresh read
	// transaction.
	if err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrGraphNoEdgesFound
		}

		var channelID [8]byte
		byteOrder.PutUint64(channelID[:], chanID)

		// If the edge doesn't exist, then we'll also check our zombie
		// index.
		if edgeIndex.Get(channelID[:]) == nil {
			exists = false
			zombieIndex := edges.NestedReadBucket(zombieBucket)
			if zombieIndex != nil {
				isZombie, _, _ = isZombieEdge(
					zombieIndex, chanID,
				)
			}

			return nil
		}

		exists = true
		isZombie = false

		// If the channel has been found in the graph, then retrieve
		// the edges itself so we can return the last updated
		// timestamps.
		nodes := tx.ReadBucket(nodeBucket)
		if nodes == nil {
			return ErrGraphNodeNotFound
		}

		e1, e2, err := fetchChanEdgePolicies(
			edgeIndex, edges, channelID[:],
		)
		if err != nil {
			return err
		}

		// As we may have only one of the edges populated, only set the
		// update time if the edge was found in the database.
		if e1 != nil {
			upd1Time = e1.LastUpdate
		}
		if e2 != nil {
			upd2Time = e2.LastUpdate
		}

		return nil
	}, func() {}); err != nil {
		return time.Time{}, time.Time{}, exists, isZombie, err
	}

	// Insert the result into the cache so subsequent lookups can be served
	// from memory.
	c.rejectCache.insert(chanID, rejectCacheEntry{
		upd1Time: upd1Time.Unix(),
		upd2Time: upd2Time.Unix(),
		flags:    packRejectFlags(exists, isZombie),
	})

	return upd1Time, upd2Time, exists, isZombie, nil
}
1369

1370
// AddEdgeProof sets the proof of an existing edge in the graph database.
1371
func (c *KVStore) AddEdgeProof(chanID lnwire.ShortChannelID,
1372
        proof *models.ChannelAuthProof) error {
5✔
1373

5✔
1374
        // Construct the channel's primary key which is the 8-byte channel ID.
5✔
1375
        var chanKey [8]byte
5✔
1376
        binary.BigEndian.PutUint64(chanKey[:], chanID.ToUint64())
5✔
1377

5✔
1378
        return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
10✔
1379
                edges := tx.ReadWriteBucket(edgeBucket)
5✔
1380
                if edges == nil {
5✔
1381
                        return ErrEdgeNotFound
×
1382
                }
×
1383

1384
                edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
5✔
1385
                if edgeIndex == nil {
5✔
1386
                        return ErrEdgeNotFound
×
1387
                }
×
1388

1389
                edge, err := fetchChanEdgeInfo(edgeIndex, chanKey[:])
5✔
1390
                if err != nil {
5✔
1391
                        return err
×
1392
                }
×
1393

1394
                edge.AuthProof = proof
5✔
1395

5✔
1396
                return putChanEdgeInfo(edgeIndex, &edge, chanKey)
5✔
1397
        }, func() {})
5✔
1398
}
1399

1400
const (
	// pruneTipBytes is the total size of the value which stores a prune
	// entry of the graph in the prune log. The "prune tip" is the last
	// entry in the prune log, and indicates if the channel graph is in
	// sync with the current UTXO state. The structure of the value
	// is: blockHash, taking 32 bytes total.
	pruneTipBytes = 32
)
1408

1409
// PruneGraph prunes newly closed channels from the channel graph in response
1410
// to a new block being solved on the network. Any transactions which spend the
1411
// funding output of any known channels within he graph will be deleted.
1412
// Additionally, the "prune tip", or the last block which has been used to
1413
// prune the graph is stored so callers can ensure the graph is fully in sync
1414
// with the current UTXO state. A slice of channels that have been closed by
1415
// the target block along with any pruned nodes are returned if the function
1416
// succeeds without error.
1417
func (c *KVStore) PruneGraph(spentOutputs []*wire.OutPoint,
1418
        blockHash *chainhash.Hash, blockHeight uint32) (
1419
        []*models.ChannelEdgeInfo, []route.Vertex, error) {
244✔
1420

244✔
1421
        c.cacheMu.Lock()
244✔
1422
        defer c.cacheMu.Unlock()
244✔
1423

244✔
1424
        var (
244✔
1425
                chansClosed []*models.ChannelEdgeInfo
244✔
1426
                prunedNodes []route.Vertex
244✔
1427
        )
244✔
1428

244✔
1429
        err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
488✔
1430
                // First grab the edges bucket which houses the information
244✔
1431
                // we'd like to delete
244✔
1432
                edges, err := tx.CreateTopLevelBucket(edgeBucket)
244✔
1433
                if err != nil {
244✔
1434
                        return err
×
1435
                }
×
1436

1437
                // Next grab the two edge indexes which will also need to be
1438
                // updated.
1439
                edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket)
244✔
1440
                if err != nil {
244✔
1441
                        return err
×
1442
                }
×
1443
                chanIndex, err := edges.CreateBucketIfNotExists(
244✔
1444
                        channelPointBucket,
244✔
1445
                )
244✔
1446
                if err != nil {
244✔
1447
                        return err
×
1448
                }
×
1449
                nodes := tx.ReadWriteBucket(nodeBucket)
244✔
1450
                if nodes == nil {
244✔
1451
                        return ErrSourceNodeNotSet
×
1452
                }
×
1453
                zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
244✔
1454
                if err != nil {
244✔
1455
                        return err
×
1456
                }
×
1457

1458
                // For each of the outpoints that have been spent within the
1459
                // block, we attempt to delete them from the graph as if that
1460
                // outpoint was a channel, then it has now been closed.
1461
                for _, chanPoint := range spentOutputs {
389✔
1462
                        // TODO(roasbeef): load channel bloom filter, continue
145✔
1463
                        // if NOT if filter
145✔
1464

145✔
1465
                        var opBytes bytes.Buffer
145✔
1466
                        err := WriteOutpoint(&opBytes, chanPoint)
145✔
1467
                        if err != nil {
145✔
1468
                                return err
×
1469
                        }
×
1470

1471
                        // First attempt to see if the channel exists within
1472
                        // the database, if not, then we can exit early.
1473
                        chanID := chanIndex.Get(opBytes.Bytes())
145✔
1474
                        if chanID == nil {
261✔
1475
                                continue
116✔
1476
                        }
1477

1478
                        // Attempt to delete the channel, an ErrEdgeNotFound
1479
                        // will be returned if that outpoint isn't known to be
1480
                        // a channel. If no error is returned, then a channel
1481
                        // was successfully pruned.
1482
                        edgeInfo, err := c.delChannelEdgeUnsafe(
29✔
1483
                                edges, edgeIndex, chanIndex, zombieIndex,
29✔
1484
                                chanID, false, false,
29✔
1485
                        )
29✔
1486
                        if err != nil && !errors.Is(err, ErrEdgeNotFound) {
29✔
1487
                                return err
×
1488
                        }
×
1489

1490
                        chansClosed = append(chansClosed, edgeInfo)
29✔
1491
                }
1492

1493
                metaBucket, err := tx.CreateTopLevelBucket(graphMetaBucket)
244✔
1494
                if err != nil {
244✔
1495
                        return err
×
1496
                }
×
1497

1498
                pruneBucket, err := metaBucket.CreateBucketIfNotExists(
244✔
1499
                        pruneLogBucket,
244✔
1500
                )
244✔
1501
                if err != nil {
244✔
1502
                        return err
×
1503
                }
×
1504

1505
                // With the graph pruned, add a new entry to the prune log,
1506
                // which can be used to check if the graph is fully synced with
1507
                // the current UTXO state.
1508
                var blockHeightBytes [4]byte
244✔
1509
                byteOrder.PutUint32(blockHeightBytes[:], blockHeight)
244✔
1510

244✔
1511
                var newTip [pruneTipBytes]byte
244✔
1512
                copy(newTip[:], blockHash[:])
244✔
1513

244✔
1514
                err = pruneBucket.Put(blockHeightBytes[:], newTip[:])
244✔
1515
                if err != nil {
244✔
1516
                        return err
×
1517
                }
×
1518

1519
                // Now that the graph has been pruned, we'll also attempt to
1520
                // prune any nodes that have had a channel closed within the
1521
                // latest block.
1522
                prunedNodes, err = c.pruneGraphNodes(nodes, edgeIndex)
244✔
1523

244✔
1524
                return err
244✔
1525
        }, func() {
244✔
1526
                chansClosed = nil
244✔
1527
                prunedNodes = nil
244✔
1528
        })
244✔
1529
        if err != nil {
244✔
1530
                return nil, nil, err
×
1531
        }
×
1532

1533
        for _, channel := range chansClosed {
273✔
1534
                c.rejectCache.remove(channel.ChannelID)
29✔
1535
                c.chanCache.remove(channel.ChannelID)
29✔
1536
        }
29✔
1537

1538
        return chansClosed, prunedNodes, nil
244✔
1539
}
1540

1541
// PruneGraphNodes is a garbage collection method which attempts to prune out
1542
// any nodes from the channel graph that are currently unconnected. This ensure
1543
// that we only maintain a graph of reachable nodes. In the event that a pruned
1544
// node gains more channels, it will be re-added back to the graph.
1545
func (c *KVStore) PruneGraphNodes() ([]route.Vertex, error) {
26✔
1546
        var prunedNodes []route.Vertex
26✔
1547
        err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
52✔
1548
                nodes := tx.ReadWriteBucket(nodeBucket)
26✔
1549
                if nodes == nil {
26✔
1550
                        return ErrGraphNodesNotFound
×
1551
                }
×
1552
                edges := tx.ReadWriteBucket(edgeBucket)
26✔
1553
                if edges == nil {
26✔
1554
                        return ErrGraphNotFound
×
1555
                }
×
1556
                edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
26✔
1557
                if edgeIndex == nil {
26✔
1558
                        return ErrGraphNoEdgesFound
×
1559
                }
×
1560

1561
                var err error
26✔
1562
                prunedNodes, err = c.pruneGraphNodes(nodes, edgeIndex)
26✔
1563
                if err != nil {
26✔
1564
                        return err
×
1565
                }
×
1566

1567
                return nil
26✔
1568
        }, func() {
26✔
1569
                prunedNodes = nil
26✔
1570
        })
26✔
1571

1572
        return prunedNodes, err
26✔
1573
}
1574

1575
// pruneGraphNodes attempts to remove any nodes from the graph who have had a
// channel closed within the current block. If the node still has existing
// channels in the graph, this will act as a no-op. It returns the set of
// node public keys that were deleted.
//
// NOTE: this must be called within an open read/write transaction; `nodes`
// and `edgeIndex` are buckets belonging to that transaction.
func (c *KVStore) pruneGraphNodes(nodes kvdb.RwBucket,
	edgeIndex kvdb.RwBucket) ([]route.Vertex, error) {

	log.Trace("Pruning nodes from graph with no open channels")

	// We'll retrieve the graph's source node to ensure we don't remove it
	// even if it no longer has any open channels.
	sourceNode, err := sourceNodeWithTx(nodes)
	if err != nil {
		return nil, err
	}

	// We'll use this map to keep count the number of references to a node
	// in the graph. A node should only be removed once it has no more
	// references in the graph.
	nodeRefCounts := make(map[[33]byte]int)
	err = nodes.ForEach(func(pubKey, nodeBytes []byte) error {
		// If this is the source key, then we skip this
		// iteration as the value for this key is a pubKey
		// rather than raw node information. Any key that is not
		// exactly 33 bytes long cannot be a node pubkey either.
		if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 {
			return nil
		}

		// Seed every known node with a ref count of zero; edges
		// scanned below will bump the counts.
		var nodePub [33]byte
		copy(nodePub[:], pubKey)
		nodeRefCounts[nodePub] = 0

		return nil
	})
	if err != nil {
		return nil, err
	}

	// To ensure we never delete the source node, we'll start off by
	// bumping its ref count to 1.
	nodeRefCounts[sourceNode.PubKeyBytes] = 1

	// Next, we'll run through the edgeIndex which maps a channel ID to the
	// edge info. We'll use this scan to populate our reference count map
	// above.
	err = edgeIndex.ForEach(func(chanID, edgeInfoBytes []byte) error {
		// The first 66 bytes of the edge info contain the pubkeys of
		// the nodes that this edge attaches. We'll extract them, and
		// add them to the ref count map.
		var node1, node2 [33]byte
		copy(node1[:], edgeInfoBytes[:33])
		copy(node2[:], edgeInfoBytes[33:])

		// With the nodes extracted, we'll increase the ref count of
		// each of the nodes.
		nodeRefCounts[node1]++
		nodeRefCounts[node2]++

		return nil
	})
	if err != nil {
		return nil, err
	}

	// Finally, we'll make a second pass over the set of nodes, and delete
	// any nodes that have a ref count of zero.
	var pruned []route.Vertex
	for nodePubKey, refCount := range nodeRefCounts {
		// If the ref count of the node isn't zero, then we can safely
		// skip it as it still has edges to or from it within the
		// graph.
		if refCount != 0 {
			continue
		}

		// If we reach this point, then there are no longer any edges
		// that connect this node, so we can delete it.
		err := c.deleteLightningNode(nodes, nodePubKey[:])
		if err != nil {
			// A missing node is tolerated (it may already have
			// been removed); anything else aborts the prune.
			if errors.Is(err, ErrGraphNodeNotFound) ||
				errors.Is(err, ErrGraphNodesNotFound) {

				log.Warnf("Unable to prune node %x from the "+
					"graph: %v", nodePubKey, err)
				continue
			}

			return nil, err
		}

		log.Infof("Pruned unconnected node %x from channel graph",
			nodePubKey[:])

		pruned = append(pruned, nodePubKey)
	}

	if len(pruned) > 0 {
		log.Infof("Pruned %v unconnected nodes from the channel graph",
			len(pruned))
	}

	return pruned, err
}
1677

1678
// DisconnectBlockAtHeight is used to indicate that the block specified
1679
// by the passed height has been disconnected from the main chain. This
1680
// will "rewind" the graph back to the height below, deleting channels
1681
// that are no longer confirmed from the graph. The prune log will be
1682
// set to the last prune height valid for the remaining chain.
1683
// Channels that were removed from the graph resulting from the
1684
// disconnected block are returned.
1685
func (c *KVStore) DisconnectBlockAtHeight(height uint32) (
1686
        []*models.ChannelEdgeInfo, error) {
155✔
1687

155✔
1688
        // Every channel having a ShortChannelID starting at 'height'
155✔
1689
        // will no longer be confirmed.
155✔
1690
        startShortChanID := lnwire.ShortChannelID{
155✔
1691
                BlockHeight: height,
155✔
1692
        }
155✔
1693

155✔
1694
        // Delete everything after this height from the db up until the
155✔
1695
        // SCID alias range.
155✔
1696
        endShortChanID := aliasmgr.StartingAlias
155✔
1697

155✔
1698
        // The block height will be the 3 first bytes of the channel IDs.
155✔
1699
        var chanIDStart [8]byte
155✔
1700
        byteOrder.PutUint64(chanIDStart[:], startShortChanID.ToUint64())
155✔
1701
        var chanIDEnd [8]byte
155✔
1702
        byteOrder.PutUint64(chanIDEnd[:], endShortChanID.ToUint64())
155✔
1703

155✔
1704
        c.cacheMu.Lock()
155✔
1705
        defer c.cacheMu.Unlock()
155✔
1706

155✔
1707
        // Keep track of the channels that are removed from the graph.
155✔
1708
        var removedChans []*models.ChannelEdgeInfo
155✔
1709

155✔
1710
        if err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
310✔
1711
                edges, err := tx.CreateTopLevelBucket(edgeBucket)
155✔
1712
                if err != nil {
155✔
1713
                        return err
×
1714
                }
×
1715
                edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket)
155✔
1716
                if err != nil {
155✔
1717
                        return err
×
1718
                }
×
1719
                chanIndex, err := edges.CreateBucketIfNotExists(
155✔
1720
                        channelPointBucket,
155✔
1721
                )
155✔
1722
                if err != nil {
155✔
1723
                        return err
×
1724
                }
×
1725
                zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
155✔
1726
                if err != nil {
155✔
1727
                        return err
×
1728
                }
×
1729

1730
                // Scan from chanIDStart to chanIDEnd, deleting every
1731
                // found edge.
1732
                // NOTE: we must delete the edges after the cursor loop, since
1733
                // modifying the bucket while traversing is not safe.
1734
                // NOTE: We use a < comparison in bytes.Compare instead of <=
1735
                // so that the StartingAlias itself isn't deleted.
1736
                var keys [][]byte
155✔
1737
                cursor := edgeIndex.ReadWriteCursor()
155✔
1738

155✔
1739
                //nolint:ll
155✔
1740
                for k, _ := cursor.Seek(chanIDStart[:]); k != nil &&
155✔
1741
                        bytes.Compare(k, chanIDEnd[:]) < 0; k, _ = cursor.Next() {
244✔
1742
                        keys = append(keys, k)
89✔
1743
                }
89✔
1744

1745
                for _, k := range keys {
244✔
1746
                        edgeInfo, err := c.delChannelEdgeUnsafe(
89✔
1747
                                edges, edgeIndex, chanIndex, zombieIndex,
89✔
1748
                                k, false, false,
89✔
1749
                        )
89✔
1750
                        if err != nil && !errors.Is(err, ErrEdgeNotFound) {
89✔
1751
                                return err
×
1752
                        }
×
1753

1754
                        removedChans = append(removedChans, edgeInfo)
89✔
1755
                }
1756

1757
                // Delete all the entries in the prune log having a height
1758
                // greater or equal to the block disconnected.
1759
                metaBucket, err := tx.CreateTopLevelBucket(graphMetaBucket)
155✔
1760
                if err != nil {
155✔
1761
                        return err
×
1762
                }
×
1763

1764
                pruneBucket, err := metaBucket.CreateBucketIfNotExists(
155✔
1765
                        pruneLogBucket,
155✔
1766
                )
155✔
1767
                if err != nil {
155✔
1768
                        return err
×
1769
                }
×
1770

1771
                var pruneKeyStart [4]byte
155✔
1772
                byteOrder.PutUint32(pruneKeyStart[:], height)
155✔
1773

155✔
1774
                var pruneKeyEnd [4]byte
155✔
1775
                byteOrder.PutUint32(pruneKeyEnd[:], math.MaxUint32)
155✔
1776

155✔
1777
                // To avoid modifying the bucket while traversing, we delete
155✔
1778
                // the keys in a second loop.
155✔
1779
                var pruneKeys [][]byte
155✔
1780
                pruneCursor := pruneBucket.ReadWriteCursor()
155✔
1781
                //nolint:ll
155✔
1782
                for k, _ := pruneCursor.Seek(pruneKeyStart[:]); k != nil &&
155✔
1783
                        bytes.Compare(k, pruneKeyEnd[:]) <= 0; k, _ = pruneCursor.Next() {
250✔
1784
                        pruneKeys = append(pruneKeys, k)
95✔
1785
                }
95✔
1786

1787
                for _, k := range pruneKeys {
250✔
1788
                        if err := pruneBucket.Delete(k); err != nil {
95✔
1789
                                return err
×
1790
                        }
×
1791
                }
1792

1793
                return nil
155✔
1794
        }, func() {
155✔
1795
                removedChans = nil
155✔
1796
        }); err != nil {
155✔
1797
                return nil, err
×
1798
        }
×
1799

1800
        for _, channel := range removedChans {
244✔
1801
                c.rejectCache.remove(channel.ChannelID)
89✔
1802
                c.chanCache.remove(channel.ChannelID)
89✔
1803
        }
89✔
1804

1805
        return removedChans, nil
155✔
1806
}
1807

1808
// PruneTip returns the block height and hash of the latest block that has been
1809
// used to prune channels in the graph. Knowing the "prune tip" allows callers
1810
// to tell if the graph is currently in sync with the current best known UTXO
1811
// state.
1812
func (c *KVStore) PruneTip() (*chainhash.Hash, uint32, error) {
56✔
1813
        var (
56✔
1814
                tipHash   chainhash.Hash
56✔
1815
                tipHeight uint32
56✔
1816
        )
56✔
1817

56✔
1818
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
112✔
1819
                graphMeta := tx.ReadBucket(graphMetaBucket)
56✔
1820
                if graphMeta == nil {
56✔
1821
                        return ErrGraphNotFound
×
1822
                }
×
1823
                pruneBucket := graphMeta.NestedReadBucket(pruneLogBucket)
56✔
1824
                if pruneBucket == nil {
56✔
1825
                        return ErrGraphNeverPruned
×
1826
                }
×
1827

1828
                pruneCursor := pruneBucket.ReadCursor()
56✔
1829

56✔
1830
                // The prune key with the largest block height will be our
56✔
1831
                // prune tip.
56✔
1832
                k, v := pruneCursor.Last()
56✔
1833
                if k == nil {
77✔
1834
                        return ErrGraphNeverPruned
21✔
1835
                }
21✔
1836

1837
                // Once we have the prune tip, the value will be the block hash,
1838
                // and the key the block height.
1839
                copy(tipHash[:], v)
38✔
1840
                tipHeight = byteOrder.Uint32(k)
38✔
1841

38✔
1842
                return nil
38✔
1843
        }, func() {})
56✔
1844
        if err != nil {
77✔
1845
                return nil, 0, err
21✔
1846
        }
21✔
1847

1848
        return &tipHash, tipHeight, nil
38✔
1849
}
1850

1851
// DeleteChannelEdges removes edges with the given channel IDs from the
// database and marks them as zombies. This ensures that we're unable to re-add
// it to our database once again. If an edge does not exist within the
// database, then ErrEdgeNotFound will be returned. If strictZombiePruning is
// true, then when we mark these edges as zombies, we'll set up the keys such
// that we require the node that failed to send the fresh update to be the one
// that resurrects the channel from its zombie state. The markZombie bool
// denotes whether or not to mark the channel as a zombie.
func (c *KVStore) DeleteChannelEdges(strictZombiePruning, markZombie bool,
	chanIDs ...uint64) ([]*models.ChannelEdgeInfo, error) {

	// TODO(roasbeef): possibly delete from node bucket if node has no more
	// channels
	// TODO(roasbeef): don't delete both edges?

	// Hold the cache lock for the whole operation so the DB update and
	// the cache evictions below are seen atomically by other callers.
	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()

	var infos []*models.ChannelEdgeInfo
	err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
		edges := tx.ReadWriteBucket(edgeBucket)
		if edges == nil {
			return ErrEdgeNotFound
		}
		edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrEdgeNotFound
		}
		chanIndex := edges.NestedReadWriteBucket(channelPointBucket)
		if chanIndex == nil {
			return ErrEdgeNotFound
		}
		nodes := tx.ReadWriteBucket(nodeBucket)
		if nodes == nil {
			return ErrGraphNodeNotFound
		}
		zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
		if err != nil {
			return err
		}

		// Delete each requested channel in turn. Any failure
		// (including ErrEdgeNotFound) aborts the whole transaction,
		// so either all channels are deleted or none are.
		var rawChanID [8]byte
		for _, chanID := range chanIDs {
			byteOrder.PutUint64(rawChanID[:], chanID)
			edgeInfo, err := c.delChannelEdgeUnsafe(
				edges, edgeIndex, chanIndex, zombieIndex,
				rawChanID[:], markZombie, strictZombiePruning,
			)
			if err != nil {
				return err
			}

			infos = append(infos, edgeInfo)
		}

		return nil
	}, func() {
		// Reset on retry so a failed attempt's partial results are
		// never reused.
		infos = nil
	})
	if err != nil {
		return nil, err
	}

	// With the DB update committed, evict the deleted channels from the
	// in-memory caches.
	for _, chanID := range chanIDs {
		c.rejectCache.remove(chanID)
		c.chanCache.remove(chanID)
	}

	return infos, nil
}
1921

1922
// ChannelID attempt to lookup the 8-byte compact channel ID which maps to the
1923
// passed channel point (outpoint). If the passed channel doesn't exist within
1924
// the database, then ErrEdgeNotFound is returned.
1925
func (c *KVStore) ChannelID(chanPoint *wire.OutPoint) (uint64, error) {
4✔
1926
        var chanID uint64
4✔
1927
        if err := kvdb.View(c.db, func(tx kvdb.RTx) error {
8✔
1928
                var err error
4✔
1929
                chanID, err = getChanID(tx, chanPoint)
4✔
1930
                return err
4✔
1931
        }, func() {
8✔
1932
                chanID = 0
4✔
1933
        }); err != nil {
7✔
1934
                return 0, err
3✔
1935
        }
3✔
1936

1937
        return chanID, nil
4✔
1938
}
1939

1940
// getChanID returns the assigned channel ID for a given channel point.
1941
func getChanID(tx kvdb.RTx, chanPoint *wire.OutPoint) (uint64, error) {
4✔
1942
        var b bytes.Buffer
4✔
1943
        if err := WriteOutpoint(&b, chanPoint); err != nil {
4✔
1944
                return 0, err
×
1945
        }
×
1946

1947
        edges := tx.ReadBucket(edgeBucket)
4✔
1948
        if edges == nil {
4✔
1949
                return 0, ErrGraphNoEdgesFound
×
1950
        }
×
1951
        chanIndex := edges.NestedReadBucket(channelPointBucket)
4✔
1952
        if chanIndex == nil {
4✔
1953
                return 0, ErrGraphNoEdgesFound
×
1954
        }
×
1955

1956
        chanIDBytes := chanIndex.Get(b.Bytes())
4✔
1957
        if chanIDBytes == nil {
7✔
1958
                return 0, ErrEdgeNotFound
3✔
1959
        }
3✔
1960

1961
        chanID := byteOrder.Uint64(chanIDBytes)
4✔
1962

4✔
1963
        return chanID, nil
4✔
1964
}
1965

1966
// TODO(roasbeef): allow updates to use Batch?
1967

1968
// HighestChanID returns the "highest" known channel ID in the channel graph.
1969
// This represents the "newest" channel from the PoV of the chain. This method
1970
// can be used by peers to quickly determine if they're graphs are in sync.
1971
func (c *KVStore) HighestChanID(_ context.Context) (uint64, error) {
6✔
1972
        var cid uint64
6✔
1973

6✔
1974
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
12✔
1975
                edges := tx.ReadBucket(edgeBucket)
6✔
1976
                if edges == nil {
6✔
1977
                        return ErrGraphNoEdgesFound
×
1978
                }
×
1979
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
6✔
1980
                if edgeIndex == nil {
6✔
1981
                        return ErrGraphNoEdgesFound
×
1982
                }
×
1983

1984
                // In order to find the highest chan ID, we'll fetch a cursor
1985
                // and use that to seek to the "end" of our known rage.
1986
                cidCursor := edgeIndex.ReadCursor()
6✔
1987

6✔
1988
                lastChanID, _ := cidCursor.Last()
6✔
1989

6✔
1990
                // If there's no key, then this means that we don't actually
6✔
1991
                // know of any channels, so we'll return a predicable error.
6✔
1992
                if lastChanID == nil {
10✔
1993
                        return ErrGraphNoEdgesFound
4✔
1994
                }
4✔
1995

1996
                // Otherwise, we'll de serialize the channel ID and return it
1997
                // to the caller.
1998
                cid = byteOrder.Uint64(lastChanID)
5✔
1999

5✔
2000
                return nil
5✔
2001
        }, func() {
6✔
2002
                cid = 0
6✔
2003
        })
6✔
2004
        if err != nil && !errors.Is(err, ErrGraphNoEdgesFound) {
6✔
2005
                return 0, err
×
2006
        }
×
2007

2008
        return cid, nil
6✔
2009
}
2010

2011
// ChannelEdge represents the complete set of information for a channel edge in
// the known channel graph. This struct couples the core information of the
// edge as well as each of the known advertised edge policies.
type ChannelEdge struct {
	// Info contains all the static information describing the channel.
	Info *models.ChannelEdgeInfo

	// Policy1 points to the "first" edge policy of the channel containing
	// the dynamic information required to properly route through the edge.
	Policy1 *models.ChannelEdgePolicy

	// Policy2 points to the "second" edge policy of the channel containing
	// the dynamic information required to properly route through the edge.
	Policy2 *models.ChannelEdgePolicy

	// Node1 is "node 1" in the channel. This is the node that would have
	// produced Policy1 if it exists.
	Node1 *models.LightningNode

	// Node2 is "node 2" in the channel. This is the node that would have
	// produced Policy2 if it exists.
	Node2 *models.LightningNode
}
2034

2035
// ChanUpdatesInHorizon returns all the known channel edges which have at least
2036
// one edge that has an update timestamp within the specified horizon.
2037
func (c *KVStore) ChanUpdatesInHorizon(startTime,
2038
        endTime time.Time) ([]ChannelEdge, error) {
144✔
2039

144✔
2040
        // To ensure we don't return duplicate ChannelEdges, we'll use an
144✔
2041
        // additional map to keep track of the edges already seen to prevent
144✔
2042
        // re-adding it.
144✔
2043
        var edgesSeen map[uint64]struct{}
144✔
2044
        var edgesToCache map[uint64]ChannelEdge
144✔
2045
        var edgesInHorizon []ChannelEdge
144✔
2046

144✔
2047
        c.cacheMu.Lock()
144✔
2048
        defer c.cacheMu.Unlock()
144✔
2049

144✔
2050
        var hits int
144✔
2051
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
288✔
2052
                edges := tx.ReadBucket(edgeBucket)
144✔
2053
                if edges == nil {
144✔
2054
                        return ErrGraphNoEdgesFound
×
2055
                }
×
2056
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
144✔
2057
                if edgeIndex == nil {
144✔
2058
                        return ErrGraphNoEdgesFound
×
2059
                }
×
2060
                edgeUpdateIndex := edges.NestedReadBucket(edgeUpdateIndexBucket)
144✔
2061
                if edgeUpdateIndex == nil {
144✔
2062
                        return ErrGraphNoEdgesFound
×
2063
                }
×
2064

2065
                nodes := tx.ReadBucket(nodeBucket)
144✔
2066
                if nodes == nil {
144✔
2067
                        return ErrGraphNodesNotFound
×
2068
                }
×
2069

2070
                // We'll now obtain a cursor to perform a range query within
2071
                // the index to find all channels within the horizon.
2072
                updateCursor := edgeUpdateIndex.ReadCursor()
144✔
2073

144✔
2074
                var startTimeBytes, endTimeBytes [8 + 8]byte
144✔
2075
                byteOrder.PutUint64(
144✔
2076
                        startTimeBytes[:8], uint64(startTime.Unix()),
144✔
2077
                )
144✔
2078
                byteOrder.PutUint64(
144✔
2079
                        endTimeBytes[:8], uint64(endTime.Unix()),
144✔
2080
                )
144✔
2081

144✔
2082
                // With our start and end times constructed, we'll step through
144✔
2083
                // the index collecting the info and policy of each update of
144✔
2084
                // each channel that has a last update within the time range.
144✔
2085
                //
144✔
2086
                //nolint:ll
144✔
2087
                for indexKey, _ := updateCursor.Seek(startTimeBytes[:]); indexKey != nil &&
144✔
2088
                        bytes.Compare(indexKey, endTimeBytes[:]) <= 0; indexKey, _ = updateCursor.Next() {
193✔
2089
                        // We have a new eligible entry, so we'll slice of the
49✔
2090
                        // chan ID so we can query it in the DB.
49✔
2091
                        chanID := indexKey[8:]
49✔
2092

49✔
2093
                        // If we've already retrieved the info and policies for
49✔
2094
                        // this edge, then we can skip it as we don't need to do
49✔
2095
                        // so again.
49✔
2096
                        chanIDInt := byteOrder.Uint64(chanID)
49✔
2097
                        if _, ok := edgesSeen[chanIDInt]; ok {
68✔
2098
                                continue
19✔
2099
                        }
2100

2101
                        if channel, ok := c.chanCache.get(chanIDInt); ok {
41✔
2102
                                hits++
11✔
2103
                                edgesSeen[chanIDInt] = struct{}{}
11✔
2104
                                edgesInHorizon = append(edgesInHorizon, channel)
11✔
2105

11✔
2106
                                continue
11✔
2107
                        }
2108

2109
                        // First, we'll fetch the static edge information.
2110
                        edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID)
21✔
2111
                        if err != nil {
21✔
2112
                                chanID := byteOrder.Uint64(chanID)
×
2113
                                return fmt.Errorf("unable to fetch info for "+
×
2114
                                        "edge with chan_id=%v: %v", chanID, err)
×
2115
                        }
×
2116

2117
                        // With the static information obtained, we'll now
2118
                        // fetch the dynamic policy info.
2119
                        edge1, edge2, err := fetchChanEdgePolicies(
21✔
2120
                                edgeIndex, edges, chanID,
21✔
2121
                        )
21✔
2122
                        if err != nil {
21✔
2123
                                chanID := byteOrder.Uint64(chanID)
×
2124
                                return fmt.Errorf("unable to fetch policies "+
×
2125
                                        "for edge with chan_id=%v: %v", chanID,
×
2126
                                        err)
×
2127
                        }
×
2128

2129
                        node1, err := fetchLightningNode(
21✔
2130
                                nodes, edgeInfo.NodeKey1Bytes[:],
21✔
2131
                        )
21✔
2132
                        if err != nil {
21✔
2133
                                return err
×
2134
                        }
×
2135

2136
                        node2, err := fetchLightningNode(
21✔
2137
                                nodes, edgeInfo.NodeKey2Bytes[:],
21✔
2138
                        )
21✔
2139
                        if err != nil {
21✔
2140
                                return err
×
2141
                        }
×
2142

2143
                        // Finally, we'll collate this edge with the rest of
2144
                        // edges to be returned.
2145
                        edgesSeen[chanIDInt] = struct{}{}
21✔
2146
                        channel := ChannelEdge{
21✔
2147
                                Info:    &edgeInfo,
21✔
2148
                                Policy1: edge1,
21✔
2149
                                Policy2: edge2,
21✔
2150
                                Node1:   &node1,
21✔
2151
                                Node2:   &node2,
21✔
2152
                        }
21✔
2153
                        edgesInHorizon = append(edgesInHorizon, channel)
21✔
2154
                        edgesToCache[chanIDInt] = channel
21✔
2155
                }
2156

2157
                return nil
144✔
2158
        }, func() {
144✔
2159
                edgesSeen = make(map[uint64]struct{})
144✔
2160
                edgesToCache = make(map[uint64]ChannelEdge)
144✔
2161
                edgesInHorizon = nil
144✔
2162
        })
144✔
2163
        switch {
144✔
2164
        case errors.Is(err, ErrGraphNoEdgesFound):
×
2165
                fallthrough
×
2166
        case errors.Is(err, ErrGraphNodesNotFound):
×
2167
                break
×
2168

2169
        case err != nil:
×
2170
                return nil, err
×
2171
        }
2172

2173
        // Insert any edges loaded from disk into the cache.
2174
        for chanid, channel := range edgesToCache {
165✔
2175
                c.chanCache.insert(chanid, channel)
21✔
2176
        }
21✔
2177

2178
        if len(edgesInHorizon) > 0 {
152✔
2179
                log.Debugf("ChanUpdatesInHorizon hit percentage: %.2f (%d/%d)",
8✔
2180
                        float64(hits)*100/float64(len(edgesInHorizon)), hits,
8✔
2181
                        len(edgesInHorizon))
8✔
2182
        } else {
147✔
2183
                log.Debugf("ChanUpdatesInHorizon returned no edges in "+
139✔
2184
                        "horizon (%s, %s)", startTime, endTime)
139✔
2185
        }
139✔
2186

2187
        return edgesInHorizon, nil
144✔
2188
}
2189

2190
// NodeUpdatesInHorizon returns all the known lightning node which have an
2191
// update timestamp within the passed range. This method can be used by two
2192
// nodes to quickly determine if they have the same set of up to date node
2193
// announcements.
2194
func (c *KVStore) NodeUpdatesInHorizon(startTime,
2195
        endTime time.Time) ([]models.LightningNode, error) {
11✔
2196

11✔
2197
        var nodesInHorizon []models.LightningNode
11✔
2198

11✔
2199
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
22✔
2200
                nodes := tx.ReadBucket(nodeBucket)
11✔
2201
                if nodes == nil {
11✔
2202
                        return ErrGraphNodesNotFound
×
2203
                }
×
2204

2205
                nodeUpdateIndex := nodes.NestedReadBucket(nodeUpdateIndexBucket)
11✔
2206
                if nodeUpdateIndex == nil {
11✔
2207
                        return ErrGraphNodesNotFound
×
2208
                }
×
2209

2210
                // We'll now obtain a cursor to perform a range query within
2211
                // the index to find all node announcements within the horizon.
2212
                updateCursor := nodeUpdateIndex.ReadCursor()
11✔
2213

11✔
2214
                var startTimeBytes, endTimeBytes [8 + 33]byte
11✔
2215
                byteOrder.PutUint64(
11✔
2216
                        startTimeBytes[:8], uint64(startTime.Unix()),
11✔
2217
                )
11✔
2218
                byteOrder.PutUint64(
11✔
2219
                        endTimeBytes[:8], uint64(endTime.Unix()),
11✔
2220
                )
11✔
2221

11✔
2222
                // With our start and end times constructed, we'll step through
11✔
2223
                // the index collecting info for each node within the time
11✔
2224
                // range.
11✔
2225
                //
11✔
2226
                //nolint:ll
11✔
2227
                for indexKey, _ := updateCursor.Seek(startTimeBytes[:]); indexKey != nil &&
11✔
2228
                        bytes.Compare(indexKey, endTimeBytes[:]) <= 0; indexKey, _ = updateCursor.Next() {
43✔
2229
                        nodePub := indexKey[8:]
32✔
2230
                        node, err := fetchLightningNode(nodes, nodePub)
32✔
2231
                        if err != nil {
32✔
2232
                                return err
×
2233
                        }
×
2234

2235
                        nodesInHorizon = append(nodesInHorizon, node)
32✔
2236
                }
2237

2238
                return nil
11✔
2239
        }, func() {
11✔
2240
                nodesInHorizon = nil
11✔
2241
        })
11✔
2242
        switch {
11✔
2243
        case errors.Is(err, ErrGraphNoEdgesFound):
×
2244
                fallthrough
×
2245
        case errors.Is(err, ErrGraphNodesNotFound):
×
2246
                break
×
2247

2248
        case err != nil:
×
2249
                return nil, err
×
2250
        }
2251

2252
        return nodesInHorizon, nil
11✔
2253
}
2254

2255
// FilterKnownChanIDs takes a set of channel IDs and return the subset of chan
2256
// ID's that we don't know and are not known zombies of the passed set. In other
2257
// words, we perform a set difference of our set of chan ID's and the ones
2258
// passed in. This method can be used by callers to determine the set of
2259
// channels another peer knows of that we don't. The ChannelUpdateInfos for the
2260
// known zombies is also returned.
2261
func (c *KVStore) FilterKnownChanIDs(chansInfo []ChannelUpdateInfo) ([]uint64,
2262
        []ChannelUpdateInfo, error) {
127✔
2263

127✔
2264
        var (
127✔
2265
                newChanIDs   []uint64
127✔
2266
                knownZombies []ChannelUpdateInfo
127✔
2267
        )
127✔
2268

127✔
2269
        c.cacheMu.Lock()
127✔
2270
        defer c.cacheMu.Unlock()
127✔
2271

127✔
2272
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
254✔
2273
                edges := tx.ReadBucket(edgeBucket)
127✔
2274
                if edges == nil {
127✔
2275
                        return ErrGraphNoEdgesFound
×
2276
                }
×
2277
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
127✔
2278
                if edgeIndex == nil {
127✔
2279
                        return ErrGraphNoEdgesFound
×
2280
                }
×
2281

2282
                // Fetch the zombie index, it may not exist if no edges have
2283
                // ever been marked as zombies. If the index has been
2284
                // initialized, we will use it later to skip known zombie edges.
2285
                zombieIndex := edges.NestedReadBucket(zombieBucket)
127✔
2286

127✔
2287
                // We'll run through the set of chanIDs and collate only the
127✔
2288
                // set of channel that are unable to be found within our db.
127✔
2289
                var cidBytes [8]byte
127✔
2290
                for _, info := range chansInfo {
237✔
2291
                        scid := info.ShortChannelID.ToUint64()
110✔
2292
                        byteOrder.PutUint64(cidBytes[:], scid)
110✔
2293

110✔
2294
                        // If the edge is already known, skip it.
110✔
2295
                        if v := edgeIndex.Get(cidBytes[:]); v != nil {
134✔
2296
                                continue
24✔
2297
                        }
2298

2299
                        // If the edge is a known zombie, skip it.
2300
                        if zombieIndex != nil {
178✔
2301
                                isZombie, _, _ := isZombieEdge(
89✔
2302
                                        zombieIndex, scid,
89✔
2303
                                )
89✔
2304

89✔
2305
                                if isZombie {
131✔
2306
                                        knownZombies = append(
42✔
2307
                                                knownZombies, info,
42✔
2308
                                        )
42✔
2309

42✔
2310
                                        continue
42✔
2311
                                }
2312
                        }
2313

2314
                        newChanIDs = append(newChanIDs, scid)
47✔
2315
                }
2316

2317
                return nil
127✔
2318
        }, func() {
127✔
2319
                newChanIDs = nil
127✔
2320
                knownZombies = nil
127✔
2321
        })
127✔
2322
        switch {
127✔
2323
        // If we don't know of any edges yet, then we'll return the entire set
2324
        // of chan IDs specified.
2325
        case errors.Is(err, ErrGraphNoEdgesFound):
×
2326
                ogChanIDs := make([]uint64, len(chansInfo))
×
2327
                for i, info := range chansInfo {
×
2328
                        ogChanIDs[i] = info.ShortChannelID.ToUint64()
×
2329
                }
×
2330

2331
                return ogChanIDs, nil, nil
×
2332

2333
        case err != nil:
×
2334
                return nil, nil, err
×
2335
        }
2336

2337
        return newChanIDs, knownZombies, nil
127✔
2338
}
2339

2340
// ChannelUpdateInfo couples the SCID of a channel with the timestamps of the
2341
// latest received channel updates for the channel.
2342
type ChannelUpdateInfo struct {
2343
        // ShortChannelID is the SCID identifier of the channel.
2344
        ShortChannelID lnwire.ShortChannelID
2345

2346
        // Node1UpdateTimestamp is the timestamp of the latest received update
2347
        // from the node 1 channel peer. This will be set to zero time if no
2348
        // update has yet been received from this node.
2349
        Node1UpdateTimestamp time.Time
2350

2351
        // Node2UpdateTimestamp is the timestamp of the latest received update
2352
        // from the node 2 channel peer. This will be set to zero time if no
2353
        // update has yet been received from this node.
2354
        Node2UpdateTimestamp time.Time
2355
}
2356

2357
// NewChannelUpdateInfo is a constructor which makes sure we initialize the
2358
// timestamps with zero seconds unix timestamp which equals
2359
// `January 1, 1970, 00:00:00 UTC` in case the value is `time.Time{}`.
2360
func NewChannelUpdateInfo(scid lnwire.ShortChannelID, node1Timestamp,
2361
        node2Timestamp time.Time) ChannelUpdateInfo {
199✔
2362

199✔
2363
        chanInfo := ChannelUpdateInfo{
199✔
2364
                ShortChannelID:       scid,
199✔
2365
                Node1UpdateTimestamp: node1Timestamp,
199✔
2366
                Node2UpdateTimestamp: node2Timestamp,
199✔
2367
        }
199✔
2368

199✔
2369
        if node1Timestamp.IsZero() {
388✔
2370
                chanInfo.Node1UpdateTimestamp = time.Unix(0, 0)
189✔
2371
        }
189✔
2372

2373
        if node2Timestamp.IsZero() {
388✔
2374
                chanInfo.Node2UpdateTimestamp = time.Unix(0, 0)
189✔
2375
        }
189✔
2376

2377
        return chanInfo
199✔
2378
}
2379

2380
// BlockChannelRange represents a range of channels for a given block height.
2381
type BlockChannelRange struct {
2382
        // Height is the height of the block all of the channels below were
2383
        // included in.
2384
        Height uint32
2385

2386
        // Channels is the list of channels identified by their short ID
2387
        // representation known to us that were included in the block height
2388
        // above. The list may include channel update timestamp information if
2389
        // requested.
2390
        Channels []ChannelUpdateInfo
2391
}
2392

2393
// FilterChannelRange returns the channel ID's of all known channels which were
2394
// mined in a block height within the passed range. The channel IDs are grouped
2395
// by their common block height. This method can be used to quickly share with a
2396
// peer the set of channels we know of within a particular range to catch them
2397
// up after a period of time offline. If withTimestamps is true then the
2398
// timestamp info of the latest received channel update messages of the channel
2399
// will be included in the response.
2400
func (c *KVStore) FilterChannelRange(startHeight,
2401
        endHeight uint32, withTimestamps bool) ([]BlockChannelRange, error) {
14✔
2402

14✔
2403
        startChanID := &lnwire.ShortChannelID{
14✔
2404
                BlockHeight: startHeight,
14✔
2405
        }
14✔
2406

14✔
2407
        endChanID := lnwire.ShortChannelID{
14✔
2408
                BlockHeight: endHeight,
14✔
2409
                TxIndex:     math.MaxUint32 & 0x00ffffff,
14✔
2410
                TxPosition:  math.MaxUint16,
14✔
2411
        }
14✔
2412

14✔
2413
        // As we need to perform a range scan, we'll convert the starting and
14✔
2414
        // ending height to their corresponding values when encoded using short
14✔
2415
        // channel ID's.
14✔
2416
        var chanIDStart, chanIDEnd [8]byte
14✔
2417
        byteOrder.PutUint64(chanIDStart[:], startChanID.ToUint64())
14✔
2418
        byteOrder.PutUint64(chanIDEnd[:], endChanID.ToUint64())
14✔
2419

14✔
2420
        var channelsPerBlock map[uint32][]ChannelUpdateInfo
14✔
2421
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
28✔
2422
                edges := tx.ReadBucket(edgeBucket)
14✔
2423
                if edges == nil {
14✔
2424
                        return ErrGraphNoEdgesFound
×
2425
                }
×
2426
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
14✔
2427
                if edgeIndex == nil {
14✔
2428
                        return ErrGraphNoEdgesFound
×
2429
                }
×
2430

2431
                cursor := edgeIndex.ReadCursor()
14✔
2432

14✔
2433
                // We'll now iterate through the database, and find each
14✔
2434
                // channel ID that resides within the specified range.
14✔
2435
                //
14✔
2436
                //nolint:ll
14✔
2437
                for k, v := cursor.Seek(chanIDStart[:]); k != nil &&
14✔
2438
                        bytes.Compare(k, chanIDEnd[:]) <= 0; k, v = cursor.Next() {
61✔
2439
                        // Don't send alias SCIDs during gossip sync.
47✔
2440
                        edgeReader := bytes.NewReader(v)
47✔
2441
                        edgeInfo, err := deserializeChanEdgeInfo(edgeReader)
47✔
2442
                        if err != nil {
47✔
2443
                                return err
×
2444
                        }
×
2445

2446
                        if edgeInfo.AuthProof == nil {
50✔
2447
                                continue
3✔
2448
                        }
2449

2450
                        // This channel ID rests within the target range, so
2451
                        // we'll add it to our returned set.
2452
                        rawCid := byteOrder.Uint64(k)
47✔
2453
                        cid := lnwire.NewShortChanIDFromInt(rawCid)
47✔
2454

47✔
2455
                        chanInfo := NewChannelUpdateInfo(
47✔
2456
                                cid, time.Time{}, time.Time{},
47✔
2457
                        )
47✔
2458

47✔
2459
                        if !withTimestamps {
69✔
2460
                                channelsPerBlock[cid.BlockHeight] = append(
22✔
2461
                                        channelsPerBlock[cid.BlockHeight],
22✔
2462
                                        chanInfo,
22✔
2463
                                )
22✔
2464

22✔
2465
                                continue
22✔
2466
                        }
2467

2468
                        node1Key, node2Key := computeEdgePolicyKeys(&edgeInfo)
25✔
2469

25✔
2470
                        rawPolicy := edges.Get(node1Key)
25✔
2471
                        if len(rawPolicy) != 0 {
34✔
2472
                                r := bytes.NewReader(rawPolicy)
9✔
2473

9✔
2474
                                edge, err := deserializeChanEdgePolicyRaw(r)
9✔
2475
                                if err != nil && !errors.Is(
9✔
2476
                                        err, ErrEdgePolicyOptionalFieldNotFound,
9✔
2477
                                ) && !errors.Is(err, ErrParsingExtraTLVBytes) {
9✔
2478

×
2479
                                        return err
×
2480
                                }
×
2481

2482
                                chanInfo.Node1UpdateTimestamp = edge.LastUpdate
9✔
2483
                        }
2484

2485
                        rawPolicy = edges.Get(node2Key)
25✔
2486
                        if len(rawPolicy) != 0 {
39✔
2487
                                r := bytes.NewReader(rawPolicy)
14✔
2488

14✔
2489
                                edge, err := deserializeChanEdgePolicyRaw(r)
14✔
2490
                                if err != nil && !errors.Is(
14✔
2491
                                        err, ErrEdgePolicyOptionalFieldNotFound,
14✔
2492
                                ) && !errors.Is(err, ErrParsingExtraTLVBytes) {
14✔
2493

×
2494
                                        return err
×
2495
                                }
×
2496

2497
                                chanInfo.Node2UpdateTimestamp = edge.LastUpdate
14✔
2498
                        }
2499

2500
                        channelsPerBlock[cid.BlockHeight] = append(
25✔
2501
                                channelsPerBlock[cid.BlockHeight], chanInfo,
25✔
2502
                        )
25✔
2503
                }
2504

2505
                return nil
14✔
2506
        }, func() {
14✔
2507
                channelsPerBlock = make(map[uint32][]ChannelUpdateInfo)
14✔
2508
        })
14✔
2509

2510
        switch {
14✔
2511
        // If we don't know of any channels yet, then there's nothing to
2512
        // filter, so we'll return an empty slice.
2513
        case errors.Is(err, ErrGraphNoEdgesFound) || len(channelsPerBlock) == 0:
6✔
2514
                return nil, nil
6✔
2515

2516
        case err != nil:
×
2517
                return nil, err
×
2518
        }
2519

2520
        // Return the channel ranges in ascending block height order.
2521
        blocks := make([]uint32, 0, len(channelsPerBlock))
11✔
2522
        for block := range channelsPerBlock {
36✔
2523
                blocks = append(blocks, block)
25✔
2524
        }
25✔
2525
        sort.Slice(blocks, func(i, j int) bool {
33✔
2526
                return blocks[i] < blocks[j]
22✔
2527
        })
22✔
2528

2529
        channelRanges := make([]BlockChannelRange, 0, len(channelsPerBlock))
11✔
2530
        for _, block := range blocks {
36✔
2531
                channelRanges = append(channelRanges, BlockChannelRange{
25✔
2532
                        Height:   block,
25✔
2533
                        Channels: channelsPerBlock[block],
25✔
2534
                })
25✔
2535
        }
25✔
2536

2537
        return channelRanges, nil
11✔
2538
}
2539

2540
// FetchChanInfos returns the set of channel edges that correspond to the passed
2541
// channel ID's. If an edge is the query is unknown to the database, it will
2542
// skipped and the result will contain only those edges that exist at the time
2543
// of the query. This can be used to respond to peer queries that are seeking to
2544
// fill in gaps in their view of the channel graph.
2545
func (c *KVStore) FetchChanInfos(chanIDs []uint64) ([]ChannelEdge, error) {
7✔
2546
        return c.fetchChanInfos(nil, chanIDs)
7✔
2547
}
7✔
2548

2549
// fetchChanInfos returns the set of channel edges that correspond to the passed
2550
// channel ID's. If an edge is the query is unknown to the database, it will
2551
// skipped and the result will contain only those edges that exist at the time
2552
// of the query. This can be used to respond to peer queries that are seeking to
2553
// fill in gaps in their view of the channel graph.
2554
//
2555
// NOTE: An optional transaction may be provided. If none is provided, then a
2556
// new one will be created.
2557
func (c *KVStore) fetchChanInfos(tx kvdb.RTx, chanIDs []uint64) (
2558
        []ChannelEdge, error) {
7✔
2559
        // TODO(roasbeef): sort cids?
7✔
2560

7✔
2561
        var (
7✔
2562
                chanEdges []ChannelEdge
7✔
2563
                cidBytes  [8]byte
7✔
2564
        )
7✔
2565

7✔
2566
        fetchChanInfos := func(tx kvdb.RTx) error {
14✔
2567
                edges := tx.ReadBucket(edgeBucket)
7✔
2568
                if edges == nil {
7✔
2569
                        return ErrGraphNoEdgesFound
×
2570
                }
×
2571
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
7✔
2572
                if edgeIndex == nil {
7✔
2573
                        return ErrGraphNoEdgesFound
×
2574
                }
×
2575
                nodes := tx.ReadBucket(nodeBucket)
7✔
2576
                if nodes == nil {
7✔
2577
                        return ErrGraphNotFound
×
2578
                }
×
2579

2580
                for _, cid := range chanIDs {
21✔
2581
                        byteOrder.PutUint64(cidBytes[:], cid)
14✔
2582

14✔
2583
                        // First, we'll fetch the static edge information. If
14✔
2584
                        // the edge is unknown, we will skip the edge and
14✔
2585
                        // continue gathering all known edges.
14✔
2586
                        edgeInfo, err := fetchChanEdgeInfo(
14✔
2587
                                edgeIndex, cidBytes[:],
14✔
2588
                        )
14✔
2589
                        switch {
14✔
2590
                        case errors.Is(err, ErrEdgeNotFound):
3✔
2591
                                continue
3✔
2592
                        case err != nil:
×
2593
                                return err
×
2594
                        }
2595

2596
                        // With the static information obtained, we'll now
2597
                        // fetch the dynamic policy info.
2598
                        edge1, edge2, err := fetchChanEdgePolicies(
11✔
2599
                                edgeIndex, edges, cidBytes[:],
11✔
2600
                        )
11✔
2601
                        if err != nil {
11✔
2602
                                return err
×
2603
                        }
×
2604

2605
                        node1, err := fetchLightningNode(
11✔
2606
                                nodes, edgeInfo.NodeKey1Bytes[:],
11✔
2607
                        )
11✔
2608
                        if err != nil {
11✔
2609
                                return err
×
2610
                        }
×
2611

2612
                        node2, err := fetchLightningNode(
11✔
2613
                                nodes, edgeInfo.NodeKey2Bytes[:],
11✔
2614
                        )
11✔
2615
                        if err != nil {
11✔
2616
                                return err
×
2617
                        }
×
2618

2619
                        chanEdges = append(chanEdges, ChannelEdge{
11✔
2620
                                Info:    &edgeInfo,
11✔
2621
                                Policy1: edge1,
11✔
2622
                                Policy2: edge2,
11✔
2623
                                Node1:   &node1,
11✔
2624
                                Node2:   &node2,
11✔
2625
                        })
11✔
2626
                }
2627

2628
                return nil
7✔
2629
        }
2630

2631
        if tx == nil {
14✔
2632
                err := kvdb.View(c.db, fetchChanInfos, func() {
14✔
2633
                        chanEdges = nil
7✔
2634
                })
7✔
2635
                if err != nil {
7✔
2636
                        return nil, err
×
2637
                }
×
2638

2639
                return chanEdges, nil
7✔
2640
        }
2641

2642
        err := fetchChanInfos(tx)
×
2643
        if err != nil {
×
2644
                return nil, err
×
2645
        }
×
2646

2647
        return chanEdges, nil
×
2648
}
2649

2650
func delEdgeUpdateIndexEntry(edgesBucket kvdb.RwBucket, chanID uint64,
2651
        edge1, edge2 *models.ChannelEdgePolicy) error {
140✔
2652

140✔
2653
        // First, we'll fetch the edge update index bucket which currently
140✔
2654
        // stores an entry for the channel we're about to delete.
140✔
2655
        updateIndex := edgesBucket.NestedReadWriteBucket(edgeUpdateIndexBucket)
140✔
2656
        if updateIndex == nil {
140✔
2657
                // No edges in bucket, return early.
×
2658
                return nil
×
2659
        }
×
2660

2661
        // Now that we have the bucket, we'll attempt to construct a template
2662
        // for the index key: updateTime || chanid.
2663
        var indexKey [8 + 8]byte
140✔
2664
        byteOrder.PutUint64(indexKey[8:], chanID)
140✔
2665

140✔
2666
        // With the template constructed, we'll attempt to delete an entry that
140✔
2667
        // would have been created by both edges: we'll alternate the update
140✔
2668
        // times, as one may had overridden the other.
140✔
2669
        if edge1 != nil {
153✔
2670
                byteOrder.PutUint64(
13✔
2671
                        indexKey[:8], uint64(edge1.LastUpdate.Unix()),
13✔
2672
                )
13✔
2673
                if err := updateIndex.Delete(indexKey[:]); err != nil {
13✔
2674
                        return err
×
2675
                }
×
2676
        }
2677

2678
        // We'll also attempt to delete the entry that may have been created by
2679
        // the second edge.
2680
        if edge2 != nil {
155✔
2681
                byteOrder.PutUint64(
15✔
2682
                        indexKey[:8], uint64(edge2.LastUpdate.Unix()),
15✔
2683
                )
15✔
2684
                if err := updateIndex.Delete(indexKey[:]); err != nil {
15✔
2685
                        return err
×
2686
                }
×
2687
        }
2688

2689
        return nil
140✔
2690
}
2691

2692
// delChannelEdgeUnsafe deletes the edge with the given chanID from the graph
// cache. It then goes on to delete any policy info and edge info for this
// channel from the DB and finally, if isZombie is true, it will add an entry
// for this channel in the zombie index.
//
// On success it returns the ChannelEdgeInfo that was removed. The strictZombie
// flag only has an effect when isZombie is true: it narrows which node(s) may
// later resurrect the channel based on which side's policy is more recent (see
// makeZombiePubkeys).
//
// NOTE: this method MUST only be called if the cacheMu has already been
// acquired.
func (c *KVStore) delChannelEdgeUnsafe(edges, edgeIndex, chanIndex,
	zombieIndex kvdb.RwBucket, chanID []byte, isZombie,
	strictZombie bool) (*models.ChannelEdgeInfo, error) {

	// Fetch the static channel info first; if the channel is unknown we
	// bail out before touching any of the other indexes.
	edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID)
	if err != nil {
		return nil, err
	}

	// We'll also remove the entry in the edge update index bucket before
	// we delete the edges themselves so we can access their last update
	// times.
	cid := byteOrder.Uint64(chanID)
	edge1, edge2, err := fetchChanEdgePolicies(edgeIndex, edges, chanID)
	if err != nil {
		return nil, err
	}
	err = delEdgeUpdateIndexEntry(edges, cid, edge1, edge2)
	if err != nil {
		return nil, err
	}

	// The edge key is of the format pubKey || chanID. First we construct
	// the latter half, populating the channel ID.
	var edgeKey [33 + 8]byte
	copy(edgeKey[33:], chanID)

	// With the latter half constructed, copy over the first public key to
	// delete the edge in this direction, then the second to delete the
	// edge in the opposite direction.
	copy(edgeKey[:33], edgeInfo.NodeKey1Bytes[:])
	if edges.Get(edgeKey[:]) != nil {
		if err := edges.Delete(edgeKey[:]); err != nil {
			return nil, err
		}
	}
	copy(edgeKey[:33], edgeInfo.NodeKey2Bytes[:])
	if edges.Get(edgeKey[:]) != nil {
		if err := edges.Delete(edgeKey[:]); err != nil {
			return nil, err
		}
	}

	// As part of deleting the edge we also remove all disabled entries
	// from the edgePolicyDisabledIndex bucket. We do that for both
	// directions.
	err = updateEdgePolicyDisabledIndex(edges, cid, false, false)
	if err != nil {
		return nil, err
	}
	err = updateEdgePolicyDisabledIndex(edges, cid, true, false)
	if err != nil {
		return nil, err
	}

	// With the edge data deleted, we can purge the information from the two
	// edge indexes.
	if err := edgeIndex.Delete(chanID); err != nil {
		return nil, err
	}
	var b bytes.Buffer
	if err := WriteOutpoint(&b, &edgeInfo.ChannelPoint); err != nil {
		return nil, err
	}
	if err := chanIndex.Delete(b.Bytes()); err != nil {
		return nil, err
	}

	// Finally, we'll mark the edge as a zombie within our index if it's
	// being removed due to the channel becoming a zombie. We do this to
	// ensure we don't store unnecessary data for spent channels.
	if !isZombie {
		return &edgeInfo, nil
	}

	// By default both node keys are recorded, allowing either party to
	// resurrect the channel. In strict mode we derive which key(s) to
	// record from the policies' last-update times instead.
	nodeKey1, nodeKey2 := edgeInfo.NodeKey1Bytes, edgeInfo.NodeKey2Bytes
	if strictZombie {
		var e1UpdateTime, e2UpdateTime *time.Time
		if edge1 != nil {
			e1UpdateTime = &edge1.LastUpdate
		}
		if edge2 != nil {
			e2UpdateTime = &edge2.LastUpdate
		}

		nodeKey1, nodeKey2 = makeZombiePubkeys(
			&edgeInfo, e1UpdateTime, e2UpdateTime,
		)
	}

	return &edgeInfo, markEdgeZombie(
		zombieIndex, byteOrder.Uint64(chanID), nodeKey1, nodeKey2,
	)
}
2793

2794
// makeZombiePubkeys derives the node pubkeys to store in the zombie index for a
2795
// particular pair of channel policies. The return values are one of:
2796
//  1. (pubkey1, pubkey2)
2797
//  2. (pubkey1, blank)
2798
//  3. (blank, pubkey2)
2799
//
2800
// A blank pubkey means that corresponding node will be unable to resurrect a
2801
// channel on its own. For example, node1 may continue to publish recent
2802
// updates, but node2 has fallen way behind. After marking an edge as a zombie,
2803
// we don't want another fresh update from node1 to resurrect, as the edge can
2804
// only become live once node2 finally sends something recent.
2805
//
2806
// In the case where we have neither update, we allow either party to resurrect
2807
// the channel. If the channel were to be marked zombie again, it would be
2808
// marked with the correct lagging channel since we received an update from only
2809
// one side.
2810
func makeZombiePubkeys(info *models.ChannelEdgeInfo,
2811
        e1, e2 *time.Time) ([33]byte, [33]byte) {
5✔
2812

5✔
2813
        switch {
5✔
2814
        // If we don't have either edge policy, we'll return both pubkeys so
2815
        // that the channel can be resurrected by either party.
2816
        case e1 == nil && e2 == nil:
2✔
2817
                return info.NodeKey1Bytes, info.NodeKey2Bytes
2✔
2818

2819
        // If we're missing edge1, or if both edges are present but edge1 is
2820
        // older, we'll return edge1's pubkey and a blank pubkey for edge2. This
2821
        // means that only an update from edge1 will be able to resurrect the
2822
        // channel.
2823
        case e1 == nil || (e2 != nil && e1.Before(*e2)):
1✔
2824
                return info.NodeKey1Bytes, [33]byte{}
1✔
2825

2826
        // Otherwise, we're missing edge2 or edge2 is the older side, so we
2827
        // return a blank pubkey for edge1. In this case, only an update from
2828
        // edge2 can resurect the channel.
2829
        default:
2✔
2830
                return [33]byte{}, info.NodeKey2Bytes
2✔
2831
        }
2832
}
2833

2834
// UpdateEdgePolicy updates the edge routing policy for a single directed edge
2835
// within the database for the referenced channel. The `flags` attribute within
2836
// the ChannelEdgePolicy determines which of the directed edges are being
2837
// updated. If the flag is 1, then the first node's information is being
2838
// updated, otherwise it's the second node's information. The node ordering is
2839
// determined by the lexicographical ordering of the identity public keys of the
2840
// nodes on either side of the channel.
2841
func (c *KVStore) UpdateEdgePolicy(ctx context.Context,
2842
        edge *models.ChannelEdgePolicy,
2843
        opts ...batch.SchedulerOption) (route.Vertex, route.Vertex, error) {
2,675✔
2844

2,675✔
2845
        var (
2,675✔
2846
                isUpdate1    bool
2,675✔
2847
                edgeNotFound bool
2,675✔
2848
                from, to     route.Vertex
2,675✔
2849
        )
2,675✔
2850

2,675✔
2851
        r := &batch.Request[kvdb.RwTx]{
2,675✔
2852
                Opts: batch.NewSchedulerOptions(opts...),
2,675✔
2853
                Reset: func() {
5,351✔
2854
                        isUpdate1 = false
2,676✔
2855
                        edgeNotFound = false
2,676✔
2856
                },
2,676✔
2857
                Do: func(tx kvdb.RwTx) error {
2,676✔
2858
                        // Validate that the ExtraOpaqueData is in fact a valid
2,676✔
2859
                        // TLV stream. This is done here instead of within
2,676✔
2860
                        // updateEdgePolicy so that updateEdgePolicy can be used
2,676✔
2861
                        // by unit tests to recreate the case where we already
2,676✔
2862
                        // have nodes persisted with invalid TLV data.
2,676✔
2863
                        err := edge.ExtraOpaqueData.ValidateTLV()
2,676✔
2864
                        if err != nil {
2,678✔
2865
                                return fmt.Errorf("%w: %w",
2✔
2866
                                        ErrParsingExtraTLVBytes, err)
2✔
2867
                        }
2✔
2868

2869
                        from, to, isUpdate1, err = updateEdgePolicy(tx, edge)
2,674✔
2870
                        if err != nil {
2,678✔
2871
                                log.Errorf("UpdateEdgePolicy faild: %v", err)
4✔
2872
                        }
4✔
2873

2874
                        // Silence ErrEdgeNotFound so that the batch can
2875
                        // succeed, but propagate the error via local state.
2876
                        if errors.Is(err, ErrEdgeNotFound) {
2,678✔
2877
                                edgeNotFound = true
4✔
2878
                                return nil
4✔
2879
                        }
4✔
2880

2881
                        return err
2,670✔
2882
                },
2883
                OnCommit: func(err error) error {
2,675✔
2884
                        switch {
2,675✔
2885
                        case err != nil:
1✔
2886
                                return err
1✔
2887
                        case edgeNotFound:
4✔
2888
                                return ErrEdgeNotFound
4✔
2889
                        default:
2,670✔
2890
                                c.updateEdgeCache(edge, isUpdate1)
2,670✔
2891
                                return nil
2,670✔
2892
                        }
2893
                },
2894
        }
2895

2896
        err := c.chanScheduler.Execute(ctx, r)
2,675✔
2897

2,675✔
2898
        return from, to, err
2,675✔
2899
}
2900

2901
func (c *KVStore) updateEdgeCache(e *models.ChannelEdgePolicy,
2902
        isUpdate1 bool) {
2,670✔
2903

2,670✔
2904
        // If an entry for this channel is found in reject cache, we'll modify
2,670✔
2905
        // the entry with the updated timestamp for the direction that was just
2,670✔
2906
        // written. If the edge doesn't exist, we'll load the cache entry lazily
2,670✔
2907
        // during the next query for this edge.
2,670✔
2908
        if entry, ok := c.rejectCache.get(e.ChannelID); ok {
2,678✔
2909
                if isUpdate1 {
14✔
2910
                        entry.upd1Time = e.LastUpdate.Unix()
6✔
2911
                } else {
11✔
2912
                        entry.upd2Time = e.LastUpdate.Unix()
5✔
2913
                }
5✔
2914
                c.rejectCache.insert(e.ChannelID, entry)
8✔
2915
        }
2916

2917
        // If an entry for this channel is found in channel cache, we'll modify
2918
        // the entry with the updated policy for the direction that was just
2919
        // written. If the edge doesn't exist, we'll defer loading the info and
2920
        // policies and lazily read from disk during the next query.
2921
        if channel, ok := c.chanCache.get(e.ChannelID); ok {
2,673✔
2922
                if isUpdate1 {
6✔
2923
                        channel.Policy1 = e
3✔
2924
                } else {
6✔
2925
                        channel.Policy2 = e
3✔
2926
                }
3✔
2927
                c.chanCache.insert(e.ChannelID, channel)
3✔
2928
        }
2929
}
2930

2931
// updateEdgePolicy attempts to update an edge's policy within the relevant
// buckets using an existing database transaction. The returned boolean will be
// true if the updated policy belongs to node1, and false if the policy belonged
// to node2. The two returned vertices are the (from, to) node pubkeys for the
// direction that was written. ErrEdgeNotFound is returned if the channel (or
// the required buckets) do not exist.
func updateEdgePolicy(tx kvdb.RwTx, edge *models.ChannelEdgePolicy) (
	route.Vertex, route.Vertex, bool, error) {

	var noVertex route.Vertex

	edges := tx.ReadWriteBucket(edgeBucket)
	if edges == nil {
		return noVertex, noVertex, false, ErrEdgeNotFound
	}
	edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
	if edgeIndex == nil {
		return noVertex, noVertex, false, ErrEdgeNotFound
	}

	// Create the channelID key be converting the channel ID
	// integer into a byte slice.
	var chanID [8]byte
	byteOrder.PutUint64(chanID[:], edge.ChannelID)

	// With the channel ID, we then fetch the value storing the two
	// nodes which connect this channel edge.
	nodeInfo := edgeIndex.Get(chanID[:])
	if nodeInfo == nil {
		return noVertex, noVertex, false, ErrEdgeNotFound
	}

	// Depending on the flags value passed above, either the first
	// or second edge policy is being updated. The edge index value is laid
	// out as node1Pub (33 bytes) || node2Pub (33 bytes) || ..., which is
	// what the fixed offsets below slice into.
	var fromNode, toNode []byte
	var isUpdate1 bool
	if edge.ChannelFlags&lnwire.ChanUpdateDirection == 0 {
		fromNode = nodeInfo[:33]
		toNode = nodeInfo[33:66]
		isUpdate1 = true
	} else {
		fromNode = nodeInfo[33:66]
		toNode = nodeInfo[:33]
		isUpdate1 = false
	}

	// Finally, with the direction of the edge being updated
	// identified, we update the on-disk edge representation.
	err := putChanEdgePolicy(edges, edge, fromNode, toNode)
	if err != nil {
		return noVertex, noVertex, false, err
	}

	var (
		fromNodePubKey route.Vertex
		toNodePubKey   route.Vertex
	)
	copy(fromNodePubKey[:], fromNode)
	copy(toNodePubKey[:], toNode)

	return fromNodePubKey, toNodePubKey, isUpdate1, nil
}
2991

2992
// isPublic determines whether the node is seen as public within the graph from
// the source node's point of view. An existing database transaction can also be
// specified.
//
// A node counts as public if it has at least one channel that either (a)
// extends to some node other than the source, or (b) connects to the source
// but carries an announcement proof.
func (c *KVStore) isPublic(tx kvdb.RTx, nodePub route.Vertex,
	sourcePubKey []byte) (bool, error) {

	// In order to determine whether this node is publicly advertised within
	// the graph, we'll need to look at all of its edges and check whether
	// they extend to any other node than the source node. errDone will be
	// used to terminate the check early.
	nodeIsPublic := false
	errDone := errors.New("done")
	err := c.forEachNodeChannelTx(tx, nodePub, func(tx kvdb.RTx,
		info *models.ChannelEdgeInfo, _ *models.ChannelEdgePolicy,
		_ *models.ChannelEdgePolicy) error {

		// If this edge doesn't extend to the source node, we'll
		// terminate our search as we can now conclude that the node is
		// publicly advertised within the graph due to the local node
		// knowing of the current edge.
		if !bytes.Equal(info.NodeKey1Bytes[:], sourcePubKey) &&
			!bytes.Equal(info.NodeKey2Bytes[:], sourcePubKey) {

			nodeIsPublic = true
			return errDone
		}

		// Since the edge _does_ extend to the source node, we'll also
		// need to ensure that this is a public edge.
		if info.AuthProof != nil {
			nodeIsPublic = true
			return errDone
		}

		// Otherwise, we'll continue our search.
		return nil
	}, func() {
		// Reset callback: a retried transaction must start from the
		// pessimistic default again.
		nodeIsPublic = false
	})
	// errDone is our own early-exit sentinel, not a real failure.
	if err != nil && !errors.Is(err, errDone) {
		return false, err
	}

	return nodeIsPublic, nil
}
3037

3038
// FetchLightningNodeTx attempts to look up a target node by its identity
// public key. If the node isn't found in the database, then
// ErrGraphNodeNotFound is returned. An optional transaction may be provided.
// If none is provided, then a new one will be created.
func (c *KVStore) FetchLightningNodeTx(tx kvdb.RTx, nodePub route.Vertex) (
	*models.LightningNode, error) {

	// Delegate to the shared fetch helper, re-using the caller's
	// transaction when one was supplied.
	return c.fetchLightningNode(tx, nodePub)
}
3,654✔
3047

3048
// FetchLightningNode attempts to look up a target node by its identity public
// key. If the node isn't found in the database, then ErrGraphNodeNotFound is
// returned.
func (c *KVStore) FetchLightningNode(_ context.Context,
	nodePub route.Vertex) (*models.LightningNode, error) {

	// A nil transaction instructs the helper to open a fresh read
	// transaction for this lookup.
	return c.fetchLightningNode(nil, nodePub)
}
162✔
3056

3057
// fetchLightningNode attempts to look up a target node by its identity public
// key. If the node isn't found in the database, then ErrGraphNodeNotFound is
// returned. An optional transaction may be provided. If none is provided, then
// a new one will be created.
func (c *KVStore) fetchLightningNode(tx kvdb.RTx,
	nodePub route.Vertex) (*models.LightningNode, error) {

	var node *models.LightningNode
	fetch := func(tx kvdb.RTx) error {
		// First grab the nodes bucket which stores the mapping from
		// pubKey to node information.
		nodes := tx.ReadBucket(nodeBucket)
		if nodes == nil {
			return ErrGraphNotFound
		}

		// If a key for this serialized public key isn't found, then
		// the target node doesn't exist within the database.
		nodeBytes := nodes.Get(nodePub[:])
		if nodeBytes == nil {
			return ErrGraphNodeNotFound
		}

		// If the node is found, then we can de deserialize the node
		// information to return to the user.
		nodeReader := bytes.NewReader(nodeBytes)
		n, err := deserializeLightningNode(nodeReader)
		if err != nil {
			return err
		}

		node = &n

		return nil
	}

	if tx == nil {
		// No transaction supplied: run the fetch inside a fresh view
		// transaction. The reset callback clears the captured result
		// in case the transaction is retried.
		err := kvdb.View(
			c.db, fetch, func() {
				node = nil
			},
		)
		if err != nil {
			return nil, err
		}

		return node, nil
	}

	// Otherwise re-use the caller's transaction directly.
	err := fetch(tx)
	if err != nil {
		return nil, err
	}

	return node, nil
}
3113

3114
// HasLightningNode determines if the graph has a vertex identified by the
3115
// target node identity public key. If the node exists in the database, a
3116
// timestamp of when the data for the node was lasted updated is returned along
3117
// with a true boolean. Otherwise, an empty time.Time is returned with a false
3118
// boolean.
3119
func (c *KVStore) HasLightningNode(_ context.Context,
3120
        nodePub [33]byte) (time.Time, bool, error) {
20✔
3121

20✔
3122
        var (
20✔
3123
                updateTime time.Time
20✔
3124
                exists     bool
20✔
3125
        )
20✔
3126

20✔
3127
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
40✔
3128
                // First grab the nodes bucket which stores the mapping from
20✔
3129
                // pubKey to node information.
20✔
3130
                nodes := tx.ReadBucket(nodeBucket)
20✔
3131
                if nodes == nil {
20✔
3132
                        return ErrGraphNotFound
×
3133
                }
×
3134

3135
                // If a key for this serialized public key isn't found, we can
3136
                // exit early.
3137
                nodeBytes := nodes.Get(nodePub[:])
20✔
3138
                if nodeBytes == nil {
26✔
3139
                        exists = false
6✔
3140
                        return nil
6✔
3141
                }
6✔
3142

3143
                // Otherwise we continue on to obtain the time stamp
3144
                // representing the last time the data for this node was
3145
                // updated.
3146
                nodeReader := bytes.NewReader(nodeBytes)
17✔
3147
                node, err := deserializeLightningNode(nodeReader)
17✔
3148
                if err != nil {
17✔
3149
                        return err
×
3150
                }
×
3151

3152
                exists = true
17✔
3153
                updateTime = node.LastUpdate
17✔
3154

17✔
3155
                return nil
17✔
3156
        }, func() {
20✔
3157
                updateTime = time.Time{}
20✔
3158
                exists = false
20✔
3159
        })
20✔
3160
        if err != nil {
20✔
3161
                return time.Time{}, exists, err
×
3162
        }
×
3163

3164
        return updateTime, exists, nil
20✔
3165
}
3166

3167
// nodeTraversal is used to traverse all channels of a node given by its
// public key and passes channel information into the specified callback.
// For each channel the callback receives the edge info, the outgoing policy
// (from the perspective of nodePub) and the incoming policy; either policy may
// be nil if unknown.
//
// NOTE: the reset param is only meaningful if the tx param is nil.
func nodeTraversal(tx kvdb.RTx, nodePub []byte, db kvdb.Backend,
	cb func(kvdb.RTx, *models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
		*models.ChannelEdgePolicy) error, reset func()) error {

	traversal := func(tx kvdb.RTx) error {
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNotFound
		}
		edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrGraphNoEdgesFound
		}

		// In order to reach all the edges for this node, we take
		// advantage of the construction of the key-space within the
		// edge bucket. The keys are stored in the form: pubKey ||
		// chanID. Therefore, starting from a chanID of zero, we can
		// scan forward in the bucket, grabbing all the edges for the
		// node. Once the prefix no longer matches, then we know we're
		// done.
		var nodeStart [33 + 8]byte
		copy(nodeStart[:], nodePub)
		copy(nodeStart[33:], chanStart[:])

		// Starting from the key pubKey || 0, we seek forward in the
		// bucket until the retrieved key no longer has the public key
		// as its prefix. This indicates that we've stepped over into
		// another node's edges, so we can terminate our scan.
		edgeCursor := edges.ReadCursor()
		for nodeEdge, _ := edgeCursor.Seek(nodeStart[:]); bytes.HasPrefix(nodeEdge, nodePub); nodeEdge, _ = edgeCursor.Next() { //nolint:ll
			// If the prefix still matches, the channel id is
			// returned in nodeEdge. Channel id is used to lookup
			// the node at the other end of the channel and both
			// edge policies.
			chanID := nodeEdge[33:]
			edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID)
			if err != nil {
				return err
			}

			// The policy keyed under our own pubkey is the one we
			// advertise, i.e. the outgoing direction.
			outgoingPolicy, err := fetchChanEdgePolicy(
				edges, chanID, nodePub,
			)
			if err != nil {
				return err
			}

			otherNode, err := edgeInfo.OtherNodeKeyBytes(nodePub)
			if err != nil {
				return err
			}

			// The counterparty's policy governs traffic towards
			// us, i.e. the incoming direction.
			incomingPolicy, err := fetchChanEdgePolicy(
				edges, chanID, otherNode[:],
			)
			if err != nil {
				return err
			}

			// Finally, we execute the callback.
			err = cb(tx, &edgeInfo, outgoingPolicy, incomingPolicy)
			if err != nil {
				return err
			}
		}

		return nil
	}

	// If no transaction was provided, then we'll create a new transaction
	// to execute the transaction within.
	if tx == nil {
		return kvdb.View(db, traversal, reset)
	}

	// Otherwise, we re-use the existing transaction to execute the graph
	// traversal.
	return traversal(tx)
}
3251

3252
// ForEachNodeChannel iterates through all channels of the given node,
3253
// executing the passed callback with an edge info structure and the policies
3254
// of each end of the channel. The first edge policy is the outgoing edge *to*
3255
// the connecting node, while the second is the incoming edge *from* the
3256
// connecting node. If the callback returns an error, then the iteration is
3257
// halted with the error propagated back up to the caller.
3258
//
3259
// Unknown policies are passed into the callback as nil values.
3260
func (c *KVStore) ForEachNodeChannel(_ context.Context, nodePub route.Vertex,
3261
        cb func(*models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
3262
                *models.ChannelEdgePolicy) error, reset func()) error {
9✔
3263

9✔
3264
        return nodeTraversal(
9✔
3265
                nil, nodePub[:], c.db, func(_ kvdb.RTx,
9✔
3266
                        info *models.ChannelEdgeInfo, policy,
9✔
3267
                        policy2 *models.ChannelEdgePolicy) error {
22✔
3268

13✔
3269
                        return cb(info, policy, policy2)
13✔
3270
                }, reset,
13✔
3271
        )
3272
}
3273

3274
// ForEachSourceNodeChannel iterates through all channels of the source node,
// executing the passed callback on each. The callback is provided with the
// channel's outpoint, whether we have a policy for the channel and the channel
// peer's node information.
func (c *KVStore) ForEachSourceNodeChannel(_ context.Context,
	cb func(chanPoint wire.OutPoint, havePolicy bool,
		otherNode *models.LightningNode) error, reset func()) error {

	return kvdb.View(c.db, func(tx kvdb.RTx) error {
		nodes := tx.ReadBucket(nodeBucket)
		if nodes == nil {
			return ErrGraphNotFound
		}

		// Resolve the source node within this transaction so the
		// traversal below can anchor on its pubkey.
		node, err := sourceNodeWithTx(nodes)
		if err != nil {
			return err
		}

		return nodeTraversal(
			tx, node.PubKeyBytes[:], c.db, func(tx kvdb.RTx,
				info *models.ChannelEdgeInfo,
				policy, _ *models.ChannelEdgePolicy) error {

				// Look up the node at the far end of this
				// channel so it can be handed to the caller.
				peer, err := c.fetchOtherNode(
					tx, info, node.PubKeyBytes[:],
				)
				if err != nil {
					return err
				}

				// havePolicy is true iff we know our own
				// (outgoing) policy for this channel.
				return cb(
					info.ChannelPoint, policy != nil, peer,
				)
			}, reset,
		)
	}, reset)
}
3312

3313
// forEachNodeChannelTx iterates through all channels of the given node,
// executing the passed callback with an edge info structure and the policies
// of each end of the channel. The first edge policy is the outgoing edge *to*
// the connecting node, while the second is the incoming edge *from* the
// connecting node. If the callback returns an error, then the iteration is
// halted with the error propagated back up to the caller.
//
// Unknown policies are passed into the callback as nil values.
//
// If the caller wishes to re-use an existing boltdb transaction, then it
// should be passed as the first argument.  Otherwise, the first argument should
// be nil and a fresh transaction will be created to execute the graph
// traversal.
//
// NOTE: the reset function is only meaningful if the tx param is nil.
func (c *KVStore) forEachNodeChannelTx(tx kvdb.RTx,
	nodePub route.Vertex, cb func(kvdb.RTx, *models.ChannelEdgeInfo,
		*models.ChannelEdgePolicy, *models.ChannelEdgePolicy) error,
	reset func()) error {

	// Thin wrapper: nodeTraversal does all the work against c.db.
	return nodeTraversal(tx, nodePub[:], c.db, cb, reset)
}
1,001✔
3335

3336
// fetchOtherNode attempts to fetch the full LightningNode that's opposite of
// the target node in the channel. This is useful when one knows the pubkey of
// one of the nodes, and wishes to obtain the full LightningNode for the other
// end of the channel. An error is returned if thisNodeKey is not a member of
// the given channel.
func (c *KVStore) fetchOtherNode(tx kvdb.RTx,
	channel *models.ChannelEdgeInfo, thisNodeKey []byte) (
	*models.LightningNode, error) {

	// Ensure that the node passed in is actually a member of the channel.
	var targetNodeBytes [33]byte
	switch {
	case bytes.Equal(channel.NodeKey1Bytes[:], thisNodeKey):
		targetNodeBytes = channel.NodeKey2Bytes
	case bytes.Equal(channel.NodeKey2Bytes[:], thisNodeKey):
		targetNodeBytes = channel.NodeKey1Bytes
	default:
		return nil, fmt.Errorf("node not participating in this channel")
	}

	var targetNode *models.LightningNode
	fetchNodeFunc := func(tx kvdb.RTx) error {
		// First grab the nodes bucket which stores the mapping from
		// pubKey to node information.
		nodes := tx.ReadBucket(nodeBucket)
		if nodes == nil {
			return ErrGraphNotFound
		}

		node, err := fetchLightningNode(nodes, targetNodeBytes[:])
		if err != nil {
			return err
		}

		targetNode = &node

		return nil
	}

	// If the transaction is nil, then we'll need to create a new one,
	// otherwise we can use the existing db transaction. The reset closure
	// clears the captured result for transaction retries.
	var err error
	if tx == nil {
		err = kvdb.View(c.db, fetchNodeFunc, func() {
			targetNode = nil
		})
	} else {
		err = fetchNodeFunc(tx)
	}

	return targetNode, err
}
3387

3388
// computeEdgePolicyKeys is a helper function that can be used to compute the
3389
// keys used to index the channel edge policy info for the two nodes of the
3390
// edge. The keys for node 1 and node 2 are returned respectively.
3391
func computeEdgePolicyKeys(info *models.ChannelEdgeInfo) ([]byte, []byte) {
25✔
3392
        var (
25✔
3393
                node1Key [33 + 8]byte
25✔
3394
                node2Key [33 + 8]byte
25✔
3395
        )
25✔
3396

25✔
3397
        copy(node1Key[:], info.NodeKey1Bytes[:])
25✔
3398
        copy(node2Key[:], info.NodeKey2Bytes[:])
25✔
3399

25✔
3400
        byteOrder.PutUint64(node1Key[33:], info.ChannelID)
25✔
3401
        byteOrder.PutUint64(node2Key[33:], info.ChannelID)
25✔
3402

25✔
3403
        return node1Key[:], node2Key[:]
25✔
3404
}
25✔
3405

3406
// FetchChannelEdgesByOutpoint attempts to lookup the two directed edges for
3407
// the channel identified by the funding outpoint. If the channel can't be
3408
// found, then ErrEdgeNotFound is returned. A struct which houses the general
3409
// information for the channel itself is returned as well as two structs that
3410
// contain the routing policies for the channel in either direction.
3411
func (c *KVStore) FetchChannelEdgesByOutpoint(op *wire.OutPoint) (
3412
        *models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
3413
        *models.ChannelEdgePolicy, error) {
14✔
3414

14✔
3415
        var (
14✔
3416
                edgeInfo *models.ChannelEdgeInfo
14✔
3417
                policy1  *models.ChannelEdgePolicy
14✔
3418
                policy2  *models.ChannelEdgePolicy
14✔
3419
        )
14✔
3420

14✔
3421
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
28✔
3422
                // First, grab the node bucket. This will be used to populate
14✔
3423
                // the Node pointers in each edge read from disk.
14✔
3424
                nodes := tx.ReadBucket(nodeBucket)
14✔
3425
                if nodes == nil {
14✔
3426
                        return ErrGraphNotFound
×
3427
                }
×
3428

3429
                // Next, grab the edge bucket which stores the edges, and also
3430
                // the index itself so we can group the directed edges together
3431
                // logically.
3432
                edges := tx.ReadBucket(edgeBucket)
14✔
3433
                if edges == nil {
14✔
3434
                        return ErrGraphNoEdgesFound
×
3435
                }
×
3436
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
14✔
3437
                if edgeIndex == nil {
14✔
3438
                        return ErrGraphNoEdgesFound
×
3439
                }
×
3440

3441
                // If the channel's outpoint doesn't exist within the outpoint
3442
                // index, then the edge does not exist.
3443
                chanIndex := edges.NestedReadBucket(channelPointBucket)
14✔
3444
                if chanIndex == nil {
14✔
3445
                        return ErrGraphNoEdgesFound
×
3446
                }
×
3447
                var b bytes.Buffer
14✔
3448
                if err := WriteOutpoint(&b, op); err != nil {
14✔
3449
                        return err
×
3450
                }
×
3451
                chanID := chanIndex.Get(b.Bytes())
14✔
3452
                if chanID == nil {
27✔
3453
                        return fmt.Errorf("%w: op=%v", ErrEdgeNotFound, op)
13✔
3454
                }
13✔
3455

3456
                // If the channel is found to exists, then we'll first retrieve
3457
                // the general information for the channel.
3458
                edge, err := fetchChanEdgeInfo(edgeIndex, chanID)
4✔
3459
                if err != nil {
4✔
3460
                        return fmt.Errorf("%w: chanID=%x", err, chanID)
×
3461
                }
×
3462
                edgeInfo = &edge
4✔
3463

4✔
3464
                // Once we have the information about the channels' parameters,
4✔
3465
                // we'll fetch the routing policies for each for the directed
4✔
3466
                // edges.
4✔
3467
                e1, e2, err := fetchChanEdgePolicies(edgeIndex, edges, chanID)
4✔
3468
                if err != nil {
4✔
3469
                        return fmt.Errorf("failed to find policy: %w", err)
×
3470
                }
×
3471

3472
                policy1 = e1
4✔
3473
                policy2 = e2
4✔
3474

4✔
3475
                return nil
4✔
3476
        }, func() {
14✔
3477
                edgeInfo = nil
14✔
3478
                policy1 = nil
14✔
3479
                policy2 = nil
14✔
3480
        })
14✔
3481
        if err != nil {
27✔
3482
                return nil, nil, nil, err
13✔
3483
        }
13✔
3484

3485
        return edgeInfo, policy1, policy2, nil
4✔
3486
}
3487

3488
// FetchChannelEdgesByID attempts to lookup the two directed edges for the
3489
// channel identified by the channel ID. If the channel can't be found, then
3490
// ErrEdgeNotFound is returned. A struct which houses the general information
3491
// for the channel itself is returned as well as two structs that contain the
3492
// routing policies for the channel in either direction.
3493
//
3494
// ErrZombieEdge an be returned if the edge is currently marked as a zombie
3495
// within the database. In this case, the ChannelEdgePolicy's will be nil, and
3496
// the ChannelEdgeInfo will only include the public keys of each node.
3497
func (c *KVStore) FetchChannelEdgesByID(chanID uint64) (
3498
        *models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
3499
        *models.ChannelEdgePolicy, error) {
2,692✔
3500

2,692✔
3501
        var (
2,692✔
3502
                edgeInfo  *models.ChannelEdgeInfo
2,692✔
3503
                policy1   *models.ChannelEdgePolicy
2,692✔
3504
                policy2   *models.ChannelEdgePolicy
2,692✔
3505
                channelID [8]byte
2,692✔
3506
        )
2,692✔
3507

2,692✔
3508
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
5,384✔
3509
                // First, grab the node bucket. This will be used to populate
2,692✔
3510
                // the Node pointers in each edge read from disk.
2,692✔
3511
                nodes := tx.ReadBucket(nodeBucket)
2,692✔
3512
                if nodes == nil {
2,692✔
3513
                        return ErrGraphNotFound
×
3514
                }
×
3515

3516
                // Next, grab the edge bucket which stores the edges, and also
3517
                // the index itself so we can group the directed edges together
3518
                // logically.
3519
                edges := tx.ReadBucket(edgeBucket)
2,692✔
3520
                if edges == nil {
2,692✔
3521
                        return ErrGraphNoEdgesFound
×
3522
                }
×
3523
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
2,692✔
3524
                if edgeIndex == nil {
2,692✔
3525
                        return ErrGraphNoEdgesFound
×
3526
                }
×
3527

3528
                byteOrder.PutUint64(channelID[:], chanID)
2,692✔
3529

2,692✔
3530
                // Now, attempt to fetch edge.
2,692✔
3531
                edge, err := fetchChanEdgeInfo(edgeIndex, channelID[:])
2,692✔
3532

2,692✔
3533
                // If it doesn't exist, we'll quickly check our zombie index to
2,692✔
3534
                // see if we've previously marked it as so.
2,692✔
3535
                if errors.Is(err, ErrEdgeNotFound) {
2,696✔
3536
                        // If the zombie index doesn't exist, or the edge is not
4✔
3537
                        // marked as a zombie within it, then we'll return the
4✔
3538
                        // original ErrEdgeNotFound error.
4✔
3539
                        zombieIndex := edges.NestedReadBucket(zombieBucket)
4✔
3540
                        if zombieIndex == nil {
4✔
3541
                                return ErrEdgeNotFound
×
3542
                        }
×
3543

3544
                        isZombie, pubKey1, pubKey2 := isZombieEdge(
4✔
3545
                                zombieIndex, chanID,
4✔
3546
                        )
4✔
3547
                        if !isZombie {
7✔
3548
                                return ErrEdgeNotFound
3✔
3549
                        }
3✔
3550

3551
                        // Otherwise, the edge is marked as a zombie, so we'll
3552
                        // populate the edge info with the public keys of each
3553
                        // party as this is the only information we have about
3554
                        // it and return an error signaling so.
3555
                        edgeInfo = &models.ChannelEdgeInfo{
4✔
3556
                                NodeKey1Bytes: pubKey1,
4✔
3557
                                NodeKey2Bytes: pubKey2,
4✔
3558
                        }
4✔
3559

4✔
3560
                        return ErrZombieEdge
4✔
3561
                }
3562

3563
                // Otherwise, we'll just return the error if any.
3564
                if err != nil {
2,691✔
3565
                        return err
×
3566
                }
×
3567

3568
                edgeInfo = &edge
2,691✔
3569

2,691✔
3570
                // Then we'll attempt to fetch the accompanying policies of this
2,691✔
3571
                // edge.
2,691✔
3572
                e1, e2, err := fetchChanEdgePolicies(
2,691✔
3573
                        edgeIndex, edges, channelID[:],
2,691✔
3574
                )
2,691✔
3575
                if err != nil {
2,691✔
3576
                        return err
×
3577
                }
×
3578

3579
                policy1 = e1
2,691✔
3580
                policy2 = e2
2,691✔
3581

2,691✔
3582
                return nil
2,691✔
3583
        }, func() {
2,692✔
3584
                edgeInfo = nil
2,692✔
3585
                policy1 = nil
2,692✔
3586
                policy2 = nil
2,692✔
3587
        })
2,692✔
3588
        if errors.Is(err, ErrZombieEdge) {
2,696✔
3589
                return edgeInfo, nil, nil, err
4✔
3590
        }
4✔
3591
        if err != nil {
2,694✔
3592
                return nil, nil, nil, err
3✔
3593
        }
3✔
3594

3595
        return edgeInfo, policy1, policy2, nil
2,691✔
3596
}
3597

3598
// IsPublicNode is a helper method that determines whether the node with the
3599
// given public key is seen as a public node in the graph from the graph's
3600
// source node's point of view.
3601
func (c *KVStore) IsPublicNode(pubKey [33]byte) (bool, error) {
16✔
3602
        var nodeIsPublic bool
16✔
3603
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
32✔
3604
                nodes := tx.ReadBucket(nodeBucket)
16✔
3605
                if nodes == nil {
16✔
3606
                        return ErrGraphNodesNotFound
×
3607
                }
×
3608
                ourPubKey := nodes.Get(sourceKey)
16✔
3609
                if ourPubKey == nil {
16✔
3610
                        return ErrSourceNodeNotSet
×
3611
                }
×
3612
                node, err := fetchLightningNode(nodes, pubKey[:])
16✔
3613
                if err != nil {
16✔
3614
                        return err
×
3615
                }
×
3616

3617
                nodeIsPublic, err = c.isPublic(tx, node.PubKeyBytes, ourPubKey)
16✔
3618

16✔
3619
                return err
16✔
3620
        }, func() {
16✔
3621
                nodeIsPublic = false
16✔
3622
        })
16✔
3623
        if err != nil {
16✔
3624
                return false, err
×
3625
        }
×
3626

3627
        return nodeIsPublic, nil
16✔
3628
}
3629

3630
// genMultiSigP2WSH generates the p2wsh'd multisig script for 2 of 2 pubkeys.
3631
func genMultiSigP2WSH(aPub, bPub []byte) ([]byte, error) {
49✔
3632
        witnessScript, err := input.GenMultiSigScript(aPub, bPub)
49✔
3633
        if err != nil {
49✔
3634
                return nil, err
×
3635
        }
×
3636

3637
        // With the witness script generated, we'll now turn it into a p2wsh
3638
        // script:
3639
        //  * OP_0 <sha256(script)>
3640
        bldr := txscript.NewScriptBuilder(
49✔
3641
                txscript.WithScriptAllocSize(input.P2WSHSize),
49✔
3642
        )
49✔
3643
        bldr.AddOp(txscript.OP_0)
49✔
3644
        scriptHash := sha256.Sum256(witnessScript)
49✔
3645
        bldr.AddData(scriptHash[:])
49✔
3646

49✔
3647
        return bldr.Script()
49✔
3648
}
3649

3650
// EdgePoint couples the outpoint of a channel with the funding script that it
3651
// creates. The FilteredChainView will use this to watch for spends of this
3652
// edge point on chain. We require both of these values as depending on the
3653
// concrete implementation, either the pkScript, or the out point will be used.
3654
type EdgePoint struct {
3655
        // FundingPkScript is the p2wsh multi-sig script of the target channel.
3656
        FundingPkScript []byte
3657

3658
        // OutPoint is the outpoint of the target channel.
3659
        OutPoint wire.OutPoint
3660
}
3661

3662
// String returns a human readable version of the target EdgePoint. We return
3663
// the outpoint directly as it is enough to uniquely identify the edge point.
3664
func (e *EdgePoint) String() string {
×
3665
        return e.OutPoint.String()
×
3666
}
×
3667

3668
// ChannelView returns the verifiable edge information for each active channel
3669
// within the known channel graph. The set of UTXO's (along with their scripts)
3670
// returned are the ones that need to be watched on chain to detect channel
3671
// closes on the resident blockchain.
3672
func (c *KVStore) ChannelView() ([]EdgePoint, error) {
25✔
3673
        var edgePoints []EdgePoint
25✔
3674
        if err := kvdb.View(c.db, func(tx kvdb.RTx) error {
50✔
3675
                // We're going to iterate over the entire channel index, so
25✔
3676
                // we'll need to fetch the edgeBucket to get to the index as
25✔
3677
                // it's a sub-bucket.
25✔
3678
                edges := tx.ReadBucket(edgeBucket)
25✔
3679
                if edges == nil {
25✔
3680
                        return ErrGraphNoEdgesFound
×
3681
                }
×
3682
                chanIndex := edges.NestedReadBucket(channelPointBucket)
25✔
3683
                if chanIndex == nil {
25✔
3684
                        return ErrGraphNoEdgesFound
×
3685
                }
×
3686
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
25✔
3687
                if edgeIndex == nil {
25✔
3688
                        return ErrGraphNoEdgesFound
×
3689
                }
×
3690

3691
                // Once we have the proper bucket, we'll range over each key
3692
                // (which is the channel point for the channel) and decode it,
3693
                // accumulating each entry.
3694
                return chanIndex.ForEach(
25✔
3695
                        func(chanPointBytes, chanID []byte) error {
70✔
3696
                                chanPointReader := bytes.NewReader(
45✔
3697
                                        chanPointBytes,
45✔
3698
                                )
45✔
3699

45✔
3700
                                var chanPoint wire.OutPoint
45✔
3701
                                err := ReadOutpoint(chanPointReader, &chanPoint)
45✔
3702
                                if err != nil {
45✔
3703
                                        return err
×
3704
                                }
×
3705

3706
                                edgeInfo, err := fetchChanEdgeInfo(
45✔
3707
                                        edgeIndex, chanID,
45✔
3708
                                )
45✔
3709
                                if err != nil {
45✔
3710
                                        return err
×
3711
                                }
×
3712

3713
                                pkScript, err := genMultiSigP2WSH(
45✔
3714
                                        edgeInfo.BitcoinKey1Bytes[:],
45✔
3715
                                        edgeInfo.BitcoinKey2Bytes[:],
45✔
3716
                                )
45✔
3717
                                if err != nil {
45✔
3718
                                        return err
×
3719
                                }
×
3720

3721
                                edgePoints = append(edgePoints, EdgePoint{
45✔
3722
                                        FundingPkScript: pkScript,
45✔
3723
                                        OutPoint:        chanPoint,
45✔
3724
                                })
45✔
3725

45✔
3726
                                return nil
45✔
3727
                        },
3728
                )
3729
        }, func() {
25✔
3730
                edgePoints = nil
25✔
3731
        }); err != nil {
25✔
3732
                return nil, err
×
3733
        }
×
3734

3735
        return edgePoints, nil
25✔
3736
}
3737

3738
// MarkEdgeZombie attempts to mark a channel identified by its channel ID as a
3739
// zombie. This method is used on an ad-hoc basis, when channels need to be
3740
// marked as zombies outside the normal pruning cycle.
3741
func (c *KVStore) MarkEdgeZombie(chanID uint64,
3742
        pubKey1, pubKey2 [33]byte) error {
129✔
3743

129✔
3744
        c.cacheMu.Lock()
129✔
3745
        defer c.cacheMu.Unlock()
129✔
3746

129✔
3747
        err := kvdb.Batch(c.db, func(tx kvdb.RwTx) error {
258✔
3748
                edges := tx.ReadWriteBucket(edgeBucket)
129✔
3749
                if edges == nil {
129✔
3750
                        return ErrGraphNoEdgesFound
×
3751
                }
×
3752
                zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
129✔
3753
                if err != nil {
129✔
3754
                        return fmt.Errorf("unable to create zombie "+
×
3755
                                "bucket: %w", err)
×
3756
                }
×
3757

3758
                return markEdgeZombie(zombieIndex, chanID, pubKey1, pubKey2)
129✔
3759
        })
3760
        if err != nil {
129✔
3761
                return err
×
3762
        }
×
3763

3764
        c.rejectCache.remove(chanID)
129✔
3765
        c.chanCache.remove(chanID)
129✔
3766

129✔
3767
        return nil
129✔
3768
}
3769

3770
// markEdgeZombie marks an edge as a zombie within our zombie index. The public
3771
// keys should represent the node public keys of the two parties involved in the
3772
// edge.
3773
func markEdgeZombie(zombieIndex kvdb.RwBucket, chanID uint64, pubKey1,
3774
        pubKey2 [33]byte) error {
156✔
3775

156✔
3776
        var k [8]byte
156✔
3777
        byteOrder.PutUint64(k[:], chanID)
156✔
3778

156✔
3779
        var v [66]byte
156✔
3780
        copy(v[:33], pubKey1[:])
156✔
3781
        copy(v[33:], pubKey2[:])
156✔
3782

156✔
3783
        return zombieIndex.Put(k[:], v[:])
156✔
3784
}
156✔
3785

3786
// MarkEdgeLive clears an edge from our zombie index, deeming it as live.
3787
func (c *KVStore) MarkEdgeLive(chanID uint64) error {
20✔
3788
        c.cacheMu.Lock()
20✔
3789
        defer c.cacheMu.Unlock()
20✔
3790

20✔
3791
        return c.markEdgeLiveUnsafe(nil, chanID)
20✔
3792
}
20✔
3793

3794
// markEdgeLiveUnsafe clears an edge from the zombie index. This method can be
3795
// called with an existing kvdb.RwTx or the argument can be set to nil in which
3796
// case a new transaction will be created.
3797
//
3798
// NOTE: this method MUST only be called if the cacheMu has already been
3799
// acquired.
3800
func (c *KVStore) markEdgeLiveUnsafe(tx kvdb.RwTx, chanID uint64) error {
20✔
3801
        dbFn := func(tx kvdb.RwTx) error {
40✔
3802
                edges := tx.ReadWriteBucket(edgeBucket)
20✔
3803
                if edges == nil {
20✔
3804
                        return ErrGraphNoEdgesFound
×
3805
                }
×
3806
                zombieIndex := edges.NestedReadWriteBucket(zombieBucket)
20✔
3807
                if zombieIndex == nil {
20✔
3808
                        return nil
×
3809
                }
×
3810

3811
                var k [8]byte
20✔
3812
                byteOrder.PutUint64(k[:], chanID)
20✔
3813

20✔
3814
                if len(zombieIndex.Get(k[:])) == 0 {
21✔
3815
                        return ErrZombieEdgeNotFound
1✔
3816
                }
1✔
3817

3818
                return zombieIndex.Delete(k[:])
19✔
3819
        }
3820

3821
        // If the transaction is nil, we'll create a new one. Otherwise, we use
3822
        // the existing transaction
3823
        var err error
20✔
3824
        if tx == nil {
40✔
3825
                err = kvdb.Update(c.db, dbFn, func() {})
40✔
3826
        } else {
×
3827
                err = dbFn(tx)
×
3828
        }
×
3829
        if err != nil {
21✔
3830
                return err
1✔
3831
        }
1✔
3832

3833
        c.rejectCache.remove(chanID)
19✔
3834
        c.chanCache.remove(chanID)
19✔
3835

19✔
3836
        return nil
19✔
3837
}
3838

3839
// IsZombieEdge returns whether the edge is considered zombie. If it is a
3840
// zombie, then the two node public keys corresponding to this edge are also
3841
// returned.
3842
func (c *KVStore) IsZombieEdge(chanID uint64) (bool, [33]byte, [33]byte,
3843
        error) {
14✔
3844

14✔
3845
        var (
14✔
3846
                isZombie         bool
14✔
3847
                pubKey1, pubKey2 [33]byte
14✔
3848
        )
14✔
3849

14✔
3850
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
28✔
3851
                edges := tx.ReadBucket(edgeBucket)
14✔
3852
                if edges == nil {
14✔
3853
                        return ErrGraphNoEdgesFound
×
3854
                }
×
3855
                zombieIndex := edges.NestedReadBucket(zombieBucket)
14✔
3856
                if zombieIndex == nil {
14✔
3857
                        return nil
×
3858
                }
×
3859

3860
                isZombie, pubKey1, pubKey2 = isZombieEdge(zombieIndex, chanID)
14✔
3861

14✔
3862
                return nil
14✔
3863
        }, func() {
14✔
3864
                isZombie = false
14✔
3865
                pubKey1 = [33]byte{}
14✔
3866
                pubKey2 = [33]byte{}
14✔
3867
        })
14✔
3868
        if err != nil {
14✔
3869
                return false, [33]byte{}, [33]byte{}, fmt.Errorf("%w: %w "+
×
3870
                        "(chanID=%d)", ErrCantCheckIfZombieEdgeStr, err, chanID)
×
3871
        }
×
3872

3873
        return isZombie, pubKey1, pubKey2, nil
14✔
3874
}
3875

3876
// isZombieEdge returns whether an entry exists for the given channel in the
3877
// zombie index. If an entry exists, then the two node public keys corresponding
3878
// to this edge are also returned.
3879
func isZombieEdge(zombieIndex kvdb.RBucket,
3880
        chanID uint64) (bool, [33]byte, [33]byte) {
190✔
3881

190✔
3882
        var k [8]byte
190✔
3883
        byteOrder.PutUint64(k[:], chanID)
190✔
3884

190✔
3885
        v := zombieIndex.Get(k[:])
190✔
3886
        if v == nil {
295✔
3887
                return false, [33]byte{}, [33]byte{}
105✔
3888
        }
105✔
3889

3890
        var pubKey1, pubKey2 [33]byte
88✔
3891
        copy(pubKey1[:], v[:33])
88✔
3892
        copy(pubKey2[:], v[33:])
88✔
3893

88✔
3894
        return true, pubKey1, pubKey2
88✔
3895
}
3896

3897
// NumZombies returns the current number of zombie channels in the graph.
3898
func (c *KVStore) NumZombies() (uint64, error) {
4✔
3899
        var numZombies uint64
4✔
3900
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
8✔
3901
                edges := tx.ReadBucket(edgeBucket)
4✔
3902
                if edges == nil {
4✔
3903
                        return nil
×
3904
                }
×
3905
                zombieIndex := edges.NestedReadBucket(zombieBucket)
4✔
3906
                if zombieIndex == nil {
4✔
3907
                        return nil
×
3908
                }
×
3909

3910
                return zombieIndex.ForEach(func(_, _ []byte) error {
6✔
3911
                        numZombies++
2✔
3912
                        return nil
2✔
3913
                })
2✔
3914
        }, func() {
4✔
3915
                numZombies = 0
4✔
3916
        })
4✔
3917
        if err != nil {
4✔
3918
                return 0, err
×
3919
        }
×
3920

3921
        return numZombies, nil
4✔
3922
}
3923

3924
// PutClosedScid stores a SCID for a closed channel in the database. This is so
3925
// that we can ignore channel announcements that we know to be closed without
3926
// having to validate them and fetch a block.
3927
func (c *KVStore) PutClosedScid(scid lnwire.ShortChannelID) error {
1✔
3928
        return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
2✔
3929
                closedScids, err := tx.CreateTopLevelBucket(closedScidBucket)
1✔
3930
                if err != nil {
1✔
3931
                        return err
×
3932
                }
×
3933

3934
                var k [8]byte
1✔
3935
                byteOrder.PutUint64(k[:], scid.ToUint64())
1✔
3936

1✔
3937
                return closedScids.Put(k[:], []byte{})
1✔
3938
        }, func() {})
1✔
3939
}
3940

3941
// IsClosedScid checks whether a channel identified by the passed in scid is
3942
// closed. This helps avoid having to perform expensive validation checks.
3943
// TODO: Add an LRU cache to cut down on disc reads.
3944
func (c *KVStore) IsClosedScid(scid lnwire.ShortChannelID) (bool, error) {
5✔
3945
        var isClosed bool
5✔
3946
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
10✔
3947
                closedScids := tx.ReadBucket(closedScidBucket)
5✔
3948
                if closedScids == nil {
5✔
3949
                        return ErrClosedScidsNotFound
×
3950
                }
×
3951

3952
                var k [8]byte
5✔
3953
                byteOrder.PutUint64(k[:], scid.ToUint64())
5✔
3954

5✔
3955
                if closedScids.Get(k[:]) != nil {
6✔
3956
                        isClosed = true
1✔
3957
                        return nil
1✔
3958
                }
1✔
3959

3960
                return nil
4✔
3961
        }, func() {
5✔
3962
                isClosed = false
5✔
3963
        })
5✔
3964
        if err != nil {
5✔
3965
                return false, err
×
3966
        }
×
3967

3968
        return isClosed, nil
5✔
3969
}
3970

3971
// GraphSession will provide the call-back with access to a NodeTraverser
3972
// instance which can be used to perform queries against the channel graph.
3973
func (c *KVStore) GraphSession(cb func(graph NodeTraverser) error,
3974
        reset func()) error {
54✔
3975

54✔
3976
        return c.db.View(func(tx walletdb.ReadTx) error {
108✔
3977
                return cb(&nodeTraverserSession{
54✔
3978
                        db: c,
54✔
3979
                        tx: tx,
54✔
3980
                })
54✔
3981
        }, reset)
54✔
3982
}
3983

3984
// nodeTraverserSession implements the NodeTraverser interface but with a
3985
// backing read only transaction for a consistent view of the graph.
3986
type nodeTraverserSession struct {
3987
        tx kvdb.RTx
3988
        db *KVStore
3989
}
3990

3991
// ForEachNodeDirectedChannel calls the callback for every channel of the given
3992
// node.
3993
//
3994
// NOTE: Part of the NodeTraverser interface.
3995
func (c *nodeTraverserSession) ForEachNodeDirectedChannel(nodePub route.Vertex,
3996
        cb func(channel *DirectedChannel) error, _ func()) error {
239✔
3997

239✔
3998
        return c.db.forEachNodeDirectedChannel(c.tx, nodePub, cb, func() {})
239✔
3999
}
4000

4001
// FetchNodeFeatures returns the features of the given node. If the node is
4002
// unknown, assume no additional features are supported.
4003
//
4004
// NOTE: Part of the NodeTraverser interface.
4005
func (c *nodeTraverserSession) FetchNodeFeatures(nodePub route.Vertex) (
4006
        *lnwire.FeatureVector, error) {
254✔
4007

254✔
4008
        return c.db.fetchNodeFeatures(c.tx, nodePub)
254✔
4009
}
254✔
4010

4011
func putLightningNode(nodeBucket, aliasBucket, updateIndex kvdb.RwBucket,
4012
        node *models.LightningNode) error {
911✔
4013

911✔
4014
        var (
911✔
4015
                scratch [16]byte
911✔
4016
                b       bytes.Buffer
911✔
4017
        )
911✔
4018

911✔
4019
        pub, err := node.PubKey()
911✔
4020
        if err != nil {
911✔
4021
                return err
×
4022
        }
×
4023
        nodePub := pub.SerializeCompressed()
911✔
4024

911✔
4025
        // If the node has the update time set, write it, else write 0.
911✔
4026
        updateUnix := uint64(0)
911✔
4027
        if node.LastUpdate.Unix() > 0 {
1,686✔
4028
                updateUnix = uint64(node.LastUpdate.Unix())
775✔
4029
        }
775✔
4030

4031
        byteOrder.PutUint64(scratch[:8], updateUnix)
911✔
4032
        if _, err := b.Write(scratch[:8]); err != nil {
911✔
4033
                return err
×
4034
        }
×
4035

4036
        if _, err := b.Write(nodePub); err != nil {
911✔
4037
                return err
×
4038
        }
×
4039

4040
        // If we got a node announcement for this node, we will have the rest
4041
        // of the data available. If not we don't have more data to write.
4042
        if !node.HaveNodeAnnouncement {
997✔
4043
                // Write HaveNodeAnnouncement=0.
86✔
4044
                byteOrder.PutUint16(scratch[:2], 0)
86✔
4045
                if _, err := b.Write(scratch[:2]); err != nil {
86✔
4046
                        return err
×
4047
                }
×
4048

4049
                return nodeBucket.Put(nodePub, b.Bytes())
86✔
4050
        }
4051

4052
        // Write HaveNodeAnnouncement=1.
4053
        byteOrder.PutUint16(scratch[:2], 1)
828✔
4054
        if _, err := b.Write(scratch[:2]); err != nil {
828✔
4055
                return err
×
4056
        }
×
4057

4058
        if err := binary.Write(&b, byteOrder, node.Color.R); err != nil {
828✔
4059
                return err
×
4060
        }
×
4061
        if err := binary.Write(&b, byteOrder, node.Color.G); err != nil {
828✔
4062
                return err
×
4063
        }
×
4064
        if err := binary.Write(&b, byteOrder, node.Color.B); err != nil {
828✔
4065
                return err
×
4066
        }
×
4067

4068
        if err := wire.WriteVarString(&b, 0, node.Alias); err != nil {
828✔
4069
                return err
×
4070
        }
×
4071

4072
        if err := node.Features.Encode(&b); err != nil {
828✔
4073
                return err
×
4074
        }
×
4075

4076
        numAddresses := uint16(len(node.Addresses))
828✔
4077
        byteOrder.PutUint16(scratch[:2], numAddresses)
828✔
4078
        if _, err := b.Write(scratch[:2]); err != nil {
828✔
4079
                return err
×
4080
        }
×
4081

4082
        for _, address := range node.Addresses {
1,893✔
4083
                if err := SerializeAddr(&b, address); err != nil {
1,065✔
4084
                        return err
×
4085
                }
×
4086
        }
4087

4088
        sigLen := len(node.AuthSigBytes)
828✔
4089
        if sigLen > 80 {
828✔
4090
                return fmt.Errorf("max sig len allowed is 80, had %v",
×
4091
                        sigLen)
×
4092
        }
×
4093

4094
        err = wire.WriteVarBytes(&b, 0, node.AuthSigBytes)
828✔
4095
        if err != nil {
828✔
4096
                return err
×
4097
        }
×
4098

4099
        if len(node.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes {
828✔
4100
                return ErrTooManyExtraOpaqueBytes(len(node.ExtraOpaqueData))
×
4101
        }
×
4102
        err = wire.WriteVarBytes(&b, 0, node.ExtraOpaqueData)
828✔
4103
        if err != nil {
828✔
4104
                return err
×
4105
        }
×
4106

4107
        if err := aliasBucket.Put(nodePub, []byte(node.Alias)); err != nil {
828✔
4108
                return err
×
4109
        }
×
4110

4111
        // With the alias bucket updated, we'll now update the index that
4112
        // tracks the time series of node updates.
4113
        var indexKey [8 + 33]byte
828✔
4114
        byteOrder.PutUint64(indexKey[:8], updateUnix)
828✔
4115
        copy(indexKey[8:], nodePub)
828✔
4116

828✔
4117
        // If there was already an old index entry for this node, then we'll
828✔
4118
        // delete the old one before we write the new entry.
828✔
4119
        if nodeBytes := nodeBucket.Get(nodePub); nodeBytes != nil {
847✔
4120
                // Extract out the old update time to we can reconstruct the
19✔
4121
                // prior index key to delete it from the index.
19✔
4122
                oldUpdateTime := nodeBytes[:8]
19✔
4123

19✔
4124
                var oldIndexKey [8 + 33]byte
19✔
4125
                copy(oldIndexKey[:8], oldUpdateTime)
19✔
4126
                copy(oldIndexKey[8:], nodePub)
19✔
4127

19✔
4128
                if err := updateIndex.Delete(oldIndexKey[:]); err != nil {
19✔
4129
                        return err
×
4130
                }
×
4131
        }
4132

4133
        if err := updateIndex.Put(indexKey[:], nil); err != nil {
828✔
4134
                return err
×
4135
        }
×
4136

4137
        return nodeBucket.Put(nodePub, b.Bytes())
828✔
4138
}
4139

4140
func fetchLightningNode(nodeBucket kvdb.RBucket,
4141
        nodePub []byte) (models.LightningNode, error) {
3,638✔
4142

3,638✔
4143
        nodeBytes := nodeBucket.Get(nodePub)
3,638✔
4144
        if nodeBytes == nil {
3,724✔
4145
                return models.LightningNode{}, ErrGraphNodeNotFound
86✔
4146
        }
86✔
4147

4148
        nodeReader := bytes.NewReader(nodeBytes)
3,555✔
4149

3,555✔
4150
        return deserializeLightningNode(nodeReader)
3,555✔
4151
}
4152

4153
func deserializeLightningNodeCacheable(r io.Reader) (route.Vertex,
4154
        *lnwire.FeatureVector, error) {
123✔
4155

123✔
4156
        var (
123✔
4157
                pubKey      route.Vertex
123✔
4158
                features    = lnwire.EmptyFeatureVector()
123✔
4159
                nodeScratch [8]byte
123✔
4160
        )
123✔
4161

123✔
4162
        // Skip ahead:
123✔
4163
        // - LastUpdate (8 bytes)
123✔
4164
        if _, err := r.Read(nodeScratch[:]); err != nil {
123✔
4165
                return pubKey, nil, err
×
4166
        }
×
4167

4168
        if _, err := io.ReadFull(r, pubKey[:]); err != nil {
123✔
4169
                return pubKey, nil, err
×
4170
        }
×
4171

4172
        // Read the node announcement flag.
4173
        if _, err := r.Read(nodeScratch[:2]); err != nil {
123✔
4174
                return pubKey, nil, err
×
4175
        }
×
4176
        hasNodeAnn := byteOrder.Uint16(nodeScratch[:2])
123✔
4177

123✔
4178
        // The rest of the data is optional, and will only be there if we got a
123✔
4179
        // node announcement for this node.
123✔
4180
        if hasNodeAnn == 0 {
126✔
4181
                return pubKey, features, nil
3✔
4182
        }
3✔
4183

4184
        // We did get a node announcement for this node, so we'll have the rest
4185
        // of the data available.
4186
        var rgb uint8
123✔
4187
        if err := binary.Read(r, byteOrder, &rgb); err != nil {
123✔
4188
                return pubKey, nil, err
×
4189
        }
×
4190
        if err := binary.Read(r, byteOrder, &rgb); err != nil {
123✔
4191
                return pubKey, nil, err
×
4192
        }
×
4193
        if err := binary.Read(r, byteOrder, &rgb); err != nil {
123✔
4194
                return pubKey, nil, err
×
4195
        }
×
4196

4197
        if _, err := wire.ReadVarString(r, 0); err != nil {
123✔
4198
                return pubKey, nil, err
×
4199
        }
×
4200

4201
        if err := features.Decode(r); err != nil {
123✔
4202
                return pubKey, nil, err
×
4203
        }
×
4204

4205
        return pubKey, features, nil
123✔
4206
}
4207

4208
func deserializeLightningNode(r io.Reader) (models.LightningNode, error) {
8,542✔
4209
        var (
8,542✔
4210
                node    models.LightningNode
8,542✔
4211
                scratch [8]byte
8,542✔
4212
                err     error
8,542✔
4213
        )
8,542✔
4214

8,542✔
4215
        // Always populate a feature vector, even if we don't have a node
8,542✔
4216
        // announcement and short circuit below.
8,542✔
4217
        node.Features = lnwire.EmptyFeatureVector()
8,542✔
4218

8,542✔
4219
        if _, err := r.Read(scratch[:]); err != nil {
8,542✔
4220
                return models.LightningNode{}, err
×
4221
        }
×
4222

4223
        unix := int64(byteOrder.Uint64(scratch[:]))
8,542✔
4224
        node.LastUpdate = time.Unix(unix, 0)
8,542✔
4225

8,542✔
4226
        if _, err := io.ReadFull(r, node.PubKeyBytes[:]); err != nil {
8,542✔
4227
                return models.LightningNode{}, err
×
4228
        }
×
4229

4230
        if _, err := r.Read(scratch[:2]); err != nil {
8,542✔
4231
                return models.LightningNode{}, err
×
4232
        }
×
4233

4234
        hasNodeAnn := byteOrder.Uint16(scratch[:2])
8,542✔
4235
        if hasNodeAnn == 1 {
16,939✔
4236
                node.HaveNodeAnnouncement = true
8,397✔
4237
        } else {
8,545✔
4238
                node.HaveNodeAnnouncement = false
148✔
4239
        }
148✔
4240

4241
        // The rest of the data is optional, and will only be there if we got a
4242
        // node announcement for this node.
4243
        if !node.HaveNodeAnnouncement {
8,690✔
4244
                return node, nil
148✔
4245
        }
148✔
4246

4247
        // We did get a node announcement for this node, so we'll have the rest
4248
        // of the data available.
4249
        if err := binary.Read(r, byteOrder, &node.Color.R); err != nil {
8,397✔
4250
                return models.LightningNode{}, err
×
4251
        }
×
4252
        if err := binary.Read(r, byteOrder, &node.Color.G); err != nil {
8,397✔
4253
                return models.LightningNode{}, err
×
4254
        }
×
4255
        if err := binary.Read(r, byteOrder, &node.Color.B); err != nil {
8,397✔
4256
                return models.LightningNode{}, err
×
4257
        }
×
4258

4259
        node.Alias, err = wire.ReadVarString(r, 0)
8,397✔
4260
        if err != nil {
8,397✔
4261
                return models.LightningNode{}, err
×
4262
        }
×
4263

4264
        err = node.Features.Decode(r)
8,397✔
4265
        if err != nil {
8,397✔
4266
                return models.LightningNode{}, err
×
4267
        }
×
4268

4269
        if _, err := r.Read(scratch[:2]); err != nil {
8,397✔
4270
                return models.LightningNode{}, err
×
4271
        }
×
4272
        numAddresses := int(byteOrder.Uint16(scratch[:2]))
8,397✔
4273

8,397✔
4274
        var addresses []net.Addr
8,397✔
4275
        for i := 0; i < numAddresses; i++ {
19,061✔
4276
                address, err := DeserializeAddr(r)
10,664✔
4277
                if err != nil {
10,664✔
4278
                        return models.LightningNode{}, err
×
4279
                }
×
4280
                addresses = append(addresses, address)
10,664✔
4281
        }
4282
        node.Addresses = addresses
8,397✔
4283

8,397✔
4284
        node.AuthSigBytes, err = wire.ReadVarBytes(r, 0, 80, "sig")
8,397✔
4285
        if err != nil {
8,397✔
4286
                return models.LightningNode{}, err
×
4287
        }
×
4288

4289
        // We'll try and see if there are any opaque bytes left, if not, then
4290
        // we'll ignore the EOF error and return the node as is.
4291
        extraBytes, err := wire.ReadVarBytes(
8,397✔
4292
                r, 0, MaxAllowedExtraOpaqueBytes, "blob",
8,397✔
4293
        )
8,397✔
4294
        switch {
8,397✔
4295
        case errors.Is(err, io.ErrUnexpectedEOF):
×
4296
        case errors.Is(err, io.EOF):
×
4297
        case err != nil:
×
4298
                return models.LightningNode{}, err
×
4299
        }
4300

4301
        if len(extraBytes) > 0 {
8,407✔
4302
                node.ExtraOpaqueData = extraBytes
10✔
4303
        }
10✔
4304

4305
        return node, nil
8,397✔
4306
}
4307

4308
func putChanEdgeInfo(edgeIndex kvdb.RwBucket,
4309
        edgeInfo *models.ChannelEdgeInfo, chanID [8]byte) error {
1,490✔
4310

1,490✔
4311
        var b bytes.Buffer
1,490✔
4312

1,490✔
4313
        if _, err := b.Write(edgeInfo.NodeKey1Bytes[:]); err != nil {
1,490✔
4314
                return err
×
4315
        }
×
4316
        if _, err := b.Write(edgeInfo.NodeKey2Bytes[:]); err != nil {
1,490✔
4317
                return err
×
4318
        }
×
4319
        if _, err := b.Write(edgeInfo.BitcoinKey1Bytes[:]); err != nil {
1,490✔
4320
                return err
×
4321
        }
×
4322
        if _, err := b.Write(edgeInfo.BitcoinKey2Bytes[:]); err != nil {
1,490✔
4323
                return err
×
4324
        }
×
4325

4326
        var featureBuf bytes.Buffer
1,490✔
4327
        if err := edgeInfo.Features.Encode(&featureBuf); err != nil {
1,490✔
4328
                return fmt.Errorf("unable to encode features: %w", err)
×
4329
        }
×
4330

4331
        if err := wire.WriteVarBytes(&b, 0, featureBuf.Bytes()); err != nil {
1,490✔
4332
                return err
×
4333
        }
×
4334

4335
        authProof := edgeInfo.AuthProof
1,490✔
4336
        var nodeSig1, nodeSig2, bitcoinSig1, bitcoinSig2 []byte
1,490✔
4337
        if authProof != nil {
2,896✔
4338
                nodeSig1 = authProof.NodeSig1Bytes
1,406✔
4339
                nodeSig2 = authProof.NodeSig2Bytes
1,406✔
4340
                bitcoinSig1 = authProof.BitcoinSig1Bytes
1,406✔
4341
                bitcoinSig2 = authProof.BitcoinSig2Bytes
1,406✔
4342
        }
1,406✔
4343

4344
        if err := wire.WriteVarBytes(&b, 0, nodeSig1); err != nil {
1,490✔
4345
                return err
×
4346
        }
×
4347
        if err := wire.WriteVarBytes(&b, 0, nodeSig2); err != nil {
1,490✔
4348
                return err
×
4349
        }
×
4350
        if err := wire.WriteVarBytes(&b, 0, bitcoinSig1); err != nil {
1,490✔
4351
                return err
×
4352
        }
×
4353
        if err := wire.WriteVarBytes(&b, 0, bitcoinSig2); err != nil {
1,490✔
4354
                return err
×
4355
        }
×
4356

4357
        if err := WriteOutpoint(&b, &edgeInfo.ChannelPoint); err != nil {
1,490✔
4358
                return err
×
4359
        }
×
4360
        err := binary.Write(&b, byteOrder, uint64(edgeInfo.Capacity))
1,490✔
4361
        if err != nil {
1,490✔
4362
                return err
×
4363
        }
×
4364
        if _, err := b.Write(chanID[:]); err != nil {
1,490✔
4365
                return err
×
4366
        }
×
4367
        if _, err := b.Write(edgeInfo.ChainHash[:]); err != nil {
1,490✔
4368
                return err
×
4369
        }
×
4370

4371
        if len(edgeInfo.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes {
1,490✔
4372
                return ErrTooManyExtraOpaqueBytes(len(edgeInfo.ExtraOpaqueData))
×
4373
        }
×
4374
        err = wire.WriteVarBytes(&b, 0, edgeInfo.ExtraOpaqueData)
1,490✔
4375
        if err != nil {
1,490✔
4376
                return err
×
4377
        }
×
4378

4379
        return edgeIndex.Put(chanID[:], b.Bytes())
1,490✔
4380
}
4381

4382
func fetchChanEdgeInfo(edgeIndex kvdb.RBucket,
4383
        chanID []byte) (models.ChannelEdgeInfo, error) {
6,800✔
4384

6,800✔
4385
        edgeInfoBytes := edgeIndex.Get(chanID)
6,800✔
4386
        if edgeInfoBytes == nil {
6,862✔
4387
                return models.ChannelEdgeInfo{}, ErrEdgeNotFound
62✔
4388
        }
62✔
4389

4390
        edgeInfoReader := bytes.NewReader(edgeInfoBytes)
6,741✔
4391

6,741✔
4392
        return deserializeChanEdgeInfo(edgeInfoReader)
6,741✔
4393
}
4394

4395
func deserializeChanEdgeInfo(r io.Reader) (models.ChannelEdgeInfo, error) {
7,283✔
4396
        var (
7,283✔
4397
                err      error
7,283✔
4398
                edgeInfo models.ChannelEdgeInfo
7,283✔
4399
        )
7,283✔
4400

7,283✔
4401
        if _, err := io.ReadFull(r, edgeInfo.NodeKey1Bytes[:]); err != nil {
7,283✔
4402
                return models.ChannelEdgeInfo{}, err
×
4403
        }
×
4404
        if _, err := io.ReadFull(r, edgeInfo.NodeKey2Bytes[:]); err != nil {
7,283✔
4405
                return models.ChannelEdgeInfo{}, err
×
4406
        }
×
4407
        if _, err := io.ReadFull(r, edgeInfo.BitcoinKey1Bytes[:]); err != nil {
7,283✔
4408
                return models.ChannelEdgeInfo{}, err
×
4409
        }
×
4410
        if _, err := io.ReadFull(r, edgeInfo.BitcoinKey2Bytes[:]); err != nil {
7,283✔
4411
                return models.ChannelEdgeInfo{}, err
×
4412
        }
×
4413

4414
        featureBytes, err := wire.ReadVarBytes(r, 0, 900, "features")
7,283✔
4415
        if err != nil {
7,283✔
4416
                return models.ChannelEdgeInfo{}, err
×
4417
        }
×
4418

4419
        features := lnwire.NewRawFeatureVector()
7,283✔
4420
        err = features.Decode(bytes.NewReader(featureBytes))
7,283✔
4421
        if err != nil {
7,283✔
4422
                return models.ChannelEdgeInfo{}, fmt.Errorf("unable to decode "+
×
4423
                        "features: %w", err)
×
4424
        }
×
4425
        edgeInfo.Features = lnwire.NewFeatureVector(features, lnwire.Features)
7,283✔
4426

7,283✔
4427
        proof := &models.ChannelAuthProof{}
7,283✔
4428

7,283✔
4429
        proof.NodeSig1Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
7,283✔
4430
        if err != nil {
7,283✔
4431
                return models.ChannelEdgeInfo{}, err
×
4432
        }
×
4433
        proof.NodeSig2Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
7,283✔
4434
        if err != nil {
7,283✔
4435
                return models.ChannelEdgeInfo{}, err
×
4436
        }
×
4437
        proof.BitcoinSig1Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
7,283✔
4438
        if err != nil {
7,283✔
4439
                return models.ChannelEdgeInfo{}, err
×
4440
        }
×
4441
        proof.BitcoinSig2Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
7,283✔
4442
        if err != nil {
7,283✔
4443
                return models.ChannelEdgeInfo{}, err
×
4444
        }
×
4445

4446
        if !proof.IsEmpty() {
11,463✔
4447
                edgeInfo.AuthProof = proof
4,180✔
4448
        }
4,180✔
4449

4450
        edgeInfo.ChannelPoint = wire.OutPoint{}
7,283✔
4451
        if err := ReadOutpoint(r, &edgeInfo.ChannelPoint); err != nil {
7,283✔
4452
                return models.ChannelEdgeInfo{}, err
×
4453
        }
×
4454
        if err := binary.Read(r, byteOrder, &edgeInfo.Capacity); err != nil {
7,283✔
4455
                return models.ChannelEdgeInfo{}, err
×
4456
        }
×
4457
        if err := binary.Read(r, byteOrder, &edgeInfo.ChannelID); err != nil {
7,283✔
4458
                return models.ChannelEdgeInfo{}, err
×
4459
        }
×
4460

4461
        if _, err := io.ReadFull(r, edgeInfo.ChainHash[:]); err != nil {
7,283✔
4462
                return models.ChannelEdgeInfo{}, err
×
4463
        }
×
4464

4465
        // We'll try and see if there are any opaque bytes left, if not, then
4466
        // we'll ignore the EOF error and return the edge as is.
4467
        edgeInfo.ExtraOpaqueData, err = wire.ReadVarBytes(
7,283✔
4468
                r, 0, MaxAllowedExtraOpaqueBytes, "blob",
7,283✔
4469
        )
7,283✔
4470
        switch {
7,283✔
4471
        case errors.Is(err, io.ErrUnexpectedEOF):
×
4472
        case errors.Is(err, io.EOF):
×
4473
        case err != nil:
×
4474
                return models.ChannelEdgeInfo{}, err
×
4475
        }
4476

4477
        return edgeInfo, nil
7,283✔
4478
}
4479

4480
func putChanEdgePolicy(edges kvdb.RwBucket, edge *models.ChannelEdgePolicy,
4481
        from, to []byte) error {
2,670✔
4482

2,670✔
4483
        var edgeKey [33 + 8]byte
2,670✔
4484
        copy(edgeKey[:], from)
2,670✔
4485
        byteOrder.PutUint64(edgeKey[33:], edge.ChannelID)
2,670✔
4486

2,670✔
4487
        var b bytes.Buffer
2,670✔
4488
        if err := serializeChanEdgePolicy(&b, edge, to); err != nil {
2,670✔
4489
                return err
×
4490
        }
×
4491

4492
        // Before we write out the new edge, we'll create a new entry in the
4493
        // update index in order to keep it fresh.
4494
        updateUnix := uint64(edge.LastUpdate.Unix())
2,670✔
4495
        var indexKey [8 + 8]byte
2,670✔
4496
        byteOrder.PutUint64(indexKey[:8], updateUnix)
2,670✔
4497
        byteOrder.PutUint64(indexKey[8:], edge.ChannelID)
2,670✔
4498

2,670✔
4499
        updateIndex, err := edges.CreateBucketIfNotExists(edgeUpdateIndexBucket)
2,670✔
4500
        if err != nil {
2,670✔
4501
                return err
×
4502
        }
×
4503

4504
        // If there was already an entry for this edge, then we'll need to
4505
        // delete the old one to ensure we don't leave around any after-images.
4506
        // An unknown policy value does not have a update time recorded, so
4507
        // it also does not need to be removed.
4508
        if edgeBytes := edges.Get(edgeKey[:]); edgeBytes != nil &&
2,670✔
4509
                !bytes.Equal(edgeBytes, unknownPolicy) {
2,700✔
4510

30✔
4511
                // In order to delete the old entry, we'll need to obtain the
30✔
4512
                // *prior* update time in order to delete it. To do this, we'll
30✔
4513
                // need to deserialize the existing policy within the database
30✔
4514
                // (now outdated by the new one), and delete its corresponding
30✔
4515
                // entry within the update index. We'll ignore any
30✔
4516
                // ErrEdgePolicyOptionalFieldNotFound or ErrParsingExtraTLVBytes
30✔
4517
                // errors, as we only need the channel ID and update time to
30✔
4518
                // delete the entry.
30✔
4519
                //
30✔
4520
                // TODO(halseth): get rid of these invalid policies in a
30✔
4521
                // migration.
30✔
4522
                //
30✔
4523
                // NOTE: the above TODO was completed in the SQL migration and
30✔
4524
                // so such edge cases no longer need to be handled there.
30✔
4525
                oldEdgePolicy, err := deserializeChanEdgePolicy(
30✔
4526
                        bytes.NewReader(edgeBytes),
30✔
4527
                )
30✔
4528
                if err != nil &&
30✔
4529
                        !errors.Is(err, ErrEdgePolicyOptionalFieldNotFound) &&
30✔
4530
                        !errors.Is(err, ErrParsingExtraTLVBytes) {
30✔
4531

×
4532
                        return err
×
4533
                }
×
4534

4535
                oldUpdateTime := uint64(oldEdgePolicy.LastUpdate.Unix())
30✔
4536

30✔
4537
                var oldIndexKey [8 + 8]byte
30✔
4538
                byteOrder.PutUint64(oldIndexKey[:8], oldUpdateTime)
30✔
4539
                byteOrder.PutUint64(oldIndexKey[8:], edge.ChannelID)
30✔
4540

30✔
4541
                if err := updateIndex.Delete(oldIndexKey[:]); err != nil {
30✔
4542
                        return err
×
4543
                }
×
4544
        }
4545

4546
        if err := updateIndex.Put(indexKey[:], nil); err != nil {
2,670✔
4547
                return err
×
4548
        }
×
4549

4550
        err = updateEdgePolicyDisabledIndex(
2,670✔
4551
                edges, edge.ChannelID,
2,670✔
4552
                edge.ChannelFlags&lnwire.ChanUpdateDirection > 0,
2,670✔
4553
                edge.IsDisabled(),
2,670✔
4554
        )
2,670✔
4555
        if err != nil {
2,670✔
4556
                return err
×
4557
        }
×
4558

4559
        return edges.Put(edgeKey[:], b.Bytes())
2,670✔
4560
}
4561

4562
// updateEdgePolicyDisabledIndex is used to update the disabledEdgePolicyIndex
4563
// bucket by either add a new disabled ChannelEdgePolicy or remove an existing
4564
// one.
4565
// The direction represents the direction of the edge and disabled is used for
4566
// deciding whether to remove or add an entry to the bucket.
4567
// In general a channel is disabled if two entries for the same chanID exist
4568
// in this bucket.
4569
// Maintaining the bucket this way allows a fast retrieval of disabled
4570
// channels, for example when prune is needed.
4571
func updateEdgePolicyDisabledIndex(edges kvdb.RwBucket, chanID uint64,
4572
        direction bool, disabled bool) error {
2,944✔
4573

2,944✔
4574
        var disabledEdgeKey [8 + 1]byte
2,944✔
4575
        byteOrder.PutUint64(disabledEdgeKey[0:], chanID)
2,944✔
4576
        if direction {
4,414✔
4577
                disabledEdgeKey[8] = 1
1,470✔
4578
        }
1,470✔
4579

4580
        disabledEdgePolicyIndex, err := edges.CreateBucketIfNotExists(
2,944✔
4581
                disabledEdgePolicyBucket,
2,944✔
4582
        )
2,944✔
4583
        if err != nil {
2,944✔
4584
                return err
×
4585
        }
×
4586

4587
        if disabled {
2,973✔
4588
                return disabledEdgePolicyIndex.Put(disabledEdgeKey[:], []byte{})
29✔
4589
        }
29✔
4590

4591
        return disabledEdgePolicyIndex.Delete(disabledEdgeKey[:])
2,918✔
4592
}
4593

4594
// putChanEdgePolicyUnknown marks the edge policy as unknown
4595
// in the edges bucket.
4596
func putChanEdgePolicyUnknown(edges kvdb.RwBucket, channelID uint64,
4597
        from []byte) error {
2,973✔
4598

2,973✔
4599
        var edgeKey [33 + 8]byte
2,973✔
4600
        copy(edgeKey[:], from)
2,973✔
4601
        byteOrder.PutUint64(edgeKey[33:], channelID)
2,973✔
4602

2,973✔
4603
        if edges.Get(edgeKey[:]) != nil {
2,973✔
4604
                return fmt.Errorf("cannot write unknown policy for channel %v "+
×
4605
                        " when there is already a policy present", channelID)
×
4606
        }
×
4607

4608
        return edges.Put(edgeKey[:], unknownPolicy)
2,973✔
4609
}
4610

4611
func fetchChanEdgePolicy(edges kvdb.RBucket, chanID []byte,
4612
        nodePub []byte) (*models.ChannelEdgePolicy, error) {
13,491✔
4613

13,491✔
4614
        var edgeKey [33 + 8]byte
13,491✔
4615
        copy(edgeKey[:], nodePub)
13,491✔
4616
        copy(edgeKey[33:], chanID)
13,491✔
4617

13,491✔
4618
        edgeBytes := edges.Get(edgeKey[:])
13,491✔
4619
        if edgeBytes == nil {
13,491✔
4620
                return nil, ErrEdgeNotFound
×
4621
        }
×
4622

4623
        // No need to deserialize unknown policy.
4624
        if bytes.Equal(edgeBytes, unknownPolicy) {
15,005✔
4625
                return nil, nil
1,514✔
4626
        }
1,514✔
4627

4628
        edgeReader := bytes.NewReader(edgeBytes)
11,980✔
4629

11,980✔
4630
        ep, err := deserializeChanEdgePolicy(edgeReader)
11,980✔
4631
        switch {
11,980✔
4632
        // If the db policy was missing an expected optional field, we return
4633
        // nil as if the policy was unknown.
4634
        case errors.Is(err, ErrEdgePolicyOptionalFieldNotFound):
2✔
4635
                return nil, nil
2✔
4636

4637
        // If the policy contains invalid TLV bytes, we return nil as if
4638
        // the policy was unknown.
4639
        case errors.Is(err, ErrParsingExtraTLVBytes):
×
4640
                return nil, nil
×
4641

4642
        case err != nil:
×
4643
                return nil, err
×
4644
        }
4645

4646
        return ep, nil
11,978✔
4647
}
4648

4649
func fetchChanEdgePolicies(edgeIndex kvdb.RBucket, edges kvdb.RBucket,
4650
        chanID []byte) (*models.ChannelEdgePolicy, *models.ChannelEdgePolicy,
4651
        error) {
2,905✔
4652

2,905✔
4653
        edgeInfo := edgeIndex.Get(chanID)
2,905✔
4654
        if edgeInfo == nil {
2,905✔
4655
                return nil, nil, fmt.Errorf("%w: chanID=%x", ErrEdgeNotFound,
×
4656
                        chanID)
×
4657
        }
×
4658

4659
        // The first node is contained within the first half of the edge
4660
        // information. We only propagate the error here and below if it's
4661
        // something other than edge non-existence.
4662
        node1Pub := edgeInfo[:33]
2,905✔
4663
        edge1, err := fetchChanEdgePolicy(edges, chanID, node1Pub)
2,905✔
4664
        if err != nil {
2,905✔
4665
                return nil, nil, fmt.Errorf("%w: node1Pub=%x", ErrEdgeNotFound,
×
4666
                        node1Pub)
×
4667
        }
×
4668

4669
        // Similarly, the second node is contained within the latter
4670
        // half of the edge information.
4671
        node2Pub := edgeInfo[33:66]
2,905✔
4672
        edge2, err := fetchChanEdgePolicy(edges, chanID, node2Pub)
2,905✔
4673
        if err != nil {
2,905✔
4674
                return nil, nil, fmt.Errorf("%w: node2Pub=%x", ErrEdgeNotFound,
×
4675
                        node2Pub)
×
4676
        }
×
4677

4678
        return edge1, edge2, nil
2,905✔
4679
}
4680

4681
func serializeChanEdgePolicy(w io.Writer, edge *models.ChannelEdgePolicy,
4682
        to []byte) error {
2,672✔
4683

2,672✔
4684
        err := wire.WriteVarBytes(w, 0, edge.SigBytes)
2,672✔
4685
        if err != nil {
2,672✔
4686
                return err
×
4687
        }
×
4688

4689
        if err := binary.Write(w, byteOrder, edge.ChannelID); err != nil {
2,672✔
4690
                return err
×
4691
        }
×
4692

4693
        var scratch [8]byte
2,672✔
4694
        updateUnix := uint64(edge.LastUpdate.Unix())
2,672✔
4695
        byteOrder.PutUint64(scratch[:], updateUnix)
2,672✔
4696
        if _, err := w.Write(scratch[:]); err != nil {
2,672✔
4697
                return err
×
4698
        }
×
4699

4700
        if err := binary.Write(w, byteOrder, edge.MessageFlags); err != nil {
2,672✔
4701
                return err
×
4702
        }
×
4703
        if err := binary.Write(w, byteOrder, edge.ChannelFlags); err != nil {
2,672✔
4704
                return err
×
4705
        }
×
4706
        if err := binary.Write(w, byteOrder, edge.TimeLockDelta); err != nil {
2,672✔
4707
                return err
×
4708
        }
×
4709
        if err := binary.Write(w, byteOrder, uint64(edge.MinHTLC)); err != nil {
2,672✔
4710
                return err
×
4711
        }
×
4712
        err = binary.Write(w, byteOrder, uint64(edge.FeeBaseMSat))
2,672✔
4713
        if err != nil {
2,672✔
4714
                return err
×
4715
        }
×
4716
        err = binary.Write(
2,672✔
4717
                w, byteOrder, uint64(edge.FeeProportionalMillionths),
2,672✔
4718
        )
2,672✔
4719
        if err != nil {
2,672✔
4720
                return err
×
4721
        }
×
4722

4723
        if _, err := w.Write(to); err != nil {
2,672✔
4724
                return err
×
4725
        }
×
4726

4727
        // If the max_htlc field is present, we write it. To be compatible with
4728
        // older versions that wasn't aware of this field, we write it as part
4729
        // of the opaque data.
4730
        // TODO(halseth): clean up when moving to TLV.
4731
        var opaqueBuf bytes.Buffer
2,672✔
4732
        if edge.MessageFlags.HasMaxHtlc() {
4,960✔
4733
                err := binary.Write(&opaqueBuf, byteOrder, uint64(edge.MaxHTLC))
2,288✔
4734
                if err != nil {
2,288✔
4735
                        return err
×
4736
                }
×
4737
        }
4738

4739
        if len(edge.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes {
2,672✔
4740
                return ErrTooManyExtraOpaqueBytes(len(edge.ExtraOpaqueData))
×
4741
        }
×
4742
        if _, err := opaqueBuf.Write(edge.ExtraOpaqueData); err != nil {
2,672✔
4743
                return err
×
4744
        }
×
4745

4746
        if err := wire.WriteVarBytes(w, 0, opaqueBuf.Bytes()); err != nil {
2,672✔
4747
                return err
×
4748
        }
×
4749

4750
        return nil
2,672✔
4751
}
4752

4753
func deserializeChanEdgePolicy(r io.Reader) (*models.ChannelEdgePolicy, error) {
12,008✔
4754
        // Deserialize the policy. Note that in case an optional field is not
12,008✔
4755
        // found or if the edge has invalid TLV data, then both an error and a
12,008✔
4756
        // populated policy object are returned so that the caller can decide
12,008✔
4757
        // if it still wants to use the edge or not.
12,008✔
4758
        edge, err := deserializeChanEdgePolicyRaw(r)
12,008✔
4759
        if err != nil &&
12,008✔
4760
                !errors.Is(err, ErrEdgePolicyOptionalFieldNotFound) &&
12,008✔
4761
                !errors.Is(err, ErrParsingExtraTLVBytes) {
12,008✔
4762

×
4763
                return nil, err
×
4764
        }
×
4765

4766
        return edge, err
12,008✔
4767
}
4768

4769
func deserializeChanEdgePolicyRaw(r io.Reader) (*models.ChannelEdgePolicy,
4770
        error) {
13,021✔
4771

13,021✔
4772
        edge := &models.ChannelEdgePolicy{}
13,021✔
4773

13,021✔
4774
        var err error
13,021✔
4775
        edge.SigBytes, err = wire.ReadVarBytes(r, 0, 80, "sig")
13,021✔
4776
        if err != nil {
13,021✔
4777
                return nil, err
×
4778
        }
×
4779

4780
        if err := binary.Read(r, byteOrder, &edge.ChannelID); err != nil {
13,021✔
4781
                return nil, err
×
4782
        }
×
4783

4784
        var scratch [8]byte
13,021✔
4785
        if _, err := r.Read(scratch[:]); err != nil {
13,021✔
4786
                return nil, err
×
4787
        }
×
4788
        unix := int64(byteOrder.Uint64(scratch[:]))
13,021✔
4789
        edge.LastUpdate = time.Unix(unix, 0)
13,021✔
4790

13,021✔
4791
        if err := binary.Read(r, byteOrder, &edge.MessageFlags); err != nil {
13,021✔
4792
                return nil, err
×
4793
        }
×
4794
        if err := binary.Read(r, byteOrder, &edge.ChannelFlags); err != nil {
13,021✔
4795
                return nil, err
×
4796
        }
×
4797
        if err := binary.Read(r, byteOrder, &edge.TimeLockDelta); err != nil {
13,021✔
4798
                return nil, err
×
4799
        }
×
4800

4801
        var n uint64
13,021✔
4802
        if err := binary.Read(r, byteOrder, &n); err != nil {
13,021✔
4803
                return nil, err
×
4804
        }
×
4805
        edge.MinHTLC = lnwire.MilliSatoshi(n)
13,021✔
4806

13,021✔
4807
        if err := binary.Read(r, byteOrder, &n); err != nil {
13,021✔
4808
                return nil, err
×
4809
        }
×
4810
        edge.FeeBaseMSat = lnwire.MilliSatoshi(n)
13,021✔
4811

13,021✔
4812
        if err := binary.Read(r, byteOrder, &n); err != nil {
13,021✔
4813
                return nil, err
×
4814
        }
×
4815
        edge.FeeProportionalMillionths = lnwire.MilliSatoshi(n)
13,021✔
4816

13,021✔
4817
        if _, err := r.Read(edge.ToNode[:]); err != nil {
13,021✔
4818
                return nil, err
×
4819
        }
×
4820

4821
        // We'll try and see if there are any opaque bytes left, if not, then
4822
        // we'll ignore the EOF error and return the edge as is.
4823
        edge.ExtraOpaqueData, err = wire.ReadVarBytes(
13,021✔
4824
                r, 0, MaxAllowedExtraOpaqueBytes, "blob",
13,021✔
4825
        )
13,021✔
4826
        switch {
13,021✔
4827
        case errors.Is(err, io.ErrUnexpectedEOF):
×
4828
        case errors.Is(err, io.EOF):
4✔
4829
        case err != nil:
×
4830
                return nil, err
×
4831
        }
4832

4833
        // See if optional fields are present.
4834
        if edge.MessageFlags.HasMaxHtlc() {
25,068✔
4835
                // The max_htlc field should be at the beginning of the opaque
12,047✔
4836
                // bytes.
12,047✔
4837
                opq := edge.ExtraOpaqueData
12,047✔
4838

12,047✔
4839
                // If the max_htlc field is not present, it might be old data
12,047✔
4840
                // stored before this field was validated. We'll return the
12,047✔
4841
                // edge along with an error.
12,047✔
4842
                if len(opq) < 8 {
12,051✔
4843
                        return edge, ErrEdgePolicyOptionalFieldNotFound
4✔
4844
                }
4✔
4845

4846
                maxHtlc := byteOrder.Uint64(opq[:8])
12,043✔
4847
                edge.MaxHTLC = lnwire.MilliSatoshi(maxHtlc)
12,043✔
4848

12,043✔
4849
                // Exclude the parsed field from the rest of the opaque data.
12,043✔
4850
                edge.ExtraOpaqueData = opq[8:]
12,043✔
4851
        }
4852

4853
        // Attempt to extract the inbound fee from the opaque data. If we fail
4854
        // to parse the TLV here, we return an error we also return the edge
4855
        // so that the caller can still use it. This is for backwards
4856
        // compatibility in case we have already persisted some policies that
4857
        // have invalid TLV data.
4858
        var inboundFee lnwire.Fee
13,017✔
4859
        typeMap, err := edge.ExtraOpaqueData.ExtractRecords(&inboundFee)
13,017✔
4860
        if err != nil {
13,017✔
4861
                return edge, fmt.Errorf("%w: %w", ErrParsingExtraTLVBytes, err)
×
4862
        }
×
4863

4864
        val, ok := typeMap[lnwire.FeeRecordType]
13,017✔
4865
        if ok && val == nil {
14,756✔
4866
                edge.InboundFee = fn.Some(inboundFee)
1,739✔
4867
        }
1,739✔
4868

4869
        return edge, nil
13,017✔
4870
}
4871

4872
// chanGraphNodeTx is an implementation of the NodeRTx interface backed by the
// KVStore and a kvdb.RTx.
type chanGraphNodeTx struct {
	// tx is the open read transaction under which all queries made through
	// this instance are executed.
	tx   kvdb.RTx

	// db is the backing KVStore that tx was opened against.
	db   *KVStore

	// node holds the raw node information that was fetched under tx.
	node *models.LightningNode
}

// A compile-time constraint to ensure chanGraphNodeTx implements the NodeRTx
// interface.
var _ NodeRTx = (*chanGraphNodeTx)(nil)
4883

4884
func newChanGraphNodeTx(tx kvdb.RTx, db *KVStore,
4885
        node *models.LightningNode) *chanGraphNodeTx {
4,105✔
4886

4,105✔
4887
        return &chanGraphNodeTx{
4,105✔
4888
                tx:   tx,
4,105✔
4889
                db:   db,
4,105✔
4890
                node: node,
4,105✔
4891
        }
4,105✔
4892
}
4,105✔
4893

4894
// Node returns the raw information of the node.
4895
//
4896
// NOTE: This is a part of the NodeRTx interface.
4897
func (c *chanGraphNodeTx) Node() *models.LightningNode {
5,022✔
4898
        return c.node
5,022✔
4899
}
5,022✔
4900

4901
// FetchNode fetches the node with the given pub key under the same transaction
4902
// used to fetch the current node. The returned node is also a NodeRTx and any
4903
// operations on that NodeRTx will also be done under the same transaction.
4904
//
4905
// NOTE: This is a part of the NodeRTx interface.
4906
func (c *chanGraphNodeTx) FetchNode(nodePub route.Vertex) (NodeRTx, error) {
2,944✔
4907
        node, err := c.db.FetchLightningNodeTx(c.tx, nodePub)
2,944✔
4908
        if err != nil {
2,944✔
4909
                return nil, err
×
4910
        }
×
4911

4912
        return newChanGraphNodeTx(c.tx, c.db, node), nil
2,944✔
4913
}
4914

4915
// ForEachChannel can be used to iterate over the node's channels under
4916
// the same transaction used to fetch the node.
4917
//
4918
// NOTE: This is a part of the NodeRTx interface.
4919
func (c *chanGraphNodeTx) ForEachChannel(f func(*models.ChannelEdgeInfo,
4920
        *models.ChannelEdgePolicy, *models.ChannelEdgePolicy) error) error {
965✔
4921

965✔
4922
        return c.db.forEachNodeChannelTx(
965✔
4923
                c.tx, c.node.PubKeyBytes,
965✔
4924
                func(_ kvdb.RTx, info *models.ChannelEdgeInfo, policy1,
965✔
4925
                        policy2 *models.ChannelEdgePolicy) error {
3,909✔
4926

2,944✔
4927
                        return f(info, policy1, policy2)
2,944✔
4928
                },
2,944✔
4929
                // NOTE: We don't need to reset anything here as the caller is
4930
                // expected to pass in the reset function to the ForEachNode
4931
                // method that constructed the chanGraphNodeTx.
NEW
4932
                func() {},
×
4933
        )
4934
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc