• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

lightningnetwork / lnd / 13436819071

20 Feb 2025 01:51PM UTC coverage: 58.791% (-0.003%) from 58.794%
13436819071

Pull #9533

github

ellemouton
graph/db: fix linter issues of old code

Since we have renamed a file housing some very old code, the linter has
now run on all this code for the first time. So we gotta do some
clean-up work here to make it happy.
Pull Request #9533: graph: extract cache from CRUD [1]

2490 of 3214 new or added lines in 2 files covered. (77.47%)

44 existing lines in 13 files now uncovered.

136104 of 231505 relevant lines covered (58.79%)

19306.86 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

77.45
/graph/db/kv_store.go
1
package graphdb
2

3
import (
4
        "bytes"
5
        "crypto/sha256"
6
        "encoding/binary"
7
        "errors"
8
        "fmt"
9
        "io"
10
        "math"
11
        "net"
12
        "sort"
13
        "sync"
14
        "testing"
15
        "time"
16

17
        "github.com/btcsuite/btcd/btcec/v2"
18
        "github.com/btcsuite/btcd/chaincfg/chainhash"
19
        "github.com/btcsuite/btcd/txscript"
20
        "github.com/btcsuite/btcd/wire"
21
        "github.com/btcsuite/btcwallet/walletdb"
22
        "github.com/lightningnetwork/lnd/aliasmgr"
23
        "github.com/lightningnetwork/lnd/batch"
24
        "github.com/lightningnetwork/lnd/graph/db/models"
25
        "github.com/lightningnetwork/lnd/input"
26
        "github.com/lightningnetwork/lnd/kvdb"
27
        "github.com/lightningnetwork/lnd/lnwire"
28
        "github.com/lightningnetwork/lnd/routing/route"
29
)
30

31
var (
	// nodeBucket is a bucket which houses all the vertices or nodes within
	// the channel graph. This bucket has a single sub-bucket which adds an
	// additional index from pubkey -> alias. Within the top-level of this
	// bucket, the key space maps a node's compressed public key to the
	// serialized information for that node. Additionally, there's a
	// special key "source" which stores the pubkey of the source node. The
	// source node is used as the starting point for all graph queries and
	// traversals. The graph is formed as a star-graph with the source node
	// at the center.
	//
	// maps: pubKey -> nodeInfo
	// maps: source -> selfPubKey
	nodeBucket = []byte("graph-node")

	// nodeUpdateIndexBucket is a sub-bucket of the nodeBucket. This bucket
	// will be used to quickly look up the "freshness" of a node's last
	// update to the network. The bucket only contains keys, and no values,
	// it's mapping:
	//
	// maps: updateTime || nodeID -> nil
	nodeUpdateIndexBucket = []byte("graph-node-update-index")

	// sourceKey is a special key that resides within the nodeBucket. The
	// sourceKey maps a key to the public key of the "self node".
	sourceKey = []byte("source")

	// aliasIndexBucket is a sub-bucket that's nested within the main
	// nodeBucket. This bucket maps the public key of a node to its
	// current alias. This bucket is provided as it can be used within a
	// future UI layer to add an additional degree of confirmation.
	aliasIndexBucket = []byte("alias")

	// edgeBucket is a bucket which houses all of the edge or channel
	// information within the channel graph. This bucket essentially acts
	// as an adjacency list, which in conjunction with a range scan, can be
	// used to iterate over all the incoming and outgoing edges for a
	// particular node. Keys in the bucket use a prefix scheme which leads
	// with the node's public key and ends with the compact edge ID.
	// For each chanID, there will be two entries within the bucket, as the
	// graph is directed: nodes may have different policies w.r.t to fees
	// for their respective directions.
	//
	// maps: pubKey || chanID -> channel edge policy for node
	edgeBucket = []byte("graph-edge")

	// unknownPolicy is represented as an empty slice. It is
	// used as the value in edgeBucket for unknown channel edge policies.
	// Unknown policies are still stored in the database to enable efficient
	// lookup of incoming channel edges.
	unknownPolicy = []byte{}

	// chanStart is an array of all zero bytes which is used to perform
	// range scans within the edgeBucket to obtain all of the outgoing
	// edges for a particular node.
	chanStart [8]byte

	// edgeIndexBucket is an index which can be used to iterate all edges
	// in the bucket, grouping them according to their in/out nodes.
	// Additionally, the items in this bucket also contain the complete
	// edge information for a channel. The edge information includes the
	// capacity of the channel, the nodes that made the channel, etc. This
	// bucket resides within the edgeBucket above. Creation of an edge
	// proceeds in two phases: first the edge is added to the edge index,
	// afterwards the edgeBucket can be updated with the latest details of
	// the edge as they are announced on the network.
	//
	// maps: chanID -> pubKey1 || pubKey2 || restofEdgeInfo
	edgeIndexBucket = []byte("edge-index")

	// edgeUpdateIndexBucket is a sub-bucket of the main edgeBucket. This
	// bucket contains an index which allows us to gauge the "freshness" of
	// a channel's last updates.
	//
	// maps: updateTime || chanID -> nil
	edgeUpdateIndexBucket = []byte("edge-update-index")

	// channelPointBucket maps a channel's full outpoint (txid:index) to
	// its short 8-byte channel ID. This bucket resides within the
	// edgeBucket above, and can be used to quickly remove an edge due to
	// the outpoint being spent, or to query for existence of a channel.
	//
	// maps: outPoint -> chanID
	channelPointBucket = []byte("chan-index")

	// zombieBucket is a sub-bucket of the main edgeBucket bucket
	// responsible for maintaining an index of zombie channels. Each entry
	// exists within the bucket as follows:
	//
	// maps: chanID -> pubKey1 || pubKey2
	//
	// The chanID represents the channel ID of the edge that is marked as a
	// zombie and is used as the key, which maps to the public keys of the
	// edge's participants.
	zombieBucket = []byte("zombie-index")

	// disabledEdgePolicyBucket is a sub-bucket of the main edgeBucket
	// bucket responsible for maintaining an index of disabled edge
	// policies. Each entry exists within the bucket as follows:
	//
	// maps: <chanID><direction> -> []byte{}
	//
	// The chanID represents the channel ID of the edge and the direction is
	// one byte representing the direction of the edge. The main purpose of
	// this index is to allow pruning disabled channels in a fast way
	// without the need to iterate all over the graph.
	disabledEdgePolicyBucket = []byte("disabled-edge-policy-index")

	// graphMetaBucket is a top-level bucket which stores various meta-data
	// related to the on-disk channel graph. Data stored in this bucket
	// includes the block to which the graph has been synced to, the total
	// number of channels, etc.
	graphMetaBucket = []byte("graph-meta")

	// pruneLogBucket is a bucket within the graphMetaBucket that stores
	// a mapping from the block height to the hash for the blocks used to
	// prune the graph.
	// Once a new block is discovered, any channels that have been closed
	// (by spending the outpoint) can safely be removed from the graph, and
	// the block is added to the prune log. We need to keep such a log for
	// the case where a reorg happens, and we must "rewind" the state of the
	// graph by removing channels that were previously confirmed. In such a
	// case we'll remove all entries from the prune log with a block height
	// that no longer exists.
	pruneLogBucket = []byte("prune-log")

	// closedScidBucket is a top-level bucket that stores scids for
	// channels that we know to be closed. This is used so that we don't
	// need to perform expensive validation checks if we receive a channel
	// announcement for the channel again.
	//
	// maps: scid -> []byte{}
	closedScidBucket = []byte("closed-scid")
)
165

166
const (
	// MaxAllowedExtraOpaqueBytes is the largest amount of opaque bytes that
	// we'll permit to be written to disk. We limit this as otherwise, it
	// would be possible for a node to create a ton of updates and slowly
	// fill our disk, and also waste bandwidth due to relaying.
	MaxAllowedExtraOpaqueBytes = 10000
)
173

174
// KVStore is a persistent, on-disk graph representation of the Lightning
// Network. This struct can be used to implement path finding algorithms on top
// of, and also to update a node's view based on information received from the
// p2p network. Internally, the graph is stored using a modified adjacency list
// representation with some added object interaction possible with each
// serialized edge/node. The graph stored is directed, meaning that there are
// two edges stored for each channel: an inbound/outbound edge for each node
// pair. Nodes, edges, and edge information can all be added to the graph
// independently. Edge removal results in the deletion of all edge information
// for that edge.
type KVStore struct {
	// db is the backing key-value store for the graph.
	db kvdb.Backend

	// cacheMu guards all caches (rejectCache, chanCache, graphCache). If
	// this mutex will be acquired at the same time as the DB mutex then
	// the cacheMu MUST be acquired first to prevent deadlock.
	cacheMu     sync.RWMutex
	rejectCache *rejectCache
	chanCache   *channelCache
	graphCache  *GraphCache

	// chanScheduler and nodeScheduler batch channel and node writes
	// respectively.
	chanScheduler batch.Scheduler
	nodeScheduler batch.Scheduler
}
198

199
// NewKVStore allocates a new KVStore backed by a DB instance. The
200
// returned instance has its own unique reject cache and channel cache.
201
func NewKVStore(db kvdb.Backend, options ...OptionModifier) (*KVStore,
202
        error) {
176✔
203

176✔
204
        opts := DefaultOptions()
176✔
205
        for _, o := range options {
281✔
206
                o(opts)
105✔
207
        }
105✔
208

209
        if !opts.NoMigration {
352✔
210
                if err := initKVStore(db); err != nil {
176✔
NEW
211
                        return nil, err
×
NEW
212
                }
×
213
        }
214

215
        g := &KVStore{
176✔
216
                db:          db,
176✔
217
                rejectCache: newRejectCache(opts.RejectCacheSize),
176✔
218
                chanCache:   newChannelCache(opts.ChannelCacheSize),
176✔
219
        }
176✔
220
        g.chanScheduler = batch.NewTimeScheduler(
176✔
221
                db, &g.cacheMu, opts.BatchCommitInterval,
176✔
222
        )
176✔
223
        g.nodeScheduler = batch.NewTimeScheduler(
176✔
224
                db, nil, opts.BatchCommitInterval,
176✔
225
        )
176✔
226

176✔
227
        // The graph cache can be turned off (e.g. for mobile users) for a
176✔
228
        // speed/memory usage tradeoff.
176✔
229
        if opts.UseGraphCache {
319✔
230
                g.graphCache = NewGraphCache(opts.PreAllocCacheNumNodes)
143✔
231
                startTime := time.Now()
143✔
232
                log.Debugf("Populating in-memory channel graph, this might " +
143✔
233
                        "take a while...")
143✔
234

143✔
235
                err := g.ForEachNodeCacheable(func(node route.Vertex,
143✔
236
                        features *lnwire.FeatureVector) error {
246✔
237

103✔
238
                        g.graphCache.AddNodeFeatures(node, features)
103✔
239

103✔
240
                        return nil
103✔
241
                })
103✔
242
                if err != nil {
143✔
NEW
243
                        return nil, err
×
NEW
244
                }
×
245

246
                err = g.ForEachChannel(func(info *models.ChannelEdgeInfo,
143✔
247
                        policy1, policy2 *models.ChannelEdgePolicy) error {
542✔
248

399✔
249
                        g.graphCache.AddChannel(info, policy1, policy2)
399✔
250

399✔
251
                        return nil
399✔
252
                })
399✔
253
                if err != nil {
143✔
NEW
254
                        return nil, err
×
NEW
255
                }
×
256

257
                log.Debugf("Finished populating in-memory channel graph (took "+
143✔
258
                        "%v, %s)", time.Since(startTime), g.graphCache.Stats())
143✔
259
        }
260

261
        return g, nil
176✔
262
}
263

264
// channelMapKey is the key structure used for storing channel edge policies.
type channelMapKey struct {
	// nodeKey is the 33-byte compressed public key of the node that the
	// policy belongs to.
	nodeKey route.Vertex

	// chanID is the 8-byte compact channel ID of the edge.
	chanID [8]byte
}
269

270
// getChannelMap loads all channel edge policies from the database and stores
271
// them in a map.
272
func (c *KVStore) getChannelMap(edges kvdb.RBucket) (
273
        map[channelMapKey]*models.ChannelEdgePolicy, error) {
147✔
274

147✔
275
        // Create a map to store all channel edge policies.
147✔
276
        channelMap := make(map[channelMapKey]*models.ChannelEdgePolicy)
147✔
277

147✔
278
        err := kvdb.ForAll(edges, func(k, edgeBytes []byte) error {
1,721✔
279
                // Skip embedded buckets.
1,574✔
280
                if bytes.Equal(k, edgeIndexBucket) ||
1,574✔
281
                        bytes.Equal(k, edgeUpdateIndexBucket) ||
1,574✔
282
                        bytes.Equal(k, zombieBucket) ||
1,574✔
283
                        bytes.Equal(k, disabledEdgePolicyBucket) ||
1,574✔
284
                        bytes.Equal(k, channelPointBucket) {
2,158✔
285

584✔
286
                        return nil
584✔
287
                }
584✔
288

289
                // Validate key length.
290
                if len(k) != 33+8 {
993✔
NEW
291
                        return fmt.Errorf("invalid edge key %x encountered", k)
×
NEW
292
                }
×
293

294
                var key channelMapKey
993✔
295
                copy(key.nodeKey[:], k[:33])
993✔
296
                copy(key.chanID[:], k[33:])
993✔
297

993✔
298
                // No need to deserialize unknown policy.
993✔
299
                if bytes.Equal(edgeBytes, unknownPolicy) {
993✔
NEW
300
                        return nil
×
NEW
301
                }
×
302

303
                edgeReader := bytes.NewReader(edgeBytes)
993✔
304
                edge, err := deserializeChanEdgePolicyRaw(
993✔
305
                        edgeReader,
993✔
306
                )
993✔
307

993✔
308
                switch {
993✔
309
                // If the db policy was missing an expected optional field, we
310
                // return nil as if the policy was unknown.
NEW
311
                case errors.Is(err, ErrEdgePolicyOptionalFieldNotFound):
×
NEW
312
                        return nil
×
313

NEW
314
                case err != nil:
×
NEW
315
                        return err
×
316
                }
317

318
                channelMap[key] = edge
993✔
319

993✔
320
                return nil
993✔
321
        })
322
        if err != nil {
147✔
NEW
323
                return nil, err
×
NEW
324
        }
×
325

326
        return channelMap, nil
147✔
327
}
328

329
// graphTopLevelBuckets is the set of all top-level buckets that make up the
// on-disk channel graph. It is used when wiping and re-initializing the
// store.
var graphTopLevelBuckets = [][]byte{
	nodeBucket,
	edgeBucket,
	graphMetaBucket,
	closedScidBucket,
}
335

336
// Wipe completely deletes all saved state within all used buckets within the
337
// database. The deletion is done in a single transaction, therefore this
338
// operation is fully atomic.
NEW
339
func (c *KVStore) Wipe() error {
×
NEW
340
        err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
×
NEW
341
                for _, tlb := range graphTopLevelBuckets {
×
NEW
342
                        err := tx.DeleteTopLevelBucket(tlb)
×
NEW
343
                        if err != nil &&
×
NEW
344
                                !errors.Is(err, kvdb.ErrBucketNotFound) {
×
NEW
345

×
NEW
346
                                return err
×
NEW
347
                        }
×
348
                }
349

NEW
350
                return nil
×
NEW
351
        }, func() {})
×
NEW
352
        if err != nil {
×
NEW
353
                return err
×
NEW
354
        }
×
355

NEW
356
        return initKVStore(c.db)
×
357
}
358

359
// createChannelDB creates and initializes a fresh version of  In
360
// the case that the target path has not yet been created or doesn't yet exist,
361
// then the path is created. Additionally, all required top-level buckets used
362
// within the database are created.
363
func initKVStore(db kvdb.Backend) error {
176✔
364
        err := kvdb.Update(db, func(tx kvdb.RwTx) error {
352✔
365
                for _, tlb := range graphTopLevelBuckets {
871✔
366
                        if _, err := tx.CreateTopLevelBucket(tlb); err != nil {
695✔
NEW
367
                                return err
×
NEW
368
                        }
×
369
                }
370

371
                nodes := tx.ReadWriteBucket(nodeBucket)
176✔
372
                _, err := nodes.CreateBucketIfNotExists(aliasIndexBucket)
176✔
373
                if err != nil {
176✔
NEW
374
                        return err
×
NEW
375
                }
×
376
                _, err = nodes.CreateBucketIfNotExists(nodeUpdateIndexBucket)
176✔
377
                if err != nil {
176✔
NEW
378
                        return err
×
NEW
379
                }
×
380

381
                edges := tx.ReadWriteBucket(edgeBucket)
176✔
382
                _, err = edges.CreateBucketIfNotExists(edgeIndexBucket)
176✔
383
                if err != nil {
176✔
NEW
384
                        return err
×
NEW
385
                }
×
386
                _, err = edges.CreateBucketIfNotExists(edgeUpdateIndexBucket)
176✔
387
                if err != nil {
176✔
NEW
388
                        return err
×
NEW
389
                }
×
390
                _, err = edges.CreateBucketIfNotExists(channelPointBucket)
176✔
391
                if err != nil {
176✔
NEW
392
                        return err
×
NEW
393
                }
×
394
                _, err = edges.CreateBucketIfNotExists(zombieBucket)
176✔
395
                if err != nil {
176✔
NEW
396
                        return err
×
NEW
397
                }
×
398

399
                graphMeta := tx.ReadWriteBucket(graphMetaBucket)
176✔
400
                _, err = graphMeta.CreateBucketIfNotExists(pruneLogBucket)
176✔
401

176✔
402
                return err
176✔
403
        }, func() {})
176✔
404
        if err != nil {
176✔
NEW
405
                return fmt.Errorf("unable to create new channel graph: %w", err)
×
NEW
406
        }
×
407

408
        return nil
176✔
409
}
410

411
// AddrsForNode returns all known addresses for the target node public key that
412
// the graph DB is aware of. The returned boolean indicates if the given node is
413
// unknown to the graph DB or not.
414
//
415
// NOTE: this is part of the channeldb.AddrSource interface.
416
func (c *KVStore) AddrsForNode(nodePub *btcec.PublicKey) (bool, []net.Addr,
417
        error) {
4✔
418

4✔
419
        pubKey, err := route.NewVertexFromBytes(nodePub.SerializeCompressed())
4✔
420
        if err != nil {
4✔
NEW
421
                return false, nil, err
×
NEW
422
        }
×
423

424
        node, err := c.FetchLightningNode(pubKey)
4✔
425
        // We don't consider it an error if the graph is unaware of the node.
4✔
426
        switch {
4✔
NEW
427
        case err != nil && !errors.Is(err, ErrGraphNodeNotFound):
×
NEW
428
                return false, nil, err
×
429

430
        case errors.Is(err, ErrGraphNodeNotFound):
3✔
431
                return false, nil, nil
3✔
432
        }
433

434
        return true, node.Addresses, nil
4✔
435
}
436

437
// ForEachChannel iterates through all the channel edges stored within the
438
// graph and invokes the passed callback for each edge. The callback takes two
439
// edges as since this is a directed graph, both the in/out edges are visited.
440
// If the callback returns an error, then the transaction is aborted and the
441
// iteration stops early.
442
//
443
// NOTE: If an edge can't be found, or wasn't advertised, then a nil pointer
444
// for that particular channel edge routing policy will be passed into the
445
// callback.
446
func (c *KVStore) ForEachChannel(cb func(*models.ChannelEdgeInfo,
447
        *models.ChannelEdgePolicy, *models.ChannelEdgePolicy) error) error {
147✔
448

147✔
449
        return c.db.View(func(tx kvdb.RTx) error {
294✔
450
                edges := tx.ReadBucket(edgeBucket)
147✔
451
                if edges == nil {
147✔
NEW
452
                        return ErrGraphNoEdgesFound
×
NEW
453
                }
×
454

455
                // First, load all edges in memory indexed by node and channel
456
                // id.
457
                channelMap, err := c.getChannelMap(edges)
147✔
458
                if err != nil {
147✔
NEW
459
                        return err
×
NEW
460
                }
×
461

462
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
147✔
463
                if edgeIndex == nil {
147✔
NEW
464
                        return ErrGraphNoEdgesFound
×
NEW
465
                }
×
466

467
                // Load edge index, recombine each channel with the policies
468
                // loaded above and invoke the callback.
469
                return kvdb.ForAll(
147✔
470
                        edgeIndex, func(k, edgeInfoBytes []byte) error {
645✔
471
                                var chanID [8]byte
498✔
472
                                copy(chanID[:], k)
498✔
473

498✔
474
                                edgeInfoReader := bytes.NewReader(edgeInfoBytes)
498✔
475
                                info, err := deserializeChanEdgeInfo(
498✔
476
                                        edgeInfoReader,
498✔
477
                                )
498✔
478
                                if err != nil {
498✔
NEW
479
                                        return err
×
NEW
480
                                }
×
481

482
                                policy1 := channelMap[channelMapKey{
498✔
483
                                        nodeKey: info.NodeKey1Bytes,
498✔
484
                                        chanID:  chanID,
498✔
485
                                }]
498✔
486

498✔
487
                                policy2 := channelMap[channelMapKey{
498✔
488
                                        nodeKey: info.NodeKey2Bytes,
498✔
489
                                        chanID:  chanID,
498✔
490
                                }]
498✔
491

498✔
492
                                return cb(&info, policy1, policy2)
498✔
493
                        },
494
                )
495
        }, func() {})
147✔
496
}
497

498
// forEachNodeDirectedChannel iterates through all channels of a given node,
// executing the passed callback on the directed edge representing the channel
// and its incoming policy. If the callback returns an error, then the iteration
// is halted with the error propagated back up to the caller. An optional read
// transaction may be provided. If none is provided, a new one will be created.
//
// Unknown policies are passed into the callback as nil values.
func (c *KVStore) forEachNodeDirectedChannel(tx kvdb.RTx,
	node route.Vertex, cb func(channel *DirectedChannel) error) error {

	// When the in-memory graph cache is enabled, serve the iteration from
	// it directly and skip the database entirely.
	if c.graphCache != nil {
		return c.graphCache.ForEachChannel(node, cb)
	}

	// Fallback that uses the database.
	toNodeCallback := func() route.Vertex {
		return node
	}
	toNodeFeatures, err := c.fetchNodeFeatures(tx, node)
	if err != nil {
		return err
	}

	// dbCallback adapts each raw edge read from disk into a
	// DirectedChannel viewed from the perspective of `node`.
	dbCallback := func(tx kvdb.RTx, e *models.ChannelEdgeInfo, p1,
		p2 *models.ChannelEdgePolicy) error {

		var cachedInPolicy *models.CachedEdgePolicy
		if p2 != nil {
			cachedInPolicy = models.NewCachedPolicy(p2)
			cachedInPolicy.ToNodePubKey = toNodeCallback
			cachedInPolicy.ToNodeFeatures = toNodeFeatures
		}

		var inboundFee lnwire.Fee
		if p1 != nil {
			// Extract inbound fee. If there is a decoding error,
			// skip this edge.
			_, err := p1.ExtraOpaqueData.ExtractRecords(&inboundFee)
			if err != nil {
				return nil
			}
		}

		directedChannel := &DirectedChannel{
			ChannelID:    e.ChannelID,
			IsNode1:      node == e.NodeKey1Bytes,
			OtherNode:    e.NodeKey2Bytes,
			Capacity:     e.Capacity,
			OutPolicySet: p1 != nil,
			InPolicy:     cachedInPolicy,
			InboundFee:   inboundFee,
		}

		// OtherNode defaults to node 2 above; flip it to node 1 when
		// the node we are iterating for is itself node 2.
		if node == e.NodeKey2Bytes {
			directedChannel.OtherNode = e.NodeKey1Bytes
		}

		return cb(directedChannel)
	}

	return nodeTraversal(tx, node[:], c.db, dbCallback)
}
560

561
// fetchNodeFeatures returns the features of a given node. If no features are
562
// known for the node, an empty feature vector is returned. An optional read
563
// transaction may be provided. If none is provided, a new one will be created.
564
func (c *KVStore) fetchNodeFeatures(tx kvdb.RTx,
565
        node route.Vertex) (*lnwire.FeatureVector, error) {
1,142✔
566

1,142✔
567
        if c.graphCache != nil {
1,598✔
568
                return c.graphCache.GetFeatures(node), nil
456✔
569
        }
456✔
570

571
        // Fallback that uses the database.
572
        targetNode, err := c.FetchLightningNodeTx(tx, node)
689✔
573
        switch {
689✔
574
        // If the node exists and has features, return them directly.
575
        case err == nil:
678✔
576
                return targetNode.Features, nil
678✔
577

578
        // If we couldn't find a node announcement, populate a blank feature
579
        // vector.
580
        case errors.Is(err, ErrGraphNodeNotFound):
11✔
581
                return lnwire.EmptyFeatureVector(), nil
11✔
582

583
        // Otherwise, bubble the error up.
NEW
584
        default:
×
NEW
585
                return nil, err
×
586
        }
587
}
588

589
// ForEachNodeDirectedChannel iterates through all channels of a given node,
// executing the passed callback on the directed edge representing the channel
// and its incoming policy. If the callback returns an error, then the iteration
// is halted with the error propagated back up to the caller. If the graphCache
// is available, then it will be used to retrieve the node's channels instead
// of the database.
//
// Unknown policies are passed into the callback as nil values.
//
// NOTE: this is part of the graphdb.NodeTraverser interface.
func (c *KVStore) ForEachNodeDirectedChannel(nodePub route.Vertex,
	cb func(channel *DirectedChannel) error) error {

	// A nil transaction is passed so that a fresh read transaction is
	// created internally.
	return c.forEachNodeDirectedChannel(nil, nodePub, cb)
}
604

605
// FetchNodeFeatures returns the features of the given node. If no features are
// known for the node, an empty feature vector is returned.
// If the graphCache is available, then it will be used to retrieve the node's
// features instead of the database.
//
// NOTE: this is part of the graphdb.NodeTraverser interface.
func (c *KVStore) FetchNodeFeatures(nodePub route.Vertex) (
	*lnwire.FeatureVector, error) {

	// A nil transaction is passed so that a fresh read transaction is
	// created internally.
	return c.fetchNodeFeatures(nil, nodePub)
}
616

617
// ForEachNodeCached is similar to forEachNode, but it utilizes the channel
618
// graph cache instead. Note that this doesn't return all the information the
619
// regular forEachNode method does.
620
//
621
// NOTE: The callback contents MUST not be modified.
622
func (c *KVStore) ForEachNodeCached(cb func(node route.Vertex,
623
        chans map[uint64]*DirectedChannel) error) error {
1✔
624

1✔
625
        if c.graphCache != nil {
1✔
NEW
626
                return c.graphCache.ForEachNode(cb)
×
NEW
627
        }
×
628

629
        // Otherwise call back to a version that uses the database directly.
630
        // We'll iterate over each node, then the set of channels for each
631
        // node, and construct a similar callback functiopn signature as the
632
        // main funcotin expects.
633
        return c.forEachNode(func(tx kvdb.RTx,
1✔
634
                node *models.LightningNode) error {
21✔
635

20✔
636
                channels := make(map[uint64]*DirectedChannel)
20✔
637

20✔
638
                err := c.ForEachNodeChannelTx(tx, node.PubKeyBytes,
20✔
639
                        func(tx kvdb.RTx, e *models.ChannelEdgeInfo,
20✔
640
                                p1 *models.ChannelEdgePolicy,
20✔
641
                                p2 *models.ChannelEdgePolicy) error {
210✔
642

190✔
643
                                toNodeCallback := func() route.Vertex {
190✔
NEW
644
                                        return node.PubKeyBytes
×
NEW
645
                                }
×
646
                                toNodeFeatures, err := c.fetchNodeFeatures(
190✔
647
                                        tx, node.PubKeyBytes,
190✔
648
                                )
190✔
649
                                if err != nil {
190✔
NEW
650
                                        return err
×
NEW
651
                                }
×
652

653
                                var cachedInPolicy *models.CachedEdgePolicy
190✔
654
                                if p2 != nil {
380✔
655
                                        cachedInPolicy =
190✔
656
                                                models.NewCachedPolicy(p2)
190✔
657
                                        cachedInPolicy.ToNodePubKey =
190✔
658
                                                toNodeCallback
190✔
659
                                        cachedInPolicy.ToNodeFeatures =
190✔
660
                                                toNodeFeatures
190✔
661
                                }
190✔
662

663
                                directedChannel := &DirectedChannel{
190✔
664
                                        ChannelID: e.ChannelID,
190✔
665
                                        IsNode1: node.PubKeyBytes ==
190✔
666
                                                e.NodeKey1Bytes,
190✔
667
                                        OtherNode:    e.NodeKey2Bytes,
190✔
668
                                        Capacity:     e.Capacity,
190✔
669
                                        OutPolicySet: p1 != nil,
190✔
670
                                        InPolicy:     cachedInPolicy,
190✔
671
                                }
190✔
672

190✔
673
                                if node.PubKeyBytes == e.NodeKey2Bytes {
285✔
674
                                        directedChannel.OtherNode =
95✔
675
                                                e.NodeKey1Bytes
95✔
676
                                }
95✔
677

678
                                channels[e.ChannelID] = directedChannel
190✔
679

190✔
680
                                return nil
190✔
681
                        })
682
                if err != nil {
20✔
NEW
683
                        return err
×
NEW
684
                }
×
685

686
                return cb(node.PubKeyBytes, channels)
20✔
687
        })
688
}
689

690
// DisabledChannelIDs returns the channel ids of disabled channels.
// A channel is disabled when two of the associated ChannelEdgePolicies
// have their disabled bit on.
func (c *KVStore) DisabledChannelIDs() ([]uint64, error) {
	var disabledChanIDs []uint64
	// chanEdgeFound tracks channels for which exactly one disabled policy
	// has been seen so far during iteration.
	var chanEdgeFound map[uint64]struct{}

	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}

		disabledEdgePolicyIndex := edges.NestedReadBucket(
			disabledEdgePolicyBucket,
		)
		// A missing index simply means no policy has ever been marked
		// disabled, so there is nothing to report.
		if disabledEdgePolicyIndex == nil {
			return nil
		}

		// We iterate over all disabled policies and we add each channel
		// that has more than one disabled policy to disabledChanIDs
		// array.
		return disabledEdgePolicyIndex.ForEach(
			func(k, v []byte) error {
				// The first 8 bytes of the key encode the
				// channel ID.
				chanID := byteOrder.Uint64(k[:8])
				_, edgeFound := chanEdgeFound[chanID]
				if edgeFound {
					// Second disabled policy for this
					// channel: both directions are
					// disabled, so record it.
					delete(chanEdgeFound, chanID)
					disabledChanIDs = append(
						disabledChanIDs, chanID,
					)

					return nil
				}

				chanEdgeFound[chanID] = struct{}{}

				return nil
			},
		)
	}, func() {
		// Reset closure: clear accumulated state so a retried
		// transaction starts from scratch.
		disabledChanIDs = nil
		chanEdgeFound = make(map[uint64]struct{})
	})
	if err != nil {
		return nil, err
	}

	return disabledChanIDs, nil
}
741

742
// ForEachNode iterates through all the stored vertices/nodes in the graph,
743
// executing the passed callback with each node encountered. If the callback
744
// returns an error, then the transaction is aborted and the iteration stops
745
// early. Any operations performed on the NodeTx passed to the call-back are
746
// executed under the same read transaction and so, methods on the NodeTx object
747
// _MUST_ only be called from within the call-back.
748
func (c *KVStore) ForEachNode(cb func(tx NodeRTx) error) error {
123✔
749
        return c.forEachNode(func(tx kvdb.RTx,
123✔
750
                node *models.LightningNode) error {
1,096✔
751

973✔
752
                return cb(newChanGraphNodeTx(tx, c, node))
973✔
753
        })
973✔
754
}
755

756
// forEachNode iterates through all the stored vertices/nodes in the graph,
// executing the passed callback with each node encountered. If the callback
// returns an error, then the transaction is aborted and the iteration stops
// early.
//
// TODO(roasbeef): add iterator interface to allow for memory efficient graph
// traversal when graph gets mega.
func (c *KVStore) forEachNode(
	cb func(kvdb.RTx, *models.LightningNode) error) error {

	traversal := func(tx kvdb.RTx) error {
		// First grab the nodes bucket which stores the mapping from
		// pubKey to node information.
		nodes := tx.ReadBucket(nodeBucket)
		if nodes == nil {
			return ErrGraphNotFound
		}

		return nodes.ForEach(func(pubKey, nodeBytes []byte) error {
			// If this is the source key, then we skip this
			// iteration as the value for this key is a pubKey
			// rather than raw node information. Keys that are not
			// 33 bytes are sub-bucket keys, not node entries.
			if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 {
				return nil
			}

			nodeReader := bytes.NewReader(nodeBytes)
			node, err := deserializeLightningNode(nodeReader)
			if err != nil {
				return err
			}

			// Execute the callback, the transaction will abort if
			// this returns an error.
			return cb(tx, &node)
		})
	}

	// Read-only transaction; no state to reset on retry.
	return kvdb.View(c.db, traversal, func() {})
}
796

797
// ForEachNodeCacheable iterates through all the stored vertices/nodes in the
// graph, executing the passed callback with each node encountered. If the
// callback returns an error, then the transaction is aborted and the iteration
// stops early.
func (c *KVStore) ForEachNodeCacheable(cb func(route.Vertex,
	*lnwire.FeatureVector) error) error {

	traversal := func(tx kvdb.RTx) error {
		// First grab the nodes bucket which stores the mapping from
		// pubKey to node information.
		nodes := tx.ReadBucket(nodeBucket)
		if nodes == nil {
			return ErrGraphNotFound
		}

		return nodes.ForEach(func(pubKey, nodeBytes []byte) error {
			// If this is the source key, then we skip this
			// iteration as the value for this key is a pubKey
			// rather than raw node information. Keys that are not
			// 33 bytes are sub-bucket keys, not node entries.
			if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 {
				return nil
			}

			// Only the pub key and feature vector are decoded
			// here, avoiding the cost of a full node
			// deserialization.
			nodeReader := bytes.NewReader(nodeBytes)
			node, features, err := deserializeLightningNodeCacheable( //nolint:ll
				nodeReader,
			)
			if err != nil {
				return err
			}

			// Execute the callback, the transaction will abort if
			// this returns an error.
			return cb(node, features)
		})
	}

	// Read-only transaction; no state to reset on retry.
	return kvdb.View(c.db, traversal, func() {})
}
836

837
// SourceNode returns the source node of the graph. The source node is treated
// as the center node within a star-graph. This method may be used to kick off
// a path finding algorithm in order to explore the reachability of another
// node based off the source node.
func (c *KVStore) SourceNode() (*models.LightningNode, error) {
	var source *models.LightningNode
	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		// First grab the nodes bucket which stores the mapping from
		// pubKey to node information.
		nodes := tx.ReadBucket(nodeBucket)
		if nodes == nil {
			return ErrGraphNotFound
		}

		node, err := c.sourceNode(nodes)
		if err != nil {
			return err
		}
		source = node

		return nil
	}, func() {
		// Reset closure: drop any partial result before a retry.
		source = nil
	})
	if err != nil {
		return nil, err
	}

	return source, nil
}
867

868
// sourceNode uses an existing database transaction and returns the source node
869
// of the graph. The source node is treated as the center node within a
870
// star-graph. This method may be used to kick off a path finding algorithm in
871
// order to explore the reachability of another node based off the source node.
872
func (c *KVStore) sourceNode(nodes kvdb.RBucket) (*models.LightningNode,
873
        error) {
489✔
874

489✔
875
        selfPub := nodes.Get(sourceKey)
489✔
876
        if selfPub == nil {
490✔
877
                return nil, ErrSourceNodeNotSet
1✔
878
        }
1✔
879

880
        // With the pubKey of the source node retrieved, we're able to
881
        // fetch the full node information.
882
        node, err := fetchLightningNode(nodes, selfPub)
488✔
883
        if err != nil {
488✔
NEW
884
                return nil, err
×
NEW
885
        }
×
886

887
        return &node, nil
488✔
888
}
889

890
// SetSourceNode sets the source node within the graph database. The source
// node is to be used as the center of a star-graph within path finding
// algorithms.
func (c *KVStore) SetSourceNode(node *models.LightningNode) error {
	nodePubBytes := node.PubKeyBytes[:]

	return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
		// First grab the nodes bucket which stores the mapping from
		// pubKey to node information.
		nodes, err := tx.CreateTopLevelBucket(nodeBucket)
		if err != nil {
			return err
		}

		// Next we create the mapping from source to the targeted
		// public key.
		if err := nodes.Put(sourceKey, nodePubBytes); err != nil {
			return err
		}

		// Finally, we commit the information of the lightning node
		// itself.
		return addLightningNode(tx, node)
	}, func() {})
}
915

916
// AddLightningNode adds a vertex/node to the graph database. If the node is not
// in the database from before, this will add a new, unconnected one to the
// graph. If it is present from before, this will update that node's
// information. Note that this method is expected to only be called to update an
// already present node from a node announcement, or to insert a node found in a
// channel update.
//
// TODO(roasbeef): also need sig of announcement.
func (c *KVStore) AddLightningNode(node *models.LightningNode,
	op ...batch.SchedulerOption) error {

	r := &batch.Request{
		Update: func(tx kvdb.RwTx) error {
			// Keep the in-memory graph cache in step with the
			// on-disk state when a cache is configured.
			if c.graphCache != nil {
				c.graphCache.AddNodeFeatures(
					node.PubKeyBytes, node.Features,
				)
			}

			return addLightningNode(tx, node)
		},
	}

	// Apply any caller-supplied scheduler options to the request.
	for _, f := range op {
		f(r)
	}

	// The node scheduler may batch this write with others for efficiency.
	return c.nodeScheduler.Execute(r)
}
945

946
func addLightningNode(tx kvdb.RwTx, node *models.LightningNode) error {
993✔
947
        nodes, err := tx.CreateTopLevelBucket(nodeBucket)
993✔
948
        if err != nil {
993✔
NEW
949
                return err
×
NEW
950
        }
×
951

952
        aliases, err := nodes.CreateBucketIfNotExists(aliasIndexBucket)
993✔
953
        if err != nil {
993✔
NEW
954
                return err
×
NEW
955
        }
×
956

957
        updateIndex, err := nodes.CreateBucketIfNotExists(
993✔
958
                nodeUpdateIndexBucket,
993✔
959
        )
993✔
960
        if err != nil {
993✔
NEW
961
                return err
×
NEW
962
        }
×
963

964
        return putLightningNode(nodes, aliases, updateIndex, node)
993✔
965
}
966

967
// LookupAlias attempts to return the alias as advertised by the target node.
// TODO(roasbeef): currently assumes that aliases are unique...
func (c *KVStore) LookupAlias(pub *btcec.PublicKey) (string, error) {
	var alias string

	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		nodes := tx.ReadBucket(nodeBucket)
		if nodes == nil {
			return ErrGraphNodesNotFound
		}

		aliases := nodes.NestedReadBucket(aliasIndexBucket)
		if aliases == nil {
			return ErrGraphNodesNotFound
		}

		// The alias index is keyed by the node's compressed pub key.
		nodePub := pub.SerializeCompressed()
		a := aliases.Get(nodePub)
		if a == nil {
			return ErrNodeAliasNotFound
		}

		// TODO(roasbeef): should actually be using the utf-8
		// package...
		alias = string(a)

		return nil
	}, func() {
		// Reset closure: clear partial result before a retry.
		alias = ""
	})
	if err != nil {
		return "", err
	}

	return alias, nil
}
1003

1004
// DeleteLightningNode starts a new database transaction to remove a vertex/node
// from the database according to the node's public key.
func (c *KVStore) DeleteLightningNode(nodePub route.Vertex) error {
	// TODO(roasbeef): ensure dangling edges are removed...
	return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
		nodes := tx.ReadWriteBucket(nodeBucket)
		if nodes == nil {
			return ErrGraphNodeNotFound
		}

		// Keep the in-memory graph cache in step with the on-disk
		// state when a cache is configured.
		if c.graphCache != nil {
			c.graphCache.RemoveNode(nodePub)
		}

		return c.deleteLightningNode(nodes, nodePub[:])
	}, func() {})
}
1021

1022
// deleteLightningNode uses an existing database transaction to remove a
// vertex/node from the database according to the node's public key.
func (c *KVStore) deleteLightningNode(nodes kvdb.RwBucket,
	compressedPubKey []byte) error {

	aliases := nodes.NestedReadWriteBucket(aliasIndexBucket)
	if aliases == nil {
		return ErrGraphNodesNotFound
	}

	if err := aliases.Delete(compressedPubKey); err != nil {
		return err
	}

	// Before we delete the node, we'll fetch its current state so we can
	// determine when its last update was to clear out the node update
	// index.
	node, err := fetchLightningNode(nodes, compressedPubKey)
	if err != nil {
		return err
	}

	if err := nodes.Delete(compressedPubKey); err != nil {
		return err
	}

	// Finally, we'll delete the index entry for the node within the
	// nodeUpdateIndexBucket as this node is no longer active, so we don't
	// need to track its last update.
	nodeUpdateIndex := nodes.NestedReadWriteBucket(nodeUpdateIndexBucket)
	if nodeUpdateIndex == nil {
		return ErrGraphNodesNotFound
	}

	// In order to delete the entry, we'll need to reconstruct the key for
	// its last update. The key layout is: 8-byte big-endian unix
	// timestamp followed by the 33-byte compressed pub key.
	updateUnix := uint64(node.LastUpdate.Unix())
	var indexKey [8 + 33]byte
	byteOrder.PutUint64(indexKey[:8], updateUnix)
	copy(indexKey[8:], compressedPubKey)

	return nodeUpdateIndex.Delete(indexKey[:])
}
1065

1066
// AddChannelEdge adds a new (undirected, blank) edge to the graph database. An
// undirected edge from the two target nodes are created. The information stored
// denotes the static attributes of the channel, such as the channelID, the keys
// involved in creation of the channel, and the set of features that the channel
// supports. The chanPoint and chanID are used to uniquely identify the edge
// globally within the database.
func (c *KVStore) AddChannelEdge(edge *models.ChannelEdgeInfo,
	op ...batch.SchedulerOption) error {

	// alreadyExists carries the duplicate-edge condition out of the
	// batched Update and into OnCommit, since the Update itself must
	// return nil for the batch to commit.
	var alreadyExists bool
	r := &batch.Request{
		Reset: func() {
			alreadyExists = false
		},
		Update: func(tx kvdb.RwTx) error {
			err := c.addChannelEdge(tx, edge)

			// Silence ErrEdgeAlreadyExist so that the batch can
			// succeed, but propagate the error via local state.
			if errors.Is(err, ErrEdgeAlreadyExist) {
				alreadyExists = true
				return nil
			}

			return err
		},
		OnCommit: func(err error) error {
			switch {
			case err != nil:
				return err
			case alreadyExists:
				return ErrEdgeAlreadyExist
			default:
				// Drop any stale cache entries for this
				// channel now that it has been written.
				c.rejectCache.remove(edge.ChannelID)
				c.chanCache.remove(edge.ChannelID)
				return nil
			}
		},
	}

	// Apply any caller-supplied scheduler options to the request.
	for _, f := range op {
		if f == nil {
			return fmt.Errorf("nil scheduler option was used")
		}

		f(r)
	}

	return c.chanScheduler.Execute(r)
}
1116

1117
// addChannelEdge is the private form of AddChannelEdge that allows callers to
// utilize an existing db transaction.
func (c *KVStore) addChannelEdge(tx kvdb.RwTx,
	edge *models.ChannelEdgeInfo) error {

	// Construct the channel's primary key which is the 8-byte channel ID.
	var chanKey [8]byte
	binary.BigEndian.PutUint64(chanKey[:], edge.ChannelID)

	nodes, err := tx.CreateTopLevelBucket(nodeBucket)
	if err != nil {
		return err
	}
	edges, err := tx.CreateTopLevelBucket(edgeBucket)
	if err != nil {
		return err
	}
	edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket)
	if err != nil {
		return err
	}
	chanIndex, err := edges.CreateBucketIfNotExists(channelPointBucket)
	if err != nil {
		return err
	}

	// First, attempt to check if this edge has already been created. If
	// so, then we can exit early as this method is meant to be idempotent.
	if edgeInfo := edgeIndex.Get(chanKey[:]); edgeInfo != nil {
		return ErrEdgeAlreadyExist
	}

	// Keep the in-memory graph cache in step with the on-disk state when
	// a cache is configured.
	if c.graphCache != nil {
		c.graphCache.AddChannel(edge, nil, nil)
	}

	// Before we insert the channel into the database, we'll ensure that
	// both nodes already exist in the channel graph. If either node
	// doesn't, then we'll insert a "shell" node that just includes its
	// public key, so subsequent validation and queries can work properly.
	_, node1Err := fetchLightningNode(nodes, edge.NodeKey1Bytes[:])
	switch {
	case errors.Is(node1Err, ErrGraphNodeNotFound):
		node1Shell := models.LightningNode{
			PubKeyBytes:          edge.NodeKey1Bytes,
			HaveNodeAnnouncement: false,
		}
		err := addLightningNode(tx, &node1Shell)
		if err != nil {
			return fmt.Errorf("unable to create shell node "+
				"for: %x: %w", edge.NodeKey1Bytes, err)
		}
	case node1Err != nil:
		return node1Err
	}

	_, node2Err := fetchLightningNode(nodes, edge.NodeKey2Bytes[:])
	switch {
	case errors.Is(node2Err, ErrGraphNodeNotFound):
		node2Shell := models.LightningNode{
			PubKeyBytes:          edge.NodeKey2Bytes,
			HaveNodeAnnouncement: false,
		}
		err := addLightningNode(tx, &node2Shell)
		if err != nil {
			return fmt.Errorf("unable to create shell node "+
				"for: %x: %w", edge.NodeKey2Bytes, err)
		}
	case node2Err != nil:
		return node2Err
	}

	// If the edge hasn't been created yet, then we'll first add it to the
	// edge index in order to associate the edge between two nodes and also
	// store the static components of the channel.
	if err := putChanEdgeInfo(edgeIndex, edge, chanKey); err != nil {
		return err
	}

	// Mark edge policies for both sides as unknown. This is to enable
	// efficient incoming channel lookup for a node.
	keys := []*[33]byte{
		&edge.NodeKey1Bytes,
		&edge.NodeKey2Bytes,
	}
	for _, key := range keys {
		err := putChanEdgePolicyUnknown(edges, edge.ChannelID, key[:])
		if err != nil {
			return err
		}
	}

	// Finally we add it to the channel index which maps channel points
	// (outpoints) to the shorter channel ID's.
	var b bytes.Buffer
	if err := WriteOutpoint(&b, &edge.ChannelPoint); err != nil {
		return err
	}

	return chanIndex.Put(b.Bytes(), chanKey[:])
}
1218

1219
// HasChannelEdge returns true if the database knows of a channel edge with the
// passed channel ID, and false otherwise. If an edge with that ID is found
// within the graph, then two time stamps representing the last time the edge
// was updated for both directed edges are returned along with the boolean. If
// it is not found, then the zombie index is checked and its result is returned
// as the second boolean.
func (c *KVStore) HasChannelEdge(
	chanID uint64) (time.Time, time.Time, bool, bool, error) {

	var (
		upd1Time time.Time
		upd2Time time.Time
		exists   bool
		isZombie bool
	)

	// We'll query the cache with the shared lock held to allow multiple
	// readers to access values in the cache concurrently if they exist.
	c.cacheMu.RLock()
	if entry, ok := c.rejectCache.get(chanID); ok {
		c.cacheMu.RUnlock()
		upd1Time = time.Unix(entry.upd1Time, 0)
		upd2Time = time.Unix(entry.upd2Time, 0)
		exists, isZombie = entry.flags.unpack()

		return upd1Time, upd2Time, exists, isZombie, nil
	}
	c.cacheMu.RUnlock()

	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()

	// The item was not found with the shared lock, so we'll acquire the
	// exclusive lock and check the cache again in case another method added
	// the entry to the cache while no lock was held.
	if entry, ok := c.rejectCache.get(chanID); ok {
		upd1Time = time.Unix(entry.upd1Time, 0)
		upd2Time = time.Unix(entry.upd2Time, 0)
		exists, isZombie = entry.flags.unpack()

		return upd1Time, upd2Time, exists, isZombie, nil
	}

	// Cache miss: consult the database under a read transaction.
	if err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrGraphNoEdgesFound
		}

		var channelID [8]byte
		byteOrder.PutUint64(channelID[:], chanID)

		// If the edge doesn't exist, then we'll also check our zombie
		// index.
		if edgeIndex.Get(channelID[:]) == nil {
			exists = false
			zombieIndex := edges.NestedReadBucket(zombieBucket)
			if zombieIndex != nil {
				isZombie, _, _ = isZombieEdge(
					zombieIndex, chanID,
				)
			}

			return nil
		}

		exists = true
		isZombie = false

		// If the channel has been found in the graph, then retrieve
		// the edges itself so we can return the last updated
		// timestamps.
		nodes := tx.ReadBucket(nodeBucket)
		if nodes == nil {
			return ErrGraphNodeNotFound
		}

		e1, e2, err := fetchChanEdgePolicies(
			edgeIndex, edges, channelID[:],
		)
		if err != nil {
			return err
		}

		// As we may have only one of the edges populated, only set the
		// update time if the edge was found in the database.
		if e1 != nil {
			upd1Time = e1.LastUpdate
		}
		if e2 != nil {
			upd2Time = e2.LastUpdate
		}

		return nil
	}, func() {}); err != nil {
		return time.Time{}, time.Time{}, exists, isZombie, err
	}

	// Populate the cache for subsequent lookups; we still hold the
	// exclusive lock here.
	c.rejectCache.insert(chanID, rejectCacheEntry{
		upd1Time: upd1Time.Unix(),
		upd2Time: upd2Time.Unix(),
		flags:    packRejectFlags(exists, isZombie),
	})

	return upd1Time, upd2Time, exists, isZombie, nil
}
1329

1330
// AddEdgeProof sets the proof of an existing edge in the graph database.
func (c *KVStore) AddEdgeProof(chanID lnwire.ShortChannelID,
	proof *models.ChannelAuthProof) error {

	// Construct the channel's primary key which is the 8-byte channel ID.
	var chanKey [8]byte
	binary.BigEndian.PutUint64(chanKey[:], chanID.ToUint64())

	return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
		edges := tx.ReadWriteBucket(edgeBucket)
		if edges == nil {
			return ErrEdgeNotFound
		}

		edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrEdgeNotFound
		}

		// Read the existing edge info, attach the proof, and write it
		// back in place.
		edge, err := fetchChanEdgeInfo(edgeIndex, chanKey[:])
		if err != nil {
			return err
		}

		edge.AuthProof = proof

		return putChanEdgeInfo(edgeIndex, &edge, chanKey)
	}, func() {})
}
1359

1360
const (
	// pruneTipBytes is the total size of the value which stores a prune
	// entry of the graph in the prune log. The "prune tip" is the last
	// entry in the prune log, and indicates if the channel graph is in
	// sync with the current UTXO state. The structure of the value
	// is: blockHash, taking 32 bytes total.
	pruneTipBytes = 32
)
1368

1369
// PruneGraph prunes newly closed channels from the channel graph in response
1370
// to a new block being solved on the network. Any transactions which spend the
1371
// funding output of any known channels within he graph will be deleted.
1372
// Additionally, the "prune tip", or the last block which has been used to
1373
// prune the graph is stored so callers can ensure the graph is fully in sync
1374
// with the current UTXO state. A slice of channels that have been closed by
1375
// the target block are returned if the function succeeds without error.
1376
func (c *KVStore) PruneGraph(spentOutputs []*wire.OutPoint,
1377
        blockHash *chainhash.Hash, blockHeight uint32) (
1378
        []*models.ChannelEdgeInfo, error) {
235✔
1379

235✔
1380
        c.cacheMu.Lock()
235✔
1381
        defer c.cacheMu.Unlock()
235✔
1382

235✔
1383
        var chansClosed []*models.ChannelEdgeInfo
235✔
1384

235✔
1385
        err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
470✔
1386
                // First grab the edges bucket which houses the information
235✔
1387
                // we'd like to delete
235✔
1388
                edges, err := tx.CreateTopLevelBucket(edgeBucket)
235✔
1389
                if err != nil {
235✔
NEW
1390
                        return err
×
NEW
1391
                }
×
1392

1393
                // Next grab the two edge indexes which will also need to be
1394
                // updated.
1395
                edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket)
235✔
1396
                if err != nil {
235✔
NEW
1397
                        return err
×
NEW
1398
                }
×
1399
                chanIndex, err := edges.CreateBucketIfNotExists(
235✔
1400
                        channelPointBucket,
235✔
1401
                )
235✔
1402
                if err != nil {
235✔
NEW
1403
                        return err
×
NEW
1404
                }
×
1405
                nodes := tx.ReadWriteBucket(nodeBucket)
235✔
1406
                if nodes == nil {
235✔
NEW
1407
                        return ErrSourceNodeNotSet
×
NEW
1408
                }
×
1409
                zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
235✔
1410
                if err != nil {
235✔
NEW
1411
                        return err
×
NEW
1412
                }
×
1413

1414
                // For each of the outpoints that have been spent within the
1415
                // block, we attempt to delete them from the graph as if that
1416
                // outpoint was a channel, then it has now been closed.
1417
                for _, chanPoint := range spentOutputs {
373✔
1418
                        // TODO(roasbeef): load channel bloom filter, continue
138✔
1419
                        // if NOT if filter
138✔
1420

138✔
1421
                        var opBytes bytes.Buffer
138✔
1422
                        err := WriteOutpoint(&opBytes, chanPoint)
138✔
1423
                        if err != nil {
138✔
NEW
1424
                                return err
×
NEW
1425
                        }
×
1426

1427
                        // First attempt to see if the channel exists within
1428
                        // the database, if not, then we can exit early.
1429
                        chanID := chanIndex.Get(opBytes.Bytes())
138✔
1430
                        if chanID == nil {
256✔
1431
                                continue
118✔
1432
                        }
1433

1434
                        // However, if it does, then we'll read out the full
1435
                        // version so we can add it to the set of deleted
1436
                        // channels.
1437
                        edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID)
20✔
1438
                        if err != nil {
20✔
NEW
1439
                                return err
×
NEW
1440
                        }
×
1441

1442
                        // Attempt to delete the channel, an ErrEdgeNotFound
1443
                        // will be returned if that outpoint isn't known to be
1444
                        // a channel. If no error is returned, then a channel
1445
                        // was successfully pruned.
1446
                        err = c.delChannelEdgeUnsafe(
20✔
1447
                                edges, edgeIndex, chanIndex, zombieIndex,
20✔
1448
                                chanID, false, false,
20✔
1449
                        )
20✔
1450
                        if err != nil && !errors.Is(err, ErrEdgeNotFound) {
20✔
NEW
1451
                                return err
×
NEW
1452
                        }
×
1453

1454
                        chansClosed = append(chansClosed, &edgeInfo)
20✔
1455
                }
1456

1457
                metaBucket, err := tx.CreateTopLevelBucket(graphMetaBucket)
235✔
1458
                if err != nil {
235✔
NEW
1459
                        return err
×
NEW
1460
                }
×
1461

1462
                pruneBucket, err := metaBucket.CreateBucketIfNotExists(
235✔
1463
                        pruneLogBucket,
235✔
1464
                )
235✔
1465
                if err != nil {
235✔
NEW
1466
                        return err
×
NEW
1467
                }
×
1468

1469
                // With the graph pruned, add a new entry to the prune log,
1470
                // which can be used to check if the graph is fully synced with
1471
                // the current UTXO state.
1472
                var blockHeightBytes [4]byte
235✔
1473
                byteOrder.PutUint32(blockHeightBytes[:], blockHeight)
235✔
1474

235✔
1475
                var newTip [pruneTipBytes]byte
235✔
1476
                copy(newTip[:], blockHash[:])
235✔
1477

235✔
1478
                err = pruneBucket.Put(blockHeightBytes[:], newTip[:])
235✔
1479
                if err != nil {
235✔
NEW
1480
                        return err
×
NEW
1481
                }
×
1482

1483
                // Now that the graph has been pruned, we'll also attempt to
1484
                // prune any nodes that have had a channel closed within the
1485
                // latest block.
1486
                return c.pruneGraphNodes(nodes, edgeIndex)
235✔
1487
        }, func() {
235✔
1488
                chansClosed = nil
235✔
1489
        })
235✔
1490
        if err != nil {
235✔
NEW
1491
                return nil, err
×
NEW
1492
        }
×
1493

1494
        for _, channel := range chansClosed {
255✔
1495
                c.rejectCache.remove(channel.ChannelID)
20✔
1496
                c.chanCache.remove(channel.ChannelID)
20✔
1497
        }
20✔
1498

1499
        if c.graphCache != nil {
470✔
1500
                log.Debugf("Pruned graph, cache now has %s",
235✔
1501
                        c.graphCache.Stats())
235✔
1502
        }
235✔
1503

1504
        return chansClosed, nil
235✔
1505
}
1506

1507
// PruneGraphNodes is a garbage collection method which attempts to prune out
1508
// any nodes from the channel graph that are currently unconnected. This ensure
1509
// that we only maintain a graph of reachable nodes. In the event that a pruned
1510
// node gains more channels, it will be re-added back to the graph.
1511
func (c *KVStore) PruneGraphNodes() error {
26✔
1512
        return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
52✔
1513
                nodes := tx.ReadWriteBucket(nodeBucket)
26✔
1514
                if nodes == nil {
26✔
NEW
1515
                        return ErrGraphNodesNotFound
×
NEW
1516
                }
×
1517
                edges := tx.ReadWriteBucket(edgeBucket)
26✔
1518
                if edges == nil {
26✔
NEW
1519
                        return ErrGraphNotFound
×
NEW
1520
                }
×
1521
                edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
26✔
1522
                if edgeIndex == nil {
26✔
NEW
1523
                        return ErrGraphNoEdgesFound
×
NEW
1524
                }
×
1525

1526
                return c.pruneGraphNodes(nodes, edgeIndex)
26✔
1527
        }, func() {})
26✔
1528
}
1529

1530
// pruneGraphNodes attempts to remove any nodes from the graph who have had a
1531
// channel closed within the current block. If the node still has existing
1532
// channels in the graph, this will act as a no-op.
1533
func (c *KVStore) pruneGraphNodes(nodes kvdb.RwBucket,
1534
        edgeIndex kvdb.RwBucket) error {
258✔
1535

258✔
1536
        log.Trace("Pruning nodes from graph with no open channels")
258✔
1537

258✔
1538
        // We'll retrieve the graph's source node to ensure we don't remove it
258✔
1539
        // even if it no longer has any open channels.
258✔
1540
        sourceNode, err := c.sourceNode(nodes)
258✔
1541
        if err != nil {
258✔
NEW
1542
                return err
×
NEW
1543
        }
×
1544

1545
        // We'll use this map to keep count the number of references to a node
1546
        // in the graph. A node should only be removed once it has no more
1547
        // references in the graph.
1548
        nodeRefCounts := make(map[[33]byte]int)
258✔
1549
        err = nodes.ForEach(func(pubKey, nodeBytes []byte) error {
1,517✔
1550
                // If this is the source key, then we skip this
1,259✔
1551
                // iteration as the value for this key is a pubKey
1,259✔
1552
                // rather than raw node information.
1,259✔
1553
                if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 {
2,027✔
1554
                        return nil
768✔
1555
                }
768✔
1556

1557
                var nodePub [33]byte
494✔
1558
                copy(nodePub[:], pubKey)
494✔
1559
                nodeRefCounts[nodePub] = 0
494✔
1560

494✔
1561
                return nil
494✔
1562
        })
1563
        if err != nil {
258✔
NEW
1564
                return err
×
NEW
1565
        }
×
1566

1567
        // To ensure we never delete the source node, we'll start off by
1568
        // bumping its ref count to 1.
1569
        nodeRefCounts[sourceNode.PubKeyBytes] = 1
258✔
1570

258✔
1571
        // Next, we'll run through the edgeIndex which maps a channel ID to the
258✔
1572
        // edge info. We'll use this scan to populate our reference count map
258✔
1573
        // above.
258✔
1574
        err = edgeIndex.ForEach(func(chanID, edgeInfoBytes []byte) error {
448✔
1575
                // The first 66 bytes of the edge info contain the pubkeys of
190✔
1576
                // the nodes that this edge attaches. We'll extract them, and
190✔
1577
                // add them to the ref count map.
190✔
1578
                var node1, node2 [33]byte
190✔
1579
                copy(node1[:], edgeInfoBytes[:33])
190✔
1580
                copy(node2[:], edgeInfoBytes[33:])
190✔
1581

190✔
1582
                // With the nodes extracted, we'll increase the ref count of
190✔
1583
                // each of the nodes.
190✔
1584
                nodeRefCounts[node1]++
190✔
1585
                nodeRefCounts[node2]++
190✔
1586

190✔
1587
                return nil
190✔
1588
        })
190✔
1589
        if err != nil {
258✔
NEW
1590
                return err
×
NEW
1591
        }
×
1592

1593
        // Finally, we'll make a second pass over the set of nodes, and delete
1594
        // any nodes that have a ref count of zero.
1595
        var numNodesPruned int
258✔
1596
        for nodePubKey, refCount := range nodeRefCounts {
752✔
1597
                // If the ref count of the node isn't zero, then we can safely
494✔
1598
                // skip it as it still has edges to or from it within the
494✔
1599
                // graph.
494✔
1600
                if refCount != 0 {
926✔
1601
                        continue
432✔
1602
                }
1603

1604
                if c.graphCache != nil {
130✔
1605
                        c.graphCache.RemoveNode(nodePubKey)
65✔
1606
                }
65✔
1607

1608
                // If we reach this point, then there are no longer any edges
1609
                // that connect this node, so we can delete it.
1610
                err := c.deleteLightningNode(nodes, nodePubKey[:])
65✔
1611
                if err != nil {
65✔
NEW
1612
                        if errors.Is(err, ErrGraphNodeNotFound) ||
×
NEW
1613
                                errors.Is(err, ErrGraphNodesNotFound) {
×
NEW
1614

×
NEW
1615
                                log.Warnf("Unable to prune node %x from the "+
×
NEW
1616
                                        "graph: %v", nodePubKey, err)
×
NEW
1617
                                continue
×
1618
                        }
1619

NEW
1620
                        return err
×
1621
                }
1622

1623
                log.Infof("Pruned unconnected node %x from channel graph",
65✔
1624
                        nodePubKey[:])
65✔
1625

65✔
1626
                numNodesPruned++
65✔
1627
        }
1628

1629
        if numNodesPruned > 0 {
307✔
1630
                log.Infof("Pruned %v unconnected nodes from the channel graph",
49✔
1631
                        numNodesPruned)
49✔
1632
        }
49✔
1633

1634
        return nil
258✔
1635
}
1636

1637
// DisconnectBlockAtHeight is used to indicate that the block specified
1638
// by the passed height has been disconnected from the main chain. This
1639
// will "rewind" the graph back to the height below, deleting channels
1640
// that are no longer confirmed from the graph. The prune log will be
1641
// set to the last prune height valid for the remaining chain.
1642
// Channels that were removed from the graph resulting from the
1643
// disconnected block are returned.
1644
func (c *KVStore) DisconnectBlockAtHeight(height uint32) (
1645
        []*models.ChannelEdgeInfo, error) {
165✔
1646

165✔
1647
        // Every channel having a ShortChannelID starting at 'height'
165✔
1648
        // will no longer be confirmed.
165✔
1649
        startShortChanID := lnwire.ShortChannelID{
165✔
1650
                BlockHeight: height,
165✔
1651
        }
165✔
1652

165✔
1653
        // Delete everything after this height from the db up until the
165✔
1654
        // SCID alias range.
165✔
1655
        endShortChanID := aliasmgr.StartingAlias
165✔
1656

165✔
1657
        // The block height will be the 3 first bytes of the channel IDs.
165✔
1658
        var chanIDStart [8]byte
165✔
1659
        byteOrder.PutUint64(chanIDStart[:], startShortChanID.ToUint64())
165✔
1660
        var chanIDEnd [8]byte
165✔
1661
        byteOrder.PutUint64(chanIDEnd[:], endShortChanID.ToUint64())
165✔
1662

165✔
1663
        c.cacheMu.Lock()
165✔
1664
        defer c.cacheMu.Unlock()
165✔
1665

165✔
1666
        // Keep track of the channels that are removed from the graph.
165✔
1667
        var removedChans []*models.ChannelEdgeInfo
165✔
1668

165✔
1669
        if err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
330✔
1670
                edges, err := tx.CreateTopLevelBucket(edgeBucket)
165✔
1671
                if err != nil {
165✔
NEW
1672
                        return err
×
NEW
1673
                }
×
1674
                edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket)
165✔
1675
                if err != nil {
165✔
NEW
1676
                        return err
×
NEW
1677
                }
×
1678
                chanIndex, err := edges.CreateBucketIfNotExists(
165✔
1679
                        channelPointBucket,
165✔
1680
                )
165✔
1681
                if err != nil {
165✔
NEW
1682
                        return err
×
NEW
1683
                }
×
1684
                zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
165✔
1685
                if err != nil {
165✔
NEW
1686
                        return err
×
NEW
1687
                }
×
1688

1689
                // Scan from chanIDStart to chanIDEnd, deleting every
1690
                // found edge.
1691
                // NOTE: we must delete the edges after the cursor loop, since
1692
                // modifying the bucket while traversing is not safe.
1693
                // NOTE: We use a < comparison in bytes.Compare instead of <=
1694
                // so that the StartingAlias itself isn't deleted.
1695
                var keys [][]byte
165✔
1696
                cursor := edgeIndex.ReadWriteCursor()
165✔
1697

165✔
1698
                //nolint:ll
165✔
1699
                for k, v := cursor.Seek(chanIDStart[:]); k != nil &&
165✔
1700
                        bytes.Compare(k, chanIDEnd[:]) < 0; k, v = cursor.Next() {
264✔
1701
                        edgeInfoReader := bytes.NewReader(v)
99✔
1702
                        edgeInfo, err := deserializeChanEdgeInfo(edgeInfoReader)
99✔
1703
                        if err != nil {
99✔
NEW
1704
                                return err
×
NEW
1705
                        }
×
1706

1707
                        keys = append(keys, k)
99✔
1708
                        removedChans = append(removedChans, &edgeInfo)
99✔
1709
                }
1710

1711
                for _, k := range keys {
264✔
1712
                        err = c.delChannelEdgeUnsafe(
99✔
1713
                                edges, edgeIndex, chanIndex, zombieIndex,
99✔
1714
                                k, false, false,
99✔
1715
                        )
99✔
1716
                        if err != nil && !errors.Is(err, ErrEdgeNotFound) {
99✔
NEW
1717
                                return err
×
NEW
1718
                        }
×
1719
                }
1720

1721
                // Delete all the entries in the prune log having a height
1722
                // greater or equal to the block disconnected.
1723
                metaBucket, err := tx.CreateTopLevelBucket(graphMetaBucket)
165✔
1724
                if err != nil {
165✔
NEW
1725
                        return err
×
NEW
1726
                }
×
1727

1728
                pruneBucket, err := metaBucket.CreateBucketIfNotExists(
165✔
1729
                        pruneLogBucket,
165✔
1730
                )
165✔
1731
                if err != nil {
165✔
NEW
1732
                        return err
×
NEW
1733
                }
×
1734

1735
                var pruneKeyStart [4]byte
165✔
1736
                byteOrder.PutUint32(pruneKeyStart[:], height)
165✔
1737

165✔
1738
                var pruneKeyEnd [4]byte
165✔
1739
                byteOrder.PutUint32(pruneKeyEnd[:], math.MaxUint32)
165✔
1740

165✔
1741
                // To avoid modifying the bucket while traversing, we delete
165✔
1742
                // the keys in a second loop.
165✔
1743
                var pruneKeys [][]byte
165✔
1744
                pruneCursor := pruneBucket.ReadWriteCursor()
165✔
1745
                //nolint:ll
165✔
1746
                for k, _ := pruneCursor.Seek(pruneKeyStart[:]); k != nil &&
165✔
1747
                        bytes.Compare(k, pruneKeyEnd[:]) <= 0; k, _ = pruneCursor.Next() {
260✔
1748
                        pruneKeys = append(pruneKeys, k)
95✔
1749
                }
95✔
1750

1751
                for _, k := range pruneKeys {
260✔
1752
                        if err := pruneBucket.Delete(k); err != nil {
95✔
NEW
1753
                                return err
×
NEW
1754
                        }
×
1755
                }
1756

1757
                return nil
165✔
1758
        }, func() {
165✔
1759
                removedChans = nil
165✔
1760
        }); err != nil {
165✔
NEW
1761
                return nil, err
×
NEW
1762
        }
×
1763

1764
        for _, channel := range removedChans {
264✔
1765
                c.rejectCache.remove(channel.ChannelID)
99✔
1766
                c.chanCache.remove(channel.ChannelID)
99✔
1767
        }
99✔
1768

1769
        return removedChans, nil
165✔
1770
}
1771

1772
// PruneTip returns the block height and hash of the latest block that has been
1773
// used to prune channels in the graph. Knowing the "prune tip" allows callers
1774
// to tell if the graph is currently in sync with the current best known UTXO
1775
// state.
1776
func (c *KVStore) PruneTip() (*chainhash.Hash, uint32, error) {
56✔
1777
        var (
56✔
1778
                tipHash   chainhash.Hash
56✔
1779
                tipHeight uint32
56✔
1780
        )
56✔
1781

56✔
1782
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
112✔
1783
                graphMeta := tx.ReadBucket(graphMetaBucket)
56✔
1784
                if graphMeta == nil {
56✔
NEW
1785
                        return ErrGraphNotFound
×
NEW
1786
                }
×
1787
                pruneBucket := graphMeta.NestedReadBucket(pruneLogBucket)
56✔
1788
                if pruneBucket == nil {
56✔
NEW
1789
                        return ErrGraphNeverPruned
×
NEW
1790
                }
×
1791

1792
                pruneCursor := pruneBucket.ReadCursor()
56✔
1793

56✔
1794
                // The prune key with the largest block height will be our
56✔
1795
                // prune tip.
56✔
1796
                k, v := pruneCursor.Last()
56✔
1797
                if k == nil {
77✔
1798
                        return ErrGraphNeverPruned
21✔
1799
                }
21✔
1800

1801
                // Once we have the prune tip, the value will be the block hash,
1802
                // and the key the block height.
1803
                copy(tipHash[:], v)
38✔
1804
                tipHeight = byteOrder.Uint32(k)
38✔
1805

38✔
1806
                return nil
38✔
1807
        }, func() {})
56✔
1808
        if err != nil {
77✔
1809
                return nil, 0, err
21✔
1810
        }
21✔
1811

1812
        return &tipHash, tipHeight, nil
38✔
1813
}
1814

1815
// DeleteChannelEdges removes edges with the given channel IDs from the
1816
// database and marks them as zombies. This ensures that we're unable to re-add
1817
// it to our database once again. If an edge does not exist within the
1818
// database, then ErrEdgeNotFound will be returned. If strictZombiePruning is
1819
// true, then when we mark these edges as zombies, we'll set up the keys such
1820
// that we require the node that failed to send the fresh update to be the one
1821
// that resurrects the channel from its zombie state. The markZombie bool
1822
// denotes whether or not to mark the channel as a zombie.
1823
func (c *KVStore) DeleteChannelEdges(strictZombiePruning, markZombie bool,
1824
        chanIDs ...uint64) error {
153✔
1825

153✔
1826
        // TODO(roasbeef): possibly delete from node bucket if node has no more
153✔
1827
        // channels
153✔
1828
        // TODO(roasbeef): don't delete both edges?
153✔
1829

153✔
1830
        c.cacheMu.Lock()
153✔
1831
        defer c.cacheMu.Unlock()
153✔
1832

153✔
1833
        err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
306✔
1834
                edges := tx.ReadWriteBucket(edgeBucket)
153✔
1835
                if edges == nil {
153✔
NEW
1836
                        return ErrEdgeNotFound
×
NEW
1837
                }
×
1838
                edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
153✔
1839
                if edgeIndex == nil {
153✔
NEW
1840
                        return ErrEdgeNotFound
×
NEW
1841
                }
×
1842
                chanIndex := edges.NestedReadWriteBucket(channelPointBucket)
153✔
1843
                if chanIndex == nil {
153✔
NEW
1844
                        return ErrEdgeNotFound
×
NEW
1845
                }
×
1846
                nodes := tx.ReadWriteBucket(nodeBucket)
153✔
1847
                if nodes == nil {
153✔
NEW
1848
                        return ErrGraphNodeNotFound
×
NEW
1849
                }
×
1850
                zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
153✔
1851
                if err != nil {
153✔
NEW
1852
                        return err
×
NEW
1853
                }
×
1854

1855
                var rawChanID [8]byte
153✔
1856
                for _, chanID := range chanIDs {
249✔
1857
                        byteOrder.PutUint64(rawChanID[:], chanID)
96✔
1858
                        err := c.delChannelEdgeUnsafe(
96✔
1859
                                edges, edgeIndex, chanIndex, zombieIndex,
96✔
1860
                                rawChanID[:], markZombie, strictZombiePruning,
96✔
1861
                        )
96✔
1862
                        if err != nil {
161✔
1863
                                return err
65✔
1864
                        }
65✔
1865
                }
1866

1867
                return nil
88✔
1868
        }, func() {})
153✔
1869
        if err != nil {
218✔
1870
                return err
65✔
1871
        }
65✔
1872

1873
        for _, chanID := range chanIDs {
119✔
1874
                c.rejectCache.remove(chanID)
31✔
1875
                c.chanCache.remove(chanID)
31✔
1876
        }
31✔
1877

1878
        return nil
88✔
1879
}
1880

1881
// ChannelID attempt to lookup the 8-byte compact channel ID which maps to the
1882
// passed channel point (outpoint). If the passed channel doesn't exist within
1883
// the database, then ErrEdgeNotFound is returned.
1884
func (c *KVStore) ChannelID(chanPoint *wire.OutPoint) (uint64, error) {
4✔
1885
        var chanID uint64
4✔
1886
        if err := kvdb.View(c.db, func(tx kvdb.RTx) error {
8✔
1887
                var err error
4✔
1888
                chanID, err = getChanID(tx, chanPoint)
4✔
1889
                return err
4✔
1890
        }, func() {
8✔
1891
                chanID = 0
4✔
1892
        }); err != nil {
7✔
1893
                return 0, err
3✔
1894
        }
3✔
1895

1896
        return chanID, nil
4✔
1897
}
1898

1899
// getChanID returns the assigned channel ID for a given channel point.
1900
func getChanID(tx kvdb.RTx, chanPoint *wire.OutPoint) (uint64, error) {
4✔
1901
        var b bytes.Buffer
4✔
1902
        if err := WriteOutpoint(&b, chanPoint); err != nil {
4✔
NEW
1903
                return 0, err
×
NEW
1904
        }
×
1905

1906
        edges := tx.ReadBucket(edgeBucket)
4✔
1907
        if edges == nil {
4✔
NEW
1908
                return 0, ErrGraphNoEdgesFound
×
NEW
1909
        }
×
1910
        chanIndex := edges.NestedReadBucket(channelPointBucket)
4✔
1911
        if chanIndex == nil {
4✔
NEW
1912
                return 0, ErrGraphNoEdgesFound
×
NEW
1913
        }
×
1914

1915
        chanIDBytes := chanIndex.Get(b.Bytes())
4✔
1916
        if chanIDBytes == nil {
7✔
1917
                return 0, ErrEdgeNotFound
3✔
1918
        }
3✔
1919

1920
        chanID := byteOrder.Uint64(chanIDBytes)
4✔
1921

4✔
1922
        return chanID, nil
4✔
1923
}
1924

1925
// TODO(roasbeef): allow updates to use Batch?
1926

1927
// HighestChanID returns the "highest" known channel ID in the channel graph.
1928
// This represents the "newest" channel from the PoV of the chain. This method
1929
// can be used by peers to quickly determine if they're graphs are in sync.
1930
func (c *KVStore) HighestChanID() (uint64, error) {
6✔
1931
        var cid uint64
6✔
1932

6✔
1933
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
12✔
1934
                edges := tx.ReadBucket(edgeBucket)
6✔
1935
                if edges == nil {
6✔
NEW
1936
                        return ErrGraphNoEdgesFound
×
NEW
1937
                }
×
1938
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
6✔
1939
                if edgeIndex == nil {
6✔
NEW
1940
                        return ErrGraphNoEdgesFound
×
NEW
1941
                }
×
1942

1943
                // In order to find the highest chan ID, we'll fetch a cursor
1944
                // and use that to seek to the "end" of our known rage.
1945
                cidCursor := edgeIndex.ReadCursor()
6✔
1946

6✔
1947
                lastChanID, _ := cidCursor.Last()
6✔
1948

6✔
1949
                // If there's no key, then this means that we don't actually
6✔
1950
                // know of any channels, so we'll return a predicable error.
6✔
1951
                if lastChanID == nil {
10✔
1952
                        return ErrGraphNoEdgesFound
4✔
1953
                }
4✔
1954

1955
                // Otherwise, we'll de serialize the channel ID and return it
1956
                // to the caller.
1957
                cid = byteOrder.Uint64(lastChanID)
5✔
1958

5✔
1959
                return nil
5✔
1960
        }, func() {
6✔
1961
                cid = 0
6✔
1962
        })
6✔
1963
        if err != nil && !errors.Is(err, ErrGraphNoEdgesFound) {
6✔
NEW
1964
                return 0, err
×
NEW
1965
        }
×
1966

1967
        return cid, nil
6✔
1968
}
1969

1970
// ChannelEdge represents the complete set of information for a channel edge in
1971
// the known channel graph. This struct couples the core information of the
1972
// edge as well as each of the known advertised edge policies.
1973
type ChannelEdge struct {
1974
        // Info contains all the static information describing the channel.
1975
        Info *models.ChannelEdgeInfo
1976

1977
        // Policy1 points to the "first" edge policy of the channel containing
1978
        // the dynamic information required to properly route through the edge.
1979
        Policy1 *models.ChannelEdgePolicy
1980

1981
        // Policy2 points to the "second" edge policy of the channel containing
1982
        // the dynamic information required to properly route through the edge.
1983
        Policy2 *models.ChannelEdgePolicy
1984

1985
        // Node1 is "node 1" in the channel. This is the node that would have
1986
        // produced Policy1 if it exists.
1987
        Node1 *models.LightningNode
1988

1989
        // Node2 is "node 2" in the channel. This is the node that would have
1990
        // produced Policy2 if it exists.
1991
        Node2 *models.LightningNode
1992
}
1993

1994
// ChanUpdatesInHorizon returns all the known channel edges which have at least
1995
// one edge that has an update timestamp within the specified horizon.
1996
func (c *KVStore) ChanUpdatesInHorizon(startTime,
1997
        endTime time.Time) ([]ChannelEdge, error) {
140✔
1998

140✔
1999
        // To ensure we don't return duplicate ChannelEdges, we'll use an
140✔
2000
        // additional map to keep track of the edges already seen to prevent
140✔
2001
        // re-adding it.
140✔
2002
        var edgesSeen map[uint64]struct{}
140✔
2003
        var edgesToCache map[uint64]ChannelEdge
140✔
2004
        var edgesInHorizon []ChannelEdge
140✔
2005

140✔
2006
        c.cacheMu.Lock()
140✔
2007
        defer c.cacheMu.Unlock()
140✔
2008

140✔
2009
        var hits int
140✔
2010
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
280✔
2011
                edges := tx.ReadBucket(edgeBucket)
140✔
2012
                if edges == nil {
140✔
NEW
2013
                        return ErrGraphNoEdgesFound
×
NEW
2014
                }
×
2015
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
140✔
2016
                if edgeIndex == nil {
140✔
NEW
2017
                        return ErrGraphNoEdgesFound
×
NEW
2018
                }
×
2019
                edgeUpdateIndex := edges.NestedReadBucket(edgeUpdateIndexBucket)
140✔
2020
                if edgeUpdateIndex == nil {
140✔
NEW
2021
                        return ErrGraphNoEdgesFound
×
NEW
2022
                }
×
2023

2024
                nodes := tx.ReadBucket(nodeBucket)
140✔
2025
                if nodes == nil {
140✔
NEW
2026
                        return ErrGraphNodesNotFound
×
NEW
2027
                }
×
2028

2029
                // We'll now obtain a cursor to perform a range query within
2030
                // the index to find all channels within the horizon.
2031
                updateCursor := edgeUpdateIndex.ReadCursor()
140✔
2032

140✔
2033
                var startTimeBytes, endTimeBytes [8 + 8]byte
140✔
2034
                byteOrder.PutUint64(
140✔
2035
                        startTimeBytes[:8], uint64(startTime.Unix()),
140✔
2036
                )
140✔
2037
                byteOrder.PutUint64(
140✔
2038
                        endTimeBytes[:8], uint64(endTime.Unix()),
140✔
2039
                )
140✔
2040

140✔
2041
                // With our start and end times constructed, we'll step through
140✔
2042
                // the index collecting the info and policy of each update of
140✔
2043
                // each channel that has a last update within the time range.
140✔
2044
                //
140✔
2045
                //nolint:ll
140✔
2046
                for indexKey, _ := updateCursor.Seek(startTimeBytes[:]); indexKey != nil &&
140✔
2047
                        bytes.Compare(indexKey, endTimeBytes[:]) <= 0; indexKey, _ = updateCursor.Next() {
189✔
2048
                        // We have a new eligible entry, so we'll slice of the
49✔
2049
                        // chan ID so we can query it in the DB.
49✔
2050
                        chanID := indexKey[8:]
49✔
2051

49✔
2052
                        // If we've already retrieved the info and policies for
49✔
2053
                        // this edge, then we can skip it as we don't need to do
49✔
2054
                        // so again.
49✔
2055
                        chanIDInt := byteOrder.Uint64(chanID)
49✔
2056
                        if _, ok := edgesSeen[chanIDInt]; ok {
68✔
2057
                                continue
19✔
2058
                        }
2059

2060
                        if channel, ok := c.chanCache.get(chanIDInt); ok {
42✔
2061
                                hits++
12✔
2062
                                edgesSeen[chanIDInt] = struct{}{}
12✔
2063
                                edgesInHorizon = append(edgesInHorizon, channel)
12✔
2064

12✔
2065
                                continue
12✔
2066
                        }
2067

2068
                        // First, we'll fetch the static edge information.
2069
                        edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID)
21✔
2070
                        if err != nil {
21✔
NEW
2071
                                chanID := byteOrder.Uint64(chanID)
×
NEW
2072
                                return fmt.Errorf("unable to fetch info for "+
×
NEW
2073
                                        "edge with chan_id=%v: %v", chanID, err)
×
NEW
2074
                        }
×
2075

2076
                        // With the static information obtained, we'll now
2077
                        // fetch the dynamic policy info.
2078
                        edge1, edge2, err := fetchChanEdgePolicies(
21✔
2079
                                edgeIndex, edges, chanID,
21✔
2080
                        )
21✔
2081
                        if err != nil {
21✔
NEW
2082
                                chanID := byteOrder.Uint64(chanID)
×
NEW
2083
                                return fmt.Errorf("unable to fetch policies "+
×
NEW
2084
                                        "for edge with chan_id=%v: %v", chanID,
×
NEW
2085
                                        err)
×
NEW
2086
                        }
×
2087

2088
                        node1, err := fetchLightningNode(
21✔
2089
                                nodes, edgeInfo.NodeKey1Bytes[:],
21✔
2090
                        )
21✔
2091
                        if err != nil {
21✔
NEW
2092
                                return err
×
NEW
2093
                        }
×
2094

2095
                        node2, err := fetchLightningNode(
21✔
2096
                                nodes, edgeInfo.NodeKey2Bytes[:],
21✔
2097
                        )
21✔
2098
                        if err != nil {
21✔
NEW
2099
                                return err
×
NEW
2100
                        }
×
2101

2102
                        // Finally, we'll collate this edge with the rest of
2103
                        // edges to be returned.
2104
                        edgesSeen[chanIDInt] = struct{}{}
21✔
2105
                        channel := ChannelEdge{
21✔
2106
                                Info:    &edgeInfo,
21✔
2107
                                Policy1: edge1,
21✔
2108
                                Policy2: edge2,
21✔
2109
                                Node1:   &node1,
21✔
2110
                                Node2:   &node2,
21✔
2111
                        }
21✔
2112
                        edgesInHorizon = append(edgesInHorizon, channel)
21✔
2113
                        edgesToCache[chanIDInt] = channel
21✔
2114
                }
2115

2116
                return nil
140✔
2117
        }, func() {
140✔
2118
                edgesSeen = make(map[uint64]struct{})
140✔
2119
                edgesToCache = make(map[uint64]ChannelEdge)
140✔
2120
                edgesInHorizon = nil
140✔
2121
        })
140✔
2122
        switch {
140✔
NEW
2123
        case errors.Is(err, ErrGraphNoEdgesFound):
×
NEW
2124
                fallthrough
×
NEW
2125
        case errors.Is(err, ErrGraphNodesNotFound):
×
NEW
2126
                break
×
2127

NEW
2128
        case err != nil:
×
NEW
2129
                return nil, err
×
2130
        }
2131

2132
        // Insert any edges loaded from disk into the cache.
2133
        for chanid, channel := range edgesToCache {
161✔
2134
                c.chanCache.insert(chanid, channel)
21✔
2135
        }
21✔
2136

2137
        log.Debugf("ChanUpdatesInHorizon hit percentage: %f (%d/%d)",
140✔
2138
                float64(hits)/float64(len(edgesInHorizon)), hits,
140✔
2139
                len(edgesInHorizon))
140✔
2140

140✔
2141
        return edgesInHorizon, nil
140✔
2142
}
2143

2144
// NodeUpdatesInHorizon returns all the known lightning node which have an
2145
// update timestamp within the passed range. This method can be used by two
2146
// nodes to quickly determine if they have the same set of up to date node
2147
// announcements.
2148
func (c *KVStore) NodeUpdatesInHorizon(startTime,
2149
        endTime time.Time) ([]models.LightningNode, error) {
11✔
2150

11✔
2151
        var nodesInHorizon []models.LightningNode
11✔
2152

11✔
2153
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
22✔
2154
                nodes := tx.ReadBucket(nodeBucket)
11✔
2155
                if nodes == nil {
11✔
NEW
2156
                        return ErrGraphNodesNotFound
×
NEW
2157
                }
×
2158

2159
                nodeUpdateIndex := nodes.NestedReadBucket(nodeUpdateIndexBucket)
11✔
2160
                if nodeUpdateIndex == nil {
11✔
NEW
2161
                        return ErrGraphNodesNotFound
×
NEW
2162
                }
×
2163

2164
                // We'll now obtain a cursor to perform a range query within
2165
                // the index to find all node announcements within the horizon.
2166
                updateCursor := nodeUpdateIndex.ReadCursor()
11✔
2167

11✔
2168
                var startTimeBytes, endTimeBytes [8 + 33]byte
11✔
2169
                byteOrder.PutUint64(
11✔
2170
                        startTimeBytes[:8], uint64(startTime.Unix()),
11✔
2171
                )
11✔
2172
                byteOrder.PutUint64(
11✔
2173
                        endTimeBytes[:8], uint64(endTime.Unix()),
11✔
2174
                )
11✔
2175

11✔
2176
                // With our start and end times constructed, we'll step through
11✔
2177
                // the index collecting info for each node within the time
11✔
2178
                // range.
11✔
2179
                //
11✔
2180
                //nolint:ll
11✔
2181
                for indexKey, _ := updateCursor.Seek(startTimeBytes[:]); indexKey != nil &&
11✔
2182
                        bytes.Compare(indexKey, endTimeBytes[:]) <= 0; indexKey, _ = updateCursor.Next() {
43✔
2183
                        nodePub := indexKey[8:]
32✔
2184
                        node, err := fetchLightningNode(nodes, nodePub)
32✔
2185
                        if err != nil {
32✔
NEW
2186
                                return err
×
NEW
2187
                        }
×
2188

2189
                        nodesInHorizon = append(nodesInHorizon, node)
32✔
2190
                }
2191

2192
                return nil
11✔
2193
        }, func() {
11✔
2194
                nodesInHorizon = nil
11✔
2195
        })
11✔
2196
        switch {
11✔
NEW
2197
        case errors.Is(err, ErrGraphNoEdgesFound):
×
NEW
2198
                fallthrough
×
NEW
2199
        case errors.Is(err, ErrGraphNodesNotFound):
×
NEW
2200
                break
×
2201

NEW
2202
        case err != nil:
×
NEW
2203
                return nil, err
×
2204
        }
2205

2206
        return nodesInHorizon, nil
11✔
2207
}
2208

2209
// FilterKnownChanIDs takes a set of channel IDs and return the subset of chan
// ID's that we don't know and are not known zombies of the passed set. In other
// words, we perform a set difference of our set of chan ID's and the ones
// passed in. This method can be used by callers to determine the set of
// channels another peer knows of that we don't.
//
// As a side effect, a channel that is currently marked as a zombie but whose
// advertised update timestamps no longer satisfy isZombieChan is resurrected
// (marked live) within the same transaction, which is why this runs under
// kvdb.Update rather than a read-only view.
func (c *KVStore) FilterKnownChanIDs(chansInfo []ChannelUpdateInfo,
	isZombieChan func(time.Time, time.Time) bool) ([]uint64, error) {

	var newChanIDs []uint64

	// cacheMu is held because markEdgeLiveUnsafe below assumes the caller
	// holds it.
	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()

	err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrGraphNoEdgesFound
		}

		// Fetch the zombie index, it may not exist if no edges have
		// ever been marked as zombies. If the index has been
		// initialized, we will use it later to skip known zombie edges.
		zombieIndex := edges.NestedReadBucket(zombieBucket)

		// We'll run through the set of chanIDs and collate only the
		// set of channel that are unable to be found within our db.
		var cidBytes [8]byte
		for _, info := range chansInfo {
			scid := info.ShortChannelID.ToUint64()
			byteOrder.PutUint64(cidBytes[:], scid)

			// If the edge is already known, skip it.
			if v := edgeIndex.Get(cidBytes[:]); v != nil {
				continue
			}

			// If the edge is a known zombie, skip it.
			if zombieIndex != nil {
				isZombie, _, _ := isZombieEdge(
					zombieIndex, scid,
				)

				// TODO(ziggie): Make sure that for the strict
				// pruning case we compare the pubkeys and
				// whether the right timestamp is not older than
				// the `ChannelPruneExpiry`.
				//
				// NOTE: The timestamp data has no verification
				// attached to it in the `ReplyChannelRange` msg
				// so we are trusting this data at this point.
				// However it is not critical because we are
				// just removing the channel from the db when
				// the timestamps are more recent. During the
				// querying of the gossip msg verification
				// happens as usual.
				// However we should start punishing peers when
				// they don't provide us honest data ?
				isStillZombie := isZombieChan(
					info.Node1UpdateTimestamp,
					info.Node2UpdateTimestamp,
				)

				switch {
				// If the edge is a known zombie and if we
				// would still consider it a zombie given the
				// latest update timestamps, then we skip this
				// channel.
				case isZombie && isStillZombie:
					continue

				// Otherwise, if we have marked it as a zombie
				// but the latest update timestamps could bring
				// it back from the dead, then we mark it alive,
				// and we let it be added to the set of IDs to
				// query our peer for.
				case isZombie && !isStillZombie:
					err := c.markEdgeLiveUnsafe(tx, scid)
					if err != nil {
						return err
					}
				}
			}

			newChanIDs = append(newChanIDs, scid)
		}

		return nil
	}, func() {
		// Reset the accumulator in case the transaction is retried.
		newChanIDs = nil
	})
	switch {
	// If we don't know of any edges yet, then we'll return the entire set
	// of chan IDs specified.
	case errors.Is(err, ErrGraphNoEdgesFound):
		ogChanIDs := make([]uint64, len(chansInfo))
		for i, info := range chansInfo {
			ogChanIDs[i] = info.ShortChannelID.ToUint64()
		}

		return ogChanIDs, nil

	case err != nil:
		return nil, err
	}

	return newChanIDs, nil
}
2320

2321
// ChannelUpdateInfo couples the SCID of a channel with the timestamps of the
// latest received channel updates for the channel.
type ChannelUpdateInfo struct {
	// ShortChannelID is the SCID identifier of the channel.
	ShortChannelID lnwire.ShortChannelID

	// Node1UpdateTimestamp is the timestamp of the latest received update
	// from the node 1 channel peer. This will be set to zero time if no
	// update has yet been received from this node.
	Node1UpdateTimestamp time.Time

	// Node2UpdateTimestamp is the timestamp of the latest received update
	// from the node 2 channel peer. This will be set to zero time if no
	// update has yet been received from this node.
	Node2UpdateTimestamp time.Time
}
2337

2338
// NewChannelUpdateInfo is a constructor which makes sure we initialize the
2339
// timestamps with zero seconds unix timestamp which equals
2340
// `January 1, 1970, 00:00:00 UTC` in case the value is `time.Time{}`.
2341
func NewChannelUpdateInfo(scid lnwire.ShortChannelID, node1Timestamp,
2342
        node2Timestamp time.Time) ChannelUpdateInfo {
221✔
2343

221✔
2344
        chanInfo := ChannelUpdateInfo{
221✔
2345
                ShortChannelID:       scid,
221✔
2346
                Node1UpdateTimestamp: node1Timestamp,
221✔
2347
                Node2UpdateTimestamp: node2Timestamp,
221✔
2348
        }
221✔
2349

221✔
2350
        if node1Timestamp.IsZero() {
432✔
2351
                chanInfo.Node1UpdateTimestamp = time.Unix(0, 0)
211✔
2352
        }
211✔
2353

2354
        if node2Timestamp.IsZero() {
432✔
2355
                chanInfo.Node2UpdateTimestamp = time.Unix(0, 0)
211✔
2356
        }
211✔
2357

2358
        return chanInfo
221✔
2359
}
2360

2361
// BlockChannelRange represents a range of channels for a given block height.
type BlockChannelRange struct {
	// Height is the height of the block all of the channels below were
	// included in.
	Height uint32

	// Channels is the list of channels identified by their short ID
	// representation known to us that were included in the block height
	// above. The list may include channel update timestamp information if
	// requested.
	Channels []ChannelUpdateInfo
}
2373

2374
// FilterChannelRange returns the channel ID's of all known channels which were
// mined in a block height within the passed range. The channel IDs are grouped
// by their common block height. This method can be used to quickly share with a
// peer the set of channels we know of within a particular range to catch them
// up after a period of time offline. If withTimestamps is true then the
// timestamp info of the latest received channel update messages of the channel
// will be included in the response.
func (c *KVStore) FilterChannelRange(startHeight,
	endHeight uint32, withTimestamps bool) ([]BlockChannelRange, error) {

	startChanID := &lnwire.ShortChannelID{
		BlockHeight: startHeight,
	}

	// The end of the range is the largest possible SCID within endHeight:
	// max 24-bit tx index and max 16-bit output position.
	endChanID := lnwire.ShortChannelID{
		BlockHeight: endHeight,
		TxIndex:     math.MaxUint32 & 0x00ffffff,
		TxPosition:  math.MaxUint16,
	}

	// As we need to perform a range scan, we'll convert the starting and
	// ending height to their corresponding values when encoded using short
	// channel ID's.
	var chanIDStart, chanIDEnd [8]byte
	byteOrder.PutUint64(chanIDStart[:], startChanID.ToUint64())
	byteOrder.PutUint64(chanIDEnd[:], endChanID.ToUint64())

	var channelsPerBlock map[uint32][]ChannelUpdateInfo
	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrGraphNoEdgesFound
		}

		cursor := edgeIndex.ReadCursor()

		// We'll now iterate through the database, and find each
		// channel ID that resides within the specified range.
		//
		//nolint:ll
		for k, v := cursor.Seek(chanIDStart[:]); k != nil &&
			bytes.Compare(k, chanIDEnd[:]) <= 0; k, v = cursor.Next() {
			// Don't send alias SCIDs during gossip sync. A nil
			// AuthProof below identifies an unannounced (alias)
			// channel, which is skipped.
			edgeReader := bytes.NewReader(v)
			edgeInfo, err := deserializeChanEdgeInfo(edgeReader)
			if err != nil {
				return err
			}

			if edgeInfo.AuthProof == nil {
				continue
			}

			// This channel ID rests within the target range, so
			// we'll add it to our returned set.
			rawCid := byteOrder.Uint64(k)
			cid := lnwire.NewShortChanIDFromInt(rawCid)

			chanInfo := NewChannelUpdateInfo(
				cid, time.Time{}, time.Time{},
			)

			if !withTimestamps {
				channelsPerBlock[cid.BlockHeight] = append(
					channelsPerBlock[cid.BlockHeight],
					chanInfo,
				)

				continue
			}

			// Timestamps were requested: look up both directed
			// policies and copy their last-update times.
			node1Key, node2Key := computeEdgePolicyKeys(&edgeInfo)

			rawPolicy := edges.Get(node1Key)
			if len(rawPolicy) != 0 {
				r := bytes.NewReader(rawPolicy)

				// ErrEdgePolicyOptionalFieldNotFound is
				// tolerated; the policy is still usable.
				edge, err := deserializeChanEdgePolicyRaw(r)
				if err != nil && !errors.Is(
					err, ErrEdgePolicyOptionalFieldNotFound,
				) {

					return err
				}

				chanInfo.Node1UpdateTimestamp = edge.LastUpdate
			}

			rawPolicy = edges.Get(node2Key)
			if len(rawPolicy) != 0 {
				r := bytes.NewReader(rawPolicy)

				edge, err := deserializeChanEdgePolicyRaw(r)
				if err != nil && !errors.Is(
					err, ErrEdgePolicyOptionalFieldNotFound,
				) {

					return err
				}

				chanInfo.Node2UpdateTimestamp = edge.LastUpdate
			}

			channelsPerBlock[cid.BlockHeight] = append(
				channelsPerBlock[cid.BlockHeight], chanInfo,
			)
		}

		return nil
	}, func() {
		channelsPerBlock = make(map[uint32][]ChannelUpdateInfo)
	})

	switch {
	// If we don't know of any channels yet, then there's nothing to
	// filter, so we'll return an empty slice.
	case errors.Is(err, ErrGraphNoEdgesFound) || len(channelsPerBlock) == 0:
		return nil, nil

	case err != nil:
		return nil, err
	}

	// Return the channel ranges in ascending block height order.
	blocks := make([]uint32, 0, len(channelsPerBlock))
	for block := range channelsPerBlock {
		blocks = append(blocks, block)
	}
	sort.Slice(blocks, func(i, j int) bool {
		return blocks[i] < blocks[j]
	})

	channelRanges := make([]BlockChannelRange, 0, len(channelsPerBlock))
	for _, block := range blocks {
		channelRanges = append(channelRanges, BlockChannelRange{
			Height:   block,
			Channels: channelsPerBlock[block],
		})
	}

	return channelRanges, nil
}
2520

2521
// FetchChanInfos returns the set of channel edges that correspond to the
// passed channel ID's. If an edge in the query is unknown to the database, it
// will be skipped and the result will contain only those edges that exist at
// the time of the query. This can be used to respond to peer queries that are
// seeking to fill in gaps in their view of the channel graph.
func (c *KVStore) FetchChanInfos(chanIDs []uint64) ([]ChannelEdge, error) {
	return c.fetchChanInfos(nil, chanIDs)
}
2529

2530
// fetchChanInfos returns the set of channel edges that correspond to the
// passed channel ID's. If an edge in the query is unknown to the database, it
// will be skipped and the result will contain only those edges that exist at
// the time of the query. This can be used to respond to peer queries that are
// seeking to fill in gaps in their view of the channel graph.
//
// NOTE: An optional transaction may be provided. If none is provided, then a
// new one will be created.
func (c *KVStore) fetchChanInfos(tx kvdb.RTx, chanIDs []uint64) (
	[]ChannelEdge, error) {
	// TODO(roasbeef): sort cids?

	var (
		chanEdges []ChannelEdge
		cidBytes  [8]byte
	)

	// fetchChanInfos does the actual work against a read transaction,
	// either the caller-supplied one or a fresh view started below.
	fetchChanInfos := func(tx kvdb.RTx) error {
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrGraphNoEdgesFound
		}
		nodes := tx.ReadBucket(nodeBucket)
		if nodes == nil {
			return ErrGraphNotFound
		}

		for _, cid := range chanIDs {
			byteOrder.PutUint64(cidBytes[:], cid)

			// First, we'll fetch the static edge information. If
			// the edge is unknown, we will skip the edge and
			// continue gathering all known edges.
			edgeInfo, err := fetchChanEdgeInfo(
				edgeIndex, cidBytes[:],
			)
			switch {
			case errors.Is(err, ErrEdgeNotFound):
				continue
			case err != nil:
				return err
			}

			// With the static information obtained, we'll now
			// fetch the dynamic policy info.
			edge1, edge2, err := fetchChanEdgePolicies(
				edgeIndex, edges, cidBytes[:],
			)
			if err != nil {
				return err
			}

			node1, err := fetchLightningNode(
				nodes, edgeInfo.NodeKey1Bytes[:],
			)
			if err != nil {
				return err
			}

			node2, err := fetchLightningNode(
				nodes, edgeInfo.NodeKey2Bytes[:],
			)
			if err != nil {
				return err
			}

			chanEdges = append(chanEdges, ChannelEdge{
				Info:    &edgeInfo,
				Policy1: edge1,
				Policy2: edge2,
				Node1:   &node1,
				Node2:   &node2,
			})
		}

		return nil
	}

	// No transaction supplied: run inside a fresh read transaction, with
	// the reset closure clearing the accumulator on retry.
	if tx == nil {
		err := kvdb.View(c.db, fetchChanInfos, func() {
			chanEdges = nil
		})
		if err != nil {
			return nil, err
		}

		return chanEdges, nil
	}

	err := fetchChanInfos(tx)
	if err != nil {
		return nil, err
	}

	return chanEdges, nil
}
2630

2631
func delEdgeUpdateIndexEntry(edgesBucket kvdb.RwBucket, chanID uint64,
2632
        edge1, edge2 *models.ChannelEdgePolicy) error {
145✔
2633

145✔
2634
        // First, we'll fetch the edge update index bucket which currently
145✔
2635
        // stores an entry for the channel we're about to delete.
145✔
2636
        updateIndex := edgesBucket.NestedReadWriteBucket(edgeUpdateIndexBucket)
145✔
2637
        if updateIndex == nil {
145✔
NEW
2638
                // No edges in bucket, return early.
×
NEW
2639
                return nil
×
NEW
2640
        }
×
2641

2642
        // Now that we have the bucket, we'll attempt to construct a template
2643
        // for the index key: updateTime || chanid.
2644
        var indexKey [8 + 8]byte
145✔
2645
        byteOrder.PutUint64(indexKey[8:], chanID)
145✔
2646

145✔
2647
        // With the template constructed, we'll attempt to delete an entry that
145✔
2648
        // would have been created by both edges: we'll alternate the update
145✔
2649
        // times, as one may had overridden the other.
145✔
2650
        if edge1 != nil {
158✔
2651
                byteOrder.PutUint64(
13✔
2652
                        indexKey[:8], uint64(edge1.LastUpdate.Unix()),
13✔
2653
                )
13✔
2654
                if err := updateIndex.Delete(indexKey[:]); err != nil {
13✔
NEW
2655
                        return err
×
NEW
2656
                }
×
2657
        }
2658

2659
        // We'll also attempt to delete the entry that may have been created by
2660
        // the second edge.
2661
        if edge2 != nil {
160✔
2662
                byteOrder.PutUint64(
15✔
2663
                        indexKey[:8], uint64(edge2.LastUpdate.Unix()),
15✔
2664
                )
15✔
2665
                if err := updateIndex.Delete(indexKey[:]); err != nil {
15✔
NEW
2666
                        return err
×
NEW
2667
                }
×
2668
        }
2669

2670
        return nil
145✔
2671
}
2672

2673
// delChannelEdgeUnsafe deletes the edge with the given chanID from the graph
2674
// cache. It then goes on to delete any policy info and edge info for this
2675
// channel from the DB and finally, if isZombie is true, it will add an entry
2676
// for this channel in the zombie index.
2677
//
2678
// NOTE: this method MUST only be called if the cacheMu has already been
2679
// acquired.
2680
func (c *KVStore) delChannelEdgeUnsafe(edges, edgeIndex, chanIndex,
2681
        zombieIndex kvdb.RwBucket, chanID []byte, isZombie,
2682
        strictZombie bool) error {
210✔
2683

210✔
2684
        edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID)
210✔
2685
        if err != nil {
275✔
2686
                return err
65✔
2687
        }
65✔
2688

2689
        if c.graphCache != nil {
290✔
2690
                c.graphCache.RemoveChannel(
145✔
2691
                        edgeInfo.NodeKey1Bytes, edgeInfo.NodeKey2Bytes,
145✔
2692
                        edgeInfo.ChannelID,
145✔
2693
                )
145✔
2694
        }
145✔
2695

2696
        // We'll also remove the entry in the edge update index bucket before
2697
        // we delete the edges themselves so we can access their last update
2698
        // times.
2699
        cid := byteOrder.Uint64(chanID)
145✔
2700
        edge1, edge2, err := fetchChanEdgePolicies(edgeIndex, edges, chanID)
145✔
2701
        if err != nil {
145✔
NEW
2702
                return err
×
NEW
2703
        }
×
2704
        err = delEdgeUpdateIndexEntry(edges, cid, edge1, edge2)
145✔
2705
        if err != nil {
145✔
NEW
2706
                return err
×
NEW
2707
        }
×
2708

2709
        // The edge key is of the format pubKey || chanID. First we construct
2710
        // the latter half, populating the channel ID.
2711
        var edgeKey [33 + 8]byte
145✔
2712
        copy(edgeKey[33:], chanID)
145✔
2713

145✔
2714
        // With the latter half constructed, copy over the first public key to
145✔
2715
        // delete the edge in this direction, then the second to delete the
145✔
2716
        // edge in the opposite direction.
145✔
2717
        copy(edgeKey[:33], edgeInfo.NodeKey1Bytes[:])
145✔
2718
        if edges.Get(edgeKey[:]) != nil {
290✔
2719
                if err := edges.Delete(edgeKey[:]); err != nil {
145✔
NEW
2720
                        return err
×
NEW
2721
                }
×
2722
        }
2723
        copy(edgeKey[:33], edgeInfo.NodeKey2Bytes[:])
145✔
2724
        if edges.Get(edgeKey[:]) != nil {
290✔
2725
                if err := edges.Delete(edgeKey[:]); err != nil {
145✔
NEW
2726
                        return err
×
NEW
2727
                }
×
2728
        }
2729

2730
        // As part of deleting the edge we also remove all disabled entries
2731
        // from the edgePolicyDisabledIndex bucket. We do that for both
2732
        // directions.
2733
        err = updateEdgePolicyDisabledIndex(edges, cid, false, false)
145✔
2734
        if err != nil {
145✔
NEW
2735
                return err
×
NEW
2736
        }
×
2737
        err = updateEdgePolicyDisabledIndex(edges, cid, true, false)
145✔
2738
        if err != nil {
145✔
NEW
2739
                return err
×
NEW
2740
        }
×
2741

2742
        // With the edge data deleted, we can purge the information from the two
2743
        // edge indexes.
2744
        if err := edgeIndex.Delete(chanID); err != nil {
145✔
NEW
2745
                return err
×
NEW
2746
        }
×
2747
        var b bytes.Buffer
145✔
2748
        if err := WriteOutpoint(&b, &edgeInfo.ChannelPoint); err != nil {
145✔
NEW
2749
                return err
×
NEW
2750
        }
×
2751
        if err := chanIndex.Delete(b.Bytes()); err != nil {
145✔
NEW
2752
                return err
×
NEW
2753
        }
×
2754

2755
        // Finally, we'll mark the edge as a zombie within our index if it's
2756
        // being removed due to the channel becoming a zombie. We do this to
2757
        // ensure we don't store unnecessary data for spent channels.
2758
        if !isZombie {
265✔
2759
                return nil
120✔
2760
        }
120✔
2761

2762
        nodeKey1, nodeKey2 := edgeInfo.NodeKey1Bytes, edgeInfo.NodeKey2Bytes
28✔
2763
        if strictZombie {
33✔
2764
                nodeKey1, nodeKey2 = makeZombiePubkeys(&edgeInfo, edge1, edge2)
5✔
2765
        }
5✔
2766

2767
        return markEdgeZombie(
28✔
2768
                zombieIndex, byteOrder.Uint64(chanID), nodeKey1, nodeKey2,
28✔
2769
        )
28✔
2770
}
2771

2772
// makeZombiePubkeys derives the node pubkeys to store in the zombie index for a
2773
// particular pair of channel policies. The return values are one of:
2774
//  1. (pubkey1, pubkey2)
2775
//  2. (pubkey1, blank)
2776
//  3. (blank, pubkey2)
2777
//
2778
// A blank pubkey means that corresponding node will be unable to resurrect a
2779
// channel on its own. For example, node1 may continue to publish recent
2780
// updates, but node2 has fallen way behind. After marking an edge as a zombie,
2781
// we don't want another fresh update from node1 to resurrect, as the edge can
2782
// only become live once node2 finally sends something recent.
2783
//
2784
// In the case where we have neither update, we allow either party to resurrect
2785
// the channel. If the channel were to be marked zombie again, it would be
2786
// marked with the correct lagging channel since we received an update from only
2787
// one side.
2788
func makeZombiePubkeys(info *models.ChannelEdgeInfo,
2789
        e1, e2 *models.ChannelEdgePolicy) ([33]byte, [33]byte) {
5✔
2790

5✔
2791
        switch {
5✔
2792
        // If we don't have either edge policy, we'll return both pubkeys so
2793
        // that the channel can be resurrected by either party.
2794
        case e1 == nil && e2 == nil:
2✔
2795
                return info.NodeKey1Bytes, info.NodeKey2Bytes
2✔
2796

2797
        // If we're missing edge1, or if both edges are present but edge1 is
2798
        // older, we'll return edge1's pubkey and a blank pubkey for edge2. This
2799
        // means that only an update from edge1 will be able to resurrect the
2800
        // channel.
2801
        case e1 == nil || (e2 != nil && e1.LastUpdate.Before(e2.LastUpdate)):
1✔
2802
                return info.NodeKey1Bytes, [33]byte{}
1✔
2803

2804
        // Otherwise, we're missing edge2 or edge2 is the older side, so we
2805
        // return a blank pubkey for edge1. In this case, only an update from
2806
        // edge2 can resurect the channel.
2807
        default:
2✔
2808
                return [33]byte{}, info.NodeKey2Bytes
2✔
2809
        }
2810
}
2811

2812
// UpdateEdgePolicy updates the edge routing policy for a single directed edge
2813
// within the database for the referenced channel. The `flags` attribute within
2814
// the ChannelEdgePolicy determines which of the directed edges are being
2815
// updated. If the flag is 1, then the first node's information is being
2816
// updated, otherwise it's the second node's information. The node ordering is
2817
// determined by the lexicographical ordering of the identity public keys of the
2818
// nodes on either side of the channel.
2819
func (c *KVStore) UpdateEdgePolicy(edge *models.ChannelEdgePolicy,
2820
        op ...batch.SchedulerOption) error {
2,666✔
2821

2,666✔
2822
        var (
2,666✔
2823
                isUpdate1    bool
2,666✔
2824
                edgeNotFound bool
2,666✔
2825
        )
2,666✔
2826

2,666✔
2827
        r := &batch.Request{
2,666✔
2828
                Reset: func() {
5,332✔
2829
                        isUpdate1 = false
2,666✔
2830
                        edgeNotFound = false
2,666✔
2831
                },
2,666✔
2832
                Update: func(tx kvdb.RwTx) error {
2,666✔
2833
                        var err error
2,666✔
2834
                        isUpdate1, err = updateEdgePolicy(
2,666✔
2835
                                tx, edge, c.graphCache,
2,666✔
2836
                        )
2,666✔
2837

2,666✔
2838
                        if err != nil {
2,669✔
2839
                                log.Errorf("UpdateEdgePolicy faild: %v", err)
3✔
2840
                        }
3✔
2841

2842
                        // Silence ErrEdgeNotFound so that the batch can
2843
                        // succeed, but propagate the error via local state.
2844
                        if errors.Is(err, ErrEdgeNotFound) {
2,669✔
2845
                                edgeNotFound = true
3✔
2846
                                return nil
3✔
2847
                        }
3✔
2848

2849
                        return err
2,663✔
2850
                },
2851
                OnCommit: func(err error) error {
2,666✔
2852
                        switch {
2,666✔
NEW
2853
                        case err != nil:
×
NEW
2854
                                return err
×
2855
                        case edgeNotFound:
3✔
2856
                                return ErrEdgeNotFound
3✔
2857
                        default:
2,663✔
2858
                                c.updateEdgeCache(edge, isUpdate1)
2,663✔
2859
                                return nil
2,663✔
2860
                        }
2861
                },
2862
        }
2863

2864
        for _, f := range op {
2,669✔
2865
                f(r)
3✔
2866
        }
3✔
2867

2868
        return c.chanScheduler.Execute(r)
2,666✔
2869
}
2870

2871
func (c *KVStore) updateEdgeCache(e *models.ChannelEdgePolicy,
2872
        isUpdate1 bool) {
2,663✔
2873

2,663✔
2874
        // If an entry for this channel is found in reject cache, we'll modify
2,663✔
2875
        // the entry with the updated timestamp for the direction that was just
2,663✔
2876
        // written. If the edge doesn't exist, we'll load the cache entry lazily
2,663✔
2877
        // during the next query for this edge.
2,663✔
2878
        if entry, ok := c.rejectCache.get(e.ChannelID); ok {
2,671✔
2879
                if isUpdate1 {
14✔
2880
                        entry.upd1Time = e.LastUpdate.Unix()
6✔
2881
                } else {
11✔
2882
                        entry.upd2Time = e.LastUpdate.Unix()
5✔
2883
                }
5✔
2884
                c.rejectCache.insert(e.ChannelID, entry)
8✔
2885
        }
2886

2887
        // If an entry for this channel is found in channel cache, we'll modify
2888
        // the entry with the updated policy for the direction that was just
2889
        // written. If the edge doesn't exist, we'll defer loading the info and
2890
        // policies and lazily read from disk during the next query.
2891
        if channel, ok := c.chanCache.get(e.ChannelID); ok {
2,666✔
2892
                if isUpdate1 {
6✔
2893
                        channel.Policy1 = e
3✔
2894
                } else {
6✔
2895
                        channel.Policy2 = e
3✔
2896
                }
3✔
2897
                c.chanCache.insert(e.ChannelID, channel)
3✔
2898
        }
2899
}
2900

2901
// updateEdgePolicy attempts to update an edge's policy within the relevant
2902
// buckets using an existing database transaction. The returned boolean will be
2903
// true if the updated policy belongs to node1, and false if the policy belonged
2904
// to node2.
2905
func updateEdgePolicy(tx kvdb.RwTx, edge *models.ChannelEdgePolicy,
2906
        graphCache *GraphCache) (bool, error) {
2,666✔
2907

2,666✔
2908
        edges := tx.ReadWriteBucket(edgeBucket)
2,666✔
2909
        if edges == nil {
2,666✔
NEW
2910
                return false, ErrEdgeNotFound
×
NEW
2911
        }
×
2912
        edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
2,666✔
2913
        if edgeIndex == nil {
2,666✔
NEW
2914
                return false, ErrEdgeNotFound
×
NEW
2915
        }
×
2916

2917
        // Create the channelID key be converting the channel ID
2918
        // integer into a byte slice.
2919
        var chanID [8]byte
2,666✔
2920
        byteOrder.PutUint64(chanID[:], edge.ChannelID)
2,666✔
2921

2,666✔
2922
        // With the channel ID, we then fetch the value storing the two
2,666✔
2923
        // nodes which connect this channel edge.
2,666✔
2924
        nodeInfo := edgeIndex.Get(chanID[:])
2,666✔
2925
        if nodeInfo == nil {
2,669✔
2926
                return false, ErrEdgeNotFound
3✔
2927
        }
3✔
2928

2929
        // Depending on the flags value passed above, either the first
2930
        // or second edge policy is being updated.
2931
        var fromNode, toNode []byte
2,663✔
2932
        var isUpdate1 bool
2,663✔
2933
        if edge.ChannelFlags&lnwire.ChanUpdateDirection == 0 {
4,000✔
2934
                fromNode = nodeInfo[:33]
1,337✔
2935
                toNode = nodeInfo[33:66]
1,337✔
2936
                isUpdate1 = true
1,337✔
2937
        } else {
2,666✔
2938
                fromNode = nodeInfo[33:66]
1,329✔
2939
                toNode = nodeInfo[:33]
1,329✔
2940
                isUpdate1 = false
1,329✔
2941
        }
1,329✔
2942

2943
        // Finally, with the direction of the edge being updated
2944
        // identified, we update the on-disk edge representation.
2945
        err := putChanEdgePolicy(edges, edge, fromNode, toNode)
2,663✔
2946
        if err != nil {
2,663✔
NEW
2947
                return false, err
×
NEW
2948
        }
×
2949

2950
        var (
2,663✔
2951
                fromNodePubKey route.Vertex
2,663✔
2952
                toNodePubKey   route.Vertex
2,663✔
2953
        )
2,663✔
2954
        copy(fromNodePubKey[:], fromNode)
2,663✔
2955
        copy(toNodePubKey[:], toNode)
2,663✔
2956

2,663✔
2957
        if graphCache != nil {
4,940✔
2958
                graphCache.UpdatePolicy(
2,277✔
2959
                        edge, fromNodePubKey, toNodePubKey, isUpdate1,
2,277✔
2960
                )
2,277✔
2961
        }
2,277✔
2962

2963
        return isUpdate1, nil
2,663✔
2964
}
2965

2966
// isPublic determines whether the node is seen as public within the graph from
2967
// the source node's point of view. An existing database transaction can also be
2968
// specified.
2969
func (c *KVStore) isPublic(tx kvdb.RTx, nodePub route.Vertex,
2970
        sourcePubKey []byte) (bool, error) {
16✔
2971

16✔
2972
        // In order to determine whether this node is publicly advertised within
16✔
2973
        // the graph, we'll need to look at all of its edges and check whether
16✔
2974
        // they extend to any other node than the source node. errDone will be
16✔
2975
        // used to terminate the check early.
16✔
2976
        nodeIsPublic := false
16✔
2977
        errDone := errors.New("done")
16✔
2978
        err := c.ForEachNodeChannelTx(tx, nodePub, func(tx kvdb.RTx,
16✔
2979
                info *models.ChannelEdgeInfo, _ *models.ChannelEdgePolicy,
16✔
2980
                _ *models.ChannelEdgePolicy) error {
29✔
2981

13✔
2982
                // If this edge doesn't extend to the source node, we'll
13✔
2983
                // terminate our search as we can now conclude that the node is
13✔
2984
                // publicly advertised within the graph due to the local node
13✔
2985
                // knowing of the current edge.
13✔
2986
                if !bytes.Equal(info.NodeKey1Bytes[:], sourcePubKey) &&
13✔
2987
                        !bytes.Equal(info.NodeKey2Bytes[:], sourcePubKey) {
19✔
2988

6✔
2989
                        nodeIsPublic = true
6✔
2990
                        return errDone
6✔
2991
                }
6✔
2992

2993
                // Since the edge _does_ extend to the source node, we'll also
2994
                // need to ensure that this is a public edge.
2995
                if info.AuthProof != nil {
19✔
2996
                        nodeIsPublic = true
9✔
2997
                        return errDone
9✔
2998
                }
9✔
2999

3000
                // Otherwise, we'll continue our search.
3001
                return nil
4✔
3002
        })
3003
        if err != nil && !errors.Is(err, errDone) {
16✔
NEW
3004
                return false, err
×
NEW
3005
        }
×
3006

3007
        return nodeIsPublic, nil
16✔
3008
}
3009

3010
// FetchLightningNodeTx attempts to look up a target node by its identity
3011
// public key. If the node isn't found in the database, then
3012
// ErrGraphNodeNotFound is returned. An optional transaction may be provided.
3013
// If none is provided, then a new one will be created.
3014
func (c *KVStore) FetchLightningNodeTx(tx kvdb.RTx, nodePub route.Vertex) (
3015
        *models.LightningNode, error) {
3,633✔
3016

3,633✔
3017
        return c.fetchLightningNode(tx, nodePub)
3,633✔
3018
}
3,633✔
3019

3020
// FetchLightningNode attempts to look up a target node by its identity public
3021
// key. If the node isn't found in the database, then ErrGraphNodeNotFound is
3022
// returned.
3023
func (c *KVStore) FetchLightningNode(nodePub route.Vertex) (
3024
        *models.LightningNode, error) {
155✔
3025

155✔
3026
        return c.fetchLightningNode(nil, nodePub)
155✔
3027
}
155✔
3028

3029
// fetchLightningNode attempts to look up a target node by its identity public
3030
// key. If the node isn't found in the database, then ErrGraphNodeNotFound is
3031
// returned. An optional transaction may be provided. If none is provided, then
3032
// a new one will be created.
3033
func (c *KVStore) fetchLightningNode(tx kvdb.RTx,
3034
        nodePub route.Vertex) (*models.LightningNode, error) {
3,785✔
3035

3,785✔
3036
        var node *models.LightningNode
3,785✔
3037
        fetch := func(tx kvdb.RTx) error {
7,570✔
3038
                // First grab the nodes bucket which stores the mapping from
3,785✔
3039
                // pubKey to node information.
3,785✔
3040
                nodes := tx.ReadBucket(nodeBucket)
3,785✔
3041
                if nodes == nil {
3,785✔
NEW
3042
                        return ErrGraphNotFound
×
NEW
3043
                }
×
3044

3045
                // If a key for this serialized public key isn't found, then
3046
                // the target node doesn't exist within the database.
3047
                nodeBytes := nodes.Get(nodePub[:])
3,785✔
3048
                if nodeBytes == nil {
3,802✔
3049
                        return ErrGraphNodeNotFound
17✔
3050
                }
17✔
3051

3052
                // If the node is found, then we can de deserialize the node
3053
                // information to return to the user.
3054
                nodeReader := bytes.NewReader(nodeBytes)
3,771✔
3055
                n, err := deserializeLightningNode(nodeReader)
3,771✔
3056
                if err != nil {
3,771✔
NEW
3057
                        return err
×
NEW
3058
                }
×
3059

3060
                node = &n
3,771✔
3061

3,771✔
3062
                return nil
3,771✔
3063
        }
3064

3065
        if tx == nil {
3,943✔
3066
                err := kvdb.View(
158✔
3067
                        c.db, fetch, func() {
316✔
3068
                                node = nil
158✔
3069
                        },
158✔
3070
                )
3071
                if err != nil {
164✔
3072
                        return nil, err
6✔
3073
                }
6✔
3074

3075
                return node, nil
155✔
3076
        }
3077

3078
        err := fetch(tx)
3,627✔
3079
        if err != nil {
3,638✔
3080
                return nil, err
11✔
3081
        }
11✔
3082

3083
        return node, nil
3,616✔
3084
}
3085

3086
// HasLightningNode determines if the graph has a vertex identified by the
3087
// target node identity public key. If the node exists in the database, a
3088
// timestamp of when the data for the node was lasted updated is returned along
3089
// with a true boolean. Otherwise, an empty time.Time is returned with a false
3090
// boolean.
3091
func (c *KVStore) HasLightningNode(nodePub [33]byte) (time.Time, bool,
3092
        error) {
19✔
3093

19✔
3094
        var (
19✔
3095
                updateTime time.Time
19✔
3096
                exists     bool
19✔
3097
        )
19✔
3098

19✔
3099
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
38✔
3100
                // First grab the nodes bucket which stores the mapping from
19✔
3101
                // pubKey to node information.
19✔
3102
                nodes := tx.ReadBucket(nodeBucket)
19✔
3103
                if nodes == nil {
19✔
NEW
3104
                        return ErrGraphNotFound
×
NEW
3105
                }
×
3106

3107
                // If a key for this serialized public key isn't found, we can
3108
                // exit early.
3109
                nodeBytes := nodes.Get(nodePub[:])
19✔
3110
                if nodeBytes == nil {
25✔
3111
                        exists = false
6✔
3112
                        return nil
6✔
3113
                }
6✔
3114

3115
                // Otherwise we continue on to obtain the time stamp
3116
                // representing the last time the data for this node was
3117
                // updated.
3118
                nodeReader := bytes.NewReader(nodeBytes)
16✔
3119
                node, err := deserializeLightningNode(nodeReader)
16✔
3120
                if err != nil {
16✔
NEW
3121
                        return err
×
NEW
3122
                }
×
3123

3124
                exists = true
16✔
3125
                updateTime = node.LastUpdate
16✔
3126

16✔
3127
                return nil
16✔
3128
        }, func() {
19✔
3129
                updateTime = time.Time{}
19✔
3130
                exists = false
19✔
3131
        })
19✔
3132
        if err != nil {
19✔
NEW
3133
                return time.Time{}, exists, err
×
NEW
3134
        }
×
3135

3136
        return updateTime, exists, nil
19✔
3137
}
3138

3139
// nodeTraversal is used to traverse all channels of a node given by its
3140
// public key and passes channel information into the specified callback.
3141
func nodeTraversal(tx kvdb.RTx, nodePub []byte, db kvdb.Backend,
3142
        cb func(kvdb.RTx, *models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
3143
                *models.ChannelEdgePolicy) error) error {
1,269✔
3144

1,269✔
3145
        traversal := func(tx kvdb.RTx) error {
2,538✔
3146
                edges := tx.ReadBucket(edgeBucket)
1,269✔
3147
                if edges == nil {
1,269✔
NEW
3148
                        return ErrGraphNotFound
×
NEW
3149
                }
×
3150
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
1,269✔
3151
                if edgeIndex == nil {
1,269✔
NEW
3152
                        return ErrGraphNoEdgesFound
×
NEW
3153
                }
×
3154

3155
                // In order to reach all the edges for this node, we take
3156
                // advantage of the construction of the key-space within the
3157
                // edge bucket. The keys are stored in the form: pubKey ||
3158
                // chanID. Therefore, starting from a chanID of zero, we can
3159
                // scan forward in the bucket, grabbing all the edges for the
3160
                // node. Once the prefix no longer matches, then we know we're
3161
                // done.
3162
                var nodeStart [33 + 8]byte
1,269✔
3163
                copy(nodeStart[:], nodePub)
1,269✔
3164
                copy(nodeStart[33:], chanStart[:])
1,269✔
3165

1,269✔
3166
                // Starting from the key pubKey || 0, we seek forward in the
1,269✔
3167
                // bucket until the retrieved key no longer has the public key
1,269✔
3168
                // as its prefix. This indicates that we've stepped over into
1,269✔
3169
                // another node's edges, so we can terminate our scan.
1,269✔
3170
                edgeCursor := edges.ReadCursor()
1,269✔
3171
                for nodeEdge, _ := edgeCursor.Seek(nodeStart[:]); bytes.HasPrefix(nodeEdge, nodePub); nodeEdge, _ = edgeCursor.Next() { //nolint:ll
5,112✔
3172
                        // If the prefix still matches, the channel id is
3,843✔
3173
                        // returned in nodeEdge. Channel id is used to lookup
3,843✔
3174
                        // the node at the other end of the channel and both
3,843✔
3175
                        // edge policies.
3,843✔
3176
                        chanID := nodeEdge[33:]
3,843✔
3177
                        edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID)
3,843✔
3178
                        if err != nil {
3,843✔
NEW
3179
                                return err
×
NEW
3180
                        }
×
3181

3182
                        outgoingPolicy, err := fetchChanEdgePolicy(
3,843✔
3183
                                edges, chanID, nodePub,
3,843✔
3184
                        )
3,843✔
3185
                        if err != nil {
3,843✔
NEW
3186
                                return err
×
NEW
3187
                        }
×
3188

3189
                        otherNode, err := edgeInfo.OtherNodeKeyBytes(nodePub)
3,843✔
3190
                        if err != nil {
3,843✔
NEW
3191
                                return err
×
NEW
3192
                        }
×
3193

3194
                        incomingPolicy, err := fetchChanEdgePolicy(
3,843✔
3195
                                edges, chanID, otherNode[:],
3,843✔
3196
                        )
3,843✔
3197
                        if err != nil {
3,843✔
NEW
3198
                                return err
×
NEW
3199
                        }
×
3200

3201
                        // Finally, we execute the callback.
3202
                        err = cb(tx, &edgeInfo, outgoingPolicy, incomingPolicy)
3,843✔
3203
                        if err != nil {
3,855✔
3204
                                return err
12✔
3205
                        }
12✔
3206
                }
3207

3208
                return nil
1,260✔
3209
        }
3210

3211
        // If no transaction was provided, then we'll create a new transaction
3212
        // to execute the transaction within.
3213
        if tx == nil {
1,281✔
3214
                return kvdb.View(db, traversal, func() {})
24✔
3215
        }
3216

3217
        // Otherwise, we re-use the existing transaction to execute the graph
3218
        // traversal.
3219
        return traversal(tx)
1,260✔
3220
}
3221

3222
// ForEachNodeChannel iterates through all channels of the given node,
3223
// executing the passed callback with an edge info structure and the policies
3224
// of each end of the channel. The first edge policy is the outgoing edge *to*
3225
// the connecting node, while the second is the incoming edge *from* the
3226
// connecting node. If the callback returns an error, then the iteration is
3227
// halted with the error propagated back up to the caller.
3228
//
3229
// Unknown policies are passed into the callback as nil values.
3230
func (c *KVStore) ForEachNodeChannel(nodePub route.Vertex,
3231
        cb func(kvdb.RTx, *models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
3232
                *models.ChannelEdgePolicy) error) error {
9✔
3233

9✔
3234
        return nodeTraversal(nil, nodePub[:], c.db, cb)
9✔
3235
}
9✔
3236

3237
// ForEachNodeChannelTx iterates through all channels of the given node,
3238
// executing the passed callback with an edge info structure and the policies
3239
// of each end of the channel. The first edge policy is the outgoing edge *to*
3240
// the connecting node, while the second is the incoming edge *from* the
3241
// connecting node. If the callback returns an error, then the iteration is
3242
// halted with the error propagated back up to the caller.
3243
//
3244
// Unknown policies are passed into the callback as nil values.
3245
//
3246
// If the caller wishes to re-use an existing boltdb transaction, then it
3247
// should be passed as the first argument.  Otherwise, the first argument should
3248
// be nil and a fresh transaction will be created to execute the graph
3249
// traversal.
3250
func (c *KVStore) ForEachNodeChannelTx(tx kvdb.RTx,
3251
        nodePub route.Vertex, cb func(kvdb.RTx, *models.ChannelEdgeInfo,
3252
                *models.ChannelEdgePolicy,
3253
                *models.ChannelEdgePolicy) error) error {
1,021✔
3254

1,021✔
3255
        return nodeTraversal(tx, nodePub[:], c.db, cb)
1,021✔
3256
}
1,021✔
3257

3258
// FetchOtherNode attempts to fetch the full LightningNode that's opposite of
3259
// the target node in the channel. This is useful when one knows the pubkey of
3260
// one of the nodes, and wishes to obtain the full LightningNode for the other
3261
// end of the channel.
3262
func (c *KVStore) FetchOtherNode(tx kvdb.RTx,
3263
        channel *models.ChannelEdgeInfo, thisNodeKey []byte) (
3264
        *models.LightningNode, error) {
3✔
3265

3✔
3266
        // Ensure that the node passed in is actually a member of the channel.
3✔
3267
        var targetNodeBytes [33]byte
3✔
3268
        switch {
3✔
3269
        case bytes.Equal(channel.NodeKey1Bytes[:], thisNodeKey):
3✔
3270
                targetNodeBytes = channel.NodeKey2Bytes
3✔
3271
        case bytes.Equal(channel.NodeKey2Bytes[:], thisNodeKey):
3✔
3272
                targetNodeBytes = channel.NodeKey1Bytes
3✔
NEW
3273
        default:
×
NEW
3274
                return nil, fmt.Errorf("node not participating in this channel")
×
3275
        }
3276

3277
        var targetNode *models.LightningNode
3✔
3278
        fetchNodeFunc := func(tx kvdb.RTx) error {
6✔
3279
                // First grab the nodes bucket which stores the mapping from
3✔
3280
                // pubKey to node information.
3✔
3281
                nodes := tx.ReadBucket(nodeBucket)
3✔
3282
                if nodes == nil {
3✔
NEW
3283
                        return ErrGraphNotFound
×
NEW
3284
                }
×
3285

3286
                node, err := fetchLightningNode(nodes, targetNodeBytes[:])
3✔
3287
                if err != nil {
3✔
NEW
3288
                        return err
×
NEW
3289
                }
×
3290

3291
                targetNode = &node
3✔
3292

3✔
3293
                return nil
3✔
3294
        }
3295

3296
        // If the transaction is nil, then we'll need to create a new one,
3297
        // otherwise we can use the existing db transaction.
3298
        var err error
3✔
3299
        if tx == nil {
3✔
NEW
3300
                err = kvdb.View(c.db, fetchNodeFunc, func() {
×
NEW
3301
                        targetNode = nil
×
NEW
3302
                })
×
3303
        } else {
3✔
3304
                err = fetchNodeFunc(tx)
3✔
3305
        }
3✔
3306

3307
        return targetNode, err
3✔
3308
}
3309

3310
// computeEdgePolicyKeys is a helper function that can be used to compute the
3311
// keys used to index the channel edge policy info for the two nodes of the
3312
// edge. The keys for node 1 and node 2 are returned respectively.
3313
func computeEdgePolicyKeys(info *models.ChannelEdgeInfo) ([]byte, []byte) {
25✔
3314
        var (
25✔
3315
                node1Key [33 + 8]byte
25✔
3316
                node2Key [33 + 8]byte
25✔
3317
        )
25✔
3318

25✔
3319
        copy(node1Key[:], info.NodeKey1Bytes[:])
25✔
3320
        copy(node2Key[:], info.NodeKey2Bytes[:])
25✔
3321

25✔
3322
        byteOrder.PutUint64(node1Key[33:], info.ChannelID)
25✔
3323
        byteOrder.PutUint64(node2Key[33:], info.ChannelID)
25✔
3324

25✔
3325
        return node1Key[:], node2Key[:]
25✔
3326
}
25✔
3327

3328
// FetchChannelEdgesByOutpoint attempts to lookup the two directed edges for
// the channel identified by the funding outpoint. If the channel can't be
// found, then ErrEdgeNotFound is returned. A struct which houses the general
// information for the channel itself is returned as well as two structs that
// contain the routing policies for the channel in either direction.
func (c *KVStore) FetchChannelEdgesByOutpoint(op *wire.OutPoint) (
	*models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
	*models.ChannelEdgePolicy, error) {

	var (
		edgeInfo *models.ChannelEdgeInfo
		policy1  *models.ChannelEdgePolicy
		policy2  *models.ChannelEdgePolicy
	)

	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		// First, grab the node bucket. This will be used to populate
		// the Node pointers in each edge read from disk.
		nodes := tx.ReadBucket(nodeBucket)
		if nodes == nil {
			return ErrGraphNotFound
		}

		// Next, grab the edge bucket which stores the edges, and also
		// the index itself so we can group the directed edges together
		// logically.
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrGraphNoEdgesFound
		}

		// If the channel's outpoint doesn't exist within the outpoint
		// index, then the edge does not exist.
		chanIndex := edges.NestedReadBucket(channelPointBucket)
		if chanIndex == nil {
			return ErrGraphNoEdgesFound
		}
		// Serialize the outpoint to use it as the index key.
		var b bytes.Buffer
		if err := WriteOutpoint(&b, op); err != nil {
			return err
		}
		chanID := chanIndex.Get(b.Bytes())
		if chanID == nil {
			return fmt.Errorf("%w: op=%v", ErrEdgeNotFound, op)
		}

		// If the channel is found to exists, then we'll first retrieve
		// the general information for the channel.
		edge, err := fetchChanEdgeInfo(edgeIndex, chanID)
		if err != nil {
			return fmt.Errorf("%w: chanID=%x", err, chanID)
		}
		edgeInfo = &edge

		// Once we have the information about the channels' parameters,
		// we'll fetch the routing policies for each for the directed
		// edges.
		e1, e2, err := fetchChanEdgePolicies(edgeIndex, edges, chanID)
		if err != nil {
			return fmt.Errorf("failed to find policy: %w", err)
		}

		policy1 = e1
		policy2 = e2

		return nil
	}, func() {
		// Reset closure: zero the results so a retried transaction
		// starts from a clean slate.
		edgeInfo = nil
		policy1 = nil
		policy2 = nil
	})
	if err != nil {
		return nil, nil, nil, err
	}

	return edgeInfo, policy1, policy2, nil
}
3409

3410
// FetchChannelEdgesByID attempts to lookup the two directed edges for the
// channel identified by the channel ID. If the channel can't be found, then
// ErrEdgeNotFound is returned. A struct which houses the general information
// for the channel itself is returned as well as two structs that contain the
// routing policies for the channel in either direction.
//
// ErrZombieEdge an be returned if the edge is currently marked as a zombie
// within the database. In this case, the ChannelEdgePolicy's will be nil, and
// the ChannelEdgeInfo will only include the public keys of each node.
func (c *KVStore) FetchChannelEdgesByID(chanID uint64) (
	*models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
	*models.ChannelEdgePolicy, error) {

	var (
		edgeInfo  *models.ChannelEdgeInfo
		policy1   *models.ChannelEdgePolicy
		policy2   *models.ChannelEdgePolicy
		channelID [8]byte
	)

	err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		// First, grab the node bucket. This will be used to populate
		// the Node pointers in each edge read from disk.
		nodes := tx.ReadBucket(nodeBucket)
		if nodes == nil {
			return ErrGraphNotFound
		}

		// Next, grab the edge bucket which stores the edges, and also
		// the index itself so we can group the directed edges together
		// logically.
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrGraphNoEdgesFound
		}

		byteOrder.PutUint64(channelID[:], chanID)

		// Now, attempt to fetch edge.
		edge, err := fetchChanEdgeInfo(edgeIndex, channelID[:])

		// If it doesn't exist, we'll quickly check our zombie index to
		// see if we've previously marked it as so.
		if errors.Is(err, ErrEdgeNotFound) {
			// If the zombie index doesn't exist, or the edge is not
			// marked as a zombie within it, then we'll return the
			// original ErrEdgeNotFound error.
			zombieIndex := edges.NestedReadBucket(zombieBucket)
			if zombieIndex == nil {
				return ErrEdgeNotFound
			}

			isZombie, pubKey1, pubKey2 := isZombieEdge(
				zombieIndex, chanID,
			)
			if !isZombie {
				return ErrEdgeNotFound
			}

			// Otherwise, the edge is marked as a zombie, so we'll
			// populate the edge info with the public keys of each
			// party as this is the only information we have about
			// it and return an error signaling so.
			edgeInfo = &models.ChannelEdgeInfo{
				NodeKey1Bytes: pubKey1,
				NodeKey2Bytes: pubKey2,
			}

			return ErrZombieEdge
		}

		// Otherwise, we'll just return the error if any.
		if err != nil {
			return err
		}

		edgeInfo = &edge

		// Then we'll attempt to fetch the accompanying policies of this
		// edge.
		e1, e2, err := fetchChanEdgePolicies(
			edgeIndex, edges, channelID[:],
		)
		if err != nil {
			return err
		}

		policy1 = e1
		policy2 = e2

		return nil
	}, func() {
		// Reset closure for transaction retries.
		edgeInfo = nil
		policy1 = nil
		policy2 = nil
	})
	// A zombie edge is reported with the partially populated edge info
	// (public keys only) alongside the sentinel error.
	if errors.Is(err, ErrZombieEdge) {
		return edgeInfo, nil, nil, err
	}
	if err != nil {
		return nil, nil, nil, err
	}

	return edgeInfo, policy1, policy2, nil
}
3519

3520
// IsPublicNode is a helper method that determines whether the node with the
3521
// given public key is seen as a public node in the graph from the graph's
3522
// source node's point of view.
3523
func (c *KVStore) IsPublicNode(pubKey [33]byte) (bool, error) {
16✔
3524
        var nodeIsPublic bool
16✔
3525
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
32✔
3526
                nodes := tx.ReadBucket(nodeBucket)
16✔
3527
                if nodes == nil {
16✔
NEW
3528
                        return ErrGraphNodesNotFound
×
NEW
3529
                }
×
3530
                ourPubKey := nodes.Get(sourceKey)
16✔
3531
                if ourPubKey == nil {
16✔
NEW
3532
                        return ErrSourceNodeNotSet
×
NEW
3533
                }
×
3534
                node, err := fetchLightningNode(nodes, pubKey[:])
16✔
3535
                if err != nil {
16✔
NEW
3536
                        return err
×
NEW
3537
                }
×
3538

3539
                nodeIsPublic, err = c.isPublic(tx, node.PubKeyBytes, ourPubKey)
16✔
3540

16✔
3541
                return err
16✔
3542
        }, func() {
16✔
3543
                nodeIsPublic = false
16✔
3544
        })
16✔
3545
        if err != nil {
16✔
NEW
3546
                return false, err
×
NEW
3547
        }
×
3548

3549
        return nodeIsPublic, nil
16✔
3550
}
3551

3552
// genMultiSigP2WSH generates the p2wsh'd multisig script for 2 of 2 pubkeys.
3553
func genMultiSigP2WSH(aPub, bPub []byte) ([]byte, error) {
49✔
3554
        witnessScript, err := input.GenMultiSigScript(aPub, bPub)
49✔
3555
        if err != nil {
49✔
NEW
3556
                return nil, err
×
NEW
3557
        }
×
3558

3559
        // With the witness script generated, we'll now turn it into a p2wsh
3560
        // script:
3561
        //  * OP_0 <sha256(script)>
3562
        bldr := txscript.NewScriptBuilder(
49✔
3563
                txscript.WithScriptAllocSize(input.P2WSHSize),
49✔
3564
        )
49✔
3565
        bldr.AddOp(txscript.OP_0)
49✔
3566
        scriptHash := sha256.Sum256(witnessScript)
49✔
3567
        bldr.AddData(scriptHash[:])
49✔
3568

49✔
3569
        return bldr.Script()
49✔
3570
}
3571

3572
// EdgePoint couples the outpoint of a channel with the funding script that it
// creates. The FilteredChainView will use this to watch for spends of this
// edge point on chain. We require both of these values as depending on the
// concrete implementation, either the pkScript, or the out point will be used.
type EdgePoint struct {
	// FundingPkScript is the p2wsh multi-sig script of the target channel.
	FundingPkScript []byte

	// OutPoint is the outpoint of the target channel.
	OutPoint wire.OutPoint
}
3583

3584
// String returns a human readable version of the target EdgePoint. We return
// the outpoint directly as it is enough to uniquely identify the edge point.
func (e *EdgePoint) String() string {
	return e.OutPoint.String()
}
×
3589

3590
// ChannelView returns the verifiable edge information for each active channel
// within the known channel graph. The set of UTXO's (along with their scripts)
// returned are the ones that need to be watched on chain to detect channel
// closes on the resident blockchain.
func (c *KVStore) ChannelView() ([]EdgePoint, error) {
	var edgePoints []EdgePoint
	if err := kvdb.View(c.db, func(tx kvdb.RTx) error {
		// We're going to iterate over the entire channel index, so
		// we'll need to fetch the edgeBucket to get to the index as
		// it's a sub-bucket.
		edges := tx.ReadBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		chanIndex := edges.NestedReadBucket(channelPointBucket)
		if chanIndex == nil {
			return ErrGraphNoEdgesFound
		}
		edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
		if edgeIndex == nil {
			return ErrGraphNoEdgesFound
		}

		// Once we have the proper bucket, we'll range over each key
		// (which is the channel point for the channel) and decode it,
		// accumulating each entry.
		return chanIndex.ForEach(
			func(chanPointBytes, chanID []byte) error {
				chanPointReader := bytes.NewReader(
					chanPointBytes,
				)

				var chanPoint wire.OutPoint
				err := ReadOutpoint(chanPointReader, &chanPoint)
				if err != nil {
					return err
				}

				edgeInfo, err := fetchChanEdgeInfo(
					edgeIndex, chanID,
				)
				if err != nil {
					return err
				}

				// Derive the p2wsh funding script from the two
				// bitcoin keys of the channel parties.
				pkScript, err := genMultiSigP2WSH(
					edgeInfo.BitcoinKey1Bytes[:],
					edgeInfo.BitcoinKey2Bytes[:],
				)
				if err != nil {
					return err
				}

				edgePoints = append(edgePoints, EdgePoint{
					FundingPkScript: pkScript,
					OutPoint:        chanPoint,
				})

				return nil
			},
		)
	}, func() {
		// Reset closure for transaction retries.
		edgePoints = nil
	}); err != nil {
		return nil, err
	}

	return edgePoints, nil
}
3659

3660
// MarkEdgeZombie attempts to mark a channel identified by its channel ID as a
// zombie. This method is used on an ad-hoc basis, when channels need to be
// marked as zombies outside the normal pruning cycle.
func (c *KVStore) MarkEdgeZombie(chanID uint64,
	pubKey1, pubKey2 [33]byte) error {

	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()

	err := kvdb.Batch(c.db, func(tx kvdb.RwTx) error {
		edges := tx.ReadWriteBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
		if err != nil {
			return fmt.Errorf("unable to create zombie "+
				"bucket: %w", err)
		}

		// Evict the channel from the in-memory graph cache alongside
		// the on-disk zombie marking.
		if c.graphCache != nil {
			c.graphCache.RemoveChannel(pubKey1, pubKey2, chanID)
		}

		return markEdgeZombie(zombieIndex, chanID, pubKey1, pubKey2)
	})
	if err != nil {
		return err
	}

	// Invalidate any cached entries for this channel now that it is
	// considered a zombie.
	c.rejectCache.remove(chanID)
	c.chanCache.remove(chanID)

	return nil
}
3695

3696
// markEdgeZombie marks an edge as a zombie within our zombie index. The public
3697
// keys should represent the node public keys of the two parties involved in the
3698
// edge.
3699
func markEdgeZombie(zombieIndex kvdb.RwBucket, chanID uint64, pubKey1,
3700
        pubKey2 [33]byte) error {
151✔
3701

151✔
3702
        var k [8]byte
151✔
3703
        byteOrder.PutUint64(k[:], chanID)
151✔
3704

151✔
3705
        var v [66]byte
151✔
3706
        copy(v[:33], pubKey1[:])
151✔
3707
        copy(v[33:], pubKey2[:])
151✔
3708

151✔
3709
        return zombieIndex.Put(k[:], v[:])
151✔
3710
}
151✔
3711

3712
// MarkEdgeLive clears an edge from our zombie index, deeming it as live.
func (c *KVStore) MarkEdgeLive(chanID uint64) error {
	// Acquire the cache mutex since markEdgeLiveUnsafe requires it to be
	// held by the caller.
	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()

	return c.markEdgeLiveUnsafe(nil, chanID)
}
2✔
3719

3720
// markEdgeLiveUnsafe clears an edge from the zombie index. This method can be
// called with an existing kvdb.RwTx or the argument can be set to nil in which
// case a new transaction will be created.
//
// NOTE: this method MUST only be called if the cacheMu has already been
// acquired.
func (c *KVStore) markEdgeLiveUnsafe(tx kvdb.RwTx, chanID uint64) error {
	dbFn := func(tx kvdb.RwTx) error {
		edges := tx.ReadWriteBucket(edgeBucket)
		if edges == nil {
			return ErrGraphNoEdgesFound
		}
		zombieIndex := edges.NestedReadWriteBucket(zombieBucket)
		if zombieIndex == nil {
			// No zombie index means there is nothing to clear.
			return nil
		}

		var k [8]byte
		byteOrder.PutUint64(k[:], chanID)

		// Only delete if an entry actually exists; otherwise report
		// that the edge was not marked as a zombie.
		if len(zombieIndex.Get(k[:])) == 0 {
			return ErrZombieEdgeNotFound
		}

		return zombieIndex.Delete(k[:])
	}

	// If the transaction is nil, we'll create a new one. Otherwise, we use
	// the existing transaction
	var err error
	if tx == nil {
		err = kvdb.Update(c.db, dbFn, func() {})
	} else {
		err = dbFn(tx)
	}
	if err != nil {
		return err
	}

	// Drop any stale cached entries for this channel.
	c.rejectCache.remove(chanID)
	c.chanCache.remove(chanID)

	// We need to add the channel back into our graph cache, otherwise we
	// won't use it for path finding.
	if c.graphCache != nil {
		edgeInfos, err := c.fetchChanInfos(tx, []uint64{chanID})
		if err != nil {
			return err
		}

		for _, edgeInfo := range edgeInfos {
			c.graphCache.AddChannel(
				edgeInfo.Info, edgeInfo.Policy1,
				edgeInfo.Policy2,
			)
		}
	}

	return nil
}
3780

3781
// IsZombieEdge returns whether the edge is considered zombie. If it is a
3782
// zombie, then the two node public keys corresponding to this edge are also
3783
// returned.
3784
func (c *KVStore) IsZombieEdge(chanID uint64) (bool, [33]byte, [33]byte) {
5✔
3785
        var (
5✔
3786
                isZombie         bool
5✔
3787
                pubKey1, pubKey2 [33]byte
5✔
3788
        )
5✔
3789

5✔
3790
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
10✔
3791
                edges := tx.ReadBucket(edgeBucket)
5✔
3792
                if edges == nil {
5✔
NEW
3793
                        return ErrGraphNoEdgesFound
×
NEW
3794
                }
×
3795
                zombieIndex := edges.NestedReadBucket(zombieBucket)
5✔
3796
                if zombieIndex == nil {
5✔
NEW
3797
                        return nil
×
NEW
3798
                }
×
3799

3800
                isZombie, pubKey1, pubKey2 = isZombieEdge(zombieIndex, chanID)
5✔
3801

5✔
3802
                return nil
5✔
3803
        }, func() {
5✔
3804
                isZombie = false
5✔
3805
                pubKey1 = [33]byte{}
5✔
3806
                pubKey2 = [33]byte{}
5✔
3807
        })
5✔
3808
        if err != nil {
5✔
NEW
3809
                return false, [33]byte{}, [33]byte{}
×
NEW
3810
        }
×
3811

3812
        return isZombie, pubKey1, pubKey2
5✔
3813
}
3814

3815
// isZombieEdge returns whether an entry exists for the given channel in the
3816
// zombie index. If an entry exists, then the two node public keys corresponding
3817
// to this edge are also returned.
3818
func isZombieEdge(zombieIndex kvdb.RBucket,
3819
        chanID uint64) (bool, [33]byte, [33]byte) {
190✔
3820

190✔
3821
        var k [8]byte
190✔
3822
        byteOrder.PutUint64(k[:], chanID)
190✔
3823

190✔
3824
        v := zombieIndex.Get(k[:])
190✔
3825
        if v == nil {
293✔
3826
                return false, [33]byte{}, [33]byte{}
103✔
3827
        }
103✔
3828

3829
        var pubKey1, pubKey2 [33]byte
90✔
3830
        copy(pubKey1[:], v[:33])
90✔
3831
        copy(pubKey2[:], v[33:])
90✔
3832

90✔
3833
        return true, pubKey1, pubKey2
90✔
3834
}
3835

3836
// NumZombies returns the current number of zombie channels in the graph.
3837
func (c *KVStore) NumZombies() (uint64, error) {
4✔
3838
        var numZombies uint64
4✔
3839
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
8✔
3840
                edges := tx.ReadBucket(edgeBucket)
4✔
3841
                if edges == nil {
4✔
NEW
3842
                        return nil
×
NEW
3843
                }
×
3844
                zombieIndex := edges.NestedReadBucket(zombieBucket)
4✔
3845
                if zombieIndex == nil {
4✔
NEW
3846
                        return nil
×
NEW
3847
                }
×
3848

3849
                return zombieIndex.ForEach(func(_, _ []byte) error {
6✔
3850
                        numZombies++
2✔
3851
                        return nil
2✔
3852
                })
2✔
3853
        }, func() {
4✔
3854
                numZombies = 0
4✔
3855
        })
4✔
3856
        if err != nil {
4✔
NEW
3857
                return 0, err
×
NEW
3858
        }
×
3859

3860
        return numZombies, nil
4✔
3861
}
3862

3863
// PutClosedScid stores a SCID for a closed channel in the database. This is so
3864
// that we can ignore channel announcements that we know to be closed without
3865
// having to validate them and fetch a block.
3866
func (c *KVStore) PutClosedScid(scid lnwire.ShortChannelID) error {
1✔
3867
        return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
2✔
3868
                closedScids, err := tx.CreateTopLevelBucket(closedScidBucket)
1✔
3869
                if err != nil {
1✔
NEW
3870
                        return err
×
NEW
3871
                }
×
3872

3873
                var k [8]byte
1✔
3874
                byteOrder.PutUint64(k[:], scid.ToUint64())
1✔
3875

1✔
3876
                return closedScids.Put(k[:], []byte{})
1✔
3877
        }, func() {})
1✔
3878
}
3879

3880
// IsClosedScid checks whether a channel identified by the passed in scid is
3881
// closed. This helps avoid having to perform expensive validation checks.
3882
// TODO: Add an LRU cache to cut down on disc reads.
3883
func (c *KVStore) IsClosedScid(scid lnwire.ShortChannelID) (bool, error) {
5✔
3884
        var isClosed bool
5✔
3885
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
10✔
3886
                closedScids := tx.ReadBucket(closedScidBucket)
5✔
3887
                if closedScids == nil {
5✔
NEW
3888
                        return ErrClosedScidsNotFound
×
NEW
3889
                }
×
3890

3891
                var k [8]byte
5✔
3892
                byteOrder.PutUint64(k[:], scid.ToUint64())
5✔
3893

5✔
3894
                if closedScids.Get(k[:]) != nil {
6✔
3895
                        isClosed = true
1✔
3896
                        return nil
1✔
3897
                }
1✔
3898

3899
                return nil
4✔
3900
        }, func() {
5✔
3901
                isClosed = false
5✔
3902
        })
5✔
3903
        if err != nil {
5✔
NEW
3904
                return false, err
×
NEW
3905
        }
×
3906

3907
        return isClosed, nil
5✔
3908
}
3909

3910
// GraphSession will provide the call-back with access to a NodeTraverser
3911
// instance which can be used to perform queries against the channel graph. If
3912
// the graph cache is not enabled, then the call-back will  be provided with
3913
// access to the graph via a consistent read-only transaction.
3914
func (c *KVStore) GraphSession(cb func(graph NodeTraverser) error) error {
136✔
3915
        if c.graphCache != nil {
218✔
3916
                return cb(&nodeTraverserSession{db: c})
82✔
3917
        }
82✔
3918

3919
        return c.db.View(func(tx walletdb.ReadTx) error {
108✔
3920
                return cb(&nodeTraverserSession{
54✔
3921
                        db: c,
54✔
3922
                        tx: tx,
54✔
3923
                })
54✔
3924
        }, func() {})
108✔
3925
}
3926

3927
// nodeTraverserSession implements the NodeTraverser interface but with a
// backing read only transaction for a consistent view of the graph in the case
// where the graph Cache has not been enabled.
type nodeTraverserSession struct {
	// tx is the backing read transaction; nil when the graph cache is
	// enabled (see GraphSession).
	tx kvdb.RTx
	// db is the KVStore the queries are delegated to.
	db *KVStore
}
3934

3935
// ForEachNodeDirectedChannel calls the callback for every channel of the given
// node.
//
// NOTE: Part of the NodeTraverser interface.
func (c *nodeTraverserSession) ForEachNodeDirectedChannel(nodePub route.Vertex,
	cb func(channel *DirectedChannel) error) error {

	// Delegate to the store with the session's transaction (which may be
	// nil when the graph cache is enabled; see GraphSession).
	return c.db.forEachNodeDirectedChannel(c.tx, nodePub, cb)
}
592✔
3944

3945
// FetchNodeFeatures returns the features of the given node. If the node is
// unknown, assume no additional features are supported.
//
// NOTE: Part of the NodeTraverser interface.
func (c *nodeTraverserSession) FetchNodeFeatures(nodePub route.Vertex) (
	*lnwire.FeatureVector, error) {

	// Delegate to the store with the session's transaction (which may be
	// nil when the graph cache is enabled; see GraphSession).
	return c.db.fetchNodeFeatures(c.tx, nodePub)
}
623✔
3954

3955
// putLightningNode serializes the given node and writes it to the node
// bucket, also maintaining the alias bucket and the update-time index. The
// serialization order below is the on-disk wire format and must not change.
func putLightningNode(nodeBucket, aliasBucket, updateIndex kvdb.RwBucket,
	node *models.LightningNode) error {

	var (
		scratch [16]byte
		b       bytes.Buffer
	)

	pub, err := node.PubKey()
	if err != nil {
		return err
	}
	nodePub := pub.SerializeCompressed()

	// If the node has the update time set, write it, else write 0.
	updateUnix := uint64(0)
	if node.LastUpdate.Unix() > 0 {
		updateUnix = uint64(node.LastUpdate.Unix())
	}

	byteOrder.PutUint64(scratch[:8], updateUnix)
	if _, err := b.Write(scratch[:8]); err != nil {
		return err
	}

	if _, err := b.Write(nodePub); err != nil {
		return err
	}

	// If we got a node announcement for this node, we will have the rest
	// of the data available. If not we don't have more data to write.
	if !node.HaveNodeAnnouncement {
		// Write HaveNodeAnnouncement=0.
		byteOrder.PutUint16(scratch[:2], 0)
		if _, err := b.Write(scratch[:2]); err != nil {
			return err
		}

		return nodeBucket.Put(nodePub, b.Bytes())
	}

	// Write HaveNodeAnnouncement=1.
	byteOrder.PutUint16(scratch[:2], 1)
	if _, err := b.Write(scratch[:2]); err != nil {
		return err
	}

	// RGB color components, one byte each.
	if err := binary.Write(&b, byteOrder, node.Color.R); err != nil {
		return err
	}
	if err := binary.Write(&b, byteOrder, node.Color.G); err != nil {
		return err
	}
	if err := binary.Write(&b, byteOrder, node.Color.B); err != nil {
		return err
	}

	if err := wire.WriteVarString(&b, 0, node.Alias); err != nil {
		return err
	}

	if err := node.Features.Encode(&b); err != nil {
		return err
	}

	// Address count followed by each serialized address.
	numAddresses := uint16(len(node.Addresses))
	byteOrder.PutUint16(scratch[:2], numAddresses)
	if _, err := b.Write(scratch[:2]); err != nil {
		return err
	}

	for _, address := range node.Addresses {
		if err := SerializeAddr(&b, address); err != nil {
			return err
		}
	}

	sigLen := len(node.AuthSigBytes)
	if sigLen > 80 {
		return fmt.Errorf("max sig len allowed is 80, had %v",
			sigLen)
	}

	err = wire.WriteVarBytes(&b, 0, node.AuthSigBytes)
	if err != nil {
		return err
	}

	if len(node.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes {
		return ErrTooManyExtraOpaqueBytes(len(node.ExtraOpaqueData))
	}
	err = wire.WriteVarBytes(&b, 0, node.ExtraOpaqueData)
	if err != nil {
		return err
	}

	if err := aliasBucket.Put(nodePub, []byte(node.Alias)); err != nil {
		return err
	}

	// With the alias bucket updated, we'll now update the index that
	// tracks the time series of node updates.
	var indexKey [8 + 33]byte
	byteOrder.PutUint64(indexKey[:8], updateUnix)
	copy(indexKey[8:], nodePub)

	// If there was already an old index entry for this node, then we'll
	// delete the old one before we write the new entry.
	if nodeBytes := nodeBucket.Get(nodePub); nodeBytes != nil {
		// Extract out the old update time so we can reconstruct the
		// prior index key to delete it from the index.
		oldUpdateTime := nodeBytes[:8]

		var oldIndexKey [8 + 33]byte
		copy(oldIndexKey[:8], oldUpdateTime)
		copy(oldIndexKey[8:], nodePub)

		if err := updateIndex.Delete(oldIndexKey[:]); err != nil {
			return err
		}
	}

	if err := updateIndex.Put(indexKey[:], nil); err != nil {
		return err
	}

	return nodeBucket.Put(nodePub, b.Bytes())
}
4083

4084
func fetchLightningNode(nodeBucket kvdb.RBucket,
4085
        nodePub []byte) (models.LightningNode, error) {
3,619✔
4086

3,619✔
4087
        nodeBytes := nodeBucket.Get(nodePub)
3,619✔
4088
        if nodeBytes == nil {
3,695✔
4089
                return models.LightningNode{}, ErrGraphNodeNotFound
76✔
4090
        }
76✔
4091

4092
        nodeReader := bytes.NewReader(nodeBytes)
3,546✔
4093

3,546✔
4094
        return deserializeLightningNode(nodeReader)
3,546✔
4095
}
4096

4097
func deserializeLightningNodeCacheable(r io.Reader) (route.Vertex,
4098
        *lnwire.FeatureVector, error) {
123✔
4099

123✔
4100
        var (
123✔
4101
                pubKey      route.Vertex
123✔
4102
                features    = lnwire.EmptyFeatureVector()
123✔
4103
                nodeScratch [8]byte
123✔
4104
        )
123✔
4105

123✔
4106
        // Skip ahead:
123✔
4107
        // - LastUpdate (8 bytes)
123✔
4108
        if _, err := r.Read(nodeScratch[:]); err != nil {
123✔
NEW
4109
                return pubKey, nil, err
×
NEW
4110
        }
×
4111

4112
        if _, err := io.ReadFull(r, pubKey[:]); err != nil {
123✔
NEW
4113
                return pubKey, nil, err
×
NEW
4114
        }
×
4115

4116
        // Read the node announcement flag.
4117
        if _, err := r.Read(nodeScratch[:2]); err != nil {
123✔
NEW
4118
                return pubKey, nil, err
×
NEW
4119
        }
×
4120
        hasNodeAnn := byteOrder.Uint16(nodeScratch[:2])
123✔
4121

123✔
4122
        // The rest of the data is optional, and will only be there if we got a
123✔
4123
        // node announcement for this node.
123✔
4124
        if hasNodeAnn == 0 {
126✔
4125
                return pubKey, features, nil
3✔
4126
        }
3✔
4127

4128
        // We did get a node announcement for this node, so we'll have the rest
4129
        // of the data available.
4130
        var rgb uint8
123✔
4131
        if err := binary.Read(r, byteOrder, &rgb); err != nil {
123✔
NEW
4132
                return pubKey, nil, err
×
NEW
4133
        }
×
4134
        if err := binary.Read(r, byteOrder, &rgb); err != nil {
123✔
NEW
4135
                return pubKey, nil, err
×
NEW
4136
        }
×
4137
        if err := binary.Read(r, byteOrder, &rgb); err != nil {
123✔
NEW
4138
                return pubKey, nil, err
×
NEW
4139
        }
×
4140

4141
        if _, err := wire.ReadVarString(r, 0); err != nil {
123✔
NEW
4142
                return pubKey, nil, err
×
NEW
4143
        }
×
4144

4145
        if err := features.Decode(r); err != nil {
123✔
NEW
4146
                return pubKey, nil, err
×
NEW
4147
        }
×
4148

4149
        return pubKey, features, nil
123✔
4150
}
4151

4152
func deserializeLightningNode(r io.Reader) (models.LightningNode, error) {
8,505✔
4153
        var (
8,505✔
4154
                node    models.LightningNode
8,505✔
4155
                scratch [8]byte
8,505✔
4156
                err     error
8,505✔
4157
        )
8,505✔
4158

8,505✔
4159
        // Always populate a feature vector, even if we don't have a node
8,505✔
4160
        // announcement and short circuit below.
8,505✔
4161
        node.Features = lnwire.EmptyFeatureVector()
8,505✔
4162

8,505✔
4163
        if _, err := r.Read(scratch[:]); err != nil {
8,505✔
NEW
4164
                return models.LightningNode{}, err
×
NEW
4165
        }
×
4166

4167
        unix := int64(byteOrder.Uint64(scratch[:]))
8,505✔
4168
        node.LastUpdate = time.Unix(unix, 0)
8,505✔
4169

8,505✔
4170
        if _, err := io.ReadFull(r, node.PubKeyBytes[:]); err != nil {
8,505✔
NEW
4171
                return models.LightningNode{}, err
×
NEW
4172
        }
×
4173

4174
        if _, err := r.Read(scratch[:2]); err != nil {
8,505✔
NEW
4175
                return models.LightningNode{}, err
×
NEW
4176
        }
×
4177

4178
        hasNodeAnn := byteOrder.Uint16(scratch[:2])
8,505✔
4179
        if hasNodeAnn == 1 {
16,865✔
4180
                node.HaveNodeAnnouncement = true
8,360✔
4181
        } else {
8,508✔
4182
                node.HaveNodeAnnouncement = false
148✔
4183
        }
148✔
4184

4185
        // The rest of the data is optional, and will only be there if we got a
4186
        // node announcement for this node.
4187
        if !node.HaveNodeAnnouncement {
8,653✔
4188
                return node, nil
148✔
4189
        }
148✔
4190

4191
        // We did get a node announcement for this node, so we'll have the rest
4192
        // of the data available.
4193
        if err := binary.Read(r, byteOrder, &node.Color.R); err != nil {
8,360✔
NEW
4194
                return models.LightningNode{}, err
×
NEW
4195
        }
×
4196
        if err := binary.Read(r, byteOrder, &node.Color.G); err != nil {
8,360✔
NEW
4197
                return models.LightningNode{}, err
×
NEW
4198
        }
×
4199
        if err := binary.Read(r, byteOrder, &node.Color.B); err != nil {
8,360✔
NEW
4200
                return models.LightningNode{}, err
×
NEW
4201
        }
×
4202

4203
        node.Alias, err = wire.ReadVarString(r, 0)
8,360✔
4204
        if err != nil {
8,360✔
NEW
4205
                return models.LightningNode{}, err
×
NEW
4206
        }
×
4207

4208
        err = node.Features.Decode(r)
8,360✔
4209
        if err != nil {
8,360✔
NEW
4210
                return models.LightningNode{}, err
×
NEW
4211
        }
×
4212

4213
        if _, err := r.Read(scratch[:2]); err != nil {
8,360✔
NEW
4214
                return models.LightningNode{}, err
×
NEW
4215
        }
×
4216
        numAddresses := int(byteOrder.Uint16(scratch[:2]))
8,360✔
4217

8,360✔
4218
        var addresses []net.Addr
8,360✔
4219
        for i := 0; i < numAddresses; i++ {
18,948✔
4220
                address, err := DeserializeAddr(r)
10,588✔
4221
                if err != nil {
10,588✔
NEW
4222
                        return models.LightningNode{}, err
×
NEW
4223
                }
×
4224
                addresses = append(addresses, address)
10,588✔
4225
        }
4226
        node.Addresses = addresses
8,360✔
4227

8,360✔
4228
        node.AuthSigBytes, err = wire.ReadVarBytes(r, 0, 80, "sig")
8,360✔
4229
        if err != nil {
8,360✔
NEW
4230
                return models.LightningNode{}, err
×
NEW
4231
        }
×
4232

4233
        // We'll try and see if there are any opaque bytes left, if not, then
4234
        // we'll ignore the EOF error and return the node as is.
4235
        node.ExtraOpaqueData, err = wire.ReadVarBytes(
8,360✔
4236
                r, 0, MaxAllowedExtraOpaqueBytes, "blob",
8,360✔
4237
        )
8,360✔
4238
        switch {
8,360✔
NEW
4239
        case errors.Is(err, io.ErrUnexpectedEOF):
×
NEW
4240
        case errors.Is(err, io.EOF):
×
NEW
4241
        case err != nil:
×
NEW
4242
                return models.LightningNode{}, err
×
4243
        }
4244

4245
        return node, nil
8,360✔
4246
}
4247

4248
// putChanEdgeInfo serializes the given channel edge info and stores it in the
// edge index bucket keyed by the 8-byte channel ID.
//
// The serialization layout is: the four 33-byte node/bitcoin keys, the
// var-bytes feature vector, the four var-bytes auth-proof signatures (empty
// slices when no proof is present), the channel outpoint, capacity, channel
// ID, chain hash, and finally any extra opaque bytes.
func putChanEdgeInfo(edgeIndex kvdb.RwBucket,
	edgeInfo *models.ChannelEdgeInfo, chanID [8]byte) error {

	var b bytes.Buffer

	if _, err := b.Write(edgeInfo.NodeKey1Bytes[:]); err != nil {
		return err
	}
	if _, err := b.Write(edgeInfo.NodeKey2Bytes[:]); err != nil {
		return err
	}
	if _, err := b.Write(edgeInfo.BitcoinKey1Bytes[:]); err != nil {
		return err
	}
	if _, err := b.Write(edgeInfo.BitcoinKey2Bytes[:]); err != nil {
		return err
	}

	if err := wire.WriteVarBytes(&b, 0, edgeInfo.Features); err != nil {
		return err
	}

	// If the channel has an authentication proof, write out its four
	// signatures; otherwise empty byte slices are written in their place.
	authProof := edgeInfo.AuthProof
	var nodeSig1, nodeSig2, bitcoinSig1, bitcoinSig2 []byte
	if authProof != nil {
		nodeSig1 = authProof.NodeSig1Bytes
		nodeSig2 = authProof.NodeSig2Bytes
		bitcoinSig1 = authProof.BitcoinSig1Bytes
		bitcoinSig2 = authProof.BitcoinSig2Bytes
	}

	if err := wire.WriteVarBytes(&b, 0, nodeSig1); err != nil {
		return err
	}
	if err := wire.WriteVarBytes(&b, 0, nodeSig2); err != nil {
		return err
	}
	if err := wire.WriteVarBytes(&b, 0, bitcoinSig1); err != nil {
		return err
	}
	if err := wire.WriteVarBytes(&b, 0, bitcoinSig2); err != nil {
		return err
	}

	if err := WriteOutpoint(&b, &edgeInfo.ChannelPoint); err != nil {
		return err
	}
	err := binary.Write(&b, byteOrder, uint64(edgeInfo.Capacity))
	if err != nil {
		return err
	}
	if _, err := b.Write(chanID[:]); err != nil {
		return err
	}
	if _, err := b.Write(edgeInfo.ChainHash[:]); err != nil {
		return err
	}

	// Refuse to store more opaque data than the protocol allows.
	if len(edgeInfo.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes {
		return ErrTooManyExtraOpaqueBytes(len(edgeInfo.ExtraOpaqueData))
	}
	err = wire.WriteVarBytes(&b, 0, edgeInfo.ExtraOpaqueData)
	if err != nil {
		return err
	}

	return edgeIndex.Put(chanID[:], b.Bytes())
}
4316

4317
func fetchChanEdgeInfo(edgeIndex kvdb.RBucket,
4318
        chanID []byte) (models.ChannelEdgeInfo, error) {
4,183✔
4319

4,183✔
4320
        edgeInfoBytes := edgeIndex.Get(chanID)
4,183✔
4321
        if edgeInfoBytes == nil {
4,274✔
4322
                return models.ChannelEdgeInfo{}, ErrEdgeNotFound
91✔
4323
        }
91✔
4324

4325
        edgeInfoReader := bytes.NewReader(edgeInfoBytes)
4,095✔
4326

4,095✔
4327
        return deserializeChanEdgeInfo(edgeInfoReader)
4,095✔
4328
}
4329

4330
// deserializeChanEdgeInfo decodes a ChannelEdgeInfo from its on-disk
// representation, mirroring the field order written by putChanEdgeInfo.
func deserializeChanEdgeInfo(r io.Reader) (models.ChannelEdgeInfo, error) {
	var (
		err      error
		edgeInfo models.ChannelEdgeInfo
	)

	// The four 33-byte identity/bitcoin keys come first.
	if _, err := io.ReadFull(r, edgeInfo.NodeKey1Bytes[:]); err != nil {
		return models.ChannelEdgeInfo{}, err
	}
	if _, err := io.ReadFull(r, edgeInfo.NodeKey2Bytes[:]); err != nil {
		return models.ChannelEdgeInfo{}, err
	}
	if _, err := io.ReadFull(r, edgeInfo.BitcoinKey1Bytes[:]); err != nil {
		return models.ChannelEdgeInfo{}, err
	}
	if _, err := io.ReadFull(r, edgeInfo.BitcoinKey2Bytes[:]); err != nil {
		return models.ChannelEdgeInfo{}, err
	}

	edgeInfo.Features, err = wire.ReadVarBytes(r, 0, 900, "features")
	if err != nil {
		return models.ChannelEdgeInfo{}, err
	}

	// The four auth-proof signatures are always present on disk, but may
	// all be empty when the channel has not been proven yet.
	proof := &models.ChannelAuthProof{}

	proof.NodeSig1Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
	if err != nil {
		return models.ChannelEdgeInfo{}, err
	}
	proof.NodeSig2Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
	if err != nil {
		return models.ChannelEdgeInfo{}, err
	}
	proof.BitcoinSig1Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
	if err != nil {
		return models.ChannelEdgeInfo{}, err
	}
	proof.BitcoinSig2Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
	if err != nil {
		return models.ChannelEdgeInfo{}, err
	}

	// Only attach the proof when at least one signature was present.
	if !proof.IsEmpty() {
		edgeInfo.AuthProof = proof
	}

	edgeInfo.ChannelPoint = wire.OutPoint{}
	if err := ReadOutpoint(r, &edgeInfo.ChannelPoint); err != nil {
		return models.ChannelEdgeInfo{}, err
	}
	if err := binary.Read(r, byteOrder, &edgeInfo.Capacity); err != nil {
		return models.ChannelEdgeInfo{}, err
	}
	if err := binary.Read(r, byteOrder, &edgeInfo.ChannelID); err != nil {
		return models.ChannelEdgeInfo{}, err
	}

	if _, err := io.ReadFull(r, edgeInfo.ChainHash[:]); err != nil {
		return models.ChannelEdgeInfo{}, err
	}

	// We'll try and see if there are any opaque bytes left, if not, then
	// we'll ignore the EOF error and return the edge as is.
	edgeInfo.ExtraOpaqueData, err = wire.ReadVarBytes(
		r, 0, MaxAllowedExtraOpaqueBytes, "blob",
	)
	switch {
	case errors.Is(err, io.ErrUnexpectedEOF):
	case errors.Is(err, io.EOF):
	case err != nil:
		return models.ChannelEdgeInfo{}, err
	}

	return edgeInfo, nil
}
4406

4407
// putChanEdgePolicy serializes the given directed channel policy and writes
// it under the (fromNode || channelID) key within the edges bucket, while
// keeping the edge-update time index and the disabled-policy index in sync.
func putChanEdgePolicy(edges kvdb.RwBucket, edge *models.ChannelEdgePolicy,
	from, to []byte) error {

	// The key is the 33-byte advertising node pubkey followed by the
	// 8-byte channel ID.
	var edgeKey [33 + 8]byte
	copy(edgeKey[:], from)
	byteOrder.PutUint64(edgeKey[33:], edge.ChannelID)

	var b bytes.Buffer
	if err := serializeChanEdgePolicy(&b, edge, to); err != nil {
		return err
	}

	// Before we write out the new edge, we'll create a new entry in the
	// update index in order to keep it fresh.
	updateUnix := uint64(edge.LastUpdate.Unix())
	var indexKey [8 + 8]byte
	byteOrder.PutUint64(indexKey[:8], updateUnix)
	byteOrder.PutUint64(indexKey[8:], edge.ChannelID)

	updateIndex, err := edges.CreateBucketIfNotExists(edgeUpdateIndexBucket)
	if err != nil {
		return err
	}

	// If there was already an entry for this edge, then we'll need to
	// delete the old one to ensure we don't leave around any after-images.
	// An unknown policy value does not have a update time recorded, so
	// it also does not need to be removed.
	if edgeBytes := edges.Get(edgeKey[:]); edgeBytes != nil &&
		!bytes.Equal(edgeBytes, unknownPolicy) {

		// In order to delete the old entry, we'll need to obtain the
		// *prior* update time in order to delete it. To do this, we'll
		// need to deserialize the existing policy within the database
		// (now outdated by the new one), and delete its corresponding
		// entry within the update index. We'll ignore any
		// ErrEdgePolicyOptionalFieldNotFound error, as we only need
		// the channel ID and update time to delete the entry.
		// TODO(halseth): get rid of these invalid policies in a
		// migration.
		oldEdgePolicy, err := deserializeChanEdgePolicy(
			bytes.NewReader(edgeBytes),
		)
		if err != nil &&
			!errors.Is(err, ErrEdgePolicyOptionalFieldNotFound) {

			return err
		}

		oldUpdateTime := uint64(oldEdgePolicy.LastUpdate.Unix())

		var oldIndexKey [8 + 8]byte
		byteOrder.PutUint64(oldIndexKey[:8], oldUpdateTime)
		byteOrder.PutUint64(oldIndexKey[8:], edge.ChannelID)

		if err := updateIndex.Delete(oldIndexKey[:]); err != nil {
			return err
		}
	}

	if err := updateIndex.Put(indexKey[:], nil); err != nil {
		return err
	}

	// Keep the disabled-policy index consistent with the direction bit
	// and disabled flag of the new policy.
	err = updateEdgePolicyDisabledIndex(
		edges, edge.ChannelID,
		edge.ChannelFlags&lnwire.ChanUpdateDirection > 0,
		edge.IsDisabled(),
	)
	if err != nil {
		return err
	}

	return edges.Put(edgeKey[:], b.Bytes())
}
4482

4483
// updateEdgePolicyDisabledIndex is used to update the disabledEdgePolicyIndex
4484
// bucket by either add a new disabled ChannelEdgePolicy or remove an existing
4485
// one.
4486
// The direction represents the direction of the edge and disabled is used for
4487
// deciding whether to remove or add an entry to the bucket.
4488
// In general a channel is disabled if two entries for the same chanID exist
4489
// in this bucket.
4490
// Maintaining the bucket this way allows a fast retrieval of disabled
4491
// channels, for example when prune is needed.
4492
func updateEdgePolicyDisabledIndex(edges kvdb.RwBucket, chanID uint64,
4493
        direction bool, disabled bool) error {
2,947✔
4494

2,947✔
4495
        var disabledEdgeKey [8 + 1]byte
2,947✔
4496
        byteOrder.PutUint64(disabledEdgeKey[0:], chanID)
2,947✔
4497
        if direction {
4,418✔
4498
                disabledEdgeKey[8] = 1
1,471✔
4499
        }
1,471✔
4500

4501
        disabledEdgePolicyIndex, err := edges.CreateBucketIfNotExists(
2,947✔
4502
                disabledEdgePolicyBucket,
2,947✔
4503
        )
2,947✔
4504
        if err != nil {
2,947✔
NEW
4505
                return err
×
NEW
4506
        }
×
4507

4508
        if disabled {
2,976✔
4509
                return disabledEdgePolicyIndex.Put(disabledEdgeKey[:], []byte{})
29✔
4510
        }
29✔
4511

4512
        return disabledEdgePolicyIndex.Delete(disabledEdgeKey[:])
2,921✔
4513
}
4514

4515
// putChanEdgePolicyUnknown marks the edge policy as unknown
4516
// in the edges bucket.
4517
func putChanEdgePolicyUnknown(edges kvdb.RwBucket, channelID uint64,
4518
        from []byte) error {
2,975✔
4519

2,975✔
4520
        var edgeKey [33 + 8]byte
2,975✔
4521
        copy(edgeKey[:], from)
2,975✔
4522
        byteOrder.PutUint64(edgeKey[33:], channelID)
2,975✔
4523

2,975✔
4524
        if edges.Get(edgeKey[:]) != nil {
2,975✔
NEW
4525
                return fmt.Errorf("cannot write unknown policy for channel %v "+
×
NEW
4526
                        " when there is already a policy present", channelID)
×
NEW
4527
        }
×
4528

4529
        return edges.Put(edgeKey[:], unknownPolicy)
2,975✔
4530
}
4531

4532
// fetchChanEdgePolicy loads the directed channel policy stored under the
// (nodePub || chanID) key within the edges bucket. It returns
// ErrEdgeNotFound when no entry exists at all, and (nil, nil) when the stored
// entry is the unknownPolicy sentinel or is missing an expected optional
// field.
func fetchChanEdgePolicy(edges kvdb.RBucket, chanID []byte,
	nodePub []byte) (*models.ChannelEdgePolicy, error) {

	var edgeKey [33 + 8]byte
	copy(edgeKey[:], nodePub)
	copy(edgeKey[33:], chanID)

	edgeBytes := edges.Get(edgeKey[:])
	if edgeBytes == nil {
		return nil, ErrEdgeNotFound
	}

	// No need to deserialize unknown policy.
	if bytes.Equal(edgeBytes, unknownPolicy) {
		return nil, nil
	}

	edgeReader := bytes.NewReader(edgeBytes)

	ep, err := deserializeChanEdgePolicy(edgeReader)
	switch {
	// If the db policy was missing an expected optional field, we return
	// nil as if the policy was unknown.
	case errors.Is(err, ErrEdgePolicyOptionalFieldNotFound):
		return nil, nil

	case err != nil:
		return nil, err
	}

	return ep, nil
}
4564

4565
func fetchChanEdgePolicies(edgeIndex kvdb.RBucket, edges kvdb.RBucket,
4566
        chanID []byte) (*models.ChannelEdgePolicy, *models.ChannelEdgePolicy,
4567
        error) {
244✔
4568

244✔
4569
        edgeInfo := edgeIndex.Get(chanID)
244✔
4570
        if edgeInfo == nil {
244✔
NEW
4571
                return nil, nil, fmt.Errorf("%w: chanID=%x", ErrEdgeNotFound,
×
NEW
4572
                        chanID)
×
NEW
4573
        }
×
4574

4575
        // The first node is contained within the first half of the edge
4576
        // information. We only propagate the error here and below if it's
4577
        // something other than edge non-existence.
4578
        node1Pub := edgeInfo[:33]
244✔
4579
        edge1, err := fetchChanEdgePolicy(edges, chanID, node1Pub)
244✔
4580
        if err != nil {
244✔
NEW
4581
                return nil, nil, fmt.Errorf("%w: node1Pub=%x", ErrEdgeNotFound,
×
NEW
4582
                        node1Pub)
×
NEW
4583
        }
×
4584

4585
        // Similarly, the second node is contained within the latter
4586
        // half of the edge information.
4587
        node2Pub := edgeInfo[33:66]
244✔
4588
        edge2, err := fetchChanEdgePolicy(edges, chanID, node2Pub)
244✔
4589
        if err != nil {
244✔
NEW
4590
                return nil, nil, fmt.Errorf("%w: node2Pub=%x", ErrEdgeNotFound,
×
NEW
4591
                        node2Pub)
×
NEW
4592
        }
×
4593

4594
        return edge1, edge2, nil
244✔
4595
}
4596

4597
// serializeChanEdgePolicy writes the given directed channel policy to w. The
// layout is: signature, channel ID, last-update timestamp, message flags,
// channel flags, timelock delta, min HTLC, base fee, proportional fee, the
// 33-byte destination node key, and finally a var-bytes blob holding the
// optional max_htlc followed by any extra opaque data.
func serializeChanEdgePolicy(w io.Writer, edge *models.ChannelEdgePolicy,
	to []byte) error {

	err := wire.WriteVarBytes(w, 0, edge.SigBytes)
	if err != nil {
		return err
	}

	if err := binary.Write(w, byteOrder, edge.ChannelID); err != nil {
		return err
	}

	var scratch [8]byte
	updateUnix := uint64(edge.LastUpdate.Unix())
	byteOrder.PutUint64(scratch[:], updateUnix)
	if _, err := w.Write(scratch[:]); err != nil {
		return err
	}

	if err := binary.Write(w, byteOrder, edge.MessageFlags); err != nil {
		return err
	}
	if err := binary.Write(w, byteOrder, edge.ChannelFlags); err != nil {
		return err
	}
	if err := binary.Write(w, byteOrder, edge.TimeLockDelta); err != nil {
		return err
	}
	if err := binary.Write(w, byteOrder, uint64(edge.MinHTLC)); err != nil {
		return err
	}
	err = binary.Write(w, byteOrder, uint64(edge.FeeBaseMSat))
	if err != nil {
		return err
	}
	err = binary.Write(
		w, byteOrder, uint64(edge.FeeProportionalMillionths),
	)
	if err != nil {
		return err
	}

	if _, err := w.Write(to); err != nil {
		return err
	}

	// If the max_htlc field is present, we write it. To be compatible with
	// older versions that wasn't aware of this field, we write it as part
	// of the opaque data.
	// TODO(halseth): clean up when moving to TLV.
	var opaqueBuf bytes.Buffer
	if edge.MessageFlags.HasMaxHtlc() {
		err := binary.Write(&opaqueBuf, byteOrder, uint64(edge.MaxHTLC))
		if err != nil {
			return err
		}
	}

	// Refuse to store more opaque data than the protocol allows.
	if len(edge.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes {
		return ErrTooManyExtraOpaqueBytes(len(edge.ExtraOpaqueData))
	}
	if _, err := opaqueBuf.Write(edge.ExtraOpaqueData); err != nil {
		return err
	}

	if err := wire.WriteVarBytes(w, 0, opaqueBuf.Bytes()); err != nil {
		return err
	}

	return nil
}
4668

4669
func deserializeChanEdgePolicy(r io.Reader) (*models.ChannelEdgePolicy, error) {
7,825✔
4670
        // Deserialize the policy. Note that in case an optional field is not
7,825✔
4671
        // found, both an error and a populated policy object are returned.
7,825✔
4672
        edge, deserializeErr := deserializeChanEdgePolicyRaw(r)
7,825✔
4673
        if deserializeErr != nil &&
7,825✔
4674
                !errors.Is(deserializeErr, ErrEdgePolicyOptionalFieldNotFound) {
7,825✔
NEW
4675

×
NEW
4676
                return nil, deserializeErr
×
NEW
4677
        }
×
4678

4679
        return edge, deserializeErr
7,825✔
4680
}
4681

4682
func deserializeChanEdgePolicyRaw(r io.Reader) (*models.ChannelEdgePolicy,
4683
        error) {
8,832✔
4684

8,832✔
4685
        edge := &models.ChannelEdgePolicy{}
8,832✔
4686

8,832✔
4687
        var err error
8,832✔
4688
        edge.SigBytes, err = wire.ReadVarBytes(r, 0, 80, "sig")
8,832✔
4689
        if err != nil {
8,832✔
NEW
4690
                return nil, err
×
NEW
4691
        }
×
4692

4693
        if err := binary.Read(r, byteOrder, &edge.ChannelID); err != nil {
8,832✔
NEW
4694
                return nil, err
×
NEW
4695
        }
×
4696

4697
        var scratch [8]byte
8,832✔
4698
        if _, err := r.Read(scratch[:]); err != nil {
8,832✔
NEW
4699
                return nil, err
×
NEW
4700
        }
×
4701
        unix := int64(byteOrder.Uint64(scratch[:]))
8,832✔
4702
        edge.LastUpdate = time.Unix(unix, 0)
8,832✔
4703

8,832✔
4704
        if err := binary.Read(r, byteOrder, &edge.MessageFlags); err != nil {
8,832✔
NEW
4705
                return nil, err
×
NEW
4706
        }
×
4707
        if err := binary.Read(r, byteOrder, &edge.ChannelFlags); err != nil {
8,832✔
NEW
4708
                return nil, err
×
NEW
4709
        }
×
4710
        if err := binary.Read(r, byteOrder, &edge.TimeLockDelta); err != nil {
8,832✔
NEW
4711
                return nil, err
×
NEW
4712
        }
×
4713

4714
        var n uint64
8,832✔
4715
        if err := binary.Read(r, byteOrder, &n); err != nil {
8,832✔
NEW
4716
                return nil, err
×
NEW
4717
        }
×
4718
        edge.MinHTLC = lnwire.MilliSatoshi(n)
8,832✔
4719

8,832✔
4720
        if err := binary.Read(r, byteOrder, &n); err != nil {
8,832✔
NEW
4721
                return nil, err
×
NEW
4722
        }
×
4723
        edge.FeeBaseMSat = lnwire.MilliSatoshi(n)
8,832✔
4724

8,832✔
4725
        if err := binary.Read(r, byteOrder, &n); err != nil {
8,832✔
NEW
4726
                return nil, err
×
NEW
4727
        }
×
4728
        edge.FeeProportionalMillionths = lnwire.MilliSatoshi(n)
8,832✔
4729

8,832✔
4730
        if _, err := r.Read(edge.ToNode[:]); err != nil {
8,832✔
NEW
4731
                return nil, err
×
NEW
4732
        }
×
4733

4734
        // We'll try and see if there are any opaque bytes left, if not, then
4735
        // we'll ignore the EOF error and return the edge as is.
4736
        edge.ExtraOpaqueData, err = wire.ReadVarBytes(
8,832✔
4737
                r, 0, MaxAllowedExtraOpaqueBytes, "blob",
8,832✔
4738
        )
8,832✔
4739
        switch {
8,832✔
NEW
4740
        case errors.Is(err, io.ErrUnexpectedEOF):
×
4741
        case errors.Is(err, io.EOF):
3✔
NEW
4742
        case err != nil:
×
NEW
4743
                return nil, err
×
4744
        }
4745

4746
        // See if optional fields are present.
4747
        if edge.MessageFlags.HasMaxHtlc() {
17,290✔
4748
                // The max_htlc field should be at the beginning of the opaque
8,458✔
4749
                // bytes.
8,458✔
4750
                opq := edge.ExtraOpaqueData
8,458✔
4751

8,458✔
4752
                // If the max_htlc field is not present, it might be old data
8,458✔
4753
                // stored before this field was validated. We'll return the
8,458✔
4754
                // edge along with an error.
8,458✔
4755
                if len(opq) < 8 {
8,461✔
4756
                        return edge, ErrEdgePolicyOptionalFieldNotFound
3✔
4757
                }
3✔
4758

4759
                maxHtlc := byteOrder.Uint64(opq[:8])
8,455✔
4760
                edge.MaxHTLC = lnwire.MilliSatoshi(maxHtlc)
8,455✔
4761

8,455✔
4762
                // Exclude the parsed field from the rest of the opaque data.
8,455✔
4763
                edge.ExtraOpaqueData = opq[8:]
8,455✔
4764
        }
4765

4766
        return edge, nil
8,829✔
4767
}
4768

4769
// chanGraphNodeTx is an implementation of the NodeRTx interface backed by the
// KVStore and a kvdb.RTx.
type chanGraphNodeTx struct {
	// tx is the read transaction under which all operations on this node
	// are performed.
	tx   kvdb.RTx

	// db is the backing KVStore.
	db   *KVStore

	// node is the raw node information fetched under tx.
	node *models.LightningNode
}
4776

4777
// A compile-time constraint to ensure chanGraphNodeTx implements the NodeRTx
4778
// interface.
4779
var _ NodeRTx = (*chanGraphNodeTx)(nil)
4780

4781
func newChanGraphNodeTx(tx kvdb.RTx, db *KVStore,
4782
        node *models.LightningNode) *chanGraphNodeTx {
3,917✔
4783

3,917✔
4784
        return &chanGraphNodeTx{
3,917✔
4785
                tx:   tx,
3,917✔
4786
                db:   db,
3,917✔
4787
                node: node,
3,917✔
4788
        }
3,917✔
4789
}
3,917✔
4790

4791
// Node returns the raw information of the node. The returned pointer is the
// node instance captured when this NodeRTx was created.
//
// NOTE: This is a part of the NodeRTx interface.
func (c *chanGraphNodeTx) Node() *models.LightningNode {
	return c.node
}
4,842✔
4797

4798
// FetchNode fetches the node with the given pub key under the same transaction
4799
// used to fetch the current node. The returned node is also a NodeRTx and any
4800
// operations on that NodeRTx will also be done under the same transaction.
4801
//
4802
// NOTE: This is a part of the NodeRTx interface.
4803
func (c *chanGraphNodeTx) FetchNode(nodePub route.Vertex) (NodeRTx, error) {
2,944✔
4804
        node, err := c.db.FetchLightningNodeTx(c.tx, nodePub)
2,944✔
4805
        if err != nil {
2,944✔
NEW
4806
                return nil, err
×
NEW
4807
        }
×
4808

4809
        return newChanGraphNodeTx(c.tx, c.db, node), nil
2,944✔
4810
}
4811

4812
// ForEachChannel can be used to iterate over the node's channels under
4813
// the same transaction used to fetch the node.
4814
//
4815
// NOTE: This is a part of the NodeRTx interface.
4816
func (c *chanGraphNodeTx) ForEachChannel(f func(*models.ChannelEdgeInfo,
4817
        *models.ChannelEdgePolicy, *models.ChannelEdgePolicy) error) error {
965✔
4818

965✔
4819
        return c.db.ForEachNodeChannelTx(c.tx, c.node.PubKeyBytes,
965✔
4820
                func(_ kvdb.RTx, info *models.ChannelEdgeInfo, policy1,
965✔
4821
                        policy2 *models.ChannelEdgePolicy) error {
3,909✔
4822

2,944✔
4823
                        return f(info, policy1, policy2)
2,944✔
4824
                },
2,944✔
4825
        )
4826
}
4827

4828
// MakeTestGraph creates a new instance of the KVStore for testing
// purposes. The backend and its cleanup are registered with t.Cleanup.
func MakeTestGraph(t testing.TB, modifiers ...OptionModifier) (*ChannelGraph,
	error) {

	// NOTE(review): opts is populated from the modifiers below but is
	// never passed to NewChannelGraph, so the modifiers currently have no
	// effect — confirm whether NewChannelGraph should receive them.
	opts := DefaultOptions()
	for _, modifier := range modifiers {
		modifier(opts)
	}

	// Next, create KVStore for the first time.
	backend, backendCleanup, err := kvdb.GetTestBackend(t.TempDir(), "cgr")
	if err != nil {
		backendCleanup()

		return nil, err
	}

	graph, err := NewChannelGraph(backend)
	if err != nil {
		backendCleanup()

		return nil, err
	}

	// Tear the backend down when the test (and all its subtests) finish.
	t.Cleanup(func() {
		_ = backend.Close()
		backendCleanup()
	})

	return graph, nil
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc