lightningnetwork / lnd / build 13522725226

25 Feb 2025 01:46PM UTC, coverage: 58.83% (+0.02%) from 58.815%

Pull Request #9551 (ellemouton, via github): graph: extract cache from CRUD
Commit: graph/db: move cache writes for Prune methods

This commit moves the cache writes for PruneGraphNodes and PruneGraph
from the KVStore to the ChannelGraph.

100 of 116 new or added lines in 2 files covered (86.21%).

707 existing lines in 14 files now uncovered.

136401 of 231858 relevant lines covered (58.83%).

19263.57 hits per line.

Source File: /graph/db/kv_store.go (76.09% covered)
1
package graphdb
2

3
import (
4
        "bytes"
5
        "crypto/sha256"
6
        "encoding/binary"
7
        "errors"
8
        "fmt"
9
        "io"
10
        "math"
11
        "net"
12
        "sort"
13
        "sync"
14
        "testing"
15
        "time"
16

17
        "github.com/btcsuite/btcd/btcec/v2"
18
        "github.com/btcsuite/btcd/chaincfg/chainhash"
19
        "github.com/btcsuite/btcd/txscript"
20
        "github.com/btcsuite/btcd/wire"
21
        "github.com/btcsuite/btcwallet/walletdb"
22
        "github.com/lightningnetwork/lnd/aliasmgr"
23
        "github.com/lightningnetwork/lnd/batch"
24
        "github.com/lightningnetwork/lnd/graph/db/models"
25
        "github.com/lightningnetwork/lnd/input"
26
        "github.com/lightningnetwork/lnd/kvdb"
27
        "github.com/lightningnetwork/lnd/lnwire"
28
        "github.com/lightningnetwork/lnd/routing/route"
29
)
30

31
var (
32
        // nodeBucket is a bucket which houses all the vertices or nodes within
33
        // the channel graph. This bucket has a single sub-bucket which adds an
34
        // additional index from pubkey -> alias. Within the top-level of this
35
        // bucket, the key space maps a node's compressed public key to the
36
        // serialized information for that node. Additionally, there's a
37
        // special key "source" which stores the pubkey of the source node. The
38
        // source node is used as the starting point for all graph queries and
39
        // traversals. The graph is formed as a star-graph with the source node
40
        // at the center.
41
        //
42
        // maps: pubKey -> nodeInfo
43
        // maps: source -> selfPubKey
44
        nodeBucket = []byte("graph-node")
45

46
        // nodeUpdateIndexBucket is a sub-bucket of the nodeBucket. This bucket
47
        // will be used to quickly look up the "freshness" of a node's last
48
        // update to the network. The bucket only contains keys, and no values,
49
        // its mapping is:
50
        //
51
        // maps: updateTime || nodeID -> nil
52
        nodeUpdateIndexBucket = []byte("graph-node-update-index")
53

54
        // sourceKey is a special key that resides within the nodeBucket. The
55
        // sourceKey maps a key to the public key of the "self node".
56
        sourceKey = []byte("source")
57

58
        // aliasIndexBucket is a sub-bucket that's nested within the main
59
        // nodeBucket. This bucket maps the public key of a node to its
60
        // current alias. This bucket is provided as it can be used within a
61
        // future UI layer to add an additional degree of confirmation.
62
        aliasIndexBucket = []byte("alias")
63

64
        // edgeBucket is a bucket which houses all of the edge or channel
65
        // information within the channel graph. This bucket essentially acts
66
        // as an adjacency list, which in conjunction with a range scan, can be
67
        // used to iterate over all the incoming and outgoing edges for a
68
        // particular node. Keys in this bucket use a prefix scheme which leads
69
        // with the node's public key and ends with the compact edge ID.
70
        // For each chanID, there will be two entries within the bucket, as the
71
        // graph is directed: nodes may have different policies w.r.t. fees
72
        // for their respective directions.
73
        //
74
        // maps: pubKey || chanID -> channel edge policy for node
75
        edgeBucket = []byte("graph-edge")
76

77
        // unknownPolicy is represented as an empty slice. It is
78
        // used as the value in edgeBucket for unknown channel edge policies.
79
        // Unknown policies are still stored in the database to enable efficient
80
        // lookup of incoming channel edges.
81
        unknownPolicy = []byte{}
82

83
        // chanStart is an array of all zero bytes which is used to perform
84
        // range scans within the edgeBucket to obtain all of the outgoing
85
        // edges for a particular node.
86
        chanStart [8]byte
87

88
        // edgeIndexBucket is an index which can be used to iterate all edges
89
        // in the bucket, grouping them according to their in/out nodes.
90
        // Additionally, the items in this bucket also contain the complete
91
        // edge information for a channel. The edge information includes the
92
        // capacity of the channel, the nodes that made the channel, etc. This
93
        // bucket resides within the edgeBucket above. Creation of an edge
94
        // proceeds in two phases: first the edge is added to the edge index,
95
        // afterwards the edgeBucket can be updated with the latest details of
96
        // the edge as they are announced on the network.
97
        //
98
        // maps: chanID -> pubKey1 || pubKey2 || restofEdgeInfo
99
        edgeIndexBucket = []byte("edge-index")
100

101
        // edgeUpdateIndexBucket is a sub-bucket of the main edgeBucket. This
102
        // bucket contains an index which allows us to gauge the "freshness" of
103
        // a channel's last updates.
104
        //
105
        // maps: updateTime || chanID -> nil
106
        edgeUpdateIndexBucket = []byte("edge-update-index")
107

108
        // channelPointBucket maps a channel's full outpoint (txid:index) to
109
        // its short 8-byte channel ID. This bucket resides within the
110
        // edgeBucket above, and can be used to quickly remove an edge due to
111
        // the outpoint being spent, or to query for existence of a channel.
112
        //
113
        // maps: outPoint -> chanID
114
        channelPointBucket = []byte("chan-index")
115

116
        // zombieBucket is a sub-bucket of the main edgeBucket bucket
117
        // responsible for maintaining an index of zombie channels. Each entry
118
        // exists within the bucket as follows:
119
        //
120
        // maps: chanID -> pubKey1 || pubKey2
121
        //
122
        // The chanID represents the channel ID of the edge that is marked as a
123
        // zombie and is used as the key, which maps to the public keys of the
124
        // edge's participants.
125
        zombieBucket = []byte("zombie-index")
126

127
        // disabledEdgePolicyBucket is a sub-bucket of the main edgeBucket
128
        // bucket responsible for maintaining an index of disabled edge
129
        // policies. Each entry exists within the bucket as follows:
130
        //
131
        // maps: <chanID><direction> -> []byte{}
132
        //
133
        // The chanID represents the channel ID of the edge and the direction is
134
        // one byte representing the direction of the edge. The main purpose of
135
        // this index is to allow pruning disabled channels in a fast way
136
        // without the need to iterate all over the graph.
137
        disabledEdgePolicyBucket = []byte("disabled-edge-policy-index")
138

139
        // graphMetaBucket is a top-level bucket which stores various metadata
140
        // related to the on-disk channel graph. Data stored in this bucket
141
        // includes the block to which the graph has been synced, the total
142
        // number of channels, etc.
143
        graphMetaBucket = []byte("graph-meta")
144

145
        // pruneLogBucket is a bucket within the graphMetaBucket that stores
146
        // a mapping from the block height to the hash for the blocks used to
147
        // prune the graph.
148
        // Once a new block is discovered, any channels that have been closed
149
        // (by spending the outpoint) can safely be removed from the graph, and
150
        // the block is added to the prune log. We need to keep such a log for
151
        // the case where a reorg happens, and we must "rewind" the state of the
152
        // graph by removing channels that were previously confirmed. In such a
153
        // case we'll remove all entries from the prune log with a block height
154
        // that no longer exists.
155
        pruneLogBucket = []byte("prune-log")
156

157
        // closedScidBucket is a top-level bucket that stores scids for
158
        // channels that we know to be closed. This is used so that we don't
159
        // need to perform expensive validation checks if we receive a channel
160
        // announcement for the channel again.
161
        //
162
        // maps: scid -> []byte{}
163
        closedScidBucket = []byte("closed-scid")
164
)
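// Illustrative sketch (not part of the original file): how a key in the
// edgeBucket is composed under the "pubKey || chanID" scheme documented
// above. The 33-byte compressed public key is followed by the 8-byte
// big-endian channel ID, giving the 41-byte (33+8) keys validated elsewhere
// in this file. The variables nodePubKey (a route.Vertex) and chanID (a
// uint64) are assumed.
//
//	var edgeKey [33 + 8]byte
//	copy(edgeKey[:33], nodePubKey[:])                // compressed public key
//	binary.BigEndian.PutUint64(edgeKey[33:], chanID) // compact channel ID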
165

166
const (
167
        // MaxAllowedExtraOpaqueBytes is the largest amount of opaque bytes that
168
        // we'll permit to be written to disk. We limit this as otherwise, it
169
        // would be possible for a node to create a ton of updates and slowly
170
        // fill our disk, and also waste bandwidth due to relaying.
171
        MaxAllowedExtraOpaqueBytes = 10000
172
)
173

174
// KVStore is a persistent, on-disk graph representation of the Lightning
175
// Network. This struct can be used to implement path finding algorithms on top
176
// of, and also to update a node's view based on information received from the
177
// p2p network. Internally, the graph is stored using a modified adjacency list
178
// representation with some added object interaction possible with each
179
// serialized edge/node. The graph stored is directed, meaning that there are two
180
// edges stored for each channel: an inbound/outbound edge for each node pair.
181
// Nodes, edges, and edge information can all be added to the graph
182
// independently. Edge removal results in the deletion of all edge information
183
// for that edge.
184
type KVStore struct {
185
        db kvdb.Backend
186

187
        // cacheMu guards all caches (rejectCache, chanCache, graphCache). If
188
        // this mutex will be acquired at the same time as the DB mutex then
189
        // the cacheMu MUST be acquired first to prevent deadlock.
190
        cacheMu     sync.RWMutex
191
        rejectCache *rejectCache
192
        chanCache   *channelCache
193
        graphCache  *GraphCache
194

195
        chanScheduler batch.Scheduler
196
        nodeScheduler batch.Scheduler
197
}
198

199
// NewKVStore allocates a new KVStore backed by a DB instance. The
200
// returned instance has its own unique reject cache and channel cache.
201
func NewKVStore(db kvdb.Backend, options ...KVStoreOptionModifier) (*KVStore,
202
        error) {
176✔
203

176✔
204
        opts := DefaultOptions()
176✔
205
        for _, o := range options {
179✔
206
                o(opts)
3✔
207
        }
3✔
208

209
        if !opts.NoMigration {
352✔
210
                if err := initKVStore(db); err != nil {
176✔
211
                        return nil, err
×
212
                }
×
213
        }
214

215
        g := &KVStore{
176✔
216
                db:          db,
176✔
217
                rejectCache: newRejectCache(opts.RejectCacheSize),
176✔
218
                chanCache:   newChannelCache(opts.ChannelCacheSize),
176✔
219
        }
176✔
220
        g.chanScheduler = batch.NewTimeScheduler(
176✔
221
                db, &g.cacheMu, opts.BatchCommitInterval,
176✔
222
        )
176✔
223
        g.nodeScheduler = batch.NewTimeScheduler(
176✔
224
                db, nil, opts.BatchCommitInterval,
176✔
225
        )
176✔
226

176✔
227
        return g, nil
176✔
228
}
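// Illustrative usage sketch (not part of the original file): constructing a
// KVStore from an existing kvdb.Backend. The backend value db is assumed to
// have been opened by the caller; optional behaviour can be tuned with
// KVStoreOptionModifier values as shown in the constructor above.
//
//	store, err := NewKVStore(db)
//	if err != nil {
//	        // handle the error
//	}
//	// store can now serve graph queries such as ForEachChannel below.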
229

230
// setGraphCache sets the KVStore's graphCache.
231
//
232
// NOTE: this is temporary and will only be called from the ChannelGraph's
233
// constructor before the KVStore methods are available to be called. This will
234
// be removed once the graph cache is fully owned by the ChannelGraph.
235
func (c *KVStore) setGraphCache(cache *GraphCache) {
143✔
236
        c.graphCache = cache
143✔
237
}
143✔
238

239
// channelMapKey is the key structure used for storing channel edge policies.
240
type channelMapKey struct {
241
        nodeKey route.Vertex
242
        chanID  [8]byte
243
}
244

245
// getChannelMap loads all channel edge policies from the database and stores
246
// them in a map.
247
func (c *KVStore) getChannelMap(edges kvdb.RBucket) (
248
        map[channelMapKey]*models.ChannelEdgePolicy, error) {
147✔
249

147✔
250
        // Create a map to store all channel edge policies.
147✔
251
        channelMap := make(map[channelMapKey]*models.ChannelEdgePolicy)
147✔
252

147✔
253
        err := kvdb.ForAll(edges, func(k, edgeBytes []byte) error {
1,721✔
254
                // Skip embedded buckets.
1,574✔
255
                if bytes.Equal(k, edgeIndexBucket) ||
1,574✔
256
                        bytes.Equal(k, edgeUpdateIndexBucket) ||
1,574✔
257
                        bytes.Equal(k, zombieBucket) ||
1,574✔
258
                        bytes.Equal(k, disabledEdgePolicyBucket) ||
1,574✔
259
                        bytes.Equal(k, channelPointBucket) {
2,158✔
260

584✔
261
                        return nil
584✔
262
                }
584✔
263

264
                // Validate key length.
265
                if len(k) != 33+8 {
993✔
266
                        return fmt.Errorf("invalid edge key %x encountered", k)
×
267
                }
×
268

269
                var key channelMapKey
993✔
270
                copy(key.nodeKey[:], k[:33])
993✔
271
                copy(key.chanID[:], k[33:])
993✔
272

993✔
273
                // No need to deserialize unknown policy.
993✔
274
                if bytes.Equal(edgeBytes, unknownPolicy) {
993✔
275
                        return nil
×
276
                }
×
277

278
                edgeReader := bytes.NewReader(edgeBytes)
993✔
279
                edge, err := deserializeChanEdgePolicyRaw(
993✔
280
                        edgeReader,
993✔
281
                )
993✔
282

993✔
283
                switch {
993✔
284
                // If the db policy was missing an expected optional field, we
285
                // return nil as if the policy was unknown.
286
                case errors.Is(err, ErrEdgePolicyOptionalFieldNotFound):
×
287
                        return nil
×
288

289
                case err != nil:
×
290
                        return err
×
291
                }
292

293
                channelMap[key] = edge
993✔
294

993✔
295
                return nil
993✔
296
        })
297
        if err != nil {
147✔
298
                return nil, err
×
299
        }
×
300

301
        return channelMap, nil
147✔
302
}
303

304
var graphTopLevelBuckets = [][]byte{
305
        nodeBucket,
306
        edgeBucket,
307
        graphMetaBucket,
308
        closedScidBucket,
309
}
310

311
// Wipe completely deletes all saved state within all used buckets within the
312
// database. The deletion is done in a single transaction, therefore this
313
// operation is fully atomic.
314
func (c *KVStore) Wipe() error {
×
315
        err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
×
316
                for _, tlb := range graphTopLevelBuckets {
×
317
                        err := tx.DeleteTopLevelBucket(tlb)
×
318
                        if err != nil &&
×
319
                                !errors.Is(err, kvdb.ErrBucketNotFound) {
×
320

×
321
                                return err
×
322
                        }
×
323
                }
324

325
                return nil
×
326
        }, func() {})
×
327
        if err != nil {
×
328
                return err
×
329
        }
×
330

331
        return initKVStore(c.db)
×
332
}
333

334
// initKVStore creates and initializes a fresh version of the graph store. In
335
// the case that the target path has not yet been created or doesn't yet exist,
336
// then the path is created. Additionally, all required top-level buckets used
337
// within the database are created.
338
func initKVStore(db kvdb.Backend) error {
176✔
339
        err := kvdb.Update(db, func(tx kvdb.RwTx) error {
352✔
340
                for _, tlb := range graphTopLevelBuckets {
871✔
341
                        if _, err := tx.CreateTopLevelBucket(tlb); err != nil {
695✔
342
                                return err
×
343
                        }
×
344
                }
345

346
                nodes := tx.ReadWriteBucket(nodeBucket)
176✔
347
                _, err := nodes.CreateBucketIfNotExists(aliasIndexBucket)
176✔
348
                if err != nil {
176✔
349
                        return err
×
350
                }
×
351
                _, err = nodes.CreateBucketIfNotExists(nodeUpdateIndexBucket)
176✔
352
                if err != nil {
176✔
353
                        return err
×
354
                }
×
355

356
                edges := tx.ReadWriteBucket(edgeBucket)
176✔
357
                _, err = edges.CreateBucketIfNotExists(edgeIndexBucket)
176✔
358
                if err != nil {
176✔
359
                        return err
×
360
                }
×
361
                _, err = edges.CreateBucketIfNotExists(edgeUpdateIndexBucket)
176✔
362
                if err != nil {
176✔
363
                        return err
×
364
                }
×
365
                _, err = edges.CreateBucketIfNotExists(channelPointBucket)
176✔
366
                if err != nil {
176✔
367
                        return err
×
368
                }
×
369
                _, err = edges.CreateBucketIfNotExists(zombieBucket)
176✔
370
                if err != nil {
176✔
371
                        return err
×
372
                }
×
373

374
                graphMeta := tx.ReadWriteBucket(graphMetaBucket)
176✔
375
                _, err = graphMeta.CreateBucketIfNotExists(pruneLogBucket)
176✔
376

176✔
377
                return err
176✔
378
        }, func() {})
176✔
379
        if err != nil {
176✔
380
                return fmt.Errorf("unable to create new channel graph: %w", err)
×
381
        }
×
382

383
        return nil
176✔
384
}
385

386
// AddrsForNode returns all known addresses for the target node public key that
387
// the graph DB is aware of. The returned boolean indicates whether the given
388
// node is known to the graph DB.
389
//
390
// NOTE: this is part of the channeldb.AddrSource interface.
391
func (c *KVStore) AddrsForNode(nodePub *btcec.PublicKey) (bool, []net.Addr,
392
        error) {
4✔
393

4✔
394
        pubKey, err := route.NewVertexFromBytes(nodePub.SerializeCompressed())
4✔
395
        if err != nil {
4✔
396
                return false, nil, err
×
397
        }
×
398

399
        node, err := c.FetchLightningNode(pubKey)
4✔
400
        // We don't consider it an error if the graph is unaware of the node.
4✔
401
        switch {
4✔
402
        case err != nil && !errors.Is(err, ErrGraphNodeNotFound):
×
403
                return false, nil, err
×
404

405
        case errors.Is(err, ErrGraphNodeNotFound):
3✔
406
                return false, nil, nil
3✔
407
        }
408

409
        return true, node.Addresses, nil
4✔
410
}
411

412
// ForEachChannel iterates through all the channel edges stored within the
413
// graph and invokes the passed callback for each edge. The callback takes two
414
// edges since, as this is a directed graph, both the in/out edges are visited.
415
// If the callback returns an error, then the transaction is aborted and the
416
// iteration stops early.
417
//
418
// NOTE: If an edge can't be found, or wasn't advertised, then a nil pointer
419
// for that particular channel edge routing policy will be passed into the
420
// callback.
421
func (c *KVStore) ForEachChannel(cb func(*models.ChannelEdgeInfo,
422
        *models.ChannelEdgePolicy, *models.ChannelEdgePolicy) error) error {
147✔
423

147✔
424
        return c.db.View(func(tx kvdb.RTx) error {
294✔
425
                edges := tx.ReadBucket(edgeBucket)
147✔
426
                if edges == nil {
147✔
427
                        return ErrGraphNoEdgesFound
×
428
                }
×
429

430
                // First, load all edges in memory indexed by node and channel
431
                // id.
432
                channelMap, err := c.getChannelMap(edges)
147✔
433
                if err != nil {
147✔
434
                        return err
×
435
                }
×
436

437
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
147✔
438
                if edgeIndex == nil {
147✔
439
                        return ErrGraphNoEdgesFound
×
440
                }
×
441

442
                // Load edge index, recombine each channel with the policies
443
                // loaded above and invoke the callback.
444
                return kvdb.ForAll(
147✔
445
                        edgeIndex, func(k, edgeInfoBytes []byte) error {
645✔
446
                                var chanID [8]byte
498✔
447
                                copy(chanID[:], k)
498✔
448

498✔
449
                                edgeInfoReader := bytes.NewReader(edgeInfoBytes)
498✔
450
                                info, err := deserializeChanEdgeInfo(
498✔
451
                                        edgeInfoReader,
498✔
452
                                )
498✔
453
                                if err != nil {
498✔
454
                                        return err
×
455
                                }
×
456

457
                                policy1 := channelMap[channelMapKey{
498✔
458
                                        nodeKey: info.NodeKey1Bytes,
498✔
459
                                        chanID:  chanID,
498✔
460
                                }]
498✔
461

498✔
462
                                policy2 := channelMap[channelMapKey{
498✔
463
                                        nodeKey: info.NodeKey2Bytes,
498✔
464
                                        chanID:  chanID,
498✔
465
                                }]
498✔
466

498✔
467
                                return cb(&info, policy1, policy2)
498✔
468
                        },
469
                )
470
        }, func() {})
147✔
471
}
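// Illustrative usage sketch (not part of the original file): counting every
// channel known to the store. The value store is an assumed *KVStore; either
// policy passed to the callback may be nil if it has not been announced yet,
// as the method comment above explains.
//
//	var numChannels int
//	err := store.ForEachChannel(func(info *models.ChannelEdgeInfo,
//	        p1, p2 *models.ChannelEdgePolicy) error {
//
//	        numChannels++
//	        return nil
//	})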
472

473
// forEachNodeDirectedChannel iterates through all channels of a given node,
474
// executing the passed callback on the directed edge representing the channel
475
// and its incoming policy. If the callback returns an error, then the iteration
476
// is halted with the error propagated back up to the caller. An optional read
477
// transaction may be provided. If none is provided, a new one will be created.
478
//
479
// Unknown policies are passed into the callback as nil values.
480
func (c *KVStore) forEachNodeDirectedChannel(tx kvdb.RTx,
481
        node route.Vertex, cb func(channel *DirectedChannel) error) error {
707✔
482

707✔
483
        if c.graphCache != nil {
1,171✔
484
                return c.graphCache.ForEachChannel(node, cb)
464✔
485
        }
464✔
486

487
        // Fallback that uses the database.
488
        toNodeCallback := func() route.Vertex {
381✔
489
                return node
135✔
490
        }
135✔
491
        toNodeFeatures, err := c.fetchNodeFeatures(tx, node)
246✔
492
        if err != nil {
246✔
493
                return err
×
494
        }
×
495

496
        dbCallback := func(tx kvdb.RTx, e *models.ChannelEdgeInfo, p1,
246✔
497
                p2 *models.ChannelEdgePolicy) error {
746✔
498

500✔
499
                var cachedInPolicy *models.CachedEdgePolicy
500✔
500
                if p2 != nil {
997✔
501
                        cachedInPolicy = models.NewCachedPolicy(p2)
497✔
502
                        cachedInPolicy.ToNodePubKey = toNodeCallback
497✔
503
                        cachedInPolicy.ToNodeFeatures = toNodeFeatures
497✔
504
                }
497✔
505

506
                var inboundFee lnwire.Fee
500✔
507
                if p1 != nil {
999✔
508
                        // Extract inbound fee. If there is a decoding error,
499✔
509
                        // skip this edge.
499✔
510
                        _, err := p1.ExtraOpaqueData.ExtractRecords(&inboundFee)
499✔
511
                        if err != nil {
500✔
512
                                return nil
1✔
513
                        }
1✔
514
                }
515

516
                directedChannel := &DirectedChannel{
499✔
517
                        ChannelID:    e.ChannelID,
499✔
518
                        IsNode1:      node == e.NodeKey1Bytes,
499✔
519
                        OtherNode:    e.NodeKey2Bytes,
499✔
520
                        Capacity:     e.Capacity,
499✔
521
                        OutPolicySet: p1 != nil,
499✔
522
                        InPolicy:     cachedInPolicy,
499✔
523
                        InboundFee:   inboundFee,
499✔
524
                }
499✔
525

499✔
526
                if node == e.NodeKey2Bytes {
751✔
527
                        directedChannel.OtherNode = e.NodeKey1Bytes
252✔
528
                }
252✔
529

530
                return cb(directedChannel)
499✔
531
        }
532

533
        return nodeTraversal(tx, node[:], c.db, dbCallback)
246✔
534
}
535

536
// fetchNodeFeatures returns the features of a given node. If no features are
537
// known for the node, an empty feature vector is returned. An optional read
538
// transaction may be provided. If none is provided, a new one will be created.
539
func (c *KVStore) fetchNodeFeatures(tx kvdb.RTx,
540
        node route.Vertex) (*lnwire.FeatureVector, error) {
953✔
541

953✔
542
        if c.graphCache != nil {
1,409✔
543
                return c.graphCache.GetFeatures(node), nil
456✔
544
        }
456✔
545

546
        // Fallback that uses the database.
547
        targetNode, err := c.FetchLightningNodeTx(tx, node)
500✔
548
        switch {
500✔
549
        // If the node exists and has features, return them directly.
550
        case err == nil:
489✔
551
                return targetNode.Features, nil
489✔
552

553
        // If we couldn't find a node announcement, populate a blank feature
554
        // vector.
555
        case errors.Is(err, ErrGraphNodeNotFound):
11✔
556
                return lnwire.EmptyFeatureVector(), nil
11✔
557

558
        // Otherwise, bubble the error up.
559
        default:
×
560
                return nil, err
×
561
        }
562
}
563

564
// ForEachNodeDirectedChannel iterates through all channels of a given node,
565
// executing the passed callback on the directed edge representing the channel
566
// and its incoming policy. If the callback returns an error, then the iteration
567
// is halted with the error propagated back up to the caller. If the graphCache
568
// is available, then it will be used to retrieve the node's channels instead
569
// of the database.
570
//
571
// Unknown policies are passed into the callback as nil values.
572
//
573
// NOTE: this is part of the graphdb.NodeTraverser interface.
574
func (c *KVStore) ForEachNodeDirectedChannel(nodePub route.Vertex,
575
        cb func(channel *DirectedChannel) error) error {
114✔
576

114✔
577
        return c.forEachNodeDirectedChannel(nil, nodePub, cb)
114✔
578
}
114✔
579

580
// FetchNodeFeatures returns the features of the given node. If no features are
581
// known for the node, an empty feature vector is returned.
582
// If the graphCache is available, then it will be used to retrieve the node's
583
// features instead of the database.
584
//
585
// NOTE: this is part of the graphdb.NodeTraverser interface.
586
func (c *KVStore) FetchNodeFeatures(nodePub route.Vertex) (
587
        *lnwire.FeatureVector, error) {
90✔
588

90✔
589
        return c.fetchNodeFeatures(nil, nodePub)
90✔
590
}
90✔
591

592
// ForEachNodeCached is similar to forEachNode, but it utilizes the channel
593
// graph cache instead. Note that this doesn't return all the information the
594
// regular forEachNode method does.
595
//
596
// NOTE: The callback contents MUST not be modified.
597
func (c *KVStore) ForEachNodeCached(cb func(node route.Vertex,
598
        chans map[uint64]*DirectedChannel) error) error {
1✔
599

1✔
600
        if c.graphCache != nil {
2✔
601
                return c.graphCache.ForEachNode(cb)
1✔
602
        }
1✔
603

604
        // Otherwise fall back to a version that uses the database directly.
605
        // We'll iterate over each node, then the set of channels for each
606
        // node, and construct a similar callback function signature as the
607
        // main function expects.
608
        return c.forEachNode(func(tx kvdb.RTx,
×
609
                node *models.LightningNode) error {
×
610

×
611
                channels := make(map[uint64]*DirectedChannel)
×
612

×
613
                err := c.ForEachNodeChannelTx(tx, node.PubKeyBytes,
×
614
                        func(tx kvdb.RTx, e *models.ChannelEdgeInfo,
×
615
                                p1 *models.ChannelEdgePolicy,
×
616
                                p2 *models.ChannelEdgePolicy) error {
×
617

×
618
                                toNodeCallback := func() route.Vertex {
×
619
                                        return node.PubKeyBytes
×
620
                                }
×
621
                                toNodeFeatures, err := c.fetchNodeFeatures(
×
622
                                        tx, node.PubKeyBytes,
×
623
                                )
×
624
                                if err != nil {
×
625
                                        return err
×
626
                                }
×
627

628
                                var cachedInPolicy *models.CachedEdgePolicy
×
629
                                if p2 != nil {
×
630
                                        cachedInPolicy =
×
631
                                                models.NewCachedPolicy(p2)
×
632
                                        cachedInPolicy.ToNodePubKey =
×
633
                                                toNodeCallback
×
634
                                        cachedInPolicy.ToNodeFeatures =
×
635
                                                toNodeFeatures
×
636
                                }
×
637

638
                                directedChannel := &DirectedChannel{
×
639
                                        ChannelID: e.ChannelID,
×
640
                                        IsNode1: node.PubKeyBytes ==
×
641
                                                e.NodeKey1Bytes,
×
642
                                        OtherNode:    e.NodeKey2Bytes,
×
643
                                        Capacity:     e.Capacity,
×
644
                                        OutPolicySet: p1 != nil,
×
645
                                        InPolicy:     cachedInPolicy,
×
646
                                }
×
647

×
648
                                if node.PubKeyBytes == e.NodeKey2Bytes {
×
649
                                        directedChannel.OtherNode =
×
650
                                                e.NodeKey1Bytes
×
651
                                }
×
652

653
                                channels[e.ChannelID] = directedChannel
×
654

×
655
                                return nil
×
656
                        })
657
                if err != nil {
×
658
                        return err
×
659
                }
×
660

661
                return cb(node.PubKeyBytes, channels)
×
662
        })
663
}
664

665
// DisabledChannelIDs returns the channel ids of disabled channels.
666
// A channel is disabled when both of the associated ChannelEdgePolicies
667
// have their disabled bit on.
668
func (c *KVStore) DisabledChannelIDs() ([]uint64, error) {
6✔
669
        var disabledChanIDs []uint64
6✔
670
        var chanEdgeFound map[uint64]struct{}
6✔
671

6✔
672
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
12✔
673
                edges := tx.ReadBucket(edgeBucket)
6✔
674
                if edges == nil {
6✔
675
                        return ErrGraphNoEdgesFound
×
676
                }
×
677

678
                disabledEdgePolicyIndex := edges.NestedReadBucket(
6✔
679
                        disabledEdgePolicyBucket,
6✔
680
                )
6✔
681
                if disabledEdgePolicyIndex == nil {
7✔
682
                        return nil
1✔
683
                }
1✔
684

685
                // We iterate over all disabled policies and we add each channel
686
                // that has more than one disabled policy to disabledChanIDs
687
                // array.
688
                return disabledEdgePolicyIndex.ForEach(
5✔
689
                        func(k, v []byte) error {
16✔
690
                                chanID := byteOrder.Uint64(k[:8])
11✔
691
                                _, edgeFound := chanEdgeFound[chanID]
11✔
692
                                if edgeFound {
15✔
693
                                        delete(chanEdgeFound, chanID)
4✔
694
                                        disabledChanIDs = append(
4✔
695
                                                disabledChanIDs, chanID,
4✔
696
                                        )
4✔
697

4✔
698
                                        return nil
4✔
699
                                }
4✔
700

701
                                chanEdgeFound[chanID] = struct{}{}
7✔
702

7✔
703
                                return nil
7✔
704
                        },
705
                )
706
        }, func() {
6✔
707
                disabledChanIDs = nil
6✔
708
                chanEdgeFound = make(map[uint64]struct{})
6✔
709
        })
6✔
710
        if err != nil {
6✔
711
                return nil, err
×
712
        }
×
713

714
        return disabledChanIDs, nil
6✔
715
}
716

717
// ForEachNode iterates through all the stored vertices/nodes in the graph,
718
// executing the passed callback with each node encountered. If the callback
719
// returns an error, then the transaction is aborted and the iteration stops
720
// early. Any operations performed on the NodeTx passed to the call-back are
721
// executed under the same read transaction and so, methods on the NodeTx object
722
// _MUST_ only be called from within the call-back.
723
func (c *KVStore) ForEachNode(cb func(tx NodeRTx) error) error {
123✔
724
        return c.forEachNode(func(tx kvdb.RTx,
123✔
725
                node *models.LightningNode) error {
1,096✔
726

973✔
727
                return cb(newChanGraphNodeTx(tx, c, node))
973✔
728
        })
973✔
729
}
730

731
// forEachNode iterates through all the stored vertices/nodes in the graph,
732
// executing the passed callback with each node encountered. If the callback
733
// returns an error, then the transaction is aborted and the iteration stops
734
// early.
735
//
736
// TODO(roasbeef): add iterator interface to allow for memory efficient graph
737
// traversal when graph gets mega.
738
func (c *KVStore) forEachNode(
739
        cb func(kvdb.RTx, *models.LightningNode) error) error {
131✔
740

131✔
741
        traversal := func(tx kvdb.RTx) error {
262✔
742
                // First grab the nodes bucket which stores the mapping from
131✔
743
                // pubKey to node information.
131✔
744
                nodes := tx.ReadBucket(nodeBucket)
131✔
745
                if nodes == nil {
131✔
746
                        return ErrGraphNotFound
×
747
                }
×
748

749
                return nodes.ForEach(func(pubKey, nodeBytes []byte) error {
1,551✔
750
                        // If this is the source key, then we skip this
1,420✔
751
                        // iteration as the value for this key is a pubKey
1,420✔
752
                        // rather than raw node information.
1,420✔
753
                        if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 {
1,682✔
754
                                return nil
262✔
755
                        }
262✔
756

757
                        nodeReader := bytes.NewReader(nodeBytes)
1,161✔
758
                        node, err := deserializeLightningNode(nodeReader)
1,161✔
759
                        if err != nil {
1,161✔
760
                                return err
×
761
                        }
×
762

763
                        // Execute the callback, the transaction will abort if
764
                        // this returns an error.
765
                        return cb(tx, &node)
1,161✔
766
                })
767
        }
768

769
        return kvdb.View(c.db, traversal, func() {})
262✔
770
}
771

772
// ForEachNodeCacheable iterates through all the stored vertices/nodes in the
773
// graph, executing the passed callback with each node encountered. If the
774
// callback returns an error, then the transaction is aborted and the iteration
775
// stops early.
776
func (c *KVStore) ForEachNodeCacheable(cb func(route.Vertex,
777
        *lnwire.FeatureVector) error) error {
144✔
778

144✔
779
        traversal := func(tx kvdb.RTx) error {
288✔
780
                // First grab the nodes bucket which stores the mapping from
144✔
781
                // pubKey to node information.
144✔
782
                nodes := tx.ReadBucket(nodeBucket)
144✔
783
                if nodes == nil {
144✔
784
                        return ErrGraphNotFound
×
785
                }
×
786

787
                return nodes.ForEach(func(pubKey, nodeBytes []byte) error {
549✔
788
                        // If this is the source key, then we skip this
405✔
789
                        // iteration as the value for this key is a pubKey
405✔
790
                        // rather than raw node information.
405✔
791
                        if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 {
690✔
792
                                return nil
285✔
793
                        }
285✔
794

795
                        nodeReader := bytes.NewReader(nodeBytes)
123✔
796
                        node, features, err := deserializeLightningNodeCacheable( //nolint:ll
123✔
797
                                nodeReader,
123✔
798
                        )
123✔
799
                        if err != nil {
123✔
800
                                return err
×
801
                        }
×
802

803
                        // Execute the callback, the transaction will abort if
804
                        // this returns an error.
805
                        return cb(node, features)
123✔
806
                })
807
        }
808

809
        return kvdb.View(c.db, traversal, func() {})
288✔
810
}
811

812
// SourceNode returns the source node of the graph. The source node is treated
813
// as the center node within a star-graph. This method may be used to kick off
814
// a path finding algorithm in order to explore the reachability of another
815
// node based off the source node.
816
func (c *KVStore) SourceNode() (*models.LightningNode, error) {
234✔
817
        var source *models.LightningNode
234✔
818
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
468✔
819
                // First grab the nodes bucket which stores the mapping from
234✔
820
                // pubKey to node information.
234✔
821
                nodes := tx.ReadBucket(nodeBucket)
234✔
822
                if nodes == nil {
234✔
823
                        return ErrGraphNotFound
×
824
                }
×
825

826
                node, err := c.sourceNode(nodes)
234✔
827
                if err != nil {
235✔
828
                        return err
1✔
829
                }
1✔
830
                source = node
233✔
831

233✔
832
                return nil
233✔
833
        }, func() {
234✔
834
                source = nil
234✔
835
        })
234✔
836
        if err != nil {
235✔
837
                return nil, err
1✔
838
        }
1✔
839

840
        return source, nil
233✔
841
}
842

843
// sourceNode uses an existing database transaction and returns the source node
844
// of the graph. The source node is treated as the center node within a
845
// star-graph. This method may be used to kick off a path finding algorithm in
846
// order to explore the reachability of another node based off the source node.
847
func (c *KVStore) sourceNode(nodes kvdb.RBucket) (*models.LightningNode,
848
        error) {
499✔
849

499✔
850
        selfPub := nodes.Get(sourceKey)
499✔
851
        if selfPub == nil {
500✔
852
                return nil, ErrSourceNodeNotSet
1✔
853
        }
1✔
854

855
        // With the pubKey of the source node retrieved, we're able to
856
        // fetch the full node information.
857
        node, err := fetchLightningNode(nodes, selfPub)
498✔
858
        if err != nil {
498✔
859
                return nil, err
×
860
        }
×
861

862
        return &node, nil
498✔
863
}
864

865
// SetSourceNode sets the source node within the graph database. The source
866
// node is to be used as the center of a star-graph within path finding
867
// algorithms.
868
func (c *KVStore) SetSourceNode(node *models.LightningNode) error {
120✔
869
        nodePubBytes := node.PubKeyBytes[:]
120✔
870

120✔
871
        return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
240✔
872
                // First grab the nodes bucket which stores the mapping from
120✔
873
                // pubKey to node information.
120✔
874
                nodes, err := tx.CreateTopLevelBucket(nodeBucket)
120✔
875
                if err != nil {
120✔
876
                        return err
×
877
                }
×
878

879
                // Next we create the mapping from source to the targeted
880
                // public key.
881
                if err := nodes.Put(sourceKey, nodePubBytes); err != nil {
120✔
882
                        return err
×
883
                }
×
884

885
                // Finally, we commit the information of the lightning node
886
                // itself.
887
                return addLightningNode(tx, node)
120✔
888
        }, func() {})
120✔
889
}
890

891
// AddLightningNode adds a vertex/node to the graph database. If the node is not
892
// in the database from before, this will add a new, unconnected one to the
893
// graph. If it is present from before, this will update that node's
894
// information. Note that this method is expected to only be called to update an
895
// already present node from a node announcement, or to insert a node found in a
896
// channel update.
897
//
898
// TODO(roasbeef): also need sig of announcement.
899
func (c *KVStore) AddLightningNode(node *models.LightningNode,
900
        op ...batch.SchedulerOption) error {
803✔
901

803✔
902
        r := &batch.Request{
803✔
903
                Update: func(tx kvdb.RwTx) error {
1,606✔
904
                        if c.graphCache != nil {
1,419✔
905
                                c.graphCache.AddNodeFeatures(
616✔
906
                                        node.PubKeyBytes, node.Features,
616✔
907
                                )
616✔
908
                        }
616✔
909

910
                        return addLightningNode(tx, node)
803✔
911
                },
912
        }
913

914
        for _, f := range op {
806✔
915
                f(r)
3✔
916
        }
3✔
917

918
        return c.nodeScheduler.Execute(r)
803✔
919
}
920

921
func addLightningNode(tx kvdb.RwTx, node *models.LightningNode) error {
989✔
922
        nodes, err := tx.CreateTopLevelBucket(nodeBucket)
989✔
923
        if err != nil {
989✔
924
                return err
×
925
        }
×
926

927
        aliases, err := nodes.CreateBucketIfNotExists(aliasIndexBucket)
989✔
928
        if err != nil {
989✔
929
                return err
×
930
        }
×
931

932
        updateIndex, err := nodes.CreateBucketIfNotExists(
989✔
933
                nodeUpdateIndexBucket,
989✔
934
        )
989✔
935
        if err != nil {
989✔
936
                return err
×
937
        }
×
938

939
        return putLightningNode(nodes, aliases, updateIndex, node)
989✔
940
}
941

942
// LookupAlias attempts to return the alias as advertised by the target node.
943
// TODO(roasbeef): currently assumes that aliases are unique...
944
func (c *KVStore) LookupAlias(pub *btcec.PublicKey) (string, error) {
5✔
945
        var alias string
5✔
946

5✔
947
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
10✔
948
                nodes := tx.ReadBucket(nodeBucket)
5✔
949
                if nodes == nil {
5✔
950
                        return ErrGraphNodesNotFound
×
951
                }
×
952

953
                aliases := nodes.NestedReadBucket(aliasIndexBucket)
5✔
954
                if aliases == nil {
5✔
955
                        return ErrGraphNodesNotFound
×
956
                }
×
957

958
                nodePub := pub.SerializeCompressed()
5✔
959
                a := aliases.Get(nodePub)
5✔
960
                if a == nil {
6✔
961
                        return ErrNodeAliasNotFound
1✔
962
                }
1✔
963

964
                // TODO(roasbeef): should actually be using the utf-8
965
                // package...
966
                alias = string(a)
4✔
967

4✔
968
                return nil
4✔
969
        }, func() {
5✔
970
                alias = ""
5✔
971
        })
5✔
972
        if err != nil {
6✔
973
                return "", err
1✔
974
        }
1✔
975

976
        return alias, nil
4✔
977
}
978

979
// DeleteLightningNode starts a new database transaction to remove a vertex/node
980
// from the database according to the node's public key.
981
func (c *KVStore) DeleteLightningNode(nodePub route.Vertex) error {
3✔
982
        // TODO(roasbeef): ensure dangling edges are removed...
3✔
983
        return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
6✔
984
                nodes := tx.ReadWriteBucket(nodeBucket)
3✔
985
                if nodes == nil {
3✔
986
                        return ErrGraphNodeNotFound
×
987
                }
×
988

989
                if c.graphCache != nil {
6✔
990
                        c.graphCache.RemoveNode(nodePub)
3✔
991
                }
3✔
992

993
                return c.deleteLightningNode(nodes, nodePub[:])
3✔
994
        }, func() {})
3✔
995
}
996

997
// deleteLightningNode uses an existing database transaction to remove a
998
// vertex/node from the database according to the node's public key.
999
func (c *KVStore) deleteLightningNode(nodes kvdb.RwBucket,
1000
        compressedPubKey []byte) error {
65✔
1001

65✔
1002
        aliases := nodes.NestedReadWriteBucket(aliasIndexBucket)
65✔
1003
        if aliases == nil {
65✔
1004
                return ErrGraphNodesNotFound
×
1005
        }
×
1006

1007
        if err := aliases.Delete(compressedPubKey); err != nil {
65✔
1008
                return err
×
1009
        }
×
1010

1011
        // Before we delete the node, we'll fetch its current state so we can
1012
        // determine when its last update was to clear out the node update
1013
        // index.
1014
        node, err := fetchLightningNode(nodes, compressedPubKey)
65✔
1015
        if err != nil {
65✔
1016
                return err
×
1017
        }
×
1018

1019
        if err := nodes.Delete(compressedPubKey); err != nil {
65✔
1020
                return err
×
1021
        }
×
1022

1023
        // Finally, we'll delete the index entry for the node within the
1024
        // nodeUpdateIndexBucket as this node is no longer active, so we don't
1025
        // need to track its last update.
1026
        nodeUpdateIndex := nodes.NestedReadWriteBucket(nodeUpdateIndexBucket)
65✔
1027
        if nodeUpdateIndex == nil {
65✔
1028
                return ErrGraphNodesNotFound
×
1029
        }
×
1030

1031
        // In order to delete the entry, we'll need to reconstruct the key for
1032
        // its last update.
1033
        updateUnix := uint64(node.LastUpdate.Unix())
65✔
1034
        var indexKey [8 + 33]byte
65✔
1035
        byteOrder.PutUint64(indexKey[:8], updateUnix)
65✔
1036
        copy(indexKey[8:], compressedPubKey)
65✔
1037

65✔
1038
        return nodeUpdateIndex.Delete(indexKey[:])
65✔
1039
}
1040

1041
// AddChannelEdge adds a new (undirected, blank) edge to the graph database. An
1042
// undirected edge between the two target nodes is created. The information stored
1043
// denotes the static attributes of the channel, such as the channelID, the keys
1044
// involved in creation of the channel, and the set of features that the channel
1045
// supports. The chanPoint and chanID are used to uniquely identify the edge
1046
// globally within the database.
1047
func (c *KVStore) AddChannelEdge(edge *models.ChannelEdgeInfo,
1048
        op ...batch.SchedulerOption) error {
1,721✔
1049

1,721✔
1050
        var alreadyExists bool
1,721✔
1051
        r := &batch.Request{
1,721✔
1052
                Reset: func() {
3,442✔
1053
                        alreadyExists = false
1,721✔
1054
                },
1,721✔
1055
                Update: func(tx kvdb.RwTx) error {
1,721✔
1056
                        err := c.addChannelEdge(tx, edge)
1,721✔
1057

1,721✔
1058
                        // Silence ErrEdgeAlreadyExist so that the batch can
1,721✔
1059
                        // succeed, but propagate the error via local state.
1,721✔
1060
                        if errors.Is(err, ErrEdgeAlreadyExist) {
1,955✔
1061
                                alreadyExists = true
234✔
1062
                                return nil
234✔
1063
                        }
234✔
1064

1065
                        return err
1,487✔
1066
                },
1067
                OnCommit: func(err error) error {
1,721✔
1068
                        switch {
1,721✔
1069
                        case err != nil:
×
1070
                                return err
×
1071
                        case alreadyExists:
234✔
1072
                                return ErrEdgeAlreadyExist
234✔
1073
                        default:
1,487✔
1074
                                c.rejectCache.remove(edge.ChannelID)
1,487✔
1075
                                c.chanCache.remove(edge.ChannelID)
1,487✔
1076
                                return nil
1,487✔
1077
                        }
1078
                },
1079
        }
1080

1081
        for _, f := range op {
1,724✔
1082
                if f == nil {
3✔
1083
                        return fmt.Errorf("nil scheduler option was used")
×
1084
                }
×
1085

1086
                f(r)
3✔
1087
        }
1088

1089
        return c.chanScheduler.Execute(r)
1,721✔
1090
}
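// Illustrative usage sketch (not part of the original file): inserting a
// minimal channel edge and handling the idempotency error surfaced via
// OnCommit above. Only fields referenced elsewhere in this file are set; the
// values store, chanID, node1, node2, chanPoint and capacity are assumed.
//
//	edge := &models.ChannelEdgeInfo{
//	        ChannelID:     chanID,
//	        NodeKey1Bytes: node1,     // 33-byte compressed public keys
//	        NodeKey2Bytes: node2,
//	        ChannelPoint:  chanPoint, // funding outpoint
//	        Capacity:      capacity,
//	}
//	if err := store.AddChannelEdge(edge); errors.Is(err, ErrEdgeAlreadyExist) {
//	        // The edge was already known; AddChannelEdge is idempotent.
//	}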
1091

1092
// addChannelEdge is the private form of AddChannelEdge that allows callers to
1093
// utilize an existing db transaction.
1094
func (c *KVStore) addChannelEdge(tx kvdb.RwTx,
1095
        edge *models.ChannelEdgeInfo) error {
1,721✔
1096

1,721✔
1097
        // Construct the channel's primary key which is the 8-byte channel ID.
1,721✔
1098
        var chanKey [8]byte
1,721✔
1099
        binary.BigEndian.PutUint64(chanKey[:], edge.ChannelID)
1,721✔
1100

1,721✔
1101
        nodes, err := tx.CreateTopLevelBucket(nodeBucket)
1,721✔
1102
        if err != nil {
1,721✔
1103
                return err
×
1104
        }
×
1105
        edges, err := tx.CreateTopLevelBucket(edgeBucket)
1,721✔
1106
        if err != nil {
1,721✔
1107
                return err
×
1108
        }
×
1109
        edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket)
1,721✔
1110
        if err != nil {
1,721✔
1111
                return err
×
1112
        }
×
1113
        chanIndex, err := edges.CreateBucketIfNotExists(channelPointBucket)
1,721✔
1114
        if err != nil {
1,721✔
1115
                return err
×
1116
        }
×
1117

1118
        // First, attempt to check if this edge has already been created. If
1119
        // so, then we can exit early as this method is meant to be idempotent.
1120
        if edgeInfo := edgeIndex.Get(chanKey[:]); edgeInfo != nil {
1,955✔
1121
                return ErrEdgeAlreadyExist
234✔
1122
        }
234✔
1123

1124
        if c.graphCache != nil {
2,784✔
1125
                c.graphCache.AddChannel(edge, nil, nil)
1,297✔
1126
        }
1,297✔
1127

1128
        // Before we insert the channel into the database, we'll ensure that
1129
        // both nodes already exist in the channel graph. If either node
1130
        // doesn't, then we'll insert a "shell" node that just includes its
1131
        // public key, so subsequent validation and queries can work properly.
1132
        _, node1Err := fetchLightningNode(nodes, edge.NodeKey1Bytes[:])
1,487✔
1133
        switch {
1,487✔
1134
        case errors.Is(node1Err, ErrGraphNodeNotFound):
21✔
1135
                node1Shell := models.LightningNode{
21✔
1136
                        PubKeyBytes:          edge.NodeKey1Bytes,
21✔
1137
                        HaveNodeAnnouncement: false,
21✔
1138
                }
21✔
1139
                err := addLightningNode(tx, &node1Shell)
21✔
1140
                if err != nil {
21✔
1141
                        return fmt.Errorf("unable to create shell node "+
×
1142
                                "for: %x: %w", edge.NodeKey1Bytes, err)
×
1143
                }
×
1144
        case node1Err != nil:
×
1145
                return node1Err
×
1146
        }
1147

1148
        _, node2Err := fetchLightningNode(nodes, edge.NodeKey2Bytes[:])
1,487✔
1149
        switch {
1,487✔
1150
        case errors.Is(node2Err, ErrGraphNodeNotFound):
54✔
1151
                node2Shell := models.LightningNode{
54✔
1152
                        PubKeyBytes:          edge.NodeKey2Bytes,
54✔
1153
                        HaveNodeAnnouncement: false,
54✔
1154
                }
54✔
1155
                err := addLightningNode(tx, &node2Shell)
54✔
1156
                if err != nil {
54✔
1157
                        return fmt.Errorf("unable to create shell node "+
×
1158
                                "for: %x: %w", edge.NodeKey2Bytes, err)
×
1159
                }
×
1160
        case node2Err != nil:
×
1161
                return node2Err
×
1162
        }
1163

1164
        // If the edge hasn't been created yet, then we'll first add it to the
1165
        // edge index in order to associate the edge between two nodes and also
1166
        // store the static components of the channel.
1167
        if err := putChanEdgeInfo(edgeIndex, edge, chanKey); err != nil {
1,487✔
1168
                return err
×
1169
        }
×
1170

1171
        // Mark edge policies for both sides as unknown. This is to enable
1172
        // efficient incoming channel lookup for a node.
1173
        keys := []*[33]byte{
1,487✔
1174
                &edge.NodeKey1Bytes,
1,487✔
1175
                &edge.NodeKey2Bytes,
1,487✔
1176
        }
1,487✔
1177
        for _, key := range keys {
4,458✔
1178
                err := putChanEdgePolicyUnknown(edges, edge.ChannelID, key[:])
2,971✔
1179
                if err != nil {
2,971✔
1180
                        return err
×
1181
                }
×
1182
        }
1183

1184
        // Finally we add it to the channel index which maps channel points
1185
        // (outpoints) to the shorter channel ID's.
1186
        var b bytes.Buffer
1,487✔
1187
        if err := WriteOutpoint(&b, &edge.ChannelPoint); err != nil {
1,487✔
1188
                return err
×
1189
        }
×
1190

1191
        return chanIndex.Put(b.Bytes(), chanKey[:])
1,487✔
1192
}
1193

1194
// HasChannelEdge returns true if the database knows of a channel edge with the
1195
// passed channel ID, and false otherwise. If an edge with that ID is found
1196
// within the graph, then two time stamps representing the last time the edge
1197
// was updated for both directed edges are returned along with the boolean. If
1198
// it is not found, then the zombie index is checked and its result is returned
1199
// as the second boolean.
1200
func (c *KVStore) HasChannelEdge(
1201
        chanID uint64) (time.Time, time.Time, bool, bool, error) {
211✔
1202

211✔
1203
        var (
211✔
1204
                upd1Time time.Time
211✔
1205
                upd2Time time.Time
211✔
1206
                exists   bool
211✔
1207
                isZombie bool
211✔
1208
        )
211✔
1209

211✔
1210
        // We'll query the cache with the shared lock held to allow multiple
211✔
1211
        // readers to access values in the cache concurrently if they exist.
211✔
1212
        c.cacheMu.RLock()
211✔
1213
        if entry, ok := c.rejectCache.get(chanID); ok {
286✔
1214
                c.cacheMu.RUnlock()
75✔
1215
                upd1Time = time.Unix(entry.upd1Time, 0)
75✔
1216
                upd2Time = time.Unix(entry.upd2Time, 0)
75✔
1217
                exists, isZombie = entry.flags.unpack()
75✔
1218

75✔
1219
                return upd1Time, upd2Time, exists, isZombie, nil
75✔
1220
        }
75✔
1221
        c.cacheMu.RUnlock()
139✔
1222

139✔
1223
        c.cacheMu.Lock()
139✔
1224
        defer c.cacheMu.Unlock()
139✔
1225

139✔
1226
        // The item was not found with the shared lock, so we'll acquire the
139✔
1227
        // exclusive lock and check the cache again in case another method added
139✔
1228
        // the entry to the cache while no lock was held.
139✔
1229
        if entry, ok := c.rejectCache.get(chanID); ok {
145✔
1230
                upd1Time = time.Unix(entry.upd1Time, 0)
6✔
1231
                upd2Time = time.Unix(entry.upd2Time, 0)
6✔
1232
                exists, isZombie = entry.flags.unpack()
6✔
1233

6✔
1234
                return upd1Time, upd2Time, exists, isZombie, nil
6✔
1235
        }
6✔
1236

1237
        if err := kvdb.View(c.db, func(tx kvdb.RTx) error {
272✔
1238
                edges := tx.ReadBucket(edgeBucket)
136✔
1239
                if edges == nil {
136✔
1240
                        return ErrGraphNoEdgesFound
×
1241
                }
×
1242
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
136✔
1243
                if edgeIndex == nil {
136✔
1244
                        return ErrGraphNoEdgesFound
×
1245
                }
×
1246

1247
                var channelID [8]byte
136✔
1248
                byteOrder.PutUint64(channelID[:], chanID)
136✔
1249

136✔
1250
                // If the edge doesn't exist, then we'll also check our zombie
136✔
1251
                // index.
136✔
1252
                if edgeIndex.Get(channelID[:]) == nil {
217✔
1253
                        exists = false
81✔
1254
                        zombieIndex := edges.NestedReadBucket(zombieBucket)
81✔
1255
                        if zombieIndex != nil {
162✔
1256
                                isZombie, _, _ = isZombieEdge(
81✔
1257
                                        zombieIndex, chanID,
81✔
1258
                                )
81✔
1259
                        }
81✔
1260

1261
                        return nil
81✔
1262
                }
1263

1264
                exists = true
58✔
1265
                isZombie = false
58✔
1266

58✔
1267
                // If the channel has been found in the graph, then retrieve
58✔
1268
                // the edges itself so we can return the last updated
58✔
1269
                // timestamps.
58✔
1270
                nodes := tx.ReadBucket(nodeBucket)
58✔
1271
                if nodes == nil {
58✔
1272
                        return ErrGraphNodeNotFound
×
1273
                }
×
1274

1275
                e1, e2, err := fetchChanEdgePolicies(
58✔
1276
                        edgeIndex, edges, channelID[:],
58✔
1277
                )
58✔
1278
                if err != nil {
58✔
1279
                        return err
×
1280
                }
×
1281

1282
                // As we may have only one of the edges populated, only set the
1283
                // update time if the edge was found in the database.
1284
                if e1 != nil {
79✔
1285
                        upd1Time = e1.LastUpdate
21✔
1286
                }
21✔
1287
                if e2 != nil {
77✔
1288
                        upd2Time = e2.LastUpdate
19✔
1289
                }
19✔
1290

1291
                return nil
58✔
1292
        }, func() {}); err != nil {
136✔
1293
                return time.Time{}, time.Time{}, exists, isZombie, err
×
1294
        }
×
1295

1296
        c.rejectCache.insert(chanID, rejectCacheEntry{
136✔
1297
                upd1Time: upd1Time.Unix(),
136✔
1298
                upd2Time: upd2Time.Unix(),
136✔
1299
                flags:    packRejectFlags(exists, isZombie),
136✔
1300
        })
136✔
1301

136✔
1302
        return upd1Time, upd2Time, exists, isZombie, nil
136✔
1303
}
1304

1305
// AddEdgeProof sets the proof of an existing edge in the graph database.
1306
func (c *KVStore) AddEdgeProof(chanID lnwire.ShortChannelID,
1307
        proof *models.ChannelAuthProof) error {
4✔
1308

4✔
1309
        // Construct the channel's primary key which is the 8-byte channel ID.
4✔
1310
        var chanKey [8]byte
4✔
1311
        binary.BigEndian.PutUint64(chanKey[:], chanID.ToUint64())
4✔
1312

4✔
1313
        return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
8✔
1314
                edges := tx.ReadWriteBucket(edgeBucket)
4✔
1315
                if edges == nil {
4✔
1316
                        return ErrEdgeNotFound
×
1317
                }
×
1318

1319
                edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
4✔
1320
                if edgeIndex == nil {
4✔
1321
                        return ErrEdgeNotFound
×
1322
                }
×
1323

1324
                edge, err := fetchChanEdgeInfo(edgeIndex, chanKey[:])
4✔
1325
                if err != nil {
4✔
1326
                        return err
×
1327
                }
×
1328

1329
                edge.AuthProof = proof
4✔
1330

4✔
1331
                return putChanEdgeInfo(edgeIndex, &edge, chanKey)
4✔
1332
        }, func() {})
4✔
1333
}
1334
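// exampleChanKeyRoundTrip is an illustrative sketch only and is not part of
// the original kv_store.go. AddEdgeProof above keys the edge index by the
// 8-byte big-endian encoding of the short channel ID; this hypothetical
// helper shows that encoding and its decode round trip using only the
// standard library.
func exampleChanKeyRoundTrip(scid uint64) uint64 {
        var chanKey [8]byte
        binary.BigEndian.PutUint64(chanKey[:], scid)

        // Decoding the 8-byte key yields the original short channel ID.
        return binary.BigEndian.Uint64(chanKey[:])
}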

1335
const (
1336
        // pruneTipBytes is the total size of the value which stores a prune
1337
        // entry of the graph in the prune log. The "prune tip" is the last
1338
        // entry in the prune log, and indicates if the channel graph is in
1339
        // sync with the current UTXO state. The structure of the value
1340
        // is: blockHash, taking 32 bytes total.
1341
        pruneTipBytes = 32
1342
)
1343
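// examplePruneLogEntry is an illustrative sketch only and is not part of the
// original kv_store.go. A prune log entry, as written by PruneGraph below,
// maps a 4-byte block height key to a pruneTipBytes-sized value holding the
// block hash; this hypothetical helper assembles such a key/value pair.
func examplePruneLogEntry(height uint32,
        hash chainhash.Hash) ([4]byte, [pruneTipBytes]byte) {

        var key [4]byte
        byteOrder.PutUint32(key[:], height)

        var value [pruneTipBytes]byte
        copy(value[:], hash[:])

        return key, value
}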

1344
// PruneGraph prunes newly closed channels from the channel graph in response
1345
// to a new block being solved on the network. Any transactions which spend the
1346
// funding output of any known channels within the graph will be deleted.
1347
// Additionally, the "prune tip", or the last block which has been used to
1348
// prune the graph is stored so callers can ensure the graph is fully in sync
1349
// with the current UTXO state. A slice of channels that have been closed by
1350
// the target block are returned if the function succeeds without error.
1351
func (c *KVStore) PruneGraph(spentOutputs []*wire.OutPoint,
1352
        blockHash *chainhash.Hash, blockHeight uint32) (
1353
        []*models.ChannelEdgeInfo, error) {
245✔
1354

245✔
1355
        c.cacheMu.Lock()
245✔
1356
        defer c.cacheMu.Unlock()
245✔
1357

245✔
1358
        var chansClosed []*models.ChannelEdgeInfo
245✔
1359

245✔
1360
        err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
490✔
1361
                // First grab the edges bucket which houses the information
245✔
1362
                // we'd like to delete
245✔
1363
                edges, err := tx.CreateTopLevelBucket(edgeBucket)
245✔
1364
                if err != nil {
245✔
UNCOV
1365
                        return err
×
UNCOV
1366
                }
×
1367

1368
                // Next grab the two edge indexes which will also need to be
1369
                // updated.
1370
                edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket)
245✔
1371
                if err != nil {
245✔
UNCOV
1372
                        return err
×
UNCOV
1373
                }
×
1374
                chanIndex, err := edges.CreateBucketIfNotExists(
245✔
1375
                        channelPointBucket,
245✔
1376
                )
245✔
1377
                if err != nil {
245✔
UNCOV
1378
                        return err
×
UNCOV
1379
                }
×
1380
                nodes := tx.ReadWriteBucket(nodeBucket)
245✔
1381
                if nodes == nil {
245✔
1382
                        return ErrSourceNodeNotSet
×
1383
                }
×
1384
                zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
245✔
1385
                if err != nil {
245✔
1386
                        return err
×
1387
                }
×
1388

1389
                // For each of the outpoints that have been spent within the
1390
                // block, we attempt to delete them from the graph, since if that
1391
                // outpoint was a channel, then it has now been closed.
1392
                for _, chanPoint := range spentOutputs {
384✔
1393
                        // TODO(roasbeef): load channel bloom filter, continue
139✔
1394
                        // if NOT if filter
139✔
1395

139✔
1396
                        var opBytes bytes.Buffer
139✔
1397
                        err := WriteOutpoint(&opBytes, chanPoint)
139✔
1398
                        if err != nil {
139✔
UNCOV
1399
                                return err
×
UNCOV
1400
                        }
×
1401

1402
                        // First attempt to see if the channel exists within
1403
                        // the database, if not, then we can exit early.
1404
                        chanID := chanIndex.Get(opBytes.Bytes())
139✔
1405
                        if chanID == nil {
258✔
1406
                                continue
119✔
1407
                        }
1408

1409
                        // Attempt to delete the channel, an ErrEdgeNotFound
1410
                        // will be returned if that outpoint isn't known to be
1411
                        // a channel. If no error is returned, then a channel
1412
                        // was successfully pruned.
1413
                        edgeInfo, err := c.delChannelEdgeUnsafe(
20✔
1414
                                edges, edgeIndex, chanIndex, zombieIndex,
20✔
1415
                                chanID, false, false,
20✔
1416
                        )
20✔
1417
                        if err != nil && !errors.Is(err, ErrEdgeNotFound) {
20✔
UNCOV
1418
                                return err
×
UNCOV
1419
                        }
×
1420

1421
                        chansClosed = append(chansClosed, edgeInfo)
20✔
1422
                }
1423

1424
                metaBucket, err := tx.CreateTopLevelBucket(graphMetaBucket)
245✔
1425
                if err != nil {
245✔
UNCOV
1426
                        return err
×
UNCOV
1427
                }
×
1428

1429
                pruneBucket, err := metaBucket.CreateBucketIfNotExists(
245✔
1430
                        pruneLogBucket,
245✔
1431
                )
245✔
1432
                if err != nil {
245✔
UNCOV
1433
                        return err
×
UNCOV
1434
                }
×
1435

1436
                // With the graph pruned, add a new entry to the prune log,
1437
                // which can be used to check if the graph is fully synced with
1438
                // the current UTXO state.
1439
                var blockHeightBytes [4]byte
245✔
1440
                byteOrder.PutUint32(blockHeightBytes[:], blockHeight)
245✔
1441

245✔
1442
                var newTip [pruneTipBytes]byte
245✔
1443
                copy(newTip[:], blockHash[:])
245✔
1444

245✔
1445
                return pruneBucket.Put(blockHeightBytes[:], newTip[:])
245✔
1446
        }, func() {
245✔
1447
                chansClosed = nil
245✔
1448
        })
245✔
1449
        if err != nil {
245✔
UNCOV
1450
                return nil, err
×
1451
        }
×
1452

1453
        for _, channel := range chansClosed {
265✔
1454
                c.rejectCache.remove(channel.ChannelID)
20✔
1455
                c.chanCache.remove(channel.ChannelID)
20✔
1456
        }
20✔
1457

1458
        return chansClosed, nil
245✔
1459
}
1460

1461
// PruneGraphNodes is a garbage collection method which attempts to prune out
1462
// any nodes from the channel graph that are currently unconnected. This ensures
1463
// that we only maintain a graph of reachable nodes. In the event that a pruned
1464
// node gains more channels, it will be re-added to the graph.
1465
func (c *KVStore) PruneGraphNodes() ([]route.Vertex, error) {
268✔
1466
        var prunedNodes []route.Vertex
268✔
1467
        err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
536✔
1468
                nodes := tx.ReadWriteBucket(nodeBucket)
268✔
1469
                if nodes == nil {
268✔
UNCOV
1470
                        return ErrGraphNodesNotFound
×
UNCOV
1471
                }
×
1472
                edges := tx.ReadWriteBucket(edgeBucket)
268✔
1473
                if edges == nil {
268✔
UNCOV
1474
                        return ErrGraphNotFound
×
UNCOV
1475
                }
×
1476
                edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
268✔
1477
                if edgeIndex == nil {
268✔
UNCOV
1478
                        return ErrGraphNoEdgesFound
×
UNCOV
1479
                }
×
1480

1481
                var err error
268✔
1482
                prunedNodes, err = c.pruneGraphNodes(nodes, edgeIndex)
268✔
1483
                if err != nil {
268✔
UNCOV
1484
                        return err
×
1485
                }
×
1486

1487
                return nil
268✔
1488
        }, func() {
268✔
1489
                prunedNodes = nil
268✔
1490
        })
268✔
1491

1492
        return prunedNodes, err
268✔
1493
}
1494
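// examplePruneGraphNodes is an illustrative sketch only and is not part of
// the original kv_store.go. A caller that wants to garbage collect
// unconnected nodes simply invokes PruneGraphNodes and inspects the returned
// vertices; the store argument here is a hypothetical, already-opened
// *KVStore.
func examplePruneGraphNodes(store *KVStore) error {
        prunedNodes, err := store.PruneGraphNodes()
        if err != nil {
                return err
        }

        for _, node := range prunedNodes {
                log.Debugf("Pruned unconnected node %x", node[:])
        }

        return nil
}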

1495
// pruneGraphNodes attempts to remove any nodes from the graph that have had a
1496
// channel closed within the current block. If the node still has existing
1497
// channels in the graph, this will act as a no-op.
1498
func (c *KVStore) pruneGraphNodes(nodes kvdb.RwBucket,
1499
        edgeIndex kvdb.RwBucket) ([]route.Vertex, error) {
268✔
1500

268✔
1501
        log.Trace("Pruning nodes from graph with no open channels")
268✔
1502

268✔
1503
        // We'll retrieve the graph's source node to ensure we don't remove it
268✔
1504
        // even if it no longer has any open channels.
268✔
1505
        sourceNode, err := c.sourceNode(nodes)
268✔
1506
        if err != nil {
268✔
NEW
1507
                return nil, err
×
UNCOV
1508
        }
×
1509

1510
        // We'll use this map to keep count of the number of references to a node
1511
        // in the graph. A node should only be removed once it has no more
1512
        // references in the graph.
1513
        nodeRefCounts := make(map[[33]byte]int)
268✔
1514
        err = nodes.ForEach(func(pubKey, nodeBytes []byte) error {
1,586✔
1515
                // If this is the source key, then we skip this
1,318✔
1516
                // iteration as the value for this key is a pubKey
1,318✔
1517
                // rather than raw node information.
1,318✔
1518
                if bytes.Equal(pubKey, sourceKey) || len(pubKey) != 33 {
2,116✔
1519
                        return nil
798✔
1520
                }
798✔
1521

1522
                var nodePub [33]byte
523✔
1523
                copy(nodePub[:], pubKey)
523✔
1524
                nodeRefCounts[nodePub] = 0
523✔
1525

523✔
1526
                return nil
523✔
1527
        })
1528
        if err != nil {
268✔
UNCOV
1529
                return nil, err
×
UNCOV
1530
        }
×
1531

1532
        // To ensure we never delete the source node, we'll start off by
1533
        // bumping its ref count to 1.
1534
        nodeRefCounts[sourceNode.PubKeyBytes] = 1
268✔
1535

268✔
1536
        // Next, we'll run through the edgeIndex which maps a channel ID to the
268✔
1537
        // edge info. We'll use this scan to populate our reference count map
268✔
1538
        // above.
268✔
1539
        err = edgeIndex.ForEach(func(chanID, edgeInfoBytes []byte) error {
505✔
1540
                // The first 66 bytes of the edge info contain the pubkeys of
237✔
1541
                // the nodes that this edge attaches. We'll extract them, and
237✔
1542
                // add them to the ref count map.
237✔
1543
                var node1, node2 [33]byte
237✔
1544
                copy(node1[:], edgeInfoBytes[:33])
237✔
1545
                copy(node2[:], edgeInfoBytes[33:])
237✔
1546

237✔
1547
                // With the nodes extracted, we'll increase the ref count of
237✔
1548
                // each of the nodes.
237✔
1549
                nodeRefCounts[node1]++
237✔
1550
                nodeRefCounts[node2]++
237✔
1551

237✔
1552
                return nil
237✔
1553
        })
237✔
1554
        if err != nil {
268✔
UNCOV
1555
                return nil, err
×
UNCOV
1556
        }
×
1557

1558
        // Finally, we'll make a second pass over the set of nodes, and delete
1559
        // any nodes that have a ref count of zero.
1560
        var pruned []route.Vertex
268✔
1561
        for nodePubKey, refCount := range nodeRefCounts {
791✔
1562
                // If the ref count of the node isn't zero, then we can safely
523✔
1563
                // skip it as it still has edges to or from it within the
523✔
1564
                // graph.
523✔
1565
                if refCount != 0 {
987✔
1566
                        continue
464✔
1567
                }
1568

1569
                // If we reach this point, then there are no longer any edges
1570
                // that connect this node, so we can delete it.
1571
                err := c.deleteLightningNode(nodes, nodePubKey[:])
62✔
1572
                if err != nil {
62✔
UNCOV
1573
                        if errors.Is(err, ErrGraphNodeNotFound) ||
×
UNCOV
1574
                                errors.Is(err, ErrGraphNodesNotFound) {
×
NEW
1575

×
UNCOV
1576
                                log.Warnf("Unable to prune node %x from the "+
×
UNCOV
1577
                                        "graph: %v", nodePubKey, err)
×
UNCOV
1578
                                continue
×
1579
                        }
1580

UNCOV
1581
                        return nil, err
×
1582
                }
1583

1584
                log.Infof("Pruned unconnected node %x from channel graph",
62✔
1585
                        nodePubKey[:])
62✔
1586

62✔
1587
                pruned = append(pruned, nodePubKey)
62✔
1588
        }
1589

1590
        if len(pruned) > 0 {
314✔
1591
                log.Infof("Pruned %v unconnected nodes from the channel graph",
46✔
1592
                        len(pruned))
46✔
1593
        }
46✔
1594

1595
        return pruned, err
268✔
1596
}
1597
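// exampleEdgeNodeKeys is an illustrative sketch only and is not part of the
// original kv_store.go. As noted in pruneGraphNodes above, the first 66
// bytes of a serialized edge info hold the two 33-byte node public keys of
// the channel; this hypothetical helper extracts them the same way the
// reference counting loop does.
func exampleEdgeNodeKeys(edgeInfoBytes []byte) (node1, node2 [33]byte, ok bool) {
        if len(edgeInfoBytes) < 66 {
                return node1, node2, false
        }

        copy(node1[:], edgeInfoBytes[:33])
        copy(node2[:], edgeInfoBytes[33:66])

        return node1, node2, true
}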

1598
// DisconnectBlockAtHeight is used to indicate that the block specified
1599
// by the passed height has been disconnected from the main chain. This
1600
// will "rewind" the graph back to the height below, deleting channels
1601
// that are no longer confirmed from the graph. The prune log will be
1602
// set to the last prune height valid for the remaining chain.
1603
// Channels that were removed from the graph resulting from the
1604
// disconnected block are returned.
1605
func (c *KVStore) DisconnectBlockAtHeight(height uint32) (
1606
        []*models.ChannelEdgeInfo, error) {
169✔
1607

169✔
1608
        // Every channel having a ShortChannelID starting at 'height'
169✔
1609
        // will no longer be confirmed.
169✔
1610
        startShortChanID := lnwire.ShortChannelID{
169✔
1611
                BlockHeight: height,
169✔
1612
        }
169✔
1613

169✔
1614
        // Delete everything after this height from the db up until the
169✔
1615
        // SCID alias range.
169✔
1616
        endShortChanID := aliasmgr.StartingAlias
169✔
1617

169✔
1618
        // The block height will be the first 3 bytes of the channel IDs.
169✔
1619
        var chanIDStart [8]byte
169✔
1620
        byteOrder.PutUint64(chanIDStart[:], startShortChanID.ToUint64())
169✔
1621
        var chanIDEnd [8]byte
169✔
1622
        byteOrder.PutUint64(chanIDEnd[:], endShortChanID.ToUint64())
169✔
1623

169✔
1624
        c.cacheMu.Lock()
169✔
1625
        defer c.cacheMu.Unlock()
169✔
1626

169✔
1627
        // Keep track of the channels that are removed from the graph.
169✔
1628
        var removedChans []*models.ChannelEdgeInfo
169✔
1629

169✔
1630
        if err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
338✔
1631
                edges, err := tx.CreateTopLevelBucket(edgeBucket)
169✔
1632
                if err != nil {
169✔
UNCOV
1633
                        return err
×
UNCOV
1634
                }
×
1635
                edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket)
169✔
1636
                if err != nil {
169✔
UNCOV
1637
                        return err
×
UNCOV
1638
                }
×
1639
                chanIndex, err := edges.CreateBucketIfNotExists(
169✔
1640
                        channelPointBucket,
169✔
1641
                )
169✔
1642
                if err != nil {
169✔
UNCOV
1643
                        return err
×
UNCOV
1644
                }
×
1645
                zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
169✔
1646
                if err != nil {
169✔
UNCOV
1647
                        return err
×
1648
                }
×
1649

1650
                // Scan from chanIDStart to chanIDEnd, deleting every
1651
                // found edge.
1652
                // NOTE: we must delete the edges after the cursor loop, since
1653
                // modifying the bucket while traversing is not safe.
1654
                // NOTE: We use a < comparison in bytes.Compare instead of <=
1655
                // so that the StartingAlias itself isn't deleted.
1656
                var keys [][]byte
169✔
1657
                cursor := edgeIndex.ReadWriteCursor()
169✔
1658

169✔
1659
                //nolint:ll
169✔
1660
                for k, _ := cursor.Seek(chanIDStart[:]); k != nil &&
169✔
1661
                        bytes.Compare(k, chanIDEnd[:]) < 0; k, _ = cursor.Next() {
273✔
1662
                        keys = append(keys, k)
104✔
1663
                }
104✔
1664

1665
                for _, k := range keys {
273✔
1666
                        edgeInfo, err := c.delChannelEdgeUnsafe(
104✔
1667
                                edges, edgeIndex, chanIndex, zombieIndex,
104✔
1668
                                k, false, false,
104✔
1669
                        )
104✔
1670
                        if err != nil && !errors.Is(err, ErrEdgeNotFound) {
104✔
UNCOV
1671
                                return err
×
UNCOV
1672
                        }
×
1673

1674
                        removedChans = append(removedChans, edgeInfo)
104✔
1675
                }
1676

1677
                // Delete all the entries in the prune log having a height
1678
                // greater than or equal to the disconnected block.
1679
                metaBucket, err := tx.CreateTopLevelBucket(graphMetaBucket)
169✔
1680
                if err != nil {
169✔
NEW
1681
                        return err
×
UNCOV
1682
                }
×
1683

1684
                pruneBucket, err := metaBucket.CreateBucketIfNotExists(
169✔
1685
                        pruneLogBucket,
169✔
1686
                )
169✔
1687
                if err != nil {
169✔
NEW
1688
                        return err
×
NEW
1689
                }
×
1690

1691
                var pruneKeyStart [4]byte
169✔
1692
                byteOrder.PutUint32(pruneKeyStart[:], height)
169✔
1693

169✔
1694
                var pruneKeyEnd [4]byte
169✔
1695
                byteOrder.PutUint32(pruneKeyEnd[:], math.MaxUint32)
169✔
1696

169✔
1697
                // To avoid modifying the bucket while traversing, we delete
169✔
1698
                // the keys in a second loop.
169✔
1699
                var pruneKeys [][]byte
169✔
1700
                pruneCursor := pruneBucket.ReadWriteCursor()
169✔
1701
                //nolint:ll
169✔
1702
                for k, _ := pruneCursor.Seek(pruneKeyStart[:]); k != nil &&
169✔
1703
                        bytes.Compare(k, pruneKeyEnd[:]) <= 0; k, _ = pruneCursor.Next() {
267✔
1704
                        pruneKeys = append(pruneKeys, k)
98✔
1705
                }
98✔
1706

1707
                for _, k := range pruneKeys {
267✔
1708
                        if err := pruneBucket.Delete(k); err != nil {
98✔
UNCOV
1709
                                return err
×
UNCOV
1710
                        }
×
1711
                }
1712

1713
                return nil
169✔
1714
        }, func() {
169✔
1715
                removedChans = nil
169✔
1716
        }); err != nil {
169✔
UNCOV
1717
                return nil, err
×
UNCOV
1718
        }
×
1719

1720
        for _, channel := range removedChans {
273✔
1721
                c.rejectCache.remove(channel.ChannelID)
104✔
1722
                c.chanCache.remove(channel.ChannelID)
104✔
1723
        }
104✔
1724

1725
        return removedChans, nil
169✔
1726
}
1727
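// examplePackSCID is an illustrative sketch only and is not part of the
// original kv_store.go. A short channel ID packs the block height into its
// upper 3 bytes, the transaction index into the next 3 bytes and the output
// position into the final 2 bytes, which is why DisconnectBlockAtHeight can
// seek to the first channel of a block by setting only BlockHeight. This
// hypothetical helper shows that packing directly.
func examplePackSCID(blockHeight, txIndex uint32, txPosition uint16) uint64 {
        return uint64(blockHeight&0x00ffffff)<<40 |
                uint64(txIndex&0x00ffffff)<<16 |
                uint64(txPosition)
}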

1728
// PruneTip returns the block height and hash of the latest block that has been
1729
// used to prune channels in the graph. Knowing the "prune tip" allows callers
1730
// to tell if the graph is currently in sync with the current best known UTXO
1731
// state.
1732
func (c *KVStore) PruneTip() (*chainhash.Hash, uint32, error) {
56✔
1733
        var (
56✔
1734
                tipHash   chainhash.Hash
56✔
1735
                tipHeight uint32
56✔
1736
        )
56✔
1737

56✔
1738
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
112✔
1739
                graphMeta := tx.ReadBucket(graphMetaBucket)
56✔
1740
                if graphMeta == nil {
56✔
UNCOV
1741
                        return ErrGraphNotFound
×
UNCOV
1742
                }
×
1743
                pruneBucket := graphMeta.NestedReadBucket(pruneLogBucket)
56✔
1744
                if pruneBucket == nil {
56✔
UNCOV
1745
                        return ErrGraphNeverPruned
×
UNCOV
1746
                }
×
1747

1748
                pruneCursor := pruneBucket.ReadCursor()
56✔
1749

56✔
1750
                // The prune key with the largest block height will be our
56✔
1751
                // prune tip.
56✔
1752
                k, v := pruneCursor.Last()
56✔
1753
                if k == nil {
77✔
1754
                        return ErrGraphNeverPruned
21✔
1755
                }
21✔
1756

1757
                // Once we have the prune tip, the value will be the block hash,
1758
                // and the key the block height.
1759
                copy(tipHash[:], v)
38✔
1760
                tipHeight = byteOrder.Uint32(k)
38✔
1761

38✔
1762
                return nil
38✔
1763
        }, func() {})
56✔
1764
        if err != nil {
77✔
1765
                return nil, 0, err
21✔
1766
        }
21✔
1767

1768
        return &tipHash, tipHeight, nil
38✔
1769
}
1770

1771
// DeleteChannelEdges removes edges with the given channel IDs from the
1772
// database and marks them as zombies. This ensures that we're unable to re-add
1773
// it to our database once again. If an edge does not exist within the
1774
// database, then ErrEdgeNotFound will be returned. If strictZombiePruning is
1775
// true, then when we mark these edges as zombies, we'll set up the keys such
1776
// that we require the node that failed to send the fresh update to be the one
1777
// that resurrects the channel from its zombie state. The markZombie bool
1778
// denotes whether or not to mark the channel as a zombie.
1779
func (c *KVStore) DeleteChannelEdges(strictZombiePruning, markZombie bool,
1780
        chanIDs ...uint64) ([]*models.ChannelEdgeInfo, error) {
137✔
1781

137✔
1782
        // TODO(roasbeef): possibly delete from node bucket if node has no more
137✔
1783
        // channels
137✔
1784
        // TODO(roasbeef): don't delete both edges?
137✔
1785

137✔
1786
        c.cacheMu.Lock()
137✔
1787
        defer c.cacheMu.Unlock()
137✔
1788

137✔
1789
        var infos []*models.ChannelEdgeInfo
137✔
1790
        err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
274✔
1791
                edges := tx.ReadWriteBucket(edgeBucket)
137✔
1792
                if edges == nil {
137✔
UNCOV
1793
                        return ErrEdgeNotFound
×
UNCOV
1794
                }
×
1795
                edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
137✔
1796
                if edgeIndex == nil {
137✔
UNCOV
1797
                        return ErrEdgeNotFound
×
UNCOV
1798
                }
×
1799
                chanIndex := edges.NestedReadWriteBucket(channelPointBucket)
137✔
1800
                if chanIndex == nil {
137✔
UNCOV
1801
                        return ErrEdgeNotFound
×
UNCOV
1802
                }
×
1803
                nodes := tx.ReadWriteBucket(nodeBucket)
137✔
1804
                if nodes == nil {
137✔
UNCOV
1805
                        return ErrGraphNodeNotFound
×
UNCOV
1806
                }
×
1807
                zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
137✔
1808
                if err != nil {
137✔
1809
                        return err
×
UNCOV
1810
                }
×
1811

1812
                var rawChanID [8]byte
137✔
1813
                for _, chanID := range chanIDs {
224✔
1814
                        byteOrder.PutUint64(rawChanID[:], chanID)
87✔
1815
                        edgeInfo, err := c.delChannelEdgeUnsafe(
87✔
1816
                                edges, edgeIndex, chanIndex, zombieIndex,
87✔
1817
                                rawChanID[:], markZombie, strictZombiePruning,
87✔
1818
                        )
87✔
1819
                        if err != nil {
147✔
1820
                                return err
60✔
1821
                        }
60✔
1822

1823
                        infos = append(infos, edgeInfo)
27✔
1824
                }
1825

1826
                return nil
77✔
1827
        }, func() {
137✔
1828
                infos = nil
137✔
1829
        })
137✔
1830
        if err != nil {
197✔
1831
                return nil, err
60✔
1832
        }
60✔
1833

1834
        for _, chanID := range chanIDs {
104✔
1835
                c.rejectCache.remove(chanID)
27✔
1836
                c.chanCache.remove(chanID)
27✔
1837
        }
27✔
1838

1839
        return infos, nil
77✔
1840
}
1841
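// exampleDeleteChannelEdges is an illustrative sketch only and is not part
// of the original kv_store.go. DeleteChannelEdges returns ErrEdgeNotFound
// when one of the passed channel IDs is unknown, so a caller that treats a
// missing edge as non-fatal can filter for that error explicitly. The store
// argument is a hypothetical, already-opened *KVStore.
func exampleDeleteChannelEdges(store *KVStore, chanIDs ...uint64) error {
        _, err := store.DeleteChannelEdges(false, true, chanIDs...)
        switch {
        // A missing edge is treated as already deleted here.
        case errors.Is(err, ErrEdgeNotFound):
                return nil

        case err != nil:
                return err
        }

        return nil
}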

1842
// ChannelID attempts to look up the 8-byte compact channel ID which maps to the
1843
// passed channel point (outpoint). If the passed channel doesn't exist within
1844
// the database, then ErrEdgeNotFound is returned.
1845
func (c *KVStore) ChannelID(chanPoint *wire.OutPoint) (uint64, error) {
4✔
1846
        var chanID uint64
4✔
1847
        if err := kvdb.View(c.db, func(tx kvdb.RTx) error {
8✔
1848
                var err error
4✔
1849
                chanID, err = getChanID(tx, chanPoint)
4✔
1850
                return err
4✔
1851
        }, func() {
8✔
1852
                chanID = 0
4✔
1853
        }); err != nil {
7✔
1854
                return 0, err
3✔
1855
        }
3✔
1856

1857
        return chanID, nil
4✔
1858
}
1859

1860
// getChanID returns the assigned channel ID for a given channel point.
1861
func getChanID(tx kvdb.RTx, chanPoint *wire.OutPoint) (uint64, error) {
4✔
1862
        var b bytes.Buffer
4✔
1863
        if err := WriteOutpoint(&b, chanPoint); err != nil {
4✔
UNCOV
1864
                return 0, err
×
UNCOV
1865
        }
×
1866

1867
        edges := tx.ReadBucket(edgeBucket)
4✔
1868
        if edges == nil {
4✔
UNCOV
1869
                return 0, ErrGraphNoEdgesFound
×
UNCOV
1870
        }
×
1871
        chanIndex := edges.NestedReadBucket(channelPointBucket)
4✔
1872
        if chanIndex == nil {
4✔
UNCOV
1873
                return 0, ErrGraphNoEdgesFound
×
UNCOV
1874
        }
×
1875

1876
        chanIDBytes := chanIndex.Get(b.Bytes())
4✔
1877
        if chanIDBytes == nil {
7✔
1878
                return 0, ErrEdgeNotFound
3✔
1879
        }
3✔
1880

1881
        chanID := byteOrder.Uint64(chanIDBytes)
4✔
1882

4✔
1883
        return chanID, nil
4✔
1884
}
1885

1886
// TODO(roasbeef): allow updates to use Batch?
1887

1888
// HighestChanID returns the "highest" known channel ID in the channel graph.
1889
// This represents the "newest" channel from the PoV of the chain. This method
1890
// can be used by peers to quickly determine if their graphs are in sync.
1891
func (c *KVStore) HighestChanID() (uint64, error) {
6✔
1892
        var cid uint64
6✔
1893

6✔
1894
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
12✔
1895
                edges := tx.ReadBucket(edgeBucket)
6✔
1896
                if edges == nil {
6✔
UNCOV
1897
                        return ErrGraphNoEdgesFound
×
UNCOV
1898
                }
×
1899
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
6✔
1900
                if edgeIndex == nil {
6✔
UNCOV
1901
                        return ErrGraphNoEdgesFound
×
UNCOV
1902
                }
×
1903

1904
                // In order to find the highest chan ID, we'll fetch a cursor
1905
                // and use that to seek to the "end" of our known rage.
1906
                cidCursor := edgeIndex.ReadCursor()
6✔
1907

6✔
1908
                lastChanID, _ := cidCursor.Last()
6✔
1909

6✔
1910
                // If there's no key, then this means that we don't actually
6✔
1911
                // know of any channels, so we'll return a predictable error.
6✔
1912
                if lastChanID == nil {
10✔
1913
                        return ErrGraphNoEdgesFound
4✔
1914
                }
4✔
1915

1916
                // Otherwise, we'll deserialize the channel ID and return it
1917
                // to the caller.
1918
                cid = byteOrder.Uint64(lastChanID)
5✔
1919

5✔
1920
                return nil
5✔
1921
        }, func() {
6✔
1922
                cid = 0
6✔
1923
        })
6✔
1924
        if err != nil && !errors.Is(err, ErrGraphNoEdgesFound) {
6✔
UNCOV
1925
                return 0, err
×
UNCOV
1926
        }
×
1927

1928
        return cid, nil
6✔
1929
}
1930
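// exampleSCIDBlockHeight is an illustrative sketch only and is not part of
// the original kv_store.go. Because the block height occupies the most
// significant bytes of a short channel ID, the largest raw channel ID in the
// edge index also belongs to the most recently confirmed channel, which is
// what HighestChanID above relies on. This hypothetical helper recovers the
// block height from such a raw ID.
func exampleSCIDBlockHeight(rawChanID uint64) uint32 {
        return uint32(rawChanID >> 40)
}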

1931
// ChannelEdge represents the complete set of information for a channel edge in
1932
// the known channel graph. This struct couples the core information of the
1933
// edge as well as each of the known advertised edge policies.
1934
type ChannelEdge struct {
1935
        // Info contains all the static information describing the channel.
1936
        Info *models.ChannelEdgeInfo
1937

1938
        // Policy1 points to the "first" edge policy of the channel containing
1939
        // the dynamic information required to properly route through the edge.
1940
        Policy1 *models.ChannelEdgePolicy
1941

1942
        // Policy2 points to the "second" edge policy of the channel containing
1943
        // the dynamic information required to properly route through the edge.
1944
        Policy2 *models.ChannelEdgePolicy
1945

1946
        // Node1 is "node 1" in the channel. This is the node that would have
1947
        // produced Policy1 if it exists.
1948
        Node1 *models.LightningNode
1949

1950
        // Node2 is "node 2" in the channel. This is the node that would have
1951
        // produced Policy2 if it exists.
1952
        Node2 *models.LightningNode
1953
}
1954

1955
// ChanUpdatesInHorizon returns all the known channel edges which have at least
1956
// one edge policy with an update timestamp within the specified horizon.
1957
func (c *KVStore) ChanUpdatesInHorizon(startTime,
1958
        endTime time.Time) ([]ChannelEdge, error) {
145✔
1959

145✔
1960
        // To ensure we don't return duplicate ChannelEdges, we'll use an
145✔
1961
        // additional map to keep track of the edges already seen to prevent
145✔
1962
        // re-adding it.
145✔
1963
        var edgesSeen map[uint64]struct{}
145✔
1964
        var edgesToCache map[uint64]ChannelEdge
145✔
1965
        var edgesInHorizon []ChannelEdge
145✔
1966

145✔
1967
        c.cacheMu.Lock()
145✔
1968
        defer c.cacheMu.Unlock()
145✔
1969

145✔
1970
        var hits int
145✔
1971
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
290✔
1972
                edges := tx.ReadBucket(edgeBucket)
145✔
1973
                if edges == nil {
145✔
UNCOV
1974
                        return ErrGraphNoEdgesFound
×
UNCOV
1975
                }
×
1976
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
145✔
1977
                if edgeIndex == nil {
145✔
UNCOV
1978
                        return ErrGraphNoEdgesFound
×
UNCOV
1979
                }
×
1980
                edgeUpdateIndex := edges.NestedReadBucket(edgeUpdateIndexBucket)
145✔
1981
                if edgeUpdateIndex == nil {
145✔
UNCOV
1982
                        return ErrGraphNoEdgesFound
×
UNCOV
1983
                }
×
1984

1985
                nodes := tx.ReadBucket(nodeBucket)
145✔
1986
                if nodes == nil {
145✔
UNCOV
1987
                        return ErrGraphNodesNotFound
×
UNCOV
1988
                }
×
1989

1990
                // We'll now obtain a cursor to perform a range query within
1991
                // the index to find all channels within the horizon.
1992
                updateCursor := edgeUpdateIndex.ReadCursor()
145✔
1993

145✔
1994
                var startTimeBytes, endTimeBytes [8 + 8]byte
145✔
1995
                byteOrder.PutUint64(
145✔
1996
                        startTimeBytes[:8], uint64(startTime.Unix()),
145✔
1997
                )
145✔
1998
                byteOrder.PutUint64(
145✔
1999
                        endTimeBytes[:8], uint64(endTime.Unix()),
145✔
2000
                )
145✔
2001

145✔
2002
                // With our start and end times constructed, we'll step through
145✔
2003
                // the index collecting the info and policy of each update of
145✔
2004
                // each channel that has a last update within the time range.
145✔
2005
                //
145✔
2006
                //nolint:ll
145✔
2007
                for indexKey, _ := updateCursor.Seek(startTimeBytes[:]); indexKey != nil &&
145✔
2008
                        bytes.Compare(indexKey, endTimeBytes[:]) <= 0; indexKey, _ = updateCursor.Next() {
194✔
2009
                        // We have a new eligible entry, so we'll slice of the
49✔
2010
                        // chan ID so we can query it in the DB.
49✔
2011
                        chanID := indexKey[8:]
49✔
2012

49✔
2013
                        // If we've already retrieved the info and policies for
49✔
2014
                        // this edge, then we can skip it as we don't need to do
49✔
2015
                        // so again.
49✔
2016
                        chanIDInt := byteOrder.Uint64(chanID)
49✔
2017
                        if _, ok := edgesSeen[chanIDInt]; ok {
68✔
2018
                                continue
19✔
2019
                        }
2020

2021
                        if channel, ok := c.chanCache.get(chanIDInt); ok {
42✔
2022
                                hits++
12✔
2023
                                edgesSeen[chanIDInt] = struct{}{}
12✔
2024
                                edgesInHorizon = append(edgesInHorizon, channel)
12✔
2025

12✔
2026
                                continue
12✔
2027
                        }
2028

2029
                        // First, we'll fetch the static edge information.
2030
                        edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID)
21✔
2031
                        if err != nil {
21✔
UNCOV
2032
                                chanID := byteOrder.Uint64(chanID)
×
UNCOV
2033
                                return fmt.Errorf("unable to fetch info for "+
×
UNCOV
2034
                                        "edge with chan_id=%v: %v", chanID, err)
×
UNCOV
2035
                        }
×
2036

2037
                        // With the static information obtained, we'll now
2038
                        // fetch the dynamic policy info.
2039
                        edge1, edge2, err := fetchChanEdgePolicies(
21✔
2040
                                edgeIndex, edges, chanID,
21✔
2041
                        )
21✔
2042
                        if err != nil {
21✔
UNCOV
2043
                                chanID := byteOrder.Uint64(chanID)
×
UNCOV
2044
                                return fmt.Errorf("unable to fetch policies "+
×
UNCOV
2045
                                        "for edge with chan_id=%v: %v", chanID,
×
UNCOV
2046
                                        err)
×
2047
                        }
×
2048

2049
                        node1, err := fetchLightningNode(
21✔
2050
                                nodes, edgeInfo.NodeKey1Bytes[:],
21✔
2051
                        )
21✔
2052
                        if err != nil {
21✔
UNCOV
2053
                                return err
×
UNCOV
2054
                        }
×
2055

2056
                        node2, err := fetchLightningNode(
21✔
2057
                                nodes, edgeInfo.NodeKey2Bytes[:],
21✔
2058
                        )
21✔
2059
                        if err != nil {
21✔
2060
                                return err
×
2061
                        }
×
2062

2063
                        // Finally, we'll collate this edge with the rest of
2064
                        // edges to be returned.
2065
                        edgesSeen[chanIDInt] = struct{}{}
21✔
2066
                        channel := ChannelEdge{
21✔
2067
                                Info:    &edgeInfo,
21✔
2068
                                Policy1: edge1,
21✔
2069
                                Policy2: edge2,
21✔
2070
                                Node1:   &node1,
21✔
2071
                                Node2:   &node2,
21✔
2072
                        }
21✔
2073
                        edgesInHorizon = append(edgesInHorizon, channel)
21✔
2074
                        edgesToCache[chanIDInt] = channel
21✔
2075
                }
2076

2077
                return nil
145✔
2078
        }, func() {
145✔
2079
                edgesSeen = make(map[uint64]struct{})
145✔
2080
                edgesToCache = make(map[uint64]ChannelEdge)
145✔
2081
                edgesInHorizon = nil
145✔
2082
        })
145✔
2083
        switch {
145✔
UNCOV
2084
        case errors.Is(err, ErrGraphNoEdgesFound):
×
UNCOV
2085
                fallthrough
×
UNCOV
2086
        case errors.Is(err, ErrGraphNodesNotFound):
×
UNCOV
2087
                break
×
2088

UNCOV
2089
        case err != nil:
×
UNCOV
2090
                return nil, err
×
2091
        }
2092

2093
        // Insert any edges loaded from disk into the cache.
2094
        for chanid, channel := range edgesToCache {
166✔
2095
                c.chanCache.insert(chanid, channel)
21✔
2096
        }
21✔
2097

2098
        log.Debugf("ChanUpdatesInHorizon hit percentage: %f (%d/%d)",
145✔
2099
                float64(hits)/float64(len(edgesInHorizon)), hits,
145✔
2100
                len(edgesInHorizon))
145✔
2101

145✔
2102
        return edgesInHorizon, nil
145✔
2103
}
2104
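// exampleEdgeUpdateIndexKey is an illustrative sketch only and is not part
// of the original kv_store.go. The edge update index scanned by
// ChanUpdatesInHorizon above keys its entries as an 8-byte update timestamp
// followed by the 8-byte channel ID, which is why the cursor seeks on a
// timestamp prefix and then slices the channel ID out of indexKey[8:]. This
// hypothetical helper builds and splits such a key.
func exampleEdgeUpdateIndexKey(updateTime time.Time,
        chanID uint64) (key [16]byte, gotTime int64, gotChanID uint64) {

        byteOrder.PutUint64(key[:8], uint64(updateTime.Unix()))
        byteOrder.PutUint64(key[8:], chanID)

        gotTime = int64(byteOrder.Uint64(key[:8]))
        gotChanID = byteOrder.Uint64(key[8:])

        return key, gotTime, gotChanID
}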

2105
// NodeUpdatesInHorizon returns all the known lightning nodes which have an
2106
// update timestamp within the passed range. This method can be used by two
2107
// nodes to quickly determine if they have the same set of up to date node
2108
// announcements.
2109
func (c *KVStore) NodeUpdatesInHorizon(startTime,
2110
        endTime time.Time) ([]models.LightningNode, error) {
11✔
2111

11✔
2112
        var nodesInHorizon []models.LightningNode
11✔
2113

11✔
2114
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
22✔
2115
                nodes := tx.ReadBucket(nodeBucket)
11✔
2116
                if nodes == nil {
11✔
UNCOV
2117
                        return ErrGraphNodesNotFound
×
UNCOV
2118
                }
×
2119

2120
                nodeUpdateIndex := nodes.NestedReadBucket(nodeUpdateIndexBucket)
11✔
2121
                if nodeUpdateIndex == nil {
11✔
UNCOV
2122
                        return ErrGraphNodesNotFound
×
UNCOV
2123
                }
×
2124

2125
                // We'll now obtain a cursor to perform a range query within
2126
                // the index to find all node announcements within the horizon.
2127
                updateCursor := nodeUpdateIndex.ReadCursor()
11✔
2128

11✔
2129
                var startTimeBytes, endTimeBytes [8 + 33]byte
11✔
2130
                byteOrder.PutUint64(
11✔
2131
                        startTimeBytes[:8], uint64(startTime.Unix()),
11✔
2132
                )
11✔
2133
                byteOrder.PutUint64(
11✔
2134
                        endTimeBytes[:8], uint64(endTime.Unix()),
11✔
2135
                )
11✔
2136

11✔
2137
                // With our start and end times constructed, we'll step through
11✔
2138
                // the index collecting info for each node within the time
11✔
2139
                // range.
11✔
2140
                //
11✔
2141
                //nolint:ll
11✔
2142
                for indexKey, _ := updateCursor.Seek(startTimeBytes[:]); indexKey != nil &&
11✔
2143
                        bytes.Compare(indexKey, endTimeBytes[:]) <= 0; indexKey, _ = updateCursor.Next() {
43✔
2144
                        nodePub := indexKey[8:]
32✔
2145
                        node, err := fetchLightningNode(nodes, nodePub)
32✔
2146
                        if err != nil {
32✔
UNCOV
2147
                                return err
×
UNCOV
2148
                        }
×
2149

2150
                        nodesInHorizon = append(nodesInHorizon, node)
32✔
2151
                }
2152

2153
                return nil
11✔
2154
        }, func() {
11✔
2155
                nodesInHorizon = nil
11✔
2156
        })
11✔
2157
        switch {
11✔
UNCOV
2158
        case errors.Is(err, ErrGraphNoEdgesFound):
×
UNCOV
2159
                fallthrough
×
UNCOV
2160
        case errors.Is(err, ErrGraphNodesNotFound):
×
UNCOV
2161
                break
×
2162

2163
        case err != nil:
×
UNCOV
2164
                return nil, err
×
2165
        }
2166

2167
        return nodesInHorizon, nil
11✔
2168
}
2169
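// exampleNodeUpdateIndexKey is an illustrative sketch only and is not part
// of the original kv_store.go. The node update index scanned above keys its
// entries as an 8-byte update timestamp followed by the 33-byte node public
// key, which is why NodeUpdatesInHorizon slices the pubkey out of
// indexKey[8:]. This hypothetical helper builds such a key.
func exampleNodeUpdateIndexKey(updateTime time.Time,
        nodePub [33]byte) [8 + 33]byte {

        var key [8 + 33]byte
        byteOrder.PutUint64(key[:8], uint64(updateTime.Unix()))
        copy(key[8:], nodePub[:])

        return key
}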

2170
// FilterKnownChanIDs takes a set of channel IDs and returns the subset of chan
2171
// ID's that we don't know and are not known zombies of the passed set. In other
2172
// words, we perform a set difference of our set of chan ID's and the ones
2173
// passed in. This method can be used by callers to determine the set of
2174
// channels another peer knows of that we don't.
2175
func (c *KVStore) FilterKnownChanIDs(chansInfo []ChannelUpdateInfo,
2176
        isZombieChan func(time.Time, time.Time) bool) ([]uint64, error) {
124✔
2177

124✔
2178
        var newChanIDs []uint64
124✔
2179

124✔
2180
        c.cacheMu.Lock()
124✔
2181
        defer c.cacheMu.Unlock()
124✔
2182

124✔
2183
        err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
248✔
2184
                edges := tx.ReadBucket(edgeBucket)
124✔
2185
                if edges == nil {
124✔
UNCOV
2186
                        return ErrGraphNoEdgesFound
×
UNCOV
2187
                }
×
2188
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
124✔
2189
                if edgeIndex == nil {
124✔
UNCOV
2190
                        return ErrGraphNoEdgesFound
×
UNCOV
2191
                }
×
2192

2193
                // Fetch the zombie index, it may not exist if no edges have
2194
                // ever been marked as zombies. If the index has been
2195
                // initialized, we will use it later to skip known zombie edges.
2196
                zombieIndex := edges.NestedReadBucket(zombieBucket)
124✔
2197

124✔
2198
                // We'll run through the set of chanIDs and collate only the
124✔
2199
                // set of channels that cannot be found within our db.
124✔
2200
                var cidBytes [8]byte
124✔
2201
                for _, info := range chansInfo {
255✔
2202
                        scid := info.ShortChannelID.ToUint64()
131✔
2203
                        byteOrder.PutUint64(cidBytes[:], scid)
131✔
2204

131✔
2205
                        // If the edge is already known, skip it.
131✔
2206
                        if v := edgeIndex.Get(cidBytes[:]); v != nil {
153✔
2207
                                continue
22✔
2208
                        }
2209

2210
                        // If the edge is a known zombie, skip it.
2211
                        if zombieIndex != nil {
224✔
2212
                                isZombie, _, _ := isZombieEdge(
112✔
2213
                                        zombieIndex, scid,
112✔
2214
                                )
112✔
2215

112✔
2216
                                // TODO(ziggie): Make sure that for the strict
112✔
2217
                                // pruning case we compare the pubkeys and
112✔
2218
                                // whether the right timestamp is not older than
112✔
2219
                                // the `ChannelPruneExpiry`.
112✔
2220
                                //
112✔
2221
                                // NOTE: The timestamp data has no verification
112✔
2222
                                // attached to it in the `ReplyChannelRange` msg
112✔
2223
                                // so we are trusting this data at this point.
112✔
2224
                                // However it is not critical because we are
112✔
2225
                                // just removing the channel from the db when
112✔
2226
                                // the timestamps are more recent. During the
112✔
2227
                                // querying of the gossip msg verification
112✔
2228
                                // happens as usual.
112✔
2229
                                // However we should start punishing peers when
112✔
2230
                                // they don't provide us honest data ?
112✔
2231
                                isStillZombie := isZombieChan(
112✔
2232
                                        info.Node1UpdateTimestamp,
112✔
2233
                                        info.Node2UpdateTimestamp,
112✔
2234
                                )
112✔
2235

112✔
2236
                                switch {
112✔
2237
                                // If the edge is a known zombie and if we
2238
                                // would still consider it a zombie given the
2239
                                // latest update timestamps, then we skip this
2240
                                // channel.
2241
                                case isZombie && isStillZombie:
31✔
2242
                                        continue
31✔
2243

2244
                                // Otherwise, if we have marked it as a zombie
2245
                                // but the latest update timestamps could bring
2246
                                // it back from the dead, then we mark it alive,
2247
                                // and we let it be added to the set of IDs to
2248
                                // query our peer for.
2249
                                case isZombie && !isStillZombie:
20✔
2250
                                        err := c.markEdgeLiveUnsafe(tx, scid)
20✔
2251
                                        if err != nil {
20✔
UNCOV
2252
                                                return err
×
UNCOV
2253
                                        }
×
2254
                                }
2255
                        }
2256

2257
                        newChanIDs = append(newChanIDs, scid)
81✔
2258
                }
2259

2260
                return nil
124✔
2261
        }, func() {
124✔
2262
                newChanIDs = nil
124✔
2263
        })
124✔
2264
        switch {
124✔
2265
        // If we don't know of any edges yet, then we'll return the entire set
2266
        // of chan IDs specified.
2267
        case errors.Is(err, ErrGraphNoEdgesFound):
×
2268
                ogChanIDs := make([]uint64, len(chansInfo))
×
UNCOV
2269
                for i, info := range chansInfo {
×
UNCOV
2270
                        ogChanIDs[i] = info.ShortChannelID.ToUint64()
×
UNCOV
2271
                }
×
2272

UNCOV
2273
                return ogChanIDs, nil
×
2274

UNCOV
2275
        case err != nil:
×
UNCOV
2276
                return nil, err
×
2277
        }
2278

2279
        return newChanIDs, nil
124✔
2280
}
2281
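// exampleFilterUnknown is an illustrative sketch only and is not part of the
// original kv_store.go. Stripped of the zombie handling, FilterKnownChanIDs
// above is a set difference between the channel IDs a peer announced and the
// ones we already know; this hypothetical helper shows that core operation.
func exampleFilterUnknown(known map[uint64]struct{}, incoming []uint64) []uint64 {
        var unknown []uint64
        for _, scid := range incoming {
                // Skip anything we already have in our own set.
                if _, ok := known[scid]; ok {
                        continue
                }

                unknown = append(unknown, scid)
        }

        return unknown
}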

2282
// ChannelUpdateInfo couples the SCID of a channel with the timestamps of the
2283
// latest received channel updates for the channel.
2284
type ChannelUpdateInfo struct {
2285
        // ShortChannelID is the SCID identifier of the channel.
2286
        ShortChannelID lnwire.ShortChannelID
2287

2288
        // Node1UpdateTimestamp is the timestamp of the latest received update
2289
        // from the node 1 channel peer. This will be set to zero time if no
2290
        // update has yet been received from this node.
2291
        Node1UpdateTimestamp time.Time
2292

2293
        // Node2UpdateTimestamp is the timestamp of the latest received update
2294
        // from the node 2 channel peer. This will be set to zero time if no
2295
        // update has yet been received from this node.
2296
        Node2UpdateTimestamp time.Time
2297
}
2298

2299
// NewChannelUpdateInfo is a constructor which makes sure we initialize the
2300
// timestamps with the zero-seconds unix timestamp, which equals
2301
// `January 1, 1970, 00:00:00 UTC`, in case the value is `time.Time{}`.
2302
func NewChannelUpdateInfo(scid lnwire.ShortChannelID, node1Timestamp,
2303
        node2Timestamp time.Time) ChannelUpdateInfo {
221✔
2304

221✔
2305
        chanInfo := ChannelUpdateInfo{
221✔
2306
                ShortChannelID:       scid,
221✔
2307
                Node1UpdateTimestamp: node1Timestamp,
221✔
2308
                Node2UpdateTimestamp: node2Timestamp,
221✔
2309
        }
221✔
2310

221✔
2311
        if node1Timestamp.IsZero() {
432✔
2312
                chanInfo.Node1UpdateTimestamp = time.Unix(0, 0)
211✔
2313
        }
211✔
2314

2315
        if node2Timestamp.IsZero() {
432✔
2316
                chanInfo.Node2UpdateTimestamp = time.Unix(0, 0)
211✔
2317
        }
211✔
2318

2319
        return chanInfo
221✔
2320
}
2321
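// exampleNewChannelUpdateInfo is an illustrative sketch only and is not part
// of the original kv_store.go. Passing zero time.Time values to
// NewChannelUpdateInfo yields timestamps normalized to the Unix epoch, which
// keeps later timestamp comparisons well defined.
func exampleNewChannelUpdateInfo(scid lnwire.ShortChannelID) ChannelUpdateInfo {
        // Both timestamps are the zero value here, so the constructor
        // rewrites them to time.Unix(0, 0).
        return NewChannelUpdateInfo(scid, time.Time{}, time.Time{})
}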

2322
// BlockChannelRange represents a range of channels for a given block height.
2323
type BlockChannelRange struct {
2324
        // Height is the height of the block all of the channels below were
2325
        // included in.
2326
        Height uint32
2327

2328
        // Channels is the list of channels identified by their short ID
2329
        // representation known to us that were included in the block height
2330
        // above. The list may include channel update timestamp information if
2331
        // requested.
2332
        Channels []ChannelUpdateInfo
2333
}
2334

2335
// FilterChannelRange returns the channel ID's of all known channels which were
2336
// mined in a block height within the passed range. The channel IDs are grouped
2337
// by their common block height. This method can be used to quickly share with a
2338
// peer the set of channels we know of within a particular range to catch them
2339
// up after a period of time offline. If withTimestamps is true then the
2340
// timestamp info of the latest received channel update messages of the channel
2341
// will be included in the response.
2342
func (c *KVStore) FilterChannelRange(startHeight,
2343
        endHeight uint32, withTimestamps bool) ([]BlockChannelRange, error) {
14✔
2344

14✔
2345
        startChanID := &lnwire.ShortChannelID{
14✔
2346
                BlockHeight: startHeight,
14✔
2347
        }
14✔
2348

14✔
2349
        endChanID := lnwire.ShortChannelID{
14✔
2350
                BlockHeight: endHeight,
14✔
2351
                TxIndex:     math.MaxUint32 & 0x00ffffff,
14✔
2352
                TxPosition:  math.MaxUint16,
14✔
2353
        }
14✔
2354

14✔
2355
        // As we need to perform a range scan, we'll convert the starting and
14✔
2356
        // ending height to their corresponding values when encoded using short
14✔
2357
        // channel ID's.
14✔
2358
        var chanIDStart, chanIDEnd [8]byte
14✔
2359
        byteOrder.PutUint64(chanIDStart[:], startChanID.ToUint64())
14✔
2360
        byteOrder.PutUint64(chanIDEnd[:], endChanID.ToUint64())
14✔
2361

14✔
2362
        var channelsPerBlock map[uint32][]ChannelUpdateInfo
14✔
2363
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
28✔
2364
                edges := tx.ReadBucket(edgeBucket)
14✔
2365
                if edges == nil {
14✔
UNCOV
2366
                        return ErrGraphNoEdgesFound
×
UNCOV
2367
                }
×
2368
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
14✔
2369
                if edgeIndex == nil {
14✔
UNCOV
2370
                        return ErrGraphNoEdgesFound
×
UNCOV
2371
                }
×
2372

2373
                cursor := edgeIndex.ReadCursor()
14✔
2374

14✔
2375
                // We'll now iterate through the database, and find each
14✔
2376
                // channel ID that resides within the specified range.
14✔
2377
                //
14✔
2378
                //nolint:ll
14✔
2379
                for k, v := cursor.Seek(chanIDStart[:]); k != nil &&
14✔
2380
                        bytes.Compare(k, chanIDEnd[:]) <= 0; k, v = cursor.Next() {
61✔
2381
                        // Don't send alias SCIDs during gossip sync.
47✔
2382
                        edgeReader := bytes.NewReader(v)
47✔
2383
                        edgeInfo, err := deserializeChanEdgeInfo(edgeReader)
47✔
2384
                        if err != nil {
47✔
2385
                                return err
×
2386
                        }
×
2387

2388
                        if edgeInfo.AuthProof == nil {
50✔
2389
                                continue
3✔
2390
                        }
2391

2392
                        // This channel ID rests within the target range, so
2393
                        // we'll add it to our returned set.
2394
                        rawCid := byteOrder.Uint64(k)
47✔
2395
                        cid := lnwire.NewShortChanIDFromInt(rawCid)
47✔
2396

47✔
2397
                        chanInfo := NewChannelUpdateInfo(
47✔
2398
                                cid, time.Time{}, time.Time{},
47✔
2399
                        )
47✔
2400

47✔
2401
                        if !withTimestamps {
69✔
2402
                                channelsPerBlock[cid.BlockHeight] = append(
22✔
2403
                                        channelsPerBlock[cid.BlockHeight],
22✔
2404
                                        chanInfo,
22✔
2405
                                )
22✔
2406

22✔
2407
                                continue
22✔
2408
                        }
2409

2410
                        node1Key, node2Key := computeEdgePolicyKeys(&edgeInfo)
25✔
2411

25✔
2412
                        rawPolicy := edges.Get(node1Key)
25✔
2413
                        if len(rawPolicy) != 0 {
36✔
2414
                                r := bytes.NewReader(rawPolicy)
11✔
2415

11✔
2416
                                edge, err := deserializeChanEdgePolicyRaw(r)
11✔
2417
                                if err != nil && !errors.Is(
11✔
2418
                                        err, ErrEdgePolicyOptionalFieldNotFound,
11✔
2419
                                ) {
11✔
UNCOV
2420

×
UNCOV
2421
                                        return err
×
UNCOV
2422
                                }
×
2423

2424
                                chanInfo.Node1UpdateTimestamp = edge.LastUpdate
11✔
2425
                        }
2426

2427
                        rawPolicy = edges.Get(node2Key)
25✔
2428
                        if len(rawPolicy) != 0 {
32✔
2429
                                r := bytes.NewReader(rawPolicy)
7✔
2430

7✔
2431
                                edge, err := deserializeChanEdgePolicyRaw(r)
7✔
2432
                                if err != nil && !errors.Is(
7✔
2433
                                        err, ErrEdgePolicyOptionalFieldNotFound,
7✔
2434
                                ) {
7✔
2435

×
2436
                                        return err
×
2437
                                }
×
2438

2439
                                chanInfo.Node2UpdateTimestamp = edge.LastUpdate
7✔
2440
                        }
2441

2442
                        channelsPerBlock[cid.BlockHeight] = append(
25✔
2443
                                channelsPerBlock[cid.BlockHeight], chanInfo,
25✔
2444
                        )
25✔
2445
                }
2446

2447
                return nil
14✔
2448
        }, func() {
14✔
2449
                channelsPerBlock = make(map[uint32][]ChannelUpdateInfo)
14✔
2450
        })
14✔
2451

2452
        switch {
14✔
2453
        // If we don't know of any channels yet, then there's nothing to
2454
        // filter, so we'll return an empty slice.
2455
        case errors.Is(err, ErrGraphNoEdgesFound) || len(channelsPerBlock) == 0:
6✔
2456
                return nil, nil
6✔
2457

UNCOV
2458
        case err != nil:
×
UNCOV
2459
                return nil, err
×
2460
        }
2461

2462
        // Return the channel ranges in ascending block height order.
2463
        blocks := make([]uint32, 0, len(channelsPerBlock))
11✔
2464
        for block := range channelsPerBlock {
36✔
2465
                blocks = append(blocks, block)
25✔
2466
        }
25✔
2467
        sort.Slice(blocks, func(i, j int) bool {
39✔
2468
                return blocks[i] < blocks[j]
28✔
2469
        })
28✔
2470

2471
        channelRanges := make([]BlockChannelRange, 0, len(channelsPerBlock))
11✔
2472
        for _, block := range blocks {
36✔
2473
                channelRanges = append(channelRanges, BlockChannelRange{
25✔
2474
                        Height:   block,
25✔
2475
                        Channels: channelsPerBlock[block],
25✔
2476
                })
25✔
2477
        }
25✔
2478

2479
        return channelRanges, nil
11✔
2480
}
2481
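// exampleFilterChannelRange is an illustrative sketch added by the editor and
// not part of the upstream file: it shows how a caller such as a gossip syncer
// might ask the store for every channel mined between two heights and log the
// per-block totals. The parameter names (store, start, end) are assumptions
// made for this example only.
func exampleFilterChannelRange(store *KVStore, start, end uint32) error {
        // Request timestamps too, so the reply can include the latest channel
        // update times for each direction.
        ranges, err := store.FilterChannelRange(start, end, true)
        if err != nil {
                return err
        }

        for _, blockRange := range ranges {
                log.Debugf("height %d: %d known channels",
                        blockRange.Height, len(blockRange.Channels))
        }

        return nil
}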

2482
// FetchChanInfos returns the set of channel edges that correspond to the passed
2483
// channel IDs. If an edge in the query is unknown to the database, it will be
2484
// skipped and the result will contain only those edges that exist at the time
2485
// of the query. This can be used to respond to peer queries that are seeking to
2486
// fill in gaps in their view of the channel graph.
2487
func (c *KVStore) FetchChanInfos(chanIDs []uint64) ([]ChannelEdge, error) {
6✔
2488
        return c.fetchChanInfos(nil, chanIDs)
6✔
2489
}
6✔
2490
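// exampleFetchChanInfos is an editor-added sketch (assumed helper, not in the
// original file) showing how a caller might backfill its view of the graph
// from a list of short channel IDs. Unknown channels are simply absent from
// the result, so the returned slice can be shorter than the query.
func exampleFetchChanInfos(store *KVStore,
        scids []lnwire.ShortChannelID) ([]ChannelEdge, error) {

        chanIDs := make([]uint64, 0, len(scids))
        for _, scid := range scids {
                chanIDs = append(chanIDs, scid.ToUint64())
        }

        return store.FetchChanInfos(chanIDs)
}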

2491
// fetchChanInfos returns the set of channel edges that correspond to the passed
2492
// channel IDs. If an edge in the query is unknown to the database, it will be
2493
// skipped and the result will contain only those edges that exist at the time
2494
// of the query. This can be used to respond to peer queries that are seeking to
2495
// fill in gaps in their view of the channel graph.
2496
//
2497
// NOTE: An optional transaction may be provided. If none is provided, then a
2498
// new one will be created.
2499
func (c *KVStore) fetchChanInfos(tx kvdb.RTx, chanIDs []uint64) (
2500
        []ChannelEdge, error) {
27✔
2501
        // TODO(roasbeef): sort cids?
27✔
2502

27✔
2503
        var (
27✔
2504
                chanEdges []ChannelEdge
27✔
2505
                cidBytes  [8]byte
27✔
2506
        )
27✔
2507

27✔
2508
        fetchChanInfos := func(tx kvdb.RTx) error {
54✔
2509
                edges := tx.ReadBucket(edgeBucket)
27✔
2510
                if edges == nil {
27✔
UNCOV
2511
                        return ErrGraphNoEdgesFound
×
UNCOV
2512
                }
×
2513
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
27✔
2514
                if edgeIndex == nil {
27✔
UNCOV
2515
                        return ErrGraphNoEdgesFound
×
UNCOV
2516
                }
×
2517
                nodes := tx.ReadBucket(nodeBucket)
27✔
2518
                if nodes == nil {
27✔
UNCOV
2519
                        return ErrGraphNotFound
×
UNCOV
2520
                }
×
2521

2522
                for _, cid := range chanIDs {
61✔
2523
                        byteOrder.PutUint64(cidBytes[:], cid)
34✔
2524

34✔
2525
                        // First, we'll fetch the static edge information. If
34✔
2526
                        // the edge is unknown, we will skip the edge and
34✔
2527
                        // continue gathering all known edges.
34✔
2528
                        edgeInfo, err := fetchChanEdgeInfo(
34✔
2529
                                edgeIndex, cidBytes[:],
34✔
2530
                        )
34✔
2531
                        switch {
34✔
2532
                        case errors.Is(err, ErrEdgeNotFound):
23✔
2533
                                continue
23✔
2534
                        case err != nil:
×
2535
                                return err
×
2536
                        }
2537

2538
                        // With the static information obtained, we'll now
2539
                        // fetch the dynamic policy info.
2540
                        edge1, edge2, err := fetchChanEdgePolicies(
11✔
2541
                                edgeIndex, edges, cidBytes[:],
11✔
2542
                        )
11✔
2543
                        if err != nil {
11✔
UNCOV
2544
                                return err
×
UNCOV
2545
                        }
×
2546

2547
                        node1, err := fetchLightningNode(
11✔
2548
                                nodes, edgeInfo.NodeKey1Bytes[:],
11✔
2549
                        )
11✔
2550
                        if err != nil {
11✔
UNCOV
2551
                                return err
×
UNCOV
2552
                        }
×
2553

2554
                        node2, err := fetchLightningNode(
11✔
2555
                                nodes, edgeInfo.NodeKey2Bytes[:],
11✔
2556
                        )
11✔
2557
                        if err != nil {
11✔
UNCOV
2558
                                return err
×
2559
                        }
×
2560

2561
                        chanEdges = append(chanEdges, ChannelEdge{
11✔
2562
                                Info:    &edgeInfo,
11✔
2563
                                Policy1: edge1,
11✔
2564
                                Policy2: edge2,
11✔
2565
                                Node1:   &node1,
11✔
2566
                                Node2:   &node2,
11✔
2567
                        })
11✔
2568
                }
2569

2570
                return nil
27✔
2571
        }
2572

2573
        if tx == nil {
34✔
2574
                err := kvdb.View(c.db, fetchChanInfos, func() {
14✔
2575
                        chanEdges = nil
7✔
2576
                })
7✔
2577
                if err != nil {
7✔
UNCOV
2578
                        return nil, err
×
UNCOV
2579
                }
×
2580

2581
                return chanEdges, nil
7✔
2582
        }
2583

2584
        err := fetchChanInfos(tx)
20✔
2585
        if err != nil {
20✔
UNCOV
2586
                return nil, err
×
UNCOV
2587
        }
×
2588

2589
        return chanEdges, nil
20✔
2590
}
2591

2592
func delEdgeUpdateIndexEntry(edgesBucket kvdb.RwBucket, chanID uint64,
2593
        edge1, edge2 *models.ChannelEdgePolicy) error {
146✔
2594

146✔
2595
        // First, we'll fetch the edge update index bucket which currently
146✔
2596
        // stores an entry for the channel we're about to delete.
146✔
2597
        updateIndex := edgesBucket.NestedReadWriteBucket(edgeUpdateIndexBucket)
146✔
2598
        if updateIndex == nil {
146✔
UNCOV
2599
                // No edges in bucket, return early.
×
UNCOV
2600
                return nil
×
2601
        }
×
2602

2603
        // Now that we have the bucket, we'll attempt to construct a template
2604
        // for the index key: updateTime || chanid.
2605
        var indexKey [8 + 8]byte
146✔
2606
        byteOrder.PutUint64(indexKey[8:], chanID)
146✔
2607

146✔
2608
        // With the template constructed, we'll attempt to delete an entry that
146✔
2609
        // would have been created by both edges: we'll alternate the update
146✔
2610
        // times, as one may have overridden the other.
146✔
2611
        if edge1 != nil {
159✔
2612
                byteOrder.PutUint64(
13✔
2613
                        indexKey[:8], uint64(edge1.LastUpdate.Unix()),
13✔
2614
                )
13✔
2615
                if err := updateIndex.Delete(indexKey[:]); err != nil {
13✔
2616
                        return err
×
UNCOV
2617
                }
×
2618
        }
2619

2620
        // We'll also attempt to delete the entry that may have been created by
2621
        // the second edge.
2622
        if edge2 != nil {
161✔
2623
                byteOrder.PutUint64(
15✔
2624
                        indexKey[:8], uint64(edge2.LastUpdate.Unix()),
15✔
2625
                )
15✔
2626
                if err := updateIndex.Delete(indexKey[:]); err != nil {
15✔
UNCOV
2627
                        return err
×
UNCOV
2628
                }
×
2629
        }
2630

2631
        return nil
146✔
2632
}
2633
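// exampleEdgeUpdateIndexKey is an editor-added sketch of the
// updateTime || chanID key layout described above: the first 8 bytes hold the
// big-endian unix timestamp of the policy's last update and the last 8 bytes
// hold the channel ID, which is what delEdgeUpdateIndexEntry reconstructs
// before deleting the entry.
func exampleEdgeUpdateIndexKey(edge *models.ChannelEdgePolicy) [16]byte {
        var indexKey [16]byte
        byteOrder.PutUint64(indexKey[:8], uint64(edge.LastUpdate.Unix()))
        byteOrder.PutUint64(indexKey[8:], edge.ChannelID)

        return indexKey
}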

2634
// delChannelEdgeUnsafe deletes the edge with the given chanID from the graph
2635
// cache. It then goes on to delete any policy info and edge info for this
2636
// channel from the DB and finally, if isZombie is true, it will add an entry
2637
// for this channel in the zombie index.
2638
//
2639
// NOTE: this method MUST only be called if the cacheMu has already been
2640
// acquired.
2641
func (c *KVStore) delChannelEdgeUnsafe(edges, edgeIndex, chanIndex,
2642
        zombieIndex kvdb.RwBucket, chanID []byte, isZombie,
2643
        strictZombie bool) (*models.ChannelEdgeInfo, error) {
206✔
2644

206✔
2645
        edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID)
206✔
2646
        if err != nil {
266✔
2647
                return nil, err
60✔
2648
        }
60✔
2649

2650
        // We'll also remove the entry in the edge update index bucket before
2651
        // we delete the edges themselves so we can access their last update
2652
        // times.
2653
        cid := byteOrder.Uint64(chanID)
146✔
2654
        edge1, edge2, err := fetchChanEdgePolicies(edgeIndex, edges, chanID)
146✔
2655
        if err != nil {
146✔
UNCOV
2656
                return nil, err
×
UNCOV
2657
        }
×
2658
        err = delEdgeUpdateIndexEntry(edges, cid, edge1, edge2)
146✔
2659
        if err != nil {
146✔
UNCOV
2660
                return nil, err
×
UNCOV
2661
        }
×
2662

2663
        // The edge key is of the format pubKey || chanID. First we construct
2664
        // the latter half, populating the channel ID.
2665
        var edgeKey [33 + 8]byte
146✔
2666
        copy(edgeKey[33:], chanID)
146✔
2667

146✔
2668
        // With the latter half constructed, copy over the first public key to
146✔
2669
        // delete the edge in this direction, then the second to delete the
146✔
2670
        // edge in the opposite direction.
146✔
2671
        copy(edgeKey[:33], edgeInfo.NodeKey1Bytes[:])
146✔
2672
        if edges.Get(edgeKey[:]) != nil {
292✔
2673
                if err := edges.Delete(edgeKey[:]); err != nil {
146✔
UNCOV
2674
                        return nil, err
×
NEW
2675
                }
×
2676
        }
2677
        copy(edgeKey[:33], edgeInfo.NodeKey2Bytes[:])
146✔
2678
        if edges.Get(edgeKey[:]) != nil {
292✔
2679
                if err := edges.Delete(edgeKey[:]); err != nil {
146✔
UNCOV
2680
                        return nil, err
×
UNCOV
2681
                }
×
2682
        }
2683

2684
        // As part of deleting the edge we also remove all disabled entries
2685
        // from the edgePolicyDisabledIndex bucket. We do that for both
2686
        // directions.
2687
        err = updateEdgePolicyDisabledIndex(edges, cid, false, false)
146✔
2688
        if err != nil {
146✔
NEW
2689
                return nil, err
×
2690
        }
×
2691
        err = updateEdgePolicyDisabledIndex(edges, cid, true, false)
146✔
2692
        if err != nil {
146✔
UNCOV
2693
                return nil, err
×
UNCOV
2694
        }
×
2695

2696
        // With the edge data deleted, we can purge the information from the two
2697
        // edge indexes.
2698
        if err := edgeIndex.Delete(chanID); err != nil {
146✔
UNCOV
2699
                return nil, err
×
UNCOV
2700
        }
×
2701
        var b bytes.Buffer
146✔
2702
        if err := WriteOutpoint(&b, &edgeInfo.ChannelPoint); err != nil {
146✔
UNCOV
2703
                return nil, err
×
NEW
2704
        }
×
2705
        if err := chanIndex.Delete(b.Bytes()); err != nil {
146✔
UNCOV
2706
                return nil, err
×
UNCOV
2707
        }
×
2708

2709
        // Finally, we'll mark the edge as a zombie within our index if it's
2710
        // being removed due to the channel becoming a zombie. We do this to
2711
        // ensure we don't store unnecessary data for spent channels.
2712
        if !isZombie {
270✔
2713
                return &edgeInfo, nil
124✔
2714
        }
124✔
2715

2716
        nodeKey1, nodeKey2 := edgeInfo.NodeKey1Bytes, edgeInfo.NodeKey2Bytes
25✔
2717
        if strictZombie {
28✔
2718
                nodeKey1, nodeKey2 = makeZombiePubkeys(&edgeInfo, edge1, edge2)
3✔
2719
        }
3✔
2720

2721
        return &edgeInfo, markEdgeZombie(
25✔
2722
                zombieIndex, byteOrder.Uint64(chanID), nodeKey1, nodeKey2,
25✔
2723
        )
25✔
2724
}
2725

2726
// makeZombiePubkeys derives the node pubkeys to store in the zombie index for a
2727
// particular pair of channel policies. The return values are one of:
2728
//  1. (pubkey1, pubkey2)
2729
//  2. (pubkey1, blank)
2730
//  3. (blank, pubkey2)
2731
//
2732
// A blank pubkey means that the corresponding node will be unable to resurrect a
2733
// channel on its own. For example, node1 may continue to publish recent
2734
// updates, but node2 has fallen way behind. After marking an edge as a zombie,
2735
// we don't want another fresh update from node1 to resurrect, as the edge can
2736
// only become live once node2 finally sends something recent.
2737
//
2738
// In the case where we have neither update, we allow either party to resurrect
2739
// the channel. If the channel were to be marked zombie again, it would be
2740
// marked with the correct lagging channel since we received an update from only
2741
// one side.
2742
func makeZombiePubkeys(info *models.ChannelEdgeInfo,
2743
        e1, e2 *models.ChannelEdgePolicy) ([33]byte, [33]byte) {
3✔
2744

3✔
2745
        switch {
3✔
2746
        // If we don't have either edge policy, we'll return both pubkeys so
2747
        // that the channel can be resurrected by either party.
UNCOV
2748
        case e1 == nil && e2 == nil:
×
UNCOV
2749
                return info.NodeKey1Bytes, info.NodeKey2Bytes
×
2750

2751
        // If we're missing edge1, or if both edges are present but edge1 is
2752
        // older, we'll return edge1's pubkey and a blank pubkey for edge2. This
2753
        // means that only an update from edge1 will be able to resurrect the
2754
        // channel.
2755
        case e1 == nil || (e2 != nil && e1.LastUpdate.Before(e2.LastUpdate)):
1✔
2756
                return info.NodeKey1Bytes, [33]byte{}
1✔
2757

2758
        // Otherwise, we're missing edge2 or edge2 is the older side, so we
2759
        // return a blank pubkey for edge1. In this case, only an update from
2760
        // edge2 can resurrect the channel.
2761
        default:
2✔
2762
                return [33]byte{}, info.NodeKey2Bytes
2✔
2763
        }
2764
}
2765
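// exampleZombieResurrection is an editor-added sketch showing how the pubkey
// pair produced above gates resurrection: with only a policy from node2 on
// record (and nothing from node1), the returned pair keeps node1's key and
// blanks node2's, so only a fresh update from node1 can revive the edge. The
// function and parameter names are assumptions for the example only.
func exampleZombieResurrection(info *models.ChannelEdgeInfo,
        node2Policy *models.ChannelEdgePolicy) bool {

        nodeKey1, nodeKey2 := makeZombiePubkeys(info, nil, node2Policy)

        // nodeKey1 matches the channel's first node, nodeKey2 is blank.
        return nodeKey1 == info.NodeKey1Bytes && nodeKey2 == [33]byte{}
}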

2766
// UpdateEdgePolicy updates the edge routing policy for a single directed edge
2767
// within the database for the referenced channel. The `flags` attribute within
2768
// the ChannelEdgePolicy determines which of the directed edges are being
2769
// updated. If the flag's direction bit is 0, then the first node's information
2770
// is being updated, otherwise it's the second node's. The node ordering is
2771
// determined by the lexicographical ordering of the identity public keys of the
2772
// nodes on either side of the channel.
2773
func (c *KVStore) UpdateEdgePolicy(edge *models.ChannelEdgePolicy,
2774
        op ...batch.SchedulerOption) error {
2,664✔
2775

2,664✔
2776
        var (
2,664✔
2777
                isUpdate1    bool
2,664✔
2778
                edgeNotFound bool
2,664✔
2779
        )
2,664✔
2780

2,664✔
2781
        r := &batch.Request{
2,664✔
2782
                Reset: func() {
5,328✔
2783
                        isUpdate1 = false
2,664✔
2784
                        edgeNotFound = false
2,664✔
2785
                },
2,664✔
2786
                Update: func(tx kvdb.RwTx) error {
2,664✔
2787
                        var err error
2,664✔
2788
                        isUpdate1, err = updateEdgePolicy(
2,664✔
2789
                                tx, edge, c.graphCache,
2,664✔
2790
                        )
2,664✔
2791

2,664✔
2792
                        if err != nil {
2,667✔
2793
                                log.Errorf("UpdateEdgePolicy faild: %v", err)
3✔
2794
                        }
3✔
2795

2796
                        // Silence ErrEdgeNotFound so that the batch can
2797
                        // succeed, but propagate the error via local state.
2798
                        if errors.Is(err, ErrEdgeNotFound) {
2,667✔
2799
                                edgeNotFound = true
3✔
2800
                                return nil
3✔
2801
                        }
3✔
2802

2803
                        return err
2,661✔
2804
                },
2805
                OnCommit: func(err error) error {
2,664✔
2806
                        switch {
2,664✔
UNCOV
2807
                        case err != nil:
×
UNCOV
2808
                                return err
×
2809
                        case edgeNotFound:
3✔
2810
                                return ErrEdgeNotFound
3✔
2811
                        default:
2,661✔
2812
                                c.updateEdgeCache(edge, isUpdate1)
2,661✔
2813
                                return nil
2,661✔
2814
                        }
2815
                },
2816
        }
2817

2818
        for _, f := range op {
2,667✔
2819
                f(r)
3✔
2820
        }
3✔
2821

2822
        return c.chanScheduler.Execute(r)
2,664✔
2823
}
2824
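// exampleUpdateEdgePolicy is an editor-added sketch of the calling pattern
// (not part of the original file): because writes are batched, ErrEdgeNotFound
// surfaces via OnCommit, so callers should treat that error as "channel
// unknown" rather than as a failed transaction.
func exampleUpdateEdgePolicy(store *KVStore,
        policy *models.ChannelEdgePolicy) error {

        err := store.UpdateEdgePolicy(policy)
        switch {
        case errors.Is(err, ErrEdgeNotFound):
                log.Debugf("channel %v unknown, skipping policy update",
                        policy.ChannelID)
                return nil

        case err != nil:
                return err
        }

        return nil
}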

2825
func (c *KVStore) updateEdgeCache(e *models.ChannelEdgePolicy,
2826
        isUpdate1 bool) {
2,661✔
2827

2,661✔
2828
        // If an entry for this channel is found in reject cache, we'll modify
2,661✔
2829
        // the entry with the updated timestamp for the direction that was just
2,661✔
2830
        // written. If the edge doesn't exist, we'll load the cache entry lazily
2,661✔
2831
        // during the next query for this edge.
2,661✔
2832
        if entry, ok := c.rejectCache.get(e.ChannelID); ok {
2,669✔
2833
                if isUpdate1 {
14✔
2834
                        entry.upd1Time = e.LastUpdate.Unix()
6✔
2835
                } else {
11✔
2836
                        entry.upd2Time = e.LastUpdate.Unix()
5✔
2837
                }
5✔
2838
                c.rejectCache.insert(e.ChannelID, entry)
8✔
2839
        }
2840

2841
        // If an entry for this channel is found in channel cache, we'll modify
2842
        // the entry with the updated policy for the direction that was just
2843
        // written. If the edge doesn't exist, we'll defer loading the info and
2844
        // policies and lazily read from disk during the next query.
2845
        if channel, ok := c.chanCache.get(e.ChannelID); ok {
2,664✔
2846
                if isUpdate1 {
6✔
2847
                        channel.Policy1 = e
3✔
2848
                } else {
6✔
2849
                        channel.Policy2 = e
3✔
2850
                }
3✔
2851
                c.chanCache.insert(e.ChannelID, channel)
3✔
2852
        }
2853
}
2854

2855
// updateEdgePolicy attempts to update an edge's policy within the relevant
2856
// buckets using an existing database transaction. The returned boolean will be
2857
// true if the updated policy belongs to node1, and false if it belongs
2858
// to node2.
2859
func updateEdgePolicy(tx kvdb.RwTx, edge *models.ChannelEdgePolicy,
2860
        graphCache *GraphCache) (bool, error) {
2,664✔
2861

2,664✔
2862
        edges := tx.ReadWriteBucket(edgeBucket)
2,664✔
2863
        if edges == nil {
2,664✔
UNCOV
2864
                return false, ErrEdgeNotFound
×
UNCOV
2865
        }
×
2866
        edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
2,664✔
2867
        if edgeIndex == nil {
2,664✔
UNCOV
2868
                return false, ErrEdgeNotFound
×
UNCOV
2869
        }
×
2870

2871
        // Create the channelID key by converting the channel ID
2872
        // integer into a byte slice.
2873
        var chanID [8]byte
2,664✔
2874
        byteOrder.PutUint64(chanID[:], edge.ChannelID)
2,664✔
2875

2,664✔
2876
        // With the channel ID, we then fetch the value storing the two
2,664✔
2877
        // nodes which connect this channel edge.
2,664✔
2878
        nodeInfo := edgeIndex.Get(chanID[:])
2,664✔
2879
        if nodeInfo == nil {
2,667✔
2880
                return false, ErrEdgeNotFound
3✔
2881
        }
3✔
2882

2883
        // Depending on the flags value passed above, either the first
2884
        // or second edge policy is being updated.
2885
        var fromNode, toNode []byte
2,661✔
2886
        var isUpdate1 bool
2,661✔
2887
        if edge.ChannelFlags&lnwire.ChanUpdateDirection == 0 {
3,999✔
2888
                fromNode = nodeInfo[:33]
1,338✔
2889
                toNode = nodeInfo[33:66]
1,338✔
2890
                isUpdate1 = true
1,338✔
2891
        } else {
2,664✔
2892
                fromNode = nodeInfo[33:66]
1,326✔
2893
                toNode = nodeInfo[:33]
1,326✔
2894
                isUpdate1 = false
1,326✔
2895
        }
1,326✔
2896

2897
        // Finally, with the direction of the edge being updated
2898
        // identified, we update the on-disk edge representation.
2899
        err := putChanEdgePolicy(edges, edge, fromNode, toNode)
2,661✔
2900
        if err != nil {
2,661✔
UNCOV
2901
                return false, err
×
UNCOV
2902
        }
×
2903

2904
        var (
2,661✔
2905
                fromNodePubKey route.Vertex
2,661✔
2906
                toNodePubKey   route.Vertex
2,661✔
2907
        )
2,661✔
2908
        copy(fromNodePubKey[:], fromNode)
2,661✔
2909
        copy(toNodePubKey[:], toNode)
2,661✔
2910

2,661✔
2911
        if graphCache != nil {
4,936✔
2912
                graphCache.UpdatePolicy(
2,275✔
2913
                        edge, fromNodePubKey, toNodePubKey, isUpdate1,
2,275✔
2914
                )
2,275✔
2915
        }
2,275✔
2916

2917
        return isUpdate1, nil
2,661✔
2918
}
2919
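// exampleDirectionFromFlags is an editor-added sketch of the direction
// convention used above: a cleared lnwire.ChanUpdateDirection bit means the
// update comes from the lexicographically-first node (node1), while a set bit
// means it comes from node2.
func exampleDirectionFromFlags(edge *models.ChannelEdgePolicy) string {
        if edge.ChannelFlags&lnwire.ChanUpdateDirection == 0 {
                return "node1"
        }

        return "node2"
}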

2920
// isPublic determines whether the node is seen as public within the graph from
2921
// the source node's point of view. An existing database transaction can also be
2922
// specified.
2923
func (c *KVStore) isPublic(tx kvdb.RTx, nodePub route.Vertex,
2924
        sourcePubKey []byte) (bool, error) {
16✔
2925

16✔
2926
        // In order to determine whether this node is publicly advertised within
16✔
2927
        // the graph, we'll need to look at all of its edges and check whether
16✔
2928
        // they extend to any other node than the source node. errDone will be
16✔
2929
        // used to terminate the check early.
16✔
2930
        nodeIsPublic := false
16✔
2931
        errDone := errors.New("done")
16✔
2932
        err := c.ForEachNodeChannelTx(tx, nodePub, func(tx kvdb.RTx,
16✔
2933
                info *models.ChannelEdgeInfo, _ *models.ChannelEdgePolicy,
16✔
2934
                _ *models.ChannelEdgePolicy) error {
29✔
2935

13✔
2936
                // If this edge doesn't extend to the source node, we'll
13✔
2937
                // terminate our search as we can now conclude that the node is
13✔
2938
                // publicly advertised within the graph due to the local node
13✔
2939
                // knowing of the current edge.
13✔
2940
                if !bytes.Equal(info.NodeKey1Bytes[:], sourcePubKey) &&
13✔
2941
                        !bytes.Equal(info.NodeKey2Bytes[:], sourcePubKey) {
19✔
2942

6✔
2943
                        nodeIsPublic = true
6✔
2944
                        return errDone
6✔
2945
                }
6✔
2946

2947
                // Since the edge _does_ extend to the source node, we'll also
2948
                // need to ensure that this is a public edge.
2949
                if info.AuthProof != nil {
19✔
2950
                        nodeIsPublic = true
9✔
2951
                        return errDone
9✔
2952
                }
9✔
2953

2954
                // Otherwise, we'll continue our search.
2955
                return nil
4✔
2956
        })
2957
        if err != nil && !errors.Is(err, errDone) {
16✔
UNCOV
2958
                return false, err
×
UNCOV
2959
        }
×
2960

2961
        return nodeIsPublic, nil
16✔
2962
}
2963

2964
// FetchLightningNodeTx attempts to look up a target node by its identity
2965
// public key. If the node isn't found in the database, then
2966
// ErrGraphNodeNotFound is returned. An optional transaction may be provided.
2967
// If none is provided, then a new one will be created.
2968
func (c *KVStore) FetchLightningNodeTx(tx kvdb.RTx, nodePub route.Vertex) (
2969
        *models.LightningNode, error) {
3,444✔
2970

3,444✔
2971
        return c.fetchLightningNode(tx, nodePub)
3,444✔
2972
}
3,444✔
2973

2974
// FetchLightningNode attempts to look up a target node by its identity public
2975
// key. If the node isn't found in the database, then ErrGraphNodeNotFound is
2976
// returned.
2977
func (c *KVStore) FetchLightningNode(nodePub route.Vertex) (
2978
        *models.LightningNode, error) {
155✔
2979

155✔
2980
        return c.fetchLightningNode(nil, nodePub)
155✔
2981
}
155✔
2982
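// exampleFetchLightningNode is an editor-added sketch: look a node up by its
// pubkey and distinguish "not found" from real database errors, which is the
// same split callers in this package rely on. The helper name is an assumption
// for the example only.
func exampleFetchLightningNode(store *KVStore,
        pub route.Vertex) (*models.LightningNode, error) {

        node, err := store.FetchLightningNode(pub)
        switch {
        case errors.Is(err, ErrGraphNodeNotFound):
                // The node has never been announced to us.
                return nil, nil

        case err != nil:
                return nil, err
        }

        return node, nil
}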

2983
// fetchLightningNode attempts to look up a target node by its identity public
2984
// key. If the node isn't found in the database, then ErrGraphNodeNotFound is
2985
// returned. An optional transaction may be provided. If none is provided, then
2986
// a new one will be created.
2987
func (c *KVStore) fetchLightningNode(tx kvdb.RTx,
2988
        nodePub route.Vertex) (*models.LightningNode, error) {
3,596✔
2989

3,596✔
2990
        var node *models.LightningNode
3,596✔
2991
        fetch := func(tx kvdb.RTx) error {
7,192✔
2992
                // First grab the nodes bucket which stores the mapping from
3,596✔
2993
                // pubKey to node information.
3,596✔
2994
                nodes := tx.ReadBucket(nodeBucket)
3,596✔
2995
                if nodes == nil {
3,596✔
UNCOV
2996
                        return ErrGraphNotFound
×
UNCOV
2997
                }
×
2998

2999
                // If a key for this serialized public key isn't found, then
3000
                // the target node doesn't exist within the database.
3001
                nodeBytes := nodes.Get(nodePub[:])
3,596✔
3002
                if nodeBytes == nil {
3,613✔
3003
                        return ErrGraphNodeNotFound
17✔
3004
                }
17✔
3005

3006
                // If the node is found, then we can deserialize the node
3007
                // information to return to the user.
3008
                nodeReader := bytes.NewReader(nodeBytes)
3,582✔
3009
                n, err := deserializeLightningNode(nodeReader)
3,582✔
3010
                if err != nil {
3,582✔
3011
                        return err
×
3012
                }
×
3013

3014
                node = &n
3,582✔
3015

3,582✔
3016
                return nil
3,582✔
3017
        }
3018

3019
        if tx == nil {
3,754✔
3020
                err := kvdb.View(
158✔
3021
                        c.db, fetch, func() {
316✔
3022
                                node = nil
158✔
3023
                        },
158✔
3024
                )
3025
                if err != nil {
164✔
3026
                        return nil, err
6✔
3027
                }
6✔
3028

3029
                return node, nil
155✔
3030
        }
3031

3032
        err := fetch(tx)
3,438✔
3033
        if err != nil {
3,449✔
3034
                return nil, err
11✔
3035
        }
11✔
3036

3037
        return node, nil
3,427✔
3038
}
3039

3040
// HasLightningNode determines if the graph has a vertex identified by the
3041
// target node identity public key. If the node exists in the database, a
3042
// timestamp of when the data for the node was last updated is returned along
3043
// with a true boolean. Otherwise, an empty time.Time is returned with a false
3044
// boolean.
3045
func (c *KVStore) HasLightningNode(nodePub [33]byte) (time.Time, bool,
3046
        error) {
19✔
3047

19✔
3048
        var (
19✔
3049
                updateTime time.Time
19✔
3050
                exists     bool
19✔
3051
        )
19✔
3052

19✔
3053
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
38✔
3054
                // First grab the nodes bucket which stores the mapping from
19✔
3055
                // pubKey to node information.
19✔
3056
                nodes := tx.ReadBucket(nodeBucket)
19✔
3057
                if nodes == nil {
19✔
UNCOV
3058
                        return ErrGraphNotFound
×
UNCOV
3059
                }
×
3060

3061
                // If a key for this serialized public key isn't found, we can
3062
                // exit early.
3063
                nodeBytes := nodes.Get(nodePub[:])
19✔
3064
                if nodeBytes == nil {
25✔
3065
                        exists = false
6✔
3066
                        return nil
6✔
3067
                }
6✔
3068

3069
                // Otherwise we continue on to obtain the time stamp
3070
                // representing the last time the data for this node was
3071
                // updated.
3072
                nodeReader := bytes.NewReader(nodeBytes)
16✔
3073
                node, err := deserializeLightningNode(nodeReader)
16✔
3074
                if err != nil {
16✔
UNCOV
3075
                        return err
×
UNCOV
3076
                }
×
3077

3078
                exists = true
16✔
3079
                updateTime = node.LastUpdate
16✔
3080

16✔
3081
                return nil
16✔
3082
        }, func() {
19✔
3083
                updateTime = time.Time{}
19✔
3084
                exists = false
19✔
3085
        })
19✔
3086
        if err != nil {
19✔
UNCOV
3087
                return time.Time{}, exists, err
×
UNCOV
3088
        }
×
3089

3090
        return updateTime, exists, nil
19✔
3091
}
3092

3093
// nodeTraversal is used to traverse all channels of a node given by its
3094
// public key and passes channel information into the specified callback.
3095
func nodeTraversal(tx kvdb.RTx, nodePub []byte, db kvdb.Backend,
3096
        cb func(kvdb.RTx, *models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
3097
                *models.ChannelEdgePolicy) error) error {
1,250✔
3098

1,250✔
3099
        traversal := func(tx kvdb.RTx) error {
2,500✔
3100
                edges := tx.ReadBucket(edgeBucket)
1,250✔
3101
                if edges == nil {
1,250✔
3102
                        return ErrGraphNotFound
×
3103
                }
×
3104
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
1,250✔
3105
                if edgeIndex == nil {
1,250✔
UNCOV
3106
                        return ErrGraphNoEdgesFound
×
UNCOV
3107
                }
×
3108

3109
                // In order to reach all the edges for this node, we take
3110
                // advantage of the construction of the key-space within the
3111
                // edge bucket. The keys are stored in the form: pubKey ||
3112
                // chanID. Therefore, starting from a chanID of zero, we can
3113
                // scan forward in the bucket, grabbing all the edges for the
3114
                // node. Once the prefix no longer matches, then we know we're
3115
                // done.
3116
                var nodeStart [33 + 8]byte
1,250✔
3117
                copy(nodeStart[:], nodePub)
1,250✔
3118
                copy(nodeStart[33:], chanStart[:])
1,250✔
3119

1,250✔
3120
                // Starting from the key pubKey || 0, we seek forward in the
1,250✔
3121
                // bucket until the retrieved key no longer has the public key
1,250✔
3122
                // as its prefix. This indicates that we've stepped over into
1,250✔
3123
                // another node's edges, so we can terminate our scan.
1,250✔
3124
                edgeCursor := edges.ReadCursor()
1,250✔
3125
                for nodeEdge, _ := edgeCursor.Seek(nodeStart[:]); bytes.HasPrefix(nodeEdge, nodePub); nodeEdge, _ = edgeCursor.Next() { //nolint:ll
4,904✔
3126
                        // If the prefix still matches, the channel id is
3,654✔
3127
                        // returned in nodeEdge. Channel id is used to lookup
3,654✔
3128
                        // the node at the other end of the channel and both
3,654✔
3129
                        // edge policies.
3,654✔
3130
                        chanID := nodeEdge[33:]
3,654✔
3131
                        edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID)
3,654✔
3132
                        if err != nil {
3,654✔
UNCOV
3133
                                return err
×
UNCOV
3134
                        }
×
3135

3136
                        outgoingPolicy, err := fetchChanEdgePolicy(
3,654✔
3137
                                edges, chanID, nodePub,
3,654✔
3138
                        )
3,654✔
3139
                        if err != nil {
3,654✔
UNCOV
3140
                                return err
×
UNCOV
3141
                        }
×
3142

3143
                        otherNode, err := edgeInfo.OtherNodeKeyBytes(nodePub)
3,654✔
3144
                        if err != nil {
3,654✔
UNCOV
3145
                                return err
×
UNCOV
3146
                        }
×
3147

3148
                        incomingPolicy, err := fetchChanEdgePolicy(
3,654✔
3149
                                edges, chanID, otherNode[:],
3,654✔
3150
                        )
3,654✔
3151
                        if err != nil {
3,654✔
UNCOV
3152
                                return err
×
UNCOV
3153
                        }
×
3154

3155
                        // Finally, we execute the callback.
3156
                        err = cb(tx, &edgeInfo, outgoingPolicy, incomingPolicy)
3,654✔
3157
                        if err != nil {
3,666✔
3158
                                return err
12✔
3159
                        }
12✔
3160
                }
3161

3162
                return nil
1,241✔
3163
        }
3164

3165
        // If no transaction was provided, then we'll create a new transaction
3166
        // to execute the transaction within.
3167
        if tx == nil {
1,262✔
3168
                return kvdb.View(db, traversal, func() {})
24✔
3169
        }
3170

3171
        // Otherwise, we re-use the existing transaction to execute the graph
3172
        // traversal.
3173
        return traversal(tx)
1,241✔
3174
}
3175
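// exampleNodeEdgeSeekKey is an editor-added sketch of the seek key used by
// nodeTraversal above: edge bucket keys have the form pubKey || chanID, so
// seeking to pubKey || 0x00..00 and scanning while the 33-byte prefix still
// matches visits exactly the channels of that node.
func exampleNodeEdgeSeekKey(nodePub route.Vertex) [33 + 8]byte {
        var seekKey [33 + 8]byte
        copy(seekKey[:33], nodePub[:])

        // The trailing 8 bytes are already zero, i.e. the smallest possible
        // channel ID for this public key.
        return seekKey
}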

3176
// ForEachNodeChannel iterates through all channels of the given node,
3177
// executing the passed callback with an edge info structure and the policies
3178
// of each end of the channel. The first edge policy is the outgoing edge *to*
3179
// the connecting node, while the second is the incoming edge *from* the
3180
// connecting node. If the callback returns an error, then the iteration is
3181
// halted with the error propagated back up to the caller.
3182
//
3183
// Unknown policies are passed into the callback as nil values.
3184
func (c *KVStore) ForEachNodeChannel(nodePub route.Vertex,
3185
        cb func(kvdb.RTx, *models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
3186
                *models.ChannelEdgePolicy) error) error {
9✔
3187

9✔
3188
        return nodeTraversal(nil, nodePub[:], c.db, cb)
9✔
3189
}
9✔
3190
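// exampleCountNodeChannels is an editor-added sketch showing the callback
// shape expected by ForEachNodeChannel: the two policies are the outgoing and
// incoming edges respectively, and either may be nil if we have not yet
// received that half of the channel.
func exampleCountNodeChannels(store *KVStore,
        nodePub route.Vertex) (int, error) {

        var numChannels int
        err := store.ForEachNodeChannel(nodePub, func(_ kvdb.RTx,
                info *models.ChannelEdgeInfo, outPolicy,
                inPolicy *models.ChannelEdgePolicy) error {

                if outPolicy == nil || inPolicy == nil {
                        log.Debugf("channel %v is missing a policy",
                                info.ChannelID)
                }
                numChannels++

                return nil
        })

        return numChannels, err
}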

3191
// ForEachNodeChannelTx iterates through all channels of the given node,
3192
// executing the passed callback with an edge info structure and the policies
3193
// of each end of the channel. The first edge policy is the outgoing edge *to*
3194
// the connecting node, while the second is the incoming edge *from* the
3195
// connecting node. If the callback returns an error, then the iteration is
3196
// halted with the error propagated back up to the caller.
3197
//
3198
// Unknown policies are passed into the callback as nil values.
3199
//
3200
// If the caller wishes to re-use an existing boltdb transaction, then it
3201
// should be passed as the first argument.  Otherwise, the first argument should
3202
// be nil and a fresh transaction will be created to execute the graph
3203
// traversal.
3204
func (c *KVStore) ForEachNodeChannelTx(tx kvdb.RTx,
3205
        nodePub route.Vertex, cb func(kvdb.RTx, *models.ChannelEdgeInfo,
3206
                *models.ChannelEdgePolicy,
3207
                *models.ChannelEdgePolicy) error) error {
1,001✔
3208

1,001✔
3209
        return nodeTraversal(tx, nodePub[:], c.db, cb)
1,001✔
3210
}
1,001✔
3211

3212
// FetchOtherNode attempts to fetch the full LightningNode that's opposite of
3213
// the target node in the channel. This is useful when one knows the pubkey of
3214
// one of the nodes, and wishes to obtain the full LightningNode for the other
3215
// end of the channel.
3216
func (c *KVStore) FetchOtherNode(tx kvdb.RTx,
3217
        channel *models.ChannelEdgeInfo, thisNodeKey []byte) (
3218
        *models.LightningNode, error) {
3✔
3219

3✔
3220
        // Ensure that the node passed in is actually a member of the channel.
3✔
3221
        var targetNodeBytes [33]byte
3✔
3222
        switch {
3✔
3223
        case bytes.Equal(channel.NodeKey1Bytes[:], thisNodeKey):
3✔
3224
                targetNodeBytes = channel.NodeKey2Bytes
3✔
3225
        case bytes.Equal(channel.NodeKey2Bytes[:], thisNodeKey):
3✔
3226
                targetNodeBytes = channel.NodeKey1Bytes
3✔
UNCOV
3227
        default:
×
UNCOV
3228
                return nil, fmt.Errorf("node not participating in this channel")
×
3229
        }
3230

3231
        var targetNode *models.LightningNode
3✔
3232
        fetchNodeFunc := func(tx kvdb.RTx) error {
6✔
3233
                // First grab the nodes bucket which stores the mapping from
3✔
3234
                // pubKey to node information.
3✔
3235
                nodes := tx.ReadBucket(nodeBucket)
3✔
3236
                if nodes == nil {
3✔
UNCOV
3237
                        return ErrGraphNotFound
×
UNCOV
3238
                }
×
3239

3240
                node, err := fetchLightningNode(nodes, targetNodeBytes[:])
3✔
3241
                if err != nil {
3✔
3242
                        return err
×
3243
                }
×
3244

3245
                targetNode = &node
3✔
3246

3✔
3247
                return nil
3✔
3248
        }
3249

3250
        // If the transaction is nil, then we'll need to create a new one,
3251
        // otherwise we can use the existing db transaction.
3252
        var err error
3✔
3253
        if tx == nil {
3✔
UNCOV
3254
                err = kvdb.View(c.db, fetchNodeFunc, func() {
×
UNCOV
3255
                        targetNode = nil
×
UNCOV
3256
                })
×
3257
        } else {
3✔
3258
                err = fetchNodeFunc(tx)
3✔
3259
        }
3✔
3260

3261
        return targetNode, err
3✔
3262
}
3263

3264
// computeEdgePolicyKeys is a helper function that can be used to compute the
3265
// keys used to index the channel edge policy info for the two nodes of the
3266
// edge. The keys for node 1 and node 2 are returned respectively.
3267
func computeEdgePolicyKeys(info *models.ChannelEdgeInfo) ([]byte, []byte) {
25✔
3268
        var (
25✔
3269
                node1Key [33 + 8]byte
25✔
3270
                node2Key [33 + 8]byte
25✔
3271
        )
25✔
3272

25✔
3273
        copy(node1Key[:], info.NodeKey1Bytes[:])
25✔
3274
        copy(node2Key[:], info.NodeKey2Bytes[:])
25✔
3275

25✔
3276
        byteOrder.PutUint64(node1Key[33:], info.ChannelID)
25✔
3277
        byteOrder.PutUint64(node2Key[33:], info.ChannelID)
25✔
3278

25✔
3279
        return node1Key[:], node2Key[:]
25✔
3280
}
25✔
3281

3282
// FetchChannelEdgesByOutpoint attempts to lookup the two directed edges for
3283
// the channel identified by the funding outpoint. If the channel can't be
3284
// found, then ErrEdgeNotFound is returned. A struct which houses the general
3285
// information for the channel itself is returned as well as two structs that
3286
// contain the routing policies for the channel in either direction.
3287
func (c *KVStore) FetchChannelEdgesByOutpoint(op *wire.OutPoint) (
3288
        *models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
3289
        *models.ChannelEdgePolicy, error) {
14✔
3290

14✔
3291
        var (
14✔
3292
                edgeInfo *models.ChannelEdgeInfo
14✔
3293
                policy1  *models.ChannelEdgePolicy
14✔
3294
                policy2  *models.ChannelEdgePolicy
14✔
3295
        )
14✔
3296

14✔
3297
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
28✔
3298
                // First, grab the node bucket. This will be used to populate
14✔
3299
                // the Node pointers in each edge read from disk.
14✔
3300
                nodes := tx.ReadBucket(nodeBucket)
14✔
3301
                if nodes == nil {
14✔
UNCOV
3302
                        return ErrGraphNotFound
×
UNCOV
3303
                }
×
3304

3305
                // Next, grab the edge bucket which stores the edges, and also
3306
                // the index itself so we can group the directed edges together
3307
                // logically.
3308
                edges := tx.ReadBucket(edgeBucket)
14✔
3309
                if edges == nil {
14✔
UNCOV
3310
                        return ErrGraphNoEdgesFound
×
UNCOV
3311
                }
×
3312
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
14✔
3313
                if edgeIndex == nil {
14✔
UNCOV
3314
                        return ErrGraphNoEdgesFound
×
UNCOV
3315
                }
×
3316

3317
                // If the channel's outpoint doesn't exist within the outpoint
3318
                // index, then the edge does not exist.
3319
                chanIndex := edges.NestedReadBucket(channelPointBucket)
14✔
3320
                if chanIndex == nil {
14✔
UNCOV
3321
                        return ErrGraphNoEdgesFound
×
UNCOV
3322
                }
×
3323
                var b bytes.Buffer
14✔
3324
                if err := WriteOutpoint(&b, op); err != nil {
14✔
3325
                        return err
×
3326
                }
×
3327
                chanID := chanIndex.Get(b.Bytes())
14✔
3328
                if chanID == nil {
27✔
3329
                        return fmt.Errorf("%w: op=%v", ErrEdgeNotFound, op)
13✔
3330
                }
13✔
3331

3332
                // If the channel is found to exists, then we'll first retrieve
3333
                // the general information for the channel.
3334
                edge, err := fetchChanEdgeInfo(edgeIndex, chanID)
4✔
3335
                if err != nil {
4✔
3336
                        return fmt.Errorf("%w: chanID=%x", err, chanID)
×
3337
                }
×
3338
                edgeInfo = &edge
4✔
3339

4✔
3340
                // Once we have the information about the channels' parameters,
4✔
3341
                // we'll fetch the routing policies for each of the directed
4✔
3342
                // edges.
4✔
3343
                e1, e2, err := fetchChanEdgePolicies(edgeIndex, edges, chanID)
4✔
3344
                if err != nil {
4✔
UNCOV
3345
                        return fmt.Errorf("failed to find policy: %w", err)
×
UNCOV
3346
                }
×
3347

3348
                policy1 = e1
4✔
3349
                policy2 = e2
4✔
3350

4✔
3351
                return nil
4✔
3352
        }, func() {
14✔
3353
                edgeInfo = nil
14✔
3354
                policy1 = nil
14✔
3355
                policy2 = nil
14✔
3356
        })
14✔
3357
        if err != nil {
27✔
3358
                return nil, nil, nil, err
13✔
3359
        }
13✔
3360

3361
        return edgeInfo, policy1, policy2, nil
4✔
3362
}
3363

3364
// FetchChannelEdgesByID attempts to lookup the two directed edges for the
3365
// channel identified by the channel ID. If the channel can't be found, then
3366
// ErrEdgeNotFound is returned. A struct which houses the general information
3367
// for the channel itself is returned as well as two structs that contain the
3368
// routing policies for the channel in either direction.
3369
//
3370
// ErrZombieEdge can be returned if the edge is currently marked as a zombie
3371
// within the database. In this case, the ChannelEdgePolicy's will be nil, and
3372
// the ChannelEdgeInfo will only include the public keys of each node.
3373
func (c *KVStore) FetchChannelEdgesByID(chanID uint64) (
3374
        *models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
3375
        *models.ChannelEdgePolicy, error) {
27✔
3376

27✔
3377
        var (
27✔
3378
                edgeInfo  *models.ChannelEdgeInfo
27✔
3379
                policy1   *models.ChannelEdgePolicy
27✔
3380
                policy2   *models.ChannelEdgePolicy
27✔
3381
                channelID [8]byte
27✔
3382
        )
27✔
3383

27✔
3384
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
54✔
3385
                // First, grab the node bucket. This will be used to populate
27✔
3386
                // the Node pointers in each edge read from disk.
27✔
3387
                nodes := tx.ReadBucket(nodeBucket)
27✔
3388
                if nodes == nil {
27✔
UNCOV
3389
                        return ErrGraphNotFound
×
UNCOV
3390
                }
×
3391

3392
                // Next, grab the edge bucket which stores the edges, and also
3393
                // the index itself so we can group the directed edges together
3394
                // logically.
3395
                edges := tx.ReadBucket(edgeBucket)
27✔
3396
                if edges == nil {
27✔
UNCOV
3397
                        return ErrGraphNoEdgesFound
×
UNCOV
3398
                }
×
3399
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
27✔
3400
                if edgeIndex == nil {
27✔
UNCOV
3401
                        return ErrGraphNoEdgesFound
×
UNCOV
3402
                }
×
3403

3404
                byteOrder.PutUint64(channelID[:], chanID)
27✔
3405

27✔
3406
                // Now, attempt to fetch edge.
27✔
3407
                edge, err := fetchChanEdgeInfo(edgeIndex, channelID[:])
27✔
3408

27✔
3409
                // If it doesn't exist, we'll quickly check our zombie index to
27✔
3410
                // see if we've previously marked it as so.
27✔
3411
                if errors.Is(err, ErrEdgeNotFound) {
31✔
3412
                        // If the zombie index doesn't exist, or the edge is not
4✔
3413
                        // marked as a zombie within it, then we'll return the
4✔
3414
                        // original ErrEdgeNotFound error.
4✔
3415
                        zombieIndex := edges.NestedReadBucket(zombieBucket)
4✔
3416
                        if zombieIndex == nil {
4✔
3417
                                return ErrEdgeNotFound
×
UNCOV
3418
                        }
×
3419

3420
                        isZombie, pubKey1, pubKey2 := isZombieEdge(
4✔
3421
                                zombieIndex, chanID,
4✔
3422
                        )
4✔
3423
                        if !isZombie {
7✔
3424
                                return ErrEdgeNotFound
3✔
3425
                        }
3✔
3426

3427
                        // Otherwise, the edge is marked as a zombie, so we'll
3428
                        // populate the edge info with the public keys of each
3429
                        // party as this is the only information we have about
3430
                        // it and return an error signaling so.
3431
                        edgeInfo = &models.ChannelEdgeInfo{
4✔
3432
                                NodeKey1Bytes: pubKey1,
4✔
3433
                                NodeKey2Bytes: pubKey2,
4✔
3434
                        }
4✔
3435

4✔
3436
                        return ErrZombieEdge
4✔
3437
                }
3438

3439
                // Otherwise, we'll just return the error if any.
3440
                if err != nil {
26✔
UNCOV
3441
                        return err
×
UNCOV
3442
                }
×
3443

3444
                edgeInfo = &edge
26✔
3445

26✔
3446
                // Then we'll attempt to fetch the accompanying policies of this
26✔
3447
                // edge.
26✔
3448
                e1, e2, err := fetchChanEdgePolicies(
26✔
3449
                        edgeIndex, edges, channelID[:],
26✔
3450
                )
26✔
3451
                if err != nil {
26✔
UNCOV
3452
                        return err
×
UNCOV
3453
                }
×
3454

3455
                policy1 = e1
26✔
3456
                policy2 = e2
26✔
3457

26✔
3458
                return nil
26✔
3459
        }, func() {
27✔
3460
                edgeInfo = nil
27✔
3461
                policy1 = nil
27✔
3462
                policy2 = nil
27✔
3463
        })
27✔
3464
        if errors.Is(err, ErrZombieEdge) {
31✔
3465
                return edgeInfo, nil, nil, err
4✔
3466
        }
4✔
3467
        if err != nil {
29✔
3468
                return nil, nil, nil, err
3✔
3469
        }
3✔
3470

3471
        return edgeInfo, policy1, policy2, nil
26✔
3472
}
3473
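// exampleFetchEdgeByID is an editor-added sketch of the zombie contract
// described above: on ErrZombieEdge the edge info is still returned (with only
// the node keys populated) while both policies are nil, so callers must check
// for that error before relying on the policies.
func exampleFetchEdgeByID(store *KVStore, chanID uint64) error {
        info, policy1, policy2, err := store.FetchChannelEdgesByID(chanID)
        switch {
        case errors.Is(err, ErrZombieEdge):
                log.Debugf("channel %v is a zombie between %x and %x",
                        chanID, info.NodeKey1Bytes, info.NodeKey2Bytes)
                return nil

        case err != nil:
                return err
        }

        log.Debugf("channel %v has policies: %v/%v", chanID,
                policy1 != nil, policy2 != nil)

        return nil
}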

3474
// IsPublicNode is a helper method that determines whether the node with the
3475
// given public key is seen as a public node in the graph from the graph's
3476
// source node's point of view.
3477
func (c *KVStore) IsPublicNode(pubKey [33]byte) (bool, error) {
16✔
3478
        var nodeIsPublic bool
16✔
3479
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
32✔
3480
                nodes := tx.ReadBucket(nodeBucket)
16✔
3481
                if nodes == nil {
16✔
UNCOV
3482
                        return ErrGraphNodesNotFound
×
UNCOV
3483
                }
×
3484
                ourPubKey := nodes.Get(sourceKey)
16✔
3485
                if ourPubKey == nil {
16✔
UNCOV
3486
                        return ErrSourceNodeNotSet
×
UNCOV
3487
                }
×
3488
                node, err := fetchLightningNode(nodes, pubKey[:])
16✔
3489
                if err != nil {
16✔
UNCOV
3490
                        return err
×
UNCOV
3491
                }
×
3492

3493
                nodeIsPublic, err = c.isPublic(tx, node.PubKeyBytes, ourPubKey)
16✔
3494

16✔
3495
                return err
16✔
3496
        }, func() {
16✔
3497
                nodeIsPublic = false
16✔
3498
        })
16✔
3499
        if err != nil {
16✔
UNCOV
3500
                return false, err
×
3501
        }
×
3502

3503
        return nodeIsPublic, nil
16✔
3504
}
3505

3506
// genMultiSigP2WSH generates the p2wsh'd multisig script for 2 of 2 pubkeys.
3507
func genMultiSigP2WSH(aPub, bPub []byte) ([]byte, error) {
49✔
3508
        witnessScript, err := input.GenMultiSigScript(aPub, bPub)
49✔
3509
        if err != nil {
49✔
UNCOV
3510
                return nil, err
×
UNCOV
3511
        }
×
3512

3513
        // With the witness script generated, we'll now turn it into a p2wsh
3514
        // script:
3515
        //  * OP_0 <sha256(script)>
3516
        bldr := txscript.NewScriptBuilder(
49✔
3517
                txscript.WithScriptAllocSize(input.P2WSHSize),
49✔
3518
        )
49✔
3519
        bldr.AddOp(txscript.OP_0)
49✔
3520
        scriptHash := sha256.Sum256(witnessScript)
49✔
3521
        bldr.AddData(scriptHash[:])
49✔
3522

49✔
3523
        return bldr.Script()
49✔
3524
}
3525
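// exampleFundingScript is an editor-added sketch: derive the watch-only
// funding pkScript for a channel from the two bitcoin keys stored in its edge
// info, which is how ChannelView below builds its EdgePoints.
func exampleFundingScript(info *models.ChannelEdgeInfo) ([]byte, error) {
        return genMultiSigP2WSH(
                info.BitcoinKey1Bytes[:], info.BitcoinKey2Bytes[:],
        )
}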

3526
// EdgePoint couples the outpoint of a channel with the funding script that it
3527
// creates. The FilteredChainView will use this to watch for spends of this
3528
// edge point on chain. We require both of these values as depending on the
3529
// concrete implementation, either the pkScript or the outpoint will be used.
3530
type EdgePoint struct {
3531
        // FundingPkScript is the p2wsh multi-sig script of the target channel.
3532
        FundingPkScript []byte
3533

3534
        // OutPoint is the outpoint of the target channel.
3535
        OutPoint wire.OutPoint
3536
}
3537

3538
// String returns a human readable version of the target EdgePoint. We return
3539
// the outpoint directly as it is enough to uniquely identify the edge point.
UNCOV
3540
func (e *EdgePoint) String() string {
×
UNCOV
3541
        return e.OutPoint.String()
×
UNCOV
3542
}
×
3543

3544
// ChannelView returns the verifiable edge information for each active channel
3545
// within the known channel graph. The set of UTXO's (along with their scripts)
3546
// returned are the ones that need to be watched on chain to detect channel
3547
// closes on the resident blockchain.
3548
func (c *KVStore) ChannelView() ([]EdgePoint, error) {
25✔
3549
        var edgePoints []EdgePoint
25✔
3550
        if err := kvdb.View(c.db, func(tx kvdb.RTx) error {
50✔
3551
                // We're going to iterate over the entire channel index, so
25✔
3552
                // we'll need to fetch the edgeBucket to get to the index as
25✔
3553
                // it's a sub-bucket.
25✔
3554
                edges := tx.ReadBucket(edgeBucket)
25✔
3555
                if edges == nil {
25✔
3556
                        return ErrGraphNoEdgesFound
×
3557
                }
×
3558
                chanIndex := edges.NestedReadBucket(channelPointBucket)
25✔
3559
                if chanIndex == nil {
25✔
UNCOV
3560
                        return ErrGraphNoEdgesFound
×
UNCOV
3561
                }
×
3562
                edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
25✔
3563
                if edgeIndex == nil {
25✔
UNCOV
3564
                        return ErrGraphNoEdgesFound
×
UNCOV
3565
                }
×
3566

3567
                // Once we have the proper bucket, we'll range over each key
3568
                // (which is the channel point for the channel) and decode it,
3569
                // accumulating each entry.
3570
                return chanIndex.ForEach(
25✔
3571
                        func(chanPointBytes, chanID []byte) error {
70✔
3572
                                chanPointReader := bytes.NewReader(
45✔
3573
                                        chanPointBytes,
45✔
3574
                                )
45✔
3575

45✔
3576
                                var chanPoint wire.OutPoint
45✔
3577
                                err := ReadOutpoint(chanPointReader, &chanPoint)
45✔
3578
                                if err != nil {
45✔
3579
                                        return err
×
3580
                                }
×
3581

3582
                                edgeInfo, err := fetchChanEdgeInfo(
45✔
3583
                                        edgeIndex, chanID,
45✔
3584
                                )
45✔
3585
                                if err != nil {
45✔
UNCOV
3586
                                        return err
×
UNCOV
3587
                                }
×
3588

3589
                                pkScript, err := genMultiSigP2WSH(
45✔
3590
                                        edgeInfo.BitcoinKey1Bytes[:],
45✔
3591
                                        edgeInfo.BitcoinKey2Bytes[:],
45✔
3592
                                )
45✔
3593
                                if err != nil {
45✔
3594
                                        return err
×
3595
                                }
×
3596

3597
                                edgePoints = append(edgePoints, EdgePoint{
45✔
3598
                                        FundingPkScript: pkScript,
45✔
3599
                                        OutPoint:        chanPoint,
45✔
3600
                                })
45✔
3601

45✔
3602
                                return nil
45✔
3603
                        },
3604
                )
3605
        }, func() {
25✔
3606
                edgePoints = nil
25✔
3607
        }); err != nil {
25✔
UNCOV
3608
                return nil, err
×
3609
        }
×
3610

3611
        return edgePoints, nil
25✔
3612
}
3613
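// Illustrative sketch (not part of the original file): ChannelView is
// typically used to seed on-chain spend watching. The hypothetical helper
// below simply collects the outpoints that would need to be watched.
func collectWatchedOutPoints(store *KVStore) ([]wire.OutPoint, error) {
        edgePoints, err := store.ChannelView()
        if err != nil {
                return nil, err
        }

        outPoints := make([]wire.OutPoint, 0, len(edgePoints))
        for _, edgePoint := range edgePoints {
                // Depending on the chain view implementation, either the
                // funding pkScript or the outpoint is used; here we only
                // keep the outpoints.
                outPoints = append(outPoints, edgePoint.OutPoint)
        }

        return outPoints, nil
}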

3614
// MarkEdgeZombie attempts to mark a channel identified by its channel ID as a
3615
// zombie. This method is used on an ad-hoc basis, when channels need to be
3616
// marked as zombies outside the normal pruning cycle.
3617
func (c *KVStore) MarkEdgeZombie(chanID uint64,
3618
        pubKey1, pubKey2 [33]byte) error {
119✔
3619

119✔
3620
        c.cacheMu.Lock()
119✔
3621
        defer c.cacheMu.Unlock()
119✔
3622

119✔
3623
        err := kvdb.Batch(c.db, func(tx kvdb.RwTx) error {
238✔
3624
                edges := tx.ReadWriteBucket(edgeBucket)
119✔
3625
                if edges == nil {
119✔
UNCOV
3626
                        return ErrGraphNoEdgesFound
×
UNCOV
3627
                }
×
3628
                zombieIndex, err := edges.CreateBucketIfNotExists(zombieBucket)
119✔
3629
                if err != nil {
119✔
UNCOV
3630
                        return fmt.Errorf("unable to create zombie "+
×
UNCOV
3631
                                "bucket: %w", err)
×
UNCOV
3632
                }
×
3633

3634
                if c.graphCache != nil {
238✔
3635
                        c.graphCache.RemoveChannel(pubKey1, pubKey2, chanID)
119✔
3636
                }
119✔
3637

3638
                return markEdgeZombie(zombieIndex, chanID, pubKey1, pubKey2)
119✔
3639
        })
3640
        if err != nil {
119✔
3641
                return err
×
3642
        }
×
3643

3644
        c.rejectCache.remove(chanID)
119✔
3645
        c.chanCache.remove(chanID)
119✔
3646

119✔
3647
        return nil
119✔
3648
}
3649

3650
// markEdgeZombie marks an edge as a zombie within our zombie index. The public
3651
// keys should represent the node public keys of the two parties involved in the
3652
// edge.
3653
func markEdgeZombie(zombieIndex kvdb.RwBucket, chanID uint64, pubKey1,
3654
        pubKey2 [33]byte) error {
144✔
3655

144✔
3656
        var k [8]byte
144✔
3657
        byteOrder.PutUint64(k[:], chanID)
144✔
3658

144✔
3659
        var v [66]byte
144✔
3660
        copy(v[:33], pubKey1[:])
144✔
3661
        copy(v[33:], pubKey2[:])
144✔
3662

144✔
3663
        return zombieIndex.Put(k[:], v[:])
144✔
3664
}
144✔
3665
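// Illustrative sketch (not part of the original file): a zombie index entry
// maps an 8-byte big-endian channel ID to the 66-byte concatenation of the
// two node public keys written by markEdgeZombie. decodeZombieEntry is a
// hypothetical helper showing how such a value could be decoded.
func decodeZombieEntry(value []byte) ([33]byte, [33]byte, error) {
        var pubKey1, pubKey2 [33]byte
        if len(value) != 66 {
                return pubKey1, pubKey2, fmt.Errorf("expected 66 bytes, "+
                        "got %d", len(value))
        }

        copy(pubKey1[:], value[:33])
        copy(pubKey2[:], value[33:])

        return pubKey1, pubKey2, nil
}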

3666
// MarkEdgeLive clears an edge from our zombie index, deeming it as live.
3667
func (c *KVStore) MarkEdgeLive(chanID uint64) error {
2✔
3668
        c.cacheMu.Lock()
2✔
3669
        defer c.cacheMu.Unlock()
2✔
3670

2✔
3671
        return c.markEdgeLiveUnsafe(nil, chanID)
2✔
3672
}
2✔
3673

3674
// markEdgeLiveUnsafe clears an edge from the zombie index. This method can be
3675
// called with an existing kvdb.RwTx or the argument can be set to nil in which
3676
// case a new transaction will be created.
3677
//
3678
// NOTE: this method MUST only be called if the cacheMu has already been
3679
// acquired.
3680
func (c *KVStore) markEdgeLiveUnsafe(tx kvdb.RwTx, chanID uint64) error {
22✔
3681
        dbFn := func(tx kvdb.RwTx) error {
44✔
3682
                edges := tx.ReadWriteBucket(edgeBucket)
22✔
3683
                if edges == nil {
22✔
UNCOV
3684
                        return ErrGraphNoEdgesFound
×
UNCOV
3685
                }
×
3686
                zombieIndex := edges.NestedReadWriteBucket(zombieBucket)
22✔
3687
                if zombieIndex == nil {
22✔
UNCOV
3688
                        return nil
×
UNCOV
3689
                }
×
3690

3691
                var k [8]byte
22✔
3692
                byteOrder.PutUint64(k[:], chanID)
22✔
3693

22✔
3694
                if len(zombieIndex.Get(k[:])) == 0 {
23✔
3695
                        return ErrZombieEdgeNotFound
1✔
3696
                }
1✔
3697

3698
                return zombieIndex.Delete(k[:])
21✔
3699
        }
3700

3701
        // If the transaction is nil, we'll create a new one. Otherwise, we use
3702
        // the existing transaction.
3703
        var err error
22✔
3704
        if tx == nil {
24✔
3705
                err = kvdb.Update(c.db, dbFn, func() {})
4✔
3706
        } else {
20✔
3707
                err = dbFn(tx)
20✔
3708
        }
20✔
3709
        if err != nil {
23✔
3710
                return err
1✔
3711
        }
1✔
3712

3713
        c.rejectCache.remove(chanID)
21✔
3714
        c.chanCache.remove(chanID)
21✔
3715

21✔
3716
        // We need to add the channel back into our graph cache; otherwise we
21✔
3717
        // won't use it for path finding.
21✔
3718
        if c.graphCache != nil {
42✔
3719
                edgeInfos, err := c.fetchChanInfos(tx, []uint64{chanID})
21✔
3720
                if err != nil {
21✔
UNCOV
3721
                        return err
×
UNCOV
3722
                }
×
3723

3724
                for _, edgeInfo := range edgeInfos {
21✔
UNCOV
3725
                        c.graphCache.AddChannel(
×
UNCOV
3726
                                edgeInfo.Info, edgeInfo.Policy1,
×
UNCOV
3727
                                edgeInfo.Policy2,
×
UNCOV
3728
                        )
×
UNCOV
3729
                }
×
3730
        }
3731

3732
        return nil
21✔
3733
}
3734

3735
// IsZombieEdge returns whether the edge is considered a zombie. If it is a
3736
// zombie, then the two node public keys corresponding to this edge are also
3737
// returned.
3738
func (c *KVStore) IsZombieEdge(chanID uint64) (bool, [33]byte, [33]byte) {
5✔
3739
        var (
5✔
3740
                isZombie         bool
5✔
3741
                pubKey1, pubKey2 [33]byte
5✔
3742
        )
5✔
3743

5✔
3744
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
10✔
3745
                edges := tx.ReadBucket(edgeBucket)
5✔
3746
                if edges == nil {
5✔
UNCOV
3747
                        return ErrGraphNoEdgesFound
×
UNCOV
3748
                }
×
3749
                zombieIndex := edges.NestedReadBucket(zombieBucket)
5✔
3750
                if zombieIndex == nil {
5✔
UNCOV
3751
                        return nil
×
UNCOV
3752
                }
×
3753

3754
                isZombie, pubKey1, pubKey2 = isZombieEdge(zombieIndex, chanID)
5✔
3755

5✔
3756
                return nil
5✔
3757
        }, func() {
5✔
3758
                isZombie = false
5✔
3759
                pubKey1 = [33]byte{}
5✔
3760
                pubKey2 = [33]byte{}
5✔
3761
        })
5✔
3762
        if err != nil {
5✔
3763
                return false, [33]byte{}, [33]byte{}
×
UNCOV
3764
        }
×
3765

3766
        return isZombie, pubKey1, pubKey2
5✔
3767
}
3768

3769
// isZombieEdge returns whether an entry exists for the given channel in the
3770
// zombie index. If an entry exists, then the two node public keys corresponding
3771
// to this edge are also returned.
3772
func isZombieEdge(zombieIndex kvdb.RBucket,
3773
        chanID uint64) (bool, [33]byte, [33]byte) {
196✔
3774

196✔
3775
        var k [8]byte
196✔
3776
        byteOrder.PutUint64(k[:], chanID)
196✔
3777

196✔
3778
        v := zombieIndex.Get(k[:])
196✔
3779
        if v == nil {
308✔
3780
                return false, [33]byte{}, [33]byte{}
112✔
3781
        }
112✔
3782

3783
        var pubKey1, pubKey2 [33]byte
87✔
3784
        copy(pubKey1[:], v[:33])
87✔
3785
        copy(pubKey2[:], v[33:])
87✔
3786

87✔
3787
        return true, pubKey1, pubKey2
87✔
3788
}
3789

3790
// NumZombies returns the current number of zombie channels in the graph.
3791
func (c *KVStore) NumZombies() (uint64, error) {
4✔
3792
        var numZombies uint64
4✔
3793
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
8✔
3794
                edges := tx.ReadBucket(edgeBucket)
4✔
3795
                if edges == nil {
4✔
UNCOV
3796
                        return nil
×
UNCOV
3797
                }
×
3798
                zombieIndex := edges.NestedReadBucket(zombieBucket)
4✔
3799
                if zombieIndex == nil {
4✔
UNCOV
3800
                        return nil
×
UNCOV
3801
                }
×
3802

3803
                return zombieIndex.ForEach(func(_, _ []byte) error {
6✔
3804
                        numZombies++
2✔
3805
                        return nil
2✔
3806
                })
2✔
3807
        }, func() {
4✔
3808
                numZombies = 0
4✔
3809
        })
4✔
3810
        if err != nil {
4✔
3811
                return 0, err
×
3812
        }
×
3813

3814
        return numZombies, nil
4✔
3815
}
3816

3817
// PutClosedScid stores a SCID for a closed channel in the database. This is so
3818
// that we can ignore channel announcements that we know to be closed without
3819
// having to validate them and fetch a block.
3820
func (c *KVStore) PutClosedScid(scid lnwire.ShortChannelID) error {
1✔
3821
        return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
2✔
3822
                closedScids, err := tx.CreateTopLevelBucket(closedScidBucket)
1✔
3823
                if err != nil {
1✔
UNCOV
3824
                        return err
×
UNCOV
3825
                }
×
3826

3827
                var k [8]byte
1✔
3828
                byteOrder.PutUint64(k[:], scid.ToUint64())
1✔
3829

1✔
3830
                return closedScids.Put(k[:], []byte{})
1✔
3831
        }, func() {})
1✔
3832
}
3833

3834
// IsClosedScid checks whether a channel identified by the passed in scid is
3835
// closed. This helps avoid having to perform expensive validation checks.
3836
// TODO: Add an LRU cache to cut down on disk reads.
3837
func (c *KVStore) IsClosedScid(scid lnwire.ShortChannelID) (bool, error) {
5✔
3838
        var isClosed bool
5✔
3839
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
10✔
3840
                closedScids := tx.ReadBucket(closedScidBucket)
5✔
3841
                if closedScids == nil {
5✔
UNCOV
3842
                        return ErrClosedScidsNotFound
×
UNCOV
3843
                }
×
3844

3845
                var k [8]byte
5✔
3846
                byteOrder.PutUint64(k[:], scid.ToUint64())
5✔
3847

5✔
3848
                if closedScids.Get(k[:]) != nil {
6✔
3849
                        isClosed = true
1✔
3850
                        return nil
1✔
3851
                }
1✔
3852

3853
                return nil
4✔
3854
        }, func() {
5✔
3855
                isClosed = false
5✔
3856
        })
5✔
3857
        if err != nil {
5✔
3858
                return false, err
×
UNCOV
3859
        }
×
3860

3861
        return isClosed, nil
5✔
3862
}
3863
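// Illustrative sketch (not part of the original file): PutClosedScid and
// IsClosedScid together let callers skip announcements for channels that are
// already known to be closed. markAndCheckClosed is a hypothetical helper
// showing the intended call pattern.
func markAndCheckClosed(store *KVStore, scid lnwire.ShortChannelID) (bool,
        error) {

        if err := store.PutClosedScid(scid); err != nil {
                return false, err
        }

        // A subsequent lookup for the same SCID now reports it as closed.
        return store.IsClosedScid(scid)
}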

3864
// GraphSession will provide the call-back with access to a NodeTraverser
3865
// instance which can be used to perform queries against the channel graph. If
3866
// the graph cache is not enabled, then the call-back will be provided with
3867
// access to the graph via a consistent read-only transaction.
3868
func (c *KVStore) GraphSession(cb func(graph NodeTraverser) error) error {
136✔
3869
        if c.graphCache != nil {
218✔
3870
                return cb(&nodeTraverserSession{db: c})
82✔
3871
        }
82✔
3872

3873
        return c.db.View(func(tx walletdb.ReadTx) error {
108✔
3874
                return cb(&nodeTraverserSession{
54✔
3875
                        db: c,
54✔
3876
                        tx: tx,
54✔
3877
                })
54✔
3878
        }, func() {})
108✔
3879
}
3880
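// Illustrative sketch (not part of the original file): GraphSession gives the
// callback a consistent view of the graph for the duration of a query.
// countDirectChannels is a hypothetical helper that counts the channels
// directly connected to the given node.
func countDirectChannels(store *KVStore, nodePub route.Vertex) (int, error) {
        var numChannels int
        err := store.GraphSession(func(graph NodeTraverser) error {
                return graph.ForEachNodeDirectedChannel(
                        nodePub, func(_ *DirectedChannel) error {
                                numChannels++
                                return nil
                        },
                )
        })

        return numChannels, err
}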

3881
// nodeTraverserSession implements the NodeTraverser interface but with a
3882
// backing read only transaction for a consistent view of the graph in the case
3883
// where the graph Cache has not been enabled.
3884
type nodeTraverserSession struct {
3885
        tx kvdb.RTx
3886
        db *KVStore
3887
}
3888

3889
// ForEachNodeDirectedChannel calls the callback for every channel of the given
3890
// node.
3891
//
3892
// NOTE: Part of the NodeTraverser interface.
3893
func (c *nodeTraverserSession) ForEachNodeDirectedChannel(nodePub route.Vertex,
3894
        cb func(channel *DirectedChannel) error) error {
593✔
3895

593✔
3896
        return c.db.forEachNodeDirectedChannel(c.tx, nodePub, cb)
593✔
3897
}
593✔
3898

3899
// FetchNodeFeatures returns the features of the given node. If the node is
3900
// unknown, assume no additional features are supported.
3901
//
3902
// NOTE: Part of the NodeTraverser interface.
3903
func (c *nodeTraverserSession) FetchNodeFeatures(nodePub route.Vertex) (
3904
        *lnwire.FeatureVector, error) {
623✔
3905

623✔
3906
        return c.db.fetchNodeFeatures(c.tx, nodePub)
623✔
3907
}
623✔
3908

3909
func putLightningNode(nodeBucket, aliasBucket, updateIndex kvdb.RwBucket,
3910
        node *models.LightningNode) error {
989✔
3911

989✔
3912
        var (
989✔
3913
                scratch [16]byte
989✔
3914
                b       bytes.Buffer
989✔
3915
        )
989✔
3916

989✔
3917
        pub, err := node.PubKey()
989✔
3918
        if err != nil {
989✔
UNCOV
3919
                return err
×
UNCOV
3920
        }
×
3921
        nodePub := pub.SerializeCompressed()
989✔
3922

989✔
3923
        // If the node has the update time set, write it, else write 0.
989✔
3924
        updateUnix := uint64(0)
989✔
3925
        if node.LastUpdate.Unix() > 0 {
1,855✔
3926
                updateUnix = uint64(node.LastUpdate.Unix())
866✔
3927
        }
866✔
3928

3929
        byteOrder.PutUint64(scratch[:8], updateUnix)
989✔
3930
        if _, err := b.Write(scratch[:8]); err != nil {
989✔
UNCOV
3931
                return err
×
UNCOV
3932
        }
×
3933

3934
        if _, err := b.Write(nodePub); err != nil {
989✔
3935
                return err
×
UNCOV
3936
        }
×
3937

3938
        // If we got a node announcement for this node, we will have the rest
3939
        // of the data available. If not we don't have more data to write.
3940
        if !node.HaveNodeAnnouncement {
1,062✔
3941
                // Write HaveNodeAnnouncement=0.
73✔
3942
                byteOrder.PutUint16(scratch[:2], 0)
73✔
3943
                if _, err := b.Write(scratch[:2]); err != nil {
73✔
UNCOV
3944
                        return err
×
UNCOV
3945
                }
×
3946

3947
                return nodeBucket.Put(nodePub, b.Bytes())
73✔
3948
        }
3949

3950
        // Write HaveNodeAnnouncement=1.
3951
        byteOrder.PutUint16(scratch[:2], 1)
919✔
3952
        if _, err := b.Write(scratch[:2]); err != nil {
919✔
UNCOV
3953
                return err
×
UNCOV
3954
        }
×
3955

3956
        if err := binary.Write(&b, byteOrder, node.Color.R); err != nil {
919✔
UNCOV
3957
                return err
×
UNCOV
3958
        }
×
3959
        if err := binary.Write(&b, byteOrder, node.Color.G); err != nil {
919✔
3960
                return err
×
UNCOV
3961
        }
×
3962
        if err := binary.Write(&b, byteOrder, node.Color.B); err != nil {
919✔
UNCOV
3963
                return err
×
UNCOV
3964
        }
×
3965

3966
        if err := wire.WriteVarString(&b, 0, node.Alias); err != nil {
919✔
UNCOV
3967
                return err
×
3968
        }
×
3969

3970
        if err := node.Features.Encode(&b); err != nil {
919✔
UNCOV
3971
                return err
×
3972
        }
×
3973

3974
        numAddresses := uint16(len(node.Addresses))
919✔
3975
        byteOrder.PutUint16(scratch[:2], numAddresses)
919✔
3976
        if _, err := b.Write(scratch[:2]); err != nil {
919✔
UNCOV
3977
                return err
×
3978
        }
×
3979

3980
        for _, address := range node.Addresses {
2,066✔
3981
                if err := SerializeAddr(&b, address); err != nil {
1,147✔
3982
                        return err
×
3983
                }
×
3984
        }
3985

3986
        sigLen := len(node.AuthSigBytes)
919✔
3987
        if sigLen > 80 {
919✔
UNCOV
3988
                return fmt.Errorf("max sig len allowed is 80, had %v",
×
UNCOV
3989
                        sigLen)
×
UNCOV
3990
        }
×
3991

3992
        err = wire.WriteVarBytes(&b, 0, node.AuthSigBytes)
919✔
3993
        if err != nil {
919✔
UNCOV
3994
                return err
×
UNCOV
3995
        }
×
3996

3997
        if len(node.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes {
919✔
3998
                return ErrTooManyExtraOpaqueBytes(len(node.ExtraOpaqueData))
×
UNCOV
3999
        }
×
4000
        err = wire.WriteVarBytes(&b, 0, node.ExtraOpaqueData)
919✔
4001
        if err != nil {
919✔
UNCOV
4002
                return err
×
4003
        }
×
4004

4005
        if err := aliasBucket.Put(nodePub, []byte(node.Alias)); err != nil {
919✔
UNCOV
4006
                return err
×
UNCOV
4007
        }
×
4008

4009
        // With the alias bucket updated, we'll now update the index that
4010
        // tracks the time series of node updates.
4011
        var indexKey [8 + 33]byte
919✔
4012
        byteOrder.PutUint64(indexKey[:8], updateUnix)
919✔
4013
        copy(indexKey[8:], nodePub)
919✔
4014

919✔
4015
        // If there was already an old index entry for this node, then we'll
919✔
4016
        // delete the old one before we write the new entry.
919✔
4017
        if nodeBytes := nodeBucket.Get(nodePub); nodeBytes != nil {
1,026✔
4018
                // Extract out the old update time to we can reconstruct the
107✔
4019
                // prior index key to delete it from the index.
107✔
4020
                oldUpdateTime := nodeBytes[:8]
107✔
4021

107✔
4022
                var oldIndexKey [8 + 33]byte
107✔
4023
                copy(oldIndexKey[:8], oldUpdateTime)
107✔
4024
                copy(oldIndexKey[8:], nodePub)
107✔
4025

107✔
4026
                if err := updateIndex.Delete(oldIndexKey[:]); err != nil {
107✔
UNCOV
4027
                        return err
×
UNCOV
4028
                }
×
4029
        }
4030

4031
        if err := updateIndex.Put(indexKey[:], nil); err != nil {
919✔
UNCOV
4032
                return err
×
UNCOV
4033
        }
×
4034

4035
        return nodeBucket.Put(nodePub, b.Bytes())
919✔
4036
}
4037
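// Illustrative sketch (not part of the original file): the node update index
// written above keys each entry by the 8-byte big-endian last-update
// timestamp followed by the 33-byte compressed node public key.
// nodeUpdateIndexKey is a hypothetical helper mirroring that layout; note
// that the real code writes a zero timestamp when the update time is unset.
func nodeUpdateIndexKey(updateTime time.Time, nodePub [33]byte) [8 + 33]byte {
        var indexKey [8 + 33]byte
        byteOrder.PutUint64(indexKey[:8], uint64(updateTime.Unix()))
        copy(indexKey[8:], nodePub[:])

        return indexKey
}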

4038
func fetchLightningNode(nodeBucket kvdb.RBucket,
4039
        nodePub []byte) (models.LightningNode, error) {
3,622✔
4040

3,622✔
4041
        nodeBytes := nodeBucket.Get(nodePub)
3,622✔
4042
        if nodeBytes == nil {
3,694✔
4043
                return models.LightningNode{}, ErrGraphNodeNotFound
72✔
4044
        }
72✔
4045

4046
        nodeReader := bytes.NewReader(nodeBytes)
3,553✔
4047

3,553✔
4048
        return deserializeLightningNode(nodeReader)
3,553✔
4049
}
4050

4051
func deserializeLightningNodeCacheable(r io.Reader) (route.Vertex,
4052
        *lnwire.FeatureVector, error) {
123✔
4053

123✔
4054
        var (
123✔
4055
                pubKey      route.Vertex
123✔
4056
                features    = lnwire.EmptyFeatureVector()
123✔
4057
                nodeScratch [8]byte
123✔
4058
        )
123✔
4059

123✔
4060
        // Skip ahead:
123✔
4061
        // - LastUpdate (8 bytes)
123✔
4062
        if _, err := r.Read(nodeScratch[:]); err != nil {
123✔
UNCOV
4063
                return pubKey, nil, err
×
UNCOV
4064
        }
×
4065

4066
        if _, err := io.ReadFull(r, pubKey[:]); err != nil {
123✔
UNCOV
4067
                return pubKey, nil, err
×
UNCOV
4068
        }
×
4069

4070
        // Read the node announcement flag.
4071
        if _, err := r.Read(nodeScratch[:2]); err != nil {
123✔
UNCOV
4072
                return pubKey, nil, err
×
UNCOV
4073
        }
×
4074
        hasNodeAnn := byteOrder.Uint16(nodeScratch[:2])
123✔
4075

123✔
4076
        // The rest of the data is optional, and will only be there if we got a
123✔
4077
        // node announcement for this node.
123✔
4078
        if hasNodeAnn == 0 {
126✔
4079
                return pubKey, features, nil
3✔
4080
        }
3✔
4081

4082
        // We did get a node announcement for this node, so we'll have the rest
4083
        // of the data available.
4084
        var rgb uint8
123✔
4085
        if err := binary.Read(r, byteOrder, &rgb); err != nil {
123✔
UNCOV
4086
                return pubKey, nil, err
×
4087
        }
×
4088
        if err := binary.Read(r, byteOrder, &rgb); err != nil {
123✔
UNCOV
4089
                return pubKey, nil, err
×
UNCOV
4090
        }
×
4091
        if err := binary.Read(r, byteOrder, &rgb); err != nil {
123✔
UNCOV
4092
                return pubKey, nil, err
×
UNCOV
4093
        }
×
4094

4095
        if _, err := wire.ReadVarString(r, 0); err != nil {
123✔
UNCOV
4096
                return pubKey, nil, err
×
UNCOV
4097
        }
×
4098

4099
        if err := features.Decode(r); err != nil {
123✔
UNCOV
4100
                return pubKey, nil, err
×
4101
        }
×
4102

4103
        return pubKey, features, nil
123✔
4104
}
4105
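// Illustrative sketch (not part of the original file): every serialized node
// record starts with a fixed prefix of the 8-byte last-update timestamp, the
// 33-byte compressed public key and a 2-byte "have node announcement" flag.
// nodePubKeyFromRaw is a hypothetical helper that extracts just the public
// key from such a record without deserializing the rest.
func nodePubKeyFromRaw(nodeBytes []byte) (route.Vertex, error) {
        var pubKey route.Vertex
        if len(nodeBytes) < 8+33 {
                return pubKey, fmt.Errorf("node record too short: %d bytes",
                        len(nodeBytes))
        }

        // Skip the 8-byte timestamp and copy the compressed public key.
        copy(pubKey[:], nodeBytes[8:8+33])

        return pubKey, nil
}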

4106
func deserializeLightningNode(r io.Reader) (models.LightningNode, error) {
8,303✔
4107
        var (
8,303✔
4108
                node    models.LightningNode
8,303✔
4109
                scratch [8]byte
8,303✔
4110
                err     error
8,303✔
4111
        )
8,303✔
4112

8,303✔
4113
        // Always populate a feature vector, even if we don't have a node
8,303✔
4114
        // announcement and short circuit below.
8,303✔
4115
        node.Features = lnwire.EmptyFeatureVector()
8,303✔
4116

8,303✔
4117
        if _, err := r.Read(scratch[:]); err != nil {
8,303✔
UNCOV
4118
                return models.LightningNode{}, err
×
UNCOV
4119
        }
×
4120

4121
        unix := int64(byteOrder.Uint64(scratch[:]))
8,303✔
4122
        node.LastUpdate = time.Unix(unix, 0)
8,303✔
4123

8,303✔
4124
        if _, err := io.ReadFull(r, node.PubKeyBytes[:]); err != nil {
8,303✔
UNCOV
4125
                return models.LightningNode{}, err
×
UNCOV
4126
        }
×
4127

4128
        if _, err := r.Read(scratch[:2]); err != nil {
8,303✔
UNCOV
4129
                return models.LightningNode{}, err
×
UNCOV
4130
        }
×
4131

4132
        hasNodeAnn := byteOrder.Uint16(scratch[:2])
8,303✔
4133
        if hasNodeAnn == 1 {
16,464✔
4134
                node.HaveNodeAnnouncement = true
8,161✔
4135
        } else {
8,306✔
4136
                node.HaveNodeAnnouncement = false
145✔
4137
        }
145✔
4138

4139
        // The rest of the data is optional, and will only be there if we got a
4140
        // node announcement for this node.
4141
        if !node.HaveNodeAnnouncement {
8,448✔
4142
                return node, nil
145✔
4143
        }
145✔
4144

4145
        // We did get a node announcement for this node, so we'll have the rest
4146
        // of the data available.
4147
        if err := binary.Read(r, byteOrder, &node.Color.R); err != nil {
8,161✔
UNCOV
4148
                return models.LightningNode{}, err
×
UNCOV
4149
        }
×
4150
        if err := binary.Read(r, byteOrder, &node.Color.G); err != nil {
8,161✔
UNCOV
4151
                return models.LightningNode{}, err
×
UNCOV
4152
        }
×
4153
        if err := binary.Read(r, byteOrder, &node.Color.B); err != nil {
8,161✔
UNCOV
4154
                return models.LightningNode{}, err
×
UNCOV
4155
        }
×
4156

4157
        node.Alias, err = wire.ReadVarString(r, 0)
8,161✔
4158
        if err != nil {
8,161✔
UNCOV
4159
                return models.LightningNode{}, err
×
UNCOV
4160
        }
×
4161

4162
        err = node.Features.Decode(r)
8,161✔
4163
        if err != nil {
8,161✔
4164
                return models.LightningNode{}, err
×
UNCOV
4165
        }
×
4166

4167
        if _, err := r.Read(scratch[:2]); err != nil {
8,161✔
UNCOV
4168
                return models.LightningNode{}, err
×
4169
        }
×
4170
        numAddresses := int(byteOrder.Uint16(scratch[:2]))
8,161✔
4171

8,161✔
4172
        var addresses []net.Addr
8,161✔
4173
        for i := 0; i < numAddresses; i++ {
18,351✔
4174
                address, err := DeserializeAddr(r)
10,190✔
4175
                if err != nil {
10,190✔
UNCOV
4176
                        return models.LightningNode{}, err
×
UNCOV
4177
                }
×
4178
                addresses = append(addresses, address)
10,190✔
4179
        }
4180
        node.Addresses = addresses
8,161✔
4181

8,161✔
4182
        node.AuthSigBytes, err = wire.ReadVarBytes(r, 0, 80, "sig")
8,161✔
4183
        if err != nil {
8,161✔
4184
                return models.LightningNode{}, err
×
UNCOV
4185
        }
×
4186

4187
        // We'll try and see if there are any opaque bytes left; if not, then
4188
        // we'll ignore the EOF error and return the node as is.
4189
        node.ExtraOpaqueData, err = wire.ReadVarBytes(
8,161✔
4190
                r, 0, MaxAllowedExtraOpaqueBytes, "blob",
8,161✔
4191
        )
8,161✔
4192
        switch {
8,161✔
UNCOV
4193
        case errors.Is(err, io.ErrUnexpectedEOF):
×
UNCOV
4194
        case errors.Is(err, io.EOF):
×
UNCOV
4195
        case err != nil:
×
UNCOV
4196
                return models.LightningNode{}, err
×
4197
        }
4198

4199
        return node, nil
8,161✔
4200
}
4201

4202
func putChanEdgeInfo(edgeIndex kvdb.RwBucket,
4203
        edgeInfo *models.ChannelEdgeInfo, chanID [8]byte) error {
1,488✔
4204

1,488✔
4205
        var b bytes.Buffer
1,488✔
4206

1,488✔
4207
        if _, err := b.Write(edgeInfo.NodeKey1Bytes[:]); err != nil {
1,488✔
4208
                return err
×
4209
        }
×
4210
        if _, err := b.Write(edgeInfo.NodeKey2Bytes[:]); err != nil {
1,488✔
4211
                return err
×
UNCOV
4212
        }
×
4213
        if _, err := b.Write(edgeInfo.BitcoinKey1Bytes[:]); err != nil {
1,488✔
UNCOV
4214
                return err
×
UNCOV
4215
        }
×
4216
        if _, err := b.Write(edgeInfo.BitcoinKey2Bytes[:]); err != nil {
1,488✔
UNCOV
4217
                return err
×
UNCOV
4218
        }
×
4219

4220
        if err := wire.WriteVarBytes(&b, 0, edgeInfo.Features); err != nil {
1,488✔
UNCOV
4221
                return err
×
UNCOV
4222
        }
×
4223

4224
        authProof := edgeInfo.AuthProof
1,488✔
4225
        var nodeSig1, nodeSig2, bitcoinSig1, bitcoinSig2 []byte
1,488✔
4226
        if authProof != nil {
2,893✔
4227
                nodeSig1 = authProof.NodeSig1Bytes
1,405✔
4228
                nodeSig2 = authProof.NodeSig2Bytes
1,405✔
4229
                bitcoinSig1 = authProof.BitcoinSig1Bytes
1,405✔
4230
                bitcoinSig2 = authProof.BitcoinSig2Bytes
1,405✔
4231
        }
1,405✔
4232

4233
        if err := wire.WriteVarBytes(&b, 0, nodeSig1); err != nil {
1,488✔
UNCOV
4234
                return err
×
UNCOV
4235
        }
×
4236
        if err := wire.WriteVarBytes(&b, 0, nodeSig2); err != nil {
1,488✔
4237
                return err
×
UNCOV
4238
        }
×
4239
        if err := wire.WriteVarBytes(&b, 0, bitcoinSig1); err != nil {
1,488✔
UNCOV
4240
                return err
×
UNCOV
4241
        }
×
4242
        if err := wire.WriteVarBytes(&b, 0, bitcoinSig2); err != nil {
1,488✔
UNCOV
4243
                return err
×
UNCOV
4244
        }
×
4245

4246
        if err := WriteOutpoint(&b, &edgeInfo.ChannelPoint); err != nil {
1,488✔
UNCOV
4247
                return err
×
UNCOV
4248
        }
×
4249
        err := binary.Write(&b, byteOrder, uint64(edgeInfo.Capacity))
1,488✔
4250
        if err != nil {
1,488✔
UNCOV
4251
                return err
×
4252
        }
×
4253
        if _, err := b.Write(chanID[:]); err != nil {
1,488✔
UNCOV
4254
                return err
×
4255
        }
×
4256
        if _, err := b.Write(edgeInfo.ChainHash[:]); err != nil {
1,488✔
UNCOV
4257
                return err
×
4258
        }
×
4259

4260
        if len(edgeInfo.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes {
1,488✔
UNCOV
4261
                return ErrTooManyExtraOpaqueBytes(len(edgeInfo.ExtraOpaqueData))
×
4262
        }
×
4263
        err = wire.WriteVarBytes(&b, 0, edgeInfo.ExtraOpaqueData)
1,488✔
4264
        if err != nil {
1,488✔
UNCOV
4265
                return err
×
4266
        }
×
4267

4268
        return edgeIndex.Put(chanID[:], b.Bytes())
1,488✔
4269
}
4270

4271
func fetchChanEdgeInfo(edgeIndex kvdb.RBucket,
4272
        chanID []byte) (models.ChannelEdgeInfo, error) {
3,974✔
4273

3,974✔
4274
        edgeInfoBytes := edgeIndex.Get(chanID)
3,974✔
4275
        if edgeInfoBytes == nil {
4,061✔
4276
                return models.ChannelEdgeInfo{}, ErrEdgeNotFound
87✔
4277
        }
87✔
4278

4279
        edgeInfoReader := bytes.NewReader(edgeInfoBytes)
3,890✔
4280

3,890✔
4281
        return deserializeChanEdgeInfo(edgeInfoReader)
3,890✔
4282
}
4283

4284
func deserializeChanEdgeInfo(r io.Reader) (models.ChannelEdgeInfo, error) {
4,429✔
4285
        var (
4,429✔
4286
                err      error
4,429✔
4287
                edgeInfo models.ChannelEdgeInfo
4,429✔
4288
        )
4,429✔
4289

4,429✔
4290
        if _, err := io.ReadFull(r, edgeInfo.NodeKey1Bytes[:]); err != nil {
4,429✔
UNCOV
4291
                return models.ChannelEdgeInfo{}, err
×
UNCOV
4292
        }
×
4293
        if _, err := io.ReadFull(r, edgeInfo.NodeKey2Bytes[:]); err != nil {
4,429✔
UNCOV
4294
                return models.ChannelEdgeInfo{}, err
×
UNCOV
4295
        }
×
4296
        if _, err := io.ReadFull(r, edgeInfo.BitcoinKey1Bytes[:]); err != nil {
4,429✔
UNCOV
4297
                return models.ChannelEdgeInfo{}, err
×
UNCOV
4298
        }
×
4299
        if _, err := io.ReadFull(r, edgeInfo.BitcoinKey2Bytes[:]); err != nil {
4,429✔
UNCOV
4300
                return models.ChannelEdgeInfo{}, err
×
UNCOV
4301
        }
×
4302

4303
        edgeInfo.Features, err = wire.ReadVarBytes(r, 0, 900, "features")
4,429✔
4304
        if err != nil {
4,429✔
UNCOV
4305
                return models.ChannelEdgeInfo{}, err
×
4306
        }
×
4307

4308
        proof := &models.ChannelAuthProof{}
4,429✔
4309

4,429✔
4310
        proof.NodeSig1Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
4,429✔
4311
        if err != nil {
4,429✔
4312
                return models.ChannelEdgeInfo{}, err
×
4313
        }
×
4314
        proof.NodeSig2Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
4,429✔
4315
        if err != nil {
4,429✔
4316
                return models.ChannelEdgeInfo{}, err
×
UNCOV
4317
        }
×
4318
        proof.BitcoinSig1Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
4,429✔
4319
        if err != nil {
4,429✔
4320
                return models.ChannelEdgeInfo{}, err
×
4321
        }
×
4322
        proof.BitcoinSig2Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
4,429✔
4323
        if err != nil {
4,429✔
UNCOV
4324
                return models.ChannelEdgeInfo{}, err
×
UNCOV
4325
        }
×
4326

4327
        if !proof.IsEmpty() {
5,909✔
4328
                edgeInfo.AuthProof = proof
1,480✔
4329
        }
1,480✔
4330

4331
        edgeInfo.ChannelPoint = wire.OutPoint{}
4,429✔
4332
        if err := ReadOutpoint(r, &edgeInfo.ChannelPoint); err != nil {
4,429✔
UNCOV
4333
                return models.ChannelEdgeInfo{}, err
×
UNCOV
4334
        }
×
4335
        if err := binary.Read(r, byteOrder, &edgeInfo.Capacity); err != nil {
4,429✔
4336
                return models.ChannelEdgeInfo{}, err
×
UNCOV
4337
        }
×
4338
        if err := binary.Read(r, byteOrder, &edgeInfo.ChannelID); err != nil {
4,429✔
4339
                return models.ChannelEdgeInfo{}, err
×
4340
        }
×
4341

4342
        if _, err := io.ReadFull(r, edgeInfo.ChainHash[:]); err != nil {
4,429✔
UNCOV
4343
                return models.ChannelEdgeInfo{}, err
×
UNCOV
4344
        }
×
4345

4346
        // We'll try and see if there are any opaque bytes left; if not, then
4347
        // we'll ignore the EOF error and return the edge as is.
4348
        edgeInfo.ExtraOpaqueData, err = wire.ReadVarBytes(
4,429✔
4349
                r, 0, MaxAllowedExtraOpaqueBytes, "blob",
4,429✔
4350
        )
4,429✔
4351
        switch {
4,429✔
4352
        case errors.Is(err, io.ErrUnexpectedEOF):
×
UNCOV
4353
        case errors.Is(err, io.EOF):
×
4354
        case err != nil:
×
4355
                return models.ChannelEdgeInfo{}, err
×
4356
        }
4357

4358
        return edgeInfo, nil
4,429✔
4359
}
4360

4361
func putChanEdgePolicy(edges kvdb.RwBucket, edge *models.ChannelEdgePolicy,
4362
        from, to []byte) error {
2,661✔
4363

2,661✔
4364
        var edgeKey [33 + 8]byte
2,661✔
4365
        copy(edgeKey[:], from)
2,661✔
4366
        byteOrder.PutUint64(edgeKey[33:], edge.ChannelID)
2,661✔
4367

2,661✔
4368
        var b bytes.Buffer
2,661✔
4369
        if err := serializeChanEdgePolicy(&b, edge, to); err != nil {
2,661✔
4370
                return err
×
UNCOV
4371
        }
×
4372

4373
        // Before we write out the new edge, we'll create a new entry in the
4374
        // update index in order to keep it fresh.
4375
        updateUnix := uint64(edge.LastUpdate.Unix())
2,661✔
4376
        var indexKey [8 + 8]byte
2,661✔
4377
        byteOrder.PutUint64(indexKey[:8], updateUnix)
2,661✔
4378
        byteOrder.PutUint64(indexKey[8:], edge.ChannelID)
2,661✔
4379

2,661✔
4380
        updateIndex, err := edges.CreateBucketIfNotExists(edgeUpdateIndexBucket)
2,661✔
4381
        if err != nil {
2,661✔
UNCOV
4382
                return err
×
UNCOV
4383
        }
×
4384

4385
        // If there was already an entry for this edge, then we'll need to
4386
        // delete the old one to ensure we don't leave around any after-images.
4387
        // An unknown policy value does not have an update time recorded, so
4388
        // it also does not need to be removed.
4389
        if edgeBytes := edges.Get(edgeKey[:]); edgeBytes != nil &&
2,661✔
4390
                !bytes.Equal(edgeBytes, unknownPolicy) {
2,688✔
4391

27✔
4392
                // In order to delete the old entry, we'll need to obtain the
27✔
4393
                // *prior* update time in order to delete it. To do this, we'll
27✔
4394
                // need to deserialize the existing policy within the database
27✔
4395
                // (now outdated by the new one), and delete its corresponding
27✔
4396
                // entry within the update index. We'll ignore any
27✔
4397
                // ErrEdgePolicyOptionalFieldNotFound error, as we only need
27✔
4398
                // the channel ID and update time to delete the entry.
27✔
4399
                // TODO(halseth): get rid of these invalid policies in a
27✔
4400
                // migration.
27✔
4401
                oldEdgePolicy, err := deserializeChanEdgePolicy(
27✔
4402
                        bytes.NewReader(edgeBytes),
27✔
4403
                )
27✔
4404
                if err != nil &&
27✔
4405
                        !errors.Is(err, ErrEdgePolicyOptionalFieldNotFound) {
27✔
UNCOV
4406

×
UNCOV
4407
                        return err
×
UNCOV
4408
                }
×
4409

4410
                oldUpdateTime := uint64(oldEdgePolicy.LastUpdate.Unix())
27✔
4411

27✔
4412
                var oldIndexKey [8 + 8]byte
27✔
4413
                byteOrder.PutUint64(oldIndexKey[:8], oldUpdateTime)
27✔
4414
                byteOrder.PutUint64(oldIndexKey[8:], edge.ChannelID)
27✔
4415

27✔
4416
                if err := updateIndex.Delete(oldIndexKey[:]); err != nil {
27✔
UNCOV
4417
                        return err
×
UNCOV
4418
                }
×
4419
        }
4420

4421
        if err := updateIndex.Put(indexKey[:], nil); err != nil {
2,661✔
4422
                return err
×
4423
        }
×
4424

4425
        err = updateEdgePolicyDisabledIndex(
2,661✔
4426
                edges, edge.ChannelID,
2,661✔
4427
                edge.ChannelFlags&lnwire.ChanUpdateDirection > 0,
2,661✔
4428
                edge.IsDisabled(),
2,661✔
4429
        )
2,661✔
4430
        if err != nil {
2,661✔
UNCOV
4431
                return err
×
4432
        }
×
4433

4434
        return edges.Put(edgeKey[:], b.Bytes())
2,661✔
4435
}
4436
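// Illustrative sketch (not part of the original file): each directed policy
// is stored under a key made of the 33-byte "from" node public key followed
// by the 8-byte big-endian channel ID, as constructed above. edgePolicyKey is
// a hypothetical helper mirroring that layout.
func edgePolicyKey(fromNode [33]byte, chanID uint64) [33 + 8]byte {
        var edgeKey [33 + 8]byte
        copy(edgeKey[:], fromNode[:])
        byteOrder.PutUint64(edgeKey[33:], chanID)

        return edgeKey
}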

4437
// updateEdgePolicyDisabledIndex is used to update the disabledEdgePolicyIndex
4438
// bucket by either add a new disabled ChannelEdgePolicy or remove an existing
4439
// one.
4440
// The direction represents the direction of the edge and disabled is used for
4441
// deciding whether to remove or add an entry to the bucket.
4442
// In general a channel is disabled if two entries for the same chanID exist
4443
// in this bucket.
4444
// Maintaining the bucket this way allows a fast retrieval of disabled
4445
// channels, for example when prune is needed.
4446
func updateEdgePolicyDisabledIndex(edges kvdb.RwBucket, chanID uint64,
4447
        direction bool, disabled bool) error {
2,947✔
4448

2,947✔
4449
        var disabledEdgeKey [8 + 1]byte
2,947✔
4450
        byteOrder.PutUint64(disabledEdgeKey[0:], chanID)
2,947✔
4451
        if direction {
4,416✔
4452
                disabledEdgeKey[8] = 1
1,469✔
4453
        }
1,469✔
4454

4455
        disabledEdgePolicyIndex, err := edges.CreateBucketIfNotExists(
2,947✔
4456
                disabledEdgePolicyBucket,
2,947✔
4457
        )
2,947✔
4458
        if err != nil {
2,947✔
UNCOV
4459
                return err
×
UNCOV
4460
        }
×
4461

4462
        if disabled {
2,976✔
4463
                return disabledEdgePolicyIndex.Put(disabledEdgeKey[:], []byte{})
29✔
4464
        }
29✔
4465

4466
        return disabledEdgePolicyIndex.Delete(disabledEdgeKey[:])
2,921✔
4467
}
4468

4469
// putChanEdgePolicyUnknown marks the edge policy as unknown
4470
// in the edges bucket.
4471
func putChanEdgePolicyUnknown(edges kvdb.RwBucket, channelID uint64,
4472
        from []byte) error {
2,971✔
4473

2,971✔
4474
        var edgeKey [33 + 8]byte
2,971✔
4475
        copy(edgeKey[:], from)
2,971✔
4476
        byteOrder.PutUint64(edgeKey[33:], channelID)
2,971✔
4477

2,971✔
4478
        if edges.Get(edgeKey[:]) != nil {
2,971✔
UNCOV
4479
                return fmt.Errorf("cannot write unknown policy for channel %v "+
×
UNCOV
4480
                        " when there is already a policy present", channelID)
×
UNCOV
4481
        }
×
4482

4483
        return edges.Put(edgeKey[:], unknownPolicy)
2,971✔
4484
}
4485

4486
func fetchChanEdgePolicy(edges kvdb.RBucket, chanID []byte,
4487
        nodePub []byte) (*models.ChannelEdgePolicy, error) {
7,801✔
4488

7,801✔
4489
        var edgeKey [33 + 8]byte
7,801✔
4490
        copy(edgeKey[:], nodePub)
7,801✔
4491
        copy(edgeKey[33:], chanID)
7,801✔
4492

7,801✔
4493
        edgeBytes := edges.Get(edgeKey[:])
7,801✔
4494
        if edgeBytes == nil {
7,801✔
4495
                return nil, ErrEdgeNotFound
×
4496
        }
×
4497

4498
        // No need to deserialize unknown policy.
4499
        if bytes.Equal(edgeBytes, unknownPolicy) {
8,183✔
4500
                return nil, nil
382✔
4501
        }
382✔
4502

4503
        edgeReader := bytes.NewReader(edgeBytes)
7,422✔
4504

7,422✔
4505
        ep, err := deserializeChanEdgePolicy(edgeReader)
7,422✔
4506
        switch {
7,422✔
4507
        // If the db policy was missing an expected optional field, we return
4508
        // nil as if the policy was unknown.
4509
        case errors.Is(err, ErrEdgePolicyOptionalFieldNotFound):
1✔
4510
                return nil, nil
1✔
4511

UNCOV
4512
        case err != nil:
×
UNCOV
4513
                return nil, err
×
4514
        }
4515

4516
        return ep, nil
7,421✔
4517
}
4518

4519
func fetchChanEdgePolicies(edgeIndex kvdb.RBucket, edges kvdb.RBucket,
4520
        chanID []byte) (*models.ChannelEdgePolicy, *models.ChannelEdgePolicy,
4521
        error) {
251✔
4522

251✔
4523
        edgeInfo := edgeIndex.Get(chanID)
251✔
4524
        if edgeInfo == nil {
251✔
UNCOV
4525
                return nil, nil, fmt.Errorf("%w: chanID=%x", ErrEdgeNotFound,
×
UNCOV
4526
                        chanID)
×
4527
        }
×
4528

4529
        // The first node is contained within the first half of the edge
4530
        // information. We only propagate the error here and below if it's
4531
        // something other than edge non-existence.
4532
        node1Pub := edgeInfo[:33]
251✔
4533
        edge1, err := fetchChanEdgePolicy(edges, chanID, node1Pub)
251✔
4534
        if err != nil {
251✔
UNCOV
4535
                return nil, nil, fmt.Errorf("%w: node1Pub=%x", ErrEdgeNotFound,
×
UNCOV
4536
                        node1Pub)
×
UNCOV
4537
        }
×
4538

4539
        // Similarly, the second node is contained within the latter
4540
        // half of the edge information.
4541
        node2Pub := edgeInfo[33:66]
251✔
4542
        edge2, err := fetchChanEdgePolicy(edges, chanID, node2Pub)
251✔
4543
        if err != nil {
251✔
UNCOV
4544
                return nil, nil, fmt.Errorf("%w: node2Pub=%x", ErrEdgeNotFound,
×
UNCOV
4545
                        node2Pub)
×
UNCOV
4546
        }
×
4547

4548
        return edge1, edge2, nil
251✔
4549
}
4550

4551
func serializeChanEdgePolicy(w io.Writer, edge *models.ChannelEdgePolicy,
4552
        to []byte) error {
2,663✔
4553

2,663✔
4554
        err := wire.WriteVarBytes(w, 0, edge.SigBytes)
2,663✔
4555
        if err != nil {
2,663✔
UNCOV
4556
                return err
×
UNCOV
4557
        }
×
4558

4559
        if err := binary.Write(w, byteOrder, edge.ChannelID); err != nil {
2,663✔
4560
                return err
×
4561
        }
×
4562

4563
        var scratch [8]byte
2,663✔
4564
        updateUnix := uint64(edge.LastUpdate.Unix())
2,663✔
4565
        byteOrder.PutUint64(scratch[:], updateUnix)
2,663✔
4566
        if _, err := w.Write(scratch[:]); err != nil {
2,663✔
UNCOV
4567
                return err
×
UNCOV
4568
        }
×
4569

4570
        if err := binary.Write(w, byteOrder, edge.MessageFlags); err != nil {
2,663✔
4571
                return err
×
4572
        }
×
4573
        if err := binary.Write(w, byteOrder, edge.ChannelFlags); err != nil {
2,663✔
UNCOV
4574
                return err
×
4575
        }
×
4576
        if err := binary.Write(w, byteOrder, edge.TimeLockDelta); err != nil {
2,663✔
UNCOV
4577
                return err
×
UNCOV
4578
        }
×
4579
        if err := binary.Write(w, byteOrder, uint64(edge.MinHTLC)); err != nil {
2,663✔
UNCOV
4580
                return err
×
UNCOV
4581
        }
×
4582
        err = binary.Write(w, byteOrder, uint64(edge.FeeBaseMSat))
2,663✔
4583
        if err != nil {
2,663✔
UNCOV
4584
                return err
×
UNCOV
4585
        }
×
4586
        err = binary.Write(
2,663✔
4587
                w, byteOrder, uint64(edge.FeeProportionalMillionths),
2,663✔
4588
        )
2,663✔
4589
        if err != nil {
2,663✔
4590
                return err
×
UNCOV
4591
        }
×
4592

4593
        if _, err := w.Write(to); err != nil {
2,663✔
UNCOV
4594
                return err
×
4595
        }
×
4596

4597
        // If the max_htlc field is present, we write it. To be compatible with
4598
        // older versions that weren't aware of this field, we write it as part
4599
        // of the opaque data.
4600
        // TODO(halseth): clean up when moving to TLV.
4601
        var opaqueBuf bytes.Buffer
2,663✔
4602
        if edge.MessageFlags.HasMaxHtlc() {
4,944✔
4603
                err := binary.Write(&opaqueBuf, byteOrder, uint64(edge.MaxHTLC))
2,281✔
4604
                if err != nil {
2,281✔
4605
                        return err
×
4606
                }
×
4607
        }
4608

4609
        if len(edge.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes {
2,663✔
4610
                return ErrTooManyExtraOpaqueBytes(len(edge.ExtraOpaqueData))
×
UNCOV
4611
        }
×
4612
        if _, err := opaqueBuf.Write(edge.ExtraOpaqueData); err != nil {
2,663✔
UNCOV
4613
                return err
×
UNCOV
4614
        }
×
4615

4616
        if err := wire.WriteVarBytes(w, 0, opaqueBuf.Bytes()); err != nil {
2,663✔
UNCOV
4617
                return err
×
UNCOV
4618
        }
×
4619

4620
        return nil
2,663✔
4621
}
4622

4623
func deserializeChanEdgePolicy(r io.Reader) (*models.ChannelEdgePolicy, error) {
7,447✔
4624
        // Deserialize the policy. Note that in case an optional field is not
7,447✔
4625
        // found, both an error and a populated policy object are returned.
7,447✔
4626
        edge, deserializeErr := deserializeChanEdgePolicyRaw(r)
7,447✔
4627
        if deserializeErr != nil &&
7,447✔
4628
                !errors.Is(deserializeErr, ErrEdgePolicyOptionalFieldNotFound) {
7,447✔
4629

×
UNCOV
4630
                return nil, deserializeErr
×
UNCOV
4631
        }
×
4632

4633
        return edge, deserializeErr
7,447✔
4634
}
4635

4636
func deserializeChanEdgePolicyRaw(r io.Reader) (*models.ChannelEdgePolicy,
4637
        error) {
8,449✔
4638

8,449✔
4639
        edge := &models.ChannelEdgePolicy{}
8,449✔
4640

8,449✔
4641
        var err error
8,449✔
4642
        edge.SigBytes, err = wire.ReadVarBytes(r, 0, 80, "sig")
8,449✔
4643
        if err != nil {
8,449✔
4644
                return nil, err
×
4645
        }
×
4646

4647
        if err := binary.Read(r, byteOrder, &edge.ChannelID); err != nil {
8,449✔
UNCOV
4648
                return nil, err
×
UNCOV
4649
        }
×
4650

4651
        var scratch [8]byte
8,449✔
4652
        if _, err := r.Read(scratch[:]); err != nil {
8,449✔
UNCOV
4653
                return nil, err
×
UNCOV
4654
        }
×
4655
        unix := int64(byteOrder.Uint64(scratch[:]))
8,449✔
4656
        edge.LastUpdate = time.Unix(unix, 0)
8,449✔
4657

8,449✔
4658
        if err := binary.Read(r, byteOrder, &edge.MessageFlags); err != nil {
8,449✔
4659
                return nil, err
×
4660
        }
×
4661
        if err := binary.Read(r, byteOrder, &edge.ChannelFlags); err != nil {
8,449✔
UNCOV
4662
                return nil, err
×
4663
        }
×
4664
        if err := binary.Read(r, byteOrder, &edge.TimeLockDelta); err != nil {
8,449✔
UNCOV
4665
                return nil, err
×
UNCOV
4666
        }
×
4667

4668
        var n uint64
8,449✔
4669
        if err := binary.Read(r, byteOrder, &n); err != nil {
8,449✔
UNCOV
4670
                return nil, err
×
UNCOV
4671
        }
×
4672
        edge.MinHTLC = lnwire.MilliSatoshi(n)
8,449✔
4673

8,449✔
4674
        if err := binary.Read(r, byteOrder, &n); err != nil {
8,449✔
4675
                return nil, err
×
UNCOV
4676
        }
×
4677
        edge.FeeBaseMSat = lnwire.MilliSatoshi(n)
8,449✔
4678

8,449✔
4679
        if err := binary.Read(r, byteOrder, &n); err != nil {
8,449✔
4680
                return nil, err
×
4681
        }
×
4682
        edge.FeeProportionalMillionths = lnwire.MilliSatoshi(n)
8,449✔
4683

8,449✔
4684
        if _, err := r.Read(edge.ToNode[:]); err != nil {
8,449✔
4685
                return nil, err
×
4686
        }
×
4687

4688
        // We'll try and see if there are any opaque bytes left; if not, then
4689
        // we'll ignore the EOF error and return the edge as is.
4690
        edge.ExtraOpaqueData, err = wire.ReadVarBytes(
8,449✔
4691
                r, 0, MaxAllowedExtraOpaqueBytes, "blob",
8,449✔
4692
        )
8,449✔
4693
        switch {
8,449✔
UNCOV
4694
        case errors.Is(err, io.ErrUnexpectedEOF):
×
4695
        case errors.Is(err, io.EOF):
3✔
4696
        case err != nil:
×
UNCOV
4697
                return nil, err
×
4698
        }
4699

4700
        // See if optional fields are present.
4701
        if edge.MessageFlags.HasMaxHtlc() {
16,529✔
4702
                // The max_htlc field should be at the beginning of the opaque
8,080✔
4703
                // bytes.
8,080✔
4704
                opq := edge.ExtraOpaqueData
8,080✔
4705

8,080✔
4706
                // If the max_htlc field is not present, it might be old data
8,080✔
4707
                // stored before this field was validated. We'll return the
8,080✔
4708
                // edge along with an error.
8,080✔
4709
                if len(opq) < 8 {
8,083✔
4710
                        return edge, ErrEdgePolicyOptionalFieldNotFound
3✔
4711
                }
3✔
4712

4713
                maxHtlc := byteOrder.Uint64(opq[:8])
8,077✔
4714
                edge.MaxHTLC = lnwire.MilliSatoshi(maxHtlc)
8,077✔
4715

8,077✔
4716
                // Exclude the parsed field from the rest of the opaque data.
8,077✔
4717
                edge.ExtraOpaqueData = opq[8:]
8,077✔
4718
        }
4719

4720
        return edge, nil
8,446✔
4721
}
4722
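// Illustrative sketch (not part of the original file): when the max_htlc
// message flag is set, the first 8 bytes of the opaque blob written by
// serializeChanEdgePolicy hold the MaxHTLC value and the remainder is the
// policy's ExtraOpaqueData. splitMaxHtlc is a hypothetical helper that splits
// such a blob the same way deserializeChanEdgePolicyRaw does.
func splitMaxHtlc(opaque []byte) (lnwire.MilliSatoshi, []byte, error) {
        if len(opaque) < 8 {
                // Old data written before max_htlc was validated may lack the
                // field entirely.
                return 0, nil, ErrEdgePolicyOptionalFieldNotFound
        }

        maxHtlc := lnwire.MilliSatoshi(byteOrder.Uint64(opaque[:8]))

        return maxHtlc, opaque[8:], nil
}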

4723
// chanGraphNodeTx is an implementation of the NodeRTx interface backed by the
4724
// KVStore and a kvdb.RTx.
4725
type chanGraphNodeTx struct {
4726
        tx   kvdb.RTx
4727
        db   *KVStore
4728
        node *models.LightningNode
4729
}
4730

4731
// A compile-time constraint to ensure chanGraphNodeTx implements the NodeRTx
4732
// interface.
4733
var _ NodeRTx = (*chanGraphNodeTx)(nil)
4734

4735
func newChanGraphNodeTx(tx kvdb.RTx, db *KVStore,
4736
        node *models.LightningNode) *chanGraphNodeTx {
3,917✔
4737

3,917✔
4738
        return &chanGraphNodeTx{
3,917✔
4739
                tx:   tx,
3,917✔
4740
                db:   db,
3,917✔
4741
                node: node,
3,917✔
4742
        }
3,917✔
4743
}
3,917✔
4744

4745
// Node returns the raw information of the node.
4746
//
4747
// NOTE: This is a part of the NodeRTx interface.
4748
func (c *chanGraphNodeTx) Node() *models.LightningNode {
4,842✔
4749
        return c.node
4,842✔
4750
}
4,842✔
4751

4752
// FetchNode fetches the node with the given pub key under the same transaction
4753
// used to fetch the current node. The returned node is also a NodeRTx and any
4754
// operations on that NodeRTx will also be done under the same transaction.
4755
//
4756
// NOTE: This is a part of the NodeRTx interface.
4757
func (c *chanGraphNodeTx) FetchNode(nodePub route.Vertex) (NodeRTx, error) {
2,944✔
4758
        node, err := c.db.FetchLightningNodeTx(c.tx, nodePub)
2,944✔
4759
        if err != nil {
2,944✔
UNCOV
4760
                return nil, err
×
UNCOV
4761
        }
×
4762

4763
        return newChanGraphNodeTx(c.tx, c.db, node), nil
2,944✔
4764
}
4765

4766
// ForEachChannel can be used to iterate over the node's channels under
4767
// the same transaction used to fetch the node.
4768
//
4769
// NOTE: This is a part of the NodeRTx interface.
4770
func (c *chanGraphNodeTx) ForEachChannel(f func(*models.ChannelEdgeInfo,
4771
        *models.ChannelEdgePolicy, *models.ChannelEdgePolicy) error) error {
965✔
4772

965✔
4773
        return c.db.ForEachNodeChannelTx(c.tx, c.node.PubKeyBytes,
965✔
4774
                func(_ kvdb.RTx, info *models.ChannelEdgeInfo, policy1,
965✔
4775
                        policy2 *models.ChannelEdgePolicy) error {
3,909✔
4776

2,944✔
4777
                        return f(info, policy1, policy2)
2,944✔
4778
                },
2,944✔
4779
        )
4780
}
4781

4782
// MakeTestGraph creates a new instance of the ChannelGraph for testing
4783
// purposes.
4784
func MakeTestGraph(t testing.TB, modifiers ...KVStoreOptionModifier) (
4785
        *ChannelGraph, error) {
40✔
4786

40✔
4787
        opts := DefaultOptions()
40✔
4788
        for _, modifier := range modifiers {
40✔
UNCOV
4789
                modifier(opts)
×
UNCOV
4790
        }
×
4791

4792
        // Next, create KVStore for the first time.
4793
        backend, backendCleanup, err := kvdb.GetTestBackend(t.TempDir(), "cgr")
40✔
4794
        if err != nil {
40✔
UNCOV
4795
                backendCleanup()
×
UNCOV
4796

×
UNCOV
4797
                return nil, err
×
UNCOV
4798
        }
×
4799

4800
        graph, err := NewChannelGraph(&Config{
40✔
4801
                KVDB:        backend,
40✔
4802
                KVStoreOpts: modifiers,
40✔
4803
        })
40✔
4804
        if err != nil {
40✔
4805
                backendCleanup()
×
UNCOV
4806

×
UNCOV
4807
                return nil, err
×
UNCOV
4808
        }
×
4809

4810
        t.Cleanup(func() {
80✔
4811
                _ = backend.Close()
40✔
4812
                backendCleanup()
40✔
4813
        })
40✔
4814

4815
        return graph, nil
40✔
4816
}