• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

lightningnetwork / lnd / 16777989842

06 Aug 2025 01:14PM UTC coverage: 66.948% (-0.006%) from 66.954%
16777989842

push

github

web-flow
Merge pull request #10132 from ffranr/update-ffranr-signing-key

scripts: update ffranr release signing key

135677 of 202660 relevant lines covered (66.95%)

21599.01 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

85.67
/graph/db/graph.go
1
package graphdb
2

3
import (
4
        "context"
5
        "errors"
6
        "fmt"
7
        "net"
8
        "sync"
9
        "sync/atomic"
10
        "testing"
11
        "time"
12

13
        "github.com/btcsuite/btcd/chaincfg/chainhash"
14
        "github.com/btcsuite/btcd/wire"
15
        "github.com/lightningnetwork/lnd/batch"
16
        "github.com/lightningnetwork/lnd/graph/db/models"
17
        "github.com/lightningnetwork/lnd/lnwire"
18
        "github.com/lightningnetwork/lnd/routing/route"
19
        "github.com/stretchr/testify/require"
20
)
21

22
// ErrChanGraphShuttingDown indicates that the ChannelGraph has shutdown or is
// busy shutting down. It is returned by methods that would otherwise block on
// sending to an internal channel once the quit channel has been closed.
var ErrChanGraphShuttingDown = fmt.Errorf("ChannelGraph shutting down")
25

26
// ChannelGraph is a layer above the graph's CRUD layer.
//
// NOTE: currently, this is purely a pass-through layer directly to the backing
// KVStore. Upcoming commits will move the graph cache out of the KVStore and
// into this layer so that the KVStore is only responsible for CRUD operations.
type ChannelGraph struct {
	// started and stopped make Start and Stop idempotent: only the first
	// call to each performs any work.
	started atomic.Bool
	stopped atomic.Bool

	// graphCache is an optional in-memory cache of the channel graph. It
	// is nil when the cache has been disabled via the graph options.
	graphCache *GraphCache

	// V1Store is the backing CRUD store for the graph.
	V1Store

	// topologyManager manages topology subscription clients and the
	// delivery of topology update notifications.
	*topologyManager

	// quit is closed by Stop to signal all background goroutines to exit.
	quit chan struct{}

	// wg tracks the goroutines spawned by the ChannelGraph so that Stop
	// can wait for a clean shutdown.
	wg   sync.WaitGroup
}
43

44
// NewChannelGraph creates a new ChannelGraph instance with the given backend.
45
func NewChannelGraph(v1Store V1Store,
46
        options ...ChanGraphOption) (*ChannelGraph, error) {
174✔
47

174✔
48
        opts := defaultChanGraphOptions()
174✔
49
        for _, o := range options {
268✔
50
                o(opts)
94✔
51
        }
94✔
52

53
        g := &ChannelGraph{
174✔
54
                V1Store:         v1Store,
174✔
55
                topologyManager: newTopologyManager(),
174✔
56
                quit:            make(chan struct{}),
174✔
57
        }
174✔
58

174✔
59
        // The graph cache can be turned off (e.g. for mobile users) for a
174✔
60
        // speed/memory usage tradeoff.
174✔
61
        if opts.useGraphCache {
315✔
62
                g.graphCache = NewGraphCache(opts.preAllocCacheNumNodes)
141✔
63
        }
141✔
64

65
        return g, nil
174✔
66
}
67

68
// Start kicks off any goroutines required for the ChannelGraph to function.
69
// If the graph cache is enabled, then it will be populated with the contents of
70
// the database.
71
func (c *ChannelGraph) Start() error {
287✔
72
        if !c.started.CompareAndSwap(false, true) {
400✔
73
                return nil
113✔
74
        }
113✔
75
        log.Debugf("ChannelGraph starting")
174✔
76
        defer log.Debug("ChannelGraph started")
174✔
77

174✔
78
        if c.graphCache != nil {
315✔
79
                if err := c.populateCache(context.TODO()); err != nil {
141✔
80
                        return fmt.Errorf("could not populate the graph "+
×
81
                                "cache: %w", err)
×
82
                }
×
83
        }
84

85
        c.wg.Add(1)
174✔
86
        go c.handleTopologySubscriptions()
174✔
87

174✔
88
        return nil
174✔
89
}
90

91
// Stop signals any active goroutines for a graceful closure.
92
func (c *ChannelGraph) Stop() error {
287✔
93
        if !c.stopped.CompareAndSwap(false, true) {
400✔
94
                return nil
113✔
95
        }
113✔
96

97
        log.Debugf("ChannelGraph shutting down...")
174✔
98
        defer log.Debug("ChannelGraph shutdown complete")
174✔
99

174✔
100
        close(c.quit)
174✔
101
        c.wg.Wait()
174✔
102

174✔
103
        return nil
174✔
104
}
105

106
// handleTopologySubscriptions ensures that topology client subscriptions,
// subscription cancellations and topology notifications are handled
// synchronously.
//
// NOTE: this MUST be run in a goroutine.
func (c *ChannelGraph) handleTopologySubscriptions() {
	defer c.wg.Done()

	for {
		select {
		// A new fully validated topology update has just arrived.
		// We'll notify any registered clients.
		case update := <-c.topologyUpdate:
			// TODO(elle): change topology handling to be handled
			// synchronously so that we can guarantee the order of
			// notification delivery.
			c.wg.Add(1)
			go c.handleTopologyUpdate(update)

			// TODO(roasbeef): remove all unconnected vertexes
			// after N blocks pass with no corresponding
			// announcements.

		// A new notification client update has arrived. We're either
		// gaining a new client, or cancelling notifications for an
		// existing client.
		case ntfnUpdate := <-c.ntfnClientUpdates:
			clientID := ntfnUpdate.clientID

			if ntfnUpdate.cancel {
				// Remove the client from the set of active
				// topology clients, if it is present.
				client, ok := c.topologyClients.LoadAndDelete(
					clientID,
				)
				if ok {
					// Signal the client's goroutines to
					// exit and wait for them to finish
					// before closing the notification
					// channel they send on.
					close(client.exit)
					client.wg.Wait()

					close(client.ntfnChan)
				}

				continue
			}

			// Not a cancellation: register the new client under
			// its ID.
			c.topologyClients.Store(clientID, &topologyClient{
				ntfnChan: ntfnUpdate.ntfnChan,
				exit:     make(chan struct{}),
			})

		// The graph is shutting down; exit the loop.
		case <-c.quit:
			return
		}
	}
}
159

160
// populateCache loads the entire channel graph into the in-memory graph cache.
161
//
162
// NOTE: This should only be called if the graphCache has been constructed.
163
func (c *ChannelGraph) populateCache(ctx context.Context) error {
141✔
164
        startTime := time.Now()
141✔
165
        log.Info("Populating in-memory channel graph, this might take a " +
141✔
166
                "while...")
141✔
167

141✔
168
        err := c.V1Store.ForEachNodeCacheable(ctx, func(node route.Vertex,
141✔
169
                features *lnwire.FeatureVector) error {
244✔
170

103✔
171
                c.graphCache.AddNodeFeatures(node, features)
103✔
172

103✔
173
                return nil
103✔
174
        }, func() {})
244✔
175
        if err != nil {
141✔
176
                return err
×
177
        }
×
178

179
        err = c.V1Store.ForEachChannelCacheable(
141✔
180
                func(info *models.CachedEdgeInfo,
141✔
181
                        policy1, policy2 *models.CachedEdgePolicy) error {
540✔
182

399✔
183
                        c.graphCache.AddChannel(info, policy1, policy2)
399✔
184

399✔
185
                        return nil
399✔
186
                }, func() {},
540✔
187
        )
188
        if err != nil {
141✔
189
                return err
×
190
        }
×
191

192
        log.Infof("Finished populating in-memory channel graph (took %v, %s)",
141✔
193
                time.Since(startTime), c.graphCache.Stats())
141✔
194

141✔
195
        return nil
141✔
196
}
197

198
// ForEachNodeDirectedChannel iterates through all channels of a given node,
199
// executing the passed callback on the directed edge representing the channel
200
// and its incoming policy. If the callback returns an error, then the iteration
201
// is halted with the error propagated back up to the caller. If the graphCache
202
// is available, then it will be used to retrieve the node's channels instead
203
// of the database.
204
//
205
// Unknown policies are passed into the callback as nil values.
206
//
207
// NOTE: this is part of the graphdb.NodeTraverser interface.
208
func (c *ChannelGraph) ForEachNodeDirectedChannel(node route.Vertex,
209
        cb func(channel *DirectedChannel) error, reset func()) error {
511✔
210

511✔
211
        if c.graphCache != nil {
1,019✔
212
                return c.graphCache.ForEachChannel(node, cb)
508✔
213
        }
508✔
214

215
        return c.V1Store.ForEachNodeDirectedChannel(node, cb, reset)
6✔
216
}
217

218
// FetchNodeFeatures returns the features of the given node. If no features are
219
// known for the node, an empty feature vector is returned.
220
// If the graphCache is available, then it will be used to retrieve the node's
221
// features instead of the database.
222
//
223
// NOTE: this is part of the graphdb.NodeTraverser interface.
224
func (c *ChannelGraph) FetchNodeFeatures(node route.Vertex) (
225
        *lnwire.FeatureVector, error) {
465✔
226

465✔
227
        if c.graphCache != nil {
930✔
228
                return c.graphCache.GetFeatures(node), nil
465✔
229
        }
465✔
230

231
        return c.V1Store.FetchNodeFeatures(node)
3✔
232
}
233

234
// GraphSession will provide the call-back with access to a NodeTraverser
235
// instance which can be used to perform queries against the channel graph. If
236
// the graph cache is not enabled, then the call-back will be provided with
237
// access to the graph via a consistent read-only transaction.
238
func (c *ChannelGraph) GraphSession(cb func(graph NodeTraverser) error,
239
        reset func()) error {
136✔
240

136✔
241
        if c.graphCache != nil {
218✔
242
                return cb(c)
82✔
243
        }
82✔
244

245
        return c.V1Store.GraphSession(cb, reset)
54✔
246
}
247

248
// ForEachNodeCached iterates through all the stored vertices/nodes in the
249
// graph, executing the passed callback with each node encountered.
250
//
251
// NOTE: The callback contents MUST not be modified.
252
func (c *ChannelGraph) ForEachNodeCached(ctx context.Context, withAddrs bool,
253
        cb func(ctx context.Context, node route.Vertex, addrs []net.Addr,
254
                chans map[uint64]*DirectedChannel) error, reset func()) error {
120✔
255

120✔
256
        if !withAddrs && c.graphCache != nil {
120✔
257
                return c.graphCache.ForEachNode(
×
258
                        func(node route.Vertex,
×
259
                                channels map[uint64]*DirectedChannel) error {
×
260

×
261
                                return cb(ctx, node, nil, channels)
×
262
                        },
×
263
                )
264
        }
265

266
        return c.V1Store.ForEachNodeCached(ctx, withAddrs, cb, reset)
120✔
267
}
268

269
// AddLightningNode adds a vertex/node to the graph database. If the node is not
270
// in the database from before, this will add a new, unconnected one to the
271
// graph. If it is present from before, this will update that node's
272
// information. Note that this method is expected to only be called to update an
273
// already present node from a node announcement, or to insert a node found in a
274
// channel update.
275
func (c *ChannelGraph) AddLightningNode(ctx context.Context,
276
        node *models.LightningNode, op ...batch.SchedulerOption) error {
715✔
277

715✔
278
        err := c.V1Store.AddLightningNode(ctx, node, op...)
715✔
279
        if err != nil {
715✔
280
                return err
×
281
        }
×
282

283
        if c.graphCache != nil {
1,276✔
284
                c.graphCache.AddNodeFeatures(
561✔
285
                        node.PubKeyBytes, node.Features,
561✔
286
                )
561✔
287
        }
561✔
288

289
        select {
715✔
290
        case c.topologyUpdate <- node:
715✔
291
        case <-c.quit:
×
292
                return ErrChanGraphShuttingDown
×
293
        }
294

295
        return nil
715✔
296
}
297

298
// DeleteLightningNode starts a new database transaction to remove a vertex/node
299
// from the database according to the node's public key.
300
func (c *ChannelGraph) DeleteLightningNode(ctx context.Context,
301
        nodePub route.Vertex) error {
4✔
302

4✔
303
        err := c.V1Store.DeleteLightningNode(ctx, nodePub)
4✔
304
        if err != nil {
5✔
305
                return err
1✔
306
        }
1✔
307

308
        if c.graphCache != nil {
6✔
309
                c.graphCache.RemoveNode(nodePub)
3✔
310
        }
3✔
311

312
        return nil
3✔
313
}
314

315
// AddChannelEdge adds a new (undirected, blank) edge to the graph database. An
316
// undirected edge from the two target nodes are created. The information stored
317
// denotes the static attributes of the channel, such as the channelID, the keys
318
// involved in creation of the channel, and the set of features that the channel
319
// supports. The chanPoint and chanID are used to uniquely identify the edge
320
// globally within the database.
321
func (c *ChannelGraph) AddChannelEdge(ctx context.Context,
322
        edge *models.ChannelEdgeInfo, op ...batch.SchedulerOption) error {
1,733✔
323

1,733✔
324
        err := c.V1Store.AddChannelEdge(ctx, edge, op...)
1,733✔
325
        if err != nil {
1,970✔
326
                return err
237✔
327
        }
237✔
328

329
        if c.graphCache != nil {
2,802✔
330
                c.graphCache.AddChannel(models.NewCachedEdge(edge), nil, nil)
1,306✔
331
        }
1,306✔
332

333
        select {
1,496✔
334
        case c.topologyUpdate <- edge:
1,496✔
335
        case <-c.quit:
×
336
                return ErrChanGraphShuttingDown
×
337
        }
338

339
        return nil
1,496✔
340
}
341

342
// MarkEdgeLive clears an edge from our zombie index, deeming it as live.
343
// If the cache is enabled, the edge will be added back to the graph cache if
344
// we still have a record of this channel in the DB.
345
func (c *ChannelGraph) MarkEdgeLive(chanID uint64) error {
2✔
346
        err := c.V1Store.MarkEdgeLive(chanID)
2✔
347
        if err != nil {
3✔
348
                return err
1✔
349
        }
1✔
350

351
        if c.graphCache != nil {
2✔
352
                // We need to add the channel back into our graph cache,
1✔
353
                // otherwise we won't use it for path finding.
1✔
354
                infos, err := c.V1Store.FetchChanInfos([]uint64{chanID})
1✔
355
                if err != nil {
1✔
356
                        return err
×
357
                }
×
358

359
                if len(infos) == 0 {
2✔
360
                        return nil
1✔
361
                }
1✔
362

363
                info := infos[0]
×
364

×
365
                var policy1, policy2 *models.CachedEdgePolicy
×
366
                if info.Policy1 != nil {
×
367
                        policy1 = models.NewCachedPolicy(info.Policy1)
×
368
                }
×
369
                if info.Policy2 != nil {
×
370
                        policy2 = models.NewCachedPolicy(info.Policy2)
×
371
                }
×
372

373
                c.graphCache.AddChannel(
×
374
                        models.NewCachedEdge(info.Info), policy1, policy2,
×
375
                )
×
376
        }
377

378
        return nil
×
379
}
380

381
// DeleteChannelEdges removes edges with the given channel IDs from the
382
// database and marks them as zombies. This ensures that we're unable to re-add
383
// it to our database once again. If an edge does not exist within the
384
// database, then ErrEdgeNotFound will be returned. If strictZombiePruning is
385
// true, then when we mark these edges as zombies, we'll set up the keys such
386
// that we require the node that failed to send the fresh update to be the one
387
// that resurrects the channel from its zombie state. The markZombie bool
388
// denotes whether to mark the channel as a zombie.
389
func (c *ChannelGraph) DeleteChannelEdges(strictZombiePruning, markZombie bool,
390
        chanIDs ...uint64) error {
147✔
391

147✔
392
        infos, err := c.V1Store.DeleteChannelEdges(
147✔
393
                strictZombiePruning, markZombie, chanIDs...,
147✔
394
        )
147✔
395
        if err != nil {
209✔
396
                return err
62✔
397
        }
62✔
398

399
        if c.graphCache != nil {
170✔
400
                for _, info := range infos {
111✔
401
                        c.graphCache.RemoveChannel(
26✔
402
                                info.NodeKey1Bytes, info.NodeKey2Bytes,
26✔
403
                                info.ChannelID,
26✔
404
                        )
26✔
405
                }
26✔
406
        }
407

408
        return err
85✔
409
}
410

411
// DisconnectBlockAtHeight is used to indicate that the block specified
412
// by the passed height has been disconnected from the main chain. This
413
// will "rewind" the graph back to the height below, deleting channels
414
// that are no longer confirmed from the graph. The prune log will be
415
// set to the last prune height valid for the remaining chain.
416
// Channels that were removed from the graph resulting from the
417
// disconnected block are returned.
418
func (c *ChannelGraph) DisconnectBlockAtHeight(height uint32) (
419
        []*models.ChannelEdgeInfo, error) {
156✔
420

156✔
421
        edges, err := c.V1Store.DisconnectBlockAtHeight(height)
156✔
422
        if err != nil {
156✔
423
                return nil, err
×
424
        }
×
425

426
        if c.graphCache != nil {
312✔
427
                for _, edge := range edges {
260✔
428
                        c.graphCache.RemoveChannel(
104✔
429
                                edge.NodeKey1Bytes, edge.NodeKey2Bytes,
104✔
430
                                edge.ChannelID,
104✔
431
                        )
104✔
432
                }
104✔
433
        }
434

435
        return edges, nil
156✔
436
}
437

438
// PruneGraph prunes newly closed channels from the channel graph in response
439
// to a new block being solved on the network. Any transactions which spend the
440
// funding output of any known channels within he graph will be deleted.
441
// Additionally, the "prune tip", or the last block which has been used to
442
// prune the graph is stored so callers can ensure the graph is fully in sync
443
// with the current UTXO state. A slice of channels that have been closed by
444
// the target block are returned if the function succeeds without error.
445
func (c *ChannelGraph) PruneGraph(spentOutputs []*wire.OutPoint,
446
        blockHash *chainhash.Hash, blockHeight uint32) (
447
        []*models.ChannelEdgeInfo, error) {
241✔
448

241✔
449
        edges, nodes, err := c.V1Store.PruneGraph(
241✔
450
                spentOutputs, blockHash, blockHeight,
241✔
451
        )
241✔
452
        if err != nil {
241✔
453
                return nil, err
×
454
        }
×
455

456
        if c.graphCache != nil {
482✔
457
                for _, edge := range edges {
264✔
458
                        c.graphCache.RemoveChannel(
23✔
459
                                edge.NodeKey1Bytes, edge.NodeKey2Bytes,
23✔
460
                                edge.ChannelID,
23✔
461
                        )
23✔
462
                }
23✔
463

464
                for _, node := range nodes {
301✔
465
                        c.graphCache.RemoveNode(node)
60✔
466
                }
60✔
467

468
                log.Debugf("Pruned graph, cache now has %s",
241✔
469
                        c.graphCache.Stats())
241✔
470
        }
471

472
        if len(edges) != 0 {
262✔
473
                // Notify all currently registered clients of the newly closed
21✔
474
                // channels.
21✔
475
                closeSummaries := createCloseSummaries(
21✔
476
                        blockHeight, edges...,
21✔
477
                )
21✔
478

21✔
479
                select {
21✔
480
                case c.topologyUpdate <- closeSummaries:
21✔
481
                case <-c.quit:
×
482
                        return nil, ErrChanGraphShuttingDown
×
483
                }
484
        }
485

486
        return edges, nil
241✔
487
}
488

489
// PruneGraphNodes is a garbage collection method which attempts to prune out
490
// any nodes from the channel graph that are currently unconnected. This ensure
491
// that we only maintain a graph of reachable nodes. In the event that a pruned
492
// node gains more channels, it will be re-added back to the graph.
493
func (c *ChannelGraph) PruneGraphNodes() error {
26✔
494
        nodes, err := c.V1Store.PruneGraphNodes()
26✔
495
        if err != nil {
26✔
496
                return err
×
497
        }
×
498

499
        if c.graphCache != nil {
52✔
500
                for _, node := range nodes {
33✔
501
                        c.graphCache.RemoveNode(node)
7✔
502
                }
7✔
503
        }
504

505
        return nil
26✔
506
}
507

508
// FilterKnownChanIDs takes a set of channel IDs and return the subset of chan
509
// ID's that we don't know and are not known zombies of the passed set. In other
510
// words, we perform a set difference of our set of chan ID's and the ones
511
// passed in. This method can be used by callers to determine the set of
512
// channels another peer knows of that we don't.
513
func (c *ChannelGraph) FilterKnownChanIDs(chansInfo []ChannelUpdateInfo,
514
        isZombieChan func(time.Time, time.Time) bool) ([]uint64, error) {
126✔
515

126✔
516
        unknown, knownZombies, err := c.V1Store.FilterKnownChanIDs(chansInfo)
126✔
517
        if err != nil {
126✔
518
                return nil, err
×
519
        }
×
520

521
        for _, info := range knownZombies {
182✔
522
                // TODO(ziggie): Make sure that for the strict pruning case we
56✔
523
                // compare the pubkeys and whether the right timestamp is not
56✔
524
                // older than the `ChannelPruneExpiry`.
56✔
525
                //
56✔
526
                // NOTE: The timestamp data has no verification attached to it
56✔
527
                // in the `ReplyChannelRange` msg so we are trusting this data
56✔
528
                // at this point. However it is not critical because we are just
56✔
529
                // removing the channel from the db when the timestamps are more
56✔
530
                // recent. During the querying of the gossip msg verification
56✔
531
                // happens as usual. However we should start punishing peers
56✔
532
                // when they don't provide us honest data ?
56✔
533
                isStillZombie := isZombieChan(
56✔
534
                        info.Node1UpdateTimestamp, info.Node2UpdateTimestamp,
56✔
535
                )
56✔
536

56✔
537
                if isStillZombie {
87✔
538
                        continue
31✔
539
                }
540

541
                // If we have marked it as a zombie but the latest update
542
                // timestamps could bring it back from the dead, then we mark it
543
                // alive, and we let it be added to the set of IDs to query our
544
                // peer for.
545
                err := c.V1Store.MarkEdgeLive(
25✔
546
                        info.ShortChannelID.ToUint64(),
25✔
547
                )
25✔
548
                // Since there is a chance that the edge could have been marked
25✔
549
                // as "live" between the FilterKnownChanIDs call and the
25✔
550
                // MarkEdgeLive call, we ignore the error if the edge is already
25✔
551
                // marked as live.
25✔
552
                if err != nil && !errors.Is(err, ErrZombieEdgeNotFound) {
25✔
553
                        return nil, err
×
554
                }
×
555
        }
556

557
        return unknown, nil
126✔
558
}
559

560
// MarkEdgeZombie attempts to mark a channel identified by its channel ID as a
561
// zombie. This method is used on an ad-hoc basis, when channels need to be
562
// marked as zombies outside the normal pruning cycle.
563
func (c *ChannelGraph) MarkEdgeZombie(chanID uint64,
564
        pubKey1, pubKey2 [33]byte) error {
124✔
565

124✔
566
        err := c.V1Store.MarkEdgeZombie(chanID, pubKey1, pubKey2)
124✔
567
        if err != nil {
124✔
568
                return err
×
569
        }
×
570

571
        if c.graphCache != nil {
248✔
572
                c.graphCache.RemoveChannel(pubKey1, pubKey2, chanID)
124✔
573
        }
124✔
574

575
        return nil
124✔
576
}
577

578
// UpdateEdgePolicy updates the edge routing policy for a single directed edge
579
// within the database for the referenced channel. The `flags` attribute within
580
// the ChannelEdgePolicy determines which of the directed edges are being
581
// updated. If the flag is 1, then the first node's information is being
582
// updated, otherwise it's the second node's information. The node ordering is
583
// determined by the lexicographical ordering of the identity public keys of the
584
// nodes on either side of the channel.
585
func (c *ChannelGraph) UpdateEdgePolicy(ctx context.Context,
586
        edge *models.ChannelEdgePolicy, op ...batch.SchedulerOption) error {
2,675✔
587

2,675✔
588
        from, to, err := c.V1Store.UpdateEdgePolicy(ctx, edge, op...)
2,675✔
589
        if err != nil {
2,680✔
590
                return err
5✔
591
        }
5✔
592

593
        if c.graphCache != nil {
4,955✔
594
                c.graphCache.UpdatePolicy(
2,285✔
595
                        models.NewCachedPolicy(edge), from, to,
2,285✔
596
                )
2,285✔
597
        }
2,285✔
598

599
        select {
2,670✔
600
        case c.topologyUpdate <- edge:
2,670✔
601
        case <-c.quit:
×
602
                return ErrChanGraphShuttingDown
×
603
        }
604

605
        return nil
2,670✔
606
}
607

608
// MakeTestGraph creates a new instance of the ChannelGraph for testing
609
// purposes. The backing V1Store implementation depends on the version of
610
// NewTestDB included in the current build.
611
//
612
// NOTE: this is currently unused, but is left here for future use to show how
613
// NewTestDB can be used. As the SQL implementation of the V1Store is
614
// implemented, unit tests will be switched to use this function instead of
615
// the existing MakeTestGraph helper. Once only this function is used, the
616
// existing MakeTestGraph function will be removed and this one will be renamed.
617
func MakeTestGraph(t testing.TB,
618
        opts ...ChanGraphOption) *ChannelGraph {
169✔
619

169✔
620
        t.Helper()
169✔
621

169✔
622
        store := NewTestDB(t)
169✔
623

169✔
624
        graph, err := NewChannelGraph(store, opts...)
169✔
625
        require.NoError(t, err)
169✔
626
        require.NoError(t, graph.Start())
169✔
627

169✔
628
        t.Cleanup(func() {
338✔
629
                require.NoError(t, graph.Stop())
169✔
630
        })
169✔
631

632
        return graph
169✔
633
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc