lightningnetwork / lnd, build 15591525289

11 Jun 2025 05:26PM UTC coverage: 67.412% (+9.1%) from 58.306%

Pull Request #9932: [draft] graph/db+sqldb: graph store SQL implementation + migration
Merge 0149d1bb0 into 92a5d35cf (via GitHub web-flow)

19 of 3311 new or added lines in 7 files covered (0.57%).
573 existing lines in 10 files are now uncovered.
134443 of 199434 relevant lines covered (67.41%).
21909.5 hits per line.

Source File: /graph/db/graph.go (87.87% covered)

package graphdb

import (
        "errors"
        "fmt"
        "sync"
        "sync/atomic"
        "testing"
        "time"

        "github.com/btcsuite/btcd/chaincfg/chainhash"
        "github.com/btcsuite/btcd/wire"
        "github.com/lightningnetwork/lnd/batch"
        "github.com/lightningnetwork/lnd/graph/db/models"
        "github.com/lightningnetwork/lnd/lnwire"
        "github.com/lightningnetwork/lnd/routing/route"
        "github.com/stretchr/testify/require"
)

// ErrChanGraphShuttingDown indicates that the ChannelGraph has shut down or is
// busy shutting down.
var ErrChanGraphShuttingDown = fmt.Errorf("ChannelGraph shutting down")

// ChannelGraph is a layer above the graph's CRUD layer.
//
// NOTE: currently, this is purely a pass-through layer directly to the backing
// KVStore. Upcoming commits will move the graph cache out of the KVStore and
// into this layer so that the KVStore is only responsible for CRUD operations.
type ChannelGraph struct {
        started atomic.Bool
        stopped atomic.Bool

        graphCache *GraphCache

        V1Store
        *topologyManager

        quit chan struct{}
        wg   sync.WaitGroup
}

// NewChannelGraph creates a new ChannelGraph instance with the given backend.
func NewChannelGraph(v1Store V1Store,
        options ...ChanGraphOption) (*ChannelGraph, error) {

        opts := defaultChanGraphOptions()
        for _, o := range options {
                o(opts)
        }

        g := &ChannelGraph{
                V1Store:         v1Store,
                topologyManager: newTopologyManager(),
                quit:            make(chan struct{}),
        }

        // The graph cache can be turned off (e.g. for mobile users) for a
        // speed/memory usage tradeoff.
        if opts.useGraphCache {
                g.graphCache = NewGraphCache(opts.preAllocCacheNumNodes)
        }

        return g, nil
}

// Start kicks off any goroutines required for the ChannelGraph to function.
// If the graph cache is enabled, then it will be populated with the contents of
// the database.
func (c *ChannelGraph) Start() error {
        if !c.started.CompareAndSwap(false, true) {
                return nil
        }
        log.Debugf("ChannelGraph starting")
        defer log.Debug("ChannelGraph started")

        if c.graphCache != nil {
                if err := c.populateCache(); err != nil {
                        return fmt.Errorf("could not populate the graph "+
                                "cache: %w", err)
                }
        }

        c.wg.Add(1)
        go c.handleTopologySubscriptions()

        return nil
}

// Stop signals any active goroutines for a graceful closure.
func (c *ChannelGraph) Stop() error {
        if !c.stopped.CompareAndSwap(false, true) {
                return nil
        }

        log.Debugf("ChannelGraph shutting down...")
        defer log.Debug("ChannelGraph shutdown complete")

        close(c.quit)
        c.wg.Wait()

        return nil
}

// handleTopologySubscriptions ensures that topology client subscriptions,
// subscription cancellations and topology notifications are handled
// synchronously.
//
// NOTE: this MUST be run in a goroutine.
func (c *ChannelGraph) handleTopologySubscriptions() {
        defer c.wg.Done()

        for {
                select {
                // A new fully validated topology update has just arrived.
                // We'll notify any registered clients.
                case update := <-c.topologyUpdate:
                        // TODO(elle): change topology handling to be handled
                        // synchronously so that we can guarantee the order of
                        // notification delivery.
                        c.wg.Add(1)
                        go c.handleTopologyUpdate(update)

                        // TODO(roasbeef): remove all unconnected vertexes
                        // after N blocks pass with no corresponding
                        // announcements.

                // A new notification client update has arrived. We're either
                // gaining a new client, or cancelling notifications for an
                // existing client.
                case ntfnUpdate := <-c.ntfnClientUpdates:
                        clientID := ntfnUpdate.clientID

                        if ntfnUpdate.cancel {
                                client, ok := c.topologyClients.LoadAndDelete(
                                        clientID,
                                )
                                if ok {
                                        close(client.exit)
                                        client.wg.Wait()

                                        close(client.ntfnChan)
                                }

                                continue
                        }

                        c.topologyClients.Store(clientID, &topologyClient{
                                ntfnChan: ntfnUpdate.ntfnChan,
                                exit:     make(chan struct{}),
                        })

                case <-c.quit:
                        return
                }
        }
}

// populateCache loads the entire channel graph into the in-memory graph cache.
//
// NOTE: This should only be called if the graphCache has been constructed.
func (c *ChannelGraph) populateCache() error {
        startTime := time.Now()
        log.Info("Populating in-memory channel graph, this might take a " +
                "while...")

        err := c.V1Store.ForEachNodeCacheable(func(node route.Vertex,
                features *lnwire.FeatureVector) error {

                c.graphCache.AddNodeFeatures(node, features)

                return nil
        })
        if err != nil {
                return err
        }

        err = c.V1Store.ForEachChannelCacheable(
                func(info *models.CachedEdgeInfo,
                        policy1, policy2 *models.CachedEdgePolicy) error {

                        c.graphCache.AddChannel(info, policy1, policy2)

                        return nil
                })
        if err != nil {
                return err
        }

        log.Infof("Finished populating in-memory channel graph (took %v, %s)",
                time.Since(startTime), c.graphCache.Stats())

        return nil
}

// ForEachNodeDirectedChannel iterates through all channels of a given node,
// executing the passed callback on the directed edge representing the channel
// and its incoming policy. If the callback returns an error, then the iteration
// is halted with the error propagated back up to the caller. If the graphCache
// is available, then it will be used to retrieve the node's channels instead
// of the database.
//
// Unknown policies are passed into the callback as nil values.
//
// NOTE: this is part of the graphdb.NodeTraverser interface.
func (c *ChannelGraph) ForEachNodeDirectedChannel(node route.Vertex,
        cb func(channel *DirectedChannel) error) error {

        if c.graphCache != nil {
                return c.graphCache.ForEachChannel(node, cb)
        }

        return c.V1Store.ForEachNodeDirectedChannel(node, cb)
}

// FetchNodeFeatures returns the features of the given node. If no features are
// known for the node, an empty feature vector is returned.
// If the graphCache is available, then it will be used to retrieve the node's
// features instead of the database.
//
// NOTE: this is part of the graphdb.NodeTraverser interface.
func (c *ChannelGraph) FetchNodeFeatures(node route.Vertex) (
        *lnwire.FeatureVector, error) {

        if c.graphCache != nil {
                return c.graphCache.GetFeatures(node), nil
        }

        return c.V1Store.FetchNodeFeatures(node)
}

// GraphSession will provide the call-back with access to a NodeTraverser
// instance which can be used to perform queries against the channel graph. If
// the graph cache is not enabled, then the call-back will be provided with
// access to the graph via a consistent read-only transaction.
func (c *ChannelGraph) GraphSession(cb func(graph NodeTraverser) error) error {
        if c.graphCache != nil {
                return cb(c)
        }

        return c.V1Store.GraphSession(cb)
}

// ForEachNodeCached iterates through all the stored vertices/nodes in the
// graph, executing the passed callback with each node encountered.
//
// NOTE: The callback contents MUST not be modified.
func (c *ChannelGraph) ForEachNodeCached(cb func(node route.Vertex,
        chans map[uint64]*DirectedChannel) error) error {

        if c.graphCache != nil {
                return c.graphCache.ForEachNode(cb)
        }

        return c.V1Store.ForEachNodeCached(cb)
}

// AddLightningNode adds a vertex/node to the graph database. If the node is not
// in the database from before, this will add a new, unconnected one to the
// graph. If it is present from before, this will update that node's
// information. Note that this method is expected to only be called to update an
// already present node from a node announcement, or to insert a node found in a
// channel update.
func (c *ChannelGraph) AddLightningNode(node *models.LightningNode,
        op ...batch.SchedulerOption) error {

        err := c.V1Store.AddLightningNode(node, op...)
        if err != nil {
                return err
        }

        if c.graphCache != nil {
                c.graphCache.AddNodeFeatures(
                        node.PubKeyBytes, node.Features,
                )
        }

        select {
        case c.topologyUpdate <- node:
        case <-c.quit:
                return ErrChanGraphShuttingDown
        }

        return nil
}

// DeleteLightningNode starts a new database transaction to remove a vertex/node
// from the database according to the node's public key.
func (c *ChannelGraph) DeleteLightningNode(nodePub route.Vertex) error {
        err := c.V1Store.DeleteLightningNode(nodePub)
        if err != nil {
                return err
        }

        if c.graphCache != nil {
                c.graphCache.RemoveNode(nodePub)
        }

        return nil
}

// AddChannelEdge adds a new (undirected, blank) edge to the graph database. An
// undirected edge between the two target nodes is created. The information
// stored denotes the static attributes of the channel, such as the channelID,
// the keys involved in creation of the channel, and the set of features that
// the channel supports. The chanPoint and chanID are used to uniquely identify
// the edge globally within the database.
func (c *ChannelGraph) AddChannelEdge(edge *models.ChannelEdgeInfo,
        op ...batch.SchedulerOption) error {

        err := c.V1Store.AddChannelEdge(edge, op...)
        if err != nil {
                return err
        }

        if c.graphCache != nil {
                c.graphCache.AddChannel(models.NewCachedEdge(edge), nil, nil)
        }

        select {
        case c.topologyUpdate <- edge:
        case <-c.quit:
                return ErrChanGraphShuttingDown
        }

        return nil
}

// MarkEdgeLive clears an edge from our zombie index, deeming it as live.
// If the cache is enabled, the edge will be added back to the graph cache if
// we still have a record of this channel in the DB.
func (c *ChannelGraph) MarkEdgeLive(chanID uint64) error {
        err := c.V1Store.MarkEdgeLive(chanID)
        if err != nil {
                return err
        }

        if c.graphCache != nil {
                // We need to add the channel back into our graph cache,
                // otherwise we won't use it for path finding.
                infos, err := c.V1Store.FetchChanInfos([]uint64{chanID})
                if err != nil {
                        return err
                }

                if len(infos) == 0 {
                        return nil
                }

                info := infos[0]

                c.graphCache.AddChannel(
                        models.NewCachedEdge(info.Info),
                        models.NewCachedPolicy(info.Policy1),
                        models.NewCachedPolicy(info.Policy2),
                )
        }

        return nil
}

// DeleteChannelEdges removes edges with the given channel IDs from the
// database and marks them as zombies. This ensures that we're unable to re-add
// it to our database once again. If an edge does not exist within the
// database, then ErrEdgeNotFound will be returned. If strictZombiePruning is
// true, then when we mark these edges as zombies, we'll set up the keys such
// that we require the node that failed to send the fresh update to be the one
// that resurrects the channel from its zombie state. The markZombie bool
// denotes whether to mark the channel as a zombie.
func (c *ChannelGraph) DeleteChannelEdges(strictZombiePruning, markZombie bool,
        chanIDs ...uint64) error {

        infos, err := c.V1Store.DeleteChannelEdges(
                strictZombiePruning, markZombie, chanIDs...,
        )
        if err != nil {
                return err
        }

        if c.graphCache != nil {
                for _, info := range infos {
                        c.graphCache.RemoveChannel(
                                info.NodeKey1Bytes, info.NodeKey2Bytes,
                                info.ChannelID,
                        )
                }
        }

        return err
}

// DisconnectBlockAtHeight is used to indicate that the block specified
// by the passed height has been disconnected from the main chain. This
// will "rewind" the graph back to the height below, deleting channels
// that are no longer confirmed from the graph. The prune log will be
// set to the last prune height valid for the remaining chain.
// Channels that were removed from the graph as a result of the
// disconnected block are returned.
func (c *ChannelGraph) DisconnectBlockAtHeight(height uint32) (
        []*models.ChannelEdgeInfo, error) {

        edges, err := c.V1Store.DisconnectBlockAtHeight(height)
        if err != nil {
                return nil, err
        }

        if c.graphCache != nil {
                for _, edge := range edges {
                        c.graphCache.RemoveChannel(
                                edge.NodeKey1Bytes, edge.NodeKey2Bytes,
                                edge.ChannelID,
                        )
                }
        }

        return edges, nil
}

// PruneGraph prunes newly closed channels from the channel graph in response
// to a new block being solved on the network. Any transactions which spend the
// funding output of any known channels within the graph will be deleted.
// Additionally, the "prune tip", or the last block which has been used to
// prune the graph is stored so callers can ensure the graph is fully in sync
// with the current UTXO state. A slice of channels that have been closed by
// the target block are returned if the function succeeds without error.
func (c *ChannelGraph) PruneGraph(spentOutputs []*wire.OutPoint,
        blockHash *chainhash.Hash, blockHeight uint32) (
        []*models.ChannelEdgeInfo, error) {

        edges, nodes, err := c.V1Store.PruneGraph(
                spentOutputs, blockHash, blockHeight,
        )
        if err != nil {
                return nil, err
        }

        if c.graphCache != nil {
                for _, edge := range edges {
                        c.graphCache.RemoveChannel(
                                edge.NodeKey1Bytes, edge.NodeKey2Bytes,
                                edge.ChannelID,
                        )
                }

                for _, node := range nodes {
                        c.graphCache.RemoveNode(node)
                }

                log.Debugf("Pruned graph, cache now has %s",
                        c.graphCache.Stats())
        }

        if len(edges) != 0 {
                // Notify all currently registered clients of the newly closed
                // channels.
                closeSummaries := createCloseSummaries(
                        blockHeight, edges...,
                )

                select {
                case c.topologyUpdate <- closeSummaries:
                case <-c.quit:
                        return nil, ErrChanGraphShuttingDown
                }
        }

        return edges, nil
}

// PruneGraphNodes is a garbage collection method which attempts to prune out
// any nodes from the channel graph that are currently unconnected. This ensures
// that we only maintain a graph of reachable nodes. In the event that a pruned
// node gains more channels, it will be re-added back to the graph.
func (c *ChannelGraph) PruneGraphNodes() error {
        nodes, err := c.V1Store.PruneGraphNodes()
        if err != nil {
                return err
        }

        if c.graphCache != nil {
                for _, node := range nodes {
                        c.graphCache.RemoveNode(node)
                }
        }

        return nil
}

// FilterKnownChanIDs takes a set of channel IDs and returns the subset of
// channel IDs that we don't know and that are not known zombies of the passed
// set. In other words, we perform a set difference of our set of channel IDs
// and the ones passed in. This method can be used by callers to determine the
// set of channels another peer knows of that we don't.
func (c *ChannelGraph) FilterKnownChanIDs(chansInfo []ChannelUpdateInfo,
        isZombieChan func(time.Time, time.Time) bool) ([]uint64, error) {

        unknown, knownZombies, err := c.V1Store.FilterKnownChanIDs(chansInfo)
        if err != nil {
                return nil, err
        }

        for _, info := range knownZombies {
                // TODO(ziggie): Make sure that for the strict pruning case we
                // compare the pubkeys and whether the right timestamp is not
                // older than the `ChannelPruneExpiry`.
                //
                // NOTE: The timestamp data has no verification attached to it
                // in the `ReplyChannelRange` msg so we are trusting this data
                // at this point. However it is not critical because we are just
                // removing the channel from the db when the timestamps are more
                // recent. During the querying of the gossip msg verification
                // happens as usual. However we should start punishing peers
                // when they don't provide us honest data?
                isStillZombie := isZombieChan(
                        info.Node1UpdateTimestamp, info.Node2UpdateTimestamp,
                )

                if isStillZombie {
                        continue
                }

                // If we have marked it as a zombie but the latest update
                // timestamps could bring it back from the dead, then we mark it
                // alive, and we let it be added to the set of IDs to query our
                // peer for.
                err := c.V1Store.MarkEdgeLive(
                        info.ShortChannelID.ToUint64(),
                )
                // Since there is a chance that the edge could have been marked
                // as "live" between the FilterKnownChanIDs call and the
                // MarkEdgeLive call, we ignore the error if the edge is already
                // marked as live.
                if err != nil && !errors.Is(err, ErrZombieEdgeNotFound) {
                        return nil, err
                }
        }

        return unknown, nil
}

// MarkEdgeZombie attempts to mark a channel identified by its channel ID as a
// zombie. This method is used on an ad-hoc basis, when channels need to be
// marked as zombies outside the normal pruning cycle.
func (c *ChannelGraph) MarkEdgeZombie(chanID uint64,
        pubKey1, pubKey2 [33]byte) error {

        err := c.V1Store.MarkEdgeZombie(chanID, pubKey1, pubKey2)
        if err != nil {
                return err
        }

        if c.graphCache != nil {
                c.graphCache.RemoveChannel(pubKey1, pubKey2, chanID)
        }

        return nil
}

// UpdateEdgePolicy updates the edge routing policy for a single directed edge
// within the database for the referenced channel. The `flags` attribute within
// the ChannelEdgePolicy determines which of the directed edges are being
// updated. If the flag is 1, then the first node's information is being
// updated, otherwise it's the second node's information. The node ordering is
// determined by the lexicographical ordering of the identity public keys of the
// nodes on either side of the channel.
func (c *ChannelGraph) UpdateEdgePolicy(edge *models.ChannelEdgePolicy,
        op ...batch.SchedulerOption) error {

        from, to, err := c.V1Store.UpdateEdgePolicy(edge, op...)
        if err != nil {
                return err
        }

        if c.graphCache != nil {
                c.graphCache.UpdatePolicy(
                        models.NewCachedPolicy(edge), from, to,
                )
        }

        select {
        case c.topologyUpdate <- edge:
        case <-c.quit:
                return ErrChanGraphShuttingDown
        }

        return nil
}

// MakeTestGraph creates a new instance of the ChannelGraph for testing
// purposes. The backing V1Store implementation depends on the version of
// NewTestDB included in the current build.
//
// NOTE: this is currently unused, but is left here for future use to show how
// NewTestDB can be used. As the SQL implementation of the V1Store is
// implemented, unit tests will be switched to use this function instead of
// the existing MakeTestGraph helper. Once only this function is used, the
// existing MakeTestGraph function will be removed and this one will be renamed.
func MakeTestGraph(t testing.TB,
        opts ...ChanGraphOption) *ChannelGraph {

        t.Helper()

        store := NewTestDB(t)

        graph, err := NewChannelGraph(store, opts...)
        require.NoError(t, err)
        require.NoError(t, graph.Start())

        t.Cleanup(func() {
                require.NoError(t, graph.Stop())
        })

        return graph
}
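
To make the lifecycle defined in this file concrete, the sketch below shows how a caller might wire the ChannelGraph together: construct it around a V1Store, call Start so the optional cache is populated and the topology-subscription goroutine runs, query it, and Stop it on shutdown. This is an illustrative sketch only, not code from the report or the lnd repository; openStore and the channel counting are hypothetical stand-ins, and only exported identifiers visible in graph.go above are otherwise assumed.

package main

import (
        "log"

        graphdb "github.com/lightningnetwork/lnd/graph/db"
        "github.com/lightningnetwork/lnd/routing/route"
)

// openStore is a hypothetical helper that stands in for constructing a
// concrete V1Store (the existing KVStore, or the SQL store this PR adds).
func openStore() (graphdb.V1Store, error) {
        // Construction of the backing store is out of scope for this sketch;
        // a real caller returns an initialized store here.
        return nil, nil
}

func main() {
        store, err := openStore()
        if err != nil {
                log.Fatal(err)
        }

        // Wrap the store. Start populates the in-memory cache (when enabled)
        // and launches the topology-subscription goroutine; Stop tears both
        // down again.
        graph, err := graphdb.NewChannelGraph(store)
        if err != nil {
                log.Fatal(err)
        }
        if err := graph.Start(); err != nil {
                log.Fatal(err)
        }
        defer func() {
                if err := graph.Stop(); err != nil {
                        log.Printf("stop: %v", err)
                }
        }()

        // Reads go through the cache when it is available and fall back to
        // the V1Store otherwise, as ForEachNodeDirectedChannel shows above.
        var node route.Vertex // zero-value vertex, purely for illustration
        count := 0
        err = graph.ForEachNodeDirectedChannel(node,
                func(ch *graphdb.DirectedChannel) error {
                        count++
                        return nil
                })
        if err != nil {
                log.Fatal(err)
        }
        log.Printf("node has %d channels", count)
}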