• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

lightningnetwork / lnd / 14969904482

12 May 2025 10:28AM UTC coverage: 58.602%. First build
14969904482

Pull #9800

github

web-flow
Merge d1e39a77a into e8ac28067
Pull Request #9800: multi: various test preparations for different graph store impl

0 of 37 new or added lines in 5 files covered. (0.0%)

97486 of 166354 relevant lines covered (58.6%)

1.82 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

64.19
/graph/db/graph.go
1
package graphdb
2

3
import (
4
        "errors"
5
        "fmt"
6
        "sync"
7
        "sync/atomic"
8
        "testing"
9
        "time"
10

11
        "github.com/btcsuite/btcd/chaincfg/chainhash"
12
        "github.com/btcsuite/btcd/wire"
13
        "github.com/lightningnetwork/lnd/batch"
14
        "github.com/lightningnetwork/lnd/graph/db/models"
15
        "github.com/lightningnetwork/lnd/lnwire"
16
        "github.com/lightningnetwork/lnd/routing/route"
17
        "github.com/stretchr/testify/require"
18
)
19

20
// ErrChanGraphShuttingDown indicates that the ChannelGraph has shutdown or is
// busy shutting down.
//
// NOTE: this is a static sentinel error, so errors.New is used rather than
// fmt.Errorf (which is only needed when formatting verbs are present).
var ErrChanGraphShuttingDown = errors.New("ChannelGraph shutting down")
23

24
// ChannelGraph is a layer above the graph's CRUD layer.
//
// NOTE: currently, this is purely a pass-through layer directly to the backing
// KVStore. Upcoming commits will move the graph cache out of the KVStore and
// into this layer so that the KVStore is only responsible for CRUD operations.
type ChannelGraph struct {
	// started and stopped make Start/Stop idempotent: only the first
	// CompareAndSwap in each wins.
	started atomic.Bool
	stopped atomic.Bool

	// cacheMu guards any writes to the graphCache. It should be held
	// across the DB write call and the graphCache update to make the
	// two updates as atomic as possible.
	cacheMu sync.Mutex

	// graphCache is an optional in-memory mirror of the graph used for
	// fast lookups; it is nil when the cache is disabled.
	graphCache *GraphCache

	// V1Store is the embedded backing store providing the CRUD operations;
	// un-overridden methods pass straight through to it.
	V1Store
	*topologyManager

	// quit is closed in Stop to signal shutdown; wg tracks the goroutines
	// started in Start so Stop can wait for them.
	quit chan struct{}
	wg   sync.WaitGroup
}
46

47
// NewChannelGraph creates a new ChannelGraph instance with the given backend.
48
func NewChannelGraph(v1Store V1Store,
49
        options ...ChanGraphOption) (*ChannelGraph, error) {
3✔
50

3✔
51
        opts := defaultChanGraphOptions()
3✔
52
        for _, o := range options {
6✔
53
                o(opts)
3✔
54
        }
3✔
55

56
        g := &ChannelGraph{
3✔
57
                V1Store:         v1Store,
3✔
58
                topologyManager: newTopologyManager(),
3✔
59
                quit:            make(chan struct{}),
3✔
60
        }
3✔
61

3✔
62
        // The graph cache can be turned off (e.g. for mobile users) for a
3✔
63
        // speed/memory usage tradeoff.
3✔
64
        if opts.useGraphCache {
6✔
65
                g.graphCache = NewGraphCache(opts.preAllocCacheNumNodes)
3✔
66
        }
3✔
67

68
        return g, nil
3✔
69
}
70

71
// Start kicks off any goroutines required for the ChannelGraph to function.
72
// If the graph cache is enabled, then it will be populated with the contents of
73
// the database.
74
func (c *ChannelGraph) Start() error {
3✔
75
        if !c.started.CompareAndSwap(false, true) {
3✔
76
                return nil
×
77
        }
×
78
        log.Debugf("ChannelGraph starting")
3✔
79
        defer log.Debug("ChannelGraph started")
3✔
80

3✔
81
        if c.graphCache != nil {
6✔
82
                if err := c.populateCache(); err != nil {
3✔
83
                        return fmt.Errorf("could not populate the graph "+
×
84
                                "cache: %w", err)
×
85
                }
×
86
        }
87

88
        c.wg.Add(1)
3✔
89
        go c.handleTopologySubscriptions()
3✔
90

3✔
91
        return nil
3✔
92
}
93

94
// Stop signals any active goroutines for a graceful closure.
95
func (c *ChannelGraph) Stop() error {
3✔
96
        if !c.stopped.CompareAndSwap(false, true) {
3✔
97
                return nil
×
98
        }
×
99

100
        log.Debugf("ChannelGraph shutting down...")
3✔
101
        defer log.Debug("ChannelGraph shutdown complete")
3✔
102

3✔
103
        close(c.quit)
3✔
104
        c.wg.Wait()
3✔
105

3✔
106
        return nil
3✔
107
}
108

109
// handleTopologySubscriptions ensures that topology client subscriptions,
// subscription cancellations and topology notifications are handled
// synchronously.
//
// NOTE: this MUST be run in a goroutine.
func (c *ChannelGraph) handleTopologySubscriptions() {
	defer c.wg.Done()

	for {
		select {
		// A new fully validated topology update has just arrived.
		// We'll notify any registered clients.
		case update := <-c.topologyUpdate:
			// TODO(elle): change topology handling to be handled
			// synchronously so that we can guarantee the order of
			// notification delivery.
			c.wg.Add(1)
			go c.handleTopologyUpdate(update)

			// TODO(roasbeef): remove all unconnected vertexes
			// after N blocks pass with no corresponding
			// announcements.

		// A new notification client update has arrived. We're either
		// gaining a new client, or cancelling notifications for an
		// existing client.
		case ntfnUpdate := <-c.ntfnClientUpdates:
			clientID := ntfnUpdate.clientID

			if ntfnUpdate.cancel {
				client, ok := c.topologyClients.LoadAndDelete(
					clientID,
				)
				if ok {
					// Order matters here: signal the
					// client's goroutines to exit and wait
					// for them BEFORE closing ntfnChan, so
					// nothing can send on a closed channel.
					close(client.exit)
					client.wg.Wait()

					close(client.ntfnChan)
				}

				continue
			}

			// Not a cancellation: register the new client so that
			// future topology changes are delivered to it.
			c.topologyClients.Store(clientID, &topologyClient{
				ntfnChan: ntfnUpdate.ntfnChan,
				exit:     make(chan struct{}),
			})

		// Shutdown was requested via Stop; exit the loop.
		case <-c.quit:
			return
		}
	}
}
162

163
// populateCache loads the entire channel graph into the in-memory graph cache.
164
//
165
// NOTE: This should only be called if the graphCache has been constructed.
166
func (c *ChannelGraph) populateCache() error {
3✔
167
        startTime := time.Now()
3✔
168
        log.Info("Populating in-memory channel graph, this might take a " +
3✔
169
                "while...")
3✔
170

3✔
171
        err := c.V1Store.ForEachNodeCacheable(func(node route.Vertex,
3✔
172
                features *lnwire.FeatureVector) error {
6✔
173

3✔
174
                c.graphCache.AddNodeFeatures(node, features)
3✔
175

3✔
176
                return nil
3✔
177
        })
3✔
178
        if err != nil {
3✔
179
                return err
×
180
        }
×
181

182
        err = c.V1Store.ForEachChannel(func(info *models.ChannelEdgeInfo,
3✔
183
                policy1, policy2 *models.ChannelEdgePolicy) error {
6✔
184

3✔
185
                c.graphCache.AddChannel(info, policy1, policy2)
3✔
186

3✔
187
                return nil
3✔
188
        })
3✔
189
        if err != nil {
3✔
190
                return err
×
191
        }
×
192

193
        log.Infof("Finished populating in-memory channel graph (took %v, %s)",
3✔
194
                time.Since(startTime), c.graphCache.Stats())
3✔
195

3✔
196
        return nil
3✔
197
}
198

199
// ForEachNodeDirectedChannel iterates through all channels of a given node,
200
// executing the passed callback on the directed edge representing the channel
201
// and its incoming policy. If the callback returns an error, then the iteration
202
// is halted with the error propagated back up to the caller. If the graphCache
203
// is available, then it will be used to retrieve the node's channels instead
204
// of the database.
205
//
206
// Unknown policies are passed into the callback as nil values.
207
//
208
// NOTE: this is part of the graphdb.NodeTraverser interface.
209
func (c *ChannelGraph) ForEachNodeDirectedChannel(node route.Vertex,
210
        cb func(channel *DirectedChannel) error) error {
3✔
211

3✔
212
        if c.graphCache != nil {
6✔
213
                return c.graphCache.ForEachChannel(node, cb)
3✔
214
        }
3✔
215

216
        return c.V1Store.ForEachNodeDirectedChannel(node, cb)
3✔
217
}
218

219
// FetchNodeFeatures returns the features of the given node. If no features are
220
// known for the node, an empty feature vector is returned.
221
// If the graphCache is available, then it will be used to retrieve the node's
222
// features instead of the database.
223
//
224
// NOTE: this is part of the graphdb.NodeTraverser interface.
225
func (c *ChannelGraph) FetchNodeFeatures(node route.Vertex) (
226
        *lnwire.FeatureVector, error) {
3✔
227

3✔
228
        if c.graphCache != nil {
6✔
229
                return c.graphCache.GetFeatures(node), nil
3✔
230
        }
3✔
231

232
        return c.V1Store.FetchNodeFeatures(node)
3✔
233
}
234

235
// GraphSession will provide the call-back with access to a NodeTraverser
236
// instance which can be used to perform queries against the channel graph. If
237
// the graph cache is not enabled, then the call-back will be provided with
238
// access to the graph via a consistent read-only transaction.
239
func (c *ChannelGraph) GraphSession(cb func(graph NodeTraverser) error) error {
3✔
240
        if c.graphCache != nil {
6✔
241
                return cb(c)
3✔
242
        }
3✔
243

244
        return c.V1Store.GraphSession(cb)
×
245
}
246

247
// ForEachNodeCached iterates through all the stored vertices/nodes in the
248
// graph, executing the passed callback with each node encountered.
249
//
250
// NOTE: The callback contents MUST not be modified.
251
func (c *ChannelGraph) ForEachNodeCached(cb func(node route.Vertex,
252
        chans map[uint64]*DirectedChannel) error) error {
×
253

×
254
        if c.graphCache != nil {
×
255
                return c.graphCache.ForEachNode(cb)
×
256
        }
×
257

258
        return c.V1Store.ForEachNodeCached(cb)
×
259
}
260

261
// AddLightningNode adds a vertex/node to the graph database. If the node is not
262
// in the database from before, this will add a new, unconnected one to the
263
// graph. If it is present from before, this will update that node's
264
// information. Note that this method is expected to only be called to update an
265
// already present node from a node announcement, or to insert a node found in a
266
// channel update.
267
func (c *ChannelGraph) AddLightningNode(node *models.LightningNode,
268
        op ...batch.SchedulerOption) error {
3✔
269

3✔
270
        c.cacheMu.Lock()
3✔
271
        defer c.cacheMu.Unlock()
3✔
272

3✔
273
        err := c.V1Store.AddLightningNode(node, op...)
3✔
274
        if err != nil {
3✔
275
                return err
×
276
        }
×
277

278
        if c.graphCache != nil {
6✔
279
                c.graphCache.AddNodeFeatures(
3✔
280
                        node.PubKeyBytes, node.Features,
3✔
281
                )
3✔
282
        }
3✔
283

284
        select {
3✔
285
        case c.topologyUpdate <- node:
3✔
286
        case <-c.quit:
×
287
                return ErrChanGraphShuttingDown
×
288
        }
289

290
        return nil
3✔
291
}
292

293
// DeleteLightningNode starts a new database transaction to remove a vertex/node
294
// from the database according to the node's public key.
295
func (c *ChannelGraph) DeleteLightningNode(nodePub route.Vertex) error {
×
296
        c.cacheMu.Lock()
×
297
        defer c.cacheMu.Unlock()
×
298

×
299
        err := c.V1Store.DeleteLightningNode(nodePub)
×
300
        if err != nil {
×
301
                return err
×
302
        }
×
303

304
        if c.graphCache != nil {
×
305
                c.graphCache.RemoveNode(nodePub)
×
306
        }
×
307

308
        return nil
×
309
}
310

311
// AddChannelEdge adds a new (undirected, blank) edge to the graph database. An
312
// undirected edge from the two target nodes are created. The information stored
313
// denotes the static attributes of the channel, such as the channelID, the keys
314
// involved in creation of the channel, and the set of features that the channel
315
// supports. The chanPoint and chanID are used to uniquely identify the edge
316
// globally within the database.
317
func (c *ChannelGraph) AddChannelEdge(edge *models.ChannelEdgeInfo,
318
        op ...batch.SchedulerOption) error {
3✔
319

3✔
320
        c.cacheMu.Lock()
3✔
321
        defer c.cacheMu.Unlock()
3✔
322

3✔
323
        err := c.V1Store.AddChannelEdge(edge, op...)
3✔
324
        if err != nil {
3✔
325
                return err
×
326
        }
×
327

328
        if c.graphCache != nil {
6✔
329
                c.graphCache.AddChannel(edge, nil, nil)
3✔
330
        }
3✔
331

332
        select {
3✔
333
        case c.topologyUpdate <- edge:
3✔
334
        case <-c.quit:
×
335
                return ErrChanGraphShuttingDown
×
336
        }
337

338
        return nil
3✔
339
}
340

341
// MarkEdgeLive clears an edge from our zombie index, deeming it as live.
342
// If the cache is enabled, the edge will be added back to the graph cache if
343
// we still have a record of this channel in the DB.
344
func (c *ChannelGraph) MarkEdgeLive(chanID uint64) error {
×
345
        c.cacheMu.Lock()
×
346
        defer c.cacheMu.Unlock()
×
347

×
348
        err := c.V1Store.MarkEdgeLive(chanID)
×
349
        if err != nil {
×
350
                return err
×
351
        }
×
352

353
        if c.graphCache != nil {
×
354
                // We need to add the channel back into our graph cache,
×
355
                // otherwise we won't use it for path finding.
×
356
                infos, err := c.V1Store.FetchChanInfos([]uint64{chanID})
×
357
                if err != nil {
×
358
                        return err
×
359
                }
×
360

361
                if len(infos) == 0 {
×
362
                        return nil
×
363
                }
×
364

365
                info := infos[0]
×
366

×
367
                c.graphCache.AddChannel(info.Info, info.Policy1, info.Policy2)
×
368
        }
369

370
        return nil
×
371
}
372

373
// DeleteChannelEdges removes edges with the given channel IDs from the
374
// database and marks them as zombies. This ensures that we're unable to re-add
375
// it to our database once again. If an edge does not exist within the
376
// database, then ErrEdgeNotFound will be returned. If strictZombiePruning is
377
// true, then when we mark these edges as zombies, we'll set up the keys such
378
// that we require the node that failed to send the fresh update to be the one
379
// that resurrects the channel from its zombie state. The markZombie bool
380
// denotes whether to mark the channel as a zombie.
381
func (c *ChannelGraph) DeleteChannelEdges(strictZombiePruning, markZombie bool,
382
        chanIDs ...uint64) error {
3✔
383

3✔
384
        c.cacheMu.Lock()
3✔
385
        defer c.cacheMu.Unlock()
3✔
386

3✔
387
        infos, err := c.V1Store.DeleteChannelEdges(
3✔
388
                strictZombiePruning, markZombie, chanIDs...,
3✔
389
        )
3✔
390
        if err != nil {
3✔
391
                return err
×
392
        }
×
393

394
        if c.graphCache != nil {
6✔
395
                for _, info := range infos {
6✔
396
                        c.graphCache.RemoveChannel(
3✔
397
                                info.NodeKey1Bytes, info.NodeKey2Bytes,
3✔
398
                                info.ChannelID,
3✔
399
                        )
3✔
400
                }
3✔
401
        }
402

403
        return err
3✔
404
}
405

406
// DisconnectBlockAtHeight is used to indicate that the block specified
407
// by the passed height has been disconnected from the main chain. This
408
// will "rewind" the graph back to the height below, deleting channels
409
// that are no longer confirmed from the graph. The prune log will be
410
// set to the last prune height valid for the remaining chain.
411
// Channels that were removed from the graph resulting from the
412
// disconnected block are returned.
413
func (c *ChannelGraph) DisconnectBlockAtHeight(height uint32) (
414
        []*models.ChannelEdgeInfo, error) {
2✔
415

2✔
416
        c.cacheMu.Lock()
2✔
417
        defer c.cacheMu.Unlock()
2✔
418

2✔
419
        edges, err := c.V1Store.DisconnectBlockAtHeight(height)
2✔
420
        if err != nil {
2✔
421
                return nil, err
×
422
        }
×
423

424
        if c.graphCache != nil {
4✔
425
                for _, edge := range edges {
4✔
426
                        c.graphCache.RemoveChannel(
2✔
427
                                edge.NodeKey1Bytes, edge.NodeKey2Bytes,
2✔
428
                                edge.ChannelID,
2✔
429
                        )
2✔
430
                }
2✔
431
        }
432

433
        return edges, nil
2✔
434
}
435

436
// PruneGraph prunes newly closed channels from the channel graph in response
437
// to a new block being solved on the network. Any transactions which spend the
438
// funding output of any known channels within he graph will be deleted.
439
// Additionally, the "prune tip", or the last block which has been used to
440
// prune the graph is stored so callers can ensure the graph is fully in sync
441
// with the current UTXO state. A slice of channels that have been closed by
442
// the target block are returned if the function succeeds without error.
443
func (c *ChannelGraph) PruneGraph(spentOutputs []*wire.OutPoint,
444
        blockHash *chainhash.Hash, blockHeight uint32) (
445
        []*models.ChannelEdgeInfo, error) {
3✔
446

3✔
447
        c.cacheMu.Lock()
3✔
448
        defer c.cacheMu.Unlock()
3✔
449

3✔
450
        edges, nodes, err := c.V1Store.PruneGraph(
3✔
451
                spentOutputs, blockHash, blockHeight,
3✔
452
        )
3✔
453
        if err != nil {
3✔
454
                return nil, err
×
455
        }
×
456

457
        if c.graphCache != nil {
6✔
458
                for _, edge := range edges {
6✔
459
                        c.graphCache.RemoveChannel(
3✔
460
                                edge.NodeKey1Bytes, edge.NodeKey2Bytes,
3✔
461
                                edge.ChannelID,
3✔
462
                        )
3✔
463
                }
3✔
464

465
                for _, node := range nodes {
6✔
466
                        c.graphCache.RemoveNode(node)
3✔
467
                }
3✔
468

469
                log.Debugf("Pruned graph, cache now has %s",
3✔
470
                        c.graphCache.Stats())
3✔
471
        }
472

473
        if len(edges) != 0 {
6✔
474
                // Notify all currently registered clients of the newly closed
3✔
475
                // channels.
3✔
476
                closeSummaries := createCloseSummaries(
3✔
477
                        blockHeight, edges...,
3✔
478
                )
3✔
479
                c.notifyTopologyChange(&TopologyChange{
3✔
480
                        ClosedChannels: closeSummaries,
3✔
481
                })
3✔
482
        }
3✔
483

484
        return edges, nil
3✔
485
}
486

487
// PruneGraphNodes is a garbage collection method which attempts to prune out
488
// any nodes from the channel graph that are currently unconnected. This ensure
489
// that we only maintain a graph of reachable nodes. In the event that a pruned
490
// node gains more channels, it will be re-added back to the graph.
491
func (c *ChannelGraph) PruneGraphNodes() error {
3✔
492
        c.cacheMu.Lock()
3✔
493
        defer c.cacheMu.Unlock()
3✔
494

3✔
495
        nodes, err := c.V1Store.PruneGraphNodes()
3✔
496
        if err != nil {
3✔
497
                return err
×
498
        }
×
499

500
        if c.graphCache != nil {
6✔
501
                for _, node := range nodes {
3✔
502
                        c.graphCache.RemoveNode(node)
×
503
                }
×
504
        }
505

506
        return nil
3✔
507
}
508

509
// FilterKnownChanIDs takes a set of channel IDs and return the subset of chan
510
// ID's that we don't know and are not known zombies of the passed set. In other
511
// words, we perform a set difference of our set of chan ID's and the ones
512
// passed in. This method can be used by callers to determine the set of
513
// channels another peer knows of that we don't.
514
func (c *ChannelGraph) FilterKnownChanIDs(chansInfo []ChannelUpdateInfo,
515
        isZombieChan func(time.Time, time.Time) bool) ([]uint64, error) {
3✔
516

3✔
517
        unknown, knownZombies, err := c.V1Store.FilterKnownChanIDs(chansInfo)
3✔
518
        if err != nil {
3✔
519
                return nil, err
×
520
        }
×
521

522
        for _, info := range knownZombies {
3✔
523
                // TODO(ziggie): Make sure that for the strict pruning case we
×
524
                // compare the pubkeys and whether the right timestamp is not
×
525
                // older than the `ChannelPruneExpiry`.
×
526
                //
×
527
                // NOTE: The timestamp data has no verification attached to it
×
528
                // in the `ReplyChannelRange` msg so we are trusting this data
×
529
                // at this point. However it is not critical because we are just
×
530
                // removing the channel from the db when the timestamps are more
×
531
                // recent. During the querying of the gossip msg verification
×
532
                // happens as usual. However we should start punishing peers
×
533
                // when they don't provide us honest data ?
×
534
                isStillZombie := isZombieChan(
×
535
                        info.Node1UpdateTimestamp, info.Node2UpdateTimestamp,
×
536
                )
×
537

×
538
                if isStillZombie {
×
539
                        continue
×
540
                }
541

542
                // If we have marked it as a zombie but the latest update
543
                // timestamps could bring it back from the dead, then we mark it
544
                // alive, and we let it be added to the set of IDs to query our
545
                // peer for.
546
                err := c.V1Store.MarkEdgeLive(
×
547
                        info.ShortChannelID.ToUint64(),
×
548
                )
×
549
                // Since there is a chance that the edge could have been marked
×
550
                // as "live" between the FilterKnownChanIDs call and the
×
551
                // MarkEdgeLive call, we ignore the error if the edge is already
×
552
                // marked as live.
×
553
                if err != nil && !errors.Is(err, ErrZombieEdgeNotFound) {
×
554
                        return nil, err
×
555
                }
×
556
        }
557

558
        return unknown, nil
3✔
559
}
560

561
// MarkEdgeZombie attempts to mark a channel identified by its channel ID as a
562
// zombie. This method is used on an ad-hoc basis, when channels need to be
563
// marked as zombies outside the normal pruning cycle.
564
func (c *ChannelGraph) MarkEdgeZombie(chanID uint64,
565
        pubKey1, pubKey2 [33]byte) error {
×
566

×
567
        c.cacheMu.Lock()
×
568
        defer c.cacheMu.Unlock()
×
569

×
570
        err := c.V1Store.MarkEdgeZombie(chanID, pubKey1, pubKey2)
×
571
        if err != nil {
×
572
                return err
×
573
        }
×
574

575
        if c.graphCache != nil {
×
576
                c.graphCache.RemoveChannel(pubKey1, pubKey2, chanID)
×
577
        }
×
578

579
        return nil
×
580
}
581

582
// UpdateEdgePolicy updates the edge routing policy for a single directed edge
583
// within the database for the referenced channel. The `flags` attribute within
584
// the ChannelEdgePolicy determines which of the directed edges are being
585
// updated. If the flag is 1, then the first node's information is being
586
// updated, otherwise it's the second node's information. The node ordering is
587
// determined by the lexicographical ordering of the identity public keys of the
588
// nodes on either side of the channel.
589
func (c *ChannelGraph) UpdateEdgePolicy(edge *models.ChannelEdgePolicy,
590
        op ...batch.SchedulerOption) error {
3✔
591

3✔
592
        c.cacheMu.Lock()
3✔
593
        defer c.cacheMu.Unlock()
3✔
594

3✔
595
        from, to, err := c.V1Store.UpdateEdgePolicy(edge, op...)
3✔
596
        if err != nil {
3✔
597
                return err
×
598
        }
×
599

600
        if c.graphCache != nil {
6✔
601
                var isUpdate1 bool
3✔
602
                if edge.ChannelFlags&lnwire.ChanUpdateDirection == 0 {
6✔
603
                        isUpdate1 = true
3✔
604
                }
3✔
605

606
                c.graphCache.UpdatePolicy(edge, from, to, isUpdate1)
3✔
607
        }
608

609
        select {
3✔
610
        case c.topologyUpdate <- edge:
3✔
611
        case <-c.quit:
×
612
                return ErrChanGraphShuttingDown
×
613
        }
614

615
        return nil
3✔
616
}
617

618
// MakeTestGraphNew creates a new instance of the ChannelGraph for testing
619
// purposes. The backing V1Store implementation depends on the version of
620
// NewTestDB included in the current build.
621
//
622
// NOTE: this is currently unused, but is left here for future use to show how
623
// NewTestDB can be used. As the SQL implementation of the V1Store is
624
// implemented, unit tests will be switched to use this function instead of
625
// the existing MakeTestGraph helper. Once only this function is used, the
626
// existing MakeTestGraph function will be removed and this one will be renamed.
627
func MakeTestGraphNew(t testing.TB,
NEW
628
        opts ...ChanGraphOption) *ChannelGraph {
×
NEW
629

×
NEW
630
        t.Helper()
×
NEW
631

×
NEW
632
        store := NewTestDB(t)
×
NEW
633

×
NEW
634
        graph, err := NewChannelGraph(store, opts...)
×
NEW
635
        require.NoError(t, err)
×
NEW
636
        require.NoError(t, graph.Start())
×
NEW
637

×
NEW
638
        t.Cleanup(func() {
×
NEW
639
                require.NoError(t, graph.Stop())
×
NEW
640
        })
×
641

NEW
642
        return graph
×
643
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc