lightningnetwork / lnd, build 14930876590
09 May 2025 02:12PM UTC. Coverage: 68.56%. First build.

Pull Request #9800: multi: various test preparations for different graph store impl
Merge b16ab27fd into e8ac28067 (github, web-flow)

22 of 42 new or added lines in 7 files covered (52.38%)
133130 of 194180 relevant lines covered (68.56%)
22129.8 hits per line

Source file: /graph/db/graph.go (85.95% covered)

package graphdb

import (
        "errors"
        "fmt"
        "sync"
        "sync/atomic"
        "testing"
        "time"

        "github.com/btcsuite/btcd/chaincfg/chainhash"
        "github.com/btcsuite/btcd/wire"
        "github.com/lightningnetwork/lnd/batch"
        "github.com/lightningnetwork/lnd/graph/db/models"
        "github.com/lightningnetwork/lnd/lnwire"
        "github.com/lightningnetwork/lnd/routing/route"
        "github.com/stretchr/testify/require"
)

// ErrChanGraphShuttingDown indicates that the ChannelGraph has shut down or is
// busy shutting down.
var ErrChanGraphShuttingDown = fmt.Errorf("ChannelGraph shutting down")

// ChannelGraph is a layer above the graph's CRUD layer.
//
// NOTE: currently, this is purely a pass-through layer directly to the backing
// KVStore. Upcoming commits will move the graph cache out of the KVStore and
// into this layer so that the KVStore is only responsible for CRUD operations.
type ChannelGraph struct {
        started atomic.Bool
        stopped atomic.Bool

        // cacheMu guards any writes to the graphCache. It should be held
        // across the DB write call and the graphCache update to make the
        // two updates as atomic as possible.
        cacheMu sync.Mutex

        graphCache *GraphCache

        V1Store
        *topologyManager

        quit chan struct{}
        wg   sync.WaitGroup
}

// NewChannelGraph creates a new ChannelGraph instance with the given backend.
func NewChannelGraph(v1Store V1Store,
        options ...ChanGraphOption) (*ChannelGraph, error) {

        opts := defaultChanGraphOptions()
        for _, o := range options {
                o(opts)
        }

        g := &ChannelGraph{
                V1Store:         v1Store,
                topologyManager: newTopologyManager(),
                quit:            make(chan struct{}),
        }

        // The graph cache can be turned off (e.g. for mobile users) for a
        // speed/memory usage tradeoff.
        if opts.useGraphCache {
                g.graphCache = NewGraphCache(opts.preAllocCacheNumNodes)
        }

        return g, nil
}

// Start kicks off any goroutines required for the ChannelGraph to function.
// If the graph cache is enabled, then it will be populated with the contents of
// the database.
func (c *ChannelGraph) Start() error {
        if !c.started.CompareAndSwap(false, true) {
                return nil
        }
        log.Debugf("ChannelGraph starting")
        defer log.Debug("ChannelGraph started")

        if c.graphCache != nil {
                if err := c.populateCache(); err != nil {
                        return fmt.Errorf("could not populate the graph "+
                                "cache: %w", err)
                }
        }

        c.wg.Add(1)
        go c.handleTopologySubscriptions()

        return nil
}

// Stop signals any active goroutines for a graceful closure.
func (c *ChannelGraph) Stop() error {
        if !c.stopped.CompareAndSwap(false, true) {
                return nil
        }

        log.Debugf("ChannelGraph shutting down...")
        defer log.Debug("ChannelGraph shutdown complete")

        close(c.quit)
        c.wg.Wait()

        return nil
}
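
// A minimal usage sketch of the lifecycle above, assuming some existing
// V1Store implementation "store": Start populates the in-memory cache (when
// enabled) and launches the topology goroutine, and Stop tears both down
// again.
//
//        g, err := NewChannelGraph(store)
//        if err != nil {
//                return err
//        }
//        if err := g.Start(); err != nil {
//                return err
//        }
//        defer func() { _ = g.Stop() }()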

// handleTopologySubscriptions ensures that topology client subscriptions,
// subscription cancellations and topology notifications are handled
// synchronously.
//
// NOTE: this MUST be run in a goroutine.
func (c *ChannelGraph) handleTopologySubscriptions() {
        defer c.wg.Done()

        for {
                select {
                // A new fully validated topology update has just arrived.
                // We'll notify any registered clients.
                case update := <-c.topologyUpdate:
                        // TODO(elle): change topology handling to be handled
                        // synchronously so that we can guarantee the order of
                        // notification delivery.
                        c.wg.Add(1)
                        go c.handleTopologyUpdate(update)

                        // TODO(roasbeef): remove all unconnected vertexes
                        // after N blocks pass with no corresponding
                        // announcements.

                // A new notification client update has arrived. We're either
                // gaining a new client, or cancelling notifications for an
                // existing client.
                case ntfnUpdate := <-c.ntfnClientUpdates:
                        clientID := ntfnUpdate.clientID

                        if ntfnUpdate.cancel {
                                client, ok := c.topologyClients.LoadAndDelete(
                                        clientID,
                                )
                                if ok {
                                        close(client.exit)
                                        client.wg.Wait()

                                        close(client.ntfnChan)
                                }

                                continue
                        }

                        c.topologyClients.Store(clientID, &topologyClient{
                                ntfnChan: ntfnUpdate.ntfnChan,
                                exit:     make(chan struct{}),
                        })

                case <-c.quit:
                        return
                }
        }
}

// populateCache loads the entire channel graph into the in-memory graph cache.
//
// NOTE: This should only be called if the graphCache has been constructed.
func (c *ChannelGraph) populateCache() error {
        startTime := time.Now()
        log.Info("Populating in-memory channel graph, this might take a " +
                "while...")

        err := c.V1Store.ForEachNodeCacheable(func(node route.Vertex,
                features *lnwire.FeatureVector) error {

                c.graphCache.AddNodeFeatures(node, features)

                return nil
        })
        if err != nil {
                return err
        }

        err = c.V1Store.ForEachChannel(func(info *models.ChannelEdgeInfo,
                policy1, policy2 *models.ChannelEdgePolicy) error {

                c.graphCache.AddChannel(info, policy1, policy2)

                return nil
        })
        if err != nil {
                return err
        }

        log.Infof("Finished populating in-memory channel graph (took %v, %s)",
                time.Since(startTime), c.graphCache.Stats())

        return nil
}

// ForEachNodeDirectedChannel iterates through all channels of a given node,
// executing the passed callback on the directed edge representing the channel
// and its incoming policy. If the callback returns an error, then the iteration
// is halted with the error propagated back up to the caller. If the graphCache
// is available, then it will be used to retrieve the node's channels instead
// of the database.
//
// Unknown policies are passed into the callback as nil values.
//
// NOTE: this is part of the graphdb.NodeTraverser interface.
func (c *ChannelGraph) ForEachNodeDirectedChannel(node route.Vertex,
        cb func(channel *DirectedChannel) error) error {

        if c.graphCache != nil {
                return c.graphCache.ForEachChannel(node, cb)
        }

        return c.V1Store.ForEachNodeDirectedChannel(node, cb)
}

// FetchNodeFeatures returns the features of the given node. If no features are
// known for the node, an empty feature vector is returned.
// If the graphCache is available, then it will be used to retrieve the node's
// features instead of the database.
//
// NOTE: this is part of the graphdb.NodeTraverser interface.
func (c *ChannelGraph) FetchNodeFeatures(node route.Vertex) (
        *lnwire.FeatureVector, error) {

        if c.graphCache != nil {
                return c.graphCache.GetFeatures(node), nil
        }

        return c.V1Store.FetchNodeFeatures(node)
}

// GraphSession will provide the call-back with access to a NodeTraverser
// instance which can be used to perform queries against the channel graph. If
// the graph cache is not enabled, then the call-back will be provided with
// access to the graph via a consistent read-only transaction.
func (c *ChannelGraph) GraphSession(cb func(graph NodeTraverser) error) error {
        if c.graphCache != nil {
                return cb(c)
        }

        return c.V1Store.GraphSession(cb)
}
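
// A sketch of driving a read-only traversal through GraphSession, assuming a
// *ChannelGraph "g" and a route.Vertex "target" for the node of interest; the
// same callback runs against the in-memory cache when it is enabled, or
// against a read-only DB transaction when it is not.
//
//        err := g.GraphSession(func(nt NodeTraverser) error {
//                features, err := nt.FetchNodeFeatures(target)
//                if err != nil {
//                        return err
//                }
//                _ = features
//
//                return nt.ForEachNodeDirectedChannel(target,
//                        func(ch *DirectedChannel) error {
//                                // Unknown policies arrive here as nil.
//                                return nil
//                        },
//                )
//        })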

// ForEachNodeCached iterates through all the stored vertices/nodes in the
// graph, executing the passed callback with each node encountered.
//
// NOTE: The callback contents MUST not be modified.
func (c *ChannelGraph) ForEachNodeCached(cb func(node route.Vertex,
        chans map[uint64]*DirectedChannel) error) error {

        if c.graphCache != nil {
                return c.graphCache.ForEachNode(cb)
        }

        return c.V1Store.ForEachNodeCached(cb)
}

// AddLightningNode adds a vertex/node to the graph database. If the node is not
// in the database from before, this will add a new, unconnected one to the
// graph. If it is present from before, this will update that node's
// information. Note that this method is expected to only be called to update an
// already present node from a node announcement, or to insert a node found in a
// channel update.
func (c *ChannelGraph) AddLightningNode(node *models.LightningNode,
        op ...batch.SchedulerOption) error {

        c.cacheMu.Lock()
        defer c.cacheMu.Unlock()

        err := c.V1Store.AddLightningNode(node, op...)
        if err != nil {
                return err
        }

        if c.graphCache != nil {
                c.graphCache.AddNodeFeatures(
                        node.PubKeyBytes, node.Features,
                )
        }

        select {
        case c.topologyUpdate <- node:
        case <-c.quit:
                return ErrChanGraphShuttingDown
        }

        return nil
}
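
// An illustrative sketch of inserting a node announcement through this layer,
// assuming a *ChannelGraph "g" and a 33-byte node key "pub"; the empty feature
// vector is only a placeholder, a real caller would carry over the features
// from the announcement.
//
//        node := &models.LightningNode{
//                PubKeyBytes: pub,
//                Features:    lnwire.EmptyFeatureVector(),
//        }
//        if err := g.AddLightningNode(node); err != nil {
//                return err
//        }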

// DeleteLightningNode starts a new database transaction to remove a vertex/node
// from the database according to the node's public key.
func (c *ChannelGraph) DeleteLightningNode(nodePub route.Vertex) error {
        c.cacheMu.Lock()
        defer c.cacheMu.Unlock()

        err := c.V1Store.DeleteLightningNode(nodePub)
        if err != nil {
                return err
        }

        if c.graphCache != nil {
                c.graphCache.RemoveNode(nodePub)
        }

        return nil
}

// AddChannelEdge adds a new (undirected, blank) edge to the graph database. An
// undirected edge from the two target nodes is created. The information stored
// denotes the static attributes of the channel, such as the channelID, the keys
// involved in creation of the channel, and the set of features that the channel
// supports. The chanPoint and chanID are used to uniquely identify the edge
// globally within the database.
func (c *ChannelGraph) AddChannelEdge(edge *models.ChannelEdgeInfo,
        op ...batch.SchedulerOption) error {

        c.cacheMu.Lock()
        defer c.cacheMu.Unlock()

        err := c.V1Store.AddChannelEdge(edge, op...)
        if err != nil {
                return err
        }

        if c.graphCache != nil {
                c.graphCache.AddChannel(edge, nil, nil)
        }

        select {
        case c.topologyUpdate <- edge:
        case <-c.quit:
                return ErrChanGraphShuttingDown
        }

        return nil
}

// MarkEdgeLive clears an edge from our zombie index, deeming it as live.
// If the cache is enabled, the edge will be added back to the graph cache if
// we still have a record of this channel in the DB.
func (c *ChannelGraph) MarkEdgeLive(chanID uint64) error {
        c.cacheMu.Lock()
        defer c.cacheMu.Unlock()

        err := c.V1Store.MarkEdgeLive(chanID)
        if err != nil {
                return err
        }

        if c.graphCache != nil {
                // We need to add the channel back into our graph cache,
                // otherwise we won't use it for path finding.
                infos, err := c.V1Store.FetchChanInfos([]uint64{chanID})
                if err != nil {
                        return err
                }

                if len(infos) == 0 {
                        return nil
                }

                info := infos[0]

                c.graphCache.AddChannel(info.Info, info.Policy1, info.Policy2)
        }

        return nil
}

// DeleteChannelEdges removes edges with the given channel IDs from the
// database and marks them as zombies. This ensures that we're unable to re-add
// them to our database once again. If an edge does not exist within the
// database, then ErrEdgeNotFound will be returned. If strictZombiePruning is
// true, then when we mark these edges as zombies, we'll set up the keys such
// that we require the node that failed to send the fresh update to be the one
// that resurrects the channel from its zombie state. The markZombie bool
// denotes whether to mark the channel as a zombie.
func (c *ChannelGraph) DeleteChannelEdges(strictZombiePruning, markZombie bool,
        chanIDs ...uint64) error {

        c.cacheMu.Lock()
        defer c.cacheMu.Unlock()

        infos, err := c.V1Store.DeleteChannelEdges(
                strictZombiePruning, markZombie, chanIDs...,
        )
        if err != nil {
                return err
        }

        if c.graphCache != nil {
                for _, info := range infos {
                        c.graphCache.RemoveChannel(
                                info.NodeKey1Bytes, info.NodeKey2Bytes,
                                info.ChannelID,
                        )
                }
        }

        return err
}

// DisconnectBlockAtHeight is used to indicate that the block specified
// by the passed height has been disconnected from the main chain. This
// will "rewind" the graph back to the height below, deleting channels
// that are no longer confirmed from the graph. The prune log will be
// set to the last prune height valid for the remaining chain.
// Channels that were removed from the graph resulting from the
// disconnected block are returned.
func (c *ChannelGraph) DisconnectBlockAtHeight(height uint32) (
        []*models.ChannelEdgeInfo, error) {

        c.cacheMu.Lock()
        defer c.cacheMu.Unlock()

        edges, err := c.V1Store.DisconnectBlockAtHeight(height)
        if err != nil {
                return nil, err
        }

        if c.graphCache != nil {
                for _, edge := range edges {
                        c.graphCache.RemoveChannel(
                                edge.NodeKey1Bytes, edge.NodeKey2Bytes,
                                edge.ChannelID,
                        )
                }
        }

        return edges, nil
}

// PruneGraph prunes newly closed channels from the channel graph in response
// to a new block being solved on the network. Any transactions which spend the
// funding output of any known channels within the graph will be deleted.
// Additionally, the "prune tip", or the last block which has been used to
// prune the graph is stored so callers can ensure the graph is fully in sync
// with the current UTXO state. A slice of channels that have been closed by
// the target block are returned if the function succeeds without error.
func (c *ChannelGraph) PruneGraph(spentOutputs []*wire.OutPoint,
        blockHash *chainhash.Hash, blockHeight uint32) (
        []*models.ChannelEdgeInfo, error) {

        c.cacheMu.Lock()
        defer c.cacheMu.Unlock()

        edges, nodes, err := c.V1Store.PruneGraph(
                spentOutputs, blockHash, blockHeight,
        )
        if err != nil {
                return nil, err
        }

        if c.graphCache != nil {
                for _, edge := range edges {
                        c.graphCache.RemoveChannel(
                                edge.NodeKey1Bytes, edge.NodeKey2Bytes,
                                edge.ChannelID,
                        )
                }

                for _, node := range nodes {
                        c.graphCache.RemoveNode(node)
                }

                log.Debugf("Pruned graph, cache now has %s",
                        c.graphCache.Stats())
        }

        if len(edges) != 0 {
                // Notify all currently registered clients of the newly closed
                // channels.
                closeSummaries := createCloseSummaries(
                        blockHeight, edges...,
                )
                c.notifyTopologyChange(&TopologyChange{
                        ClosedChannels: closeSummaries,
                })
        }

        return edges, nil
}

// PruneGraphNodes is a garbage collection method which attempts to prune out
// any nodes from the channel graph that are currently unconnected. This ensures
// that we only maintain a graph of reachable nodes. In the event that a pruned
// node gains more channels, it will be re-added back to the graph.
func (c *ChannelGraph) PruneGraphNodes() error {
        c.cacheMu.Lock()
        defer c.cacheMu.Unlock()

        nodes, err := c.V1Store.PruneGraphNodes()
        if err != nil {
                return err
        }

        if c.graphCache != nil {
                for _, node := range nodes {
                        c.graphCache.RemoveNode(node)
                }
        }

        return nil
}

// FilterKnownChanIDs takes a set of channel IDs and returns the subset of chan
// IDs that we don't know and are not known zombies of the passed set. In other
// words, we perform a set difference of our set of chan IDs and the ones
// passed in. This method can be used by callers to determine the set of
// channels another peer knows of that we don't.
func (c *ChannelGraph) FilterKnownChanIDs(chansInfo []ChannelUpdateInfo,
        isZombieChan func(time.Time, time.Time) bool) ([]uint64, error) {

        unknown, knownZombies, err := c.V1Store.FilterKnownChanIDs(chansInfo)
        if err != nil {
                return nil, err
        }

        for _, info := range knownZombies {
                // TODO(ziggie): Make sure that for the strict pruning case we
                // compare the pubkeys and whether the right timestamp is not
                // older than the `ChannelPruneExpiry`.
                //
                // NOTE: The timestamp data has no verification attached to it
                // in the `ReplyChannelRange` msg so we are trusting this data
                // at this point. However it is not critical because we are just
                // removing the channel from the db when the timestamps are more
                // recent. During the querying of the gossip msg verification
                // happens as usual. However we should start punishing peers
                // when they don't provide us honest data?
                isStillZombie := isZombieChan(
                        info.Node1UpdateTimestamp, info.Node2UpdateTimestamp,
                )

                if isStillZombie {
                        continue
                }

                // If we have marked it as a zombie but the latest update
                // timestamps could bring it back from the dead, then we mark it
                // alive, and we let it be added to the set of IDs to query our
                // peer for.
                err := c.V1Store.MarkEdgeLive(
                        info.ShortChannelID.ToUint64(),
                )
                // Since there is a chance that the edge could have been marked
                // as "live" between the FilterKnownChanIDs call and the
                // MarkEdgeLive call, we ignore the error if the edge is already
                // marked as live.
                if err != nil && !errors.Is(err, ErrZombieEdgeNotFound) {
                        return nil, err
                }
        }

        return unknown, nil
}

// MarkEdgeZombie attempts to mark a channel identified by its channel ID as a
// zombie. This method is used on an ad-hoc basis, when channels need to be
// marked as zombies outside the normal pruning cycle.
func (c *ChannelGraph) MarkEdgeZombie(chanID uint64,
        pubKey1, pubKey2 [33]byte) error {

        c.cacheMu.Lock()
        defer c.cacheMu.Unlock()

        err := c.V1Store.MarkEdgeZombie(chanID, pubKey1, pubKey2)
        if err != nil {
                return err
        }

        if c.graphCache != nil {
                c.graphCache.RemoveChannel(pubKey1, pubKey2, chanID)
        }

        return nil
}

// UpdateEdgePolicy updates the edge routing policy for a single directed edge
// within the database for the referenced channel. The `flags` attribute within
// the ChannelEdgePolicy determines which of the directed edges are being
// updated. If the flag is 1, then the first node's information is being
// updated, otherwise it's the second node's information. The node ordering is
// determined by the lexicographical ordering of the identity public keys of the
// nodes on either side of the channel.
func (c *ChannelGraph) UpdateEdgePolicy(edge *models.ChannelEdgePolicy,
        op ...batch.SchedulerOption) error {

        c.cacheMu.Lock()
        defer c.cacheMu.Unlock()

        from, to, err := c.V1Store.UpdateEdgePolicy(edge, op...)
        if err != nil {
                return err
        }

        if c.graphCache != nil {
                var isUpdate1 bool
                if edge.ChannelFlags&lnwire.ChanUpdateDirection == 0 {
                        isUpdate1 = true
                }

                c.graphCache.UpdatePolicy(edge, from, to, isUpdate1)
        }

        select {
        case c.topologyUpdate <- edge:
        case <-c.quit:
                return ErrChanGraphShuttingDown
        }

        return nil
}
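
// A sketch of submitting a policy update, assuming an existing
// *models.ChannelEdgePolicy "policy" for the channel; the ChanUpdateDirection
// bit of ChannelFlags selects which of the two directed edges is updated (the
// code above treats a cleared bit as an update for the first node).
//
//        policy.ChannelFlags &^= lnwire.ChanUpdateDirection
//        if err := g.UpdateEdgePolicy(policy); err != nil {
//                return err
//        }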

// MakeTestGraphNew creates a new instance of the ChannelGraph for testing
// purposes. The backing V1Store implementation depends on the version of
// NewTestDB included in the current build.
//
// NOTE: this is currently unused, but is left here for future use to show how
// NewTestDB can be used. As the SQL implementation of the V1Store is
// implemented, unit tests will be switched to use this function instead of
// the existing MakeTestGraph helper. Once only this function is used, the
// existing MakeTestGraph function will be removed and this one will be renamed.
func MakeTestGraphNew(t testing.TB,
        opts ...ChanGraphOption) *ChannelGraph { //nolint:unused

        t.Helper()

        store := NewTestDB(t)

        graph, err := NewChannelGraph(store, opts...)
        require.NoError(t, err)
        require.NoError(t, graph.Start())

        t.Cleanup(func() {
                require.NoError(t, graph.Stop())
        })

        return graph
}