lightningnetwork / lnd / 15160358425

21 May 2025 10:56AM UTC coverage: 58.584% (-10.4%) from 68.996%

Pull Request #9847: Refactor Payment PR 4
Merge 2880b9a35 into c52a6ddeb

634 of 942 new or added lines in 17 files covered (67.3%).
28108 existing lines in 450 files now uncovered.
97449 of 166342 relevant lines covered (58.58%).
1.82 hits per line.

Source File: /graph/db/graph.go (65.74% covered)
package graphdb

import (
	"errors"
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/wire"
	"github.com/lightningnetwork/lnd/batch"
	"github.com/lightningnetwork/lnd/graph/db/models"
	"github.com/lightningnetwork/lnd/kvdb"
	"github.com/lightningnetwork/lnd/lnwire"
	"github.com/lightningnetwork/lnd/routing/route"
)

// ErrChanGraphShuttingDown indicates that the ChannelGraph has shutdown or is
// busy shutting down.
var ErrChanGraphShuttingDown = fmt.Errorf("ChannelGraph shutting down")

// Config is a struct that holds all the necessary dependencies for a
// ChannelGraph.
type Config struct {
	// KVDB is the kvdb.Backend that will be used for initializing the
	// KVStore CRUD layer.
	KVDB kvdb.Backend

	// KVStoreOpts is a list of functional options that will be used when
	// initializing the KVStore.
	KVStoreOpts []KVStoreOptionModifier
}

// ChannelGraph is a layer above the graph's CRUD layer.
//
// NOTE: currently, this is purely a pass-through layer directly to the backing
// KVStore. Upcoming commits will move the graph cache out of the KVStore and
// into this layer so that the KVStore is only responsible for CRUD operations.
type ChannelGraph struct {
	started atomic.Bool
	stopped atomic.Bool

	graphCache *GraphCache

	*KVStore
	*topologyManager

	quit chan struct{}
	wg   sync.WaitGroup
}

// NewChannelGraph creates a new ChannelGraph instance with the given backend.
func NewChannelGraph(cfg *Config, options ...ChanGraphOption) (*ChannelGraph,
	error) {

	opts := defaultChanGraphOptions()
	for _, o := range options {
		o(opts)
	}

	store, err := NewKVStore(cfg.KVDB, cfg.KVStoreOpts...)
	if err != nil {
		return nil, err
	}

	g := &ChannelGraph{
		KVStore:         store,
		topologyManager: newTopologyManager(),
		quit:            make(chan struct{}),
	}

	// The graph cache can be turned off (e.g. for mobile users) for a
	// speed/memory usage tradeoff.
	if opts.useGraphCache {
		g.graphCache = NewGraphCache(opts.preAllocCacheNumNodes)
	}

	return g, nil
}

// Start kicks off any goroutines required for the ChannelGraph to function.
// If the graph cache is enabled, then it will be populated with the contents of
// the database.
func (c *ChannelGraph) Start() error {
	if !c.started.CompareAndSwap(false, true) {
		return nil
	}
	log.Debugf("ChannelGraph starting")
	defer log.Debug("ChannelGraph started")

	if c.graphCache != nil {
		if err := c.populateCache(); err != nil {
			return fmt.Errorf("could not populate the graph "+
				"cache: %w", err)
		}
	}

	c.wg.Add(1)
	go c.handleTopologySubscriptions()

	return nil
}

// Stop signals any active goroutines for a graceful closure.
func (c *ChannelGraph) Stop() error {
	if !c.stopped.CompareAndSwap(false, true) {
		return nil
	}

	log.Debugf("ChannelGraph shutting down...")
	defer log.Debug("ChannelGraph shutdown complete")

	close(c.quit)
	c.wg.Wait()

	return nil
}

// handleTopologySubscriptions ensures that topology client subscriptions,
// subscription cancellations and topology notifications are handled
// synchronously.
//
// NOTE: this MUST be run in a goroutine.
func (c *ChannelGraph) handleTopologySubscriptions() {
	defer c.wg.Done()

	for {
		select {
		// A new fully validated topology update has just arrived.
		// We'll notify any registered clients.
		case update := <-c.topologyUpdate:
			// TODO(elle): change topology handling to be handled
			// synchronously so that we can guarantee the order of
			// notification delivery.
			c.wg.Add(1)
			go c.handleTopologyUpdate(update)

			// TODO(roasbeef): remove all unconnected vertexes
			// after N blocks pass with no corresponding
			// announcements.

		// A new notification client update has arrived. We're either
		// gaining a new client, or cancelling notifications for an
		// existing client.
		case ntfnUpdate := <-c.ntfnClientUpdates:
			clientID := ntfnUpdate.clientID

			if ntfnUpdate.cancel {
				client, ok := c.topologyClients.LoadAndDelete(
					clientID,
				)
				if ok {
					close(client.exit)
					client.wg.Wait()

					close(client.ntfnChan)
				}

				continue
			}

			c.topologyClients.Store(clientID, &topologyClient{
				ntfnChan: ntfnUpdate.ntfnChan,
				exit:     make(chan struct{}),
			})

		case <-c.quit:
			return
		}
	}
}

// populateCache loads the entire channel graph into the in-memory graph cache.
//
// NOTE: This should only be called if the graphCache has been constructed.
func (c *ChannelGraph) populateCache() error {
	startTime := time.Now()
	log.Info("Populating in-memory channel graph, this might take a " +
		"while...")

	err := c.KVStore.ForEachNodeCacheable(func(node route.Vertex,
		features *lnwire.FeatureVector) error {

		c.graphCache.AddNodeFeatures(node, features)

		return nil
	})
	if err != nil {
		return err
	}

	err = c.KVStore.ForEachChannel(func(info *models.ChannelEdgeInfo,
		policy1, policy2 *models.ChannelEdgePolicy) error {

		c.graphCache.AddChannel(info, policy1, policy2)

		return nil
	})
	if err != nil {
		return err
	}

	log.Infof("Finished populating in-memory channel graph (took %v, %s)",
		time.Since(startTime), c.graphCache.Stats())

	return nil
}

// ForEachNodeDirectedChannel iterates through all channels of a given node,
// executing the passed callback on the directed edge representing the channel
// and its incoming policy. If the callback returns an error, then the iteration
// is halted with the error propagated back up to the caller. If the graphCache
// is available, then it will be used to retrieve the node's channels instead
// of the database.
//
// Unknown policies are passed into the callback as nil values.
//
// NOTE: this is part of the graphdb.NodeTraverser interface.
func (c *ChannelGraph) ForEachNodeDirectedChannel(node route.Vertex,
	cb func(channel *DirectedChannel) error) error {

	if c.graphCache != nil {
		return c.graphCache.ForEachChannel(node, cb)
	}

	return c.KVStore.ForEachNodeDirectedChannel(node, cb)
}

// FetchNodeFeatures returns the features of the given node. If no features are
// known for the node, an empty feature vector is returned.
// If the graphCache is available, then it will be used to retrieve the node's
// features instead of the database.
//
// NOTE: this is part of the graphdb.NodeTraverser interface.
func (c *ChannelGraph) FetchNodeFeatures(node route.Vertex) (
	*lnwire.FeatureVector, error) {

	if c.graphCache != nil {
		return c.graphCache.GetFeatures(node), nil
	}

	return c.KVStore.FetchNodeFeatures(node)
}

// GraphSession will provide the call-back with access to a NodeTraverser
// instance which can be used to perform queries against the channel graph. If
// the graph cache is not enabled, then the call-back will be provided with
// access to the graph via a consistent read-only transaction.
func (c *ChannelGraph) GraphSession(cb func(graph NodeTraverser) error) error {
	if c.graphCache != nil {
		return cb(c)
	}

	return c.KVStore.GraphSession(cb)
}

// ForEachNodeCached iterates through all the stored vertices/nodes in the
// graph, executing the passed callback with each node encountered.
//
// NOTE: The callback contents MUST not be modified.
func (c *ChannelGraph) ForEachNodeCached(cb func(node route.Vertex,
	chans map[uint64]*DirectedChannel) error) error {

	if c.graphCache != nil {
		return c.graphCache.ForEachNode(cb)
	}

	return c.KVStore.ForEachNodeCached(cb)
}

// AddLightningNode adds a vertex/node to the graph database. If the node is not
// in the database from before, this will add a new, unconnected one to the
// graph. If it is present from before, this will update that node's
// information. Note that this method is expected to only be called to update an
// already present node from a node announcement, or to insert a node found in a
// channel update.
func (c *ChannelGraph) AddLightningNode(node *models.LightningNode,
	op ...batch.SchedulerOption) error {

	err := c.KVStore.AddLightningNode(node, op...)
	if err != nil {
		return err
	}

	if c.graphCache != nil {
		c.graphCache.AddNodeFeatures(
			node.PubKeyBytes, node.Features,
		)
	}

	select {
	case c.topologyUpdate <- node:
	case <-c.quit:
		return ErrChanGraphShuttingDown
	}

	return nil
}

// DeleteLightningNode starts a new database transaction to remove a vertex/node
// from the database according to the node's public key.
func (c *ChannelGraph) DeleteLightningNode(nodePub route.Vertex) error {
	err := c.KVStore.DeleteLightningNode(nodePub)
	if err != nil {
		return err
	}

	if c.graphCache != nil {
		c.graphCache.RemoveNode(nodePub)
	}

	return nil
}

// AddChannelEdge adds a new (undirected, blank) edge to the graph database. An
// undirected edge between the two target nodes is created. The information
// stored denotes the static attributes of the channel, such as the channelID,
// the keys involved in creation of the channel, and the set of features that
// the channel supports. The chanPoint and chanID are used to uniquely identify
// the edge globally within the database.
func (c *ChannelGraph) AddChannelEdge(edge *models.ChannelEdgeInfo,
	op ...batch.SchedulerOption) error {

	err := c.KVStore.AddChannelEdge(edge, op...)
	if err != nil {
		return err
	}

	if c.graphCache != nil {
		c.graphCache.AddChannel(edge, nil, nil)
	}

	select {
	case c.topologyUpdate <- edge:
	case <-c.quit:
		return ErrChanGraphShuttingDown
	}

	return nil
}

// MarkEdgeLive clears an edge from our zombie index, deeming it as live.
// If the cache is enabled, the edge will be added back to the graph cache if
// we still have a record of this channel in the DB.
func (c *ChannelGraph) MarkEdgeLive(chanID uint64) error {
	err := c.KVStore.MarkEdgeLive(chanID)
	if err != nil {
		return err
	}

	if c.graphCache != nil {
		// We need to add the channel back into our graph cache,
		// otherwise we won't use it for path finding.
		infos, err := c.KVStore.FetchChanInfos([]uint64{chanID})
		if err != nil {
			return err
		}

		if len(infos) == 0 {
			return nil
		}

		info := infos[0]

		c.graphCache.AddChannel(info.Info, info.Policy1, info.Policy2)
	}

	return nil
}

// DeleteChannelEdges removes edges with the given channel IDs from the
// database and marks them as zombies. This ensures that we're unable to re-add
// them to our database once again. If an edge does not exist within the
// database, then ErrEdgeNotFound will be returned. If strictZombiePruning is
// true, then when we mark these edges as zombies, we'll set up the keys such
// that we require the node that failed to send the fresh update to be the one
// that resurrects the channel from its zombie state. The markZombie bool
// denotes whether to mark the channel as a zombie.
func (c *ChannelGraph) DeleteChannelEdges(strictZombiePruning, markZombie bool,
	chanIDs ...uint64) error {

	infos, err := c.KVStore.DeleteChannelEdges(
		strictZombiePruning, markZombie, chanIDs...,
	)
	if err != nil {
		return err
	}

	if c.graphCache != nil {
		for _, info := range infos {
			c.graphCache.RemoveChannel(
				info.NodeKey1Bytes, info.NodeKey2Bytes,
				info.ChannelID,
			)
		}
	}

	return err
}

// DisconnectBlockAtHeight is used to indicate that the block specified
// by the passed height has been disconnected from the main chain. This
// will "rewind" the graph back to the height below, deleting channels
// that are no longer confirmed from the graph. The prune log will be
// set to the last prune height valid for the remaining chain.
// Channels that were removed from the graph resulting from the
// disconnected block are returned.
func (c *ChannelGraph) DisconnectBlockAtHeight(height uint32) (
	[]*models.ChannelEdgeInfo, error) {

	edges, err := c.KVStore.DisconnectBlockAtHeight(height)
	if err != nil {
		return nil, err
	}

	if c.graphCache != nil {
		for _, edge := range edges {
			c.graphCache.RemoveChannel(
				edge.NodeKey1Bytes, edge.NodeKey2Bytes,
				edge.ChannelID,
			)
		}
	}

	return edges, nil
}

// PruneGraph prunes newly closed channels from the channel graph in response
// to a new block being solved on the network. Any transactions which spend the
// funding output of any known channels within the graph will be deleted.
// Additionally, the "prune tip", or the last block which has been used to
// prune the graph is stored so callers can ensure the graph is fully in sync
// with the current UTXO state. A slice of channels that have been closed by
// the target block are returned if the function succeeds without error.
func (c *ChannelGraph) PruneGraph(spentOutputs []*wire.OutPoint,
	blockHash *chainhash.Hash, blockHeight uint32) (
	[]*models.ChannelEdgeInfo, error) {

	edges, nodes, err := c.KVStore.PruneGraph(
		spentOutputs, blockHash, blockHeight,
	)
	if err != nil {
		return nil, err
	}

	if c.graphCache != nil {
		for _, edge := range edges {
			c.graphCache.RemoveChannel(
				edge.NodeKey1Bytes, edge.NodeKey2Bytes,
				edge.ChannelID,
			)
		}

		for _, node := range nodes {
			c.graphCache.RemoveNode(node)
		}

		log.Debugf("Pruned graph, cache now has %s",
			c.graphCache.Stats())
	}

	if len(edges) != 0 {
		// Notify all currently registered clients of the newly closed
		// channels.
		closeSummaries := createCloseSummaries(
			blockHeight, edges...,
		)

		select {
		case c.topologyUpdate <- closeSummaries:
		case <-c.quit:
			return nil, ErrChanGraphShuttingDown
		}
	}

	return edges, nil
}

// PruneGraphNodes is a garbage collection method which attempts to prune out
// any nodes from the channel graph that are currently unconnected. This ensures
// that we only maintain a graph of reachable nodes. In the event that a pruned
// node gains more channels, it will be re-added back to the graph.
func (c *ChannelGraph) PruneGraphNodes() error {
	nodes, err := c.KVStore.PruneGraphNodes()
	if err != nil {
		return err
	}

	if c.graphCache != nil {
		for _, node := range nodes {
			c.graphCache.RemoveNode(node)
		}
	}

	return nil
}

// FilterKnownChanIDs takes a set of channel IDs and returns the subset of chan
// IDs that we don't know and are not known zombies of the passed set. In other
// words, we perform a set difference of our set of chan IDs and the ones
// passed in. This method can be used by callers to determine the set of
// channels another peer knows of that we don't.
func (c *ChannelGraph) FilterKnownChanIDs(chansInfo []ChannelUpdateInfo,
	isZombieChan func(time.Time, time.Time) bool) ([]uint64, error) {

	unknown, knownZombies, err := c.KVStore.FilterKnownChanIDs(chansInfo)
	if err != nil {
		return nil, err
	}

	for _, info := range knownZombies {
		// TODO(ziggie): Make sure that for the strict pruning case we
		// compare the pubkeys and whether the right timestamp is not
		// older than the `ChannelPruneExpiry`.
		//
		// NOTE: The timestamp data has no verification attached to it
		// in the `ReplyChannelRange` msg so we are trusting this data
		// at this point. However it is not critical because we are just
		// removing the channel from the db when the timestamps are more
		// recent. During the querying of the gossip msg verification
		// happens as usual. However we should start punishing peers
		// when they don't provide us honest data?
		isStillZombie := isZombieChan(
			info.Node1UpdateTimestamp, info.Node2UpdateTimestamp,
		)

		if isStillZombie {
			continue
		}

		// If we have marked it as a zombie but the latest update
		// timestamps could bring it back from the dead, then we mark it
		// alive, and we let it be added to the set of IDs to query our
		// peer for.
		err := c.KVStore.MarkEdgeLive(
			info.ShortChannelID.ToUint64(),
		)
		// Since there is a chance that the edge could have been marked
		// as "live" between the FilterKnownChanIDs call and the
		// MarkEdgeLive call, we ignore the error if the edge is already
		// marked as live.
		if err != nil && !errors.Is(err, ErrZombieEdgeNotFound) {
			return nil, err
		}
	}

	return unknown, nil
}

// MarkEdgeZombie attempts to mark a channel identified by its channel ID as a
// zombie. This method is used on an ad-hoc basis, when channels need to be
// marked as zombies outside the normal pruning cycle.
func (c *ChannelGraph) MarkEdgeZombie(chanID uint64,
	pubKey1, pubKey2 [33]byte) error {

	err := c.KVStore.MarkEdgeZombie(chanID, pubKey1, pubKey2)
	if err != nil {
		return err
	}

	if c.graphCache != nil {
		c.graphCache.RemoveChannel(pubKey1, pubKey2, chanID)
	}

	return nil
}

// UpdateEdgePolicy updates the edge routing policy for a single directed edge
// within the database for the referenced channel. The `flags` attribute within
// the ChannelEdgePolicy determines which of the directed edges are being
// updated. If the flag is 1, then the first node's information is being
// updated, otherwise it's the second node's information. The node ordering is
// determined by the lexicographical ordering of the identity public keys of the
// nodes on either side of the channel.
func (c *ChannelGraph) UpdateEdgePolicy(edge *models.ChannelEdgePolicy,
	op ...batch.SchedulerOption) error {

	from, to, err := c.KVStore.UpdateEdgePolicy(edge, op...)
	if err != nil {
		return err
	}

	if c.graphCache != nil {
		var isUpdate1 bool
		if edge.ChannelFlags&lnwire.ChanUpdateDirection == 0 {
			isUpdate1 = true
		}

		c.graphCache.UpdatePolicy(edge, from, to, isUpdate1)
	}

	select {
	case c.topologyUpdate <- edge:
	case <-c.quit:
		return ErrChanGraphShuttingDown
	}

	return nil
}
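The file above is the surface this layer exposes: construction over a kvdb backend, a Start/Stop lifecycle, cached read paths that fall back to the KVStore, and mutation methods that mirror writes into the graph cache and forward topology updates. As a rough, hypothetical sketch of how a caller outside this package might drive that lifecycle, using only the exported identifiers visible in this file (the backend, the package alias, and the helper name are assumptions for illustration, not code from this PR):

package graphexample

import (
	graphdb "github.com/lightningnetwork/lnd/graph/db"
	"github.com/lightningnetwork/lnd/kvdb"
	"github.com/lightningnetwork/lnd/routing/route"
)

// countChannels is a hypothetical helper, not part of this PR. It shows the
// lifecycle implied by graph.go: build a ChannelGraph over an existing
// kvdb.Backend, Start it (which populates the in-memory cache when enabled),
// run a query, and Stop it again.
func countChannels(backend kvdb.Backend, node route.Vertex) (int, error) {
	graph, err := graphdb.NewChannelGraph(&graphdb.Config{KVDB: backend})
	if err != nil {
		return 0, err
	}

	// Start populates the graph cache (if enabled) and launches the
	// topology subscription goroutine.
	if err := graph.Start(); err != nil {
		return 0, err
	}
	// Stop closes the quit channel and waits for that goroutine to exit.
	defer func() {
		_ = graph.Stop()
	}()

	// ForEachNodeDirectedChannel is served from the cache when it is
	// available, otherwise it falls through to the KVStore.
	var count int
	err = graph.ForEachNodeDirectedChannel(node,
		func(_ *graphdb.DirectedChannel) error {
			count++
			return nil
		},
	)

	return count, err
}

Because every read path in the file checks graphCache before touching the KVStore, the same caller code is correct whether or not the cache was enabled at construction time.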