lightningnetwork / lnd / 14193549836

01 Apr 2025 10:40AM UTC coverage: 69.046% (+0.007%) from 69.039%

Pull Request #9665: kvdb: bump etcd libs to v3.5.12
Merge e8825f209 into b01f4e514 (github / web-flow)

133439 of 193262 relevant lines covered (69.05%)
22119.45 hits per line

Source File: /graph/db/graph.go (87.82% of relevant lines covered)
package graphdb

import (
	"errors"
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/wire"
	"github.com/lightningnetwork/lnd/batch"
	"github.com/lightningnetwork/lnd/graph/db/models"
	"github.com/lightningnetwork/lnd/kvdb"
	"github.com/lightningnetwork/lnd/lnwire"
	"github.com/lightningnetwork/lnd/routing/route"
)

// ErrChanGraphShuttingDown indicates that the ChannelGraph has shut down or is
// busy shutting down.
var ErrChanGraphShuttingDown = fmt.Errorf("ChannelGraph shutting down")

// Config is a struct that holds all the necessary dependencies for a
// ChannelGraph.
type Config struct {
	// KVDB is the kvdb.Backend that will be used for initializing the
	// KVStore CRUD layer.
	KVDB kvdb.Backend

	// KVStoreOpts is a list of functional options that will be used when
	// initializing the KVStore.
	KVStoreOpts []KVStoreOptionModifier
}

// ChannelGraph is a layer above the graph's CRUD layer.
//
// NOTE: currently, this is purely a pass-through layer directly to the backing
// KVStore. Upcoming commits will move the graph cache out of the KVStore and
// into this layer so that the KVStore is only responsible for CRUD operations.
type ChannelGraph struct {
	started atomic.Bool
	stopped atomic.Bool

	// cacheMu guards any writes to the graphCache. It should be held
	// across the DB write call and the graphCache update to make the
	// two updates as atomic as possible.
	cacheMu sync.Mutex

	graphCache *GraphCache

	*KVStore
	*topologyManager

	quit chan struct{}
	wg   sync.WaitGroup
}

// NewChannelGraph creates a new ChannelGraph instance with the given backend.
func NewChannelGraph(cfg *Config, options ...ChanGraphOption) (*ChannelGraph,
	error) {

	opts := defaultChanGraphOptions()
	for _, o := range options {
		o(opts)
	}

	store, err := NewKVStore(cfg.KVDB, cfg.KVStoreOpts...)
	if err != nil {
		return nil, err
	}

	g := &ChannelGraph{
		KVStore:         store,
		topologyManager: newTopologyManager(),
		quit:            make(chan struct{}),
	}

	// The graph cache can be turned off (e.g. for mobile users) for a
	// speed/memory usage tradeoff.
	if opts.useGraphCache {
		g.graphCache = NewGraphCache(opts.preAllocCacheNumNodes)
	}

	return g, nil
}

// Start kicks off any goroutines required for the ChannelGraph to function.
// If the graph cache is enabled, then it will be populated with the contents of
// the database.
func (c *ChannelGraph) Start() error {
	if !c.started.CompareAndSwap(false, true) {
		return nil
	}
	log.Debugf("ChannelGraph starting")
	defer log.Debug("ChannelGraph started")

	if c.graphCache != nil {
		if err := c.populateCache(); err != nil {
			return fmt.Errorf("could not populate the graph "+
				"cache: %w", err)
		}
	}

	c.wg.Add(1)
	go c.handleTopologySubscriptions()

	return nil
}

// Stop signals any active goroutines for a graceful closure.
func (c *ChannelGraph) Stop() error {
	if !c.stopped.CompareAndSwap(false, true) {
		return nil
	}

	log.Debugf("ChannelGraph shutting down...")
	defer log.Debug("ChannelGraph shutdown complete")

	close(c.quit)
	c.wg.Wait()

	return nil
}
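
// buildAndStartGraph is a minimal illustrative sketch, not part of the
// upstream graph.go: it shows how the constructor and lifecycle methods above
// could be wired together, assuming the caller already has a kvdb.Backend.
// With no ChanGraphOption passed, the defaults from defaultChanGraphOptions()
// decide whether the in-memory graph cache is used.
func buildAndStartGraph(backend kvdb.Backend) (*ChannelGraph, error) {
	graph, err := NewChannelGraph(&Config{KVDB: backend})
	if err != nil {
		return nil, err
	}

	// Start populates the graph cache (if enabled) and launches the
	// topology subscription handler; the caller is expected to call Stop
	// eventually to close the quit channel and wait for that goroutine.
	if err := graph.Start(); err != nil {
		return nil, err
	}

	return graph, nil
}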

// handleTopologySubscriptions ensures that topology client subscriptions,
// subscription cancellations and topology notifications are handled
// synchronously.
//
// NOTE: this MUST be run in a goroutine.
func (c *ChannelGraph) handleTopologySubscriptions() {
	defer c.wg.Done()

	for {
		select {
		// A new fully validated topology update has just arrived.
		// We'll notify any registered clients.
		case update := <-c.topologyUpdate:
			// TODO(elle): change topology handling to be handled
			// synchronously so that we can guarantee the order of
			// notification delivery.
			c.wg.Add(1)
			go c.handleTopologyUpdate(update)

			// TODO(roasbeef): remove all unconnected vertexes
			// after N blocks pass with no corresponding
			// announcements.

		// A new notification client update has arrived. We're either
		// gaining a new client, or cancelling notifications for an
		// existing client.
		case ntfnUpdate := <-c.ntfnClientUpdates:
			clientID := ntfnUpdate.clientID

			if ntfnUpdate.cancel {
				client, ok := c.topologyClients.LoadAndDelete(
					clientID,
				)
				if ok {
					close(client.exit)
					client.wg.Wait()

					close(client.ntfnChan)
				}

				continue
			}

			c.topologyClients.Store(clientID, &topologyClient{
				ntfnChan: ntfnUpdate.ntfnChan,
				exit:     make(chan struct{}),
			})

		case <-c.quit:
			return
		}
	}
}

// populateCache loads the entire channel graph into the in-memory graph cache.
//
// NOTE: This should only be called if the graphCache has been constructed.
func (c *ChannelGraph) populateCache() error {
	startTime := time.Now()
	log.Info("Populating in-memory channel graph, this might take a " +
		"while...")

	err := c.KVStore.ForEachNodeCacheable(func(node route.Vertex,
		features *lnwire.FeatureVector) error {

		c.graphCache.AddNodeFeatures(node, features)

		return nil
	})
	if err != nil {
		return err
	}

	err = c.KVStore.ForEachChannel(func(info *models.ChannelEdgeInfo,
		policy1, policy2 *models.ChannelEdgePolicy) error {

		c.graphCache.AddChannel(info, policy1, policy2)

		return nil
	})
	if err != nil {
		return err
	}

	log.Infof("Finished populating in-memory channel graph (took %v, %s)",
		time.Since(startTime), c.graphCache.Stats())

	return nil
}

// ForEachNodeDirectedChannel iterates through all channels of a given node,
// executing the passed callback on the directed edge representing the channel
// and its incoming policy. If the callback returns an error, then the iteration
// is halted with the error propagated back up to the caller. If the graphCache
// is available, then it will be used to retrieve the node's channels instead
// of the database.
//
// Unknown policies are passed into the callback as nil values.
//
// NOTE: this is part of the graphdb.NodeTraverser interface.
func (c *ChannelGraph) ForEachNodeDirectedChannel(node route.Vertex,
	cb func(channel *DirectedChannel) error) error {

	if c.graphCache != nil {
		return c.graphCache.ForEachChannel(node, cb)
	}

	return c.KVStore.ForEachNodeDirectedChannel(node, cb)
}

// FetchNodeFeatures returns the features of the given node. If no features are
// known for the node, an empty feature vector is returned.
// If the graphCache is available, then it will be used to retrieve the node's
// features instead of the database.
//
// NOTE: this is part of the graphdb.NodeTraverser interface.
func (c *ChannelGraph) FetchNodeFeatures(node route.Vertex) (
	*lnwire.FeatureVector, error) {

	if c.graphCache != nil {
		return c.graphCache.GetFeatures(node), nil
	}

	return c.KVStore.FetchNodeFeatures(node)
}
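
// logNodeChannels is an illustrative sketch, not part of the upstream
// graph.go: it combines the two cache-aware accessors above to inspect a
// single node. The node argument and the log format are assumptions made
// purely for illustration.
func logNodeChannels(c *ChannelGraph, node route.Vertex) error {
	// Feature lookup is served from the cache when it is enabled and falls
	// back to the KVStore otherwise.
	features, err := c.FetchNodeFeatures(node)
	if err != nil {
		return err
	}

	return c.ForEachNodeDirectedChannel(node, func(
		channel *DirectedChannel) error {

		// Unknown incoming policies arrive as nil, so callbacks must
		// tolerate partially announced channels.
		log.Debugf("node=%v features=%v channel=%v", node, features,
			channel)

		return nil
	})
}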

// GraphSession will provide the call-back with access to a NodeTraverser
// instance which can be used to perform queries against the channel graph. If
// the graph cache is not enabled, then the call-back will be provided with
// access to the graph via a consistent read-only transaction.
func (c *ChannelGraph) GraphSession(cb func(graph NodeTraverser) error) error {
	if c.graphCache != nil {
		return cb(c)
	}

	return c.KVStore.GraphSession(cb)
}
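
// countNodeChannels is an illustrative sketch, not part of the upstream
// graph.go: it shows the GraphSession pattern, where every query made inside
// the call-back is answered either from the in-memory cache or from a single
// consistent read-only transaction. The starting node and the simple counter
// are assumptions for illustration.
func countNodeChannels(c *ChannelGraph, node route.Vertex) (int, error) {
	var numChannels int

	err := c.GraphSession(func(graph NodeTraverser) error {
		return graph.ForEachNodeDirectedChannel(node, func(
			_ *DirectedChannel) error {

			numChannels++

			return nil
		})
	})

	return numChannels, err
}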

// ForEachNodeCached iterates through all the stored vertices/nodes in the
// graph, executing the passed callback with each node encountered.
//
// NOTE: The callback contents MUST not be modified.
func (c *ChannelGraph) ForEachNodeCached(cb func(node route.Vertex,
	chans map[uint64]*DirectedChannel) error) error {

	if c.graphCache != nil {
		return c.graphCache.ForEachNode(cb)
	}

	return c.KVStore.ForEachNodeCached(cb)
}

// AddLightningNode adds a vertex/node to the graph database. If the node is not
// in the database from before, this will add a new, unconnected one to the
// graph. If it is present from before, this will update that node's
// information. Note that this method is expected to only be called to update an
// already present node from a node announcement, or to insert a node found in a
// channel update.
func (c *ChannelGraph) AddLightningNode(node *models.LightningNode,
	op ...batch.SchedulerOption) error {

	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()

	err := c.KVStore.AddLightningNode(node, op...)
	if err != nil {
		return err
	}

	if c.graphCache != nil {
		c.graphCache.AddNodeFeatures(
			node.PubKeyBytes, node.Features,
		)
	}

	select {
	case c.topologyUpdate <- node:
	case <-c.quit:
		return ErrChanGraphShuttingDown
	}

	return nil
}

// DeleteLightningNode starts a new database transaction to remove a vertex/node
// from the database according to the node's public key.
func (c *ChannelGraph) DeleteLightningNode(nodePub route.Vertex) error {
	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()

	err := c.KVStore.DeleteLightningNode(nodePub)
	if err != nil {
		return err
	}

	if c.graphCache != nil {
		c.graphCache.RemoveNode(nodePub)
	}

	return nil
}

// AddChannelEdge adds a new (undirected, blank) edge to the graph database. An
// undirected edge from the two target nodes is created. The information stored
// denotes the static attributes of the channel, such as the channelID, the keys
// involved in creation of the channel, and the set of features that the channel
// supports. The chanPoint and chanID are used to uniquely identify the edge
// globally within the database.
func (c *ChannelGraph) AddChannelEdge(edge *models.ChannelEdgeInfo,
	op ...batch.SchedulerOption) error {

	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()

	err := c.KVStore.AddChannelEdge(edge, op...)
	if err != nil {
		return err
	}

	if c.graphCache != nil {
		c.graphCache.AddChannel(edge, nil, nil)
	}

	select {
	case c.topologyUpdate <- edge:
	case <-c.quit:
		return ErrChanGraphShuttingDown
	}

	return nil
}

// MarkEdgeLive clears an edge from our zombie index, deeming it as live.
// If the cache is enabled, the edge will be added back to the graph cache if
// we still have a record of this channel in the DB.
func (c *ChannelGraph) MarkEdgeLive(chanID uint64) error {
	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()

	err := c.KVStore.MarkEdgeLive(chanID)
	if err != nil {
		return err
	}

	if c.graphCache != nil {
		// We need to add the channel back into our graph cache,
		// otherwise we won't use it for path finding.
		infos, err := c.KVStore.FetchChanInfos([]uint64{chanID})
		if err != nil {
			return err
		}

		if len(infos) == 0 {
			return nil
		}

		info := infos[0]

		c.graphCache.AddChannel(info.Info, info.Policy1, info.Policy2)
	}

	return nil
}

// DeleteChannelEdges removes edges with the given channel IDs from the
// database and marks them as zombies. This ensures that we're unable to re-add
// them to our database once again. If an edge does not exist within the
// database, then ErrEdgeNotFound will be returned. If strictZombiePruning is
// true, then when we mark these edges as zombies, we'll set up the keys such
// that we require the node that failed to send the fresh update to be the one
// that resurrects the channel from its zombie state. The markZombie bool
// denotes whether to mark the channel as a zombie.
func (c *ChannelGraph) DeleteChannelEdges(strictZombiePruning, markZombie bool,
	chanIDs ...uint64) error {

	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()

	infos, err := c.KVStore.DeleteChannelEdges(
		strictZombiePruning, markZombie, chanIDs...,
	)
	if err != nil {
		return err
	}

	if c.graphCache != nil {
		for _, info := range infos {
			c.graphCache.RemoveChannel(
				info.NodeKey1Bytes, info.NodeKey2Bytes,
				info.ChannelID,
			)
		}
	}

	return err
}

// DisconnectBlockAtHeight is used to indicate that the block specified
// by the passed height has been disconnected from the main chain. This
// will "rewind" the graph back to the height below, deleting channels
// that are no longer confirmed from the graph. The prune log will be
// set to the last prune height valid for the remaining chain.
// Channels that were removed from the graph resulting from the
// disconnected block are returned.
func (c *ChannelGraph) DisconnectBlockAtHeight(height uint32) (
	[]*models.ChannelEdgeInfo, error) {

	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()

	edges, err := c.KVStore.DisconnectBlockAtHeight(height)
	if err != nil {
		return nil, err
	}

	if c.graphCache != nil {
		for _, edge := range edges {
			c.graphCache.RemoveChannel(
				edge.NodeKey1Bytes, edge.NodeKey2Bytes,
				edge.ChannelID,
			)
		}
	}

	return edges, nil
}

// PruneGraph prunes newly closed channels from the channel graph in response
// to a new block being solved on the network. Any transactions which spend the
// funding output of any known channels within the graph will be deleted.
// Additionally, the "prune tip", or the last block which has been used to
// prune the graph is stored so callers can ensure the graph is fully in sync
// with the current UTXO state. A slice of channels that have been closed by
// the target block are returned if the function succeeds without error.
func (c *ChannelGraph) PruneGraph(spentOutputs []*wire.OutPoint,
	blockHash *chainhash.Hash, blockHeight uint32) (
	[]*models.ChannelEdgeInfo, error) {

	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()

	edges, nodes, err := c.KVStore.PruneGraph(
		spentOutputs, blockHash, blockHeight,
	)
	if err != nil {
		return nil, err
	}

	if c.graphCache != nil {
		for _, edge := range edges {
			c.graphCache.RemoveChannel(
				edge.NodeKey1Bytes, edge.NodeKey2Bytes,
				edge.ChannelID,
			)
		}

		for _, node := range nodes {
			c.graphCache.RemoveNode(node)
		}

		log.Debugf("Pruned graph, cache now has %s",
			c.graphCache.Stats())
	}

	if len(edges) != 0 {
		// Notify all currently registered clients of the newly closed
		// channels.
		closeSummaries := createCloseSummaries(
			blockHeight, edges...,
		)
		c.notifyTopologyChange(&TopologyChange{
			ClosedChannels: closeSummaries,
		})
	}

	return edges, nil
}

// PruneGraphNodes is a garbage collection method which attempts to prune out
// any nodes from the channel graph that are currently unconnected. This ensures
// that we only maintain a graph of reachable nodes. In the event that a pruned
// node gains more channels, it will be re-added back to the graph.
func (c *ChannelGraph) PruneGraphNodes() error {
	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()

	nodes, err := c.KVStore.PruneGraphNodes()
	if err != nil {
		return err
	}

	if c.graphCache != nil {
		for _, node := range nodes {
			c.graphCache.RemoveNode(node)
		}
	}

	return nil
}

// FilterKnownChanIDs takes a set of channel IDs and returns the subset of chan
// IDs that we don't know of and that are not known zombies of the passed set.
// In other words, we perform a set difference of our set of chan IDs and the
// ones passed in. This method can be used by callers to determine the set of
// channels another peer knows of that we don't.
func (c *ChannelGraph) FilterKnownChanIDs(chansInfo []ChannelUpdateInfo,
	isZombieChan func(time.Time, time.Time) bool) ([]uint64, error) {

	unknown, knownZombies, err := c.KVStore.FilterKnownChanIDs(chansInfo)
	if err != nil {
		return nil, err
	}

	for _, info := range knownZombies {
		// TODO(ziggie): Make sure that for the strict pruning case we
		// compare the pubkeys and whether the right timestamp is not
		// older than the `ChannelPruneExpiry`.
		//
		// NOTE: The timestamp data has no verification attached to it
		// in the `ReplyChannelRange` msg, so we are trusting this data
		// at this point. However, it is not critical because we are
		// just removing the channel from the db when the timestamps
		// are more recent. During the querying of the gossip msg,
		// verification happens as usual. However, should we start
		// punishing peers when they don't provide us honest data?
		isStillZombie := isZombieChan(
			info.Node1UpdateTimestamp, info.Node2UpdateTimestamp,
		)

		if isStillZombie {
			continue
		}

		// If we have marked it as a zombie but the latest update
		// timestamps could bring it back from the dead, then we mark it
		// alive, and we let it be added to the set of IDs to query our
		// peer for.
		err := c.KVStore.MarkEdgeLive(
			info.ShortChannelID.ToUint64(),
		)
		// Since there is a chance that the edge could have been marked
		// as "live" between the FilterKnownChanIDs call and the
		// MarkEdgeLive call, we ignore the error if the edge is already
		// marked as live.
		if err != nil && !errors.Is(err, ErrZombieEdgeNotFound) {
			return nil, err
		}
	}

	return unknown, nil
}
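
// staleZombieFilter is an illustrative sketch, not part of the upstream
// graph.go: it builds one possible isZombieChan callback for
// FilterKnownChanIDs, keeping a channel in the zombie set only while both of
// its reported update timestamps are missing or older than the given expiry.
// The expiry parameter and the "both sides stale" rule are assumptions for
// illustration only.
func staleZombieFilter(expiry time.Duration) func(time.Time, time.Time) bool {
	return func(node1Update, node2Update time.Time) bool {
		// A zero timestamp means the peer reported no update for that
		// side, which we treat as stale here.
		node1Stale := node1Update.IsZero() ||
			time.Since(node1Update) > expiry

		node2Stale := node2Update.IsZero() ||
			time.Since(node2Update) > expiry

		return node1Stale && node2Stale
	}
}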

// MarkEdgeZombie attempts to mark a channel identified by its channel ID as a
// zombie. This method is used on an ad-hoc basis, when channels need to be
// marked as zombies outside the normal pruning cycle.
func (c *ChannelGraph) MarkEdgeZombie(chanID uint64,
	pubKey1, pubKey2 [33]byte) error {

	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()

	err := c.KVStore.MarkEdgeZombie(chanID, pubKey1, pubKey2)
	if err != nil {
		return err
	}

	if c.graphCache != nil {
		c.graphCache.RemoveChannel(pubKey1, pubKey2, chanID)
	}

	return nil
}

// UpdateEdgePolicy updates the edge routing policy for a single directed edge
// within the database for the referenced channel. The `flags` attribute within
// the ChannelEdgePolicy determines which of the directed edges are being
// updated. If the flag is 1, then the first node's information is being
// updated, otherwise it's the second node's information. The node ordering is
// determined by the lexicographical ordering of the identity public keys of the
// nodes on either side of the channel.
func (c *ChannelGraph) UpdateEdgePolicy(edge *models.ChannelEdgePolicy,
	op ...batch.SchedulerOption) error {

	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()

	from, to, err := c.KVStore.UpdateEdgePolicy(edge, op...)
	if err != nil {
		return err
	}

	if c.graphCache != nil {
		var isUpdate1 bool
		if edge.ChannelFlags&lnwire.ChanUpdateDirection == 0 {
			isUpdate1 = true
		}

		c.graphCache.UpdatePolicy(edge, from, to, isUpdate1)
	}

	select {
	case c.topologyUpdate <- edge:
	case <-c.quit:
		return ErrChanGraphShuttingDown
	}

	return nil
}