• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

lightningnetwork / lnd / 14614595224

23 Apr 2025 09:21AM UTC coverage: 68.901% (+10.3%) from 58.562%
14614595224

push

github

web-flow
Merge pull request #9739 from ellemouton/rpcInterceptorMD

lnrpc+rpcperms: add ctx metadata pairs to RPCMiddlewareRequest

17 of 17 new or added lines in 1 file covered. (100.0%)

300 existing lines in 24 files now uncovered.

133638 of 193957 relevant lines covered (68.9%)

22022.08 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

87.82
/graph/db/graph.go
1
package graphdb
2

3
import (
4
        "errors"
5
        "fmt"
6
        "sync"
7
        "sync/atomic"
8
        "time"
9

10
        "github.com/btcsuite/btcd/chaincfg/chainhash"
11
        "github.com/btcsuite/btcd/wire"
12
        "github.com/lightningnetwork/lnd/batch"
13
        "github.com/lightningnetwork/lnd/graph/db/models"
14
        "github.com/lightningnetwork/lnd/kvdb"
15
        "github.com/lightningnetwork/lnd/lnwire"
16
        "github.com/lightningnetwork/lnd/routing/route"
17
)
18

19
// ErrChanGraphShuttingDown indicates that the ChannelGraph has shutdown or is
20
// busy shutting down.
21
var ErrChanGraphShuttingDown = fmt.Errorf("ChannelGraph shutting down")
22

23
// Config is a struct that holds all the necessary dependencies for a
// ChannelGraph. Both fields are required; NewChannelGraph will fail to build
// its KVStore without a backend.
type Config struct {
	// KVDB is the kvdb.Backend that will be used for initializing the
	// KVStore CRUD layer.
	KVDB kvdb.Backend

	// KVStoreOpts is a list of functional options that will be used when
	// initializing the KVStore.
	KVStoreOpts []KVStoreOptionModifier
}
34

35
// ChannelGraph is a layer above the graph's CRUD layer.
//
// NOTE: currently, this is purely a pass-through layer directly to the backing
// KVStore. Upcoming commits will move the graph cache out of the KVStore and
// into this layer so that the KVStore is only responsible for CRUD operations.
type ChannelGraph struct {
	// started and stopped ensure Start/Stop each execute their body at
	// most once.
	started atomic.Bool
	stopped atomic.Bool

	// cacheMu guards any writes to the graphCache. It should be held
	// across the DB write call and the graphCache update to make the
	// two updates as atomic as possible.
	cacheMu sync.Mutex

	// graphCache is an optional in-memory view of the graph. When nil,
	// the cache is disabled and reads fall through to the KVStore.
	graphCache *GraphCache

	// KVStore is embedded so that any method not overridden here passes
	// straight through to the CRUD layer.
	*KVStore

	// topologyManager provides the topology subscription channels
	// (topologyUpdate, ntfnClientUpdates) and client registry used below.
	*topologyManager

	// quit is closed in Stop to signal all goroutines to exit, and wg
	// tracks those goroutines so Stop can wait for them.
	quit chan struct{}
	wg   sync.WaitGroup
}
57

58
// NewChannelGraph creates a new ChannelGraph instance with the given backend.
59
func NewChannelGraph(cfg *Config, options ...ChanGraphOption) (*ChannelGraph,
60
        error) {
176✔
61

176✔
62
        opts := defaultChanGraphOptions()
176✔
63
        for _, o := range options {
280✔
64
                o(opts)
104✔
65
        }
104✔
66

67
        store, err := NewKVStore(cfg.KVDB, cfg.KVStoreOpts...)
176✔
68
        if err != nil {
176✔
69
                return nil, err
×
70
        }
×
71

72
        g := &ChannelGraph{
176✔
73
                KVStore:         store,
176✔
74
                topologyManager: newTopologyManager(),
176✔
75
                quit:            make(chan struct{}),
176✔
76
        }
176✔
77

176✔
78
        // The graph cache can be turned off (e.g. for mobile users) for a
176✔
79
        // speed/memory usage tradeoff.
176✔
80
        if opts.useGraphCache {
319✔
81
                g.graphCache = NewGraphCache(opts.preAllocCacheNumNodes)
143✔
82
        }
143✔
83

84
        return g, nil
176✔
85
}
86

87
// Start kicks off any goroutines required for the ChannelGraph to function.
88
// If the graph cache is enabled, then it will be populated with the contents of
89
// the database.
90
func (c *ChannelGraph) Start() error {
176✔
91
        if !c.started.CompareAndSwap(false, true) {
176✔
92
                return nil
×
93
        }
×
94
        log.Debugf("ChannelGraph starting")
176✔
95
        defer log.Debug("ChannelGraph started")
176✔
96

176✔
97
        if c.graphCache != nil {
319✔
98
                if err := c.populateCache(); err != nil {
143✔
99
                        return fmt.Errorf("could not populate the graph "+
×
100
                                "cache: %w", err)
×
101
                }
×
102
        }
103

104
        c.wg.Add(1)
176✔
105
        go c.handleTopologySubscriptions()
176✔
106

176✔
107
        return nil
176✔
108
}
109

110
// Stop signals any active goroutines for a graceful closure.
111
func (c *ChannelGraph) Stop() error {
176✔
112
        if !c.stopped.CompareAndSwap(false, true) {
176✔
113
                return nil
×
114
        }
×
115

116
        log.Debugf("ChannelGraph shutting down...")
176✔
117
        defer log.Debug("ChannelGraph shutdown complete")
176✔
118

176✔
119
        close(c.quit)
176✔
120
        c.wg.Wait()
176✔
121

176✔
122
        return nil
176✔
123
}
124

125
// handleTopologySubscriptions ensures that topology client subscriptions,
// subscription cancellations and topology notifications are handled
// synchronously.
//
// NOTE: this MUST be run in a goroutine.
func (c *ChannelGraph) handleTopologySubscriptions() {
	defer c.wg.Done()

	for {
		select {
		// A new fully validated topology update has just arrived.
		// We'll notify any registered clients.
		case update := <-c.topologyUpdate:
			// TODO(elle): change topology handling to be handled
			// synchronously so that we can guarantee the order of
			// notification delivery.
			c.wg.Add(1)
			go c.handleTopologyUpdate(update)

			// TODO(roasbeef): remove all unconnected vertexes
			// after N blocks pass with no corresponding
			// announcements.

		// A new notification client update has arrived. We're either
		// gaining a new client, or cancelling notifications for an
		// existing client.
		case ntfnUpdate := <-c.ntfnClientUpdates:
			clientID := ntfnUpdate.clientID

			if ntfnUpdate.cancel {
				client, ok := c.topologyClients.LoadAndDelete(
					clientID,
				)
				if ok {
					// Ordering matters here: signal the
					// client's goroutines to exit and wait
					// for them before closing ntfnChan, so
					// nothing sends on a closed channel.
					close(client.exit)
					client.wg.Wait()

					close(client.ntfnChan)
				}

				continue
			}

			c.topologyClients.Store(clientID, &topologyClient{
				ntfnChan: ntfnUpdate.ntfnChan,
				exit:     make(chan struct{}),
			})

		// The graph is shutting down; exit the dispatch loop.
		case <-c.quit:
			return
		}
	}
}
178

179
// populateCache loads the entire channel graph into the in-memory graph cache.
180
//
181
// NOTE: This should only be called if the graphCache has been constructed.
182
func (c *ChannelGraph) populateCache() error {
143✔
183
        startTime := time.Now()
143✔
184
        log.Info("Populating in-memory channel graph, this might take a " +
143✔
185
                "while...")
143✔
186

143✔
187
        err := c.KVStore.ForEachNodeCacheable(func(node route.Vertex,
143✔
188
                features *lnwire.FeatureVector) error {
245✔
189

102✔
190
                c.graphCache.AddNodeFeatures(node, features)
102✔
191

102✔
192
                return nil
102✔
193
        })
102✔
194
        if err != nil {
143✔
195
                return err
×
196
        }
×
197

198
        err = c.KVStore.ForEachChannel(func(info *models.ChannelEdgeInfo,
143✔
199
                policy1, policy2 *models.ChannelEdgePolicy) error {
541✔
200

398✔
201
                c.graphCache.AddChannel(info, policy1, policy2)
398✔
202

398✔
203
                return nil
398✔
204
        })
398✔
205
        if err != nil {
143✔
206
                return err
×
207
        }
×
208

209
        log.Infof("Finished populating in-memory channel graph (took %v, %s)",
143✔
210
                time.Since(startTime), c.graphCache.Stats())
143✔
211

143✔
212
        return nil
143✔
213
}
214

215
// ForEachNodeDirectedChannel iterates through all channels of a given node,
216
// executing the passed callback on the directed edge representing the channel
217
// and its incoming policy. If the callback returns an error, then the iteration
218
// is halted with the error propagated back up to the caller. If the graphCache
219
// is available, then it will be used to retrieve the node's channels instead
220
// of the database.
221
//
222
// Unknown policies are passed into the callback as nil values.
223
//
224
// NOTE: this is part of the graphdb.NodeTraverser interface.
225
func (c *ChannelGraph) ForEachNodeDirectedChannel(node route.Vertex,
226
        cb func(channel *DirectedChannel) error) error {
467✔
227

467✔
228
        if c.graphCache != nil {
931✔
229
                return c.graphCache.ForEachChannel(node, cb)
464✔
230
        }
464✔
231

232
        return c.KVStore.ForEachNodeDirectedChannel(node, cb)
5✔
233
}
234

235
// FetchNodeFeatures returns the features of the given node. If no features are
236
// known for the node, an empty feature vector is returned.
237
// If the graphCache is available, then it will be used to retrieve the node's
238
// features instead of the database.
239
//
240
// NOTE: this is part of the graphdb.NodeTraverser interface.
241
func (c *ChannelGraph) FetchNodeFeatures(node route.Vertex) (
242
        *lnwire.FeatureVector, error) {
455✔
243

455✔
244
        if c.graphCache != nil {
910✔
245
                return c.graphCache.GetFeatures(node), nil
455✔
246
        }
455✔
247

248
        return c.KVStore.FetchNodeFeatures(node)
2✔
249
}
250

251
// GraphSession will provide the call-back with access to a NodeTraverser
252
// instance which can be used to perform queries against the channel graph. If
253
// the graph cache is not enabled, then the call-back will be provided with
254
// access to the graph via a consistent read-only transaction.
255
func (c *ChannelGraph) GraphSession(cb func(graph NodeTraverser) error) error {
135✔
256
        if c.graphCache != nil {
216✔
257
                return cb(c)
81✔
258
        }
81✔
259

260
        return c.KVStore.GraphSession(cb)
54✔
261
}
262

263
// ForEachNodeCached iterates through all the stored vertices/nodes in the
264
// graph, executing the passed callback with each node encountered.
265
//
266
// NOTE: The callback contents MUST not be modified.
267
func (c *ChannelGraph) ForEachNodeCached(cb func(node route.Vertex,
268
        chans map[uint64]*DirectedChannel) error) error {
1✔
269

1✔
270
        if c.graphCache != nil {
1✔
271
                return c.graphCache.ForEachNode(cb)
×
272
        }
×
273

274
        return c.KVStore.ForEachNodeCached(cb)
1✔
275
}
276

277
// AddLightningNode adds a vertex/node to the graph database. If the node is not
278
// in the database from before, this will add a new, unconnected one to the
279
// graph. If it is present from before, this will update that node's
280
// information. Note that this method is expected to only be called to update an
281
// already present node from a node announcement, or to insert a node found in a
282
// channel update.
283
func (c *ChannelGraph) AddLightningNode(node *models.LightningNode,
284
        op ...batch.SchedulerOption) error {
801✔
285

801✔
286
        c.cacheMu.Lock()
801✔
287
        defer c.cacheMu.Unlock()
801✔
288

801✔
289
        err := c.KVStore.AddLightningNode(node, op...)
801✔
290
        if err != nil {
801✔
291
                return err
×
292
        }
×
293

294
        if c.graphCache != nil {
1,415✔
295
                c.graphCache.AddNodeFeatures(
614✔
296
                        node.PubKeyBytes, node.Features,
614✔
297
                )
614✔
298
        }
614✔
299

300
        select {
801✔
301
        case c.topologyUpdate <- node:
801✔
UNCOV
302
        case <-c.quit:
×
UNCOV
303
                return ErrChanGraphShuttingDown
×
304
        }
305

306
        return nil
801✔
307
}
308

309
// DeleteLightningNode starts a new database transaction to remove a vertex/node
310
// from the database according to the node's public key.
311
func (c *ChannelGraph) DeleteLightningNode(nodePub route.Vertex) error {
3✔
312
        c.cacheMu.Lock()
3✔
313
        defer c.cacheMu.Unlock()
3✔
314

3✔
315
        err := c.KVStore.DeleteLightningNode(nodePub)
3✔
316
        if err != nil {
3✔
317
                return err
×
318
        }
×
319

320
        if c.graphCache != nil {
6✔
321
                c.graphCache.RemoveNode(nodePub)
3✔
322
        }
3✔
323

324
        return nil
3✔
325
}
326

327
// AddChannelEdge adds a new (undirected, blank) edge to the graph database. An
328
// undirected edge from the two target nodes are created. The information stored
329
// denotes the static attributes of the channel, such as the channelID, the keys
330
// involved in creation of the channel, and the set of features that the channel
331
// supports. The chanPoint and chanID are used to uniquely identify the edge
332
// globally within the database.
333
func (c *ChannelGraph) AddChannelEdge(edge *models.ChannelEdgeInfo,
334
        op ...batch.SchedulerOption) error {
1,721✔
335

1,721✔
336
        c.cacheMu.Lock()
1,721✔
337
        defer c.cacheMu.Unlock()
1,721✔
338

1,721✔
339
        err := c.KVStore.AddChannelEdge(edge, op...)
1,721✔
340
        if err != nil {
1,955✔
341
                return err
234✔
342
        }
234✔
343

344
        if c.graphCache != nil {
2,784✔
345
                c.graphCache.AddChannel(edge, nil, nil)
1,297✔
346
        }
1,297✔
347

348
        select {
1,487✔
349
        case c.topologyUpdate <- edge:
1,487✔
350
        case <-c.quit:
×
351
                return ErrChanGraphShuttingDown
×
352
        }
353

354
        return nil
1,487✔
355
}
356

357
// MarkEdgeLive clears an edge from our zombie index, deeming it as live.
358
// If the cache is enabled, the edge will be added back to the graph cache if
359
// we still have a record of this channel in the DB.
360
func (c *ChannelGraph) MarkEdgeLive(chanID uint64) error {
2✔
361
        c.cacheMu.Lock()
2✔
362
        defer c.cacheMu.Unlock()
2✔
363

2✔
364
        err := c.KVStore.MarkEdgeLive(chanID)
2✔
365
        if err != nil {
3✔
366
                return err
1✔
367
        }
1✔
368

369
        if c.graphCache != nil {
2✔
370
                // We need to add the channel back into our graph cache,
1✔
371
                // otherwise we won't use it for path finding.
1✔
372
                infos, err := c.KVStore.FetchChanInfos([]uint64{chanID})
1✔
373
                if err != nil {
1✔
374
                        return err
×
375
                }
×
376

377
                if len(infos) == 0 {
2✔
378
                        return nil
1✔
379
                }
1✔
380

381
                info := infos[0]
×
382

×
383
                c.graphCache.AddChannel(info.Info, info.Policy1, info.Policy2)
×
384
        }
385

386
        return nil
×
387
}
388

389
// DeleteChannelEdges removes edges with the given channel IDs from the
390
// database and marks them as zombies. This ensures that we're unable to re-add
391
// it to our database once again. If an edge does not exist within the
392
// database, then ErrEdgeNotFound will be returned. If strictZombiePruning is
393
// true, then when we mark these edges as zombies, we'll set up the keys such
394
// that we require the node that failed to send the fresh update to be the one
395
// that resurrects the channel from its zombie state. The markZombie bool
396
// denotes whether to mark the channel as a zombie.
397
func (c *ChannelGraph) DeleteChannelEdges(strictZombiePruning, markZombie bool,
398
        chanIDs ...uint64) error {
145✔
399

145✔
400
        c.cacheMu.Lock()
145✔
401
        defer c.cacheMu.Unlock()
145✔
402

145✔
403
        infos, err := c.KVStore.DeleteChannelEdges(
145✔
404
                strictZombiePruning, markZombie, chanIDs...,
145✔
405
        )
145✔
406
        if err != nil {
209✔
407
                return err
64✔
408
        }
64✔
409

410
        if c.graphCache != nil {
162✔
411
                for _, info := range infos {
106✔
412
                        c.graphCache.RemoveChannel(
25✔
413
                                info.NodeKey1Bytes, info.NodeKey2Bytes,
25✔
414
                                info.ChannelID,
25✔
415
                        )
25✔
416
                }
25✔
417
        }
418

419
        return err
81✔
420
}
421

422
// DisconnectBlockAtHeight is used to indicate that the block specified
423
// by the passed height has been disconnected from the main chain. This
424
// will "rewind" the graph back to the height below, deleting channels
425
// that are no longer confirmed from the graph. The prune log will be
426
// set to the last prune height valid for the remaining chain.
427
// Channels that were removed from the graph resulting from the
428
// disconnected block are returned.
429
func (c *ChannelGraph) DisconnectBlockAtHeight(height uint32) (
430
        []*models.ChannelEdgeInfo, error) {
154✔
431

154✔
432
        c.cacheMu.Lock()
154✔
433
        defer c.cacheMu.Unlock()
154✔
434

154✔
435
        edges, err := c.KVStore.DisconnectBlockAtHeight(height)
154✔
436
        if err != nil {
154✔
437
                return nil, err
×
438
        }
×
439

440
        if c.graphCache != nil {
308✔
441
                for _, edge := range edges {
257✔
442
                        c.graphCache.RemoveChannel(
103✔
443
                                edge.NodeKey1Bytes, edge.NodeKey2Bytes,
103✔
444
                                edge.ChannelID,
103✔
445
                        )
103✔
446
                }
103✔
447
        }
448

449
        return edges, nil
154✔
450
}
451

452
// PruneGraph prunes newly closed channels from the channel graph in response
453
// to a new block being solved on the network. Any transactions which spend the
454
// funding output of any known channels within he graph will be deleted.
455
// Additionally, the "prune tip", or the last block which has been used to
456
// prune the graph is stored so callers can ensure the graph is fully in sync
457
// with the current UTXO state. A slice of channels that have been closed by
458
// the target block are returned if the function succeeds without error.
459
func (c *ChannelGraph) PruneGraph(spentOutputs []*wire.OutPoint,
460
        blockHash *chainhash.Hash, blockHeight uint32) (
461
        []*models.ChannelEdgeInfo, error) {
243✔
462

243✔
463
        c.cacheMu.Lock()
243✔
464
        defer c.cacheMu.Unlock()
243✔
465

243✔
466
        edges, nodes, err := c.KVStore.PruneGraph(
243✔
467
                spentOutputs, blockHash, blockHeight,
243✔
468
        )
243✔
469
        if err != nil {
243✔
470
                return nil, err
×
471
        }
×
472

473
        if c.graphCache != nil {
486✔
474
                for _, edge := range edges {
263✔
475
                        c.graphCache.RemoveChannel(
20✔
476
                                edge.NodeKey1Bytes, edge.NodeKey2Bytes,
20✔
477
                                edge.ChannelID,
20✔
478
                        )
20✔
479
                }
20✔
480

481
                for _, node := range nodes {
300✔
482
                        c.graphCache.RemoveNode(node)
57✔
483
                }
57✔
484

485
                log.Debugf("Pruned graph, cache now has %s",
243✔
486
                        c.graphCache.Stats())
243✔
487
        }
488

489
        if len(edges) != 0 {
261✔
490
                // Notify all currently registered clients of the newly closed
18✔
491
                // channels.
18✔
492
                closeSummaries := createCloseSummaries(
18✔
493
                        blockHeight, edges...,
18✔
494
                )
18✔
495
                c.notifyTopologyChange(&TopologyChange{
18✔
496
                        ClosedChannels: closeSummaries,
18✔
497
                })
18✔
498
        }
18✔
499

500
        return edges, nil
243✔
501
}
502

503
// PruneGraphNodes is a garbage collection method which attempts to prune out
504
// any nodes from the channel graph that are currently unconnected. This ensure
505
// that we only maintain a graph of reachable nodes. In the event that a pruned
506
// node gains more channels, it will be re-added back to the graph.
507
func (c *ChannelGraph) PruneGraphNodes() error {
25✔
508
        c.cacheMu.Lock()
25✔
509
        defer c.cacheMu.Unlock()
25✔
510

25✔
511
        nodes, err := c.KVStore.PruneGraphNodes()
25✔
512
        if err != nil {
25✔
513
                return err
×
514
        }
×
515

516
        if c.graphCache != nil {
50✔
517
                for _, node := range nodes {
32✔
518
                        c.graphCache.RemoveNode(node)
7✔
519
                }
7✔
520
        }
521

522
        return nil
25✔
523
}
524

525
// FilterKnownChanIDs takes a set of channel IDs and return the subset of chan
526
// ID's that we don't know and are not known zombies of the passed set. In other
527
// words, we perform a set difference of our set of chan ID's and the ones
528
// passed in. This method can be used by callers to determine the set of
529
// channels another peer knows of that we don't.
530
func (c *ChannelGraph) FilterKnownChanIDs(chansInfo []ChannelUpdateInfo,
531
        isZombieChan func(time.Time, time.Time) bool) ([]uint64, error) {
126✔
532

126✔
533
        unknown, knownZombies, err := c.KVStore.FilterKnownChanIDs(chansInfo)
126✔
534
        if err != nil {
126✔
535
                return nil, err
×
536
        }
×
537

538
        for _, info := range knownZombies {
168✔
539
                // TODO(ziggie): Make sure that for the strict pruning case we
42✔
540
                // compare the pubkeys and whether the right timestamp is not
42✔
541
                // older than the `ChannelPruneExpiry`.
42✔
542
                //
42✔
543
                // NOTE: The timestamp data has no verification attached to it
42✔
544
                // in the `ReplyChannelRange` msg so we are trusting this data
42✔
545
                // at this point. However it is not critical because we are just
42✔
546
                // removing the channel from the db when the timestamps are more
42✔
547
                // recent. During the querying of the gossip msg verification
42✔
548
                // happens as usual. However we should start punishing peers
42✔
549
                // when they don't provide us honest data ?
42✔
550
                isStillZombie := isZombieChan(
42✔
551
                        info.Node1UpdateTimestamp, info.Node2UpdateTimestamp,
42✔
552
                )
42✔
553

42✔
554
                if isStillZombie {
68✔
555
                        continue
26✔
556
                }
557

558
                // If we have marked it as a zombie but the latest update
559
                // timestamps could bring it back from the dead, then we mark it
560
                // alive, and we let it be added to the set of IDs to query our
561
                // peer for.
562
                err := c.KVStore.MarkEdgeLive(
16✔
563
                        info.ShortChannelID.ToUint64(),
16✔
564
                )
16✔
565
                // Since there is a chance that the edge could have been marked
16✔
566
                // as "live" between the FilterKnownChanIDs call and the
16✔
567
                // MarkEdgeLive call, we ignore the error if the edge is already
16✔
568
                // marked as live.
16✔
569
                if err != nil && !errors.Is(err, ErrZombieEdgeNotFound) {
16✔
570
                        return nil, err
×
571
                }
×
572
        }
573

574
        return unknown, nil
126✔
575
}
576

577
// MarkEdgeZombie attempts to mark a channel identified by its channel ID as a
578
// zombie. This method is used on an ad-hoc basis, when channels need to be
579
// marked as zombies outside the normal pruning cycle.
580
func (c *ChannelGraph) MarkEdgeZombie(chanID uint64,
581
        pubKey1, pubKey2 [33]byte) error {
128✔
582

128✔
583
        c.cacheMu.Lock()
128✔
584
        defer c.cacheMu.Unlock()
128✔
585

128✔
586
        err := c.KVStore.MarkEdgeZombie(chanID, pubKey1, pubKey2)
128✔
587
        if err != nil {
128✔
588
                return err
×
589
        }
×
590

591
        if c.graphCache != nil {
256✔
592
                c.graphCache.RemoveChannel(pubKey1, pubKey2, chanID)
128✔
593
        }
128✔
594

595
        return nil
128✔
596
}
597

598
// UpdateEdgePolicy updates the edge routing policy for a single directed edge
599
// within the database for the referenced channel. The `flags` attribute within
600
// the ChannelEdgePolicy determines which of the directed edges are being
601
// updated. If the flag is 1, then the first node's information is being
602
// updated, otherwise it's the second node's information. The node ordering is
603
// determined by the lexicographical ordering of the identity public keys of the
604
// nodes on either side of the channel.
605
func (c *ChannelGraph) UpdateEdgePolicy(edge *models.ChannelEdgePolicy,
606
        op ...batch.SchedulerOption) error {
2,665✔
607

2,665✔
608
        c.cacheMu.Lock()
2,665✔
609
        defer c.cacheMu.Unlock()
2,665✔
610

2,665✔
611
        from, to, err := c.KVStore.UpdateEdgePolicy(edge, op...)
2,665✔
612
        if err != nil {
2,668✔
613
                return err
3✔
614
        }
3✔
615

616
        if c.graphCache != nil {
4,938✔
617
                var isUpdate1 bool
2,276✔
618
                if edge.ChannelFlags&lnwire.ChanUpdateDirection == 0 {
3,417✔
619
                        isUpdate1 = true
1,141✔
620
                }
1,141✔
621

622
                c.graphCache.UpdatePolicy(edge, from, to, isUpdate1)
2,276✔
623
        }
624

625
        select {
2,662✔
626
        case c.topologyUpdate <- edge:
2,662✔
627
        case <-c.quit:
×
628
                return ErrChanGraphShuttingDown
×
629
        }
630

631
        return nil
2,662✔
632
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc