lightningnetwork / lnd / 13035292482

29 Jan 2025 03:59PM UTC coverage: 49.3% (-9.5%) from 58.777%

Pull #9456

mohamedawnallah
docs: update release-notes-0.19.0.md

In this commit, we warn users about the removal
of RPCs `SendToRoute`, `SendToRouteSync`, `SendPayment`,
and `SendPaymentSync` in the next release 0.20.
Pull Request #9456: lnrpc+docs: deprecate warning `SendToRoute`, `SendToRouteSync`, `SendPayment`, and `SendPaymentSync` in Release 0.19

100634 of 204126 relevant lines covered (49.3%)

1.54 hits per line

Source File

/channeldb/migration_01_to_11/graph.go (0.0% covered)
package migration_01_to_11

import (
        "bytes"
        "encoding/binary"
        "fmt"
        "image/color"
        "io"
        "net"
        "time"

        "github.com/btcsuite/btcd/btcec/v2"
        "github.com/btcsuite/btcd/btcutil"
        "github.com/btcsuite/btcd/chaincfg/chainhash"
        "github.com/btcsuite/btcd/wire"
        lnwire "github.com/lightningnetwork/lnd/channeldb/migration/lnwire21"
        "github.com/lightningnetwork/lnd/kvdb"
)

var (
        // nodeBucket is a bucket which houses all the vertices or nodes within
        // the channel graph. This bucket has a single sub-bucket which adds an
        // additional index from pubkey -> alias. Within the top-level of this
        // bucket, the key space maps a node's compressed public key to the
        // serialized information for that node. Additionally, there's a
        // special key "source" which stores the pubkey of the source node. The
        // source node is used as the starting point for all graph queries and
        // traversals. The graph is formed as a star-graph with the source node
        // at the center.
        //
        // maps: pubKey -> nodeInfo
        // maps: source -> selfPubKey
        nodeBucket = []byte("graph-node")

        // nodeUpdateIndexBucket is a sub-bucket of the nodeBucket. This bucket
        // will be used to quickly look up the "freshness" of a node's last
        // update to the network. The bucket only contains keys, and no values,
        // it's mapping:
        //
        // maps: updateTime || nodeID -> nil
        nodeUpdateIndexBucket = []byte("graph-node-update-index")

        // sourceKey is a special key that resides within the nodeBucket. The
        // sourceKey maps a key to the public key of the "self node".
        sourceKey = []byte("source")

        // aliasIndexBucket is a sub-bucket that's nested within the main
        // nodeBucket. This bucket maps the public key of a node to its
        // current alias. This bucket is provided as it can be used within a
        // future UI layer to add an additional degree of confirmation.
        aliasIndexBucket = []byte("alias")

        // edgeBucket is a bucket which houses all of the edge or channel
        // information within the channel graph. This bucket essentially acts
        // as an adjacency list, which in conjunction with a range scan, can be
        // used to iterate over all the incoming and outgoing edges for a
        // particular node. Keys in the bucket use a prefix scheme which leads
        // with the node's public key and ends with the compact edge ID.
        // For each chanID, there will be two entries within the bucket, as the
        // graph is directed: nodes may have different policies w.r.t. fees
        // for their respective directions.
        //
        // maps: pubKey || chanID -> channel edge policy for node
        edgeBucket = []byte("graph-edge")

        // unknownPolicy is represented as an empty slice. It is
        // used as the value in edgeBucket for unknown channel edge policies.
        // Unknown policies are still stored in the database to enable efficient
        // lookup of incoming channel edges.
        unknownPolicy = []byte{}

        // edgeIndexBucket is an index which can be used to iterate all edges
        // in the bucket, grouping them according to their in/out nodes.
        // Additionally, the items in this bucket also contain the complete
        // edge information for a channel. The edge information includes the
        // capacity of the channel, the nodes that made the channel, etc. This
        // bucket resides within the edgeBucket above. Creation of an edge
        // proceeds in two phases: first the edge is added to the edge index,
        // afterwards the edgeBucket can be updated with the latest details of
        // the edge as they are announced on the network.
        //
        // maps: chanID -> pubKey1 || pubKey2 || restofEdgeInfo
        edgeIndexBucket = []byte("edge-index")

        // edgeUpdateIndexBucket is a sub-bucket of the main edgeBucket. This
        // bucket contains an index which allows us to gauge the "freshness" of
        // a channel's last updates.
        //
        // maps: updateTime || chanID -> nil
        edgeUpdateIndexBucket = []byte("edge-update-index")

        // channelPointBucket maps a channel's full outpoint (txid:index) to
        // its short 8-byte channel ID. This bucket resides within the
        // edgeBucket above, and can be used to quickly remove an edge due to
        // the outpoint being spent, or to query for existence of a channel.
        //
        // maps: outPoint -> chanID
        channelPointBucket = []byte("chan-index")

        // zombieBucket is a sub-bucket of the main edgeBucket bucket
        // responsible for maintaining an index of zombie channels. Each entry
        // exists within the bucket as follows:
        //
        // maps: chanID -> pubKey1 || pubKey2
        //
        // The chanID represents the channel ID of the edge that is marked as a
        // zombie and is used as the key, which maps to the public keys of the
        // edge's participants.
        zombieBucket = []byte("zombie-index")

        // disabledEdgePolicyBucket is a sub-bucket of the main edgeBucket bucket
        // responsible for maintaining an index of disabled edge policies. Each
        // entry exists within the bucket as follows:
        //
        // maps: <chanID><direction> -> []byte{}
        //
        // The chanID represents the channel ID of the edge and the direction is
        // one byte representing the direction of the edge. The main purpose of
        // this index is to allow pruning disabled channels in a fast way without
        // the need to iterate all over the graph.
        disabledEdgePolicyBucket = []byte("disabled-edge-policy-index")

        // graphMetaBucket is a top-level bucket which stores various meta-data
        // related to the on-disk channel graph. Data stored in this bucket
        // includes the block to which the graph has been synced, the total
        // number of channels, etc.
        graphMetaBucket = []byte("graph-meta")

        // pruneLogBucket is a bucket within the graphMetaBucket that stores
        // a mapping from the block height to the hash for the blocks used to
        // prune the graph.
        // Once a new block is discovered, any channels that have been closed
        // (by spending the outpoint) can safely be removed from the graph, and
        // the block is added to the prune log. We need to keep such a log for
        // the case where a reorg happens, and we must "rewind" the state of the
        // graph by removing channels that were previously confirmed. In such a
        // case we'll remove all entries from the prune log with a block height
        // that no longer exists.
        pruneLogBucket = []byte("prune-log")
)

const (
        // MaxAllowedExtraOpaqueBytes is the largest amount of opaque bytes that
        // we'll permit to be written to disk. We limit this as otherwise, it
        // would be possible for a node to create a ton of updates and slowly
        // fill our disk, and also waste bandwidth due to relaying.
        MaxAllowedExtraOpaqueBytes = 10000
)

// ChannelGraph is a persistent, on-disk graph representation of the Lightning
// Network. This struct can be used to implement path finding algorithms on top
// of, and also to update a node's view based on information received from the
// p2p network. Internally, the graph is stored using a modified adjacency list
// representation with some added object interaction possible with each
// serialized edge/node. The stored graph is directed, meaning that there are
// two edges stored for each channel: an inbound/outbound edge for each node
// pair. Nodes, edges, and edge information can all be added to the graph
// independently. Edge removal results in the deletion of all edge information
// for that edge.
type ChannelGraph struct {
        db *DB
}

// newChannelGraph allocates a new ChannelGraph backed by a DB instance. The
// returned instance has its own unique reject cache and channel cache.
func newChannelGraph(db *DB, rejectCacheSize, chanCacheSize int) *ChannelGraph {
        return &ChannelGraph{
                db: db,
        }
}

// SourceNode returns the source node of the graph. The source node is treated
// as the center node within a star-graph. This method may be used to kick off
// a path finding algorithm in order to explore the reachability of another
// node based off the source node.
func (c *ChannelGraph) SourceNode() (*LightningNode, error) {
        var source *LightningNode
        err := kvdb.View(c.db, func(tx kvdb.RTx) error {
                // First grab the nodes bucket which stores the mapping from
                // pubKey to node information.
                nodes := tx.ReadBucket(nodeBucket)
                if nodes == nil {
                        return ErrGraphNotFound
                }

                node, err := c.sourceNode(nodes)
                if err != nil {
                        return err
                }
                source = node

                return nil
        }, func() {
                source = nil
        })
        if err != nil {
                return nil, err
        }

        return source, nil
}

// sourceNode uses an existing database transaction and returns the source node
// of the graph. The source node is treated as the center node within a
// star-graph. This method may be used to kick off a path finding algorithm in
// order to explore the reachability of another node based off the source node.
func (c *ChannelGraph) sourceNode(nodes kvdb.RBucket) (*LightningNode, error) {
        selfPub := nodes.Get(sourceKey)
        if selfPub == nil {
                return nil, ErrSourceNodeNotSet
        }

        // With the pubKey of the source node retrieved, we're able to
        // fetch the full node information.
        node, err := fetchLightningNode(nodes, selfPub)
        if err != nil {
                return nil, err
        }
        node.db = c.db

        return &node, nil
}

// SetSourceNode sets the source node within the graph database. The source
// node is to be used as the center of a star-graph within path finding
// algorithms.
func (c *ChannelGraph) SetSourceNode(node *LightningNode) error {
        nodePubBytes := node.PubKeyBytes[:]

        return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
                // First grab the nodes bucket which stores the mapping from
                // pubKey to node information.
                nodes, err := tx.CreateTopLevelBucket(nodeBucket)
                if err != nil {
                        return err
                }

                // Next we create the mapping from source to the targeted
                // public key.
                if err := nodes.Put(sourceKey, nodePubBytes); err != nil {
                        return err
                }

                // Finally, we commit the information of the lightning node
                // itself.
                return addLightningNode(tx, node)
        }, func() {})
}

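// addLightningNode writes the given node to the top-level node bucket within
// an open read/write transaction, creating the alias and node-update index
// sub-buckets if they do not exist yet.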
func addLightningNode(tx kvdb.RwTx, node *LightningNode) error {
        nodes, err := tx.CreateTopLevelBucket(nodeBucket)
        if err != nil {
                return err
        }

        aliases, err := nodes.CreateBucketIfNotExists(aliasIndexBucket)
        if err != nil {
                return err
        }

        updateIndex, err := nodes.CreateBucketIfNotExists(
                nodeUpdateIndexBucket,
        )
        if err != nil {
                return err
        }

        return putLightningNode(nodes, aliases, updateIndex, node)
}

// updateEdgePolicy attempts to update an edge's policy within the relevant
// buckets using an existing database transaction. The returned boolean will be
// true if the updated policy belongs to node1, and false if the policy belonged
// to node2.
func updateEdgePolicy(tx kvdb.RwTx, edge *ChannelEdgePolicy) (bool, error) {
        edges, err := tx.CreateTopLevelBucket(edgeBucket)
        if err != nil {
                return false, ErrEdgeNotFound
        }
        edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
        if edgeIndex == nil {
                return false, ErrEdgeNotFound
        }
        nodes, err := tx.CreateTopLevelBucket(nodeBucket)
        if err != nil {
                return false, err
        }

        // Create the channelID key by converting the channel ID
        // integer into a byte slice.
        var chanID [8]byte
        byteOrder.PutUint64(chanID[:], edge.ChannelID)

        // With the channel ID, we then fetch the value storing the two
        // nodes which connect this channel edge.
        nodeInfo := edgeIndex.Get(chanID[:])
        if nodeInfo == nil {
                return false, ErrEdgeNotFound
        }

        // Depending on the flags value passed above, either the first
        // or second edge policy is being updated.
        var fromNode, toNode []byte
        var isUpdate1 bool
        if edge.ChannelFlags&lnwire.ChanUpdateDirection == 0 {
                fromNode = nodeInfo[:33]
                toNode = nodeInfo[33:66]
                isUpdate1 = true
        } else {
                fromNode = nodeInfo[33:66]
                toNode = nodeInfo[:33]
                isUpdate1 = false
        }

        // Finally, with the direction of the edge being updated
        // identified, we update the on-disk edge representation.
        err = putChanEdgePolicy(edges, nodes, edge, fromNode, toNode)
        if err != nil {
                return false, err
        }

        return isUpdate1, nil
}

// LightningNode represents an individual vertex/node within the channel graph.
// A node is connected to other nodes by one or more channel edges emanating
// from it. As the graph is directed, a node will also have an incoming edge
// attached to it for each outgoing edge.
type LightningNode struct {
        // PubKeyBytes is the raw bytes of the public key of the target node.
        PubKeyBytes [33]byte
        pubKey      *btcec.PublicKey

        // HaveNodeAnnouncement indicates whether we received a node
        // announcement for this particular node. If true, the remaining fields
        // will be set, if false only the PubKey is known for this node.
        HaveNodeAnnouncement bool

        // LastUpdate is the last time the vertex information for this node has
        // been updated.
        LastUpdate time.Time

        // Addresses is the list of addresses this node is reachable over.
        Addresses []net.Addr

        // Color is the selected color for the node.
        Color color.RGBA

        // Alias is a nick-name for the node. The alias can be used to confirm
        // a node's identity or to serve as a short ID for an address book.
        Alias string

        // AuthSigBytes is the raw signature under the advertised public key
        // which serves to authenticate the attributes announced by this node.
        AuthSigBytes []byte

        // Features is the list of protocol features supported by this node.
        Features *lnwire.FeatureVector

        // ExtraOpaqueData is the set of data that was appended to this
        // message, some of which we may not actually know how to iterate or
        // parse. By holding onto this data, we ensure that we're able to
        // properly validate the set of signatures that cover these new fields,
        // and ensure we're able to make upgrades to the network in a forwards
        // compatible manner.
        ExtraOpaqueData []byte

        db *DB

        // TODO(roasbeef): discovery will need storage to keep its last IP
        // address and re-announce if interface changes?

        // TODO(roasbeef): add update method and fetch?
}

// PubKey is the node's long-term identity public key. This key will be used to
// authenticate any advertisements/updates sent by the node.
//
// NOTE: By having this method to access an attribute, we ensure we only need
// to fully deserialize the pubkey if absolutely necessary.
func (l *LightningNode) PubKey() (*btcec.PublicKey, error) {
        if l.pubKey != nil {
                return l.pubKey, nil
        }

        key, err := btcec.ParsePubKey(l.PubKeyBytes[:])
        if err != nil {
                return nil, err
        }
        l.pubKey = key

        return key, nil
}

// ChannelEdgeInfo represents a fully authenticated channel along with all its
// unique attributes. Once an authenticated channel announcement has been
// processed on the network, then an instance of ChannelEdgeInfo encapsulating
// the channel's attributes is stored. The other portions relevant to routing
// policy of a channel are stored within a ChannelEdgePolicy for each direction
// of the channel.
type ChannelEdgeInfo struct {
        // ChannelID is the unique channel ID for the channel. The first 3
        // bytes are the block height, the next 3 the index within the block,
        // and the last 2 bytes are the output index for the channel.
        ChannelID uint64

        // ChainHash is the hash that uniquely identifies the chain that this
        // channel was opened within.
        //
        // TODO(roasbeef): need to modify db keying for multi-chain
        //  * must add chain hash to prefix as well
        ChainHash chainhash.Hash

        // NodeKey1Bytes is the raw public key of the first node.
        NodeKey1Bytes [33]byte

        // NodeKey2Bytes is the raw public key of the second node.
        NodeKey2Bytes [33]byte

        // BitcoinKey1Bytes is the raw public key of the first node.
        BitcoinKey1Bytes [33]byte

        // BitcoinKey2Bytes is the raw public key of the second node.
        BitcoinKey2Bytes [33]byte

        // Features is an opaque byte slice that encodes the set of channel
        // specific features that this channel edge supports.
        Features []byte

        // AuthProof is the authentication proof for this channel. This proof
        // contains a set of signatures binding four identities, which attests
        // to the legitimacy of the advertised channel.
        AuthProof *ChannelAuthProof

        // ChannelPoint is the funding outpoint of the channel. This can be
        // used to uniquely identify the channel within the channel graph.
        ChannelPoint wire.OutPoint

        // Capacity is the total capacity of the channel, this is determined by
        // the value output in the outpoint that created this channel.
        Capacity btcutil.Amount

        // ExtraOpaqueData is the set of data that was appended to this
        // message, some of which we may not actually know how to iterate or
        // parse. By holding onto this data, we ensure that we're able to
        // properly validate the set of signatures that cover these new fields,
        // and ensure we're able to make upgrades to the network in a forwards
        // compatible manner.
        ExtraOpaqueData []byte
}

// ChannelAuthProof is the authentication proof (the signature portion) for a
// channel. Using the four signatures contained in the struct, and some
// auxiliary knowledge (the funding script, node identities, and outpoint) nodes
// on the network are able to validate the authenticity and existence of a
// channel. Each of these signatures signs the following digest: chanID ||
// nodeID1 || nodeID2 || bitcoinKey1 || bitcoinKey2 || 2-byte-feature-len ||
// features.
type ChannelAuthProof struct {
        // NodeSig1Bytes are the raw bytes of the first node signature encoded
        // in DER format.
        NodeSig1Bytes []byte

        // NodeSig2Bytes are the raw bytes of the second node signature
        // encoded in DER format.
        NodeSig2Bytes []byte

        // BitcoinSig1Bytes are the raw bytes of the first bitcoin signature
        // encoded in DER format.
        BitcoinSig1Bytes []byte

        // BitcoinSig2Bytes are the raw bytes of the second bitcoin signature
        // encoded in DER format.
        BitcoinSig2Bytes []byte
}

// IsEmpty checks whether the authentication proof is empty. The proof is
// considered empty if at least one of the signatures is nil.
func (c *ChannelAuthProof) IsEmpty() bool {
        return len(c.NodeSig1Bytes) == 0 ||
                len(c.NodeSig2Bytes) == 0 ||
                len(c.BitcoinSig1Bytes) == 0 ||
                len(c.BitcoinSig2Bytes) == 0
}

// ChannelEdgePolicy represents a *directed* edge within the channel graph. For
// each channel in the database, there are two distinct edges: one for each
// possible direction of travel along the channel. The edges themselves hold
// information concerning fees, and minimum time-lock information which is
// utilized during path finding.
type ChannelEdgePolicy struct {
        // SigBytes is the raw bytes of the signature of the channel edge
        // policy. We'll only parse these if the caller needs to access the
        // signature for validation purposes. Do not set SigBytes directly, but
        // use SetSigBytes instead to make sure that the cache is invalidated.
        SigBytes []byte

        // ChannelID is the unique channel ID for the channel. The first 3
        // bytes are the block height, the next 3 the index within the block,
        // and the last 2 bytes are the output index for the channel.
        ChannelID uint64

        // LastUpdate is the last time an authenticated edge for this channel
        // was received.
        LastUpdate time.Time

        // MessageFlags is a bitfield which indicates the presence of optional
        // fields (like max_htlc) in the policy.
        MessageFlags lnwire.ChanUpdateMsgFlags

        // ChannelFlags is a bitfield which signals the capabilities of the
        // channel as well as the directed edge this update applies to.
        ChannelFlags lnwire.ChanUpdateChanFlags

        // TimeLockDelta is the number of blocks this node will subtract from
        // the expiry of an incoming HTLC. This value expresses the time buffer
        // the node would like to apply to HTLC exchanges.
        TimeLockDelta uint16

        // MinHTLC is the smallest value HTLC this node will accept, expressed
        // in millisatoshi.
        MinHTLC lnwire.MilliSatoshi

        // MaxHTLC is the largest value HTLC this node will accept, expressed
        // in millisatoshi.
        MaxHTLC lnwire.MilliSatoshi

        // FeeBaseMSat is the base HTLC fee that will be charged for forwarding
        // ANY HTLC, expressed in mSAT's.
        FeeBaseMSat lnwire.MilliSatoshi

        // FeeProportionalMillionths is the rate that the node will charge for
        // HTLCs for each millionth of a satoshi forwarded.
        FeeProportionalMillionths lnwire.MilliSatoshi

        // Node is the LightningNode that this directed edge leads to. Using
        // this pointer the channel graph can further be traversed.
        Node *LightningNode

        // ExtraOpaqueData is the set of data that was appended to this
        // message, some of which we may not actually know how to iterate or
        // parse. By holding onto this data, we ensure that we're able to
        // properly validate the set of signatures that cover these new fields,
        // and ensure we're able to make upgrades to the network in a forwards
        // compatible manner.
        ExtraOpaqueData []byte
}

// IsDisabled determines whether the edge has the disabled bit set.
func (c *ChannelEdgePolicy) IsDisabled() bool {
        return c.ChannelFlags&lnwire.ChanUpdateDisabled ==
                lnwire.ChanUpdateDisabled
}

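// putLightningNode serializes the given node into the node bucket, updates
// the alias index, and refreshes the node-update time index entry for the
// node's public key.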
func putLightningNode(nodeBucket kvdb.RwBucket, aliasBucket kvdb.RwBucket,
        updateIndex kvdb.RwBucket, node *LightningNode) error {

        var (
                scratch [16]byte
                b       bytes.Buffer
        )

        pub, err := node.PubKey()
        if err != nil {
                return err
        }
        nodePub := pub.SerializeCompressed()

        // If the node has the update time set, write it, else write 0.
        updateUnix := uint64(0)
        if node.LastUpdate.Unix() > 0 {
                updateUnix = uint64(node.LastUpdate.Unix())
        }

        byteOrder.PutUint64(scratch[:8], updateUnix)
        if _, err := b.Write(scratch[:8]); err != nil {
                return err
        }

        if _, err := b.Write(nodePub); err != nil {
                return err
        }

        // If we got a node announcement for this node, we will have the rest
        // of the data available. If not, we don't have more data to write.
        if !node.HaveNodeAnnouncement {
                // Write HaveNodeAnnouncement=0.
                byteOrder.PutUint16(scratch[:2], 0)
                if _, err := b.Write(scratch[:2]); err != nil {
                        return err
                }

                return nodeBucket.Put(nodePub, b.Bytes())
        }

        // Write HaveNodeAnnouncement=1.
        byteOrder.PutUint16(scratch[:2], 1)
        if _, err := b.Write(scratch[:2]); err != nil {
                return err
        }

        if err := binary.Write(&b, byteOrder, node.Color.R); err != nil {
                return err
        }
        if err := binary.Write(&b, byteOrder, node.Color.G); err != nil {
                return err
        }
        if err := binary.Write(&b, byteOrder, node.Color.B); err != nil {
                return err
        }

        if err := wire.WriteVarString(&b, 0, node.Alias); err != nil {
                return err
        }

        if err := node.Features.Encode(&b); err != nil {
                return err
        }

        numAddresses := uint16(len(node.Addresses))
        byteOrder.PutUint16(scratch[:2], numAddresses)
        if _, err := b.Write(scratch[:2]); err != nil {
                return err
        }

        for _, address := range node.Addresses {
                if err := serializeAddr(&b, address); err != nil {
                        return err
                }
        }

        sigLen := len(node.AuthSigBytes)
        if sigLen > 80 {
                return fmt.Errorf("max sig len allowed is 80, had %v",
                        sigLen)
        }

        err = wire.WriteVarBytes(&b, 0, node.AuthSigBytes)
        if err != nil {
                return err
        }

        if len(node.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes {
                return ErrTooManyExtraOpaqueBytes(len(node.ExtraOpaqueData))
        }
        err = wire.WriteVarBytes(&b, 0, node.ExtraOpaqueData)
        if err != nil {
                return err
        }

        if err := aliasBucket.Put(nodePub, []byte(node.Alias)); err != nil {
                return err
        }

        // With the alias bucket updated, we'll now update the index that
        // tracks the time series of node updates.
        var indexKey [8 + 33]byte
        byteOrder.PutUint64(indexKey[:8], updateUnix)
        copy(indexKey[8:], nodePub)

        // If there was already an old index entry for this node, then we'll
        // delete the old one before we write the new entry.
        if nodeBytes := nodeBucket.Get(nodePub); nodeBytes != nil {
                // Extract out the old update time so we can reconstruct the
                // prior index key to delete it from the index.
                oldUpdateTime := nodeBytes[:8]

                var oldIndexKey [8 + 33]byte
                copy(oldIndexKey[:8], oldUpdateTime)
                copy(oldIndexKey[8:], nodePub)

                if err := updateIndex.Delete(oldIndexKey[:]); err != nil {
                        return err
                }
        }

        if err := updateIndex.Put(indexKey[:], nil); err != nil {
                return err
        }

        return nodeBucket.Put(nodePub, b.Bytes())
}

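// fetchLightningNode looks up the serialized entry for the given public key
// within the node bucket and deserializes it into a LightningNode.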
func fetchLightningNode(nodeBucket kvdb.RBucket,
        nodePub []byte) (LightningNode, error) {

        nodeBytes := nodeBucket.Get(nodePub)
        if nodeBytes == nil {
                return LightningNode{}, ErrGraphNodeNotFound
        }

        nodeReader := bytes.NewReader(nodeBytes)
        return deserializeLightningNode(nodeReader)
}

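// deserializeLightningNode reads a LightningNode from the given reader using
// the on-disk encoding produced by putLightningNode.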
func deserializeLightningNode(r io.Reader) (LightningNode, error) {
        var (
                node    LightningNode
                scratch [8]byte
                err     error
        )

        if _, err := r.Read(scratch[:]); err != nil {
                return LightningNode{}, err
        }

        unix := int64(byteOrder.Uint64(scratch[:]))
        node.LastUpdate = time.Unix(unix, 0)

        if _, err := io.ReadFull(r, node.PubKeyBytes[:]); err != nil {
                return LightningNode{}, err
        }

        if _, err := r.Read(scratch[:2]); err != nil {
                return LightningNode{}, err
        }

        hasNodeAnn := byteOrder.Uint16(scratch[:2])
        if hasNodeAnn == 1 {
                node.HaveNodeAnnouncement = true
        } else {
                node.HaveNodeAnnouncement = false
        }

        // The rest of the data is optional, and will only be there if we got a node
        // announcement for this node.
        if !node.HaveNodeAnnouncement {
                return node, nil
        }

        // We did get a node announcement for this node, so we'll have the rest
        // of the data available.
        if err := binary.Read(r, byteOrder, &node.Color.R); err != nil {
                return LightningNode{}, err
        }
        if err := binary.Read(r, byteOrder, &node.Color.G); err != nil {
                return LightningNode{}, err
        }
        if err := binary.Read(r, byteOrder, &node.Color.B); err != nil {
                return LightningNode{}, err
        }

        node.Alias, err = wire.ReadVarString(r, 0)
        if err != nil {
                return LightningNode{}, err
        }

        fv := lnwire.NewFeatureVector(nil, nil)
        err = fv.Decode(r)
        if err != nil {
                return LightningNode{}, err
        }
        node.Features = fv

        if _, err := r.Read(scratch[:2]); err != nil {
                return LightningNode{}, err
        }
        numAddresses := int(byteOrder.Uint16(scratch[:2]))

        var addresses []net.Addr
        for i := 0; i < numAddresses; i++ {
                address, err := deserializeAddr(r)
                if err != nil {
                        return LightningNode{}, err
                }
                addresses = append(addresses, address)
        }
        node.Addresses = addresses

        node.AuthSigBytes, err = wire.ReadVarBytes(r, 0, 80, "sig")
        if err != nil {
                return LightningNode{}, err
        }

        // We'll try and see if there are any opaque bytes left, if not, then
        // we'll ignore the EOF error and return the node as is.
        node.ExtraOpaqueData, err = wire.ReadVarBytes(
                r, 0, MaxAllowedExtraOpaqueBytes, "blob",
        )
        switch {
        case err == io.ErrUnexpectedEOF:
        case err == io.EOF:
        case err != nil:
                return LightningNode{}, err
        }

        return node, nil
}

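// deserializeChanEdgeInfo reads a ChannelEdgeInfo from the given reader,
// including its optional authentication proof and any extra opaque data.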
func deserializeChanEdgeInfo(r io.Reader) (ChannelEdgeInfo, error) {
        var (
                err      error
                edgeInfo ChannelEdgeInfo
        )

        if _, err := io.ReadFull(r, edgeInfo.NodeKey1Bytes[:]); err != nil {
                return ChannelEdgeInfo{}, err
        }
        if _, err := io.ReadFull(r, edgeInfo.NodeKey2Bytes[:]); err != nil {
                return ChannelEdgeInfo{}, err
        }
        if _, err := io.ReadFull(r, edgeInfo.BitcoinKey1Bytes[:]); err != nil {
                return ChannelEdgeInfo{}, err
        }
        if _, err := io.ReadFull(r, edgeInfo.BitcoinKey2Bytes[:]); err != nil {
                return ChannelEdgeInfo{}, err
        }

        edgeInfo.Features, err = wire.ReadVarBytes(r, 0, 900, "features")
        if err != nil {
                return ChannelEdgeInfo{}, err
        }

        proof := &ChannelAuthProof{}

        proof.NodeSig1Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
        if err != nil {
                return ChannelEdgeInfo{}, err
        }
        proof.NodeSig2Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
        if err != nil {
                return ChannelEdgeInfo{}, err
        }
        proof.BitcoinSig1Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
        if err != nil {
                return ChannelEdgeInfo{}, err
        }
        proof.BitcoinSig2Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
        if err != nil {
                return ChannelEdgeInfo{}, err
        }

        if !proof.IsEmpty() {
                edgeInfo.AuthProof = proof
        }

        edgeInfo.ChannelPoint = wire.OutPoint{}
        if err := ReadOutpoint(r, &edgeInfo.ChannelPoint); err != nil {
                return ChannelEdgeInfo{}, err
        }
        if err := binary.Read(r, byteOrder, &edgeInfo.Capacity); err != nil {
                return ChannelEdgeInfo{}, err
        }
        if err := binary.Read(r, byteOrder, &edgeInfo.ChannelID); err != nil {
                return ChannelEdgeInfo{}, err
        }

        if _, err := io.ReadFull(r, edgeInfo.ChainHash[:]); err != nil {
                return ChannelEdgeInfo{}, err
        }

        // We'll try and see if there are any opaque bytes left, if not, then
        // we'll ignore the EOF error and return the edge as is.
        edgeInfo.ExtraOpaqueData, err = wire.ReadVarBytes(
                r, 0, MaxAllowedExtraOpaqueBytes, "blob",
        )
        switch {
        case err == io.ErrUnexpectedEOF:
        case err == io.EOF:
        case err != nil:
                return ChannelEdgeInfo{}, err
        }

        return edgeInfo, nil
}

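// putChanEdgePolicy serializes the given edge policy and stores it under the
// from-node's key within the edges bucket, keeping the edge-update index and
// the disabled-policy index in sync with the new entry.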
func putChanEdgePolicy(edges, nodes kvdb.RwBucket, edge *ChannelEdgePolicy,
        from, to []byte) error {

        var edgeKey [33 + 8]byte
        copy(edgeKey[:], from)
        byteOrder.PutUint64(edgeKey[33:], edge.ChannelID)

        var b bytes.Buffer
        if err := serializeChanEdgePolicy(&b, edge, to); err != nil {
                return err
        }

        // Before we write out the new edge, we'll create a new entry in the
        // update index in order to keep it fresh.
        updateUnix := uint64(edge.LastUpdate.Unix())
        var indexKey [8 + 8]byte
        byteOrder.PutUint64(indexKey[:8], updateUnix)
        byteOrder.PutUint64(indexKey[8:], edge.ChannelID)

        updateIndex, err := edges.CreateBucketIfNotExists(edgeUpdateIndexBucket)
        if err != nil {
                return err
        }

        // If there was already an entry for this edge, then we'll need to
        // delete the old one to ensure we don't leave around any after-images.
        // An unknown policy value does not have an update time recorded, so
        // it also does not need to be removed.
        if edgeBytes := edges.Get(edgeKey[:]); edgeBytes != nil &&
                !bytes.Equal(edgeBytes[:], unknownPolicy) {

                // In order to delete the old entry, we'll need to obtain the
                // *prior* update time in order to delete it. To do this, we'll
                // need to deserialize the existing policy within the database
                // (now outdated by the new one), and delete its corresponding
                // entry within the update index. We'll ignore any
                // ErrEdgePolicyOptionalFieldNotFound error, as we only need
                // the channel ID and update time to delete the entry.
                // TODO(halseth): get rid of these invalid policies in a
                // migration.
                oldEdgePolicy, err := deserializeChanEdgePolicy(
                        bytes.NewReader(edgeBytes), nodes,
                )
                if err != nil && err != ErrEdgePolicyOptionalFieldNotFound {
                        return err
                }

                oldUpdateTime := uint64(oldEdgePolicy.LastUpdate.Unix())

                var oldIndexKey [8 + 8]byte
                byteOrder.PutUint64(oldIndexKey[:8], oldUpdateTime)
                byteOrder.PutUint64(oldIndexKey[8:], edge.ChannelID)

                if err := updateIndex.Delete(oldIndexKey[:]); err != nil {
                        return err
                }
        }

        if err := updateIndex.Put(indexKey[:], nil); err != nil {
                return err
        }

        updateEdgePolicyDisabledIndex(
                edges, edge.ChannelID,
                edge.ChannelFlags&lnwire.ChanUpdateDirection > 0,
                edge.IsDisabled(),
        )

        return edges.Put(edgeKey[:], b.Bytes()[:])
}

// updateEdgePolicyDisabledIndex is used to update the disabledEdgePolicyIndex
// bucket by either adding a new disabled ChannelEdgePolicy or removing an
// existing one.
// The direction represents the direction of the edge and disabled is used for
// deciding whether to remove or add an entry to the bucket.
// In general a channel is disabled if two entries for the same chanID exist
// in this bucket.
// Maintaining the bucket this way allows a fast retrieval of disabled
// channels, for example when prune is needed.
func updateEdgePolicyDisabledIndex(edges kvdb.RwBucket, chanID uint64,
        direction bool, disabled bool) error {

        var disabledEdgeKey [8 + 1]byte
        byteOrder.PutUint64(disabledEdgeKey[0:], chanID)
        if direction {
                disabledEdgeKey[8] = 1
        }

        disabledEdgePolicyIndex, err := edges.CreateBucketIfNotExists(
                disabledEdgePolicyBucket,
        )
        if err != nil {
                return err
        }

        if disabled {
                return disabledEdgePolicyIndex.Put(disabledEdgeKey[:], []byte{})
        }

        return disabledEdgePolicyIndex.Delete(disabledEdgeKey[:])
}

// putChanEdgePolicyUnknown marks the edge policy as unknown
// in the edges bucket.
func putChanEdgePolicyUnknown(edges kvdb.RwBucket, channelID uint64,
        from []byte) error {

        var edgeKey [33 + 8]byte
        copy(edgeKey[:], from)
        byteOrder.PutUint64(edgeKey[33:], channelID)

        if edges.Get(edgeKey[:]) != nil {
                return fmt.Errorf("Cannot write unknown policy for channel %v "+
                        " when there is already a policy present", channelID)
        }

        return edges.Put(edgeKey[:], unknownPolicy)
}

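// fetchChanEdgePolicy loads the edge policy keyed by the given node public
// key and channel ID from the edges bucket. A nil policy (and nil error) is
// returned if only the unknown-policy placeholder is stored.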
func fetchChanEdgePolicy(edges kvdb.RBucket, chanID []byte,
        nodePub []byte, nodes kvdb.RBucket) (*ChannelEdgePolicy, error) {

        var edgeKey [33 + 8]byte
        copy(edgeKey[:], nodePub)
        copy(edgeKey[33:], chanID[:])

        edgeBytes := edges.Get(edgeKey[:])
        if edgeBytes == nil {
                return nil, ErrEdgeNotFound
        }

        // No need to deserialize unknown policy.
        if bytes.Equal(edgeBytes[:], unknownPolicy) {
                return nil, nil
        }

        edgeReader := bytes.NewReader(edgeBytes)

        ep, err := deserializeChanEdgePolicy(edgeReader, nodes)
        switch {
        // If the db policy was missing an expected optional field, we return
        // nil as if the policy was unknown.
        case err == ErrEdgePolicyOptionalFieldNotFound:
                return nil, nil

        case err != nil:
                return nil, err
        }

        return ep, nil
}

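// serializeChanEdgePolicy writes the given edge policy to w in its on-disk
// format, appending the target node's public key and encoding the optional
// max_htlc field as part of the trailing opaque data.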
func serializeChanEdgePolicy(w io.Writer, edge *ChannelEdgePolicy,
        to []byte) error {

        err := wire.WriteVarBytes(w, 0, edge.SigBytes)
        if err != nil {
                return err
        }

        if err := binary.Write(w, byteOrder, edge.ChannelID); err != nil {
                return err
        }

        var scratch [8]byte
        updateUnix := uint64(edge.LastUpdate.Unix())
        byteOrder.PutUint64(scratch[:], updateUnix)
        if _, err := w.Write(scratch[:]); err != nil {
                return err
        }

        if err := binary.Write(w, byteOrder, edge.MessageFlags); err != nil {
                return err
        }
        if err := binary.Write(w, byteOrder, edge.ChannelFlags); err != nil {
                return err
        }
        if err := binary.Write(w, byteOrder, edge.TimeLockDelta); err != nil {
                return err
        }
        if err := binary.Write(w, byteOrder, uint64(edge.MinHTLC)); err != nil {
                return err
        }
        if err := binary.Write(w, byteOrder, uint64(edge.FeeBaseMSat)); err != nil {
                return err
        }
        if err := binary.Write(w, byteOrder, uint64(edge.FeeProportionalMillionths)); err != nil {
                return err
        }

        if _, err := w.Write(to); err != nil {
                return err
        }

        // If the max_htlc field is present, we write it. To be compatible with
        // older versions that weren't aware of this field, we write it as part
        // of the opaque data.
        // TODO(halseth): clean up when moving to TLV.
        var opaqueBuf bytes.Buffer
        if edge.MessageFlags.HasMaxHtlc() {
                err := binary.Write(&opaqueBuf, byteOrder, uint64(edge.MaxHTLC))
                if err != nil {
                        return err
                }
        }

        if len(edge.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes {
                return ErrTooManyExtraOpaqueBytes(len(edge.ExtraOpaqueData))
        }
        if _, err := opaqueBuf.Write(edge.ExtraOpaqueData); err != nil {
                return err
        }

        if err := wire.WriteVarBytes(w, 0, opaqueBuf.Bytes()); err != nil {
                return err
        }
        return nil
}

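// deserializeChanEdgePolicy reads a ChannelEdgePolicy from r and resolves the
// policy's target node from the given node bucket. If the message flags
// indicate a max_htlc value that is missing from the stored data, the policy
// is returned along with ErrEdgePolicyOptionalFieldNotFound.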
func deserializeChanEdgePolicy(r io.Reader,
        nodes kvdb.RBucket) (*ChannelEdgePolicy, error) {

        edge := &ChannelEdgePolicy{}

        var err error
        edge.SigBytes, err = wire.ReadVarBytes(r, 0, 80, "sig")
        if err != nil {
                return nil, err
        }

        if err := binary.Read(r, byteOrder, &edge.ChannelID); err != nil {
                return nil, err
        }

        var scratch [8]byte
        if _, err := r.Read(scratch[:]); err != nil {
                return nil, err
        }
        unix := int64(byteOrder.Uint64(scratch[:]))
        edge.LastUpdate = time.Unix(unix, 0)

        if err := binary.Read(r, byteOrder, &edge.MessageFlags); err != nil {
                return nil, err
        }
        if err := binary.Read(r, byteOrder, &edge.ChannelFlags); err != nil {
                return nil, err
        }
        if err := binary.Read(r, byteOrder, &edge.TimeLockDelta); err != nil {
                return nil, err
        }

        var n uint64
        if err := binary.Read(r, byteOrder, &n); err != nil {
                return nil, err
        }
        edge.MinHTLC = lnwire.MilliSatoshi(n)

        if err := binary.Read(r, byteOrder, &n); err != nil {
                return nil, err
        }
        edge.FeeBaseMSat = lnwire.MilliSatoshi(n)

        if err := binary.Read(r, byteOrder, &n); err != nil {
                return nil, err
        }
        edge.FeeProportionalMillionths = lnwire.MilliSatoshi(n)

        var pub [33]byte
        if _, err := r.Read(pub[:]); err != nil {
                return nil, err
        }

×
1142
        if err != nil {
×
1143
                return nil, fmt.Errorf("unable to fetch node: %x, %w",
×
1144
                        pub[:], err)
×
1145
        }
×
1146
        edge.Node = &node
×
1147

×
1148
        // We'll try and see if there are any opaque bytes left, if not, then
×
1149
        // we'll ignore the EOF error and return the edge as is.
×
1150
        edge.ExtraOpaqueData, err = wire.ReadVarBytes(
×
1151
                r, 0, MaxAllowedExtraOpaqueBytes, "blob",
×
1152
        )
×
1153
        switch {
×
1154
        case err == io.ErrUnexpectedEOF:
×
1155
        case err == io.EOF:
×
1156
        case err != nil:
×
1157
                return nil, err
×
1158
        }
1159

1160
        // See if optional fields are present.
1161
        if edge.MessageFlags.HasMaxHtlc() {
×
1162
                // The max_htlc field should be at the beginning of the opaque
×
1163
                // bytes.
×
1164
                opq := edge.ExtraOpaqueData
×
1165

×
1166
                // If the max_htlc field is not present, it might be old data
×
1167
                // stored before this field was validated. We'll return the
×
1168
                // edge along with an error.
×
1169
                if len(opq) < 8 {
×
1170
                        return edge, ErrEdgePolicyOptionalFieldNotFound
×
1171
                }
×
1172

1173
                maxHtlc := byteOrder.Uint64(opq[:8])
×
1174
                edge.MaxHTLC = lnwire.MilliSatoshi(maxHtlc)
×
1175

×
1176
                // Exclude the parsed field from the rest of the opaque data.
×
1177
                edge.ExtraOpaqueData = opq[8:]
×
1178
        }
1179

1180
        return edge, nil
×
1181
}