
lightningnetwork / lnd / 13211764208

08 Feb 2025 03:08AM UTC coverage: 49.288% (-9.5%) from 58.815%

Pull #9489 (github)

calvinrzachman
itest: verify switchrpc server enforces send then track

We prevent the RPC server from allowing onion dispatches for attempt
IDs which have already been tracked by RPC clients.

This helps protect the client from leaking a duplicate onion attempt.
NOTE: This is not the only method for solving this issue! The issue
could also be addressed via careful client-side programming which
accounts for the uncertainty and asynchronous nature of dispatching
onions to a remote process via RPC. That would require some lnd
ChannelRouter changes for how we intend to use these RPCs, though.

Pull Request #9489: multi: add BuildOnion, SendOnion, and TrackOnion RPCs
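
As a rough illustration of the "send then track" enforcement described
above, the sketch below uses a hypothetical in-memory guard. The
onionGuard type, its fields, and its method shapes are invented for this
example and are not the actual switchrpc server implementation.

package main

import (
        "fmt"
        "sync"
)

// onionGuard is a hypothetical stand-in for the switchrpc server's
// bookkeeping. It remembers which attempt IDs a client has already
// started tracking, so a later dispatch for the same ID can be rejected
// instead of leaking a duplicate onion attempt.
type onionGuard struct {
        mu      sync.Mutex
        tracked map[uint64]struct{}
}

func newOnionGuard() *onionGuard {
        return &onionGuard{tracked: make(map[uint64]struct{})}
}

// SendOnion refuses to dispatch an onion for an attempt ID that is
// already being tracked.
func (g *onionGuard) SendOnion(attemptID uint64) error {
        g.mu.Lock()
        defer g.mu.Unlock()

        if _, ok := g.tracked[attemptID]; ok {
                return fmt.Errorf("attempt %d already tracked; refusing "+
                        "to dispatch a duplicate onion", attemptID)
        }

        // A real server would hand the onion to the switch here.
        return nil
}

// TrackOnion marks the attempt ID as tracked by an RPC client.
func (g *onionGuard) TrackOnion(attemptID uint64) {
        g.mu.Lock()
        defer g.mu.Unlock()

        g.tracked[attemptID] = struct{}{}
}

func main() {
        g := newOnionGuard()

        g.TrackOnion(7)
        if err := g.SendOnion(7); err != nil {
                // Send-then-track was violated, so the dispatch is refused.
                fmt.Println(err)
        }
}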

474 of 990 new or added lines in 11 files covered. (47.88%)

27321 existing lines in 435 files now uncovered.

101192 of 205306 relevant lines covered (49.29%)

1.54 hits per line
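
As a quick sanity check of the percentages reported above (the counts
come from this report; the Go snippet itself is only illustrative):

package main

import "fmt"

func main() {
        // New or added lines: 474 of 990 covered.
        fmt.Printf("%.2f%%\n", 100*474.0/990.0) // prints 47.88%

        // All relevant lines: 101192 of 205306 covered.
        fmt.Printf("%.2f%%\n", 100*101192.0/205306.0) // prints 49.29%
}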

Source File

/channeldb/payments.go (54.2%)
package channeldb

import (
        "bytes"
        "encoding/binary"
        "errors"
        "fmt"
        "io"
        "sort"
        "time"

        "github.com/btcsuite/btcd/btcec/v2"
        "github.com/btcsuite/btcd/wire"
        "github.com/lightningnetwork/lnd/kvdb"
        "github.com/lightningnetwork/lnd/lntypes"
        "github.com/lightningnetwork/lnd/lnwire"
        "github.com/lightningnetwork/lnd/record"
        "github.com/lightningnetwork/lnd/routing/route"
        "github.com/lightningnetwork/lnd/tlv"
)

var (
        // paymentsRootBucket is the name of the top-level bucket within the
        // database that stores all data related to payments. Within this
        // bucket, each payment has its own sub-bucket keyed by its payment
        // hash.
        //
        // Bucket hierarchy:
        //
        // root-bucket
        //      |
        //      |-- <paymenthash>
        //      |        |--sequence-key: <sequence number>
        //      |        |--creation-info-key: <creation info>
        //      |        |--fail-info-key: <(optional) fail info>
        //      |        |
        //      |        |--payment-htlcs-bucket (shard-bucket)
        //      |        |        |
        //      |        |        |-- ai<htlc attempt ID>: <htlc attempt info>
        //      |        |        |-- si<htlc attempt ID>: <(optional) settle info>
        //      |        |        |-- fi<htlc attempt ID>: <(optional) fail info>
        //      |        |        |
        //      |        |       ...
        //      |        |
        //      |        |
        //      |        |--duplicate-bucket (only for old, completed payments)
        //      |                 |
        //      |                 |-- <seq-num>
        //      |                 |       |--sequence-key: <sequence number>
        //      |                 |       |--creation-info-key: <creation info>
        //      |                 |       |--ai: <attempt info>
        //      |                 |       |--si: <settle info>
        //      |                 |       |--fi: <fail info>
        //      |                 |
        //      |                 |-- <seq-num>
        //      |                 |       |
        //      |                ...     ...
        //      |
        //      |-- <paymenthash>
        //      |        |
        //      |       ...
        //     ...
        //
        paymentsRootBucket = []byte("payments-root-bucket")

        // paymentSequenceKey is a key used in the payment's sub-bucket to
        // store the sequence number of the payment.
        paymentSequenceKey = []byte("payment-sequence-key")

        // paymentCreationInfoKey is a key used in the payment's sub-bucket to
        // store the creation info of the payment.
        paymentCreationInfoKey = []byte("payment-creation-info")

        // paymentHtlcsBucket is a bucket where we'll store the information
        // about the HTLCs that were attempted for a payment.
        paymentHtlcsBucket = []byte("payment-htlcs-bucket")

        // htlcAttemptInfoKey is the key used as the prefix of an HTLC attempt
        // to store the info about the attempt that was done for the HTLC in
        // question. The HTLC attempt ID is concatenated at the end.
        htlcAttemptInfoKey = []byte("ai")

        // htlcSettleInfoKey is the key used as the prefix of an HTLC attempt
        // settle info, if any. The HTLC attempt ID is concatenated at the end.
        htlcSettleInfoKey = []byte("si")

        // htlcFailInfoKey is the key used as the prefix of an HTLC attempt
        // failure information, if any. The HTLC attempt ID is concatenated at
        // the end.
        htlcFailInfoKey = []byte("fi")

        // paymentFailInfoKey is a key used in the payment's sub-bucket to
        // store information about the reason a payment failed.
        paymentFailInfoKey = []byte("payment-fail-info")

        // paymentsIndexBucket is the name of the top-level bucket within the
        // database that stores an index of payment sequence numbers to its
        // payment hash.
        // payments-sequence-index-bucket
        //         |--<sequence-number>: <payment hash>
        //         |--...
        //         |--<sequence-number>: <payment hash>
        paymentsIndexBucket = []byte("payments-index-bucket")
)

var (
        // ErrNoSequenceNumber is returned if we look up a payment which does
        // not have a sequence number.
        ErrNoSequenceNumber = errors.New("sequence number not found")

        // ErrDuplicateNotFound is returned when we lookup a payment by its
        // index and cannot find a payment with a matching sequence number.
        ErrDuplicateNotFound = errors.New("duplicate payment not found")

        // ErrNoDuplicateBucket is returned when we expect to find duplicates
        // when looking up a payment from its index, but the payment does not
        // have any.
        ErrNoDuplicateBucket = errors.New("expected duplicate bucket")

        // ErrNoDuplicateNestedBucket is returned if we do not find duplicate
        // payments in their own sub-bucket.
        ErrNoDuplicateNestedBucket = errors.New("nested duplicate bucket not " +
                "found")
)

// FailureReason encodes the reason a payment ultimately failed.
type FailureReason byte

const (
        // FailureReasonTimeout indicates that the payment did timeout before a
        // successful payment attempt was made.
        FailureReasonTimeout FailureReason = 0

        // FailureReasonNoRoute indicates no successful route to the
        // destination was found during path finding.
        FailureReasonNoRoute FailureReason = 1

        // FailureReasonError indicates that an unexpected error happened during
        // payment.
        FailureReasonError FailureReason = 2

        // FailureReasonPaymentDetails indicates that either the hash is unknown
        // or the final cltv delta or amount is incorrect.
        FailureReasonPaymentDetails FailureReason = 3

        // FailureReasonInsufficientBalance indicates that we didn't have enough
        // balance to complete the payment.
        FailureReasonInsufficientBalance FailureReason = 4

        // FailureReasonCanceled indicates that the payment was canceled by the
        // user.
        FailureReasonCanceled FailureReason = 5

        // TODO(joostjager): Add failure reasons for:
        // LocalLiquidityInsufficient, RemoteCapacityInsufficient.
)

// Error returns a human-readable error string for the FailureReason.
func (r FailureReason) Error() string {
        return r.String()
}

// String returns a human-readable FailureReason.
func (r FailureReason) String() string {
        switch r {
        case FailureReasonTimeout:
                return "timeout"
        case FailureReasonNoRoute:
                return "no_route"
        case FailureReasonError:
                return "error"
        case FailureReasonPaymentDetails:
                return "incorrect_payment_details"
        case FailureReasonInsufficientBalance:
                return "insufficient_balance"
        case FailureReasonCanceled:
                return "canceled"
        }

        return "unknown"
}

// PaymentCreationInfo is the information necessary to have ready when
// initiating a payment, moving it into state InFlight.
type PaymentCreationInfo struct {
        // PaymentIdentifier is the hash this payment is paying to in case of
        // non-AMP payments, and the SetID for AMP payments.
        PaymentIdentifier lntypes.Hash

        // Value is the amount we are paying.
        Value lnwire.MilliSatoshi

        // CreationTime is the time when this payment was initiated.
        CreationTime time.Time

        // PaymentRequest is the full payment request, if any.
        PaymentRequest []byte

        // FirstHopCustomRecords are the TLV records that are to be sent to the
        // first hop of this payment. These records will be transmitted via the
        // wire message only and therefore do not affect the onion payload size.
        FirstHopCustomRecords lnwire.CustomRecords
}

// htlcBucketKey creates a composite key from prefix and id where the result is
// simply the two concatenated.
func htlcBucketKey(prefix, id []byte) []byte {
        key := make([]byte, len(prefix)+len(id))
        copy(key, prefix)
        copy(key[len(prefix):], id)
        return key
}

// FetchPayments returns all sent payments found in the DB.
//
// nolint: dupl
func (d *DB) FetchPayments() ([]*MPPayment, error) {
        var payments []*MPPayment

        err := kvdb.View(d, func(tx kvdb.RTx) error {
                paymentsBucket := tx.ReadBucket(paymentsRootBucket)
                if paymentsBucket == nil {
                        return nil
                }

                return paymentsBucket.ForEach(func(k, v []byte) error {
                        bucket := paymentsBucket.NestedReadBucket(k)
                        if bucket == nil {
                                // We only expect sub-buckets to be found in
                                // this top-level bucket.
                                return fmt.Errorf("non bucket element in " +
                                        "payments bucket")
                        }

                        p, err := fetchPayment(bucket)
                        if err != nil {
                                return err
                        }

                        payments = append(payments, p)

                        // For older versions of lnd, duplicate payments to a
                        // payment hash were possible. These will be found in a
                        // sub-bucket indexed by their sequence number if
                        // available.
                        duplicatePayments, err := fetchDuplicatePayments(bucket)
                        if err != nil {
                                return err
                        }

                        payments = append(payments, duplicatePayments...)
                        return nil
                })
        }, func() {
                payments = nil
        })
        if err != nil {
                return nil, err
        }

        // Before returning, sort the payments by their sequence number.
        sort.Slice(payments, func(i, j int) bool {
                return payments[i].SequenceNum < payments[j].SequenceNum
        })

        return payments, nil
}

func fetchCreationInfo(bucket kvdb.RBucket) (*PaymentCreationInfo, error) {
        b := bucket.Get(paymentCreationInfoKey)
        if b == nil {
                return nil, fmt.Errorf("creation info not found")
        }

        r := bytes.NewReader(b)
        return deserializePaymentCreationInfo(r)
}

func fetchPayment(bucket kvdb.RBucket) (*MPPayment, error) {
        seqBytes := bucket.Get(paymentSequenceKey)
        if seqBytes == nil {
                return nil, fmt.Errorf("sequence number not found")
        }

        sequenceNum := binary.BigEndian.Uint64(seqBytes)

        // Get the PaymentCreationInfo.
        creationInfo, err := fetchCreationInfo(bucket)
        if err != nil {
                return nil, err
        }

        var htlcs []HTLCAttempt
        htlcsBucket := bucket.NestedReadBucket(paymentHtlcsBucket)
        if htlcsBucket != nil {
                // Get the payment attempts. This can be empty.
                htlcs, err = fetchHtlcAttempts(htlcsBucket)
                if err != nil {
                        return nil, err
                }
        }

        // Get failure reason if available.
        var failureReason *FailureReason
        b := bucket.Get(paymentFailInfoKey)
        if b != nil {
                reason := FailureReason(b[0])
                failureReason = &reason
        }

        // Create a new payment.
        payment := &MPPayment{
                SequenceNum:   sequenceNum,
                Info:          creationInfo,
                HTLCs:         htlcs,
                FailureReason: failureReason,
        }

        // Set its state and status.
        if err := payment.setState(); err != nil {
                return nil, err
        }

        return payment, nil
}

// fetchHtlcAttempts retrieves all htlc attempts made for the payment found in
// the given bucket.
func fetchHtlcAttempts(bucket kvdb.RBucket) ([]HTLCAttempt, error) {
        htlcsMap := make(map[uint64]*HTLCAttempt)

        attemptInfoCount := 0
        err := bucket.ForEach(func(k, v []byte) error {
                aid := byteOrder.Uint64(k[len(k)-8:])

                if _, ok := htlcsMap[aid]; !ok {
                        htlcsMap[aid] = &HTLCAttempt{}
                }

                var err error
                switch {
                case bytes.HasPrefix(k, htlcAttemptInfoKey):
                        attemptInfo, err := readHtlcAttemptInfo(v)
                        if err != nil {
                                return err
                        }

                        attemptInfo.AttemptID = aid
                        htlcsMap[aid].HTLCAttemptInfo = *attemptInfo
                        attemptInfoCount++

                case bytes.HasPrefix(k, htlcSettleInfoKey):
                        htlcsMap[aid].Settle, err = readHtlcSettleInfo(v)
                        if err != nil {
                                return err
                        }

                case bytes.HasPrefix(k, htlcFailInfoKey):
                        htlcsMap[aid].Failure, err = readHtlcFailInfo(v)
                        if err != nil {
                                return err
                        }

                default:
                        return fmt.Errorf("unknown htlc attempt key")
                }

                return nil
        })
        if err != nil {
                return nil, err
        }

        // Sanity check that all htlcs have an attempt info.
        if attemptInfoCount != len(htlcsMap) {
                return nil, errNoAttemptInfo
        }

        keys := make([]uint64, len(htlcsMap))
        i := 0
        for k := range htlcsMap {
                keys[i] = k
                i++
        }

        // Sort HTLC attempts by their attempt ID. This is needed because in the
        // DB we store the attempts with keys prefixed by their status which
        // changes order (groups them together by status).
        sort.Slice(keys, func(i, j int) bool {
                return keys[i] < keys[j]
        })

        htlcs := make([]HTLCAttempt, len(htlcsMap))
        for i, key := range keys {
                htlcs[i] = *htlcsMap[key]
        }

        return htlcs, nil
}

// readHtlcAttemptInfo reads the payment attempt info for this htlc.
func readHtlcAttemptInfo(b []byte) (*HTLCAttemptInfo, error) {
        r := bytes.NewReader(b)
        return deserializeHTLCAttemptInfo(r)
}

// readHtlcSettleInfo reads the settle info for the htlc. If the htlc isn't
// settled, nil is returned.
func readHtlcSettleInfo(b []byte) (*HTLCSettleInfo, error) {
        r := bytes.NewReader(b)
        return deserializeHTLCSettleInfo(r)
}

// readHtlcFailInfo reads the failure info for the htlc. If the htlc hasn't
// failed, nil is returned.
func readHtlcFailInfo(b []byte) (*HTLCFailInfo, error) {
        r := bytes.NewReader(b)
        return deserializeHTLCFailInfo(r)
}

// fetchFailedHtlcKeys retrieves the bucket keys of all failed HTLCs of a
// payment bucket.
func fetchFailedHtlcKeys(bucket kvdb.RBucket) ([][]byte, error) {
        htlcsBucket := bucket.NestedReadBucket(paymentHtlcsBucket)

        var htlcs []HTLCAttempt
        var err error
        if htlcsBucket != nil {
                htlcs, err = fetchHtlcAttempts(htlcsBucket)
                if err != nil {
                        return nil, err
                }
        }

        // Now iterate through them and save the bucket keys for the failed
        // HTLCs.
        var htlcKeys [][]byte
        for _, h := range htlcs {
                if h.Failure == nil {
                        continue
                }

                htlcKeyBytes := make([]byte, 8)
                binary.BigEndian.PutUint64(htlcKeyBytes, h.AttemptID)

                htlcKeys = append(htlcKeys, htlcKeyBytes)
        }

        return htlcKeys, nil
}

// PaymentsQuery represents a query to the payments database starting or ending
// at a certain offset index. The number of retrieved records can be limited.
type PaymentsQuery struct {
        // IndexOffset determines the starting point of the payments query and
        // is always exclusive. In normal order, the query starts at the next
        // higher (available) index compared to IndexOffset. In reversed order,
        // the query ends at the next lower (available) index compared to the
        // IndexOffset. In the case of a zero index_offset, the query will start
        // with the oldest payment when paginating forwards, or will end with
        // the most recent payment when paginating backwards.
        IndexOffset uint64

        // MaxPayments is the maximal number of payments returned in the
        // payments query.
        MaxPayments uint64

        // Reversed gives a meaning to the IndexOffset. If reversed is set to
        // true, the query will fetch payments with indices lower than the
        // IndexOffset, otherwise, it will return payments with indices greater
        // than the IndexOffset.
        Reversed bool

        // If IncludeIncomplete is true, then return payments that have not yet
        // fully completed. This means that pending payments, as well as failed
        // payments will show up if this field is set to true.
        IncludeIncomplete bool

        // CountTotal indicates that all payments currently present in the
        // payment index (complete and incomplete) should be counted.
        CountTotal bool

        // CreationDateStart, expressed in Unix seconds, if set, filters out
        // all payments with a creation date earlier than it.
        CreationDateStart int64

        // CreationDateEnd, expressed in Unix seconds, if set, filters out all
        // payments with a creation date later than it.
        CreationDateEnd int64
}

// PaymentsResponse contains the result of a query to the payments database.
// It includes the set of payments that match the query and integers which
// represent the index of the first and last item returned in the series of
// payments. These integers allow callers to resume their query in the event
// that the query's response exceeds the max number of returnable events.
type PaymentsResponse struct {
        // Payments is the set of payments returned from the database for the
        // PaymentsQuery.
        Payments []*MPPayment

        // FirstIndexOffset is the index of the first element in the set of
        // returned MPPayments. Callers can use this to resume their query
        // in the event that the slice has too many events to fit into a single
        // response. The offset can be used to continue reverse pagination.
        FirstIndexOffset uint64

        // LastIndexOffset is the index of the last element in the set of
        // returned MPPayments. Callers can use this to resume their query
        // in the event that the slice has too many events to fit into a single
        // response. The offset can be used to continue forward pagination.
        LastIndexOffset uint64

        // TotalCount represents the total number of payments that are currently
        // stored in the payment database. This will only be set if the
        // CountTotal field in the query was set to true.
        TotalCount uint64
}

// QueryPayments is a query to the payments database which is restricted
// to a subset of payments by the payments query, containing an offset
// index and a maximum number of returned payments.
func (d *DB) QueryPayments(query PaymentsQuery) (PaymentsResponse, error) {
        var resp PaymentsResponse

        if err := kvdb.View(d, func(tx kvdb.RTx) error {
                // Get the root payments bucket.
                paymentsBucket := tx.ReadBucket(paymentsRootBucket)
                if paymentsBucket == nil {
                        return nil
                }

                // Get the index bucket which maps sequence number -> payment
                // hash and duplicate bool. If we have a payments bucket, we
                // should have an indexes bucket as well.
                indexes := tx.ReadBucket(paymentsIndexBucket)
                if indexes == nil {
                        return fmt.Errorf("index bucket does not exist")
                }

                // accumulatePayments gets payments with the sequence number
                // and hash provided and adds them to our list of payments if
                // they meet the criteria of our query. It returns the number
                // of payments that were added.
                accumulatePayments := func(sequenceKey, hash []byte) (bool,
                        error) {

                        r := bytes.NewReader(hash)
                        paymentHash, err := deserializePaymentIndex(r)
                        if err != nil {
                                return false, err
                        }

                        payment, err := fetchPaymentWithSequenceNumber(
                                tx, paymentHash, sequenceKey,
                        )
                        if err != nil {
                                return false, err
                        }

                        // To keep compatibility with the old API, we only
                        // return non-succeeded payments if requested.
                        if payment.Status != StatusSucceeded &&
                                !query.IncludeIncomplete {

                                return false, err
                        }

                        // Get the creation time in Unix seconds, this always
                        // rounds down the nanoseconds to full seconds.
                        createTime := payment.Info.CreationTime.Unix()

                        // Skip any payments that were created before the
                        // specified time.
                        if createTime < query.CreationDateStart {
                                return false, nil
                        }

                        // Skip any payments that were created after the
                        // specified time.
                        if query.CreationDateEnd != 0 &&
                                createTime > query.CreationDateEnd {

                                return false, nil
                        }

                        // At this point, we've exhausted the offset, so we'll
                        // begin collecting payments found within the range.
                        resp.Payments = append(resp.Payments, payment)
                        return true, nil
                }

                // Create a paginator which reads from our sequence index bucket
                // with the parameters provided by the payments query.
                paginator := newPaginator(
                        indexes.ReadCursor(), query.Reversed, query.IndexOffset,
                        query.MaxPayments,
                )

                // Run a paginated query, adding payments to our response.
                if err := paginator.query(accumulatePayments); err != nil {
                        return err
                }

                // Counting the total number of payments is expensive, since we
                // literally have to traverse the cursor linearly, which can
                // take quite a while. So it's an optional query parameter.
                if query.CountTotal {
                        var (
                                totalPayments uint64
                                err           error
                        )
                        countFn := func(_, _ []byte) error {
                                totalPayments++

                                return nil
                        }

                        // In non-boltdb database backends, there's a faster
                        // ForAll query that allows for batch fetching items.
                        if fastBucket, ok := indexes.(kvdb.ExtendedRBucket); ok {
                                err = fastBucket.ForAll(countFn)
                        } else {
                                err = indexes.ForEach(countFn)
                        }
                        if err != nil {
                                return fmt.Errorf("error counting payments: %w",
                                        err)
                        }

                        resp.TotalCount = totalPayments
                }

                return nil
        }, func() {
                resp = PaymentsResponse{}
        }); err != nil {
                return resp, err
        }

        // Need to swap the payments slice order if reversed order.
        if query.Reversed {
                for l, r := 0, len(resp.Payments)-1; l < r; l, r = l+1, r-1 {
                        resp.Payments[l], resp.Payments[r] =
                                resp.Payments[r], resp.Payments[l]
                }
        }

        // Set the first and last index of the returned payments so that the
        // caller can resume from this point later on.
        if len(resp.Payments) > 0 {
                resp.FirstIndexOffset = resp.Payments[0].SequenceNum
                resp.LastIndexOffset =
                        resp.Payments[len(resp.Payments)-1].SequenceNum
        }

        return resp, nil
}

// fetchPaymentWithSequenceNumber gets the payment which matches the payment hash
// *and* sequence number provided from the database. This is required because
// we previously had more than one payment per hash, so we have multiple indexes
// pointing to a single payment; we want to retrieve the correct one.
func fetchPaymentWithSequenceNumber(tx kvdb.RTx, paymentHash lntypes.Hash,
        sequenceNumber []byte) (*MPPayment, error) {

        // We can now lookup the payment keyed by its hash in
        // the payments root bucket.
        bucket, err := fetchPaymentBucket(tx, paymentHash)
        if err != nil {
                return nil, err
        }

        // A single payment hash can have multiple payments associated with it.
        // We lookup our sequence number first, to determine whether this is
        // the payment we are actually looking for.
        seqBytes := bucket.Get(paymentSequenceKey)
        if seqBytes == nil {
                return nil, ErrNoSequenceNumber
        }

        // If this top level payment has the sequence number we are looking for,
        // return it.
        if bytes.Equal(seqBytes, sequenceNumber) {
                return fetchPayment(bucket)
        }

        // If we were not looking for the top level payment, we are looking for
        // one of our duplicate payments. We need to iterate through the seq
        // numbers in this bucket to find the correct payments. If we do not
        // find a duplicate payments bucket here, something is wrong.
        dup := bucket.NestedReadBucket(duplicatePaymentsBucket)
        if dup == nil {
                return nil, ErrNoDuplicateBucket
        }

        var duplicatePayment *MPPayment
        err = dup.ForEach(func(k, v []byte) error {
                subBucket := dup.NestedReadBucket(k)
                if subBucket == nil {
                        // We expect one bucket for each duplicate to be found.
                        return ErrNoDuplicateNestedBucket
                }

                seqBytes := subBucket.Get(duplicatePaymentSequenceKey)
                if seqBytes == nil {
                        return err
                }

                // If this duplicate payment is not the sequence number we are
                // looking for, we can continue.
                if !bytes.Equal(seqBytes, sequenceNumber) {
                        return nil
                }

                duplicatePayment, err = fetchDuplicatePayment(subBucket)
                if err != nil {
                        return err
                }

                return nil
        })
        if err != nil {
                return nil, err
        }

        // If none of the duplicate payments matched our sequence number, we
        // failed to find the payment with this sequence number; something is
        // wrong.
        if duplicatePayment == nil {
                return nil, ErrDuplicateNotFound
        }

        return duplicatePayment, nil
}

// DeletePayment deletes a payment from the DB given its payment hash. If
// failedHtlcsOnly is set, only failed HTLC attempts of the payment will be
// deleted.
func (d *DB) DeletePayment(paymentHash lntypes.Hash,
        failedHtlcsOnly bool) error {

        return kvdb.Update(d, func(tx kvdb.RwTx) error {
                payments := tx.ReadWriteBucket(paymentsRootBucket)
                if payments == nil {
                        return nil
                }

                bucket := payments.NestedReadWriteBucket(paymentHash[:])
                if bucket == nil {
                        return fmt.Errorf("non bucket element in payments " +
                                "bucket")
                }

                // If the status is InFlight, we cannot safely delete
                // the payment information, so we return early.
                paymentStatus, err := fetchPaymentStatus(bucket)
                if err != nil {
                        return err
                }

                // If the payment has inflight HTLCs, we cannot safely delete
                // the payment information, so we return an error.
                if err := paymentStatus.removable(); err != nil {
                        return fmt.Errorf("payment '%v' has inflight HTLCs "+
                                "and therefore cannot be deleted: %w",
                                paymentHash.String(), err)
                }

                // Delete the failed HTLC attempts we found.
                if failedHtlcsOnly {
                        toDelete, err := fetchFailedHtlcKeys(bucket)
                        if err != nil {
                                return err
                        }

                        htlcsBucket := bucket.NestedReadWriteBucket(
                                paymentHtlcsBucket,
                        )

                        for _, htlcID := range toDelete {
                                err = htlcsBucket.Delete(
                                        htlcBucketKey(htlcAttemptInfoKey, htlcID),
                                )
                                if err != nil {
                                        return err
                                }

                                err = htlcsBucket.Delete(
                                        htlcBucketKey(htlcFailInfoKey, htlcID),
                                )
                                if err != nil {
                                        return err
                                }

                                err = htlcsBucket.Delete(
                                        htlcBucketKey(htlcSettleInfoKey, htlcID),
                                )
                                if err != nil {
                                        return err
                                }
                        }

                        return nil
                }

                seqNrs, err := fetchSequenceNumbers(bucket)
                if err != nil {
                        return err
                }

                if err := payments.DeleteNestedBucket(paymentHash[:]); err != nil {
                        return err
                }

                indexBucket := tx.ReadWriteBucket(paymentsIndexBucket)
                for _, k := range seqNrs {
                        if err := indexBucket.Delete(k); err != nil {
                                return err
                        }
                }

                return nil
        }, func() {})
}

// DeletePayments deletes all completed and failed payments from the DB. If
// failedOnly is set, only failed payments will be considered for deletion. If
// failedHtlcsOnly is set, the payment itself won't be deleted, only failed HTLC
// attempts. The method returns the number of deleted payments, which is always
// 0 if failedHtlcsOnly is set.
func (d *DB) DeletePayments(failedOnly, failedHtlcsOnly bool) (int, error) {
        var numPayments int
        err := kvdb.Update(d, func(tx kvdb.RwTx) error {
                payments := tx.ReadWriteBucket(paymentsRootBucket)
                if payments == nil {
                        return nil
                }

                var (
                        // deleteBuckets is the set of payment buckets we need
                        // to delete.
                        deleteBuckets [][]byte

                        // deleteIndexes is the set of indexes pointing to these
                        // payments that need to be deleted.
                        deleteIndexes [][]byte

                        // deleteHtlcs maps a payment hash to the HTLC IDs we
                        // want to delete for that payment.
                        deleteHtlcs = make(map[lntypes.Hash][][]byte)
                )
                err := payments.ForEach(func(k, _ []byte) error {
                        bucket := payments.NestedReadBucket(k)
                        if bucket == nil {
                                // We only expect sub-buckets to be found in
                                // this top-level bucket.
                                return fmt.Errorf("non bucket element in " +
                                        "payments bucket")
                        }

                        // If the status is InFlight, we cannot safely delete
                        // the payment information, so we return early.
                        paymentStatus, err := fetchPaymentStatus(bucket)
                        if err != nil {
                                return err
                        }

                        // If the payment has inflight HTLCs, we cannot safely
                        // delete the payment information, so we return nil
                        // to skip it.
                        if err := paymentStatus.removable(); err != nil {
                                return nil
                        }

                        // If we requested to only delete failed payments, we
                        // can return if this one is not.
                        if failedOnly && paymentStatus != StatusFailed {
                                return nil
                        }

                        // If we are only deleting failed HTLCs, fetch them.
                        if failedHtlcsOnly {
                                toDelete, err := fetchFailedHtlcKeys(bucket)
                                if err != nil {
                                        return err
                                }

                                hash, err := lntypes.MakeHash(k)
                                if err != nil {
                                        return err
                                }

                                deleteHtlcs[hash] = toDelete

                                // We return, we are only deleting attempts.
                                return nil
                        }

                        // Add the bucket to the set of buckets we can delete.
                        deleteBuckets = append(deleteBuckets, k)

                        // Get all the sequence numbers associated with the
                        // payment, including duplicates.
                        seqNrs, err := fetchSequenceNumbers(bucket)
                        if err != nil {
                                return err
                        }

                        deleteIndexes = append(deleteIndexes, seqNrs...)
                        numPayments++
                        return nil
                })
                if err != nil {
                        return err
                }

                // Delete the failed HTLC attempts we found.
                for hash, htlcIDs := range deleteHtlcs {
                        bucket := payments.NestedReadWriteBucket(hash[:])
                        htlcsBucket := bucket.NestedReadWriteBucket(
                                paymentHtlcsBucket,
                        )

                        for _, aid := range htlcIDs {
                                if err := htlcsBucket.Delete(
                                        htlcBucketKey(htlcAttemptInfoKey, aid),
                                ); err != nil {
                                        return err
                                }

                                if err := htlcsBucket.Delete(
                                        htlcBucketKey(htlcFailInfoKey, aid),
                                ); err != nil {
                                        return err
                                }

                                if err := htlcsBucket.Delete(
                                        htlcBucketKey(htlcSettleInfoKey, aid),
                                ); err != nil {
                                        return err
                                }
                        }
                }

                for _, k := range deleteBuckets {
                        if err := payments.DeleteNestedBucket(k); err != nil {
                                return err
                        }
                }

                // Get our index bucket and delete all indexes pointing to the
                // payments we are deleting.
                indexBucket := tx.ReadWriteBucket(paymentsIndexBucket)
                for _, k := range deleteIndexes {
                        if err := indexBucket.Delete(k); err != nil {
                                return err
                        }
                }

                return nil
        }, func() {
                numPayments = 0
        })
        if err != nil {
                return 0, err
        }

        return numPayments, nil
}

// fetchSequenceNumbers fetches all the sequence numbers associated with a
// payment, including those belonging to any duplicate payments.
func fetchSequenceNumbers(paymentBucket kvdb.RBucket) ([][]byte, error) {
        seqNum := paymentBucket.Get(paymentSequenceKey)
        if seqNum == nil {
                return nil, errors.New("expected sequence number")
        }

        sequenceNumbers := [][]byte{seqNum}

        // Get the duplicate payments bucket, if it has no duplicates, just
        // return early with the payment sequence number.
        duplicates := paymentBucket.NestedReadBucket(duplicatePaymentsBucket)
        if duplicates == nil {
                return sequenceNumbers, nil
        }

        // If we do have duplicates, they are keyed by sequence number, so we
        // iterate through the duplicates bucket and add them to our set of
        // sequence numbers.
        if err := duplicates.ForEach(func(k, v []byte) error {
                sequenceNumbers = append(sequenceNumbers, k)
                return nil
        }); err != nil {
                return nil, err
        }

        return sequenceNumbers, nil
}

// nolint: dupl
func serializePaymentCreationInfo(w io.Writer, c *PaymentCreationInfo) error {
        var scratch [8]byte

        if _, err := w.Write(c.PaymentIdentifier[:]); err != nil {
                return err
        }

        byteOrder.PutUint64(scratch[:], uint64(c.Value))
        if _, err := w.Write(scratch[:]); err != nil {
                return err
        }

        if err := serializeTime(w, c.CreationTime); err != nil {
                return err
        }

        byteOrder.PutUint32(scratch[:4], uint32(len(c.PaymentRequest)))
        if _, err := w.Write(scratch[:4]); err != nil {
                return err
        }

        if _, err := w.Write(c.PaymentRequest[:]); err != nil {
                return err
        }

        // Any remaining bytes are TLV encoded records. Currently, these are
        // only the custom records provided by the user to be sent to the first
        // hop. But this can easily be extended with further records by merging
        // the records into a single TLV stream.
        err := c.FirstHopCustomRecords.SerializeTo(w)
        if err != nil {
                return err
        }

        return nil
}

func deserializePaymentCreationInfo(r io.Reader) (*PaymentCreationInfo,
        error) {

        var scratch [8]byte

        c := &PaymentCreationInfo{}

        if _, err := io.ReadFull(r, c.PaymentIdentifier[:]); err != nil {
                return nil, err
        }

        if _, err := io.ReadFull(r, scratch[:]); err != nil {
                return nil, err
        }
        c.Value = lnwire.MilliSatoshi(byteOrder.Uint64(scratch[:]))

        creationTime, err := deserializeTime(r)
        if err != nil {
                return nil, err
        }
        c.CreationTime = creationTime

        if _, err := io.ReadFull(r, scratch[:4]); err != nil {
                return nil, err
        }

        reqLen := uint32(byteOrder.Uint32(scratch[:4]))
        payReq := make([]byte, reqLen)
        if reqLen > 0 {
                if _, err := io.ReadFull(r, payReq); err != nil {
                        return nil, err
                }
        }
        c.PaymentRequest = payReq

        // Any remaining bytes are TLV encoded records. Currently, these are
        // only the custom records provided by the user to be sent to the first
        // hop. But this can easily be extended with further records by merging
        // the records into a single TLV stream.
        c.FirstHopCustomRecords, err = lnwire.ParseCustomRecordsFrom(r)
        if err != nil {
                return nil, err
        }

        return c, nil
}

func serializeHTLCAttemptInfo(w io.Writer, a *HTLCAttemptInfo) error {
        if err := WriteElements(w, a.sessionKey); err != nil {
                return err
        }

        if err := SerializeRoute(w, a.Route); err != nil {
                return err
        }

        if err := serializeTime(w, a.AttemptTime); err != nil {
                return err
        }

        // If the hash is nil we can just return.
        if a.Hash == nil {
                return nil
        }

        if _, err := w.Write(a.Hash[:]); err != nil {
                return err
        }

        // Merge the fixed/known records together with the custom records to
        // serialize them as a single blob. We can't do this in SerializeRoute
        // because we're in the middle of the byte stream there. We can only do
        // TLV serialization at the end of the stream, since EOF is allowed for
        // a stream if no more data is expected.
        producers := []tlv.RecordProducer{
                &a.Route.FirstHopAmount,
        }
        tlvData, err := lnwire.MergeAndEncode(
                producers, nil, a.Route.FirstHopWireCustomRecords,
        )
        if err != nil {
                return err
        }

        if _, err := w.Write(tlvData); err != nil {
                return err
        }

        return nil
}

func deserializeHTLCAttemptInfo(r io.Reader) (*HTLCAttemptInfo, error) {
        a := &HTLCAttemptInfo{}
        err := ReadElements(r, &a.sessionKey)
        if err != nil {
                return nil, err
        }

        a.Route, err = DeserializeRoute(r)
        if err != nil {
                return nil, err
        }

        a.AttemptTime, err = deserializeTime(r)
        if err != nil {
                return nil, err
        }

        hash := lntypes.Hash{}
        _, err = io.ReadFull(r, hash[:])

        switch {
        // Older payment attempts wouldn't have the hash set, in which case we
        // can just return.
        case err == io.EOF, err == io.ErrUnexpectedEOF:
                return a, nil

        case err != nil:
                return nil, err

        default:
        }

        a.Hash = &hash

        // Read any remaining data (if any) and parse it into the known records
        // and custom records.
        extraData, err := io.ReadAll(r)
        if err != nil {
                return nil, err
        }

        customRecords, _, _, err := lnwire.ParseAndExtractCustomRecords(
                extraData, &a.Route.FirstHopAmount,
        )
        if err != nil {
                return nil, err
        }

        a.Route.FirstHopWireCustomRecords = customRecords

        return a, nil
}
1183

1184
func serializeHop(w io.Writer, h *route.Hop) error {
3✔
1185
        if err := WriteElements(w,
3✔
1186
                h.PubKeyBytes[:],
3✔
1187
                h.ChannelID,
3✔
1188
                h.OutgoingTimeLock,
3✔
1189
                h.AmtToForward,
3✔
1190
        ); err != nil {
3✔
1191
                return err
×
1192
        }
×
1193

1194
        if err := binary.Write(w, byteOrder, h.LegacyPayload); err != nil {
3✔
1195
                return err
×
1196
        }
×
1197

1198
        // For legacy payloads, we don't need to write any TLV records, so
1199
        // we'll write a zero indicating the our serialized TLV map has no
1200
        // records.
1201
        if h.LegacyPayload {
3✔
UNCOV
1202
                return WriteElements(w, uint32(0))
×
UNCOV
1203
        }
×
1204

1205
        // Gather all non-primitive TLV records so that they can be serialized
1206
        // as a single blob.
1207
        //
1208
        // TODO(conner): add migration to unify all fields in a single TLV
1209
        // blobs. The split approach will cause headaches down the road as more
1210
        // fields are added, which we can avoid by having a single TLV stream
1211
        // for all payload fields.
1212
        var records []tlv.Record
3✔
1213
        if h.MPP != nil {
6✔
1214
                records = append(records, h.MPP.Record())
3✔
1215
        }
3✔
1216

1217
        // Add blinding point and encrypted data if present.
1218
        if h.EncryptedData != nil {
6✔
1219
                records = append(records, record.NewEncryptedDataRecord(
3✔
1220
                        &h.EncryptedData,
3✔
1221
                ))
3✔
1222
        }
3✔
1223

1224
        if h.BlindingPoint != nil {
6✔
1225
                records = append(records, record.NewBlindingPointRecord(
3✔
1226
                        &h.BlindingPoint,
3✔
1227
                ))
3✔
1228
        }
3✔
1229

1230
        if h.AMP != nil {
6✔
1231
                records = append(records, h.AMP.Record())
3✔
1232
        }
3✔
1233

1234
        if h.Metadata != nil {
3✔
UNCOV
1235
                records = append(records, record.NewMetadataRecord(&h.Metadata))
×
UNCOV
1236
        }
×
1237

1238
        if h.TotalAmtMsat != 0 {
6✔
1239
                totalMsatInt := uint64(h.TotalAmtMsat)
3✔
1240
                records = append(
3✔
1241
                        records, record.NewTotalAmtMsatBlinded(&totalMsatInt),
3✔
1242
                )
3✔
1243
        }
3✔
1244

1245
        // Final sanity check to absolutely rule out custom records that are not
1246
        // custom and write into the standard range.
1247
        if err := h.CustomRecords.Validate(); err != nil {
3✔
1248
                return err
×
1249
        }
×
1250

1251
        // Convert custom records to tlv and add to the record list.
1252
        // MapToRecords sorts the list, so adding it here will keep the list
1253
        // canonical.
1254
        tlvRecords := tlv.MapToRecords(h.CustomRecords)
3✔
1255
        records = append(records, tlvRecords...)
3✔
1256

3✔
1257
        // Otherwise, we'll transform our slice of records into a map of the
3✔
1258
        // raw bytes, then serialize them in-line with a length (number of
3✔
1259
        // elements) prefix.
3✔
1260
        mapRecords, err := tlv.RecordsToMap(records)
3✔
1261
        if err != nil {
3✔
1262
                return err
×
1263
        }
×
1264

1265
        numRecords := uint32(len(mapRecords))
3✔
1266
        if err := WriteElements(w, numRecords); err != nil {
3✔
1267
                return err
×
1268
        }
×
1269

1270
        for recordType, rawBytes := range mapRecords {
6✔
1271
                if err := WriteElements(w, recordType); err != nil {
3✔
1272
                        return err
×
1273
                }
×
1274

1275
                if err := wire.WriteVarBytes(w, 0, rawBytes); err != nil {
3✔
1276
                        return err
×
1277
                }
×
1278
        }
1279

1280
        return nil
3✔
1281
}
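
// A sketch of the per-hop layout written by serializeHop, inferred from the
// code above (not normative documentation):
//
//      pubkey bytes | channel ID | outgoing timelock | amt to forward
//      legacy-payload flag
//      uint32 TLV record count (always 0 for legacy payloads)
//      repeated: uint64 record type | var-bytes record value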

// maxOnionPayloadSize is the largest Sphinx payload possible, so we don't need
// to read/write a TLV stream larger than this.
const maxOnionPayloadSize = 1300

func deserializeHop(r io.Reader) (*route.Hop, error) {
        h := &route.Hop{}

        var pub []byte
        if err := ReadElements(r, &pub); err != nil {
                return nil, err
        }
        copy(h.PubKeyBytes[:], pub)

        if err := ReadElements(r,
                &h.ChannelID, &h.OutgoingTimeLock, &h.AmtToForward,
        ); err != nil {
                return nil, err
        }

        // TODO(roasbeef): change field to allow LegacyPayload false to be the
        // legacy default?
        err := binary.Read(r, byteOrder, &h.LegacyPayload)
        if err != nil {
                return nil, err
        }

        var numElements uint32
        if err := ReadElements(r, &numElements); err != nil {
                return nil, err
        }

        // If there are no elements, then we can return early.
        if numElements == 0 {
                return h, nil
        }

        tlvMap := make(map[uint64][]byte)
        for i := uint32(0); i < numElements; i++ {
                var tlvType uint64
                if err := ReadElements(r, &tlvType); err != nil {
                        return nil, err
                }

                rawRecordBytes, err := wire.ReadVarBytes(
                        r, 0, maxOnionPayloadSize, "tlv",
                )
                if err != nil {
                        return nil, err
                }

                tlvMap[tlvType] = rawRecordBytes
        }

        // If the MPP type is present, remove it from the generic TLV map and
        // parse it back into a proper MPP struct.
        //
        // TODO(conner): add migration to unify all fields in a single TLV
        // blob. The split approach will cause headaches down the road as more
        // fields are added, which we can avoid by having a single TLV stream
        // for all payload fields.
        mppType := uint64(record.MPPOnionType)
        if mppBytes, ok := tlvMap[mppType]; ok {
                delete(tlvMap, mppType)

                var (
                        mpp    = &record.MPP{}
                        mppRec = mpp.Record()
                        r      = bytes.NewReader(mppBytes)
                )
                err := mppRec.Decode(r, uint64(len(mppBytes)))
                if err != nil {
                        return nil, err
                }
                h.MPP = mpp
        }

        // If encrypted data or a blinding point are present, remove them from
        // the TLV map and parse them into their proper types.
        encryptedDataType := uint64(record.EncryptedDataOnionType)
        if data, ok := tlvMap[encryptedDataType]; ok {
                delete(tlvMap, encryptedDataType)
                h.EncryptedData = data
        }

        blindingType := uint64(record.BlindingPointOnionType)
        if blindingPoint, ok := tlvMap[blindingType]; ok {
                delete(tlvMap, blindingType)

                h.BlindingPoint, err = btcec.ParsePubKey(blindingPoint)
                if err != nil {
                        return nil, fmt.Errorf("invalid blinding point: %w",
                                err)
                }
        }

        ampType := uint64(record.AMPOnionType)
        if ampBytes, ok := tlvMap[ampType]; ok {
                delete(tlvMap, ampType)

                var (
                        amp    = &record.AMP{}
                        ampRec = amp.Record()
                        r      = bytes.NewReader(ampBytes)
                )
                err := ampRec.Decode(r, uint64(len(ampBytes)))
                if err != nil {
                        return nil, err
                }
                h.AMP = amp
        }

        // If the metadata type is present, remove it from the tlv map and
        // populate it directly on the hop.
        metadataType := uint64(record.MetadataOnionType)
        if metadata, ok := tlvMap[metadataType]; ok {
                delete(tlvMap, metadataType)

                h.Metadata = metadata
        }

        totalAmtMsatType := uint64(record.TotalAmtMsatBlindedType)
        if totalAmtMsat, ok := tlvMap[totalAmtMsatType]; ok {
                delete(tlvMap, totalAmtMsatType)

                var (
                        totalAmtMsatInt uint64
                        buf             [8]byte
                )
                if err := tlv.DTUint64(
                        bytes.NewReader(totalAmtMsat),
                        &totalAmtMsatInt,
                        &buf,
                        uint64(len(totalAmtMsat)),
                ); err != nil {
                        return nil, err
                }

                h.TotalAmtMsat = lnwire.MilliSatoshi(totalAmtMsatInt)
        }

        h.CustomRecords = tlvMap

        return h, nil
}
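
// exampleHopRoundTrip is a minimal round-trip sketch for the hop
// (de)serializers above. It is an illustrative example, not part of the
// original lnd source; it assumes record.NewMPP(total, paymentAddr) as the
// MPP constructor, and the channel ID, timelock, amount, and payment address
// are placeholder values.
func exampleHopRoundTrip() (*route.Hop, error) {
        hop := &route.Hop{
                ChannelID:        123456,
                OutgoingTimeLock: 40,
                AmtToForward:     1_000,
                // A non-legacy payload carrying an MPP record exercises the
                // TLV path of serializeHop/deserializeHop.
                MPP: record.NewMPP(1_000, [32]byte{0x01}),
        }

        var b bytes.Buffer
        if err := serializeHop(&b, hop); err != nil {
                return nil, err
        }

        // deserializeHop restores the fixed fields and parses the MPP record
        // back out of the TLV map.
        return deserializeHop(&b)
}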

// SerializeRoute serializes a route.
func SerializeRoute(w io.Writer, r route.Route) error {
        if err := WriteElements(w,
                r.TotalTimeLock, r.TotalAmount, r.SourcePubKey[:],
        ); err != nil {
                return err
        }

        if err := WriteElements(w, uint32(len(r.Hops))); err != nil {
                return err
        }

        for _, h := range r.Hops {
                if err := serializeHop(w, h); err != nil {
                        return err
                }
        }

        // Any new/extra TLV data is encoded in serializeHTLCAttemptInfo!

        return nil
}

// DeserializeRoute deserializes a route.
func DeserializeRoute(r io.Reader) (route.Route, error) {
        rt := route.Route{}
        if err := ReadElements(r,
                &rt.TotalTimeLock, &rt.TotalAmount,
        ); err != nil {
                return rt, err
        }

        var pub []byte
        if err := ReadElements(r, &pub); err != nil {
                return rt, err
        }
        copy(rt.SourcePubKey[:], pub)

        var numHops uint32
        if err := ReadElements(r, &numHops); err != nil {
                return rt, err
        }

        var hops []*route.Hop
        for i := uint32(0); i < numHops; i++ {
                hop, err := deserializeHop(r)
                if err != nil {
                        return rt, err
                }
                hops = append(hops, hop)
        }
        rt.Hops = hops

        // Any new/extra TLV data is decoded in deserializeHTLCAttemptInfo!

        return rt, nil
}
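
// exampleRouteRoundTrip is a companion sketch for the route-level helpers:
// serialize a single-hop, legacy-payload route and read it back. Again an
// illustrative example with placeholder values, not part of the original
// lnd source.
func exampleRouteRoundTrip() (route.Route, error) {
        rt := route.Route{
                TotalTimeLock: 144,
                TotalAmount:   1_000,
                Hops: []*route.Hop{{
                        ChannelID:        123456,
                        OutgoingTimeLock: 40,
                        AmtToForward:     1_000,
                        // Legacy payloads write a zero TLV-record count, so
                        // no TLV stream follows the fixed fields.
                        LegacyPayload: true,
                }},
        }

        var b bytes.Buffer
        if err := SerializeRoute(&b, rt); err != nil {
                return route.Route{}, err
        }

        // FirstHopWireCustomRecords is not part of this encoding; it is
        // handled by the HTLC attempt (de)serialization, as noted above.
        return DeserializeRoute(&b)
}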