• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

lightningnetwork / lnd / 12199391122

06 Dec 2024 01:10PM UTC coverage: 49.807% (-9.1%) from 58.933%
12199391122

push

github

web-flow
Merge pull request #9337 from Guayaba221/patch-1

chore: fix typo in ruby.md

100137 of 201051 relevant lines covered (49.81%)

2.07 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

68.41
/routing/result_interpretation.go
1
package routing
2

3
import (
4
        "bytes"
5
        "fmt"
6
        "io"
7

8
        "github.com/lightningnetwork/lnd/channeldb"
9
        "github.com/lightningnetwork/lnd/fn"
10
        "github.com/lightningnetwork/lnd/lnwire"
11
        "github.com/lightningnetwork/lnd/routing/route"
12
        "github.com/lightningnetwork/lnd/tlv"
13
)
14

// Instantiate variables to allow taking a reference from the failure reason.
var (
	// reasonError marks a payment as terminally failed due to an error at
	// the recipient (or a node we cannot route around).
	reasonError = channeldb.FailureReasonError

	// reasonIncorrectDetails marks a payment as terminally failed because
	// the recipient rejected the payment details (hash/amount).
	reasonIncorrectDetails = channeldb.FailureReasonPaymentDetails
)
20

// pairResult contains the result of the interpretation of a payment attempt for
// a specific node pair.
type pairResult struct {
	// amt is the amount that was forwarded for this pair. Can be set to
	// zero for failures that are amount independent.
	amt lnwire.MilliSatoshi

	// success indicates whether the payment attempt was successful through
	// this pair.
	success bool
}
32

33
// failPairResult creates a new result struct for a failure.
34
func failPairResult(minPenalizeAmt lnwire.MilliSatoshi) pairResult {
4✔
35
        return pairResult{
4✔
36
                amt: minPenalizeAmt,
4✔
37
        }
4✔
38
}
4✔
39

40
// newSuccessPairResult creates a new result struct for a success.
41
func successPairResult(successAmt lnwire.MilliSatoshi) pairResult {
4✔
42
        return pairResult{
4✔
43
                success: true,
4✔
44
                amt:     successAmt,
4✔
45
        }
4✔
46
}
4✔
47

48
// String returns the human-readable representation of a pair result.
49
func (p pairResult) String() string {
×
50
        var resultType string
×
51
        if p.success {
×
52
                resultType = "success"
×
53
        } else {
×
54
                resultType = "failed"
×
55
        }
×
56

57
        return fmt.Sprintf("%v (amt=%v)", resultType, p.amt)
×
58
}
59

// interpretedResult contains the result of the interpretation of a payment
// attempt.
type interpretedResult struct {
	// nodeFailure points to a node pubkey if all channels of that node are
	// responsible for the result.
	nodeFailure *route.Vertex

	// pairResults contains a map of node pairs for which we have a result.
	pairResults map[DirectedNodePair]pairResult

	// finalFailureReason is set to a non-nil value if it makes no more
	// sense to start another payment attempt. It will contain the reason
	// why.
	finalFailureReason *channeldb.FailureReason

	// policyFailure is set to a node pair if there is a policy failure on
	// that connection. This is used to control the second chance logic for
	// policy failures.
	policyFailure *DirectedNodePair
}
80

// interpretResult interprets a payment outcome and returns an object that
// contains information required to update mission control.
func interpretResult(rt *mcRoute,
	failure fn.Option[paymentFailure]) *interpretedResult {

	i := &interpretedResult{
		pairResults: make(map[DirectedNodePair]pairResult),
	}

	// A None failure option means the attempt succeeded; otherwise the
	// failure details are interpreted to assign blame.
	return fn.ElimOption(failure, func() *interpretedResult {
		i.processSuccess(rt)

		return i
	}, func(info paymentFailure) *interpretedResult {
		i.processFail(rt, info)

		return i
	})
}
100

101
// processSuccess processes a successful payment attempt.
102
func (i *interpretedResult) processSuccess(route *mcRoute) {
4✔
103
        // For successes, all nodes must have acted in the right way. Therefore
4✔
104
        // we mark all of them with a success result.
4✔
105
        i.successPairRange(route, 0, len(route.hops.Val)-1)
4✔
106
}
4✔
107

// processFail processes a failed payment attempt.
func (i *interpretedResult) processFail(rt *mcRoute, failure paymentFailure) {
	// Without any failure info we cannot attribute blame precisely, so
	// fall back to the broad unknown-outcome handling.
	if failure.info.IsNone() {
		i.processPaymentOutcomeUnknown(rt)
		return
	}

	var (
		// idx is the index of the failure source, where 0 is our own
		// node and len(hops) is the final recipient.
		idx     int
		failMsg lnwire.FailureMessage
	)

	failure.info.WhenSome(
		func(r tlv.RecordT[tlv.TlvType0, paymentFailureInfo]) {
			idx = int(r.Val.sourceIdx.Val)
			failMsg = r.Val.msg.Val.FailureMessage
		},
	)

	// If the payment was to a blinded route and we received an error from
	// after the introduction point, handle this error separately - there
	// has been a protocol violation from the introduction node. This
	// penalty applies regardless of the error code that is returned.
	introIdx, isBlinded := introductionPointIndex(rt)
	if isBlinded && introIdx < idx {
		i.processPaymentOutcomeBadIntro(rt, introIdx, idx)
		return
	}

	switch idx {
	// We are the source of the failure.
	case 0:
		i.processPaymentOutcomeSelf(rt, failMsg)

	// A failure from the final hop was received.
	case len(rt.hops.Val):
		i.processPaymentOutcomeFinal(rt, failMsg)

	// An intermediate hop failed. Interpret the outcome, update reputation
	// and try again.
	default:
		i.processPaymentOutcomeIntermediate(rt, idx, failMsg)
	}
}
152

// processPaymentOutcomeBadIntro handles the case where we have made payment
// to a blinded route, but received an error from a node after the introduction
// node. This indicates that the introduction node is not obeying the route
// blinding specification, as we expect all errors from the introduction node
// to be sourced from it.
func (i *interpretedResult) processPaymentOutcomeBadIntro(route *mcRoute,
	introIdx, errSourceIdx int) {

	// We fail the introduction node for not obeying the specification.
	i.failNode(route, introIdx)

	// Other preceding channels in the route forwarded correctly. Note
	// that we do not assign success to the incoming link to the
	// introduction node because it has not handled the error correctly.
	if introIdx > 1 {
		i.successPairRange(route, 0, introIdx-2)
	}

	// If the source of the failure was from the final node, we also set
	// a final failure reason because the recipient can't process the
	// payment (independent of the introduction failing to convert the
	// error, we can't complete the payment if the last hop fails).
	if errSourceIdx == len(route.hops.Val) {
		i.finalFailureReason = &reasonError
	}
}
179

// processPaymentOutcomeSelf handles failures sent by ourselves.
func (i *interpretedResult) processPaymentOutcomeSelf(rt *mcRoute,
	failure lnwire.FailureMessage) {

	switch failure.(type) {

	// We receive a malformed htlc failure from our peer. We trust ourselves
	// to send the correct htlc, so our peer must be at fault.
	case *lnwire.FailInvalidOnionVersion,
		*lnwire.FailInvalidOnionHmac,
		*lnwire.FailInvalidOnionKey:

		i.failNode(rt, 1)

		// If this was a payment to a direct peer, we can stop trying.
		if len(rt.hops.Val) == 1 {
			i.finalFailureReason = &reasonError
		}

	// Any other failure originating from ourselves should be temporary and
	// caused by changing conditions between path finding and execution of
	// the payment. We just retry and trust that the information locally
	// available in the link has been updated.
	default:
		log.Warnf("Routing failure for local channel %v occurred",
			rt.hops.Val[0].channelID)
	}
}
208

// processPaymentOutcomeFinal handles failures sent by the final hop.
func (i *interpretedResult) processPaymentOutcomeFinal(route *mcRoute,
	failure lnwire.FailureMessage) {

	n := len(route.hops.Val)

	// failNode penalizes the final node, rewards the rest of the route,
	// and terminates the payment.
	failNode := func() {
		i.failNode(route, n)

		// Other channels in the route forwarded correctly.
		if n > 1 {
			i.successPairRange(route, 0, n-2)
		}

		i.finalFailureReason = &reasonError
	}

	// If a failure from the final node is received, we will fail the
	// payment in almost all cases. Only when the penultimate node sends an
	// incorrect htlc, we want to retry via another route. Invalid onion
	// failures are not expected, because the final node wouldn't be able to
	// encrypt that failure.
	switch failure.(type) {

	// Expiry or amount of the HTLC doesn't match the onion, try another
	// route.
	case *lnwire.FailFinalIncorrectCltvExpiry,
		*lnwire.FailFinalIncorrectHtlcAmount:

		// We trust ourselves. If this is a direct payment, we penalize
		// the final node and fail the payment.
		if n == 1 {
			i.failNode(route, n)
			i.finalFailureReason = &reasonError

			return
		}

		// Otherwise penalize the last pair of the route and retry.
		// Either the final node is at fault, or it gets sent a bad htlc
		// from its predecessor.
		i.failPair(route, n-1)

		// The other hops relayed correctly, so assign those pairs a
		// success result. At this point, n >= 2.
		i.successPairRange(route, 0, n-2)

	// We are using wrong payment hash or amount, fail the payment.
	case *lnwire.FailIncorrectPaymentAmount,
		*lnwire.FailIncorrectDetails:

		// Assign all pairs a success result, as the payment reached the
		// destination correctly.
		i.successPairRange(route, 0, n-1)

		i.finalFailureReason = &reasonIncorrectDetails

	// The HTLC that was extended to the final hop expires too soon. Fail
	// the payment, because we may be using the wrong final cltv delta.
	case *lnwire.FailFinalExpiryTooSoon:
		// TODO(roasbeef): can happen due to a race condition, try again
		// with recent block height

		// TODO(joostjager): can also happen because a node delayed
		// deliberately. What to penalize?
		i.finalFailureReason = &reasonIncorrectDetails

	case *lnwire.FailMPPTimeout:
		// Assign all pairs a success result, as the payment reached the
		// destination correctly. Continue the payment process.
		i.successPairRange(route, 0, n-1)

	// We do not expect to receive an invalid blinding error from the final
	// node in the route. This could erroneously happen in the following
	// cases:
	// 1. Unblinded node: misuses the error code.
	// 2. A receiving introduction node: erroneously sends the error code,
	//    as the spec indicates that receiving introduction nodes should
	//    use regular errors.
	//
	// Note that we expect the case where this error is sent from a node
	// after the introduction node to be handled elsewhere as this is part
	// of a more general class of errors where the introduction node has
	// failed to convert errors for the blinded route.
	case *lnwire.FailInvalidBlinding:
		failNode()

	// All other errors are considered terminal if coming from the
	// final hop. They indicate that something is wrong at the
	// recipient, so we do apply a penalty.
	default:
		failNode()
	}
}
303

// processPaymentOutcomeIntermediate handles failures sent by an intermediate
// hop.
//
//nolint:funlen
func (i *interpretedResult) processPaymentOutcomeIntermediate(route *mcRoute,
	errorSourceIdx int, failure lnwire.FailureMessage) {

	// reportOutgoing penalizes the pair outgoing from the error source.
	reportOutgoing := func() {
		i.failPair(
			route, errorSourceIdx,
		)
	}

	// reportOutgoingBalance penalizes the outgoing pair only for amounts
	// at or above the attempted amount (liquidity failure).
	reportOutgoingBalance := func() {
		i.failPairBalance(
			route, errorSourceIdx,
		)

		// All nodes up to the failing pair must have forwarded
		// successfully.
		i.successPairRange(route, 0, errorSourceIdx-1)
	}

	// reportIncoming penalizes the pair incoming to the error source.
	reportIncoming := func() {
		// We trust ourselves. If the error comes from the first hop, we
		// can penalize the whole node. In that case there is no
		// uncertainty as to which node to blame.
		if errorSourceIdx == 1 {
			i.failNode(route, errorSourceIdx)
			return
		}

		// Otherwise report the incoming pair.
		i.failPair(
			route, errorSourceIdx-1,
		)

		// All nodes up to the failing pair must have forwarded
		// successfully.
		if errorSourceIdx > 1 {
			i.successPairRange(route, 0, errorSourceIdx-2)
		}
	}

	// reportNode penalizes only the node that reported the failure.
	reportNode := func() {
		// Fail only the node that reported the failure.
		i.failNode(route, errorSourceIdx)

		// Other preceding channels in the route forwarded correctly.
		if errorSourceIdx > 1 {
			i.successPairRange(route, 0, errorSourceIdx-2)
		}
	}

	// reportAll penalizes every pair up to and including the error source.
	reportAll := func() {
		// We trust ourselves. If the error comes from the first hop, we
		// can penalize the whole node. In that case there is no
		// uncertainty as to which node to blame.
		if errorSourceIdx == 1 {
			i.failNode(route, errorSourceIdx)
			return
		}

		// Otherwise penalize all pairs up to the error source. This
		// includes our own outgoing connection.
		i.failPairRange(
			route, 0, errorSourceIdx-1,
		)
	}

	switch failure.(type) {

	// If a node reports onion payload corruption or an invalid version,
	// that node may be responsible, but it could also be that it is just
	// relaying a malformed htlc failure from it successor. By reporting the
	// outgoing channel set, we will surely hit the responsible node. At
	// this point, it is not possible that the node's predecessor corrupted
	// the onion blob. If the predecessor would have corrupted the payload,
	// the error source wouldn't have been able to encrypt this failure
	// message for us.
	case *lnwire.FailInvalidOnionVersion,
		*lnwire.FailInvalidOnionHmac,
		*lnwire.FailInvalidOnionKey:

		reportOutgoing()

	// If InvalidOnionPayload is received, we penalize only the reporting
	// node. We know the preceding hop didn't corrupt the onion, since the
	// reporting node is able to send the failure. We assume that we
	// constructed a valid onion payload and that the failure is most likely
	// an unknown required type or a bug in their implementation.
	case *lnwire.InvalidOnionPayload:
		reportNode()

	// If the next hop in the route wasn't known or offline, we'll only
	// penalize the channel set which we attempted to route over. This is
	// conservative, and it can handle faulty channels between nodes
	// properly. Additionally, this guards against routing nodes returning
	// errors in order to attempt to black list another node.
	case *lnwire.FailUnknownNextPeer:
		reportOutgoing()

	// Some implementations use this error when the next hop is offline, so we
	// do the same as FailUnknownNextPeer and also process the channel update.
	case *lnwire.FailChannelDisabled:

		// Set the node pair for which a channel update may be out of
		// date. The second chance logic uses the policyFailure field.
		i.policyFailure = &DirectedNodePair{
			From: route.hops.Val[errorSourceIdx-1].pubKeyBytes.Val,
			To:   route.hops.Val[errorSourceIdx].pubKeyBytes.Val,
		}

		reportOutgoing()

		// All nodes up to the failing pair must have forwarded
		// successfully.
		i.successPairRange(route, 0, errorSourceIdx-1)

	// If we get a permanent channel, we'll prune the channel set in both
	// directions and continue with the rest of the routes.
	case *lnwire.FailPermanentChannelFailure:
		reportOutgoing()

	// When an HTLC parameter is incorrect, the node sending the error may
	// be doing something wrong. But it could also be that its predecessor
	// is intentionally modifying the htlc parameters that we instructed it
	// via the hop payload. Therefore we penalize the incoming node pair. A
	// third cause of this error may be that we have an out of date channel
	// update. This is handled by the second chance logic up in mission
	// control.
	case *lnwire.FailAmountBelowMinimum,
		*lnwire.FailFeeInsufficient,
		*lnwire.FailIncorrectCltvExpiry:

		// Set the node pair for which a channel update may be out of
		// date. The second chance logic uses the policyFailure field.
		i.policyFailure = &DirectedNodePair{
			From: route.hops.Val[errorSourceIdx-1].pubKeyBytes.Val,
			To:   route.hops.Val[errorSourceIdx].pubKeyBytes.Val,
		}

		// We report incoming channel. If a second pair is granted in
		// mission control, this report is ignored.
		reportIncoming()

	// If the outgoing channel doesn't have enough capacity, we penalize.
	// But we penalize only in a single direction and only for amounts
	// greater than the attempted amount.
	case *lnwire.FailTemporaryChannelFailure:
		reportOutgoingBalance()

	// If FailExpiryTooSoon is received, there must have been some delay
	// along the path. We can't know which node is causing the delay, so we
	// penalize all of them up to the error source.
	//
	// Alternatively it could also be that we ourselves have fallen behind
	// somehow. We ignore that case for now.
	case *lnwire.FailExpiryTooSoon:
		reportAll()

	// We only expect to get FailInvalidBlinding from an introduction node
	// in a blinded route. The introduction node in a blinded route is
	// always responsible for reporting errors for the blinded portion of
	// the route (to protect the privacy of the members of the route), so
	// we need to be careful not to unfairly "shoot the messenger".
	//
	// The introduction node has no incentive to falsely report errors to
	// sabotage the blinded route because:
	//   1. Its ability to route this payment is strictly tied to the
	//      blinded route.
	//   2. The pubkeys in the blinded route are ephemeral, so doing so
	//      will have no impact on the nodes beyond the individual payment.
	//
	// Here we handle a few cases where we could unexpectedly receive this
	// error:
	// 1. Outside of a blinded route: erring node is not spec compliant.
	// 2. Before the introduction point: erring node is not spec compliant.
	//
	// Note that we expect the case where this error is sent from a node
	// after the introduction node to be handled elsewhere as this is part
	// of a more general class of errors where the introduction node has
	// failed to convert errors for the blinded route.
	case *lnwire.FailInvalidBlinding:
		introIdx, isBlinded := introductionPointIndex(route)

		// Deal with cases where a node has incorrectly returned a
		// blinding error:
		// 1. A node before the introduction point returned it.
		// 2. A node in a non-blinded route returned it.
		if errorSourceIdx < introIdx || !isBlinded {
			reportNode()
			return
		}

		// Otherwise, the error was at the introduction node. All
		// nodes up until the introduction node forwarded correctly,
		// so we award them as successful.
		if introIdx >= 1 {
			i.successPairRange(route, 0, introIdx-1)
		}

		// If the hop after the introduction node that sent us an
		// error is the final recipient, then we finally fail the
		// payment because the receiver has generated a blinded route
		// that they're unable to use. We have this special case so
		// that we don't penalize the introduction node, and there is
		// no point in retrying the payment while LND only supports
		// one blinded route per payment.
		//
		// Note that if LND is extended to support multiple blinded
		// routes, this will terminate the payment without re-trying
		// the other routes.
		if introIdx == len(route.hops.Val)-1 {
			i.finalFailureReason = &reasonError
		} else {
			// If there are other hops between the recipient and
			// introduction node, then we just penalize the last
			// hop in the blinded route to minimize the storage of
			// results for ephemeral keys.
			i.failPairBalance(route, len(route.hops.Val)-1)
		}

	// In all other cases, we penalize the reporting node. These are all
	// failures that should not happen.
	default:
		i.failNode(route, errorSourceIdx)
	}
}
533

534
// introductionPointIndex returns the index of an introduction point in a
535
// route, using the same indexing in the route that we use for errorSourceIdx
536
// (i.e., that we consider our own node to be at index zero). A boolean is
537
// returned to indicate whether the route contains a blinded portion at all.
538
func introductionPointIndex(route *mcRoute) (int, bool) {
4✔
539
        for i, hop := range route.hops.Val {
8✔
540
                if hop.hasBlindingPoint.IsSome() {
8✔
541
                        return i + 1, true
4✔
542
                }
4✔
543
        }
544

545
        return 0, false
4✔
546
}
547

548
// processPaymentOutcomeUnknown processes a payment outcome for which no failure
549
// message or source is available.
550
func (i *interpretedResult) processPaymentOutcomeUnknown(route *mcRoute) {
×
551
        n := len(route.hops.Val)
×
552

×
553
        // If this is a direct payment, the destination must be at fault.
×
554
        if n == 1 {
×
555
                i.failNode(route, n)
×
556
                i.finalFailureReason = &reasonError
×
557
                return
×
558
        }
×
559

560
        // Otherwise penalize all channels in the route to make sure the
561
        // responsible node is at least hit too. We even penalize the connection
562
        // to our own peer, because that peer could also be responsible.
563
        i.failPairRange(route, 0, n-1)
×
564
}
565

566
// extractMCRoute extracts the fields required by MC from the Route struct to
567
// create the more minimal mcRoute struct.
568
func extractMCRoute(r *route.Route) *mcRoute {
4✔
569
        return &mcRoute{
4✔
570
                sourcePubKey: tlv.NewRecordT[tlv.TlvType0](r.SourcePubKey),
4✔
571
                totalAmount:  tlv.NewRecordT[tlv.TlvType1](r.TotalAmount),
4✔
572
                hops: tlv.NewRecordT[tlv.TlvType2](
4✔
573
                        extractMCHops(r.Hops),
4✔
574
                ),
4✔
575
        }
4✔
576
}
4✔
577

578
// extractMCHops extracts the Hop fields that MC actually uses from a slice of
579
// Hops.
580
func extractMCHops(hops []*route.Hop) mcHops {
4✔
581
        return fn.Map(extractMCHop, hops)
4✔
582
}
4✔
583

584
// extractMCHop extracts the Hop fields that MC actually uses from a Hop.
585
func extractMCHop(hop *route.Hop) *mcHop {
4✔
586
        h := mcHop{
4✔
587
                channelID: tlv.NewPrimitiveRecord[tlv.TlvType0](
4✔
588
                        hop.ChannelID,
4✔
589
                ),
4✔
590
                pubKeyBytes: tlv.NewRecordT[tlv.TlvType1](hop.PubKeyBytes),
4✔
591
                amtToFwd:    tlv.NewRecordT[tlv.TlvType2](hop.AmtToForward),
4✔
592
        }
4✔
593

4✔
594
        if hop.BlindingPoint != nil {
8✔
595
                h.hasBlindingPoint = tlv.SomeRecordT(
4✔
596
                        tlv.NewRecordT[tlv.TlvType3](lnwire.TrueBoolean{}),
4✔
597
                )
4✔
598
        }
4✔
599

600
        if hop.CustomRecords != nil {
8✔
601
                h.hasCustomRecords = tlv.SomeRecordT(
4✔
602
                        tlv.NewRecordT[tlv.TlvType4](lnwire.TrueBoolean{}),
4✔
603
                )
4✔
604
        }
4✔
605

606
        return &h
4✔
607
}
608

// mcRoute holds the bare minimum info about a payment attempt route that MC
// requires.
type mcRoute struct {
	// sourcePubKey is the pubkey of the node the route starts from.
	sourcePubKey tlv.RecordT[tlv.TlvType0, route.Vertex]

	// totalAmount is the total amount sent via this route.
	totalAmount tlv.RecordT[tlv.TlvType1, lnwire.MilliSatoshi]

	// hops is the minimal per-hop info required by mission control.
	hops tlv.RecordT[tlv.TlvType2, mcHops]
}
616

617
// Record returns a TLV record that can be used to encode/decode an mcRoute
618
// to/from a TLV stream.
619
func (r *mcRoute) Record() tlv.Record {
4✔
620
        recordSize := func() uint64 {
8✔
621
                var (
4✔
622
                        b   bytes.Buffer
4✔
623
                        buf [8]byte
4✔
624
                )
4✔
625
                if err := encodeMCRoute(&b, r, &buf); err != nil {
4✔
626
                        panic(err)
×
627
                }
628

629
                return uint64(len(b.Bytes()))
4✔
630
        }
631

632
        return tlv.MakeDynamicRecord(
4✔
633
                0, r, recordSize, encodeMCRoute, decodeMCRoute,
4✔
634
        )
4✔
635
}
636

637
func encodeMCRoute(w io.Writer, val interface{}, _ *[8]byte) error {
4✔
638
        if v, ok := val.(*mcRoute); ok {
8✔
639
                return serializeRoute(w, v)
4✔
640
        }
4✔
641

642
        return tlv.NewTypeForEncodingErr(val, "routing.mcRoute")
×
643
}
644

645
func decodeMCRoute(r io.Reader, val interface{}, _ *[8]byte, l uint64) error {
4✔
646
        if v, ok := val.(*mcRoute); ok {
8✔
647
                route, err := deserializeRoute(io.LimitReader(r, int64(l)))
4✔
648
                if err != nil {
4✔
649
                        return err
×
650
                }
×
651

652
                *v = *route
4✔
653

4✔
654
                return nil
4✔
655
        }
656

657
        return tlv.NewTypeForDecodingErr(val, "routing.mcRoute", l, l)
×
658
}
659

// mcHops is a list of mcHop records.
type mcHops []*mcHop
662

663
// Record returns a TLV record that can be used to encode/decode a list of
664
// mcHop to/from a TLV stream.
665
func (h *mcHops) Record() tlv.Record {
4✔
666
        recordSize := func() uint64 {
8✔
667
                var (
4✔
668
                        b   bytes.Buffer
4✔
669
                        buf [8]byte
4✔
670
                )
4✔
671
                if err := encodeMCHops(&b, h, &buf); err != nil {
4✔
672
                        panic(err)
×
673
                }
674

675
                return uint64(len(b.Bytes()))
4✔
676
        }
677

678
        return tlv.MakeDynamicRecord(
4✔
679
                0, h, recordSize, encodeMCHops, decodeMCHops,
4✔
680
        )
4✔
681
}
682

683
func encodeMCHops(w io.Writer, val interface{}, buf *[8]byte) error {
4✔
684
        if v, ok := val.(*mcHops); ok {
8✔
685
                // Encode the number of hops as a var int.
4✔
686
                if err := tlv.WriteVarInt(w, uint64(len(*v)), buf); err != nil {
4✔
687
                        return err
×
688
                }
×
689

690
                // With that written out, we'll now encode the entries
691
                // themselves as a sub-TLV record, which includes its _own_
692
                // inner length prefix.
693
                for _, hop := range *v {
8✔
694
                        var hopBytes bytes.Buffer
4✔
695
                        if err := serializeHop(&hopBytes, hop); err != nil {
4✔
696
                                return err
×
697
                        }
×
698

699
                        // We encode the record with a varint length followed by
700
                        // the _raw_ TLV bytes.
701
                        tlvLen := uint64(len(hopBytes.Bytes()))
4✔
702
                        if err := tlv.WriteVarInt(w, tlvLen, buf); err != nil {
4✔
703
                                return err
×
704
                        }
×
705

706
                        if _, err := w.Write(hopBytes.Bytes()); err != nil {
4✔
707
                                return err
×
708
                        }
×
709
                }
710

711
                return nil
4✔
712
        }
713

714
        return tlv.NewTypeForEncodingErr(val, "routing.mcHops")
×
715
}
716

717
func decodeMCHops(r io.Reader, val interface{}, buf *[8]byte, l uint64) error {
4✔
718
        if v, ok := val.(*mcHops); ok {
8✔
719
                // First, we'll decode the varint that encodes how many hops
4✔
720
                // are encoded in the stream.
4✔
721
                numHops, err := tlv.ReadVarInt(r, buf)
4✔
722
                if err != nil {
4✔
723
                        return err
×
724
                }
×
725

726
                // Now that we know how many records we'll need to read, we can
727
                // iterate and read them all out in series.
728
                for i := uint64(0); i < numHops; i++ {
8✔
729
                        // Read out the varint that encodes the size of this
4✔
730
                        // inner TLV record.
4✔
731
                        hopSize, err := tlv.ReadVarInt(r, buf)
4✔
732
                        if err != nil {
4✔
733
                                return err
×
734
                        }
×
735

736
                        // Using this information, we'll create a new limited
737
                        // reader that'll return an EOF once the end has been
738
                        // reached so the stream stops consuming bytes.
739
                        innerTlvReader := &io.LimitedReader{
4✔
740
                                R: r,
4✔
741
                                N: int64(hopSize),
4✔
742
                        }
4✔
743

4✔
744
                        hop, err := deserializeHop(innerTlvReader)
4✔
745
                        if err != nil {
4✔
746
                                return err
×
747
                        }
×
748

749
                        *v = append(*v, hop)
4✔
750
                }
751

752
                return nil
4✔
753
        }
754

755
        return tlv.NewTypeForDecodingErr(val, "routing.mcHops", l, l)
×
756
}
757

758
// mcHop holds the bare minimum info about a payment attempt route hop that MC
// requires.
type mcHop struct {
	// channelID is the ID of the channel traversed by this hop.
	channelID        tlv.RecordT[tlv.TlvType0, uint64]

	// pubKeyBytes is the serialized public key of the node this hop
	// forwards to.
	pubKeyBytes      tlv.RecordT[tlv.TlvType1, route.Vertex]

	// amtToFwd is the amount this hop forwards onward.
	amtToFwd         tlv.RecordT[tlv.TlvType2, lnwire.MilliSatoshi]

	// hasBlindingPoint, when present, records that the original route hop
	// carried a blinding point (presumably a blinded-route hop — confirm
	// against the route extraction code).
	hasBlindingPoint tlv.OptionalRecordT[tlv.TlvType3, lnwire.TrueBoolean]

	// hasCustomRecords, when present, records that the original route hop
	// carried custom TLV records.
	hasCustomRecords tlv.OptionalRecordT[tlv.TlvType4, lnwire.TrueBoolean]
}
767

768
// failNode marks the node indicated by idx in the route as failed. It also
769
// marks the incoming and outgoing channels of the node as failed. This function
770
// intentionally panics when the self node is failed.
771
func (i *interpretedResult) failNode(rt *mcRoute, idx int) {
4✔
772
        // Mark the node as failing.
4✔
773
        i.nodeFailure = &rt.hops.Val[idx-1].pubKeyBytes.Val
4✔
774

4✔
775
        // Mark the incoming connection as failed for the node. We intent to
4✔
776
        // penalize as much as we can for a node level failure, including future
4✔
777
        // outgoing traffic for this connection. The pair as it is returned by
4✔
778
        // getPair is penalized in the original and the reversed direction. Note
4✔
779
        // that this will also affect the score of the failing node's peers.
4✔
780
        // This is necessary to prevent future routes from keep going into the
4✔
781
        // same node again.
4✔
782
        incomingChannelIdx := idx - 1
4✔
783
        inPair, _ := getPair(rt, incomingChannelIdx)
4✔
784
        i.pairResults[inPair] = failPairResult(0)
4✔
785
        i.pairResults[inPair.Reverse()] = failPairResult(0)
4✔
786

4✔
787
        // If not the ultimate node, mark the outgoing connection as failed for
4✔
788
        // the node.
4✔
789
        if idx < len(rt.hops.Val) {
8✔
790
                outgoingChannelIdx := idx
4✔
791
                outPair, _ := getPair(rt, outgoingChannelIdx)
4✔
792
                i.pairResults[outPair] = failPairResult(0)
4✔
793
                i.pairResults[outPair.Reverse()] = failPairResult(0)
4✔
794
        }
4✔
795
}
796

797
// failPairRange marks the node pairs from node fromIdx to node toIdx
// (inclusive) as failed in both directions.
func (i *interpretedResult) failPairRange(rt *mcRoute, fromIdx, toIdx int) {
	for idx := fromIdx; idx <= toIdx; idx++ {
		i.failPair(rt, idx)
	}
}
804

805
// failPair marks a pair as failed in both directions.
806
func (i *interpretedResult) failPair(rt *mcRoute, idx int) {
4✔
807
        pair, _ := getPair(rt, idx)
4✔
808

4✔
809
        // Report pair in both directions without a minimum penalization amount.
4✔
810
        i.pairResults[pair] = failPairResult(0)
4✔
811
        i.pairResults[pair.Reverse()] = failPairResult(0)
4✔
812
}
4✔
813

814
// failPairBalance marks a pair as failed with a minimum penalization amount.
815
func (i *interpretedResult) failPairBalance(rt *mcRoute, channelIdx int) {
4✔
816
        pair, amt := getPair(rt, channelIdx)
4✔
817

4✔
818
        i.pairResults[pair] = failPairResult(amt)
4✔
819
}
4✔
820

821
// successPairRange marks the node pairs from node fromIdx to node toIdx as
822
// succeeded.
823
func (i *interpretedResult) successPairRange(rt *mcRoute, fromIdx, toIdx int) {
4✔
824
        for idx := fromIdx; idx <= toIdx; idx++ {
8✔
825
                pair, amt := getPair(rt, idx)
4✔
826

4✔
827
                i.pairResults[pair] = successPairResult(amt)
4✔
828
        }
4✔
829
}
830

831
// getPair returns a node pair from the route and the amount passed between that
832
// pair.
833
func getPair(rt *mcRoute, channelIdx int) (DirectedNodePair,
834
        lnwire.MilliSatoshi) {
4✔
835

4✔
836
        nodeTo := rt.hops.Val[channelIdx].pubKeyBytes.Val
4✔
837
        var (
4✔
838
                nodeFrom route.Vertex
4✔
839
                amt      lnwire.MilliSatoshi
4✔
840
        )
4✔
841

4✔
842
        if channelIdx == 0 {
8✔
843
                nodeFrom = rt.sourcePubKey.Val
4✔
844
                amt = rt.totalAmount.Val
4✔
845
        } else {
8✔
846
                nodeFrom = rt.hops.Val[channelIdx-1].pubKeyBytes.Val
4✔
847
                amt = rt.hops.Val[channelIdx-1].amtToFwd.Val
4✔
848
        }
4✔
849

850
        pair := NewDirectedNodePair(nodeFrom, nodeTo)
4✔
851

4✔
852
        return pair, amt
4✔
853
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc