
lightningnetwork / lnd / build 12058234999

27 Nov 2024 09:06PM UTC coverage: 57.847% (-1.1%) from 58.921%

Pull Request #9148: DynComms [2/n]: lnwire: add authenticated wire messages for Dyn*
ProofOfKeags (via github): lnwire: convert DynPropose and DynCommit to use typed tlv records

142 of 177 new or added lines in 4 files covered (80.23%).
19365 existing lines in 251 files now uncovered.
100876 of 174383 relevant lines covered (57.85%).
25338.28 hits per line.

Source File: /routing/result_interpretation.go (86.06% of file lines covered)

package routing

import (
        "bytes"
        "fmt"
        "io"

        "github.com/lightningnetwork/lnd/channeldb"
        "github.com/lightningnetwork/lnd/fn"
        "github.com/lightningnetwork/lnd/lnwire"
        "github.com/lightningnetwork/lnd/routing/route"
        "github.com/lightningnetwork/lnd/tlv"
)

// Instantiate variables to allow taking a reference from the failure reason.
var (
        reasonError            = channeldb.FailureReasonError
        reasonIncorrectDetails = channeldb.FailureReasonPaymentDetails
)

// pairResult contains the result of the interpretation of a payment attempt for
// a specific node pair.
type pairResult struct {
        // amt is the amount that was forwarded for this pair. Can be set to
        // zero for failures that are amount independent.
        amt lnwire.MilliSatoshi

        // success indicates whether the payment attempt was successful through
        // this pair.
        success bool
}

// failPairResult creates a new result struct for a failure.
func failPairResult(minPenalizeAmt lnwire.MilliSatoshi) pairResult {
        return pairResult{
                amt: minPenalizeAmt,
        }
}

// successPairResult creates a new result struct for a success.
func successPairResult(successAmt lnwire.MilliSatoshi) pairResult {
        return pairResult{
                success: true,
                amt:     successAmt,
        }
}

// String returns the human-readable representation of a pair result.
func (p pairResult) String() string {
        var resultType string
        if p.success {
                resultType = "success"
        } else {
                resultType = "failed"
        }

        return fmt.Sprintf("%v (amt=%v)", resultType, p.amt)
}

// interpretedResult contains the result of the interpretation of a payment
// attempt.
type interpretedResult struct {
        // nodeFailure points to a node pubkey if all channels of that node are
        // responsible for the result.
        nodeFailure *route.Vertex

        // pairResults contains a map of node pairs for which we have a result.
        pairResults map[DirectedNodePair]pairResult

        // finalFailureReason is set to a non-nil value if it makes no more
        // sense to start another payment attempt. It will contain the reason
        // why.
        finalFailureReason *channeldb.FailureReason

        // policyFailure is set to a node pair if there is a policy failure on
        // that connection. This is used to control the second chance logic for
        // policy failures.
        policyFailure *DirectedNodePair
}

// interpretResult interprets a payment outcome and returns an object that
// contains information required to update mission control.
func interpretResult(rt *mcRoute,
        failure fn.Option[paymentFailure]) *interpretedResult {

        i := &interpretedResult{
                pairResults: make(map[DirectedNodePair]pairResult),
        }

        return fn.ElimOption(failure, func() *interpretedResult {
                i.processSuccess(rt)

                return i
        }, func(info paymentFailure) *interpretedResult {
                i.processFail(rt, info)

                return i
        })
}

// processSuccess processes a successful payment attempt.
func (i *interpretedResult) processSuccess(route *mcRoute) {
        // For successes, all nodes must have acted in the right way. Therefore
        // we mark all of them with a success result.
        i.successPairRange(route, 0, len(route.hops.Val)-1)
}

// processFail processes a failed payment attempt.
func (i *interpretedResult) processFail(rt *mcRoute, failure paymentFailure) {
        if failure.info.IsNone() {
                i.processPaymentOutcomeUnknown(rt)
                return
        }

        var (
                idx     int
                failMsg lnwire.FailureMessage
        )

        failure.info.WhenSome(
                func(r tlv.RecordT[tlv.TlvType0, paymentFailureInfo]) {
                        idx = int(r.Val.sourceIdx.Val)
                        failMsg = r.Val.msg.Val.FailureMessage
                },
        )

        // If the payment was to a blinded route and we received an error from
        // after the introduction point, handle this error separately - there
        // has been a protocol violation from the introduction node. This
        // penalty applies regardless of the error code that is returned.
        introIdx, isBlinded := introductionPointIndex(rt)
        if isBlinded && introIdx < idx {
                i.processPaymentOutcomeBadIntro(rt, introIdx, idx)
                return
        }

        switch idx {
        // We are the source of the failure.
        case 0:
                i.processPaymentOutcomeSelf(rt, failMsg)

        // A failure from the final hop was received.
        case len(rt.hops.Val):
                i.processPaymentOutcomeFinal(rt, failMsg)

        // An intermediate hop failed. Interpret the outcome, update reputation
        // and try again.
        default:
                i.processPaymentOutcomeIntermediate(rt, idx, failMsg)
        }
}

// processPaymentOutcomeBadIntro handles the case where we have made a payment
// to a blinded route, but received an error from a node after the introduction
// node. This indicates that the introduction node is not obeying the route
// blinding specification, as we expect all errors from the introduction node
// to be sourced from it.
func (i *interpretedResult) processPaymentOutcomeBadIntro(route *mcRoute,
        introIdx, errSourceIdx int) {

        // We fail the introduction node for not obeying the specification.
        i.failNode(route, introIdx)

        // Other preceding channels in the route forwarded correctly. Note
        // that we do not assign success to the incoming link to the
        // introduction node because it has not handled the error correctly.
        if introIdx > 1 {
                i.successPairRange(route, 0, introIdx-2)
        }

        // If the source of the failure was from the final node, we also set
        // a final failure reason because the recipient can't process the
        // payment (independent of the introduction failing to convert the
        // error, we can't complete the payment if the last hop fails).
        if errSourceIdx == len(route.hops.Val) {
                i.finalFailureReason = &reasonError
        }
}

// processPaymentOutcomeSelf handles failures sent by ourselves.
func (i *interpretedResult) processPaymentOutcomeSelf(rt *mcRoute,
        failure lnwire.FailureMessage) {

        switch failure.(type) {

        // We receive a malformed htlc failure from our peer. We trust ourselves
        // to send the correct htlc, so our peer must be at fault.
        case *lnwire.FailInvalidOnionVersion,
                *lnwire.FailInvalidOnionHmac,
                *lnwire.FailInvalidOnionKey:

                i.failNode(rt, 1)

                // If this was a payment to a direct peer, we can stop trying.
                if len(rt.hops.Val) == 1 {
                        i.finalFailureReason = &reasonError
                }

        // Any other failure originating from ourselves should be temporary and
        // caused by changing conditions between path finding and execution of
        // the payment. We just retry and trust that the information locally
        // available in the link has been updated.
        default:
                log.Warnf("Routing failure for local channel %v occurred",
                        rt.hops.Val[0].channelID)
        }
}

// processPaymentOutcomeFinal handles failures sent by the final hop.
func (i *interpretedResult) processPaymentOutcomeFinal(route *mcRoute,
        failure lnwire.FailureMessage) {

        n := len(route.hops.Val)

        failNode := func() {
                i.failNode(route, n)

                // Other channels in the route forwarded correctly.
                if n > 1 {
                        i.successPairRange(route, 0, n-2)
                }

                i.finalFailureReason = &reasonError
        }

        // If a failure from the final node is received, we will fail the
        // payment in almost all cases. Only when the penultimate node sends an
        // incorrect htlc, we want to retry via another route. Invalid onion
        // failures are not expected, because the final node wouldn't be able to
        // encrypt that failure.
        switch failure.(type) {

        // Expiry or amount of the HTLC doesn't match the onion, try another
        // route.
        case *lnwire.FailFinalIncorrectCltvExpiry,
                *lnwire.FailFinalIncorrectHtlcAmount:

                // We trust ourselves. If this is a direct payment, we penalize
                // the final node and fail the payment.
                if n == 1 {
                        i.failNode(route, n)
                        i.finalFailureReason = &reasonError

                        return
                }

                // Otherwise penalize the last pair of the route and retry.
                // Either the final node is at fault, or it gets sent a bad htlc
                // from its predecessor.
                i.failPair(route, n-1)

                // The other hops relayed correctly, so assign those pairs a
                // success result. At this point, n >= 2.
                i.successPairRange(route, 0, n-2)

        // We are using wrong payment hash or amount, fail the payment.
        case *lnwire.FailIncorrectPaymentAmount,
                *lnwire.FailIncorrectDetails:

                // Assign all pairs a success result, as the payment reached the
                // destination correctly.
                i.successPairRange(route, 0, n-1)

                i.finalFailureReason = &reasonIncorrectDetails

        // The HTLC that was extended to the final hop expires too soon. Fail
        // the payment, because we may be using the wrong final cltv delta.
        case *lnwire.FailFinalExpiryTooSoon:
                // TODO(roasbeef): can happen due to a race condition, try again
                // with recent block height

                // TODO(joostjager): can also happen because a node delayed
                // deliberately. What to penalize?
                i.finalFailureReason = &reasonIncorrectDetails

        case *lnwire.FailMPPTimeout:
                // Assign all pairs a success result, as the payment reached the
                // destination correctly. Continue the payment process.
                i.successPairRange(route, 0, n-1)

        // We do not expect to receive an invalid blinding error from the final
        // node in the route. This could erroneously happen in the following
        // cases:
        // 1. Unblinded node: misuses the error code.
        // 2. A receiving introduction node: erroneously sends the error code,
        //    as the spec indicates that receiving introduction nodes should
        //    use regular errors.
        //
        // Note that we expect the case where this error is sent from a node
        // after the introduction node to be handled elsewhere as this is part
        // of a more general class of errors where the introduction node has
        // failed to convert errors for the blinded route.
        case *lnwire.FailInvalidBlinding:
                failNode()

        // All other errors are considered terminal if coming from the
        // final hop. They indicate that something is wrong at the
        // recipient, so we do apply a penalty.
        default:
                failNode()
        }
}

// processPaymentOutcomeIntermediate handles failures sent by an intermediate
// hop.
//
//nolint:funlen
func (i *interpretedResult) processPaymentOutcomeIntermediate(route *mcRoute,
        errorSourceIdx int, failure lnwire.FailureMessage) {

        reportOutgoing := func() {
                i.failPair(
                        route, errorSourceIdx,
                )
        }

        reportOutgoingBalance := func() {
                i.failPairBalance(
                        route, errorSourceIdx,
                )

                // All nodes up to the failing pair must have forwarded
                // successfully.
                i.successPairRange(route, 0, errorSourceIdx-1)
        }

        reportIncoming := func() {
                // We trust ourselves. If the error comes from the first hop, we
                // can penalize the whole node. In that case there is no
                // uncertainty as to which node to blame.
                if errorSourceIdx == 1 {
                        i.failNode(route, errorSourceIdx)
                        return
                }

                // Otherwise report the incoming pair.
                i.failPair(
                        route, errorSourceIdx-1,
                )

                // All nodes up to the failing pair must have forwarded
                // successfully.
                if errorSourceIdx > 1 {
                        i.successPairRange(route, 0, errorSourceIdx-2)
                }
        }

        reportNode := func() {
                // Fail only the node that reported the failure.
                i.failNode(route, errorSourceIdx)

                // Other preceding channels in the route forwarded correctly.
                if errorSourceIdx > 1 {
                        i.successPairRange(route, 0, errorSourceIdx-2)
                }
        }

        reportAll := func() {
                // We trust ourselves. If the error comes from the first hop, we
                // can penalize the whole node. In that case there is no
                // uncertainty as to which node to blame.
                if errorSourceIdx == 1 {
                        i.failNode(route, errorSourceIdx)
                        return
                }

                // Otherwise penalize all pairs up to the error source. This
                // includes our own outgoing connection.
                i.failPairRange(
                        route, 0, errorSourceIdx-1,
                )
        }

        switch failure.(type) {

        // If a node reports onion payload corruption or an invalid version,
        // that node may be responsible, but it could also be that it is just
        // relaying a malformed htlc failure from its successor. By reporting the
        // outgoing channel set, we will surely hit the responsible node. At
        // this point, it is not possible that the node's predecessor corrupted
        // the onion blob. If the predecessor would have corrupted the payload,
        // the error source wouldn't have been able to encrypt this failure
        // message for us.
        case *lnwire.FailInvalidOnionVersion,
                *lnwire.FailInvalidOnionHmac,
                *lnwire.FailInvalidOnionKey:

                reportOutgoing()

        // If InvalidOnionPayload is received, we penalize only the reporting
        // node. We know the preceding hop didn't corrupt the onion, since the
        // reporting node is able to send the failure. We assume that we
        // constructed a valid onion payload and that the failure is most likely
        // an unknown required type or a bug in their implementation.
        case *lnwire.InvalidOnionPayload:
                reportNode()

        // If the next hop in the route wasn't known or offline, we'll only
        // penalize the channel set which we attempted to route over. This is
        // conservative, and it can handle faulty channels between nodes
        // properly. Additionally, this guards against routing nodes returning
        // errors in order to attempt to blacklist another node.
        case *lnwire.FailUnknownNextPeer:
                reportOutgoing()

        // Some implementations use this error when the next hop is offline, so we
        // do the same as FailUnknownNextPeer and also process the channel update.
        case *lnwire.FailChannelDisabled:

                // Set the node pair for which a channel update may be out of
                // date. The second chance logic uses the policyFailure field.
                i.policyFailure = &DirectedNodePair{
                        From: route.hops.Val[errorSourceIdx-1].pubKeyBytes.Val,
                        To:   route.hops.Val[errorSourceIdx].pubKeyBytes.Val,
                }

                reportOutgoing()

                // All nodes up to the failing pair must have forwarded
                // successfully.
                i.successPairRange(route, 0, errorSourceIdx-1)

        // If we get a permanent channel failure, we'll prune the channel set in both
        // directions and continue with the rest of the routes.
        case *lnwire.FailPermanentChannelFailure:
                reportOutgoing()

        // When an HTLC parameter is incorrect, the node sending the error may
        // be doing something wrong. But it could also be that its predecessor
        // is intentionally modifying the htlc parameters that we instructed it
        // via the hop payload. Therefore we penalize the incoming node pair. A
        // third cause of this error may be that we have an out of date channel
        // update. This is handled by the second chance logic up in mission
        // control.
        case *lnwire.FailAmountBelowMinimum,
                *lnwire.FailFeeInsufficient,
                *lnwire.FailIncorrectCltvExpiry:

                // Set the node pair for which a channel update may be out of
                // date. The second chance logic uses the policyFailure field.
                i.policyFailure = &DirectedNodePair{
                        From: route.hops.Val[errorSourceIdx-1].pubKeyBytes.Val,
                        To:   route.hops.Val[errorSourceIdx].pubKeyBytes.Val,
                }

                // We report the incoming channel. If a second chance is granted
                // in mission control, this report is ignored.
                reportIncoming()

        // If the outgoing channel doesn't have enough capacity, we penalize.
        // But we penalize only in a single direction and only for amounts
        // greater than the attempted amount.
        case *lnwire.FailTemporaryChannelFailure:
                reportOutgoingBalance()

        // If FailExpiryTooSoon is received, there must have been some delay
        // along the path. We can't know which node is causing the delay, so we
        // penalize all of them up to the error source.
        //
        // Alternatively it could also be that we ourselves have fallen behind
        // somehow. We ignore that case for now.
        case *lnwire.FailExpiryTooSoon:
                reportAll()

        // We only expect to get FailInvalidBlinding from an introduction node
        // in a blinded route. The introduction node in a blinded route is
        // always responsible for reporting errors for the blinded portion of
        // the route (to protect the privacy of the members of the route), so
        // we need to be careful not to unfairly "shoot the messenger".
        //
        // The introduction node has no incentive to falsely report errors to
        // sabotage the blinded route because:
        //   1. Its ability to route this payment is strictly tied to the
        //      blinded route.
        //   2. The pubkeys in the blinded route are ephemeral, so doing so
        //      will have no impact on the nodes beyond the individual payment.
        //
        // Here we handle a few cases where we could unexpectedly receive this
        // error:
        // 1. Outside of a blinded route: erring node is not spec compliant.
        // 2. Before the introduction point: erring node is not spec compliant.
        //
        // Note that we expect the case where this error is sent from a node
        // after the introduction node to be handled elsewhere as this is part
        // of a more general class of errors where the introduction node has
        // failed to convert errors for the blinded route.
        case *lnwire.FailInvalidBlinding:
                introIdx, isBlinded := introductionPointIndex(route)

                // Deal with cases where a node has incorrectly returned a
                // blinding error:
                // 1. A node before the introduction point returned it.
                // 2. A node in a non-blinded route returned it.
                if errorSourceIdx < introIdx || !isBlinded {
                        reportNode()
                        return
                }

                // Otherwise, the error was at the introduction node. All
                // nodes up until the introduction node forwarded correctly,
                // so we award them as successful.
                if introIdx >= 1 {
                        i.successPairRange(route, 0, introIdx-1)
                }

                // If the hop after the introduction node that sent us an
                // error is the final recipient, then we finally fail the
                // payment because the receiver has generated a blinded route
                // that they're unable to use. We have this special case so
                // that we don't penalize the introduction node, and there is
                // no point in retrying the payment while LND only supports
                // one blinded route per payment.
                //
                // Note that if LND is extended to support multiple blinded
                // routes, this will terminate the payment without re-trying
                // the other routes.
                if introIdx == len(route.hops.Val)-1 {
                        i.finalFailureReason = &reasonError
                } else {
                        // If there are other hops between the recipient and
                        // introduction node, then we just penalize the last
                        // hop in the blinded route to minimize the storage of
                        // results for ephemeral keys.
                        i.failPairBalance(route, len(route.hops.Val)-1)
                }

        // In all other cases, we penalize the reporting node. These are all
        // failures that should not happen.
        default:
                i.failNode(route, errorSourceIdx)
        }
}

// introductionPointIndex returns the index of an introduction point in a
// route, using the same indexing in the route that we use for errorSourceIdx
// (i.e., that we consider our own node to be at index zero). A boolean is
// returned to indicate whether the route contains a blinded portion at all.
func introductionPointIndex(route *mcRoute) (int, bool) {
        for i, hop := range route.hops.Val {
                if hop.hasBlindingPoint.IsSome() {
                        return i + 1, true
                }
        }

        return 0, false
}

// processPaymentOutcomeUnknown processes a payment outcome for which no failure
// message or source is available.
func (i *interpretedResult) processPaymentOutcomeUnknown(route *mcRoute) {
        n := len(route.hops.Val)

        // If this is a direct payment, the destination must be at fault.
        if n == 1 {
                i.failNode(route, n)
                i.finalFailureReason = &reasonError
                return
        }

        // Otherwise penalize all channels in the route to make sure the
        // responsible node is at least hit too. We even penalize the connection
        // to our own peer, because that peer could also be responsible.
        i.failPairRange(route, 0, n-1)
}

// extractMCRoute extracts the fields required by MC from the Route struct to
// create the more minimal mcRoute struct.
func extractMCRoute(r *route.Route) *mcRoute {
        return &mcRoute{
                sourcePubKey: tlv.NewRecordT[tlv.TlvType0](r.SourcePubKey),
                totalAmount:  tlv.NewRecordT[tlv.TlvType1](r.TotalAmount),
                hops: tlv.NewRecordT[tlv.TlvType2](
                        extractMCHops(r.Hops),
                ),
        }
}

// extractMCHops extracts the Hop fields that MC actually uses from a slice of
// Hops.
func extractMCHops(hops []*route.Hop) mcHops {
        return fn.Map(extractMCHop, hops)
}

// extractMCHop extracts the Hop fields that MC actually uses from a Hop.
func extractMCHop(hop *route.Hop) *mcHop {
        h := mcHop{
                channelID: tlv.NewPrimitiveRecord[tlv.TlvType0](
                        hop.ChannelID,
                ),
                pubKeyBytes: tlv.NewRecordT[tlv.TlvType1](hop.PubKeyBytes),
                amtToFwd:    tlv.NewRecordT[tlv.TlvType2](hop.AmtToForward),
        }

        if hop.BlindingPoint != nil {
                h.hasBlindingPoint = tlv.SomeRecordT(
                        tlv.NewRecordT[tlv.TlvType3](lnwire.TrueBoolean{}),
                )
        }

        if hop.CustomRecords != nil {
                h.hasCustomRecords = tlv.SomeRecordT(
                        tlv.NewRecordT[tlv.TlvType4](lnwire.TrueBoolean{}),
                )
        }

        return &h
}

// mcRoute holds the bare minimum info about a payment attempt route that MC
// requires.
type mcRoute struct {
        sourcePubKey tlv.RecordT[tlv.TlvType0, route.Vertex]
        totalAmount  tlv.RecordT[tlv.TlvType1, lnwire.MilliSatoshi]
        hops         tlv.RecordT[tlv.TlvType2, mcHops]
}

// Record returns a TLV record that can be used to encode/decode an mcRoute
// to/from a TLV stream.
func (r *mcRoute) Record() tlv.Record {
        recordSize := func() uint64 {
                var (
                        b   bytes.Buffer
                        buf [8]byte
                )
                if err := encodeMCRoute(&b, r, &buf); err != nil {
                        panic(err)
                }

                return uint64(len(b.Bytes()))
        }

        return tlv.MakeDynamicRecord(
                0, r, recordSize, encodeMCRoute, decodeMCRoute,
        )
}

func encodeMCRoute(w io.Writer, val interface{}, _ *[8]byte) error {
        if v, ok := val.(*mcRoute); ok {
                return serializeRoute(w, v)
        }

        return tlv.NewTypeForEncodingErr(val, "routing.mcRoute")
}

func decodeMCRoute(r io.Reader, val interface{}, _ *[8]byte, l uint64) error {
        if v, ok := val.(*mcRoute); ok {
                route, err := deserializeRoute(io.LimitReader(r, int64(l)))
                if err != nil {
                        return err
                }

                *v = *route

                return nil
        }

        return tlv.NewTypeForDecodingErr(val, "routing.mcRoute", l, l)
}

// mcHops is a list of mcHop records.
type mcHops []*mcHop

// Record returns a TLV record that can be used to encode/decode a list of
// mcHop to/from a TLV stream.
func (h *mcHops) Record() tlv.Record {
        recordSize := func() uint64 {
                var (
                        b   bytes.Buffer
                        buf [8]byte
                )
                if err := encodeMCHops(&b, h, &buf); err != nil {
                        panic(err)
                }

                return uint64(len(b.Bytes()))
        }

        return tlv.MakeDynamicRecord(
                0, h, recordSize, encodeMCHops, decodeMCHops,
        )
}

func encodeMCHops(w io.Writer, val interface{}, buf *[8]byte) error {
        if v, ok := val.(*mcHops); ok {
                // Encode the number of hops as a var int.
                if err := tlv.WriteVarInt(w, uint64(len(*v)), buf); err != nil {
                        return err
                }

                // With that written out, we'll now encode the entries
                // themselves as a sub-TLV record, which includes its _own_
                // inner length prefix.
                for _, hop := range *v {
                        var hopBytes bytes.Buffer
                        if err := serializeHop(&hopBytes, hop); err != nil {
                                return err
                        }

                        // We encode the record with a varint length followed by
                        // the _raw_ TLV bytes.
                        tlvLen := uint64(len(hopBytes.Bytes()))
                        if err := tlv.WriteVarInt(w, tlvLen, buf); err != nil {
                                return err
                        }

                        if _, err := w.Write(hopBytes.Bytes()); err != nil {
                                return err
                        }
                }

                return nil
        }

        return tlv.NewTypeForEncodingErr(val, "routing.mcHops")
}

func decodeMCHops(r io.Reader, val interface{}, buf *[8]byte, l uint64) error {
        if v, ok := val.(*mcHops); ok {
                // First, we'll decode the varint that encodes how many hops
                // are encoded in the stream.
                numHops, err := tlv.ReadVarInt(r, buf)
                if err != nil {
                        return err
                }

                // Now that we know how many records we'll need to read, we can
                // iterate and read them all out in series.
                for i := uint64(0); i < numHops; i++ {
                        // Read out the varint that encodes the size of this
                        // inner TLV record.
                        hopSize, err := tlv.ReadVarInt(r, buf)
                        if err != nil {
                                return err
                        }

                        // Using this information, we'll create a new limited
                        // reader that'll return an EOF once the end has been
                        // reached so the stream stops consuming bytes.
                        innerTlvReader := &io.LimitedReader{
                                R: r,
                                N: int64(hopSize),
                        }

                        hop, err := deserializeHop(innerTlvReader)
                        if err != nil {
                                return err
                        }

                        *v = append(*v, hop)
                }

                return nil
        }

        return tlv.NewTypeForDecodingErr(val, "routing.mcHops", l, l)
}

// mcHop holds the bare minimum info about a payment attempt route hop that MC
// requires.
type mcHop struct {
        channelID        tlv.RecordT[tlv.TlvType0, uint64]
        pubKeyBytes      tlv.RecordT[tlv.TlvType1, route.Vertex]
        amtToFwd         tlv.RecordT[tlv.TlvType2, lnwire.MilliSatoshi]
        hasBlindingPoint tlv.OptionalRecordT[tlv.TlvType3, lnwire.TrueBoolean]
        hasCustomRecords tlv.OptionalRecordT[tlv.TlvType4, lnwire.TrueBoolean]
}

// failNode marks the node indicated by idx in the route as failed. It also
// marks the incoming and outgoing channels of the node as failed. This function
// intentionally panics when the self node is failed.
func (i *interpretedResult) failNode(rt *mcRoute, idx int) {
        // Mark the node as failing.
        i.nodeFailure = &rt.hops.Val[idx-1].pubKeyBytes.Val

        // Mark the incoming connection as failed for the node. We intend to
        // penalize as much as we can for a node level failure, including future
        // outgoing traffic for this connection. The pair as it is returned by
        // getPair is penalized in the original and the reversed direction. Note
        // that this will also affect the score of the failing node's peers.
        // This is necessary to prevent future routes from repeatedly going into
        // the same node again.
        incomingChannelIdx := idx - 1
        inPair, _ := getPair(rt, incomingChannelIdx)
        i.pairResults[inPair] = failPairResult(0)
        i.pairResults[inPair.Reverse()] = failPairResult(0)

        // If not the ultimate node, mark the outgoing connection as failed for
        // the node.
        if idx < len(rt.hops.Val) {
                outgoingChannelIdx := idx
                outPair, _ := getPair(rt, outgoingChannelIdx)
                i.pairResults[outPair] = failPairResult(0)
                i.pairResults[outPair.Reverse()] = failPairResult(0)
        }
}

// failPairRange marks the node pairs from node fromIdx to node toIdx as failed
// in both directions.
func (i *interpretedResult) failPairRange(rt *mcRoute, fromIdx, toIdx int) {
        for idx := fromIdx; idx <= toIdx; idx++ {
                i.failPair(rt, idx)
        }
}

// failPair marks a pair as failed in both directions.
func (i *interpretedResult) failPair(rt *mcRoute, idx int) {
        pair, _ := getPair(rt, idx)

        // Report pair in both directions without a minimum penalization amount.
        i.pairResults[pair] = failPairResult(0)
        i.pairResults[pair.Reverse()] = failPairResult(0)
}

// failPairBalance marks a pair as failed with a minimum penalization amount.
func (i *interpretedResult) failPairBalance(rt *mcRoute, channelIdx int) {
        pair, amt := getPair(rt, channelIdx)

        i.pairResults[pair] = failPairResult(amt)
}

// successPairRange marks the node pairs from node fromIdx to node toIdx as
// succeeded.
func (i *interpretedResult) successPairRange(rt *mcRoute, fromIdx, toIdx int) {
        for idx := fromIdx; idx <= toIdx; idx++ {
                pair, amt := getPair(rt, idx)

                i.pairResults[pair] = successPairResult(amt)
        }
}

// getPair returns a node pair from the route and the amount passed between that
// pair.
func getPair(rt *mcRoute, channelIdx int) (DirectedNodePair,
        lnwire.MilliSatoshi) {

        nodeTo := rt.hops.Val[channelIdx].pubKeyBytes.Val
        var (
                nodeFrom route.Vertex
                amt      lnwire.MilliSatoshi
        )

        if channelIdx == 0 {
                nodeFrom = rt.sourcePubKey.Val
                amt = rt.totalAmount.Val
        } else {
                nodeFrom = rt.hops.Val[channelIdx-1].pubKeyBytes.Val
                amt = rt.hops.Val[channelIdx-1].amtToFwd.Val
        }

        pair := NewDirectedNodePair(nodeFrom, nodeTo)

        return pair, amt
}