• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

lightningnetwork / lnd / 13211764208

08 Feb 2025 03:08AM UTC coverage: 49.288% (-9.5%) from 58.815%
13211764208

Pull #9489

github

calvinrzachman
itest: verify switchrpc server enforces send then track

We prevent the rpc server from allowing onion dispatches for
attempt IDs which have already been tracked by rpc clients.

This helps protect the client from leaking a duplicate onion
attempt. NOTE: This is not the only method for solving this
issue! The issue could be addressed via careful client side
programming which accounts for the uncertainty and async
nature of dispatching onions to a remote process via RPC.
This would require some lnd ChannelRouter changes for how
we intend to use these RPCs though.
Pull Request #9489: multi: add BuildOnion, SendOnion, and TrackOnion RPCs

474 of 990 new or added lines in 11 files covered. (47.88%)

27321 existing lines in 435 files now uncovered.

101192 of 205306 relevant lines covered (49.29%)

1.54 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

6.29
/routing/probability_bimodal.go
1
package routing
2

3
import (
4
        "fmt"
5
        "math"
6
        "time"
7

8
        "github.com/btcsuite/btcd/btcutil"
9
        "github.com/go-errors/errors"
10
        "github.com/lightningnetwork/lnd/lnwire"
11
        "github.com/lightningnetwork/lnd/routing/route"
12
)
13

14
const (
	// DefaultBimodalScaleMsat is the default value for BimodalScaleMsat in
	// BimodalConfig. It describes the distribution of funds in the LN based
	// on empirical findings. We assume an unbalanced network by default.
	DefaultBimodalScaleMsat = lnwire.MilliSatoshi(300_000_000)

	// DefaultBimodalNodeWeight is the default value for the
	// BimodalNodeWeight in BimodalConfig. It is chosen such that past
	// forwardings on other channels of a router are only slightly taken
	// into account.
	DefaultBimodalNodeWeight = 0.2

	// DefaultBimodalDecayTime is the default value for BimodalDecayTime.
	// We will forget about previous learnings about channel liquidity on
	// the timescale of about a week.
	DefaultBimodalDecayTime = 7 * 24 * time.Hour

	// BimodalScaleMsatMax is the maximum value for BimodalScaleMsat. We
	// limit it here to the fakeHopHintCapacity to avoid issues with hop
	// hint probability calculations.
	// NOTE(review): the factor of 1000 presumably converts a
	// satoshi-denominated fakeHopHintCapacity to millisatoshis — confirm
	// at its definition.
	BimodalScaleMsatMax = lnwire.MilliSatoshi(
		1000 * fakeHopHintCapacity / 4,
	)

	// BimodalEstimatorName is used to identify the bimodal estimator.
	BimodalEstimatorName = "bimodal"
)
41

42
var (
	// ErrInvalidScale is returned when we get a scale that is zero or that
	// exceeds BimodalScaleMsatMax.
	ErrInvalidScale = errors.New("scale must be >= 0 and sane")

	// ErrInvalidNodeWeight is returned when we get a node weight that is
	// out of range.
	ErrInvalidNodeWeight = errors.New("node weight must be in [0, 1]")

	// ErrInvalidDecayTime is returned when we get a decay time that is not
	// strictly larger than zero.
	ErrInvalidDecayTime = errors.New("decay time must be larger than zero")

	// ErrZeroCapacity is returned when we encounter a channel with zero
	// capacity in probability estimation.
	ErrZeroCapacity = errors.New("capacity must be larger than zero")
)
57

58
// BimodalConfig contains configuration for our probability estimator.
type BimodalConfig struct {
	// BimodalNodeWeight defines how strongly other previous forwardings on
	// channels of a router should be taken into account when computing a
	// channel's probability to route. The allowed values are in the range
	// [0, 1], where a value of 0 means that only direct information about a
	// channel is taken into account.
	BimodalNodeWeight float64

	// BimodalScaleMsat describes the scale over which channels
	// statistically have some liquidity left. The value determines how
	// quickly the bimodal distribution drops off from the edges of a
	// channel. A larger value (compared to typical channel capacities)
	// means that the drop off is slow and that channel balances are
	// distributed more uniformly. A small value leads to the assumption of
	// very unbalanced channels. Must be non-zero and at most
	// BimodalScaleMsatMax (see validate).
	BimodalScaleMsat lnwire.MilliSatoshi

	// BimodalDecayTime is the scale for the exponential information decay
	// over time for previous successes or failures. Must be strictly
	// positive (see validate).
	BimodalDecayTime time.Duration
}
80

81
// validate checks the configuration of the estimator for allowed values.
82
func (p BimodalConfig) validate() error {
3✔
83
        if p.BimodalDecayTime <= 0 {
3✔
84
                return fmt.Errorf("%v: %w", BimodalEstimatorName,
×
85
                        ErrInvalidDecayTime)
×
86
        }
×
87

88
        if p.BimodalNodeWeight < 0 || p.BimodalNodeWeight > 1 {
3✔
89
                return fmt.Errorf("%v: %w", BimodalEstimatorName,
×
90
                        ErrInvalidNodeWeight)
×
91
        }
×
92

93
        if p.BimodalScaleMsat == 0 || p.BimodalScaleMsat > BimodalScaleMsatMax {
3✔
94
                return fmt.Errorf("%v: %w", BimodalEstimatorName,
×
95
                        ErrInvalidScale)
×
96
        }
×
97

98
        return nil
3✔
99
}
100

101
// DefaultBimodalConfig returns the default configuration for the estimator.
102
func DefaultBimodalConfig() BimodalConfig {
×
103
        return BimodalConfig{
×
104
                BimodalNodeWeight: DefaultBimodalNodeWeight,
×
105
                BimodalScaleMsat:  DefaultBimodalScaleMsat,
×
106
                BimodalDecayTime:  DefaultBimodalDecayTime,
×
107
        }
×
108
}
×
109

110
// BimodalEstimator returns node and pair probabilities based on historical
// payment results based on a liquidity distribution model of the LN. The main
// function is to estimate the direct channel probability based on a depleted
// liquidity distribution model, with additional information decay over time. A
// per-node probability can be mixed with the direct probability, taking into
// account successes/failures on other channels of the forwarder.
type BimodalEstimator struct {
	// BimodalConfig contains configuration options for our estimator. It
	// is embedded so that its fields (scale, node weight, decay time) are
	// accessible directly on the estimator.
	BimodalConfig
}
120

121
// NewBimodalEstimator creates a new BimodalEstimator.
122
func NewBimodalEstimator(cfg BimodalConfig) (*BimodalEstimator, error) {
3✔
123
        if err := cfg.validate(); err != nil {
3✔
124
                return nil, err
×
125
        }
×
126

127
        return &BimodalEstimator{
3✔
128
                BimodalConfig: cfg,
3✔
129
        }, nil
3✔
130
}
131

132
// Compile-time checks that the Estimator and estimatorConfig interfaces are
// implemented.
var _ Estimator = (*BimodalEstimator)(nil)
var _ estimatorConfig = (*BimodalConfig)(nil)
135

136
// Config returns the current configuration of the estimator.
func (p *BimodalEstimator) Config() estimatorConfig {
	return p.BimodalConfig
}
140

141
// String returns the estimator's configuration as a string representation.
142
func (p *BimodalEstimator) String() string {
3✔
143
        return fmt.Sprintf("estimator type: %v, decay time: %v, liquidity "+
3✔
144
                "scale: %v, node weight: %v", BimodalEstimatorName,
3✔
145
                p.BimodalDecayTime, p.BimodalScaleMsat, p.BimodalNodeWeight)
3✔
146
}
3✔
147

148
// PairProbability estimates the probability of successfully traversing to
149
// toNode based on historical payment outcomes for the from node. Those outcomes
150
// are passed in via the results parameter.
151
func (p *BimodalEstimator) PairProbability(now time.Time,
152
        results NodeResults, toNode route.Vertex, amt lnwire.MilliSatoshi,
153
        capacity btcutil.Amount) float64 {
×
154

×
155
        // We first compute the probability for the desired hop taking into
×
156
        // account previous knowledge.
×
157
        directProbability := p.directProbability(
×
158
                now, results, toNode, amt, lnwire.NewMSatFromSatoshis(capacity),
×
159
        )
×
160

×
161
        // The final probability is computed by taking into account other
×
162
        // channels of the from node.
×
163
        return p.calculateProbability(directProbability, now, results, toNode)
×
164
}
×
165

166
// LocalPairProbability computes the probability to reach toNode given a set of
167
// previous learnings.
168
func (p *BimodalEstimator) LocalPairProbability(now time.Time,
UNCOV
169
        results NodeResults, toNode route.Vertex) float64 {
×
UNCOV
170

×
UNCOV
171
        // For direct local probabilities we assume to know exactly how much we
×
UNCOV
172
        // can send over a channel, which assumes that channels are active and
×
UNCOV
173
        // have enough liquidity.
×
UNCOV
174
        directProbability := 1.0
×
UNCOV
175

×
UNCOV
176
        // If we had an unexpected failure for this node, we reduce the
×
UNCOV
177
        // probability for some time to avoid infinite retries.
×
UNCOV
178
        result, ok := results[toNode]
×
UNCOV
179
        if ok && !result.FailTime.IsZero() {
×
UNCOV
180
                timeAgo := now.Sub(result.FailTime)
×
UNCOV
181

×
UNCOV
182
                // We only expect results in the past to get a probability
×
UNCOV
183
                // between 0 and 1.
×
UNCOV
184
                if timeAgo < 0 {
×
185
                        timeAgo = 0
×
186
                }
×
UNCOV
187
                exponent := -float64(timeAgo) / float64(p.BimodalDecayTime)
×
UNCOV
188
                directProbability -= math.Exp(exponent)
×
189
        }
190

UNCOV
191
        return directProbability
×
192
}
193

194
// directProbability computes the probability to reach a node based on the
// liquidity distribution in the LN.
//
// The estimate starts from the zero-information bounds (a zero amount always
// succeeds, the full capacity never does), overrides those bounds with
// time-decayed success/failure amounts from previous results if available,
// and finally evaluates the bimodal probability formula.
func (p *BimodalEstimator) directProbability(now time.Time,
	results NodeResults, toNode route.Vertex, amt lnwire.MilliSatoshi,
	capacity lnwire.MilliSatoshi) float64 {

	// We first determine the time-adjusted success and failure amounts to
	// then compute a probability. We know that we can send a zero amount.
	successAmount := lnwire.MilliSatoshi(0)

	// We know that we cannot send the full capacity.
	failAmount := capacity

	// If we have information about past successes or failures, we modify
	// them with a time decay.
	result, ok := results[toNode]
	if ok {
		// Apply a time decay for the amount we cannot send.
		if !result.FailTime.IsZero() {
			failAmount = cannotSend(
				result.FailAmt, capacity, now, result.FailTime,
				p.BimodalDecayTime,
			)
		}

		// Apply a time decay for the amount we can send.
		if !result.SuccessTime.IsZero() {
			successAmount = canSend(
				result.SuccessAmt, now, result.SuccessTime,
				p.BimodalDecayTime,
			)
		}
	}

	// Compute the direct channel probability.
	probability, err := p.probabilityFormula(
		capacity, successAmount, failAmount, amt,
	)
	if err != nil {
		// An error indicates inconsistent amounts; log it and fall
		// back to a zero probability so this edge is avoided.
		log.Errorf("error computing probability to node: %v "+
			"(node: %v, results: %v, amt: %v, capacity: %v)",
			err, toNode, results, amt, capacity)

		return 0.0
	}

	return probability
}
242

243
// calculateProbability computes the total hop probability combining the channel
// probability and historic forwarding data of other channels of the node we try
// to send from.
//
// Goals:
// * We want to incentivize good routing nodes: the more routable channels a
// node has, the more we want to incentivize (vice versa for failures).
// -> We reduce/increase the direct probability depending on past
// failures/successes for other channels of the node.
//
// * We want to be forgiving/give other nodes a chance as well: we want to
// forget about (non-)routable channels over time.
// -> We weight the successes/failures with a time decay such that they will not
// influence the total probability if a long time went by.
//
// * If we don't have other info, we want to solely rely on the direct
// probability.
//
// * We want to be able to specify how important the other channels are compared
// to the direct channel.
// -> Introduce a node weight factor that weights the direct probability against
// the node-wide average. The larger the node weight, the more important other
// channels of the node are.
//
// How do failures on low fee nodes redirect routing to higher fee nodes?
// Assumptions:
// * attemptCostPPM of 1000 PPM
// * constant direct channel probability of P0 (usually 0.5 for large amounts)
// * node weight w of 0.2
//
// The question we want to answer is:
// How often would a zero-fee node be tried (even if there were failures for its
// other channels) over trying a high-fee node with 2000 PPM and no direct
// knowledge about the channel to send over?
//
// The probability of a route of length l is P(l) = l * P0.
//
// The total probability after n failures (with the implemented method here) is:
// P(l, n) = P(l-1) * P(n)
// = P(l-1) * (P0 + n*0) / (1 + n*w)
// = P(l) / (1 + n*w)
//
// Condition for a high-fee channel to overcome a low fee channel in the
// Dijkstra weight function (only looking at fee and probability PPM terms):
// highFeePPM + attemptCostPPM * 1/P(l) = 0PPM + attemptCostPPM * 1/P(l, n)
// highFeePPM/attemptCostPPM = 1/P(l, n) - 1/P(l) =
// = (1 + n*w)/P(l) - 1/P(l) =
// = n*w/P(l)
//
// Therefore:
// n = (highFeePPM/attemptCostPPM) * (P(l)/w) =
// = (2000/1000) * 0.5 * l / w = l/w
//
// For a one-hop route we get:
// n = 1/0.2 = 5 tolerated failures
//
// For a three-hop route we get:
// n = 3/0.2 = 15 tolerated failures
//
// For more details on the behavior see tests.
func (p *BimodalEstimator) calculateProbability(directProbability float64,
	now time.Time, results NodeResults, toNode route.Vertex) float64 {

	// If we don't take other channels into account, we can return early.
	if p.BimodalNodeWeight == 0.0 {
		return directProbability
	}

	// If we have up-to-date information about the channel we want to use,
	// i.e. the info stems from results not longer ago than the decay time,
	// we will only use the direct probability. This is needed in order to
	// avoid that other previous results (on all other channels of the same
	// routing node) will distort and pin the calculated probability even if
	// we have accurate direct information. This helps to dip the
	// probability below the min probability in case of failures, to start
	// the splitting process.
	directResult, ok := results[toNode]
	if ok {
		// Determine the most recent of the success and failure
		// timestamps for the direct channel.
		latest := directResult.SuccessTime
		if directResult.FailTime.After(latest) {
			latest = directResult.FailTime
		}

		// We use BimodalDecayTime to judge the currentness of the
		// data. It is the time scale on which we assume to have lost
		// information.
		if now.Sub(latest) < p.BimodalDecayTime {
			log.Tracef("Using direct probability for node %v: %v",
				toNode, directResult)

			return directProbability
		}
	}

	// w is a parameter which determines how strongly the other channels of
	// a node should be incorporated, the higher the stronger.
	w := p.BimodalNodeWeight

	// dt determines the timeliness of the previous successes/failures
	// to be taken into account.
	dt := float64(p.BimodalDecayTime)

	// The direct channel probability is weighted fully, all other results
	// are weighted according to how recent the information is.
	totalProbabilities := directProbability
	totalWeights := 1.0

	for peer, result := range results {
		// We don't include the direct hop probability here because it
		// is already included in totalProbabilities.
		if peer == toNode {
			continue
		}

		// We add probabilities weighted by how recent the info is.
		var weight float64
		if result.SuccessAmt > 0 {
			exponent := -float64(now.Sub(result.SuccessTime)) / dt
			weight = math.Exp(exponent)
			totalProbabilities += w * weight
			totalWeights += w * weight
		}
		if result.FailAmt > 0 {
			exponent := -float64(now.Sub(result.FailTime)) / dt
			weight = math.Exp(exponent)

			// Failures don't add to total success probability.
			totalWeights += w * weight
		}
	}

	// The result is a weighted average of the direct probability (for
	// successes: probability one, for failures: probability zero) over
	// the other channels of the node.
	return totalProbabilities / totalWeights
}
376

377
// canSend returns the sendable amount over the channel, respecting time decay.
378
// canSend approaches zero, if we wait for a much longer time than the decay
379
// time.
380
func canSend(successAmount lnwire.MilliSatoshi, now, successTime time.Time,
UNCOV
381
        decayConstant time.Duration) lnwire.MilliSatoshi {
×
UNCOV
382

×
UNCOV
383
        // The factor approaches 0 for successTime a long time in the past,
×
UNCOV
384
        // is 1 when the successTime is now.
×
UNCOV
385
        factor := math.Exp(
×
UNCOV
386
                -float64(now.Sub(successTime)) / float64(decayConstant),
×
UNCOV
387
        )
×
UNCOV
388

×
UNCOV
389
        canSend := factor * float64(successAmount)
×
UNCOV
390

×
UNCOV
391
        return lnwire.MilliSatoshi(canSend)
×
UNCOV
392
}
×
393

394
// cannotSend returns the not sendable amount over the channel, respecting time
395
// decay. cannotSend approaches the capacity, if we wait for a much longer time
396
// than the decay time.
397
func cannotSend(failAmount, capacity lnwire.MilliSatoshi, now,
UNCOV
398
        failTime time.Time, decayConstant time.Duration) lnwire.MilliSatoshi {
×
UNCOV
399

×
UNCOV
400
        if failAmount > capacity {
×
401
                failAmount = capacity
×
402
        }
×
403

404
        // The factor approaches 0 for failTime a long time in the past and it
405
        // is 1 when the failTime is now.
UNCOV
406
        factor := math.Exp(
×
UNCOV
407
                -float64(now.Sub(failTime)) / float64(decayConstant),
×
UNCOV
408
        )
×
UNCOV
409

×
UNCOV
410
        cannotSend := capacity - lnwire.MilliSatoshi(
×
UNCOV
411
                factor*float64(capacity-failAmount),
×
UNCOV
412
        )
×
UNCOV
413

×
UNCOV
414
        return cannotSend
×
415
}
416

417
// primitive computes the indefinite integral of our assumed (normalized)
418
// liquidity probability distribution. The distribution of liquidity x here is
419
// the function P(x) ~ exp(-x/s) + exp((x-c)/s), i.e., two exponentials residing
420
// at the ends of channels. This means that we expect liquidity to be at either
421
// side of the channel with capacity c. The s parameter (scale) defines how far
422
// the liquidity leaks into the channel. A very low scale assumes completely
423
// unbalanced channels, a very high scale assumes a random distribution. More
424
// details can be found in
425
// https://github.com/lightningnetwork/lnd/issues/5988#issuecomment-1131234858.
UNCOV
426
func (p *BimodalEstimator) primitive(c, x float64) float64 {
×
UNCOV
427
        s := float64(p.BimodalScaleMsat)
×
UNCOV
428

×
UNCOV
429
        // The indefinite integral of P(x) is given by
×
UNCOV
430
        // Int P(x) dx = H(x) = s * (-e(-x/s) + e((x-c)/s)),
×
UNCOV
431
        // and its norm from 0 to c can be computed from it,
×
UNCOV
432
        // norm = [H(x)]_0^c = s * (-e(-c/s) + 1 -(1 + e(-c/s))).
×
UNCOV
433
        ecs := math.Exp(-c / s)
×
UNCOV
434
        exs := math.Exp(-x / s)
×
UNCOV
435

×
UNCOV
436
        // It would be possible to split the next term and reuse the factors
×
UNCOV
437
        // from before, but this can lead to numerical issues with large
×
UNCOV
438
        // numbers.
×
UNCOV
439
        excs := math.Exp((x - c) / s)
×
UNCOV
440

×
UNCOV
441
        // norm can only become zero, if c is zero, which we sorted out before
×
UNCOV
442
        // calling this method.
×
UNCOV
443
        norm := -2*ecs + 2
×
UNCOV
444

×
UNCOV
445
        // We end up with the primitive function of the normalized P(x).
×
UNCOV
446
        return (-exs + excs) / norm
×
UNCOV
447
}
×
448

449
// integral computes the integral of our liquidity distribution from the lower
450
// to the upper value.
UNCOV
451
func (p *BimodalEstimator) integral(capacity, lower, upper float64) float64 {
×
UNCOV
452
        if lower < 0 || lower > upper {
×
453
                log.Errorf("probability integral limits nonsensical: capacity:"+
×
454
                        "%v lower: %v upper: %v", capacity, lower, upper)
×
455

×
456
                return 0.0
×
457
        }
×
458

UNCOV
459
        return p.primitive(capacity, upper) - p.primitive(capacity, lower)
×
460
}
461

462
// probabilityFormula computes the expected probability for a payment of
// amountMsat given prior learnings for a channel of certain capacity.
// successAmountMsat and failAmountMsat stand for the unsettled success and
// failure amounts, respectively. The formula is derived using the formalism
// presented in Pickhardt et al., https://arxiv.org/abs/2103.08576.
//
// The returned probability lies in [0, 1]. An error is returned for
// inconsistent inputs (zero capacity, NaN integrals, or a probability outside
// the expected range).
func (p *BimodalEstimator) probabilityFormula(capacityMsat, successAmountMsat,
	failAmountMsat, amountMsat lnwire.MilliSatoshi) (float64, error) {

	// Convert to positive-valued floats.
	capacity := float64(capacityMsat)
	successAmount := float64(successAmountMsat)
	failAmount := float64(failAmountMsat)
	amount := float64(amountMsat)

	// In order for this formula to give reasonable results, we need to have
	// an estimate of the capacity of a channel (or edge between nodes).
	if capacity == 0.0 {
		return 0, ErrZeroCapacity
	}

	// We cannot send more than the capacity.
	if amount > capacity {
		return 0.0, nil
	}

	// Mission control may have some outdated values, we correct them here.
	// TODO(bitromortac): there may be better decisions to make in these
	//  cases, e.g., resetting failAmount=cap and successAmount=0.

	// failAmount should be capacity at max.
	if failAmount > capacity {
		log.Debugf("Correcting failAmount %v to capacity %v",
			failAmount, capacity)

		failAmount = capacity
	}

	// successAmount should be capacity at max.
	if successAmount > capacity {
		log.Debugf("Correcting successAmount %v to capacity %v",
			successAmount, capacity)

		successAmount = capacity
	}

	// The next statement is a safety check against an illogical condition,
	// otherwise the renormalization integral would become zero. This may
	// happen if a large channel gets closed and smaller ones remain, but
	// it should recover with the time decay.
	if failAmount <= successAmount {
		log.Tracef("fail amount (%v) is smaller than or equal the "+
			"success amount (%v) for capacity (%v)",
			failAmountMsat, successAmountMsat, capacityMsat)

		return 0.0, nil
	}

	// We cannot send more than the fail amount.
	if amount >= failAmount {
		return 0.0, nil
	}

	// The success probability for payment amount a is the integral over the
	// prior distribution P(x), the probability to find liquidity between
	// the amount a and channel capacity c (or failAmount a_f):
	// P(X >= a | X < a_f) = Integral_{a}^{a_f} P(x) dx
	prob := p.integral(capacity, amount, failAmount)
	if math.IsNaN(prob) {
		return 0.0, fmt.Errorf("non-normalized probability is NaN, "+
			"capacity: %v, amount: %v, fail amount: %v",
			capacity, amount, failAmount)
	}

	// If we have payment information, we need to adjust the prior
	// distribution P(x) and get the posterior distribution by renormalizing
	// the prior distribution in such a way that the probability mass lies
	// between a_s and a_f.
	reNorm := p.integral(capacity, successAmount, failAmount)
	if math.IsNaN(reNorm) {
		return 0.0, fmt.Errorf("normalization factor is NaN, "+
			"capacity: %v, success amount: %v, fail amount: %v",
			capacity, successAmount, failAmount)
	}

	// The normalization factor can only be zero if the success amount is
	// equal or larger than the fail amount. This should not happen as we
	// have checked this scenario above.
	if reNorm == 0.0 {
		return 0.0, fmt.Errorf("normalization factor is zero, "+
			"capacity: %v, success amount: %v, fail amount: %v",
			capacity, successAmount, failAmount)
	}

	prob /= reNorm

	// Note that for payment amounts smaller than successAmount, we can get
	// a value larger than unity, which we cap here to get a proper
	// probability.
	if prob > 1.0 {
		// A probability above one for amounts above the success amount
		// indicates an inconsistency in the inputs.
		if amount > successAmount {
			return 0.0, fmt.Errorf("unexpected large probability "+
				"(%v) capacity: %v, amount: %v, success "+
				"amount: %v, fail amount: %v", prob, capacity,
				amount, successAmount, failAmount)
		}

		return 1.0, nil
	} else if prob < 0.0 {
		return 0.0, fmt.Errorf("negative probability "+
			"(%v) capacity: %v, amount: %v, success "+
			"amount: %v, fail amount: %v", prob, capacity,
			amount, successAmount, failAmount)
	}

	return prob, nil
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc