lightningnetwork / lnd / 11722605046

07 Nov 2024 11:17AM UTC coverage: 58.989% (+0.1%) from 58.869%

Pull Request #8330: bimodal pathfinding probability improvements
Commit by bitromortac (github): release-notes: update for 0.19.0

23 of 34 new or added lines in 1 file covered (67.65%).
57 existing lines in 15 files now uncovered.
132095 of 223933 relevant lines covered (58.99%).
19772.68 hits per line.

Source file: /routing/probability_bimodal.go (60.82% covered)
package routing

import (
        "fmt"
        "math"
        "time"

        "github.com/btcsuite/btcd/btcutil"
        "github.com/go-errors/errors"
        "github.com/lightningnetwork/lnd/lnwire"
        "github.com/lightningnetwork/lnd/routing/route"
)

const (
        // DefaultBimodalScaleMsat is the default value for BimodalScaleMsat in
        // BimodalConfig. It describes the distribution of funds in the LN based
        // on empirical findings. We assume an unbalanced network by default.
        DefaultBimodalScaleMsat = lnwire.MilliSatoshi(300_000_000)

        // DefaultBimodalNodeWeight is the default value for the
        // BimodalNodeWeight in BimodalConfig. It is chosen such that past
        // forwardings on other channels of a router are only slightly taken
        // into account.
        DefaultBimodalNodeWeight = 0.2

        // DefaultBimodalDecayTime is the default value for BimodalDecayTime.
        // We will forget about previous learnings about channel liquidity on
        // the timescale of about a week.
        DefaultBimodalDecayTime = 7 * 24 * time.Hour

        // BimodalScaleMsatMax is the maximum value for BimodalScaleMsat. We
        // limit it here to the fakeHopHintCapacity to avoid issues with hop
        // hint probability calculations.
        BimodalScaleMsatMax = lnwire.MilliSatoshi(
                1000 * fakeHopHintCapacity / 4,
        )

        // BimodalEstimatorName is used to identify the bimodal estimator.
        BimodalEstimatorName = "bimodal"
)

var (
        // ErrInvalidScale is returned when we get a scale that is zero or
        // above the allowed maximum.
        ErrInvalidScale = errors.New("scale must be > 0 and sane")

        // ErrInvalidNodeWeight is returned when we get a node weight that is
        // out of range.
        ErrInvalidNodeWeight = errors.New("node weight must be in [0, 1]")

        // ErrInvalidDecayTime is returned when we get a decay time that is
        // not larger than zero.
        ErrInvalidDecayTime = errors.New("decay time must be larger than zero")

        // ErrZeroCapacity is returned when we encounter a channel with zero
        // capacity in probability estimation.
        ErrZeroCapacity = errors.New("capacity must be larger than zero")
)

// BimodalConfig contains configuration for our probability estimator.
type BimodalConfig struct {
        // BimodalNodeWeight defines how strongly other previous forwardings on
        // channels of a router should be taken into account when computing a
        // channel's probability to route. The allowed values are in the range
        // [0, 1], where a value of 0 means that only direct information about a
        // channel is taken into account.
        BimodalNodeWeight float64

        // BimodalScaleMsat describes the scale over which channels
        // statistically have some liquidity left. The value determines how
        // quickly the bimodal distribution drops off from the edges of a
        // channel. A larger value (compared to typical channel capacities)
        // means that the drop off is slow and that channel balances are
        // distributed more uniformly. A small value leads to the assumption of
        // very unbalanced channels.
        BimodalScaleMsat lnwire.MilliSatoshi

        // BimodalDecayTime is the scale for the exponential information decay
        // over time for previous successes or failures.
        BimodalDecayTime time.Duration
}

// validate checks the configuration of the estimator for allowed values.
func (p BimodalConfig) validate() error {
        if p.BimodalDecayTime <= 0 {
                return fmt.Errorf("%v: %w", BimodalEstimatorName,
                        ErrInvalidDecayTime)
        }

        if p.BimodalNodeWeight < 0 || p.BimodalNodeWeight > 1 {
                return fmt.Errorf("%v: %w", BimodalEstimatorName,
                        ErrInvalidNodeWeight)
        }

        if p.BimodalScaleMsat == 0 || p.BimodalScaleMsat > BimodalScaleMsatMax {
                return fmt.Errorf("%v: %w", BimodalEstimatorName,
                        ErrInvalidScale)
        }

        return nil
}

// DefaultBimodalConfig returns the default configuration for the estimator.
func DefaultBimodalConfig() BimodalConfig {
        return BimodalConfig{
                BimodalNodeWeight: DefaultBimodalNodeWeight,
                BimodalScaleMsat:  DefaultBimodalScaleMsat,
                BimodalDecayTime:  DefaultBimodalDecayTime,
        }
}

// BimodalEstimator returns node and pair probabilities based on historical
// payment results, using a liquidity distribution model of the LN. The main
// function is to estimate the direct channel probability based on a depleted
// liquidity distribution model, with additional information decay over time. A
// per-node probability can be mixed with the direct probability, taking into
// account successes/failures on other channels of the forwarder.
type BimodalEstimator struct {
        // BimodalConfig contains configuration options for our estimator.
        BimodalConfig
}

// NewBimodalEstimator creates a new BimodalEstimator.
func NewBimodalEstimator(cfg BimodalConfig) (*BimodalEstimator, error) {
        if err := cfg.validate(); err != nil {
                return nil, err
        }

        return &BimodalEstimator{
                BimodalConfig: cfg,
        }, nil
}
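
A minimal construction sketch (assuming only the exported routing package API
shown in this file; the tweaked node weight is illustrative):

    package main

    import (
            "fmt"

            "github.com/lightningnetwork/lnd/routing"
    )

    func main() {
            // Start from the defaults and adjust one knob.
            cfg := routing.DefaultBimodalConfig()
            cfg.BimodalNodeWeight = 0.3 // still within [0, 1]

            // NewBimodalEstimator runs validate() and rejects out-of-range
            // values, e.g. a zero scale or a non-positive decay time.
            estimator, err := routing.NewBimodalEstimator(cfg)
            if err != nil {
                    panic(err)
            }

            // String() reports the configured parameters.
            fmt.Println(estimator)
    }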

// Compile-time checks that interfaces are implemented.
var _ Estimator = (*BimodalEstimator)(nil)
var _ estimatorConfig = (*BimodalConfig)(nil)

// Config returns the current configuration of the estimator.
func (p *BimodalEstimator) Config() estimatorConfig {
        return p.BimodalConfig
}

// String returns the estimator's configuration as a string representation.
func (p *BimodalEstimator) String() string {
        return fmt.Sprintf("estimator type: %v, decay time: %v, liquidity "+
                "scale: %v, node weight: %v", BimodalEstimatorName,
                p.BimodalDecayTime, p.BimodalScaleMsat, p.BimodalNodeWeight)
}

// PairProbability estimates the probability of successfully traversing to
// toNode based on historical payment outcomes for the from node. Those
// outcomes are passed in via the results parameter.
func (p *BimodalEstimator) PairProbability(now time.Time,
        results NodeResults, toNode route.Vertex, amt lnwire.MilliSatoshi,
        capacity btcutil.Amount) float64 {

        // We first compute the probability for the desired hop taking into
        // account previous knowledge.
        directProbability := p.directProbability(
                now, results, toNode, amt, lnwire.NewMSatFromSatoshis(capacity),
        )

        // The final probability is computed by taking into account other
        // channels of the from node.
        return p.calculateProbability(directProbability, now, results, toNode)
}
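
A sketch of how a caller might query the estimator (this assumes the routing
package's exported NodeResults map and TimedPairResult type, whose fields are
read in this file; node key, amounts and capacity are made-up values):

    // One failure at 600k sat, observed an hour ago, on a 1 BTC channel.
    var toNode route.Vertex
    results := routing.NodeResults{
            toNode: routing.TimedPairResult{
                    FailTime: time.Now().Add(-time.Hour),
                    FailAmt:  lnwire.MilliSatoshi(600_000_000_000),
            },
    }

    // Estimated probability of sending 500k sat to toNode right now.
    prob := estimator.PairProbability(
            time.Now(), results, toNode,
            lnwire.MilliSatoshi(500_000_000_000),
            btcutil.Amount(100_000_000),
    )
    fmt.Printf("estimated success probability: %.3f\n", prob)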

// LocalPairProbability computes the probability to reach toNode given a set of
// previous learnings.
func (p *BimodalEstimator) LocalPairProbability(now time.Time,
        results NodeResults, toNode route.Vertex) float64 {

        // For direct local probabilities we assume to know exactly how much we
        // can send over a channel, which assumes that channels are active and
        // have enough liquidity.
        directProbability := 1.0

        // If we had an unexpected failure for this node, we reduce the
        // probability for some time to avoid infinite retries.
        result, ok := results[toNode]
        if ok && !result.FailTime.IsZero() {
                timeAgo := now.Sub(result.FailTime)

                // We only expect results in the past to get a probability
                // between 0 and 1.
                if timeAgo < 0 {
                        timeAgo = 0
                }
                exponent := -float64(timeAgo) / float64(p.BimodalDecayTime)
                directProbability -= math.Exp(exponent)
        }

        return directProbability
}
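
As a worked example of this decay (with the default decay time of one week): a
failure observed just now zeroes the local probability (1 - e^0 = 0), a
failure from one day ago gives 1 - e^(-1/7) ≈ 0.13, and after several weeks
the penalty has essentially vanished, restoring a probability close to 1.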

// directProbability computes the probability to reach a node based on the
// liquidity distribution in the LN.
func (p *BimodalEstimator) directProbability(now time.Time,
        results NodeResults, toNode route.Vertex, amt lnwire.MilliSatoshi,
        capacity lnwire.MilliSatoshi) float64 {

        // We first determine the time-adjusted success and failure amounts to
        // then compute a probability. We know that we can send a zero amount.
        successAmount := lnwire.MilliSatoshi(0)

        // We know that we cannot send the full capacity.
        failAmount := capacity

        // If we have information about past successes or failures, we modify
        // them with a time decay.
        result, ok := results[toNode]
        if ok {
                // Apply a time decay for the amount we cannot send.
                if !result.FailTime.IsZero() {
                        failAmount = cannotSend(
                                result.FailAmt, capacity, now, result.FailTime,
                                p.BimodalDecayTime,
                        )
                }

                // Apply a time decay for the amount we can send.
                if !result.SuccessTime.IsZero() {
                        successAmount = canSend(
                                result.SuccessAmt, now, result.SuccessTime,
                                p.BimodalDecayTime,
                        )
                }
        }

        // Compute the direct channel probability.
        probability, err := p.probabilityFormula(
                capacity, successAmount, failAmount, amt,
        )
        if err != nil {
                log.Errorf("error computing probability to node: %v "+
                        "(node: %v, results: %v, amt: %v, capacity: %v)",
                        err, toNode, results, amt, capacity)

                return 0.0
        }

        return probability
}

// calculateProbability computes the total hop probability combining the channel
// probability and historic forwarding data of other channels of the node we try
// to send from.
//
// Goals:
// * We want to incentivize good routing nodes: the more routable channels a
// node has, the more we want to incentivize (vice versa for failures).
// -> We reduce/increase the direct probability depending on past
// failures/successes for other channels of the node.
//
// * We want to be forgiving/give other nodes a chance as well: we want to
// forget about (non-)routable channels over time.
// -> We weight the successes/failures with a time decay such that they will not
// influence the total probability if a long time went by.
//
// * If we don't have other info, we want to solely rely on the direct
// probability.
//
// * We want to be able to specify how important the other channels are compared
// to the direct channel.
// -> Introduce a node weight factor that weights the direct probability against
// the node-wide average. The larger the node weight, the more important other
// channels of the node are.
//
// How do failures on low fee nodes redirect routing to higher fee nodes?
// Assumptions:
// * attemptCostPPM of 1000 PPM
// * constant direct channel probability of P0 (usually 0.5 for large amounts)
// * node weight w of 0.2
//
// The question we want to answer is:
// How often would a zero-fee node be tried (even if there were failures for its
// other channels) over trying a high-fee node with 2000 PPM and no direct
// knowledge about the channel to send over?
//
// The probability of a route of length l is P(l) = l * P0.
//
// The total probability after n failures (with the implemented method here) is:
// P(l, n) = P(l-1) * P(n)
// = P(l-1) * (P0 + n*0) / (1 + n*w)
// = P(l) / (1 + n*w)
//
// Condition for a high-fee channel to overcome a low fee channel in the
// Dijkstra weight function (only looking at fee and probability PPM terms):
// highFeePPM + attemptCostPPM * 1/P(l) = 0PPM + attemptCostPPM * 1/P(l, n)
// highFeePPM/attemptCostPPM = 1/P(l, n) - 1/P(l) =
// = (1 + n*w)/P(l) - 1/P(l) =
// = n*w/P(l)
//
// Therefore:
// n = (highFeePPM/attemptCostPPM) * (P(l)/w) =
// = (2000/1000) * 0.5 * l / w = l/w
//
// For a one-hop route we get:
// n = 1/0.2 = 5 tolerated failures
//
// For a three-hop route we get:
// n = 3/0.2 = 15 tolerated failures
//
// For more details on the behavior see tests.
func (p *BimodalEstimator) calculateProbability(directProbability float64,
        now time.Time, results NodeResults, toNode route.Vertex) float64 {

        // If we don't take other channels into account, we can return early.
        if p.BimodalNodeWeight == 0.0 {
                return directProbability
        }

        // If we have up-to-date information about the channel we want to use,
        // i.e. the info stems from results not longer ago than the decay time,
        // we will only use the direct probability. This is needed in order to
        // avoid that other previous results (on all other channels of the same
        // routing node) will distort and pin the calculated probability even if
        // we have accurate direct information. This helps to dip the
        // probability below the min probability in case of failures, to start
        // the splitting process.
        directResult, ok := results[toNode]
        if ok {
                latest := directResult.SuccessTime
                if directResult.FailTime.After(latest) {
                        latest = directResult.FailTime
                }

                // We use BimodalDecayTime to judge the currentness of the
                // data. It is the time scale on which we assume to have lost
                // information.
                if now.Sub(latest) < p.BimodalDecayTime {
                        log.Tracef("Using direct probability for node %v: %v",
                                toNode, directResult)

                        return directProbability
                }
        }

        // w is a parameter which determines how strongly the other channels of
        // a node should be incorporated, the higher the stronger.
        w := p.BimodalNodeWeight

        // dt determines the timeliness of the previous successes/failures
        // to be taken into account.
        dt := float64(p.BimodalDecayTime)

        // The direct channel probability is weighted fully, all other results
        // are weighted according to how recent the information is.
        totalProbabilities := directProbability
        totalWeights := 1.0

        for peer, result := range results {
                // We don't include the direct hop probability here because it
                // is already included in totalProbabilities.
                if peer == toNode {
                        continue
                }

                // We add probabilities weighted by how recent the info is.
                var weight float64
                if result.SuccessAmt > 0 {
                        exponent := -float64(now.Sub(result.SuccessTime)) / dt
                        weight = math.Exp(exponent)
                        totalProbabilities += w * weight
                        totalWeights += w * weight
                }
                if result.FailAmt > 0 {
                        exponent := -float64(now.Sub(result.FailTime)) / dt
                        weight = math.Exp(exponent)

                        // Failures don't add to total success probability.
                        totalWeights += w * weight
                }
        }

        return totalProbabilities / totalWeights
}
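
To make the node-wide averaging concrete, here is a small standalone sketch of
the weighting scheme above (a hypothetical helper, not part of lnd; the slices
hold the already-decayed exp(-dt/decay) factors per result):

    // mixProbability mixes a direct channel probability (weight 1) with other
    // results of the same node: successes count as probability 1 and enter
    // numerator and denominator, failures count as probability 0 and only
    // enlarge the denominator.
    func mixProbability(direct, w float64, successW, failW []float64) float64 {
            totalProbabilities, totalWeights := direct, 1.0
            for _, sw := range successW {
                    totalProbabilities += w * sw
                    totalWeights += w * sw
            }
            for _, fw := range failW {
                    totalWeights += w * fw
            }

            return totalProbabilities / totalWeights
    }

For example, a direct probability of 0.5 and node weight w = 0.2 with one
fresh success on another channel (weight 1) give (0.5 + 0.2) / (1 + 0.2) ≈
0.58, while one fresh failure instead gives 0.5 / 1.2 ≈ 0.42.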

// canSend returns the sendable amount over the channel, respecting time decay.
// canSend approaches zero, if we wait for a much longer time than the decay
// time.
func canSend(successAmount lnwire.MilliSatoshi, now, successTime time.Time,
        decayConstant time.Duration) lnwire.MilliSatoshi {

        // The factor approaches 0 for successTime a long time in the past,
        // is 1 when the successTime is now.
        factor := math.Exp(
                -float64(now.Sub(successTime)) / float64(decayConstant),
        )

        canSend := factor * float64(successAmount)

        return lnwire.MilliSatoshi(canSend)
}

// cannotSend returns the not sendable amount over the channel, respecting time
// decay. cannotSend approaches the capacity, if we wait for a much longer time
// than the decay time.
func cannotSend(failAmount, capacity lnwire.MilliSatoshi, now,
        failTime time.Time, decayConstant time.Duration) lnwire.MilliSatoshi {

        if failAmount > capacity {
                failAmount = capacity
        }

        // The factor approaches 0 for failTime a long time in the past and it
        // is 1 when the failTime is now.
        factor := math.Exp(
                -float64(now.Sub(failTime)) / float64(decayConstant),
        )

        cannotSend := capacity - lnwire.MilliSatoshi(
                factor*float64(capacity-failAmount),
        )

        return cannotSend
}
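
Both helpers share the decay factor exp(-Δt/decayTime). A worked example with
the default one-week decay time: a failure at 600k sat on a 1M sat channel,
observed exactly one week ago, is decayed with factor e^(-1) ≈ 0.37, giving
cannotSend = 1M - 0.37 * (1M - 600k) ≈ 853k sat; a success of 600k sat of the
same age decays to canSend ≈ 0.37 * 600k ≈ 221k sat.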

// primitive computes the indefinite integral of our assumed (normalized)
// liquidity probability distribution. The distribution of liquidity x here is
// the function P(x) ~ exp(-x/s) + exp((x-c)/s), i.e., two exponentials residing
// at the ends of channels. This means that we expect liquidity to be at either
// side of the channel with capacity c. The s parameter (scale) defines how far
// the liquidity leaks into the channel. A very low scale assumes completely
// unbalanced channels, a very high scale assumes a random distribution. More
// details can be found in
// https://github.com/lightningnetwork/lnd/issues/5988#issuecomment-1131234858.
// Additionally, we add a constant term 1/c to the distribution to avoid
// normalization issues and to fall back to a uniform distribution should the
// previous success and fail amounts contradict a bimodal distribution.
func (p *BimodalEstimator) primitive(c, x float64) float64 {
        s := float64(p.BimodalScaleMsat)

        // The indefinite integral of P(x) is given by
        // Int P(x) dx = H(x) = s * (-e(-x/s) + e((x-c)/s) + x/(c*s)),
        // and its norm from 0 to c can be computed from it,
        // norm = [H(x)]_0^c = s * (-e(-c/s) + 1 + 1/s - (-1 + e(-c/s))) =
        // = s * (-2*e(-c/s) + 2 + 1/s).
        // The prefactors s are left out, as they cancel out in the end.
        // norm can only become zero, if c is zero, which we sorted out before
        // calling this method.
        ecs := math.Exp(-c / s)
        norm := -2*ecs + 2 + 1/s

        // It would be possible to split the next term and reuse the factors
        // from before, but this can lead to numerical issues with large
        // numbers.
        excs := math.Exp((x - c) / s)
        exs := math.Exp(-x / s)

        // We end up with the primitive function of the normalized P(x).
        return (-exs + excs + x/(c*s)) / norm
}

// integral computes the integral of our liquidity distribution from the lower
// to the upper value.
func (p *BimodalEstimator) integral(capacity, lower, upper float64) float64 {
        if lower < 0 || lower > upper {
                log.Errorf("probability integral limits nonsensical: capacity:"+
                        "%v lower: %v upper: %v", capacity, lower, upper)

                return 0.0
        }

        return p.primitive(capacity, upper) - p.primitive(capacity, lower)
}
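
To get a feeling for the numbers, the normalized primitive can be evaluated
directly. A self-contained sketch (the standalone primitive below copies the
formula above with the scale passed in explicitly; the channel size is
illustrative):

    package main

    import (
            "fmt"
            "math"
    )

    // primitive mirrors BimodalEstimator.primitive for a given scale s.
    func primitive(c, s, x float64) float64 {
            ecs := math.Exp(-c / s)
            norm := -2*ecs + 2 + 1/s

            return (-math.Exp(-x/s) + math.Exp((x-c)/s) + x/(c*s)) / norm
    }

    func main() {
            // A 1 BTC channel and the default scale, both in msat.
            c := 100_000_000_000.0
            s := 300_000_000.0

            // Prior probability to find at least half the capacity as
            // liquidity: the integral of P(x) from c/2 to c.
            prob := primitive(c, s, c) - primitive(c, s, c/2)
            fmt.Printf("P(X >= c/2) = %.3f\n", prob)
    }

This prints a probability of about 0.5, matching the P0 ≈ 0.5 for large
amounts assumed in the comment on calculateProbability above.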

// probabilityFormula computes the expected probability for a payment of
// amountMsat given prior learnings for a channel of certain capacity.
// successAmountMsat and failAmountMsat stand for the unsettled success and
// failure amounts, respectively. The formula is derived using the formalism
// presented in Pickhardt et al., https://arxiv.org/abs/2103.08576.
func (p *BimodalEstimator) probabilityFormula(capacityMsat, successAmountMsat,
        failAmountMsat, amountMsat lnwire.MilliSatoshi) (float64, error) {

        // Convert to positive-valued floats.
        capacity := float64(capacityMsat)
        successAmount := float64(successAmountMsat)
        failAmount := float64(failAmountMsat)
        amount := float64(amountMsat)

        // In order for this formula to give reasonable results, we need to have
        // an estimate of the capacity of a channel (or edge between nodes).
        if capacity == 0.0 {
                return 0, ErrZeroCapacity
        }

        // We cannot send more than the capacity.
        if amount > capacity {
                return 0.0, nil
        }

        // Mission control may have some outdated values with regard to the
        // current channel capacity between a node pair, which is why we correct
        // the values.

        // failAmount should be capacity at max.
        if failAmount > capacity {
                log.Debugf("Correcting failAmount %v to capacity %v",
                        failAmount, capacity)

                failAmount = capacity
        }

        // successAmount should be capacity at max.
        if successAmount > capacity {
                log.Debugf("Correcting successAmount %v to capacity %v",
                        successAmount, capacity)

                successAmount = capacity
        }

        // The next statement is a safety check against an illogical condition.
        // We discard the knowledge for the channel in that case. Note that
        // this condition should only happen due to the two corrections above,
        // as mission control already enforces successAmount < failAmount.
        if failAmount <= successAmount {
                log.Tracef("fail amount (%v) is smaller than or equal the "+
                        "success amount (%v) for capacity (%v)",
                        failAmountMsat, successAmountMsat, capacityMsat)

                successAmount = 0
                failAmount = capacity
        }

        // We cannot send more than the fail amount.
        if amount >= failAmount {
                return 0.0, nil
        }

        // We can send the amount if it is smaller than the success amount.
        if amount <= successAmount {
                return 1.0, nil
        }

        // The success probability for payment amount a is the integral over the
        // prior distribution P(x), the probability to find liquidity between
        // the amount a and channel capacity c (or failAmount a_f):
        // P(X >= a | X < a_f) = Integral_{a}^{a_f} P(x) dx
        prob := p.integral(capacity, amount, failAmount)
        if math.IsNaN(prob) {
                return 0.0, fmt.Errorf("non-normalized probability is NaN, "+
                        "capacity: %v, amount: %v, fail amount: %v",
                        capacity, amount, failAmount)
        }

        // If we have payment information, we need to adjust the prior
        // distribution P(x) and get the posterior distribution by renormalizing
        // the prior distribution in such a way that the probability mass lies
        // between a_s and a_f.
        reNorm := p.integral(capacity, successAmount, failAmount)
        if math.IsNaN(reNorm) {
                return 0.0, fmt.Errorf("normalization factor is NaN, "+
                        "capacity: %v, success amount: %v, fail amount: %v",
                        capacity, successAmount, failAmount)
        }

        // The normalization factor can only be zero if the success amount is
        // equal to or larger than the fail amount. This should not happen as we
        // have checked this scenario above.
        if reNorm == 0.0 {
                return 0.0, fmt.Errorf("normalization factor is zero, "+
                        "capacity: %v, success amount: %v, fail amount: %v",
                        capacity, successAmount, failAmount)
        }

        prob /= reNorm

        // Note that for payment amounts smaller than successAmount, we can get
        // a value larger than unity, which we cap here to get a proper
        // probability.
        if prob > 1.0 {
                if amount > successAmount {
                        return 0.0, fmt.Errorf("unexpected large probability "+
                                "(%v) capacity: %v, amount: %v, success "+
                                "amount: %v, fail amount: %v", prob, capacity,
                                amount, successAmount, failAmount)
                }

                return 1.0, nil
        } else if prob < 0.0 {
                return 0.0, fmt.Errorf("negative probability "+
                        "(%v) capacity: %v, amount: %v, success "+
                        "amount: %v, fail amount: %v", prob, capacity,
                        amount, successAmount, failAmount)
        }

        return prob, nil
}
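
A worked consequence of the renormalization: with no prior information
(successAmount = 0, failAmount = capacity) the denominator integral is 1, so
the prior probability is returned unchanged. With a known success amount a_s,
the conditional probability P(X >= a | a_s <= X < a_f) approaches 1 as the
payment amount a approaches a_s from above, so amounts just beyond a proven
success remain very likely to succeed.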