lightningnetwork / lnd · build 16181619122

09 Jul 2025 10:33PM UTC · coverage: 55.326% (-2.3%) from 57.611%

Pull Request #10060: sweep: fix expected spending events being missed
Merge d15e8671f into 0e830da9d

9 of 26 new or added lines in 2 files covered (34.62%).
23695 existing lines in 280 files now uncovered.
108518 of 196143 relevant lines covered (55.33%).
22354.81 hits per line.

Source File: /routing/probability_bimodal.go (58.78% covered)

package routing

import (
        "errors"
        "fmt"
        "math"
        "time"

        "github.com/btcsuite/btcd/btcutil"
        "github.com/lightningnetwork/lnd/lnwire"
        "github.com/lightningnetwork/lnd/routing/route"
)

const (
        // DefaultBimodalScaleMsat is the default value for BimodalScaleMsat in
        // BimodalConfig. It describes the distribution of funds in the LN
        // based on empirical findings. We assume an unbalanced network by
        // default.
        DefaultBimodalScaleMsat = lnwire.MilliSatoshi(300_000_000)

        // DefaultBimodalNodeWeight is the default value for the
        // BimodalNodeWeight in BimodalConfig. It is chosen such that past
        // forwardings on other channels of a router are only slightly taken
        // into account.
        DefaultBimodalNodeWeight = 0.2

        // DefaultBimodalDecayTime is the default value for BimodalDecayTime.
        // We will forget about previous learnings about channel liquidity on
        // the timescale of about a week.
        DefaultBimodalDecayTime = 7 * 24 * time.Hour

        // BimodalScaleMsatMax is the maximum value for BimodalScaleMsat. We
        // limit it here to the fakeHopHintCapacity to avoid issues with hop
        // hint probability calculations.
        BimodalScaleMsatMax = lnwire.MilliSatoshi(
                1000 * fakeHopHintCapacity / 4,
        )

        // BimodalEstimatorName is used to identify the bimodal estimator.
        BimodalEstimatorName = "bimodal"
)

var (
        // ErrInvalidScale is returned when we get a scale of zero or one that
        // exceeds BimodalScaleMsatMax.
        ErrInvalidScale = errors.New("scale must be larger than zero and sane")

        // ErrInvalidNodeWeight is returned when we get a node weight that is
        // out of range.
        ErrInvalidNodeWeight = errors.New("node weight must be in [0, 1]")

        // ErrInvalidDecayTime is returned when we get a decay time of zero or
        // below.
        ErrInvalidDecayTime = errors.New("decay time must be larger than zero")

        // ErrZeroCapacity is returned when we encounter a channel with zero
        // capacity in probability estimation.
        ErrZeroCapacity = errors.New("capacity must be larger than zero")
)

// BimodalConfig contains configuration for our probability estimator.
type BimodalConfig struct {
        // BimodalNodeWeight defines how strongly other previous forwardings on
        // channels of a router should be taken into account when computing a
        // channel's probability to route. The allowed values are in the range
        // [0, 1], where a value of 0 means that only direct information about
        // a channel is taken into account.
        BimodalNodeWeight float64

        // BimodalScaleMsat describes the scale over which channels
        // statistically have some liquidity left. The value determines how
        // quickly the bimodal distribution drops off from the edges of a
        // channel. A larger value (compared to typical channel capacities)
        // means that the drop off is slow and that channel balances are
        // distributed more uniformly. A small value leads to the assumption
        // of very unbalanced channels.
        BimodalScaleMsat lnwire.MilliSatoshi

        // BimodalDecayTime is the scale for the exponential information decay
        // over time for previous successes or failures.
        BimodalDecayTime time.Duration
}

// validate checks the configuration of the estimator for allowed values.
func (p BimodalConfig) validate() error {
        if p.BimodalDecayTime <= 0 {
                return fmt.Errorf("%v: %w", BimodalEstimatorName,
                        ErrInvalidDecayTime)
        }

        if p.BimodalNodeWeight < 0 || p.BimodalNodeWeight > 1 {
                return fmt.Errorf("%v: %w", BimodalEstimatorName,
                        ErrInvalidNodeWeight)
        }

        if p.BimodalScaleMsat == 0 || p.BimodalScaleMsat > BimodalScaleMsatMax {
                return fmt.Errorf("%v: %w", BimodalEstimatorName,
                        ErrInvalidScale)
        }

        return nil
}

// DefaultBimodalConfig returns the default configuration for the estimator.
func DefaultBimodalConfig() BimodalConfig {
        return BimodalConfig{
                BimodalNodeWeight: DefaultBimodalNodeWeight,
                BimodalScaleMsat:  DefaultBimodalScaleMsat,
                BimodalDecayTime:  DefaultBimodalDecayTime,
        }
}

// BimodalEstimator returns node and pair probabilities based on historical
// payment results and a liquidity distribution model of the LN. The main
// function is to estimate the direct channel probability based on a depleted
// liquidity distribution model, with additional information decay over time.
// A per-node probability can be mixed with the direct probability, taking
// into account successes/failures on other channels of the forwarder.
type BimodalEstimator struct {
        // BimodalConfig contains configuration options for our estimator.
        BimodalConfig
}

// NewBimodalEstimator creates a new BimodalEstimator.
func NewBimodalEstimator(cfg BimodalConfig) (*BimodalEstimator, error) {
        if err := cfg.validate(); err != nil {
                return nil, err
        }

        return &BimodalEstimator{
                BimodalConfig: cfg,
        }, nil
}
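
// exampleDefaultEstimator is a hypothetical usage sketch (the function name
// is illustrative, not part of the package API): it builds an estimator from
// the default configuration, which always passes validation, and prints the
// resulting settings.
func exampleDefaultEstimator() {
        estimator, err := NewBimodalEstimator(DefaultBimodalConfig())
        if err != nil {
                // Only reachable for an invalid decay time, node weight or
                // scale.
                panic(err)
        }

        // Prints roughly: estimator type: bimodal, decay time: 168h0m0s,
        // liquidity scale: 300000000 mSAT, node weight: 0.2.
        fmt.Println(estimator.String())
}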

// Compile-time checks that interfaces are implemented.
var _ Estimator = (*BimodalEstimator)(nil)
var _ estimatorConfig = (*BimodalConfig)(nil)

// Config returns the current configuration of the estimator.
func (p *BimodalEstimator) Config() estimatorConfig {
        return p.BimodalConfig
}

// String returns the estimator's configuration as a string representation.
func (p *BimodalEstimator) String() string {
        return fmt.Sprintf("estimator type: %v, decay time: %v, liquidity "+
                "scale: %v, node weight: %v", BimodalEstimatorName,
                p.BimodalDecayTime, p.BimodalScaleMsat, p.BimodalNodeWeight)
}

// PairProbability estimates the probability of successfully traversing to
// toNode based on historical payment outcomes for the from node. Those
// outcomes are passed in via the results parameter.
func (p *BimodalEstimator) PairProbability(now time.Time,
        results NodeResults, toNode route.Vertex, amt lnwire.MilliSatoshi,
        capacity btcutil.Amount) float64 {

        // We first compute the probability for the desired hop taking into
        // account previous knowledge.
        directProbability := p.directProbability(
                now, results, toNode, amt, lnwire.NewMSatFromSatoshis(capacity),
        )

        // The final probability is computed by taking into account other
        // channels of the from node.
        return p.calculateProbability(directProbability, now, results, toNode)
}
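
// examplePairProbability is a hypothetical sketch of a PairProbability call
// without recorded history (nil results): the estimate then derives purely
// from the assumed liquidity distribution of a channel with the given
// capacity. The amounts are made up for illustration.
func examplePairProbability() {
        // The default config always validates, so the error is ignored here.
        estimator, _ := NewBimodalEstimator(DefaultBimodalConfig())

        // Probability to forward 100k sats over an unknown 1M sat channel.
        var toNode route.Vertex
        prob := estimator.PairProbability(
                time.Now(), nil, toNode,
                lnwire.NewMSatFromSatoshis(100_000),
                btcutil.Amount(1_000_000),
        )

        fmt.Printf("probability without history: %.2f\n", prob)
}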

// LocalPairProbability computes the probability to reach toNode given a set
// of previous learnings.
func (p *BimodalEstimator) LocalPairProbability(now time.Time,
        results NodeResults, toNode route.Vertex) float64 {

        // For direct local probabilities we assume to know exactly how much
        // we can send over a channel, which assumes that channels are active
        // and have enough liquidity.
        directProbability := 1.0

        // If we had an unexpected failure for this node, we reduce the
        // probability for some time to avoid infinite retries.
        result, ok := results[toNode]
        if ok && !result.FailTime.IsZero() {
                timeAgo := now.Sub(result.FailTime)

                // We only expect results in the past to get a probability
                // between 0 and 1.
                if timeAgo < 0 {
                        timeAgo = 0
                }
                exponent := -float64(timeAgo) / float64(p.BimodalDecayTime)
                directProbability -= math.Exp(exponent)
        }

        return directProbability
}
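
// exampleLocalDecay is a hypothetical worked instance of the decay above: a
// local failure observed one day ago, with the default seven-day decay time,
// still suppresses the probability strongly.
func exampleLocalDecay() {
        timeAgo := 24 * time.Hour
        exponent := -float64(timeAgo) / float64(DefaultBimodalDecayTime)

        // 1 - e^(-1/7) ≈ 0.13.
        fmt.Printf("local probability: %.2f\n", 1.0-math.Exp(exponent))
}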

// directProbability computes the probability to reach a node based on the
// liquidity distribution in the LN.
func (p *BimodalEstimator) directProbability(now time.Time,
        results NodeResults, toNode route.Vertex, amt lnwire.MilliSatoshi,
        capacity lnwire.MilliSatoshi) float64 {

        // We first determine the time-adjusted success and failure amounts to
        // then compute a probability. We know that we can send a zero amount.
        successAmount := lnwire.MilliSatoshi(0)

        // We know that we cannot send the full capacity.
        failAmount := capacity

        // If we have information about past successes or failures, we modify
        // them with a time decay.
        result, ok := results[toNode]
        if ok {
                // Apply a time decay for the amount we cannot send.
                if !result.FailTime.IsZero() {
                        failAmount = cannotSend(
                                result.FailAmt, capacity, now, result.FailTime,
                                p.BimodalDecayTime,
                        )
                }

                // Apply a time decay for the amount we can send.
                if !result.SuccessTime.IsZero() {
                        successAmount = canSend(
                                result.SuccessAmt, now, result.SuccessTime,
                                p.BimodalDecayTime,
                        )
                }
        }

        // Compute the direct channel probability.
        probability, err := p.probabilityFormula(
                capacity, successAmount, failAmount, amt,
        )
        if err != nil {
                log.Errorf("error computing probability to node: %v "+
                        "(node: %v, results: %v, amt: %v, capacity: %v)",
                        err, toNode, results, amt, capacity)

                return 0.0
        }

        return probability
}

// calculateProbability computes the total hop probability combining the
// channel probability and historic forwarding data of other channels of the
// node we try to send from.
//
// Goals:
// * We want to incentivize good routing nodes: the more routable channels a
// node has, the more we want to incentivize (vice versa for failures).
// -> We reduce/increase the direct probability depending on past
// failures/successes for other channels of the node.
//
// * We want to be forgiving/give other nodes a chance as well: we want to
// forget about (non-)routable channels over time.
// -> We weight the successes/failures with a time decay such that they will
// not influence the total probability if a long time went by.
//
// * If we don't have other info, we want to solely rely on the direct
// probability.
//
// * We want to be able to specify how important the other channels are
// compared to the direct channel.
// -> Introduce a node weight factor that weights the direct probability
// against the node-wide average. The larger the node weight, the more
// important other channels of the node are.
//
// How do failures on low-fee nodes redirect routing to higher-fee nodes?
// Assumptions:
// * attemptCostPPM of 1000 PPM
// * constant direct channel probability of P0 (usually 0.5 for large amounts)
// * node weight w of 0.2
//
// The question we want to answer is:
// How often would a zero-fee node be tried (even if there were failures for
// its other channels) over trying a high-fee node with 2000 PPM and no direct
// knowledge about the channel to send over?
//
// The probability of a route of length l is P(l) = l * P0.
//
// The total probability after n failures (with the method implemented here)
// is:
// P(l, n) = P(l-1) * P(n)
// = P(l-1) * (P0 + n*0) / (1 + n*w)
// = P(l) / (1 + n*w)
//
// Condition for a high-fee channel to overcome a low-fee channel in the
// Dijkstra weight function (only looking at fee and probability PPM terms):
// highFeePPM + attemptCostPPM * 1/P(l) = 0PPM + attemptCostPPM * 1/P(l, n)
// highFeePPM/attemptCostPPM = 1/P(l, n) - 1/P(l) =
// = (1 + n*w)/P(l) - 1/P(l) =
// = n*w/P(l)
//
// Therefore:
// n = (highFeePPM/attemptCostPPM) * (P(l)/w) =
// = (2000/1000) * 0.5 * l / w = l/w
//
// For a one-hop route we get:
// n = 1/0.2 = 5 tolerated failures
//
// For a three-hop route we get:
// n = 3/0.2 = 15 tolerated failures
//
// For more details on the behavior see tests.
func (p *BimodalEstimator) calculateProbability(directProbability float64,
        now time.Time, results NodeResults, toNode route.Vertex) float64 {

        // If we don't take other channels into account, we can return early.
        if p.BimodalNodeWeight == 0.0 {
                return directProbability
        }

        // If we have up-to-date information about the channel we want to use,
        // i.e., the info stems from results no older than the decay time, we
        // only use the direct probability. This is needed to prevent previous
        // results on all other channels of the same routing node from
        // distorting and pinning the calculated probability even if we have
        // accurate direct information. This helps to dip the probability
        // below the min probability in case of failures, to start the
        // splitting process.
        directResult, ok := results[toNode]
        if ok {
                latest := directResult.SuccessTime
                if directResult.FailTime.After(latest) {
                        latest = directResult.FailTime
                }

                // We use BimodalDecayTime to judge the recency of the data.
                // It is the time scale on which we assume to have lost
                // information.
                if now.Sub(latest) < p.BimodalDecayTime {
                        log.Tracef("Using direct probability for node %v: %v",
                                toNode, directResult)

                        return directProbability
                }
        }

        // w is a parameter that determines how strongly the other channels of
        // a node should be incorporated; the higher it is, the stronger their
        // influence.
        w := p.BimodalNodeWeight

        // dt determines the timeliness of the previous successes/failures to
        // be taken into account.
        dt := float64(p.BimodalDecayTime)

        // The direct channel probability is weighted fully, all other results
        // are weighted according to how recent the information is.
        totalProbabilities := directProbability
        totalWeights := 1.0

        for peer, result := range results {
                // We don't include the direct hop probability here because it
                // is already included in totalProbabilities.
                if peer == toNode {
                        continue
                }

                // We add probabilities weighted by how recent the info is.
                var weight float64
                if result.SuccessAmt > 0 {
                        exponent := -float64(now.Sub(result.SuccessTime)) / dt
                        weight = math.Exp(exponent)
                        totalProbabilities += w * weight
                        totalWeights += w * weight
                }
                if result.FailAmt > 0 {
                        exponent := -float64(now.Sub(result.FailTime)) / dt
                        weight = math.Exp(exponent)

                        // Failures don't add to total success probability.
                        totalWeights += w * weight
                }
        }

        return totalProbabilities / totalWeights
}
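
// exampleNodeMixing is a hypothetical sketch of the mixing above, reproducing
// P(n) = P0 / (1 + n*w) from the comment's derivation: a direct probability
// P0 combined with n fresh failures (weight close to 1) on other channels of
// the node.
func exampleNodeMixing() {
        const p0 = 0.5
        w := DefaultBimodalNodeWeight

        // n=0: 0.500, n=1: 0.417, ..., n=5: 0.250.
        for n := 0; n <= 5; n++ {
                fmt.Printf("n=%d: %.3f\n", n, p0/(1+float64(n)*w))
        }
}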

// canSend returns the sendable amount over the channel, respecting time
// decay. canSend approaches zero if we wait for a much longer time than the
// decay time.
func canSend(successAmount lnwire.MilliSatoshi, now, successTime time.Time,
        decayConstant time.Duration) lnwire.MilliSatoshi {

        // The factor approaches 0 for successTime a long time in the past and
        // is 1 when the successTime is now.
        factor := math.Exp(
                -float64(now.Sub(successTime)) / float64(decayConstant),
        )

        canSend := factor * float64(successAmount)

        return lnwire.MilliSatoshi(canSend)
}

// cannotSend returns the amount that cannot be sent over the channel,
// respecting time decay. cannotSend approaches the capacity if we wait for a
// much longer time than the decay time.
func cannotSend(failAmount, capacity lnwire.MilliSatoshi, now,
        failTime time.Time, decayConstant time.Duration) lnwire.MilliSatoshi {

        if failAmount > capacity {
                failAmount = capacity
        }

        // The factor approaches 0 for failTime a long time in the past and it
        // is 1 when the failTime is now.
        factor := math.Exp(
                -float64(now.Sub(failTime)) / float64(decayConstant),
        )

        cannotSend := capacity - lnwire.MilliSatoshi(
                factor*float64(capacity-failAmount),
        )

        return cannotSend
}
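
// exampleAmountDecay is a hypothetical worked instance of canSend and
// cannotSend: results observed exactly one decay time ago are discounted by a
// factor of e^(-1) ≈ 0.37. The amounts are made up for illustration.
func exampleAmountDecay() {
        now := time.Now()
        then := now.Add(-DefaultBimodalDecayTime)
        capacity := lnwire.MilliSatoshi(1_000_000_000)

        // A 100M msat success decays toward zero: ≈ 36.8M msat.
        fmt.Println(canSend(100_000_000, now, then, DefaultBimodalDecayTime))

        // A 500M msat failure decays toward the capacity: ≈ 816M msat.
        fmt.Println(cannotSend(
                500_000_000, capacity, now, then, DefaultBimodalDecayTime,
        ))
}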

// primitive computes the indefinite integral of our assumed (normalized)
// liquidity probability distribution. The distribution of liquidity x here is
// the function P(x) ~ exp(-x/s) + exp((x-c)/s) + 1/c, i.e., two exponentials
// residing at the ends of channels. This means that we expect liquidity to be
// at either side of the channel with capacity c. The s parameter (scale)
// defines how far the liquidity leaks into the channel. A very low scale
// assumes completely unbalanced channels, a very high scale assumes a random
// distribution. More details can be found in
// https://github.com/lightningnetwork/lnd/issues/5988#issuecomment-1131234858.
// Additionally, we add a constant term 1/c to the distribution to avoid
// normalization issues and to fall back to a uniform distribution should the
// previous success and fail amounts contradict a bimodal distribution.
func (p *BimodalEstimator) primitive(c, x float64) float64 {
        s := float64(p.BimodalScaleMsat)

        // The indefinite integral of P(x) is given by
        // Int P(x) dx = H(x) = s * (-e(-x/s) + e((x-c)/s) + x/(c*s)),
        // and its norm from 0 to c can be computed from it,
        // norm = [H(x)]_0^c = s * (-e(-c/s) + 1 + 1/s -(-1 + e(-c/s))) =
        // = s * (-2*e(-c/s) + 2 + 1/s).
        // The prefactors s are left out, as they cancel out in the end.
        // norm can only become zero if c is zero, which we sorted out before
        // calling this method.
        ecs := math.Exp(-c / s)
        norm := -2*ecs + 2 + 1/s

        // It would be possible to split the next term and reuse the factors
        // from before, but this can lead to numerical issues with large
        // numbers.
        excs := math.Exp((x - c) / s)
        exs := math.Exp(-x / s)

        // We end up with the primitive function of the normalized P(x).
        return (-exs + excs + x/(c*s)) / norm
}

// integral computes the integral of our liquidity distribution from the lower
// to the upper value.
func (p *BimodalEstimator) integral(capacity, lower, upper float64) float64 {
        if lower < 0 || lower > upper {
                log.Errorf("probability integral limits nonsensical: "+
                        "capacity: %v lower: %v upper: %v", capacity, lower,
                        upper)

                return 0.0
        }

        return p.primitive(capacity, upper) - p.primitive(capacity, lower)
}
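
// exampleNormalization is a hypothetical check of the normalization above:
// integrating the liquidity distribution over the full range [0, capacity]
// yields one, independent of capacity and scale.
func exampleNormalization() {
        // The default config always validates, so the error is ignored here.
        p, _ := NewBimodalEstimator(DefaultBimodalConfig())

        capacity := 5_000_000_000.0 // 5M sats, expressed in msat.
        fmt.Println(p.integral(capacity, 0, capacity)) // 1.
}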

// probabilityFormula computes the expected probability for a payment of
// amountMsat given prior learnings for a channel of a certain capacity.
// successAmountMsat and failAmountMsat stand for the unsettled success and
// failure amounts, respectively. The formula is derived using the formalism
// presented in Pickhardt et al., https://arxiv.org/abs/2103.08576.
func (p *BimodalEstimator) probabilityFormula(capacityMsat, successAmountMsat,
        failAmountMsat, amountMsat lnwire.MilliSatoshi) (float64, error) {

        // Convert to positive-valued floats.
        capacity := float64(capacityMsat)
        successAmount := float64(successAmountMsat)
        failAmount := float64(failAmountMsat)
        amount := float64(amountMsat)

        // In order for this formula to give reasonable results, we need to
        // have an estimate of the capacity of a channel (or edge between
        // nodes).
        if capacity == 0.0 {
                return 0, ErrZeroCapacity
        }

        // We cannot send more than the capacity.
        if amount > capacity {
                return 0.0, nil
        }

        // The next statement is a safety check against an illogical
        // condition. We discard the knowledge for the channel in that case
        // since we have inconsistent data.
        if failAmount <= successAmount {
                log.Warnf("Fail amount (%s) is smaller than or equal to the "+
                        "success amount (%s) for capacity (%s)",
                        failAmountMsat, successAmountMsat, capacityMsat)

                successAmount = 0
                failAmount = capacity
        }

        // Mission control may have some outdated values with regard to the
        // current channel capacity between a node pair. This can happen in
        // case a large parallel channel was closed or if a channel was
        // downscaled, and it can lead to success and/or failure amounts being
        // out of the range [0, capacity]. We assume that the liquidity
        // situation of the channel is similar to before due to flow bias.

        // In case we have a large success we need to correct it to be in the
        // valid range. We set the success amount close to the capacity,
        // because we assume to still be able to send. Any possible failure
        // (that must in this case be larger than the capacity) is corrected
        // as well.
        if successAmount >= capacity {
                log.Debugf("Correcting success amount %s and failure amount "+
                        "%s to capacity %s", successAmountMsat,
                        failAmountMsat, capacityMsat)

                // We choose the success amount to be one less than the
                // capacity, to both fit success and failure amounts into the
                // capacity range in a consistent manner.
                successAmount = capacity - 1
                failAmount = capacity
        }

        // Having no or only a small success but a large failure only requires
        // an adjustment of the failure amount.
        if failAmount > capacity {
                log.Debugf("Correcting failure amount %s to capacity %s",
                        failAmountMsat, capacityMsat)

                failAmount = capacity
        }

        // We cannot send more than the fail amount.
        if amount >= failAmount {
                return 0.0, nil
        }

        // We can send the amount if it is smaller than the success amount.
        if amount <= successAmount {
                return 1.0, nil
        }

        // The success probability for payment amount a is the integral over
        // the prior distribution P(x), the probability to find liquidity
        // between the amount a and channel capacity c (or failAmount a_f):
        // P(X >= a | X < a_f) = Integral_{a}^{a_f} P(x) dx
        prob := p.integral(capacity, amount, failAmount)
        if math.IsNaN(prob) {
                return 0.0, fmt.Errorf("non-normalized probability is NaN, "+
                        "capacity: %v, amount: %v, fail amount: %v",
                        capacity, amount, failAmount)
        }

        // If we have payment information, we need to adjust the prior
        // distribution P(x) and get the posterior distribution by
        // renormalizing the prior distribution in such a way that the
        // probability mass lies between a_s and a_f.
        reNorm := p.integral(capacity, successAmount, failAmount)
        if math.IsNaN(reNorm) {
                return 0.0, fmt.Errorf("normalization factor is NaN, "+
                        "capacity: %v, success amount: %v, fail amount: %v",
                        capacity, successAmount, failAmount)
        }

        // The normalization factor can only be zero if the success amount is
        // equal to or larger than the fail amount. This should not happen as
        // we have checked this scenario above.
        if reNorm == 0.0 {
                return 0.0, fmt.Errorf("normalization factor is zero, "+
                        "capacity: %v, success amount: %v, fail amount: %v",
                        capacity, successAmount, failAmount)
        }

        prob /= reNorm

        // Note that for payment amounts smaller than successAmount, we can
        // get a value larger than unity, which we cap here to get a proper
        // probability.
        if prob > 1.0 {
                if amount > successAmount {
                        return 0.0, fmt.Errorf("unexpected large probability "+
                                "(%v) capacity: %v, amount: %v, success "+
                                "amount: %v, fail amount: %v", prob, capacity,
                                amount, successAmount, failAmount)
                }

                return 1.0, nil
        } else if prob < 0.0 {
                return 0.0, fmt.Errorf("negative probability "+
                        "(%v) capacity: %v, amount: %v, success "+
                        "amount: %v, fail amount: %v", prob, capacity,
                        amount, successAmount, failAmount)
        }

        return prob, nil
}
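
// exampleProbabilityFormula is a hypothetical worked instance of the
// renormalization above: with a success at 1/4 and a failure at 3/4 of the
// capacity, sending half the capacity succeeds with probability 0.5, since
// the remaining probability mass between the two bounds is symmetric around
// the middle of the channel.
func exampleProbabilityFormula() {
        // The default config always validates, so the error is ignored here.
        p, _ := NewBimodalEstimator(DefaultBimodalConfig())

        capacity := lnwire.MilliSatoshi(4_000_000_000)
        prob, err := p.probabilityFormula(
                capacity, capacity/4, 3*capacity/4, capacity/2,
        )
        if err != nil {
                panic(err)
        }

        fmt.Printf("probability: %.2f\n", prob) // 0.50.
}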