lightningnetwork / lnd · build 12583319996
02 Jan 2025 01:38PM UTC · coverage: 57.522% (-1.1%) from 58.598%

Pull Request #9361: fn: optimize context guard (github · author: starius)

fn/ContextGuard: use context.AfterFunc to wait

Simplifies context cancellation handling by using context.AfterFunc instead of
a goroutine to wait for context cancellation. This approach avoids the
overhead of a goroutine during the waiting period.

For ctxQuitUnsafe, since g.quit is closed only in the Quit method (which also
cancels all associated contexts), waiting on context cancellation ensures the
same behavior without unnecessary dependency on g.quit.

Added a test to ensure that the Create method does not launch any goroutines.

102587 of 178344 relevant lines covered (57.52%) · 24734.33 hits per line

Source File: /routing/probability_bimodal.go · 54.55% covered

package routing

import (
        "fmt"
        "math"
        "time"

        "github.com/btcsuite/btcd/btcutil"
        "github.com/go-errors/errors"
        "github.com/lightningnetwork/lnd/lnwire"
        "github.com/lightningnetwork/lnd/routing/route"
)

const (
        // DefaultBimodalScaleMsat is the default value for BimodalScaleMsat in
        // BimodalConfig. It describes the distribution of funds in the LN based
        // on empirical findings. We assume an unbalanced network by default.
        DefaultBimodalScaleMsat = lnwire.MilliSatoshi(300_000_000)

        // DefaultBimodalNodeWeight is the default value for the
        // BimodalNodeWeight in BimodalConfig. It is chosen such that past
        // forwardings on other channels of a router are only slightly taken
        // into account.
        DefaultBimodalNodeWeight = 0.2

        // DefaultBimodalDecayTime is the default value for BimodalDecayTime.
        // We will forget about previous learnings about channel liquidity on
        // the timescale of about a week.
        DefaultBimodalDecayTime = 7 * 24 * time.Hour

        // BimodalScaleMsatMax is the maximum value for BimodalScaleMsat. We
        // limit it here to the fakeHopHintCapacity to avoid issues with hop
        // hint probability calculations.
        BimodalScaleMsatMax = lnwire.MilliSatoshi(
                1000 * fakeHopHintCapacity / 4,
        )

        // BimodalEstimatorName is used to identify the bimodal estimator.
        BimodalEstimatorName = "bimodal"
)

var (
        // ErrInvalidScale is returned when we get a scale of zero, or one
        // that exceeds the allowed maximum.
        ErrInvalidScale = errors.New("scale must be > 0 and sane")

        // ErrInvalidNodeWeight is returned when we get a node weight that is
        // out of range.
        ErrInvalidNodeWeight = errors.New("node weight must be in [0, 1]")

        // ErrInvalidDecayTime is returned when we get a decay time of zero or
        // below.
        ErrInvalidDecayTime = errors.New("decay time must be larger than zero")

        // ErrZeroCapacity is returned when we encounter a channel with zero
        // capacity in probability estimation.
        ErrZeroCapacity = errors.New("capacity must be larger than zero")
)

// BimodalConfig contains configuration for our probability estimator.
type BimodalConfig struct {
        // BimodalNodeWeight defines how strongly previous forwardings on
        // other channels of a router should be taken into account when
        // computing a channel's probability to route. The allowed values are
        // in the range [0, 1], where a value of 0 means that only direct
        // information about a channel is taken into account.
        BimodalNodeWeight float64

        // BimodalScaleMsat describes the scale over which channels
        // statistically have some liquidity left. The value determines how
        // quickly the bimodal distribution drops off from the edges of a
        // channel. A larger value (compared to typical channel capacities)
        // means that the drop-off is slow and that channel balances are
        // distributed more uniformly. A small value leads to the assumption
        // of very unbalanced channels.
        BimodalScaleMsat lnwire.MilliSatoshi

        // BimodalDecayTime is the scale for the exponential information decay
        // over time for previous successes or failures.
        BimodalDecayTime time.Duration
}

// validate checks the configuration of the estimator for allowed values.
func (p BimodalConfig) validate() error {
        if p.BimodalDecayTime <= 0 {
                return fmt.Errorf("%v: %w", BimodalEstimatorName,
                        ErrInvalidDecayTime)
        }

        if p.BimodalNodeWeight < 0 || p.BimodalNodeWeight > 1 {
                return fmt.Errorf("%v: %w", BimodalEstimatorName,
                        ErrInvalidNodeWeight)
        }

        if p.BimodalScaleMsat == 0 || p.BimodalScaleMsat > BimodalScaleMsatMax {
                return fmt.Errorf("%v: %w", BimodalEstimatorName,
                        ErrInvalidScale)
        }

        return nil
}

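// For illustration, a minimal sketch (hypothetical values) of how an
// out-of-range parameter surfaces; the error wraps one of the sentinel
// errors above, prefixed with the estimator name:
//
//      cfg := DefaultBimodalConfig()
//      cfg.BimodalNodeWeight = 1.5 // outside [0, 1]
//      if err := cfg.validate(); err != nil {
//              // err reads "bimodal: node weight must be in [0, 1]".
//      }
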
// DefaultBimodalConfig returns the default configuration for the estimator.
func DefaultBimodalConfig() BimodalConfig {
        return BimodalConfig{
                BimodalNodeWeight: DefaultBimodalNodeWeight,
                BimodalScaleMsat:  DefaultBimodalScaleMsat,
                BimodalDecayTime:  DefaultBimodalDecayTime,
        }
}

// BimodalEstimator returns node and pair probabilities based on historical
// payment results and a liquidity distribution model of the LN. Its main
// function is to estimate the direct channel probability based on a depleted
// liquidity distribution model, with additional information decay over time.
// A per-node probability can be mixed with the direct probability, taking
// into account successes/failures on other channels of the forwarder.
type BimodalEstimator struct {
        // BimodalConfig contains configuration options for our estimator.
        BimodalConfig
}

// NewBimodalEstimator creates a new BimodalEstimator.
func NewBimodalEstimator(cfg BimodalConfig) (*BimodalEstimator, error) {
        if err := cfg.validate(); err != nil {
                return nil, err
        }

        return &BimodalEstimator{
                BimodalConfig: cfg,
        }, nil
}

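// A minimal usage sketch, assuming the defaults are acceptable; the
// constructor runs validate and rejects out-of-range configs:
//
//      estimator, err := NewBimodalEstimator(DefaultBimodalConfig())
//      if err != nil {
//              // Unreachable with the default config.
//      }
//      fmt.Println(estimator) // estimator type: bimodal, decay time: ...
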
// Compile-time checks that interfaces are implemented.
var _ Estimator = (*BimodalEstimator)(nil)
var _ estimatorConfig = (*BimodalConfig)(nil)

// Config returns the current configuration of the estimator.
func (p *BimodalEstimator) Config() estimatorConfig {
        return p.BimodalConfig
}

// String returns the estimator's configuration as a string representation.
func (p *BimodalEstimator) String() string {
        return fmt.Sprintf("estimator type: %v, decay time: %v, liquidity "+
                "scale: %v, node weight: %v", BimodalEstimatorName,
                p.BimodalDecayTime, p.BimodalScaleMsat, p.BimodalNodeWeight)
}

// PairProbability estimates the probability of successfully traversing to
// toNode based on historical payment outcomes for the from node. Those
// outcomes are passed in via the results parameter.
func (p *BimodalEstimator) PairProbability(now time.Time,
        results NodeResults, toNode route.Vertex, amt lnwire.MilliSatoshi,
        capacity btcutil.Amount) float64 {

        // We first compute the probability for the desired hop taking into
        // account previous knowledge.
        directProbability := p.directProbability(
                now, results, toNode, amt, lnwire.NewMSatFromSatoshis(capacity),
        )

        // The final probability is computed by taking into account other
        // channels of the from node.
        return p.calculateProbability(directProbability, now, results, toNode)
}

// LocalPairProbability computes the probability to reach toNode given a set
// of previous learnings.
func (p *BimodalEstimator) LocalPairProbability(now time.Time,
        results NodeResults, toNode route.Vertex) float64 {

        // For direct local probabilities we assume to know exactly how much
        // we can send over a channel, which assumes that channels are active
        // and have enough liquidity.
        directProbability := 1.0

        // If we had an unexpected failure for this node, we reduce the
        // probability for some time to avoid infinite retries.
        result, ok := results[toNode]
        if ok && !result.FailTime.IsZero() {
                timeAgo := now.Sub(result.FailTime)

                // We only expect results in the past to get a probability
                // between 0 and 1.
                if timeAgo < 0 {
                        timeAgo = 0
                }
                exponent := -float64(timeAgo) / float64(p.BimodalDecayTime)
                directProbability -= math.Exp(exponent)
        }

        return directProbability
}

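// The penalty decays exponentially: immediately after a failure the local
// probability is 1 - e^0 = 0, and with the default decay time of one week a
// failure from 3.5 days ago yields 1 - e^(-0.5) ≈ 0.39, recovering towards 1
// as the failure ages.
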
// directProbability computes the probability to reach a node based on the
// liquidity distribution in the LN.
func (p *BimodalEstimator) directProbability(now time.Time,
        results NodeResults, toNode route.Vertex, amt lnwire.MilliSatoshi,
        capacity lnwire.MilliSatoshi) float64 {

        // We first determine the time-adjusted success and failure amounts
        // and then compute a probability. We know that we can send a zero
        // amount.
        successAmount := lnwire.MilliSatoshi(0)

        // We know that we cannot send the full capacity.
        failAmount := capacity

        // If we have information about past successes or failures, we modify
        // them with a time decay.
        result, ok := results[toNode]
        if ok {
                // Apply a time decay for the amount we cannot send.
                if !result.FailTime.IsZero() {
                        failAmount = cannotSend(
                                result.FailAmt, capacity, now, result.FailTime,
                                p.BimodalDecayTime,
                        )
                }

                // Apply a time decay for the amount we can send.
                if !result.SuccessTime.IsZero() {
                        successAmount = canSend(
                                result.SuccessAmt, now, result.SuccessTime,
                                p.BimodalDecayTime,
                        )
                }
        }

        // Compute the direct channel probability.
        probability, err := p.probabilityFormula(
                capacity, successAmount, failAmount, amt,
        )
        if err != nil {
                log.Errorf("error computing probability to node: %v "+
                        "(node: %v, results: %v, amt: %v, capacity: %v)",
                        err, toNode, results, amt, capacity)

                return 0.0
        }

        return probability
}

// calculateProbability computes the total hop probability combining the
// channel probability and historic forwarding data of other channels of the
// node we try to send from.
//
// Goals:
// * We want to incentivize good routing nodes: the more routable channels a
// node has, the more we want to incentivize (vice versa for failures).
// -> We reduce/increase the direct probability depending on past
// failures/successes for other channels of the node.
//
// * We want to be forgiving/give other nodes a chance as well: we want to
// forget about (non-)routable channels over time.
// -> We weight the successes/failures with a time decay such that they will
// not influence the total probability if a long time went by.
//
// * If we don't have other info, we want to rely solely on the direct
// probability.
//
// * We want to be able to specify how important the other channels are
// compared to the direct channel.
// -> Introduce a node weight factor that weights the direct probability
// against the node-wide average. The larger the node weight, the more
// important other channels of the node are.
//
// How do failures on low-fee nodes redirect routing to higher-fee nodes?
// Assumptions:
// * attemptCostPPM of 1000 PPM
// * constant direct channel probability of P0 (usually 0.5 for large amounts)
// * node weight w of 0.2
//
// The question we want to answer is:
// How often would a zero-fee node be tried (even if there were failures for
// its other channels) over trying a high-fee node with 2000 PPM and no direct
// knowledge about the channel to send over?
//
// The probability of a route of length l is P(l) = l * P0.
//
// The total probability after n failures (with the method implemented here)
// is:
// P(l, n) = P(l-1) * P(n)
// = P(l-1) * (P0 + n*0) / (1 + n*w)
// = P(l) / (1 + n*w)
//
// Condition for a high-fee channel to overcome a low-fee channel in the
// Dijkstra weight function (only looking at fee and probability PPM terms):
// highFeePPM + attemptCostPPM * 1/P(l) = 0 PPM + attemptCostPPM * 1/P(l, n)
// highFeePPM/attemptCostPPM = 1/P(l, n) - 1/P(l) =
// = (1 + n*w)/P(l) - 1/P(l) =
// = n*w/P(l)
//
// Therefore:
// n = (highFeePPM/attemptCostPPM) * (P(l)/w) =
// = (2000/1000) * 0.5 * l / w = l/w
//
// For a one-hop route we get:
// n = 1/0.2 = 5 tolerated failures
//
// For a three-hop route we get:
// n = 3/0.2 = 15 tolerated failures
//
// For more details on the behavior see tests.
func (p *BimodalEstimator) calculateProbability(directProbability float64,
        now time.Time, results NodeResults, toNode route.Vertex) float64 {

        // If we don't take other channels into account, we can return early.
        if p.BimodalNodeWeight == 0.0 {
                return directProbability
        }

        // If we have up-to-date information about the channel we want to
        // use, i.e. the info stems from results no longer ago than the decay
        // time, we only use the direct probability. This is needed in order
        // to avoid that other previous results (on all other channels of the
        // same routing node) distort and pin the calculated probability even
        // if we have accurate direct information. This helps to dip the
        // probability below the min probability in case of failures, to
        // start the splitting process.
        directResult, ok := results[toNode]
        if ok {
                latest := directResult.SuccessTime
                if directResult.FailTime.After(latest) {
                        latest = directResult.FailTime
                }

                // We use BimodalDecayTime to judge the currentness of the
                // data. It is the time scale on which we assume to have lost
                // information.
                if now.Sub(latest) < p.BimodalDecayTime {
                        log.Tracef("Using direct probability for node %v: %v",
                                toNode, directResult)

                        return directProbability
                }
        }

        // w is a parameter which determines how strongly the other channels
        // of a node should be incorporated, the higher the stronger.
        w := p.BimodalNodeWeight

        // dt determines the timeliness of the previous successes/failures
        // to be taken into account.
        dt := float64(p.BimodalDecayTime)

        // The direct channel probability is weighted fully, all other
        // results are weighted according to how recent the information is.
        totalProbabilities := directProbability
        totalWeights := 1.0

        for peer, result := range results {
                // We don't include the direct hop probability here because
                // it is already included in totalProbabilities.
                if peer == toNode {
                        continue
                }

                // We add probabilities weighted by how recent the info is.
                var weight float64
                if result.SuccessAmt > 0 {
                        exponent := -float64(now.Sub(result.SuccessTime)) / dt
                        weight = math.Exp(exponent)
                        totalProbabilities += w * weight
                        totalWeights += w * weight
                }
                if result.FailAmt > 0 {
                        exponent := -float64(now.Sub(result.FailTime)) / dt
                        weight = math.Exp(exponent)

                        // Failures don't add to the total success
                        // probability.
                        totalWeights += w * weight
                }
        }

        return totalProbabilities / totalWeights
}

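// A worked example of the weighting above, with hypothetical numbers: for a
// direct probability of 0.5 and w = 0.2, a single fresh success on another
// channel (decay factor ≈ 1) counts as probability 1 with weight 0.2 and
// gives (0.5 + 0.2) / (1 + 0.2) ≈ 0.58, while a single fresh failure only
// adds to the weights and gives 0.5 / 1.2 ≈ 0.42.
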
// canSend returns the sendable amount over the channel, respecting time
// decay. canSend approaches zero if we wait much longer than the decay time.
func canSend(successAmount lnwire.MilliSatoshi, now, successTime time.Time,
        decayConstant time.Duration) lnwire.MilliSatoshi {

        // The factor approaches 0 for a successTime a long time in the past
        // and is 1 when the successTime is now.
        factor := math.Exp(
                -float64(now.Sub(successTime)) / float64(decayConstant),
        )

        canSend := factor * float64(successAmount)

        return lnwire.MilliSatoshi(canSend)
}

// cannotSend returns the unsendable amount over the channel, respecting time
// decay. cannotSend approaches the capacity if we wait much longer than the
// decay time.
func cannotSend(failAmount, capacity lnwire.MilliSatoshi, now,
        failTime time.Time, decayConstant time.Duration) lnwire.MilliSatoshi {

        if failAmount > capacity {
                failAmount = capacity
        }

        // The factor approaches 0 for a failTime a long time in the past and
        // is 1 when the failTime is now.
        factor := math.Exp(
                -float64(now.Sub(failTime)) / float64(decayConstant),
        )

        cannotSend := capacity - lnwire.MilliSatoshi(
                factor*float64(capacity-failAmount),
        )

        return cannotSend
}

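// Both helpers decay towards the no-information bounds of zero and capacity.
// With hypothetical round numbers and a decay constant of one week: a success
// of 100,000 msat observed a week ago decays to canSend ≈ e^(-1) * 100,000 ≈
// 36,800 msat, while a failure at 100,000 msat on a 200,000 msat channel
// decays to cannotSend = 200,000 - e^(-1) * 100,000 ≈ 163,200 msat.
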
// primitive computes the indefinite integral of our assumed (normalized)
// liquidity probability distribution. The distribution of liquidity x here
// is the function P(x) ~ exp(-x/s) + exp((x-c)/s), i.e., two exponentials
// residing at the ends of channels. This means that we expect liquidity to
// be at either side of the channel with capacity c. The s parameter (scale)
// defines how far the liquidity leaks into the channel. A very low scale
// assumes completely unbalanced channels, a very high scale assumes a random
// distribution. More details can be found in
// https://github.com/lightningnetwork/lnd/issues/5988#issuecomment-1131234858.
func (p *BimodalEstimator) primitive(c, x float64) float64 {
        s := float64(p.BimodalScaleMsat)

        // The indefinite integral of P(x) is given by
        // Int P(x) dx = H(x) = s * (-e(-x/s) + e((x-c)/s)),
        // and its norm from 0 to c can be computed from it,
        // norm = [H(x)]_0^c = s * (-e(-c/s) + 1 - (-1 + e(-c/s))).
        ecs := math.Exp(-c / s)
        exs := math.Exp(-x / s)

        // It would be possible to split the next term and reuse the factors
        // from before, but this can lead to numerical issues with large
        // numbers.
        excs := math.Exp((x - c) / s)

        // norm can only become zero if c is zero, which we sorted out before
        // calling this method.
        norm := -2*ecs + 2

        // We end up with the primitive function of the normalized P(x).
        return (-exs + excs) / norm
}

// integral computes the integral of our liquidity distribution from the
// lower to the upper value.
func (p *BimodalEstimator) integral(capacity, lower, upper float64) float64 {
        if lower < 0 || lower > upper {
                log.Errorf("probability integral limits nonsensical: "+
                        "capacity: %v lower: %v upper: %v", capacity, lower,
                        upper)

                return 0.0
        }

        return p.primitive(capacity, upper) - p.primitive(capacity, lower)
}

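// Since primitive is normalized, integrating over the full liquidity range
// yields one. A minimal sanity-check sketch (hypothetical values, in msat):
//
//      p, _ := NewBimodalEstimator(DefaultBimodalConfig())
//      c := 1_000_000_000.0
//      fmt.Println(p.integral(c, 0, c))   // ≈ 1.0
//      fmt.Println(p.integral(c, c/2, c)) // ≈ 0.5, by symmetry of P(x)
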
// probabilityFormula computes the expected probability for a payment of
// amountMsat given prior learnings for a channel of certain capacity.
// successAmountMsat and failAmountMsat stand for the unsettled success and
// failure amounts, respectively. The formula is derived using the formalism
// presented in Pickhardt et al., https://arxiv.org/abs/2103.08576.
func (p *BimodalEstimator) probabilityFormula(capacityMsat, successAmountMsat,
        failAmountMsat, amountMsat lnwire.MilliSatoshi) (float64, error) {

        // Convert to positive-valued floats.
        capacity := float64(capacityMsat)
        successAmount := float64(successAmountMsat)
        failAmount := float64(failAmountMsat)
        amount := float64(amountMsat)

        // In order for this formula to give reasonable results, we need to
        // have an estimate of the capacity of a channel (or edge between
        // nodes).
        if capacity == 0.0 {
                return 0, ErrZeroCapacity
        }

        // We cannot send more than the capacity.
        if amount > capacity {
                return 0.0, nil
        }

        // Mission control may have some outdated values; we correct them
        // here.
        // TODO(bitromortac): there may be better decisions to make in these
        //  cases, e.g., resetting failAmount=cap and successAmount=0.

        // failAmount should be capacity at max.
        if failAmount > capacity {
                log.Debugf("Correcting failAmount %v to capacity %v",
                        failAmount, capacity)

                failAmount = capacity
        }

        // successAmount should be capacity at max.
        if successAmount > capacity {
                log.Debugf("Correcting successAmount %v to capacity %v",
                        successAmount, capacity)

                successAmount = capacity
        }

        // The next statement is a safety check against an illogical
        // condition; otherwise the renormalization integral would become
        // zero. This may happen if a large channel gets closed and smaller
        // ones remain, but it should recover with the time decay.
        if failAmount <= successAmount {
                log.Tracef("fail amount (%v) is smaller than or equal to "+
                        "the success amount (%v) for capacity (%v)",
                        failAmountMsat, successAmountMsat, capacityMsat)

                return 0.0, nil
        }

        // We cannot send more than the fail amount.
        if amount >= failAmount {
                return 0.0, nil
        }

        // The success probability for payment amount a is the integral over
        // the prior distribution P(x), the probability to find liquidity
        // between the amount a and channel capacity c (or failAmount a_f):
        // P(X >= a | X < a_f) = Integral_{a}^{a_f} P(x) dx
        prob := p.integral(capacity, amount, failAmount)
        if math.IsNaN(prob) {
                return 0.0, fmt.Errorf("non-normalized probability is NaN, "+
                        "capacity: %v, amount: %v, fail amount: %v",
                        capacity, amount, failAmount)
        }

        // If we have payment information, we need to adjust the prior
        // distribution P(x) and get the posterior distribution by
        // renormalizing the prior distribution in such a way that the
        // probability mass lies between a_s and a_f.
        reNorm := p.integral(capacity, successAmount, failAmount)
        if math.IsNaN(reNorm) {
                return 0.0, fmt.Errorf("normalization factor is NaN, "+
                        "capacity: %v, success amount: %v, fail amount: %v",
                        capacity, successAmount, failAmount)
        }

        // The normalization factor can only be zero if the success amount is
        // equal to or larger than the fail amount. This should not happen as
        // we have checked this scenario above.
        if reNorm == 0.0 {
                return 0.0, fmt.Errorf("normalization factor is zero, "+
                        "capacity: %v, success amount: %v, fail amount: %v",
                        capacity, successAmount, failAmount)
        }

        prob /= reNorm

        // Note that for payment amounts smaller than successAmount, we can
        // get a value larger than unity, which we cap here to get a proper
        // probability.
        if prob > 1.0 {
                if amount > successAmount {
                        return 0.0, fmt.Errorf("unexpected large probability "+
                                "(%v) capacity: %v, amount: %v, success "+
                                "amount: %v, fail amount: %v", prob, capacity,
                                amount, successAmount, failAmount)
                }

                return 1.0, nil
        } else if prob < 0.0 {
                return 0.0, fmt.Errorf("negative probability "+
                        "(%v) capacity: %v, amount: %v, success "+
                        "amount: %v, fail amount: %v", prob, capacity,
                        amount, successAmount, failAmount)
        }

        return prob, nil
}
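
// A worked example with hypothetical round numbers: with no prior results,
// successAmount = 0 and failAmount = capacity, so reNorm = 1 and the result
// is simply the prior mass above the payment amount; by symmetry of P(x),
// sending half the capacity then has probability ≈ 0.5 for any scale s. A
// recent failure at 0.6 * capacity restricts the probability mass, and the
// same payment instead gets integral(c, 0.5c, 0.6c) / integral(c, 0, 0.6c),
// which is much smaller.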