/routing/probability_bimodal.go

package routing

import (
        "fmt"
        "math"
        "time"

        "github.com/btcsuite/btcd/btcutil"
        "github.com/go-errors/errors"
        "github.com/lightningnetwork/lnd/lnwire"
        "github.com/lightningnetwork/lnd/routing/route"
)

const (
        // DefaultBimodalScaleMsat is the default value for BimodalScaleMsat in
        // BimodalConfig. It describes the distribution of funds in the LN based
        // on empirical findings. We assume an unbalanced network by default.
        DefaultBimodalScaleMsat = lnwire.MilliSatoshi(300_000_000)

        // DefaultBimodalNodeWeight is the default value for the
        // BimodalNodeWeight in BimodalConfig. It is chosen such that past
        // forwardings on other channels of a router are only slightly taken
        // into account.
        DefaultBimodalNodeWeight = 0.2

        // DefaultBimodalDecayTime is the default value for BimodalDecayTime.
        // We will forget about previous learnings about channel liquidity on
        // the timescale of about a week.
        DefaultBimodalDecayTime = 7 * 24 * time.Hour

        // BimodalScaleMsatMax is the maximum value for BimodalScaleMsat. We
        // limit it here to the fakeHopHintCapacity to avoid issues with hop
        // hint probability calculations.
        BimodalScaleMsatMax = lnwire.MilliSatoshi(
                1000 * fakeHopHintCapacity / 4,
        )

        // BimodalEstimatorName is used to identify the bimodal estimator.
        BimodalEstimatorName = "bimodal"
)

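// For orientation (a simple unit conversion): the default scale of
// 300_000_000 msat is 300_000 sat, i.e. 0.003 BTC. This is small compared to
// typical channel capacities, encoding the default assumption of rather
// unbalanced channels.
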
var (
        // ErrInvalidScale is returned when we get a scale that is zero or
        // larger than the allowed maximum.
        ErrInvalidScale = errors.New("scale must be > 0 and sane")

        // ErrInvalidNodeWeight is returned when we get a node weight that is
        // out of range.
        ErrInvalidNodeWeight = errors.New("node weight must be in [0, 1]")

        // ErrInvalidDecayTime is returned when we get a decay time of zero or
        // below.
        ErrInvalidDecayTime = errors.New("decay time must be larger than zero")

        // ErrZeroCapacity is returned when we encounter a channel with zero
        // capacity in probability estimation.
        ErrZeroCapacity = errors.New("capacity must be larger than zero")
)

// BimodalConfig contains configuration for our probability estimator.
type BimodalConfig struct {
        // BimodalNodeWeight defines how strongly other previous forwardings on
        // channels of a router should be taken into account when computing a
        // channel's probability to route. The allowed values are in the range
        // [0, 1], where a value of 0 means that only direct information about a
        // channel is taken into account.
        BimodalNodeWeight float64

        // BimodalScaleMsat describes the scale over which channels
        // statistically have some liquidity left. The value determines how
        // quickly the bimodal distribution drops off from the edges of a
        // channel. A larger value (compared to typical channel capacities)
        // means that the drop off is slow and that channel balances are
        // distributed more uniformly. A small value leads to the assumption of
        // very unbalanced channels.
        BimodalScaleMsat lnwire.MilliSatoshi

        // BimodalDecayTime is the scale for the exponential information decay
        // over time for previous successes or failures.
        BimodalDecayTime time.Duration
}

// validate checks the configuration of the estimator for allowed values.
func (p BimodalConfig) validate() error {
        if p.BimodalDecayTime <= 0 {
                return fmt.Errorf("%v: %w", BimodalEstimatorName,
                        ErrInvalidDecayTime)
        }

        if p.BimodalNodeWeight < 0 || p.BimodalNodeWeight > 1 {
                return fmt.Errorf("%v: %w", BimodalEstimatorName,
                        ErrInvalidNodeWeight)
        }

        if p.BimodalScaleMsat == 0 || p.BimodalScaleMsat > BimodalScaleMsatMax {
                return fmt.Errorf("%v: %w", BimodalEstimatorName,
                        ErrInvalidScale)
        }

        return nil
}

// DefaultBimodalConfig returns the default configuration for the estimator.
func DefaultBimodalConfig() BimodalConfig {
        return BimodalConfig{
                BimodalNodeWeight: DefaultBimodalNodeWeight,
                BimodalScaleMsat:  DefaultBimodalScaleMsat,
                BimodalDecayTime:  DefaultBimodalDecayTime,
        }
}

// BimodalEstimator returns node and pair probabilities derived from
// historical payment results, using a liquidity distribution model of the LN.
// Its main function is to estimate the direct channel probability based on a
// depleted liquidity distribution model, with additional information decay
// over time. A per-node probability can be mixed with the direct probability,
// taking into account successes/failures on other channels of the forwarder.
type BimodalEstimator struct {
        // BimodalConfig contains configuration options for our estimator.
        BimodalConfig
}

// NewBimodalEstimator creates a new BimodalEstimator.
func NewBimodalEstimator(cfg BimodalConfig) (*BimodalEstimator, error) {
        if err := cfg.validate(); err != nil {
                return nil, err
        }

        return &BimodalEstimator{
                BimodalConfig: cfg,
        }, nil
}

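// A minimal usage sketch (illustrative only; results, toNode, amt and
// capacity are hypothetical variables from the caller's scope): construct
// the estimator from the default config and query a pair probability. An
// error is only returned if the config fails validate(), e.g. a zero scale
// or a node weight outside [0, 1].
//
//      estimator, err := NewBimodalEstimator(DefaultBimodalConfig())
//      if err != nil {
//              return err
//      }
//      prob := estimator.PairProbability(
//              time.Now(), results, toNode, amt, capacity,
//      )
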
// Compile-time checks that interfaces are implemented.
var _ Estimator = (*BimodalEstimator)(nil)
var _ estimatorConfig = (*BimodalConfig)(nil)

// Config returns the current configuration of the estimator.
func (p *BimodalEstimator) Config() estimatorConfig {
        return p.BimodalConfig
}

// String returns the estimator's configuration as a string representation.
func (p *BimodalEstimator) String() string {
        return fmt.Sprintf("estimator type: %v, decay time: %v, liquidity "+
                "scale: %v, node weight: %v", BimodalEstimatorName,
                p.BimodalDecayTime, p.BimodalScaleMsat, p.BimodalNodeWeight)
}

// PairProbability estimates the probability of successfully traversing to
// toNode based on historical payment outcomes for the from node. Those
// outcomes are passed in via the results parameter.
func (p *BimodalEstimator) PairProbability(now time.Time,
        results NodeResults, toNode route.Vertex, amt lnwire.MilliSatoshi,
        capacity btcutil.Amount) float64 {

        // We first compute the probability for the desired hop taking into
        // account previous knowledge.
        directProbability := p.directProbability(
                now, results, toNode, amt, lnwire.NewMSatFromSatoshis(capacity),
        )

        // The final probability is computed by taking into account other
        // channels of the from node.
        return p.calculateProbability(directProbability, now, results, toNode)
}

// LocalPairProbability computes the probability to reach toNode given a set
// of previous learnings.
func (p *BimodalEstimator) LocalPairProbability(now time.Time,
        results NodeResults, toNode route.Vertex) float64 {

        // For direct local probabilities we assume to know exactly how much we
        // can send over a channel, which assumes that channels are active and
        // have enough liquidity.
        directProbability := 1.0

        // If we had an unexpected failure for this node, we reduce the
        // probability for some time to avoid infinite retries.
        result, ok := results[toNode]
        if ok && !result.FailTime.IsZero() {
                timeAgo := now.Sub(result.FailTime)

                // We only expect results in the past to get a probability
                // between 0 and 1.
                if timeAgo < 0 {
                        timeAgo = 0
                }
                exponent := -float64(timeAgo) / float64(p.BimodalDecayTime)
                directProbability -= math.Exp(exponent)
        }

        return directProbability
}

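// Worked decay example (hypothetical values): with the default decay time of
// one week, a local failure observed half a week ago gives
//
//      directProbability = 1 - exp(-0.5) ≈ 0.39,
//
// while a failure observed five weeks ago gives 1 - exp(-5) ≈ 0.99, i.e. the
// penalty has almost fully decayed away.
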
// directProbability computes the probability to reach a node based on the
// liquidity distribution in the LN.
func (p *BimodalEstimator) directProbability(now time.Time,
        results NodeResults, toNode route.Vertex, amt lnwire.MilliSatoshi,
        capacity lnwire.MilliSatoshi) float64 {

        // We first determine the time-adjusted success and failure amounts to
        // then compute a probability. We know that we can send a zero amount.
        successAmount := lnwire.MilliSatoshi(0)

        // We know that we cannot send the full capacity.
        failAmount := capacity

        // If we have information about past successes or failures, we modify
        // them with a time decay.
        result, ok := results[toNode]
        if ok {
                // Apply a time decay for the amount we cannot send.
                if !result.FailTime.IsZero() {
                        failAmount = cannotSend(
                                result.FailAmt, capacity, now, result.FailTime,
                                p.BimodalDecayTime,
                        )
                }

                // Apply a time decay for the amount we can send.
                if !result.SuccessTime.IsZero() {
                        successAmount = canSend(
                                result.SuccessAmt, now, result.SuccessTime,
                                p.BimodalDecayTime,
                        )
                }
        }

        // Compute the direct channel probability.
        probability, err := p.probabilityFormula(
                capacity, successAmount, failAmount, amt,
        )
        if err != nil {
                log.Errorf("error computing probability to node: %v "+
                        "(node: %v, results: %v, amt: %v, capacity: %v)",
                        err, toNode, results, amt, capacity)

                return 0.0
        }

        return probability
}

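// Illustration: without any recorded results for toNode, the bounds stay at
// successAmount = 0 and failAmount = capacity, so probabilityFormula
// evaluates the plain prior distribution. For a scale much smaller than the
// capacity this yields roughly 0.5 for mid-range amounts (see the worked
// illustration after probabilityFormula below).
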
// calculateProbability computes the total hop probability combining the
// channel probability and historic forwarding data of other channels of the
// node we try to send from.
//
// Goals:
// * We want to incentivize good routing nodes: the more routable channels a
// node has, the more we want to incentivize (vice versa for failures).
// -> We reduce/increase the direct probability depending on past
// failures/successes for other channels of the node.
//
// * We want to be forgiving/give other nodes a chance as well: we want to
// forget about (non-)routable channels over time.
// -> We weight the successes/failures with a time decay such that they will
// not influence the total probability if a long time went by.
//
// * If we don't have other info, we want to solely rely on the direct
// probability.
//
// * We want to be able to specify how important the other channels are
// compared to the direct channel.
// -> Introduce a node weight factor that weights the direct probability
// against the node-wide average. The larger the node weight, the more
// important other channels of the node are.
//
// How do failures on low fee nodes redirect routing to higher fee nodes?
// Assumptions:
// * attemptCostPPM of 1000 PPM
// * constant direct channel probability of P0 (usually 0.5 for large amounts)
// * node weight w of 0.2
//
// The question we want to answer is:
// How often would a zero-fee node be tried (even if there were failures for
// its other channels) over trying a high-fee node with 2000 PPM and no direct
// knowledge about the channel to send over?
//
// The probability of a route of length l is P(l) = P0^l, the product of the
// individual hop probabilities.
//
// The total probability after n failures (with the implemented method here)
// is:
// P(l, n) = P(l-1) * P(n)
// = P(l-1) * (P0 + n*0) / (1 + n*w)
// = P(l) / (1 + n*w)
//
// Condition for a high-fee channel to overcome a low fee channel in the
// Dijkstra weight function (only looking at fee and probability PPM terms):
// highFeePPM + attemptCostPPM * 1/P(l) = 0PPM + attemptCostPPM * 1/P(l, n)
// highFeePPM/attemptCostPPM = 1/P(l, n) - 1/P(l) =
// = (1 + n*w)/P(l) - 1/P(l) =
// = n*w/P(l)
//
// Therefore:
// n = (highFeePPM/attemptCostPPM) * (P(l)/w) =
// = (2000/1000) * P0^l / w = 2 * 0.5^l / w
//
// For a one-hop route we get:
// n = 2 * 0.5 / 0.2 = 5 tolerated failures
//
// For a three-hop route we get:
// n = 2 * 0.5^3 / 0.2 = 1.25 tolerated failures
//
// For more details on the behavior see tests.
func (p *BimodalEstimator) calculateProbability(directProbability float64,
        now time.Time, results NodeResults, toNode route.Vertex) float64 {

        // If we don't take other channels into account, we can return early.
        if p.BimodalNodeWeight == 0.0 {
                return directProbability
        }

        // If we have up-to-date information about the channel we want to use,
        // i.e. the info stems from results not longer ago than the decay time,
        // we will only use the direct probability. This is needed in order to
        // avoid that other previous results (on all other channels of the same
        // routing node) will distort and pin the calculated probability even if
        // we have accurate direct information. This helps to dip the
        // probability below the min probability in case of failures, to start
        // the splitting process.
        directResult, ok := results[toNode]
        if ok {
                latest := directResult.SuccessTime
                if directResult.FailTime.After(latest) {
                        latest = directResult.FailTime
                }

                // We use BimodalDecayTime to judge the currentness of the
                // data. It is the time scale on which we assume to have lost
                // information.
                if now.Sub(latest) < p.BimodalDecayTime {
                        log.Tracef("Using direct probability for node %v: %v",
                                toNode, directResult)

                        return directProbability
                }
        }

        // w is a parameter which determines how strongly the other channels of
        // a node should be incorporated, the higher the stronger.
        w := p.BimodalNodeWeight

        // dt determines the timeliness of the previous successes/failures
        // to be taken into account.
        dt := float64(p.BimodalDecayTime)

        // The direct channel probability is weighted fully, all other results
        // are weighted according to how recent the information is.
        totalProbabilities := directProbability
        totalWeights := 1.0

        for peer, result := range results {
                // We don't include the direct hop probability here because it
                // is already included in totalProbabilities.
                if peer == toNode {
                        continue
                }

                // We add probabilities weighted by how recent the info is.
                var weight float64
                if result.SuccessAmt > 0 {
                        exponent := -float64(now.Sub(result.SuccessTime)) / dt
                        weight = math.Exp(exponent)
                        totalProbabilities += w * weight
                        totalWeights += w * weight
                }
                if result.FailAmt > 0 {
                        exponent := -float64(now.Sub(result.FailTime)) / dt
                        weight = math.Exp(exponent)

                        // Failures don't add to total success probability.
                        totalWeights += w * weight
                }
        }

        return totalProbabilities / totalWeights
}

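// Worked mixing example (hypothetical values): with w = 0.2, a direct
// probability of 0.5 and two fresh failures (weight ≈ 1) on other channels
// of the node:
//
//      totalProbabilities = 0.5
//      totalWeights       = 1.0 + 0.2 + 0.2 = 1.4
//      result             = 0.5 / 1.4 ≈ 0.36
//
// A fresh success on another channel would instead add 0.2 to both sums,
// giving (0.5 + 0.2) / 1.2 ≈ 0.58 and pulling the result up.
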
// canSend returns the sendable amount over the channel, respecting time
// decay. canSend approaches zero, if we wait for a much longer time than the
// decay time.
func canSend(successAmount lnwire.MilliSatoshi, now, successTime time.Time,
        decayConstant time.Duration) lnwire.MilliSatoshi {

        // The factor approaches 0 for successTime a long time in the past,
        // is 1 when the successTime is now.
        factor := math.Exp(
                -float64(now.Sub(successTime)) / float64(decayConstant),
        )

        canSend := factor * float64(successAmount)

        return lnwire.MilliSatoshi(canSend)
}

// cannotSend returns the not sendable amount over the channel, respecting
// time decay. cannotSend approaches the capacity, if we wait for a much
// longer time than the decay time.
func cannotSend(failAmount, capacity lnwire.MilliSatoshi, now,
        failTime time.Time, decayConstant time.Duration) lnwire.MilliSatoshi {

        if failAmount > capacity {
                failAmount = capacity
        }

        // The factor approaches 0 for failTime a long time in the past and it
        // is 1 when the failTime is now.
        factor := math.Exp(
                -float64(now.Sub(failTime)) / float64(decayConstant),
        )

        cannotSend := capacity - lnwire.MilliSatoshi(
                factor*float64(capacity-failAmount),
        )

        return cannotSend
}

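// Decay illustration (hypothetical values): for a 1_000_000 msat channel
// with a 600_000 msat success and an 800_000 msat failure, both observed
// exactly one decay time ago (factor = exp(-1) ≈ 0.37):
//
//      canSend    ≈ 0.37 * 600_000             ≈ 220_000 msat
//      cannotSend ≈ 1_000_000 - 0.37 * 200_000 ≈ 926_000 msat
//
// The success amount decays towards zero and the fail amount decays towards
// the full capacity, gradually restoring the no-knowledge bounds.
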
// primitive computes the indefinite integral of our assumed (normalized)
// liquidity probability distribution. The distribution of liquidity x here is
// the function P(x) ~ exp(-x/s) + exp((x-c)/s) + 1/c, i.e., two exponentials
// residing at the ends of channels. This means that we expect liquidity to be
// at either side of the channel with capacity c. The s parameter (scale)
// defines how far the liquidity leaks into the channel. A very low scale
// assumes completely unbalanced channels, a very high scale assumes a random
// distribution. More details can be found in
// https://github.com/lightningnetwork/lnd/issues/5988#issuecomment-1131234858.
// Additionally, we add a constant term 1/c to the distribution to avoid
// normalization issues and to fall back to a uniform distribution should the
// previous success and fail amounts contradict a bimodal distribution.
func (p *BimodalEstimator) primitive(c, x float64) float64 {
        s := float64(p.BimodalScaleMsat)

        // The indefinite integral of P(x) is given by
        // Int P(x) dx = H(x) = s * (-e(-x/s) + e((x-c)/s) + x/(c*s)),
        // and its norm from 0 to c can be computed from it,
        // norm = [H(x)]_0^c = s * (-e(-c/s) + 1 + 1/s -(-1 + e(-c/s))) =
        // = s * (-2*e(-c/s) + 2 + 1/s).
        // The prefactors s are left out, as they cancel out in the end.
        // norm can only become zero, if c is zero, which we sorted out before
        // calling this method.
        ecs := math.Exp(-c / s)
        norm := -2*ecs + 2 + 1/s

        // It would be possible to split the next term and reuse the factors
        // from before, but this can lead to numerical issues with large
        // numbers.
        excs := math.Exp((x - c) / s)
        exs := math.Exp(-x / s)

        // We end up with the primitive function of the normalized P(x).
        return (-exs + excs + x/(c*s)) / norm
}

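// Sanity check of the normalization (follows directly from the comment
// above): integrating the normalized distribution over the full channel
// range gives unity,
//
//      primitive(c, c) - primitive(c, 0)
//          = ((-e(-c/s) + 1 + 1/s) - (-1 + e(-c/s))) / norm
//          = (-2*e(-c/s) + 2 + 1/s) / norm = 1.
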
// integral computes the integral of our liquidity distribution from the
// lower to the upper value.
func (p *BimodalEstimator) integral(capacity, lower, upper float64) float64 {
        if lower < 0 || lower > upper {
                log.Errorf("probability integral limits nonsensical: "+
                        "capacity: %v lower: %v upper: %v", capacity, lower,
                        upper)

                return 0.0
        }

        return p.primitive(capacity, upper) - p.primitive(capacity, lower)
}

// probabilityFormula computes the expected probability for a payment of
// amountMsat given prior learnings for a channel of certain capacity.
// successAmountMsat and failAmountMsat stand for the unsettled success and
// failure amounts, respectively. The formula is derived using the formalism
// presented in Pickhardt et al., https://arxiv.org/abs/2103.08576.
func (p *BimodalEstimator) probabilityFormula(capacityMsat, successAmountMsat,
        failAmountMsat, amountMsat lnwire.MilliSatoshi) (float64, error) {

        // Convert to positive-valued floats.
        capacity := float64(capacityMsat)
        successAmount := float64(successAmountMsat)
        failAmount := float64(failAmountMsat)
        amount := float64(amountMsat)

        // In order for this formula to give reasonable results, we need to
        // have an estimate of the capacity of a channel (or edge between
        // nodes).
        if capacity == 0.0 {
                return 0, ErrZeroCapacity
        }

        // We cannot send more than the capacity.
        if amount > capacity {
                return 0.0, nil
        }

        // The next statement is a safety check against an illogical condition.
        // We discard the knowledge for the channel in that case since we have
        // inconsistent data.
        if failAmount <= successAmount {
                log.Warnf("Fail amount (%s) is smaller than or equal to the "+
                        "success amount (%s) for capacity (%s)",
                        failAmountMsat, successAmountMsat, capacityMsat)

                successAmount = 0
                failAmount = capacity
        }

        // Mission control may have some outdated values with regard to the
        // current channel capacity between a node pair. This can happen in
        // case a large parallel channel was closed or if a channel was
        // downscaled and can lead to success and/or failure amounts to be out
        // of the range [0, capacity]. We assume that the liquidity situation
        // of the channel is similar as before due to flow bias.

        // In case we have a large success we need to correct it to be in the
        // valid range. We set the success amount close to the capacity,
        // because we assume to still be able to send. Any possible failure
        // (that must in this case be larger than the capacity) is corrected
        // as well.
        if successAmount >= capacity {
                log.Debugf("Correcting success amount %s and failure amount "+
                        "%s to capacity %s", successAmountMsat,
                        failAmountMsat, capacityMsat)

                // We choose the success amount to be one less than the
                // capacity, to both fit success and failure amounts into the
                // capacity range in a consistent manner.
                successAmount = capacity - 1
                failAmount = capacity
        }

        // Having no or only a small success, but a large failure only needs
        // adjustment of the failure amount.
        if failAmount > capacity {
                log.Debugf("Correcting failure amount %s to capacity %s",
                        failAmountMsat, capacityMsat)

                failAmount = capacity
        }

        // We cannot send more than the fail amount.
        if amount >= failAmount {
                return 0.0, nil
        }

        // We can send the amount if it is smaller than the success amount.
        if amount <= successAmount {
                return 1.0, nil
        }

        // The success probability for payment amount a is the integral over
        // the prior distribution P(x), the probability to find liquidity
        // between the amount a and channel capacity c (or failAmount a_f):
        // P(X >= a | X < a_f) = Integral_{a}^{a_f} P(x) dx
        prob := p.integral(capacity, amount, failAmount)
        if math.IsNaN(prob) {
                return 0.0, fmt.Errorf("non-normalized probability is NaN, "+
                        "capacity: %v, amount: %v, fail amount: %v",
                        capacity, amount, failAmount)
        }

        // If we have payment information, we need to adjust the prior
        // distribution P(x) and get the posterior distribution by
        // renormalizing the prior distribution in such a way that the
        // probability mass lies between a_s and a_f.
        reNorm := p.integral(capacity, successAmount, failAmount)
        if math.IsNaN(reNorm) {
                return 0.0, fmt.Errorf("normalization factor is NaN, "+
                        "capacity: %v, success amount: %v, fail amount: %v",
                        capacity, successAmount, failAmount)
        }

        // The normalization factor can only be zero if the success amount is
        // equal or larger than the fail amount. This should not happen as we
        // have checked this scenario above.
        if reNorm == 0.0 {
                return 0.0, fmt.Errorf("normalization factor is zero, "+
                        "capacity: %v, success amount: %v, fail amount: %v",
                        capacity, successAmount, failAmount)
        }

        prob /= reNorm

        // Note that for payment amounts smaller than successAmount, we can get
        // a value larger than unity, which we cap here to get a proper
        // probability.
        if prob > 1.0 {
                if amount > successAmount {
                        return 0.0, fmt.Errorf("unexpected large probability "+
                                "(%v) capacity: %v, amount: %v, success "+
                                "amount: %v, fail amount: %v", prob, capacity,
                                amount, successAmount, failAmount)
                }

                return 1.0, nil
        } else if prob < 0.0 {
                return 0.0, fmt.Errorf("negative probability "+
                        "(%v) capacity: %v, amount: %v, success "+
                        "amount: %v, fail amount: %v", prob, capacity,
                        amount, successAmount, failAmount)
        }

        return prob, nil
}
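
// Worked illustration (hypothetical values): with no prior results we have
// successAmount = 0 and failAmount = capacity, so reNorm = 1 and prob is the
// plain prior integral from amount to capacity. For a scale s much smaller
// than the capacity c, the primitive is about -0.5 at x = 0, about 0 at
// mid-channel and about +0.5 at x = c, so a payment of half the capacity
// succeeds with probability ≈ 0.5: half of the liquidity mass sits at the
// far end of the channel.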