
lightningnetwork / lnd / 13536249039

26 Feb 2025 03:42AM UTC coverage: 57.462% (-1.4%) from 58.835%

Pull #8453 · github · Roasbeef
peer: update chooseDeliveryScript to gen script if needed

In this commit, we update `chooseDeliveryScript` to generate a new
script if needed. This allows us to fold in a few other lines that
always followed this function into this expanded function.

The tests have been updated accordingly.

Pull Request #8453: [4/4] - multi: integrate new rbf coop close FSM into the existing peer flow

275 of 1318 new or added lines in 22 files covered. (20.86%)

19521 existing lines in 257 files now uncovered.

103858 of 180741 relevant lines covered (57.46%)

24750.23 hits per line

Source File: /routing/probability_apriori.go (81.21% covered)

package routing

import (
        "errors"
        "fmt"
        "math"
        "time"

        "github.com/btcsuite/btcd/btcutil"
        "github.com/lightningnetwork/lnd/lnwire"
        "github.com/lightningnetwork/lnd/routing/route"
)

const (
        // CapacityFraction and capacitySmearingFraction define how
        // capacity-related probability reweighting works. CapacityFraction
        // defines the fraction of the channel capacity at which the effect
        // roughly sets in and capacitySmearingFraction defines over which range
        // the factor changes from 1 to minCapacityFactor.

        // DefaultCapacityFraction is the default value for CapacityFraction.
        // It is chosen such that the capacity factor is active but with a small
        // effect. This value together with capacitySmearingFraction leads to a
        // noticeable reduction in probability if the amount starts to come
        // close to 90% of a channel's capacity.
        DefaultCapacityFraction = 0.9999

        // capacitySmearingFraction defines how quickly the capacity factor
        // drops from 1 to minCapacityFactor. This value results in about a
        // variation over 20% of the capacity.
        capacitySmearingFraction = 0.025

        // minCapacityFactor is the minimal value the capacityFactor can take.
        // Having a too low value can lead to discarding of paths due to the
        // enforced minimal probability or to too high pathfinding weights.
        minCapacityFactor = 0.5

        // minCapacityFraction is the minimum allowed value for
        // CapacityFraction. The success probability in the random balance model
        // (which may not be an accurate description of the liquidity
        // distribution in the network) can be approximated with P(a) = 1 - a/c,
        // for amount a and capacity c. If we require a probability P(a) = 0.25,
        // this translates into a value of 0.75 for a/c. We limit this value in
        // order to not discard too many channels.
        minCapacityFraction = 0.75

        // AprioriEstimatorName is used to identify the apriori probability
        // estimator.
        AprioriEstimatorName = "apriori"
)

var (
        // ErrInvalidHalflife is returned when we get an invalid half life.
        ErrInvalidHalflife = errors.New("penalty half life must be >= 0")

        // ErrInvalidHopProbability is returned when we get an invalid hop
        // probability.
        ErrInvalidHopProbability = errors.New("hop probability must be in " +
                "[0, 1]")

        // ErrInvalidAprioriWeight is returned when we get an apriori weight
        // that is out of range.
        ErrInvalidAprioriWeight = errors.New("apriori weight must be in [0, 1]")

        // ErrInvalidCapacityFraction is returned when we get a capacity
        // fraction that is out of range.
        ErrInvalidCapacityFraction = fmt.Errorf("capacity fraction must be in "+
                "[%v, 1]", minCapacityFraction)
)

// AprioriConfig contains configuration for our probability estimator.
type AprioriConfig struct {
        // PenaltyHalfLife defines after how much time a penalized node or
        // channel is back at 50% probability.
        PenaltyHalfLife time.Duration

        // AprioriHopProbability is the assumed success probability of a hop in
        // a route when no other information is available.
        AprioriHopProbability float64

        // AprioriWeight is a value in the range [0, 1] that defines to what
        // extent historical results should be extrapolated to untried
        // connections. Setting it to one will completely ignore historical
        // results and always assume the configured a priori probability for
        // untried connections. A value of zero will ignore the a priori
        // probability completely and only base the probability on historical
        // results, unless there are none available.
        AprioriWeight float64

        // CapacityFraction is the fraction of a channel's capacity that we
        // consider to have liquidity. For amounts that come close to or exceed
        // the fraction, an additional penalty is applied. A value of 1.0
        // disables the capacityFactor.
        CapacityFraction float64
}

// validate checks the configuration of the estimator for allowed values.
func (p AprioriConfig) validate() error {
        if p.PenaltyHalfLife < 0 {
                return ErrInvalidHalflife
        }

        if p.AprioriHopProbability < 0 || p.AprioriHopProbability > 1 {
                return ErrInvalidHopProbability
        }

        if p.AprioriWeight < 0 || p.AprioriWeight > 1 {
                return ErrInvalidAprioriWeight
        }

        if p.CapacityFraction < minCapacityFraction || p.CapacityFraction > 1 {
                return ErrInvalidCapacityFraction
        }

        return nil
}

// DefaultAprioriConfig returns the default configuration for the estimator.
func DefaultAprioriConfig() AprioriConfig {
        return AprioriConfig{
                PenaltyHalfLife:       DefaultPenaltyHalfLife,
                AprioriHopProbability: DefaultAprioriHopProbability,
                AprioriWeight:         DefaultAprioriWeight,
                CapacityFraction:      DefaultCapacityFraction,
        }
}

// AprioriEstimator returns node and pair probabilities based on historical
// payment results. It uses a preconfigured success probability value for
// untried hops (AprioriHopProbability) and returns a high success probability
// for hops that could previously conduct a payment (prevSuccessProbability).
// Successful edges are retried until proven otherwise. Recently failed hops are
// penalized by an exponential time decay (PenaltyHalfLife), after which they
// are reconsidered for routing. If information was learned about a forwarding
// node, the information is taken into account to estimate a per node
// probability that mixes with the a priori probability (AprioriWeight).
type AprioriEstimator struct {
        // AprioriConfig contains configuration options for our estimator.
        AprioriConfig

        // prevSuccessProbability is the assumed probability for node pairs that
        // successfully relayed the previous attempt.
        prevSuccessProbability float64
}

// NewAprioriEstimator creates a new AprioriEstimator.
func NewAprioriEstimator(cfg AprioriConfig) (*AprioriEstimator, error) {
        if err := cfg.validate(); err != nil {
                return nil, err
        }

        return &AprioriEstimator{
                AprioriConfig:          cfg,
                prevSuccessProbability: prevSuccessProbability,
        }, nil
}
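
// Example usage (illustrative sketch; assumes the package's default constants
// are acceptable for the caller):
//
//	cfg := DefaultAprioriConfig()
//	estimator, err := NewAprioriEstimator(cfg)
//	if err != nil {
//		// cfg failed validate(), e.g. an out-of-range AprioriWeight.
//	}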

// Compile-time checks that interfaces are implemented.
var _ Estimator = (*AprioriEstimator)(nil)
var _ estimatorConfig = (*AprioriConfig)(nil)

// Config returns the estimator's configuration.
func (p *AprioriEstimator) Config() estimatorConfig {
        return p.AprioriConfig
}

// String returns the estimator's configuration as a string representation.
func (p *AprioriEstimator) String() string {
        return fmt.Sprintf("estimator type: %v, penalty halflife time: %v, "+
                "apriori hop probability: %v, apriori weight: %v, previous "+
                "success probability: %v, capacity fraction: %v",
                AprioriEstimatorName, p.PenaltyHalfLife,
                p.AprioriHopProbability, p.AprioriWeight,
                p.prevSuccessProbability, p.CapacityFraction)
}

// getNodeProbability calculates the probability for connections from a node
// that have not been tried before. The results parameter is a list of last
// payment results for that node.
func (p *AprioriEstimator) getNodeProbability(now time.Time,
        results NodeResults, amt lnwire.MilliSatoshi,
        capacity btcutil.Amount) float64 {

        // We reduce the apriori hop probability if the amount comes close to
        // the capacity.
        apriori := p.AprioriHopProbability * capacityFactor(
                amt, capacity, p.CapacityFraction,
        )

        // If the channel history is not to be taken into account, we can return
        // early here with the configured a priori probability.
        if p.AprioriWeight == 1 {
                return apriori
        }

        // If there is no channel history, our best estimate is still the a
        // priori probability.
        if len(results) == 0 {
                return apriori
        }

        // The value of the apriori weight is in the range [0, 1]. Convert it to
        // a factor that properly expresses the intention of the weight in the
        // following weight average calculation. When the apriori weight is 0,
        // the apriori factor is also 0. This means it won't have any effect on
        // the weighted average calculation below. When the apriori weight
        // approaches 1, the apriori factor goes to infinity. It will heavily
        // outweigh any observations that have been collected.
        aprioriFactor := 1/(1-p.AprioriWeight) - 1

        // Calculate a weighted average consisting of the apriori probability
        // and historical observations. This is the part that incentivizes nodes
        // to make sure that all (not just some) of their channels are in good
        // shape. Senders will steer around nodes that have shown a few
        // failures, even though there may be many channels still untried.
        //
        // If there is just a single observation and the apriori weight is 0,
        // this single observation will totally determine the node probability.
        // The node probability is returned for all other channels of the node.
        // This means that one failure will lead to the success probability
        // estimates for all other channels being 0 too. The probability for the
        // channel that was tried will not even recover, because it is
        // recovering to the node probability (which is zero). So one failure
        // effectively prunes all channels of the node forever. This is the most
        // aggressive way in which we can penalize nodes and unlikely to yield
        // good results in a real network.
        probabilitiesTotal := apriori * aprioriFactor
        totalWeight := aprioriFactor

        for _, result := range results {
                switch {
                // Weigh success with a constant high weight of 1. There is no
                // decay. Amt is never zero, so this clause is never executed
                // when result.SuccessAmt is zero.
                case amt <= result.SuccessAmt:
                        totalWeight++
                        probabilitiesTotal += p.prevSuccessProbability

                // Weigh failures in accordance with their age. The base
                // probability of a failure is considered zero, so nothing needs
                // to be added to probabilitiesTotal.
                case !result.FailTime.IsZero() && amt >= result.FailAmt:
                        age := now.Sub(result.FailTime)
                        totalWeight += p.getWeight(age)
                }
        }

        return probabilitiesTotal / totalWeight
}
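
// Worked example for getNodeProbability (illustrative, assumed values): with
// AprioriWeight = 0.5 the aprioriFactor is 1/(1-0.5) - 1 = 1, so the a priori
// probability counts like one observation. With apriori = 0.6,
// prevSuccessProbability = 0.95 and a single success for a sufficient amount,
// the estimate is (0.6*1 + 0.95) / (1 + 1) = 0.775.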

// getWeight calculates a weight in the range [0, 1] that should be assigned to
// a payment result. Weight follows an exponential curve that starts at 1 when
// the result is fresh and asymptotically approaches zero over time. The rate at
// which this happens is controlled by the penaltyHalfLife parameter.
func (p *AprioriEstimator) getWeight(age time.Duration) float64 {
        exp := -age.Hours() / p.PenaltyHalfLife.Hours()
        return math.Pow(2, exp)
}
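
// Worked example for getWeight (illustrative, assumed values): with
// PenaltyHalfLife = 1h, a fresh failure gets weight 2^0 = 1, a 1h-old failure
// gets 2^(-1) = 0.5 and a 2h-old failure gets 2^(-2) = 0.25.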

// capacityFactor is a multiplier that can be used to reduce the probability
// depending on how much of the capacity is sent. In other words, the factor
// sorts out channels that don't provide enough liquidity. Effectively, this
// leads to usage of larger channels in total to increase success probability,
// but it may also increase fees. The limits are 1 for amt == 0 and
// minCapacityFactor for amt >> capacityCutoffFraction. The function drops
// significantly when amt reaches cutoffMsat. smearingMsat determines over which
// scale the reduction takes place.
func capacityFactor(amt lnwire.MilliSatoshi, capacity btcutil.Amount,
        capacityCutoffFraction float64) float64 {

        // The special value of 1.0 for capacityFactor disables any effect from
        // this factor.
        if capacityCutoffFraction == 1 {
                return 1.0
        }

        // If we don't have information about the capacity, which can be the
        // case for hop hints or local channels, we return unity to not alter
        // anything.
        if capacity == 0 {
                return 1.0
        }

        capMsat := float64(lnwire.NewMSatFromSatoshis(capacity))
        amtMsat := float64(amt)

        if amtMsat > capMsat {
                return 0
        }

        cutoffMsat := capacityCutoffFraction * capMsat
        smearingMsat := capacitySmearingFraction * capMsat

        // We compute a logistic function mirrored around the y axis, centered
        // at cutoffMsat, decaying over the smearingMsat scale.
        denominator := 1 + math.Exp(-(amtMsat-cutoffMsat)/smearingMsat)

        // The numerator decides what the minimal value of this function will
        // be. The minimal value is set by minCapacityFactor.
        numerator := 1 - minCapacityFactor

        return 1 - numerator/denominator
}
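
// Worked example for capacityFactor (illustrative): at amt == cutoffMsat the
// denominator is exactly 2, so the factor is 1 - (1-minCapacityFactor)/2 =
// 0.75. Far below the cutoff the factor approaches 1, and far above it (but
// still below capacity) it approaches minCapacityFactor = 0.5.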

// PairProbability estimates the probability of successfully traversing to
// toNode based on historical payment outcomes for the from node. Those outcomes
// are passed in via the results parameter.
func (p *AprioriEstimator) PairProbability(now time.Time,
        results NodeResults, toNode route.Vertex, amt lnwire.MilliSatoshi,
        capacity btcutil.Amount) float64 {

        nodeProbability := p.getNodeProbability(now, results, amt, capacity)

        return p.calculateProbability(
                now, results, nodeProbability, toNode, amt,
        )
}

// LocalPairProbability estimates the probability of successfully traversing
// our own local channels to toNode.
func (p *AprioriEstimator) LocalPairProbability(
        now time.Time, results NodeResults, toNode route.Vertex) float64 {

        // For local channels that have never been tried before, we assume them
        // to be successful. We have accurate balance and online status
        // information on our own channels, so when we select them in a route it
        // is close to certain that those channels will work.
        nodeProbability := p.prevSuccessProbability

        return p.calculateProbability(
                now, results, nodeProbability, toNode, lnwire.MaxMilliSatoshi,
        )
}

// calculateProbability estimates the probability of successfully traversing to
// toNode based on historical payment outcomes and a fall-back node probability.
func (p *AprioriEstimator) calculateProbability(
        now time.Time, results NodeResults,
        nodeProbability float64, toNode route.Vertex,
        amt lnwire.MilliSatoshi) float64 {

        // Retrieve the last pair outcome.
        lastPairResult, ok := results[toNode]

        // If there is no history for this pair, return the node probability
        // that is a probability estimate for untried channel.
        if !ok {
                return nodeProbability
        }

        // For successes, we have a fixed (high) probability. Those pairs will
        // be assumed good until proven otherwise. Amt is never zero, so this
        // clause is never executed when lastPairResult.SuccessAmt is zero.
        if amt <= lastPairResult.SuccessAmt {
                return p.prevSuccessProbability
        }

        // Take into account a minimum penalize amount. For balance errors, a
        // failure may be reported with such a minimum to prevent too aggressive
        // penalization. If the current amount is smaller than the amount that
        // previously triggered a failure, we act as if this is an untried
        // channel.
        if lastPairResult.FailTime.IsZero() || amt < lastPairResult.FailAmt {
                return nodeProbability
        }

        timeSinceLastFailure := now.Sub(lastPairResult.FailTime)

        // Calculate success probability based on the weight of the last
        // failure. When the failure is fresh, its weight is 1 and we'll return
        // probability 0. Over time the probability recovers to the node
        // probability. It would be as if this channel was never tried before.
        weight := p.getWeight(timeSinceLastFailure)
        probability := nodeProbability * (1 - weight)

        return probability
}
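
// Worked example for calculateProbability (illustrative, assumed values): if
// the last result for the pair is a failure exactly one PenaltyHalfLife old,
// getWeight returns 0.5 and the pair probability is nodeProbability * 0.5,
// i.e. halfway recovered towards the untried-channel estimate.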