• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

lightningnetwork / lnd / 13211764208

08 Feb 2025 03:08AM UTC coverage: 49.288% (-9.5%) from 58.815%
13211764208

Pull #9489

github

calvinrzachman
itest: verify switchrpc server enforces send then track

We prevent the rpc server from allowing onion dispatches for
attempt IDs which have already been tracked by rpc clients.

This helps protect the client from leaking a duplicate onion
attempt. NOTE: This is not the only method for solving this
issue! The issue could be addressed via careful client side
programming which accounts for the uncertainty and async
nature of dispatching onions to a remote process via RPC.
This would require some lnd ChannelRouter changes for how
we intend to use these RPCs though.
Pull Request #9489: multi: add BuildOnion, SendOnion, and TrackOnion RPCs

474 of 990 new or added lines in 11 files covered. (47.88%)

27321 existing lines in 435 files now uncovered.

101192 of 205306 relevant lines covered (49.29%)

1.54 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

84.24
/routing/probability_apriori.go
1
package routing
2

3
import (
4
        "errors"
5
        "fmt"
6
        "math"
7
        "time"
8

9
        "github.com/btcsuite/btcd/btcutil"
10
        "github.com/lightningnetwork/lnd/lnwire"
11
        "github.com/lightningnetwork/lnd/routing/route"
12
)
13

14
const (
	// CapacityFraction and capacitySmearingFraction together control how
	// the capacity-dependent probability reweighting behaves. The former
	// sets the fraction of the channel capacity at which the effect
	// roughly sets in, the latter the range over which the factor
	// transitions from 1 down to minCapacityFactor.

	// DefaultCapacityFraction is the default value for CapacityFraction.
	// It is chosen such that the capacity factor is active but only has a
	// small effect. Combined with capacitySmearingFraction, it leads to a
	// noticeable probability reduction once the amount approaches 90% of
	// a channel's capacity.
	DefaultCapacityFraction = 0.9999

	// capacitySmearingFraction determines how quickly the capacity factor
	// decays from 1 to minCapacityFactor, resulting in a variation over
	// roughly 20% of the capacity.
	capacitySmearingFraction = 0.025

	// minCapacityFactor is the smallest value the capacityFactor may
	// take. Choosing it too low can discard paths through the enforced
	// minimal probability or inflate pathfinding weights.
	minCapacityFactor = 0.5

	// minCapacityFraction is the lowest allowed value for
	// CapacityFraction. In the random balance model (which may not
	// accurately describe the network's liquidity distribution), the
	// success probability is approximately P(a) = 1 - a/c for amount a
	// and capacity c. Requiring P(a) = 0.25 translates into a/c = 0.75.
	// Limiting the fraction to this value avoids discarding too many
	// channels.
	minCapacityFraction = 0.75

	// AprioriEstimatorName is used to identify the apriori probability
	// estimator.
	AprioriEstimatorName = "apriori"
)
51

52
var (
53
        // ErrInvalidHalflife is returned when we get an invalid half life.
54
        ErrInvalidHalflife = errors.New("penalty half life must be >= 0")
55

56
        // ErrInvalidHopProbability is returned when we get an invalid hop
57
        // probability.
58
        ErrInvalidHopProbability = errors.New("hop probability must be in " +
59
                "[0, 1]")
60

61
        // ErrInvalidAprioriWeight is returned when we get an apriori weight
62
        // that is out of range.
63
        ErrInvalidAprioriWeight = errors.New("apriori weight must be in [0, 1]")
64

65
        // ErrInvalidCapacityFraction is returned when we get a capacity
66
        // fraction that is out of range.
67
        ErrInvalidCapacityFraction = fmt.Errorf("capacity fraction must be in "+
68
                "[%v, 1]", minCapacityFraction)
69
)
70

71
// AprioriConfig contains configuration for our probability estimator.
type AprioriConfig struct {
	// PenaltyHalfLife is the time after which a penalized node or channel
	// recovers back to 50% probability.
	PenaltyHalfLife time.Duration

	// AprioriHopProbability is the success probability assumed for a hop
	// in a route when no other information is available.
	AprioriHopProbability float64

	// AprioriWeight, a value in [0, 1], controls how strongly historical
	// results are extrapolated to untried connections. A value of one
	// ignores history entirely and always applies the configured a priori
	// probability to untried connections. A value of zero ignores the a
	// priori probability and relies purely on historical results, unless
	// none are available.
	AprioriWeight float64

	// CapacityFraction is the fraction of a channel's capacity considered
	// to hold liquidity. Amounts close to or above this fraction incur an
	// extra penalty. Setting it to 1.0 disables the capacityFactor.
	CapacityFraction float64
}
96

97
// validate checks the configuration of the estimator for allowed values.
98
func (p AprioriConfig) validate() error {
3✔
99
        if p.PenaltyHalfLife < 0 {
3✔
100
                return ErrInvalidHalflife
×
101
        }
×
102

103
        if p.AprioriHopProbability < 0 || p.AprioriHopProbability > 1 {
3✔
104
                return ErrInvalidHopProbability
×
105
        }
×
106

107
        if p.AprioriWeight < 0 || p.AprioriWeight > 1 {
3✔
108
                return ErrInvalidAprioriWeight
×
109
        }
×
110

111
        if p.CapacityFraction < minCapacityFraction || p.CapacityFraction > 1 {
3✔
112
                return ErrInvalidCapacityFraction
×
113
        }
×
114

115
        return nil
3✔
116
}
117

118
// DefaultAprioriConfig returns the default configuration for the estimator.
119
func DefaultAprioriConfig() AprioriConfig {
×
120
        return AprioriConfig{
×
121
                PenaltyHalfLife:       DefaultPenaltyHalfLife,
×
122
                AprioriHopProbability: DefaultAprioriHopProbability,
×
123
                AprioriWeight:         DefaultAprioriWeight,
×
124
                CapacityFraction:      DefaultCapacityFraction,
×
125
        }
×
126
}
×
127

128
// AprioriEstimator returns node and pair probabilities based on historical
129
// payment results. It uses a preconfigured success probability value for
130
// untried hops (AprioriHopProbability) and returns a high success probability
131
// for hops that could previously conduct a payment (prevSuccessProbability).
132
// Successful edges are retried until proven otherwise. Recently failed hops are
133
// penalized by an exponential time decay (PenaltyHalfLife), after which they
134
// are reconsidered for routing. If information was learned about a forwarding
135
// node, the information is taken into account to estimate a per node
136
// probability that mixes with the a priori probability (AprioriWeight).
137
type AprioriEstimator struct {
138
        // AprioriConfig contains configuration options for our estimator.
139
        AprioriConfig
140

141
        // prevSuccessProbability is the assumed probability for node pairs that
142
        // successfully relayed the previous attempt.
143
        prevSuccessProbability float64
144
}
145

146
// NewAprioriEstimator creates a new AprioriEstimator.
147
func NewAprioriEstimator(cfg AprioriConfig) (*AprioriEstimator, error) {
3✔
148
        if err := cfg.validate(); err != nil {
3✔
149
                return nil, err
×
150
        }
×
151

152
        return &AprioriEstimator{
3✔
153
                AprioriConfig:          cfg,
3✔
154
                prevSuccessProbability: prevSuccessProbability,
3✔
155
        }, nil
3✔
156
}
157

158
// Compile-time checks that interfaces are implemented.
159
var _ Estimator = (*AprioriEstimator)(nil)
160
var _ estimatorConfig = (*AprioriConfig)(nil)
161

162
// Config returns the estimator's configuration.
163
func (p *AprioriEstimator) Config() estimatorConfig {
3✔
164
        return p.AprioriConfig
3✔
165
}
3✔
166

167
// String returns the estimator's configuration as a string representation.
168
func (p *AprioriEstimator) String() string {
3✔
169
        return fmt.Sprintf("estimator type: %v, penalty halflife time: %v, "+
3✔
170
                "apriori hop probability: %v, apriori weight: %v, previous "+
3✔
171
                "success probability: %v, capacity fraction: %v",
3✔
172
                AprioriEstimatorName, p.PenaltyHalfLife,
3✔
173
                p.AprioriHopProbability, p.AprioriWeight,
3✔
174
                p.prevSuccessProbability, p.CapacityFraction)
3✔
175
}
3✔
176

177
// getNodeProbability calculates the probability for connections from a node
178
// that have not been tried before. The results parameter is a list of last
179
// payment results for that node.
180
func (p *AprioriEstimator) getNodeProbability(now time.Time,
181
        results NodeResults, amt lnwire.MilliSatoshi,
182
        capacity btcutil.Amount) float64 {
3✔
183

3✔
184
        // We reduce the apriori hop probability if the amount comes close to
3✔
185
        // the capacity.
3✔
186
        apriori := p.AprioriHopProbability * capacityFactor(
3✔
187
                amt, capacity, p.CapacityFraction,
3✔
188
        )
3✔
189

3✔
190
        // If the channel history is not to be taken into account, we can return
3✔
191
        // early here with the configured a priori probability.
3✔
192
        if p.AprioriWeight == 1 {
3✔
UNCOV
193
                return apriori
×
UNCOV
194
        }
×
195

196
        // If there is no channel history, our best estimate is still the a
197
        // priori probability.
198
        if len(results) == 0 {
6✔
199
                return apriori
3✔
200
        }
3✔
201

202
        // The value of the apriori weight is in the range [0, 1]. Convert it to
203
        // a factor that properly expresses the intention of the weight in the
204
        // following weight average calculation. When the apriori weight is 0,
205
        // the apriori factor is also 0. This means it won't have any effect on
206
        // the weighted average calculation below. When the apriori weight
207
        // approaches 1, the apriori factor goes to infinity. It will heavily
208
        // outweigh any observations that have been collected.
209
        aprioriFactor := 1/(1-p.AprioriWeight) - 1
3✔
210

3✔
211
        // Calculate a weighted average consisting of the apriori probability
3✔
212
        // and historical observations. This is the part that incentivizes nodes
3✔
213
        // to make sure that all (not just some) of their channels are in good
3✔
214
        // shape. Senders will steer around nodes that have shown a few
3✔
215
        // failures, even though there may be many channels still untried.
3✔
216
        //
3✔
217
        // If there is just a single observation and the apriori weight is 0,
3✔
218
        // this single observation will totally determine the node probability.
3✔
219
        // The node probability is returned for all other channels of the node.
3✔
220
        // This means that one failure will lead to the success probability
3✔
221
        // estimates for all other channels being 0 too. The probability for the
3✔
222
        // channel that was tried will not even recover, because it is
3✔
223
        // recovering to the node probability (which is zero). So one failure
3✔
224
        // effectively prunes all channels of the node forever. This is the most
3✔
225
        // aggressive way in which we can penalize nodes and unlikely to yield
3✔
226
        // good results in a real network.
3✔
227
        probabilitiesTotal := apriori * aprioriFactor
3✔
228
        totalWeight := aprioriFactor
3✔
229

3✔
230
        for _, result := range results {
6✔
231
                switch {
3✔
232
                // Weigh success with a constant high weight of 1. There is no
233
                // decay. Amt is never zero, so this clause is never executed
234
                // when result.SuccessAmt is zero.
235
                case amt <= result.SuccessAmt:
3✔
236
                        totalWeight++
3✔
237
                        probabilitiesTotal += p.prevSuccessProbability
3✔
238

239
                // Weigh failures in accordance with their age. The base
240
                // probability of a failure is considered zero, so nothing needs
241
                // to be added to probabilitiesTotal.
242
                case !result.FailTime.IsZero() && amt >= result.FailAmt:
3✔
243
                        age := now.Sub(result.FailTime)
3✔
244
                        totalWeight += p.getWeight(age)
3✔
245
                }
246
        }
247

248
        return probabilitiesTotal / totalWeight
3✔
249
}
250

251
// getWeight calculates a weight in the range [0, 1] that should be assigned to
252
// a payment result. Weight follows an exponential curve that starts at 1 when
253
// the result is fresh and asymptotically approaches zero over time. The rate at
254
// which this happens is controlled by the penaltyHalfLife parameter.
255
func (p *AprioriEstimator) getWeight(age time.Duration) float64 {
3✔
256
        exp := -age.Hours() / p.PenaltyHalfLife.Hours()
3✔
257
        return math.Pow(2, exp)
3✔
258
}
3✔
259

260
// capacityFactor is a multiplier that can be used to reduce the probability
261
// depending on how much of the capacity is sent. In other words, the factor
262
// sorts out channels that don't provide enough liquidity. Effectively, this
263
// leads to usage of larger channels in total to increase success probability,
264
// but it may also increase fees. The limits are 1 for amt == 0 and
265
// minCapacityFactor for amt >> capacityCutoffFraction. The function drops
266
// significantly when amt reaches cutoffMsat. smearingMsat determines over which
267
// scale the reduction takes place.
268
func capacityFactor(amt lnwire.MilliSatoshi, capacity btcutil.Amount,
269
        capacityCutoffFraction float64) float64 {
3✔
270

3✔
271
        // The special value of 1.0 for capacityFactor disables any effect from
3✔
272
        // this factor.
3✔
273
        if capacityCutoffFraction == 1 {
3✔
UNCOV
274
                return 1.0
×
UNCOV
275
        }
×
276

277
        // If we don't have information about the capacity, which can be the
278
        // case for hop hints or local channels, we return unity to not alter
279
        // anything.
280
        if capacity == 0 {
3✔
281
                return 1.0
×
282
        }
×
283

284
        capMsat := float64(lnwire.NewMSatFromSatoshis(capacity))
3✔
285
        amtMsat := float64(amt)
3✔
286

3✔
287
        if amtMsat > capMsat {
3✔
UNCOV
288
                return 0
×
UNCOV
289
        }
×
290

291
        cutoffMsat := capacityCutoffFraction * capMsat
3✔
292
        smearingMsat := capacitySmearingFraction * capMsat
3✔
293

3✔
294
        // We compute a logistic function mirrored around the y axis, centered
3✔
295
        // at cutoffMsat, decaying over the smearingMsat scale.
3✔
296
        denominator := 1 + math.Exp(-(amtMsat-cutoffMsat)/smearingMsat)
3✔
297

3✔
298
        // The numerator decides what the minimal value of this function will
3✔
299
        // be. The minimal value is set by minCapacityFactor.
3✔
300
        numerator := 1 - minCapacityFactor
3✔
301

3✔
302
        return 1 - numerator/denominator
3✔
303
}
304

305
// PairProbability estimates the probability of successfully traversing to
306
// toNode based on historical payment outcomes for the from node. Those outcomes
307
// are passed in via the results parameter.
308
func (p *AprioriEstimator) PairProbability(now time.Time,
309
        results NodeResults, toNode route.Vertex, amt lnwire.MilliSatoshi,
310
        capacity btcutil.Amount) float64 {
3✔
311

3✔
312
        nodeProbability := p.getNodeProbability(now, results, amt, capacity)
3✔
313

3✔
314
        return p.calculateProbability(
3✔
315
                now, results, nodeProbability, toNode, amt,
3✔
316
        )
3✔
317
}
3✔
318

319
// LocalPairProbability estimates the probability of successfully traversing
320
// our own local channels to toNode.
321
func (p *AprioriEstimator) LocalPairProbability(
322
        now time.Time, results NodeResults, toNode route.Vertex) float64 {
3✔
323

3✔
324
        // For local channels that have never been tried before, we assume them
3✔
325
        // to be successful. We have accurate balance and online status
3✔
326
        // information on our own channels, so when we select them in a route it
3✔
327
        // is close to certain that those channels will work.
3✔
328
        nodeProbability := p.prevSuccessProbability
3✔
329

3✔
330
        return p.calculateProbability(
3✔
331
                now, results, nodeProbability, toNode, lnwire.MaxMilliSatoshi,
3✔
332
        )
3✔
333
}
3✔
334

335
// calculateProbability estimates the probability of successfully traversing to
336
// toNode based on historical payment outcomes and a fall-back node probability.
337
func (p *AprioriEstimator) calculateProbability(
338
        now time.Time, results NodeResults,
339
        nodeProbability float64, toNode route.Vertex,
340
        amt lnwire.MilliSatoshi) float64 {
3✔
341

3✔
342
        // Retrieve the last pair outcome.
3✔
343
        lastPairResult, ok := results[toNode]
3✔
344

3✔
345
        // If there is no history for this pair, return the node probability
3✔
346
        // that is a probability estimate for untried channel.
3✔
347
        if !ok {
6✔
348
                return nodeProbability
3✔
349
        }
3✔
350

351
        // For successes, we have a fixed (high) probability. Those pairs will
352
        // be assumed good until proven otherwise. Amt is never zero, so this
353
        // clause is never executed when lastPairResult.SuccessAmt is zero.
354
        if amt <= lastPairResult.SuccessAmt {
6✔
355
                return p.prevSuccessProbability
3✔
356
        }
3✔
357

358
        // Take into account a minimum penalize amount. For balance errors, a
359
        // failure may be reported with such a minimum to prevent too aggressive
360
        // penalization. If the current amount is smaller than the amount that
361
        // previously triggered a failure, we act as if this is an untried
362
        // channel.
363
        if lastPairResult.FailTime.IsZero() || amt < lastPairResult.FailAmt {
6✔
364
                return nodeProbability
3✔
365
        }
3✔
366

367
        timeSinceLastFailure := now.Sub(lastPairResult.FailTime)
3✔
368

3✔
369
        // Calculate success probability based on the weight of the last
3✔
370
        // failure. When the failure is fresh, its weight is 1 and we'll return
3✔
371
        // probability 0. Over time the probability recovers to the node
3✔
372
        // probability. It would be as if this channel was never tried before.
3✔
373
        weight := p.getWeight(timeSinceLastFailure)
3✔
374
        probability := nodeProbability * (1 - weight)
3✔
375

3✔
376
        return probability
3✔
377
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc