lightningnetwork / lnd — build 12041760086 (github)

27 Nov 2024 01:02AM UTC coverage: 59.001% (+0.002%) from 58.999%

Pull Request #9242: Reapply #8644
aakselrod: github workflow: save postgres log to zip file

8 of 39 new or added lines in 3 files covered. (20.51%)
82 existing lines in 18 files now uncovered.
133176 of 225719 relevant lines covered (59.0%)
19559.01 hits per line

Source File: /htlcswitch/link.go (80.21% covered)

package htlcswitch

import (
        "bytes"
        crand "crypto/rand"
        "crypto/sha256"
        "errors"
        "fmt"
        prand "math/rand"
        "sync"
        "sync/atomic"
        "time"

        "github.com/btcsuite/btcd/btcutil"
        "github.com/btcsuite/btcd/wire"
        "github.com/btcsuite/btclog/v2"
        "github.com/lightningnetwork/lnd/build"
        "github.com/lightningnetwork/lnd/channeldb"
        "github.com/lightningnetwork/lnd/channeldb/models"
        "github.com/lightningnetwork/lnd/contractcourt"
        "github.com/lightningnetwork/lnd/fn"
        "github.com/lightningnetwork/lnd/htlcswitch/hodl"
        "github.com/lightningnetwork/lnd/htlcswitch/hop"
        "github.com/lightningnetwork/lnd/input"
        "github.com/lightningnetwork/lnd/invoices"
        "github.com/lightningnetwork/lnd/lnpeer"
        "github.com/lightningnetwork/lnd/lntypes"
        "github.com/lightningnetwork/lnd/lnutils"
        "github.com/lightningnetwork/lnd/lnwallet"
        "github.com/lightningnetwork/lnd/lnwallet/chainfee"
        "github.com/lightningnetwork/lnd/lnwire"
        "github.com/lightningnetwork/lnd/queue"
        "github.com/lightningnetwork/lnd/record"
        "github.com/lightningnetwork/lnd/ticker"
        "github.com/lightningnetwork/lnd/tlv"
)

func init() {
        prand.Seed(time.Now().UnixNano())
}

const (
        // DefaultMaxOutgoingCltvExpiry is the maximum outgoing time lock that
        // the node accepts for forwarded payments. The value is relative to the
        // current block height. The reason to have a maximum is to prevent
        // funds getting locked up unreasonably long. Otherwise, an attacker
        // willing to lock its own funds too, could force the funds of this node
        // to be locked up for an indefinite (max int32) number of blocks.
        //
        // The value 2016 corresponds to on average two weeks worth of blocks
        // and is based on the maximum number of hops (20), the default CLTV
        // delta (40), and some extra margin to account for the other lightning
        // implementations and past lnd versions which used to have a default
        // CLTV delta of 144.
        DefaultMaxOutgoingCltvExpiry = 2016

        // DefaultMinLinkFeeUpdateTimeout represents the minimum interval in
        // which a link should propose to update its commitment fee rate.
        DefaultMinLinkFeeUpdateTimeout = 10 * time.Minute

        // DefaultMaxLinkFeeUpdateTimeout represents the maximum interval in
        // which a link should propose to update its commitment fee rate.
        DefaultMaxLinkFeeUpdateTimeout = 60 * time.Minute

        // DefaultMaxLinkFeeAllocation is the highest allocation we'll allow
        // a channel's commitment fee to be of its balance. This only applies to
        // the initiator of the channel.
        DefaultMaxLinkFeeAllocation float64 = 0.5
)
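
// As a quick sanity check on the 2016 figure above: at an average of six
// blocks per hour, two weeks of blocks is 6 * 24 * 14 = 2016.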

// ExpectedFee computes the expected fee for a given htlc amount. The value
// returned from this function is to be used as a sanity check when forwarding
// HTLC's to ensure that an incoming HTLC properly adheres to our propagated
// forwarding policy.
//
// TODO(roasbeef): also add in current available channel bandwidth, inverse
// func
func ExpectedFee(f models.ForwardingPolicy,
        htlcAmt lnwire.MilliSatoshi) lnwire.MilliSatoshi {

        return f.BaseFee + (htlcAmt*f.FeeRate)/1000000
}
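
// As a worked example of the formula above: with a BaseFee of 1,000 msat and
// a FeeRate of 100 (a proportional fee in parts per million, given the
// division by 1000000), an HTLC of 1,000,000 msat is expected to pay
// 1000 + (1000000*100)/1000000 = 1,100 msat in fees.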

// ChannelLinkConfig defines the configuration for the channel link. ALL
// elements within the configuration MUST be non-nil for channel link to carry
// out its duties.
type ChannelLinkConfig struct {
        // FwrdingPolicy is the initial forwarding policy to be used when
        // deciding whether to forward incoming HTLC's or not. This value
        // can be updated with subsequent calls to UpdateForwardingPolicy
        // targeted at a given ChannelLink concrete interface implementation.
        FwrdingPolicy models.ForwardingPolicy

        // Circuits provides restricted access to the switch's circuit map,
        // allowing the link to open and close circuits.
        Circuits CircuitModifier

        // BestHeight returns the best known height.
        BestHeight func() uint32

        // ForwardPackets attempts to forward the batch of htlcs through the
        // switch. The function returns an error in case it fails to send one
        // or more packets. The link's quit signal should be provided to allow
        // cancellation of forwarding during link shutdown.
        ForwardPackets func(<-chan struct{}, bool, ...*htlcPacket) error

        // DecodeHopIterators facilitates batched decoding of HTLC Sphinx onion
        // blobs, which are then used to inform how to forward an HTLC.
        //
        // NOTE: This function assumes the same set of readers and preimages
        // are always presented for the same identifier.
        DecodeHopIterators func([]byte, []hop.DecodeHopIteratorRequest) (
                []hop.DecodeHopIteratorResponse, error)

        // ExtractErrorEncrypter function is responsible for decoding HTLC
        // Sphinx onion blob, and creating onion failure obfuscator.
        ExtractErrorEncrypter hop.ErrorEncrypterExtracter

        // FetchLastChannelUpdate retrieves the latest routing policy for a
        // target channel. This channel will typically be the outgoing channel
        // specified when we receive an incoming HTLC. This will be used to
        // provide payment senders our latest policy when sending encrypted
        // error messages.
        FetchLastChannelUpdate func(lnwire.ShortChannelID) (
                *lnwire.ChannelUpdate1, error)

        // Peer is a lightning network node with which we have the channel link
        // opened.
        Peer lnpeer.Peer

        // Registry is a sub-system which is responsible for managing the
        // invoices in a thread-safe manner.
        Registry InvoiceDatabase

        // PreimageCache is a global witness beacon that houses any new
        // preimages discovered by other links. We'll use this to add new
        // witnesses that we discover which will notify any sub-systems
        // subscribed to new events.
        PreimageCache contractcourt.WitnessBeacon

        // OnChannelFailure is a function closure that we'll call if the
        // channel failed for some reason. Depending on the severity of the
        // error, the closure potentially must force close this channel and
        // disconnect the peer.
        //
        // NOTE: The method must return in order for the ChannelLink to be able
        // to shut down properly.
        OnChannelFailure func(lnwire.ChannelID, lnwire.ShortChannelID,
                LinkFailureError)

        // UpdateContractSignals is a function closure that we'll use to update
        // outside sub-systems with this channel's latest ShortChannelID.
        UpdateContractSignals func(*contractcourt.ContractSignals) error

        // NotifyContractUpdate is a function closure that we'll use to update
        // the contractcourt and more specifically the ChannelArbitrator of the
        // latest channel state.
        NotifyContractUpdate func(*contractcourt.ContractUpdate) error

        // ChainEvents is an active subscription to the chain watcher for this
        // channel to be notified of any on-chain activity related to this
        // channel.
        ChainEvents *contractcourt.ChainEventSubscription

        // FeeEstimator is an instance of a live fee estimator which will be
        // used to dynamically regulate the current fee of the commitment
        // transaction to ensure timely confirmation.
        FeeEstimator chainfee.Estimator

        // HodlMask is a bitvector composed of hodl.Flags, specifying
        // breakpoints for HTLC forwarding internal to the switch.
        //
        // NOTE: This should only be used for testing.
        HodlMask hodl.Mask

        // SyncStates is used to indicate that we need to send the channel
        // reestablishment message to the remote peer. It should be done if our
        // clients have been restarted, or the remote peer has been
        // reconnected.
        SyncStates bool

        // BatchTicker is the ticker that determines the interval that we'll
        // use to check the batch to see if there're any updates we should
        // flush out. By batching updates into a single commit, we attempt to
        // increase throughput by maximizing the number of updates coalesced
        // into a single commit.
        BatchTicker ticker.Ticker

        // FwdPkgGCTicker is the ticker determining the frequency at which
        // garbage collection of forwarding packages occurs. We use a
        // time-based approach, as opposed to block epochs, as to not hinder
        // syncing.
        FwdPkgGCTicker ticker.Ticker

        // PendingCommitTicker is a ticker that allows the link to determine if
        // a locally initiated commitment dance gets stuck waiting for the
        // remote party to revoke.
        PendingCommitTicker ticker.Ticker

        // BatchSize is the max size of a batch of updates done to the link
        // before we do a state update.
        BatchSize uint32

        // UnsafeReplay will cause a link to replay the adds in its latest
        // commitment txn after the link is restarted. This should only be used
        // in testing, it is here to ensure the sphinx replay detection on the
        // receiving node is persistent.
        UnsafeReplay bool

        // MinUpdateTimeout represents the minimum interval in which a link
        // will propose to update its commitment fee rate. A random timeout will
        // be selected between this and MaxUpdateTimeout.
        MinUpdateTimeout time.Duration

        // MaxUpdateTimeout represents the maximum interval in which a link
        // will propose to update its commitment fee rate. A random timeout will
        // be selected between this and MinUpdateTimeout.
        MaxUpdateTimeout time.Duration

        // OutgoingCltvRejectDelta defines the number of blocks before expiry of
        // an htlc where we don't offer an htlc anymore. This should be at least
        // the outgoing broadcast delta, because in any case we don't want to
        // risk offering an htlc that triggers channel closure.
        OutgoingCltvRejectDelta uint32

        // TowerClient is an optional engine that manages the signing,
        // encrypting, and uploading of justice transactions to the daemon's
        // configured set of watchtowers for legacy channels.
        TowerClient TowerClient

        // MaxOutgoingCltvExpiry is the maximum outgoing timelock that the link
        // should accept for a forwarded HTLC. The value is relative to the
        // current block height.
        MaxOutgoingCltvExpiry uint32

        // MaxFeeAllocation is the highest allocation we'll allow a channel's
        // commitment fee to be of its balance. This only applies to the
        // initiator of the channel.
        MaxFeeAllocation float64

        // MaxAnchorsCommitFeeRate is the max commitment fee rate we'll use as
        // the initiator for channels of the anchor type.
        MaxAnchorsCommitFeeRate chainfee.SatPerKWeight

        // NotifyActiveLink allows the link to tell the ChannelNotifier when a
        // link is first started.
        NotifyActiveLink func(wire.OutPoint)

        // NotifyActiveChannel allows the link to tell the ChannelNotifier when
        // a channel becomes active.
        NotifyActiveChannel func(wire.OutPoint)

        // NotifyInactiveChannel allows the switch to tell the ChannelNotifier
        // when channels become inactive.
        NotifyInactiveChannel func(wire.OutPoint)

        // NotifyInactiveLinkEvent allows the switch to tell the
        // ChannelNotifier when a channel link becomes inactive.
        NotifyInactiveLinkEvent func(wire.OutPoint)

        // HtlcNotifier is an instance of a htlcNotifier which we will pipe htlc
        // events through.
        HtlcNotifier htlcNotifier

        // FailAliasUpdate is a function used to fail an HTLC for an
        // option_scid_alias channel.
        FailAliasUpdate func(sid lnwire.ShortChannelID,
                incoming bool) *lnwire.ChannelUpdate1

        // GetAliases is used by the link and switch to fetch the set of
        // aliases for a given link.
        GetAliases func(base lnwire.ShortChannelID) []lnwire.ShortChannelID

        // PreviouslySentShutdown is an optional value that is set if, at the
        // time of the link being started, persisted shutdown info was found for
        // the channel. This value being set means that we previously sent a
        // Shutdown message to our peer, and so we should do so again on
        // re-establish and should not allow any more HTLC adds on the outgoing
        // direction of the link.
        PreviouslySentShutdown fn.Option[lnwire.Shutdown]

        // DisallowRouteBlinding adds the option to disable forwarding payments
        // in blinded routes by failing back any blinding-related payloads as
        // if they were invalid.
        DisallowRouteBlinding bool

        // MaxFeeExposure is the threshold in milli-satoshis after which we'll
        // restrict the flow of HTLCs and fee updates.
        MaxFeeExposure lnwire.MilliSatoshi

        // ShouldFwdExpEndorsement is a closure that indicates whether the link
        // should forward experimental endorsement signals.
        ShouldFwdExpEndorsement func() bool
}

// channelLink is the service which drives a channel's commitment update
// state-machine. In the event that an HTLC needs to be propagated to another
// link, the forward handler from config is used which sends HTLC to the
// switch. Additionally, the link encapsulates the logic of commitment protocol
// message ordering and updates.
type channelLink struct {
        // The following fields are only meant to be used *atomically*
        started       int32
        reestablished int32
        shutdown      int32

        // failed should be set to true in case a link error happens, making
        // sure we don't process any more updates.
        failed bool

        // keystoneBatch represents a volatile list of keystones that must be
        // written before attempting to sign the next commitment txn. These
        // represent all the HTLC's forwarded to the link from the switch. Once
        // we lock them into our outgoing commitment, then the circuit has a
        // keystone, and is fully opened.
        keystoneBatch []Keystone

        // openedCircuits is the set of all payment circuits that will be open
        // once we make our next commitment. After making the commitment we'll
        // ACK all these from our mailbox to ensure that they don't get
        // re-delivered if we reconnect.
        openedCircuits []CircuitKey

        // closedCircuits is the set of all payment circuits that will be
        // closed once we make our next commitment. After taking the commitment
        // we'll ACK all these to ensure that they don't get re-delivered if we
        // reconnect.
        closedCircuits []CircuitKey

        // channel is a lightning network channel to which we apply htlc
        // updates.
        channel *lnwallet.LightningChannel

        // cfg is a structure which carries all dependable fields/handlers
        // which may affect behaviour of the service.
        cfg ChannelLinkConfig

        // mailBox is the main interface between the outside world and the
        // link. All incoming messages will be sent over this mailBox. Messages
        // include new updates from our connected peer, and new packets to be
        // forwarded sent by the switch.
        mailBox MailBox

        // upstream is a channel that new messages sent from the remote peer to
        // the local peer will be sent across.
        upstream chan lnwire.Message

        // downstream is a channel in which new multi-hop HTLC's to be
        // forwarded will be sent across. Messages from this channel are sent
        // by the HTLC switch.
        downstream chan *htlcPacket

        // updateFeeTimer is the timer responsible for updating the link's
        // commitment fee every time it fires.
        updateFeeTimer *time.Timer

        // uncommittedPreimages stores a list of all preimages that have been
        // learned since receiving the last CommitSig from the remote peer. The
        // batch will be flushed just before accepting the subsequent CommitSig
        // or on shutdown to avoid doing a write for each preimage received.
        uncommittedPreimages []lntypes.Preimage

        sync.RWMutex

        // hodlQueue is used to receive exit hop htlc resolutions from invoice
        // registry.
        hodlQueue *queue.ConcurrentQueue

        // hodlMap stores related htlc data for a circuit key. It allows
        // resolving those htlcs when we receive a message on hodlQueue.
        hodlMap map[models.CircuitKey]hodlHtlc

        // log is a link-specific logging instance.
        log btclog.Logger

        // isOutgoingAddBlocked tracks whether the channelLink can send an
        // UpdateAddHTLC.
        isOutgoingAddBlocked atomic.Bool

        // isIncomingAddBlocked tracks whether the channelLink can receive an
        // UpdateAddHTLC.
        isIncomingAddBlocked atomic.Bool

        // flushHooks is a hookMap that is triggered when we reach a channel
        // state with no live HTLCs.
        flushHooks hookMap

        // outgoingCommitHooks is a hookMap that is triggered after we send our
        // next CommitSig.
        outgoingCommitHooks hookMap

        // incomingCommitHooks is a hookMap that is triggered after we receive
        // our next CommitSig.
        incomingCommitHooks hookMap

        // ContextGuard is a helper that encapsulates a wait group and quit
        // channel and allows contexts that either block or cancel on those
        // depending on the use case.
        *fn.ContextGuard
}

// hookMap is a data structure that is used to track the hooks that need to be
// called in various parts of the channelLink's lifecycle.
//
// WARNING: NOT thread-safe.
type hookMap struct {
        // allocIdx keeps track of the next id we haven't yet allocated.
        allocIdx atomic.Uint64

        // transient is a map of hooks that are only called the next time invoke
        // is called. These hooks are deleted during invoke.
        transient map[uint64]func()

        // newTransients is a channel that we use to accept new hooks into the
        // hookMap.
        newTransients chan func()
}

// newHookMap initializes a new empty hookMap.
func newHookMap() hookMap {
        return hookMap{
                allocIdx:      atomic.Uint64{},
                transient:     make(map[uint64]func()),
                newTransients: make(chan func()),
        }
}

// alloc allocates space in the hook map for the supplied hook and returns the
// id under which it was registered.
func (m *hookMap) alloc(hook func()) uint64 {
        // We assume we never overflow a uint64. Seems OK.
        hookID := m.allocIdx.Add(1)
        if hookID == 0 {
                panic("hookMap allocIdx overflow")
        }
        m.transient[hookID] = hook

        return hookID
}

// invoke is used on a hook map to call all the registered hooks and then clear
// out the transient hooks so they are not called again.
func (m *hookMap) invoke() {
        for _, hook := range m.transient {
                hook()
        }

        m.transient = make(map[uint64]func())
}
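
// A minimal usage sketch of the hookMap (illustrative only, not part of this
// file): a transient hook fires exactly once on the next invoke and is then
// dropped.
//
//      m := newHookMap()
//      m.alloc(func() { fmt.Println("flushed") })
//      m.invoke() // prints "flushed"
//      m.invoke() // prints nothing; transient hooks were cleared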

// hodlHtlc contains htlc data that is required for resolution.
type hodlHtlc struct {
        add        lnwire.UpdateAddHTLC
        sourceRef  channeldb.AddRef
        obfuscator hop.ErrorEncrypter
}

// NewChannelLink creates a new instance of a ChannelLink given a configuration
// and active channel that will be used to verify/apply updates to.
func NewChannelLink(cfg ChannelLinkConfig,
        channel *lnwallet.LightningChannel) ChannelLink {

        logPrefix := fmt.Sprintf("ChannelLink(%v):", channel.ChannelPoint())

        // If the max fee exposure isn't set, use the default.
        if cfg.MaxFeeExposure == 0 {
                cfg.MaxFeeExposure = DefaultMaxFeeExposure
        }

        return &channelLink{
                cfg:                 cfg,
                channel:             channel,
                hodlMap:             make(map[models.CircuitKey]hodlHtlc),
                hodlQueue:           queue.NewConcurrentQueue(10),
                log:                 build.NewPrefixLog(logPrefix, log),
                flushHooks:          newHookMap(),
                outgoingCommitHooks: newHookMap(),
                incomingCommitHooks: newHookMap(),
                ContextGuard:        fn.NewContextGuard(),
        }
}

// A compile time check to ensure channelLink implements the ChannelLink
// interface.
var _ ChannelLink = (*channelLink)(nil)
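
// (Assigning a typed nil to a variable of the interface type is a common Go
// idiom: the build fails if channelLink ever stops satisfying ChannelLink,
// at zero runtime cost.)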

// Start starts all helper goroutines required for the operation of the channel
// link.
//
// NOTE: Part of the ChannelLink interface.
func (l *channelLink) Start() error {
        if !atomic.CompareAndSwapInt32(&l.started, 0, 1) {
                err := fmt.Errorf("channel link(%v): already started", l)
                l.log.Warn("already started")
                return err
        }

        l.log.Info("starting")

        // If the config supplied a watchtower client, ensure the channel is
        // registered before trying to use it during operation.
        if l.cfg.TowerClient != nil {
                err := l.cfg.TowerClient.RegisterChannel(
                        l.ChanID(), l.channel.State().ChanType,
                )
                if err != nil {
                        return err
                }
        }

        l.mailBox.ResetMessages()
        l.hodlQueue.Start()

        // Before launching the htlcManager messages, revert any circuits that
        // were marked open in the switch's circuit map, but did not make it
        // into a commitment txn. We use the next local htlc index as the cut
        // off point, since all indexes below that are committed. This action
        // is only performed if the link's final short channel ID has been
        // assigned, otherwise we would try to trim the htlcs belonging to the
        // all-zero, hop.Source ID.
        if l.ShortChanID() != hop.Source {
                localHtlcIndex, err := l.channel.NextLocalHtlcIndex()
                if err != nil {
                        return fmt.Errorf("unable to retrieve next local "+
                                "htlc index: %v", err)
                }

                // NOTE: This is automatically done by the switch when it
                // starts up, but is necessary to prevent inconsistencies in
                // the case that the link flaps. This is a result of a link's
                // life-cycle being shorter than that of the switch.
                chanID := l.ShortChanID()
                err = l.cfg.Circuits.TrimOpenCircuits(chanID, localHtlcIndex)
                if err != nil {
                        return fmt.Errorf("unable to trim circuits above "+
                                "local htlc index %d: %v", localHtlcIndex, err)
                }

                // Since the link is live, before we start the link we'll update
                // the ChainArbitrator with the set of new channel signals for
                // this channel.
                //
                // TODO(roasbeef): split goroutines within channel arb to avoid
                go func() {
                        signals := &contractcourt.ContractSignals{
                                ShortChanID: l.channel.ShortChanID(),
                        }

                        err := l.cfg.UpdateContractSignals(signals)
                        if err != nil {
                                l.log.Errorf("unable to update signals")
                        }
                }()
        }

        l.updateFeeTimer = time.NewTimer(l.randomFeeUpdateTimeout())

        l.Wg.Add(1)
        go l.htlcManager()

        return nil
}

// Stop gracefully stops all active helper goroutines, then waits until they've
// exited.
//
// NOTE: Part of the ChannelLink interface.
func (l *channelLink) Stop() {
        if !atomic.CompareAndSwapInt32(&l.shutdown, 0, 1) {
                l.log.Warn("already stopped")
                return
        }

        l.log.Info("stopping")

        // As the link is stopping, we are no longer interested in htlc
        // resolutions coming from the invoice registry.
        l.cfg.Registry.HodlUnsubscribeAll(l.hodlQueue.ChanIn())

        if l.cfg.ChainEvents.Cancel != nil {
                l.cfg.ChainEvents.Cancel()
        }

        // Ensure the channel for the timer is drained.
        if l.updateFeeTimer != nil {
                if !l.updateFeeTimer.Stop() {
                        select {
                        case <-l.updateFeeTimer.C:
                        default:
                        }
                }
        }

        if l.hodlQueue != nil {
                l.hodlQueue.Stop()
        }

        close(l.Quit)
        l.Wg.Wait()

        // Now that the htlcManager has completely exited, reset the packet
        // courier. This allows the mailbox to re-evaluate any lingering Adds
        // that were delivered but didn't make it on a commitment to be failed
        // back if the link is offline for an extended period of time. The
        // error is ignored since it can only fail when the daemon is exiting.
        _ = l.mailBox.ResetPackets()

        // As a final precaution, we will attempt to flush any uncommitted
        // preimages to the preimage cache. The preimages should be re-delivered
        // after channel reestablishment, however this adds an extra layer of
        // protection in case the peer never returns. Without this, we will be
        // unable to settle any contracts depending on the preimages even though
        // we had learned them at some point.
        err := l.cfg.PreimageCache.AddPreimages(l.uncommittedPreimages...)
        if err != nil {
                l.log.Errorf("unable to add preimages=%v to cache: %v",
                        l.uncommittedPreimages, err)
        }
}

// WaitForShutdown blocks until the link finishes shutting down, which includes
// termination of all dependent goroutines.
func (l *channelLink) WaitForShutdown() {
        l.Wg.Wait()
}

// EligibleToForward returns a bool indicating if the channel is able to
// actively accept requests to forward HTLC's. We're able to forward HTLC's if
// we are eligible to update AND the channel isn't currently flushing the
// outgoing half of the channel.
func (l *channelLink) EligibleToForward() bool {
        return l.EligibleToUpdate() &&
                !l.IsFlushing(Outgoing)
}

// EligibleToUpdate returns a bool indicating if the channel is able to update
// channel state. We're able to update channel state if we know the remote
// party's next revocation point. Otherwise, we can't initiate new channel
// state. We also require that the short channel ID not be the all-zero source
// ID, meaning that the channel has had its ID finalized.
func (l *channelLink) EligibleToUpdate() bool {
        return l.channel.RemoteNextRevocation() != nil &&
                l.ShortChanID() != hop.Source &&
                l.isReestablished()
}

// EnableAdds sets the ChannelUpdateHandler state to allow UpdateAddHtlc's in
// the specified direction. It returns true if the state was changed and false
// if the desired state was already set before the method was called.
func (l *channelLink) EnableAdds(linkDirection LinkDirection) bool {
        if linkDirection == Outgoing {
                return l.isOutgoingAddBlocked.Swap(false)
        }

        return l.isIncomingAddBlocked.Swap(false)
}

// DisableAdds sets the ChannelUpdateHandler state to disallow UpdateAddHtlc's
// in the specified direction. It returns true if the state was changed and
// false if the desired state was already set before the method was called.
func (l *channelLink) DisableAdds(linkDirection LinkDirection) bool {
        if linkDirection == Outgoing {
                return !l.isOutgoingAddBlocked.Swap(true)
        }

        return !l.isIncomingAddBlocked.Swap(true)
}

// IsFlushing returns true when UpdateAddHtlc's are disabled in the direction of
// the argument.
func (l *channelLink) IsFlushing(linkDirection LinkDirection) bool {
        if linkDirection == Outgoing {
                return l.isOutgoingAddBlocked.Load()
        }

        return l.isIncomingAddBlocked.Load()
}

679
// OnFlushedOnce adds a hook that will be called the next time the channel
680
// state reaches zero htlcs. This hook will only ever be called once. If the
681
// channel state already has zero htlcs, then this will be called immediately.
682
func (l *channelLink) OnFlushedOnce(hook func()) {
5✔
683
        select {
5✔
684
        case l.flushHooks.newTransients <- hook:
5✔
685
        case <-l.Quit:
×
686
        }
687
}
688

689
// OnCommitOnce adds a hook that will be called the next time a CommitSig
690
// message is sent in the argument's LinkDirection. This hook will only ever be
691
// called once. If no CommitSig is owed in the argument's LinkDirection, then
692
// we will call this hook be run immediately.
693
func (l *channelLink) OnCommitOnce(direction LinkDirection, hook func()) {
5✔
694
        var queue chan func()
5✔
695

5✔
696
        if direction == Outgoing {
10✔
697
                queue = l.outgoingCommitHooks.newTransients
5✔
698
        } else {
5✔
699
                queue = l.incomingCommitHooks.newTransients
×
700
        }
×
701

702
        select {
5✔
703
        case queue <- hook:
5✔
704
        case <-l.Quit:
×
705
        }
706
}
707

708
// isReestablished returns true if the link has successfully completed the
709
// channel reestablishment dance.
710
func (l *channelLink) isReestablished() bool {
618✔
711
        return atomic.LoadInt32(&l.reestablished) == 1
618✔
712
}
618✔
713

714
// markReestablished signals that the remote peer has successfully exchanged
715
// channel reestablish messages and that the channel is ready to process
716
// subsequent messages.
717
func (l *channelLink) markReestablished() {
215✔
718
        atomic.StoreInt32(&l.reestablished, 1)
215✔
719
}
215✔
720

721
// IsUnadvertised returns true if the underlying channel is unadvertised.
722
func (l *channelLink) IsUnadvertised() bool {
6✔
723
        state := l.channel.State()
6✔
724
        return state.ChannelFlags&lnwire.FFAnnounceChannel == 0
6✔
725
}
6✔
726

727
// sampleNetworkFee samples the current fee rate on the network to get into the
728
// chain in a timely manner. The returned value is expressed in fee-per-kw, as
729
// this is the native rate used when computing the fee for commitment
730
// transactions, and the second-level HTLC transactions.
731
func (l *channelLink) sampleNetworkFee() (chainfee.SatPerKWeight, error) {
4✔
732
        // We'll first query for the sat/kw recommended to be confirmed within 3
4✔
733
        // blocks.
4✔
734
        feePerKw, err := l.cfg.FeeEstimator.EstimateFeePerKW(3)
4✔
735
        if err != nil {
4✔
736
                return 0, err
×
737
        }
×
738

739
        l.log.Debugf("sampled fee rate for 3 block conf: %v sat/kw",
4✔
740
                int64(feePerKw))
4✔
741

4✔
742
        return feePerKw, nil
4✔
743
}
744

745
// shouldAdjustCommitFee returns true if we should update our commitment fee to
746
// match that of the network fee. We'll only update our commitment fee if the
747
// network fee is +/- 10% to our commitment fee or if our current commitment
748
// fee is below the minimum relay fee.
749
func shouldAdjustCommitFee(netFee, chanFee,
750
        minRelayFee chainfee.SatPerKWeight) bool {
14✔
751

14✔
752
        switch {
14✔
753
        // If the network fee is greater than our current commitment fee and
754
        // our current commitment fee is below the minimum relay fee then
755
        // we should switch to it no matter if it is less than a 10% increase.
756
        case netFee > chanFee && chanFee < minRelayFee:
1✔
757
                return true
1✔
758

759
        // If the network fee is greater than the commitment fee, then we'll
760
        // switch to it if it's at least 10% greater than the commit fee.
761
        case netFee > chanFee && netFee >= (chanFee+(chanFee*10)/100):
4✔
762
                return true
4✔
763

764
        // If the network fee is less than our commitment fee, then we'll
765
        // switch to it if it's at least 10% less than the commitment fee.
766
        case netFee < chanFee && netFee <= (chanFee-(chanFee*10)/100):
2✔
767
                return true
2✔
768

769
        // Otherwise, we won't modify our fee.
770
        default:
7✔
771
                return false
7✔
772
        }
773
}
774

775
// failCb is used to cut down on the argument verbosity.
776
type failCb func(update *lnwire.ChannelUpdate1) lnwire.FailureMessage
777

778
// createFailureWithUpdate creates a ChannelUpdate when failing an incoming or
779
// outgoing HTLC. It may return a FailureMessage that references a channel's
780
// alias. If the channel does not have an alias, then the regular channel
781
// update from disk will be returned.
782
func (l *channelLink) createFailureWithUpdate(incoming bool,
783
        outgoingScid lnwire.ShortChannelID, cb failCb) lnwire.FailureMessage {
26✔
784

26✔
785
        // Determine which SCID to use in case we need to use aliases in the
26✔
786
        // ChannelUpdate.
26✔
787
        scid := outgoingScid
26✔
788
        if incoming {
26✔
789
                scid = l.ShortChanID()
×
790
        }
×
791

792
        // Try using the FailAliasUpdate function. If it returns nil, fallback
793
        // to the non-alias behavior.
794
        update := l.cfg.FailAliasUpdate(scid, incoming)
26✔
795
        if update == nil {
46✔
796
                // Fallback to the non-alias behavior.
20✔
797
                var err error
20✔
798
                update, err = l.cfg.FetchLastChannelUpdate(l.ShortChanID())
20✔
799
                if err != nil {
20✔
800
                        return &lnwire.FailTemporaryNodeFailure{}
×
801
                }
×
802
        }
803

804
        return cb(update)
26✔
805
}
806

807
// syncChanState attempts to synchronize channel states with the remote party.
808
// This method is to be called upon reconnection after the initial funding
809
// flow. We'll compare out commitment chains with the remote party, and re-send
810
// either a danging commit signature, a revocation, or both.
811
func (l *channelLink) syncChanStates() error {
172✔
812
        chanState := l.channel.State()
172✔
813

172✔
814
        l.log.Infof("Attempting to re-synchronize channel: %v", chanState)
172✔
815

172✔
816
        // First, we'll generate our ChanSync message to send to the other
172✔
817
        // side. Based on this message, the remote party will decide if they
172✔
818
        // need to retransmit any data or not.
172✔
819
        localChanSyncMsg, err := chanState.ChanSyncMsg()
172✔
820
        if err != nil {
172✔
821
                return fmt.Errorf("unable to generate chan sync message for "+
×
822
                        "ChannelPoint(%v)", l.channel.ChannelPoint())
×
823
        }
×
824
        if err := l.cfg.Peer.SendMessage(true, localChanSyncMsg); err != nil {
172✔
825
                return fmt.Errorf("unable to send chan sync message for "+
×
826
                        "ChannelPoint(%v): %v", l.channel.ChannelPoint(), err)
×
827
        }
×
828

829
        var msgsToReSend []lnwire.Message
172✔
830

172✔
831
        // Next, we'll wait indefinitely to receive the ChanSync message. The
172✔
832
        // first message sent MUST be the ChanSync message.
172✔
833
        select {
172✔
834
        case msg := <-l.upstream:
172✔
835
                l.log.Tracef("Received msg=%v from peer(%x)", msg.MsgType(),
172✔
836
                        l.cfg.Peer.PubKey())
172✔
837

172✔
838
                remoteChanSyncMsg, ok := msg.(*lnwire.ChannelReestablish)
172✔
839
                if !ok {
172✔
840
                        return fmt.Errorf("first message sent to sync "+
×
841
                                "should be ChannelReestablish, instead "+
×
842
                                "received: %T", msg)
×
843
                }
×
844

845
                // If the remote party indicates that they think we haven't
846
                // done any state updates yet, then we'll retransmit the
847
                // channel_ready message first. We do this, as at this point
848
                // we can't be sure if they've really received the
849
                // ChannelReady message.
850
                if remoteChanSyncMsg.NextLocalCommitHeight == 1 &&
172✔
851
                        localChanSyncMsg.NextLocalCommitHeight == 1 &&
172✔
852
                        !l.channel.IsPending() {
338✔
853

166✔
854
                        l.log.Infof("resending ChannelReady message to peer")
166✔
855

166✔
856
                        nextRevocation, err := l.channel.NextRevocationKey()
166✔
857
                        if err != nil {
166✔
858
                                return fmt.Errorf("unable to create next "+
×
859
                                        "revocation: %v", err)
×
860
                        }
×
861

862
                        channelReadyMsg := lnwire.NewChannelReady(
166✔
863
                                l.ChanID(), nextRevocation,
166✔
864
                        )
166✔
865

166✔
866
                        // If this is a taproot channel, then we'll send the
166✔
867
                        // very same nonce that we sent above, as they should
166✔
868
                        // take the latest verification nonce we send.
166✔
869
                        if chanState.ChanType.IsTaproot() {
170✔
870
                                //nolint:lll
4✔
871
                                channelReadyMsg.NextLocalNonce = localChanSyncMsg.LocalNonce
4✔
872
                        }
4✔
873

874
                        // For channels that negotiated the option-scid-alias
875
                        // feature bit, ensure that we send over the alias in
876
                        // the channel_ready message. We'll send the first
877
                        // alias we find for the channel since it does not
878
                        // matter which alias we send. We'll error out if no
879
                        // aliases are found.
880
                        if l.negotiatedAliasFeature() {
170✔
881
                                aliases := l.getAliases()
4✔
882
                                if len(aliases) == 0 {
4✔
883
                                        // This shouldn't happen since we
×
884
                                        // always add at least one alias before
×
885
                                        // the channel reaches the link.
×
886
                                        return fmt.Errorf("no aliases found")
×
887
                                }
×
888

889
                                // getAliases returns a copy of the alias slice
890
                                // so it is ok to use a pointer to the first
891
                                // entry.
892
                                channelReadyMsg.AliasScid = &aliases[0]
4✔
893
                        }
894

895
                        err = l.cfg.Peer.SendMessage(false, channelReadyMsg)
166✔
896
                        if err != nil {
166✔
897
                                return fmt.Errorf("unable to re-send "+
×
898
                                        "ChannelReady: %v", err)
×
899
                        }
×
900
                }
901

902
                // In any case, we'll then process their ChanSync message.
903
                l.log.Info("received re-establishment message from remote side")
172✔
904

172✔
905
                var (
172✔
906
                        openedCircuits []CircuitKey
172✔
907
                        closedCircuits []CircuitKey
172✔
908
                )
172✔
909

172✔
910
                // We've just received a ChanSync message from the remote
172✔
911
                // party, so we'll process the message  in order to determine
172✔
912
                // if we need to re-transmit any messages to the remote party.
172✔
913
                ctx, cancel := l.WithCtxQuitNoTimeout()
172✔
914
                defer cancel()
172✔
915
                msgsToReSend, openedCircuits, closedCircuits, err =
172✔
916
                        l.channel.ProcessChanSyncMsg(ctx, remoteChanSyncMsg)
172✔
917
                if err != nil {
176✔
918
                        return err
4✔
919
                }
4✔
920

921
                // Repopulate any identifiers for circuits that may have been
922
                // opened or unclosed. This may happen if we needed to
923
                // retransmit a commitment signature message.
924
                l.openedCircuits = openedCircuits
172✔
925
                l.closedCircuits = closedCircuits
172✔
926

172✔
927
                // Ensure that all packets have been have been removed from the
172✔
928
                // link's mailbox.
172✔
929
                if err := l.ackDownStreamPackets(); err != nil {
172✔
930
                        return err
×
931
                }
×
932

933
                if len(msgsToReSend) > 0 {
177✔
934
                        l.log.Infof("sending %v updates to synchronize the "+
5✔
935
                                "state", len(msgsToReSend))
5✔
936
                }
5✔
937

938
                // If we have any messages to retransmit, we'll do so
939
                // immediately so we return to a synchronized state as soon as
940
                // possible.
941
                for _, msg := range msgsToReSend {
183✔
942
                        l.cfg.Peer.SendMessage(false, msg)
11✔
943
                }
11✔
944

945
        case <-l.Quit:
4✔
946
                return ErrLinkShuttingDown
4✔
947
        }
948

949
        return nil
172✔
950
}
951

952
// resolveFwdPkgs loads any forwarding packages for this link from disk, and
953
// reprocesses them in order. The primary goal is to make sure that any HTLCs
954
// we previously received are reinstated in memory, and forwarded to the switch
955
// if necessary. After a restart, this will also delete any previously
956
// completed packages.
957
func (l *channelLink) resolveFwdPkgs() error {
215✔
958
        fwdPkgs, err := l.channel.LoadFwdPkgs()
215✔
959
        if err != nil {
215✔
960
                return err
×
961
        }
×
962

963
        l.log.Debugf("loaded %d fwd pks", len(fwdPkgs))
215✔
964

215✔
965
        for _, fwdPkg := range fwdPkgs {
225✔
966
                if err := l.resolveFwdPkg(fwdPkg); err != nil {
11✔
967
                        return err
1✔
968
                }
1✔
969
        }
970

971
        // If any of our reprocessing steps require an update to the commitment
972
        // txn, we initiate a state transition to capture all relevant changes.
973
        if l.channel.NumPendingUpdates(lntypes.Local, lntypes.Remote) > 0 {
219✔
974
                return l.updateCommitTx()
4✔
975
        }
4✔
976

977
        return nil
215✔
978
}
979

980
// resolveFwdPkg interprets the FwdState of the provided package, either
981
// reprocesses any outstanding htlcs in the package, or performs garbage
982
// collection on the package.
983
func (l *channelLink) resolveFwdPkg(fwdPkg *channeldb.FwdPkg) error {
10✔
984
        // Remove any completed packages to clear up space.
10✔
985
        if fwdPkg.State == channeldb.FwdStateCompleted {
15✔
986
                l.log.Debugf("removing completed fwd pkg for height=%d",
5✔
987
                        fwdPkg.Height)
5✔
988

5✔
989
                err := l.channel.RemoveFwdPkgs(fwdPkg.Height)
5✔
990
                if err != nil {
6✔
991
                        l.log.Errorf("unable to remove fwd pkg for height=%d: "+
1✔
992
                                "%v", fwdPkg.Height, err)
1✔
993
                        return err
1✔
994
                }
1✔
995
        }
996

997
        // Otherwise this is either a new package or one has gone through
998
        // processing, but contains htlcs that need to be restored in memory.
999
        // We replay this forwarding package to make sure our local mem state
1000
        // is resurrected, we mimic any original responses back to the remote
1001
        // party, and re-forward the relevant HTLCs to the switch.
1002

1003
        // If the package is fully acked but not completed, it must still have
1004
        // settles and fails to propagate.
1005
        if !fwdPkg.SettleFailFilter.IsFull() {
14✔
1006
                l.processRemoteSettleFails(fwdPkg)
4✔
1007
        }
4✔
1008

1009
        // Finally, replay *ALL ADDS* in this forwarding package. The
1010
        // downstream logic is able to filter out any duplicates, but we must
1011
        // shove the entire, original set of adds down the pipeline so that the
1012
        // batch of adds presented to the sphinx router does not ever change.
1013
        if !fwdPkg.AckFilter.IsFull() {
17✔
1014
                l.processRemoteAdds(fwdPkg)
7✔
1015

7✔
1016
                // If the link failed during processing the adds, we must
7✔
1017
                // return to ensure we won't attempted to update the state
7✔
1018
                // further.
7✔
1019
                if l.failed {
7✔
1020
                        return fmt.Errorf("link failed while " +
×
1021
                                "processing remote adds")
×
1022
                }
×
1023
        }
1024

1025
        return nil
10✔
1026
}
1027

1028
// fwdPkgGarbager periodically reads all forwarding packages from disk and
1029
// removes those that can be discarded. It is safe to do this entirely in the
1030
// background, since all state is coordinated on disk. This also ensures the
1031
// link can continue to process messages and interleave database accesses.
1032
//
1033
// NOTE: This MUST be run as a goroutine.
1034
func (l *channelLink) fwdPkgGarbager() {
215✔
1035
        defer l.Wg.Done()
215✔
1036

215✔
1037
        l.cfg.FwdPkgGCTicker.Resume()
215✔
1038
        defer l.cfg.FwdPkgGCTicker.Stop()
215✔
1039

215✔
1040
        if err := l.loadAndRemove(); err != nil {
215✔
1041
                l.log.Warnf("unable to run initial fwd pkgs gc: %v", err)
×
1042
        }
×
1043

1044
        for {
440✔
1045
                select {
225✔
1046
                case <-l.cfg.FwdPkgGCTicker.Ticks():
10✔
1047
                        if err := l.loadAndRemove(); err != nil {
20✔
1048
                                l.log.Warnf("unable to remove fwd pkgs: %v",
10✔
1049
                                        err)
10✔
1050
                                continue
10✔
1051
                        }
1052
                case <-l.Quit:
204✔
1053
                        return
204✔
1054
                }
1055
        }
1056
}
1057

1058
// loadAndRemove loads all the channels forwarding packages and determines if
1059
// they can be removed. It is called once before the FwdPkgGCTicker ticks so that
1060
// a longer tick interval can be used.
1061
func (l *channelLink) loadAndRemove() error {
225✔
1062
        fwdPkgs, err := l.channel.LoadFwdPkgs()
225✔
1063
        if err != nil {
235✔
1064
                return err
10✔
1065
        }
10✔
1066

1067
        var removeHeights []uint64
215✔
1068
        for _, fwdPkg := range fwdPkgs {
224✔
1069
                if fwdPkg.State != channeldb.FwdStateCompleted {
18✔
1070
                        continue
9✔
1071
                }
1072

1073
                removeHeights = append(removeHeights, fwdPkg.Height)
4✔
1074
        }
1075

1076
        // If removeHeights is empty, return early so we don't use a db
1077
        // transaction.
1078
        if len(removeHeights) == 0 {
430✔
1079
                return nil
215✔
1080
        }
215✔
1081

1082
        return l.channel.RemoveFwdPkgs(removeHeights...)
4✔
1083
}
1084

1085
// handleChanSyncErr performs the error handling logic in the case where we
1086
// could not successfully syncChanStates with our channel peer.
1087
func (l *channelLink) handleChanSyncErr(err error) {
4✔
1088
        l.log.Warnf("error when syncing channel states: %v", err)
4✔
1089

4✔
1090
        var errDataLoss *lnwallet.ErrCommitSyncLocalDataLoss
4✔
1091

4✔
1092
        switch {
4✔
1093
        case errors.Is(err, ErrLinkShuttingDown):
4✔
1094
                l.log.Debugf("unable to sync channel states, link is " +
4✔
1095
                        "shutting down")
4✔
1096
                return
4✔
1097

1098
        // We failed syncing the commit chains, probably because the remote has
1099
        // lost state. We should force close the channel.
1100
        case errors.Is(err, lnwallet.ErrCommitSyncRemoteDataLoss):
4✔
1101
                fallthrough
4✔
1102

1103
        // The remote sent us an invalid last commit secret, we should force
1104
        // close the channel.
1105
        // TODO(halseth): and permanently ban the peer?
1106
        case errors.Is(err, lnwallet.ErrInvalidLastCommitSecret):
4✔
1107
                fallthrough
4✔
1108

1109
        // The remote sent us a commit point different from what they sent us
1110
        // before.
1111
        // TODO(halseth): ban peer?
1112
        case errors.Is(err, lnwallet.ErrInvalidLocalUnrevokedCommitPoint):
4✔
1113
                // We'll fail the link and tell the peer to force close the
4✔
1114
                // channel. Note that the database state is not updated here,
4✔
1115
                // but will be updated when the close transaction is ready to
4✔
1116
                // avoid that we go down before storing the transaction in the
4✔
1117
                // db.
4✔
1118
                l.failf(
4✔
1119
                        LinkFailureError{
4✔
1120
                                code:          ErrSyncError,
4✔
1121
                                FailureAction: LinkFailureForceClose,
4✔
1122
                        },
4✔
1123
                        "unable to synchronize channel states: %v", err,
4✔
1124
                )
4✔
1125

1126
        // We have lost state and cannot safely force close the channel. Fail
1127
        // the channel and wait for the remote to hopefully force close it. The
1128
        // remote has sent us its latest unrevoked commitment point, and we'll
1129
        // store it in the database, such that we can attempt to recover the
1130
        // funds if the remote force closes the channel.
1131
        case errors.As(err, &errDataLoss):
4✔
1132
                err := l.channel.MarkDataLoss(
4✔
1133
                        errDataLoss.CommitPoint,
4✔
1134
                )
4✔
1135
                if err != nil {
4✔
1136
                        l.log.Errorf("unable to mark channel data loss: %v",
×
1137
                                err)
×
1138
                }
×
1139

1140
        // We determined the commit chains were not possible to sync. We
1141
        // cautiously fail the channel, but don't force close.
1142
        // TODO(halseth): can we safely force close in any cases where this
1143
        // error is returned?
1144
        case errors.Is(err, lnwallet.ErrCannotSyncCommitChains):
×
1145
                if err := l.channel.MarkBorked(); err != nil {
×
1146
                        l.log.Errorf("unable to mark channel borked: %v", err)
×
1147
                }
×
1148

1149
        // Other, unspecified error.
1150
        default:
×
1151
        }
1152

1153
        l.failf(
4✔
1154
                LinkFailureError{
4✔
1155
                        code:          ErrRecoveryError,
4✔
1156
                        FailureAction: LinkFailureForceNone,
4✔
1157
                },
4✔
1158
                "unable to synchronize channel states: %v", err,
4✔
1159
        )
4✔
1160
}
1161

1162
// htlcManager is the primary goroutine which drives a channel's commitment
// update state-machine in response to messages received via several channels.
// This goroutine reads messages from the upstream (remote) peer, and also from
// the downstream channel managed by the channel link. In the event that an
// htlc needs to be forwarded, then a send-only forward handler is used which
// sends htlc packets to the switch. Additionally, this goroutine handles
// acting upon all timeouts for any active HTLCs, manages the channel's
// revocation window, and also the htlc trickle queue+timer for this active
// channel.
//
// NOTE: This MUST be run as a goroutine.
func (l *channelLink) htlcManager() {
        defer func() {
                l.cfg.BatchTicker.Stop()
                l.Wg.Done()
                l.log.Infof("exited")
        }()

        l.log.Infof("HTLC manager started, bandwidth=%v", l.Bandwidth())

        // Notify any clients that the link is now in the switch via an
        // ActiveLinkEvent. We'll also defer an inactive link notification for
        // when the link exits to ensure that every active notification is
        // matched by an inactive one.
        l.cfg.NotifyActiveLink(l.ChannelPoint())
        defer l.cfg.NotifyInactiveLinkEvent(l.ChannelPoint())

        // TODO(roasbeef): need to call wipe chan whenever D/C?

        // If this isn't the first time that this channel link has been
        // created, then we'll need to check to see if we need to
        // re-synchronize state with the remote peer. settledHtlcs is a map of
        // HTLCs that we re-settled as part of the channel state sync.
        if l.cfg.SyncStates {
                err := l.syncChanStates()
                if err != nil {
                        l.handleChanSyncErr(err)
                        return
                }
        }

        // If a shutdown message has previously been sent on this link, then we
        // need to make sure that we have disabled any HTLC adds on the
        // outgoing direction of the link, and that we re-send the same
        // shutdown message that we previously sent.
        l.cfg.PreviouslySentShutdown.WhenSome(func(shutdown lnwire.Shutdown) {
                // Immediately disallow any new outgoing HTLCs.
                if !l.DisableAdds(Outgoing) {
                        l.log.Warnf("Outgoing link adds already disabled")
                }

                // Re-send the shutdown message to the peer. Since
                // syncChanStates would have sent any outstanding CommitSig,
                // it is fine for us to immediately queue the shutdown message
                // now.
                err := l.cfg.Peer.SendMessage(false, &shutdown)
                if err != nil {
                        l.log.Warnf("Error sending shutdown message: %v", err)
                }
        })

        // We've successfully reestablished the channel, mark it as such to
        // allow the switch to forward HTLCs in the outbound direction.
        l.markReestablished()

        // Now that we've received both channel_ready and channel reestablish,
        // we can go ahead and send the active channel notification. We'll also
        // defer the inactive notification for when the link exits to ensure
        // that every active notification is matched by an inactive one.
        l.cfg.NotifyActiveChannel(l.ChannelPoint())
        defer l.cfg.NotifyInactiveChannel(l.ChannelPoint())

        // With the channel states synced, we now reset the mailbox to ensure
        // we start processing all unacked packets in order. This is done here
        // to ensure that all acknowledgments that occur during channel
        // resynchronization have taken effect, causing us only to pull unacked
        // packets after starting to read from the downstream mailbox.
        l.mailBox.ResetPackets()

        // After cleaning up any memory pertaining to incoming packets, we now
        // replay our forwarding packages to handle any htlcs that can be
        // processed locally, or need to be forwarded out to the switch. We
        // will only attempt to resolve packages if our short chan id indicates
        // that the channel is not pending, otherwise we should have no htlcs
        // to reforward.
        if l.ShortChanID() != hop.Source {
                err := l.resolveFwdPkgs()
                switch err {
                // No error was encountered, success.
                case nil:

                // If the duplicate keystone error was encountered, we'll fail
                // without sending an Error message to the peer.
                case ErrDuplicateKeystone:
                        l.failf(LinkFailureError{code: ErrCircuitError},
                                "temporary circuit error: %v", err)
                        return

                // A non-nil error was encountered, send an Error message to
                // the peer.
                default:
                        l.failf(LinkFailureError{code: ErrInternalError},
                                "unable to resolve fwd pkgs: %v", err)
                        return
                }

                // With our link's in-memory state fully reconstructed, spawn a
                // goroutine to manage the reclamation of disk space occupied
                // by completed forwarding packages.
                l.Wg.Add(1)
                go l.fwdPkgGarbager()
        }

        for {
                // We must always check if we failed at some point processing
                // the last update before processing the next.
                if l.failed {
                        l.log.Errorf("link failed, exiting htlcManager")
                        return
                }

                // If the previous event resulted in a non-empty batch, resume
                // the batch ticker so that it can be cleared. Otherwise pause
                // the ticker to prevent waking up the htlcManager while the
                // batch is empty.
                numUpdates := l.channel.NumPendingUpdates(
                        lntypes.Local, lntypes.Remote,
                )
                if numUpdates > 0 {
                        l.cfg.BatchTicker.Resume()
                        l.log.Tracef("BatchTicker resumed, "+
                                "NumPendingUpdates(Local, Remote)=%d",
                                numUpdates,
                        )
                } else {
                        l.cfg.BatchTicker.Pause()
                        l.log.Trace("BatchTicker paused due to zero " +
                                "NumPendingUpdates(Local, Remote)")
                }

4,154✔
1301
                // We have a new hook that needs to be run when we reach a clean
1302
                // channel state.
1303
                case hook := <-l.flushHooks.newTransients:
5✔
1304
                        if l.channel.IsChannelClean() {
9✔
1305
                                hook()
4✔
1306
                        } else {
9✔
1307
                                l.flushHooks.alloc(hook)
5✔
1308
                        }
5✔
1309

1310
                // We have a new hook that needs to be run when we have
1311
                // committed all of our updates.
1312
                case hook := <-l.outgoingCommitHooks.newTransients:
5✔
1313
                        if !l.channel.OweCommitment() {
9✔
1314
                                hook()
4✔
1315
                        } else {
5✔
1316
                                l.outgoingCommitHooks.alloc(hook)
1✔
1317
                        }
1✔
1318

1319
                // We have a new hook that needs to be run when our peer has
1320
                // committed all of their updates.
1321
                case hook := <-l.incomingCommitHooks.newTransients:
×
1322
                        if !l.channel.NeedCommitment() {
×
1323
                                hook()
×
1324
                        } else {
×
1325
                                l.incomingCommitHooks.alloc(hook)
×
1326
                        }
×
1327

                // Our update fee timer has fired, so we'll check the network
                // fee to see if we should adjust our commitment fee.
                case <-l.updateFeeTimer.C:
                        l.updateFeeTimer.Reset(l.randomFeeUpdateTimeout())

                        // If we're not the initiator of the channel, we don't
                        // control the fees, so we can ignore this.
                        if !l.channel.IsInitiator() {
                                continue
                        }

                        // If we are the initiator, then we'll sample the
                        // current fee rate to get into the chain within 3
                        // blocks.
                        netFee, err := l.sampleNetworkFee()
                        if err != nil {
                                l.log.Errorf("unable to sample network fee: %v",
                                        err)
                                continue
                        }

                        minRelayFee := l.cfg.FeeEstimator.RelayFeePerKW()

                        newCommitFee := l.channel.IdealCommitFeeRate(
                                netFee, minRelayFee,
                                l.cfg.MaxAnchorsCommitFeeRate,
                                l.cfg.MaxFeeAllocation,
                        )

                        // We determine if we should adjust the commitment fee
                        // based on the current commitment fee, the suggested
                        // new commitment fee and the current minimum relay fee
                        // rate.
                        commitFee := l.channel.CommitFeeRate()
                        if !shouldAdjustCommitFee(
                                newCommitFee, commitFee, minRelayFee,
                        ) {
                                continue
                        }

                        // If we do, then we'll send a new UpdateFee message to
                        // the remote party, to be locked in with a new update.
                        if err := l.updateChannelFee(newCommitFee); err != nil {
                                l.log.Errorf("unable to update fee rate: %v",
                                        err)
                                continue
                        }
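
                        // To make the flow concrete with assumed numbers (not
                        // taken from this file): if commitFee is 5000 sat/kw,
                        // the sampled netFee suggests 2500 sat/kw, and
                        // minRelayFee is 253 sat/kw, IdealCommitFeeRate will
                        // propose the lower (clamped) rate,
                        // shouldAdjustCommitFee returns true, and an UpdateFee
                        // message is sent to be locked in with the next
                        // commitment update.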

                // The underlying channel has notified us of a unilateral close
                // carried out by the remote peer. In the case of such an
                // event, we'll wipe the channel state from the peer, and mark
                // the contract as fully settled. Afterwards we can exit.
                //
                // TODO(roasbeef): add force closure? also breach?
                case <-l.cfg.ChainEvents.RemoteUnilateralClosure:
                        l.log.Warnf("remote peer has closed on-chain")

                        // TODO(roasbeef): remove altogether
                        go func() {
                                chanPoint := l.channel.ChannelPoint()
                                l.cfg.Peer.WipeChannel(&chanPoint)
                        }()

                        return

                case <-l.cfg.BatchTicker.Ticks():
                        // Attempt to extend the remote commitment chain
                        // including all the currently pending entries. If the
                        // send was unsuccessful, then abandon the update,
                        // waiting for the revocation window to open up.
                        if !l.updateCommitTxOrFail() {
                                return
                        }

                case <-l.cfg.PendingCommitTicker.Ticks():
                        l.failf(
                                LinkFailureError{
                                        code:          ErrRemoteUnresponsive,
                                        FailureAction: LinkFailureDisconnect,
                                },
                                "unable to complete dance",
                        )
                        return

                // A message from the switch was just received. This indicates
                // that the link is an intermediate hop in a multi-hop HTLC
                // circuit.
                case pkt := <-l.downstream:
                        l.handleDownstreamPkt(pkt)

                // A message from the connected peer was just received. This
                // indicates that we have a new incoming HTLC, either directly
                // for us, or part of a multi-hop HTLC circuit.
                case msg := <-l.upstream:
                        l.handleUpstreamMsg(msg)

                // An htlc resolution is received. This means that we now have
                // a resolution for a previously accepted htlc.
                case hodlItem := <-l.hodlQueue.ChanOut():
                        htlcResolution := hodlItem.(invoices.HtlcResolution)
                        err := l.processHodlQueue(htlcResolution)
                        switch err {
                        // No error, success.
                        case nil:

                        // If the duplicate keystone error was encountered,
                        // fail back gracefully.
                        case ErrDuplicateKeystone:
                                l.failf(LinkFailureError{
                                        code: ErrCircuitError,
                                }, "process hodl queue: "+
                                        "temporary circuit error: %v",
                                        err,
                                )

                        // Send an Error message to the peer.
                        default:
                                l.failf(LinkFailureError{
                                        code: ErrInternalError,
                                }, "process hodl queue: unable to update "+
                                        "commitment: %v", err,
                                )
                        }

                case <-l.Quit:
                        return
                }
        }
}

// processHodlQueue processes a received htlc resolution and continues reading
// from the hodl queue until no more resolutions remain. When this function
// returns without an error, the commit tx should be updated.
func (l *channelLink) processHodlQueue(
        firstResolution invoices.HtlcResolution) error {

        // Try to read all waiting resolution messages, so that they can all
        // be processed in a single commitment tx update.
        htlcResolution := firstResolution
loop:
        for {
                // Look up the hodl htlc that can be failed or settled with
                // this event. The hodl htlc must be present in the map.
                circuitKey := htlcResolution.CircuitKey()
                hodlHtlc, ok := l.hodlMap[circuitKey]
                if !ok {
                        return fmt.Errorf("hodl htlc not found: %v", circuitKey)
                }

                err := l.processHtlcResolution(htlcResolution, hodlHtlc)
                if err != nil {
                        return err
                }

                // Clean up hodl map.
                delete(l.hodlMap, circuitKey)

                select {
                case item := <-l.hodlQueue.ChanOut():
                        htlcResolution = item.(invoices.HtlcResolution)
                default:
                        break loop
                }
        }

        // Update the commitment tx.
        if err := l.updateCommitTx(); err != nil {
                return err
        }

        return nil
}

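// The loop above is the usual non-blocking drain pattern: keep reading from
// hodlQueue.ChanOut() until the default case hits, then sign once. A generic
// sketch of the same idea (ch, process, and commit are placeholders):
//
//      for {
//              select {
//              case item := <-ch:
//                      process(item)
//              default:
//                      return commit()
//              }
//      }
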
// processHtlcResolution applies a received htlc resolution to the provided
// htlc. When this function returns without an error, the commit tx should be
// updated.
func (l *channelLink) processHtlcResolution(resolution invoices.HtlcResolution,
        htlc hodlHtlc) error {

        circuitKey := resolution.CircuitKey()

        // Determine the required action for the resolution based on the type
        // of resolution we have received.
        switch res := resolution.(type) {
        // Settle htlcs that returned a settle resolution using the preimage
        // in the resolution.
        case *invoices.HtlcSettleResolution:
                l.log.Debugf("received settle resolution for %v "+
                        "with outcome: %v", circuitKey, res.Outcome)

                return l.settleHTLC(
                        res.Preimage, htlc.add.ID, htlc.sourceRef,
                )

        // For htlc failures, we get the relevant failure message based
        // on the failure resolution and then fail the htlc.
        case *invoices.HtlcFailResolution:
                l.log.Debugf("received cancel resolution for "+
                        "%v with outcome: %v", circuitKey, res.Outcome)

                // Get the lnwire failure message based on the resolution
                // result.
                failure := getResolutionFailure(res, htlc.add.Amount)

                l.sendHTLCError(
                        htlc.add, htlc.sourceRef, failure, htlc.obfuscator,
                        true,
                )
                return nil

        // Fail if we do not get a settle or fail resolution, since we
        // are only expecting to handle settles and fails.
        default:
                return fmt.Errorf("unknown htlc resolution type: %T",
                        resolution)
        }
}

// getResolutionFailure returns the wire message that a htlc resolution should
// be failed with.
func getResolutionFailure(resolution *invoices.HtlcFailResolution,
        amount lnwire.MilliSatoshi) *LinkError {

        // If the resolution has been resolved as part of a MPP timeout,
        // we need to fail the htlc with lnwire.FailMppTimeout.
        if resolution.Outcome == invoices.ResultMppTimeout {
                return NewDetailedLinkError(
                        &lnwire.FailMPPTimeout{}, resolution.Outcome,
                )
        }

        // If the htlc is not a MPP timeout, we fail it with
        // FailIncorrectDetails. This error is sent for invoice payment
        // failures such as underpayment or expiry-too-soon, and for hodl
        // invoices (which return FailIncorrectDetails to avoid leaking
        // information).
        incorrectDetails := lnwire.NewFailIncorrectDetails(
                amount, uint32(resolution.AcceptHeight),
        )

        return NewDetailedLinkError(incorrectDetails, resolution.Outcome)
}

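// In summary, the mapping implemented above is:
//
//      invoices.ResultMppTimeout → &lnwire.FailMPPTimeout{}
//      any other fail outcome    → lnwire.NewFailIncorrectDetails(
//                                          amount, acceptHeight)
//
// with both variants wrapped in a *LinkError that preserves the resolution
// outcome.
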
// randomFeeUpdateTimeout returns a random timeout between the bounds defined
// within the link's configuration that will be used to determine when the
// link should propose an update to its commitment fee rate.
func (l *channelLink) randomFeeUpdateTimeout() time.Duration {
        lower := int64(l.cfg.MinUpdateTimeout)
        upper := int64(l.cfg.MaxUpdateTimeout)
        return time.Duration(prand.Int63n(upper-lower) + lower)
}

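// As a worked example, assuming the link is configured with the package's
// 10 and 60 minute defaults, lower = 10m and upper = 60m, so
// prand.Int63n(upper-lower) yields a value in [0, 50m) and the returned
// timeout is uniformly distributed over [10m, 60m):
//
//      l.updateFeeTimer.Reset(l.randomFeeUpdateTimeout())
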
// handleDownstreamUpdateAdd processes an UpdateAddHTLC packet sent from the
// downstream HTLC Switch.
func (l *channelLink) handleDownstreamUpdateAdd(pkt *htlcPacket) error {
        htlc, ok := pkt.htlc.(*lnwire.UpdateAddHTLC)
        if !ok {
                return errors.New("not an UpdateAddHTLC packet")
        }

        // If we are flushing the link in the outgoing direction, we can't add
        // new htlcs to the link and we need to bounce it.
        if l.IsFlushing(Outgoing) {
                l.mailBox.FailAdd(pkt)

                return NewDetailedLinkError(
                        &lnwire.FailPermanentChannelFailure{},
                        OutgoingFailureLinkNotEligible,
                )
        }

        // If hodl.AddOutgoing mode is active, we exit early to simulate
        // arbitrary delays between the switch adding an ADD to the
        // mailbox, and the HTLC being added to the commitment state.
        if l.cfg.HodlMask.Active(hodl.AddOutgoing) {
                l.log.Warnf(hodl.AddOutgoing.Warning())
                l.mailBox.AckPacket(pkt.inKey())
                return nil
        }

        // Check if we can add the HTLC here without exceeding the max fee
        // exposure threshold.
        if l.isOverexposedWithHtlc(htlc, false) {
                l.log.Debugf("Unable to handle downstream HTLC - max fee " +
                        "exposure exceeded")

                l.mailBox.FailAdd(pkt)

                return NewDetailedLinkError(
                        lnwire.NewTemporaryChannelFailure(nil),
                        OutgoingFailureDownstreamHtlcAdd,
                )
        }

        // A new payment has been initiated via the downstream channel,
        // so we add the new HTLC to our local log, then update the
        // commitment chains.
        htlc.ChanID = l.ChanID()
        openCircuitRef := pkt.inKey()

        // We enforce the fee buffer for the commitment transaction because
        // we are in control of adding this htlc. Nothing has locked-in yet so
        // we can securely enforce the fee buffer which is only relevant if we
        // are the initiator of the channel.
        index, err := l.channel.AddHTLC(htlc, &openCircuitRef)
        if err != nil {
                // The HTLC was unable to be added to the state machine,
                // as a result, we'll signal the switch to cancel the
                // pending payment.
                l.log.Warnf("Unable to handle downstream add HTLC: %v",
                        err)

                // Remove this packet from the link's mailbox, this
                // prevents it from being reprocessed if the link
                // restarts and resets its mailbox. If this response
                // doesn't make it back to the originating link, it will
                // be rejected upon attempting to reforward the Add to
                // the switch, since the circuit was never fully opened,
                // and the forwarding package shows it as
                // unacknowledged.
                l.mailBox.FailAdd(pkt)

                return NewDetailedLinkError(
                        lnwire.NewTemporaryChannelFailure(nil),
                        OutgoingFailureDownstreamHtlcAdd,
                )
        }

        l.log.Tracef("received downstream htlc: payment_hash=%x, "+
                "local_log_index=%v, pend_updates=%v",
                htlc.PaymentHash[:], index,
                l.channel.NumPendingUpdates(lntypes.Local, lntypes.Remote))

        pkt.outgoingChanID = l.ShortChanID()
        pkt.outgoingHTLCID = index
        htlc.ID = index

        l.log.Debugf("queueing keystone of ADD open circuit: %s->%s",
                pkt.inKey(), pkt.outKey())

        l.openedCircuits = append(l.openedCircuits, pkt.inKey())
        l.keystoneBatch = append(l.keystoneBatch, pkt.keystone())

        _ = l.cfg.Peer.SendMessage(false, htlc)

        // Send a forward event notification to htlcNotifier.
        l.cfg.HtlcNotifier.NotifyForwardingEvent(
                newHtlcKey(pkt),
                HtlcInfo{
                        IncomingTimeLock: pkt.incomingTimeout,
                        IncomingAmt:      pkt.incomingAmount,
                        OutgoingTimeLock: htlc.Expiry,
                        OutgoingAmt:      htlc.Amount,
                },
                getEventType(pkt),
        )

        l.tryBatchUpdateCommitTx()

        return nil
}

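// The happy path above can be summarized as: AddHTLC assigns an index, the
// keystone (inKey → outKey) and opened circuit are queued, the UpdateAddHTLC
// is sent to the peer, a forwarding event is published, and
// tryBatchUpdateCommitTx decides whether to sign immediately.
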
// handleDownstreamPkt processes an HTLC packet sent from the downstream HTLC
// Switch. Possible messages sent by the switch include requests to forward new
// HTLCs, timeout previously cleared HTLCs, and finally to settle currently
// cleared HTLCs with the upstream peer.
//
// TODO(roasbeef): add sync ntfn to ensure switch always has consistent view?
func (l *channelLink) handleDownstreamPkt(pkt *htlcPacket) {
        switch htlc := pkt.htlc.(type) {
        case *lnwire.UpdateAddHTLC:
                // Handle add message. The returned error can be ignored,
                // because it is also sent through the mailbox.
                _ = l.handleDownstreamUpdateAdd(pkt)

        case *lnwire.UpdateFulfillHTLC:
                // If hodl.SettleOutgoing mode is active, we exit early to
                // simulate arbitrary delays between the switch adding the
                // SETTLE to the mailbox, and the HTLC being added to the
                // commitment state.
                if l.cfg.HodlMask.Active(hodl.SettleOutgoing) {
                        l.log.Warnf(hodl.SettleOutgoing.Warning())
                        l.mailBox.AckPacket(pkt.inKey())
                        return
                }

                // An HTLC we forward to the switch has just settled somewhere
                // upstream. Therefore we settle the HTLC within our local
                // state machine.
                inKey := pkt.inKey()
                err := l.channel.SettleHTLC(
                        htlc.PaymentPreimage,
                        pkt.incomingHTLCID,
                        pkt.sourceRef,
                        pkt.destRef,
                        &inKey,
                )
                if err != nil {
                        l.log.Errorf("unable to settle incoming HTLC for "+
                                "circuit-key=%v: %v", inKey, err)

                        // If the HTLC index for the Settle response was not
                        // known to our commitment state, it has already been
                        // cleaned up by a prior response. We'll thus try to
                        // clean up any lingering state to ensure we don't
                        // continue reforwarding.
                        if _, ok := err.(lnwallet.ErrUnknownHtlcIndex); ok {
                                l.cleanupSpuriousResponse(pkt)
                        }

                        // Remove the packet from the link's mailbox to ensure
                        // it doesn't get replayed after a reconnection.
                        l.mailBox.AckPacket(inKey)

                        return
                }

                l.log.Debugf("queueing removal of SETTLE closed circuit: "+
                        "%s->%s", pkt.inKey(), pkt.outKey())

                l.closedCircuits = append(l.closedCircuits, pkt.inKey())

                // With the HTLC settled, we'll need to populate the wire
                // message to target the specific channel and HTLC to be
                // canceled.
                htlc.ChanID = l.ChanID()
                htlc.ID = pkt.incomingHTLCID

                // Then we send the HTLC settle message to the connected peer
                // so we can continue the propagation of the settle message.
                l.cfg.Peer.SendMessage(false, htlc)

                // Send a settle event notification to htlcNotifier.
                l.cfg.HtlcNotifier.NotifySettleEvent(
                        newHtlcKey(pkt),
                        htlc.PaymentPreimage,
                        getEventType(pkt),
                )

                // Immediately update the commitment tx to minimize latency.
                l.updateCommitTxOrFail()

        case *lnwire.UpdateFailHTLC:
                // If hodl.FailOutgoing mode is active, we exit early to
                // simulate arbitrary delays between the switch adding a FAIL
                // to the mailbox, and the HTLC being added to the commitment
                // state.
                if l.cfg.HodlMask.Active(hodl.FailOutgoing) {
                        l.log.Warnf(hodl.FailOutgoing.Warning())
                        l.mailBox.AckPacket(pkt.inKey())
                        return
                }

                // An HTLC cancellation has been triggered somewhere upstream,
                // so we'll remove the HTLC from our local state machine.
                inKey := pkt.inKey()
                err := l.channel.FailHTLC(
                        pkt.incomingHTLCID,
                        htlc.Reason,
                        pkt.sourceRef,
                        pkt.destRef,
                        &inKey,
                )
                if err != nil {
                        l.log.Errorf("unable to cancel incoming HTLC for "+
                                "circuit-key=%v: %v", inKey, err)

                        // If the HTLC index for the Fail response was not
                        // known to our commitment state, it has already been
                        // cleaned up by a prior response. We'll thus try to
                        // clean up any lingering state to ensure we don't
                        // continue reforwarding.
                        if _, ok := err.(lnwallet.ErrUnknownHtlcIndex); ok {
                                l.cleanupSpuriousResponse(pkt)
                        }

                        // Remove the packet from the link's mailbox to ensure
                        // it doesn't get replayed after a reconnection.
                        l.mailBox.AckPacket(inKey)

                        return
                }

                l.log.Debugf("queueing removal of FAIL closed circuit: %s->%s",
                        pkt.inKey(), pkt.outKey())

                l.closedCircuits = append(l.closedCircuits, pkt.inKey())

                // With the HTLC removed, we'll need to populate the wire
                // message to target the specific channel and HTLC to be
                // canceled. The "Reason" field will have already been set
                // within the switch.
                htlc.ChanID = l.ChanID()
                htlc.ID = pkt.incomingHTLCID

                // We send the HTLC message to the peer which initially created
                // the HTLC. If the incoming blinding point is non-nil, we
                // know that we are a relaying node in a blinded path.
                // Otherwise, we're either an introduction node or not part of
                // a blinded path at all.
                if err := l.sendIncomingHTLCFailureMsg(
                        htlc.ID,
                        pkt.obfuscator,
                        htlc.Reason,
                ); err != nil {
                        l.log.Errorf("unable to send HTLC failure: %v",
                                err)

                        return
                }

                // If the packet does not have a link failure set, it failed
                // further down the route so we notify a forwarding failure.
                // Otherwise, we notify a link failure because it failed at our
                // node.
                if pkt.linkFailure != nil {
                        l.cfg.HtlcNotifier.NotifyLinkFailEvent(
                                newHtlcKey(pkt),
                                newHtlcInfo(pkt),
                                getEventType(pkt),
                                pkt.linkFailure,
                                false,
                        )
                } else {
                        l.cfg.HtlcNotifier.NotifyForwardingFailEvent(
                                newHtlcKey(pkt), getEventType(pkt),
                        )
                }

                // Immediately update the commitment tx to minimize latency.
                l.updateCommitTxOrFail()
        }
}

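// In short, handleDownstreamPkt dispatches on the packet's wire type:
//
//      *lnwire.UpdateAddHTLC     → handleDownstreamUpdateAdd
//      *lnwire.UpdateFulfillHTLC → channel.SettleHTLC, settle sent to peer
//      *lnwire.UpdateFailHTLC    → channel.FailHTLC, failure sent to peer
//
// with the two response paths immediately calling updateCommitTxOrFail to
// minimize latency.
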
// tryBatchUpdateCommitTx updates the commitment transaction if the batch is
// full.
func (l *channelLink) tryBatchUpdateCommitTx() {
        pending := l.channel.NumPendingUpdates(lntypes.Local, lntypes.Remote)
        if pending < uint64(l.cfg.BatchSize) {
                return
        }

        l.updateCommitTxOrFail()
}

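// For instance, with BatchSize set to 10 (an assumed value, not this file's
// default), the first nine queued updates return early here and only the
// tenth triggers an immediate commitment update; a smaller partial batch is
// instead flushed by the BatchTicker case in htlcManager.
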
// cleanupSpuriousResponse attempts to ack any AddRef or SettleFailRef
// associated with this packet. If successful in doing so, it will also purge
// the open circuit from the circuit map and remove the packet from the link's
// mailbox.
func (l *channelLink) cleanupSpuriousResponse(pkt *htlcPacket) {
        inKey := pkt.inKey()

        l.log.Debugf("cleaning up spurious response for incoming "+
                "circuit-key=%v", inKey)

        // If the htlc packet doesn't have a source reference, it is unsafe to
        // proceed, as skipping this ack may cause the htlc to be reforwarded.
        if pkt.sourceRef == nil {
                l.log.Errorf("unable to cleanup response for incoming "+
                        "circuit-key=%v, does not contain source reference",
                        inKey)
                return
        }

        // If the source reference is present, we will try to prevent this
        // link from resending the packet to the switch. To do so, we ack the
        // AddRef of the incoming HTLC belonging to this link.
        err := l.channel.AckAddHtlcs(*pkt.sourceRef)
        if err != nil {
                l.log.Errorf("unable to ack AddRef for incoming "+
                        "circuit-key=%v: %v", inKey, err)

                // If this operation failed, it is unsafe to attempt removal
                // of the destination reference or circuit, so we exit early.
                // The cleanup may proceed with a different packet in the
                // future that succeeds on this step.
                return
        }

        // Now that we know this link will stop retransmitting Adds to the
        // switch, we can begin to tear down the response reference and
        // circuit map.
        //
        // If the packet includes a destination reference, then a response for
        // this HTLC was locked into the outgoing channel. Attempt to remove
        // this reference, so we stop retransmitting the response internally.
        // Even if this fails, we will proceed in trying to delete the
        // circuit. When retransmitting responses, the destination references
        // will be cleaned up if an open circuit is not found in the circuit
        // map.
        if pkt.destRef != nil {
                err := l.channel.AckSettleFails(*pkt.destRef)
                if err != nil {
                        l.log.Errorf("unable to ack SettleFailRef "+
                                "for incoming circuit-key=%v: %v",
                                inKey, err)
                }
        }

        l.log.Debugf("deleting circuit for incoming circuit-key=%x", inKey)

        // With all known references acked, we can now safely delete the
        // circuit from the switch's circuit map, as the state is no longer
        // needed.
        err = l.cfg.Circuits.DeleteCircuits(inKey)
        if err != nil {
                l.log.Errorf("unable to delete circuit for "+
                        "circuit-key=%v: %v", inKey, err)
        }
}

// handleUpstreamMsg processes wire messages related to commitment state
// updates from the upstream peer. The upstream peer is the peer with whom we
// have a direct channel, updating our respective commitment chains.
func (l *channelLink) handleUpstreamMsg(msg lnwire.Message) {
        switch msg := msg.(type) {
        case *lnwire.UpdateAddHTLC:
                if l.IsFlushing(Incoming) {
                        // This is forbidden by the protocol specification.
                        // The best chance we have to deal with this is to drop
                        // the connection. This should roll back the channel
                        // state to the last CommitSig. If the remote has
                        // already sent a CommitSig we haven't received yet,
                        // channel state will be re-synchronized with a
                        // ChannelReestablish message upon reconnection and the
                        // protocol state that caused us to flush the link will
                        // be rolled back. In the event that there was some
                        // non-deterministic behavior in the remote that caused
                        // them to violate the protocol, we have a decent shot
                        // at correcting it this way, since reconnecting will
                        // put us in the cleanest possible state to try again.
                        //
                        // In addition to the above, it is possible for us to
                        // hit this case in situations where we improperly
                        // handle message ordering due to concurrency choices.
                        // An issue has been filed to address this here:
                        // https://github.com/lightningnetwork/lnd/issues/8393
                        l.failf(
                                LinkFailureError{
                                        code:             ErrInvalidUpdate,
                                        FailureAction:    LinkFailureDisconnect,
                                        PermanentFailure: false,
                                        Warning:          true,
                                },
                                "received add while link is flushing",
                        )

                        return
                }

                // Disallow htlcs with blinding points set if we haven't
                // enabled the feature. This saves us from having to process
                // the onion at all, but will only catch blinded payments
                // where we are a relaying node (as the blinding point will
                // be in the payload when we're the introduction node).
                if msg.BlindingPoint.IsSome() && l.cfg.DisallowRouteBlinding {
                        l.failf(LinkFailureError{code: ErrInvalidUpdate},
                                "blinding point included when route blinding "+
                                        "is disabled")

                        return
                }

                // We have to check the limit here rather than later in the
                // switch because the counterparty can keep sending HTLCs
                // without sending a revoke. This would mean that the switch
                // check would only occur later.
                if l.isOverexposedWithHtlc(msg, true) {
                        l.failf(LinkFailureError{code: ErrInternalError},
                                "peer sent us an HTLC that exceeded our max "+
                                        "fee exposure")

                        return
                }

                // We just received an add request from an upstream peer, so we
                // add it to our state machine, then add the HTLC to our
                // "settle" list in the event that we know the preimage.
                index, err := l.channel.ReceiveHTLC(msg)
                if err != nil {
                        l.failf(LinkFailureError{code: ErrInvalidUpdate},
                                "unable to handle upstream add HTLC: %v", err)
                        return
                }

                l.log.Tracef("receive upstream htlc with payment hash(%x), "+
                        "assigning index: %v", msg.PaymentHash[:], index)

        case *lnwire.UpdateFulfillHTLC:
                pre := msg.PaymentPreimage
                idx := msg.ID

                // Before we pipeline the settle, we'll check the set of
                // active htlcs to see if the related UpdateAddHTLC has been
                // fully locked-in.
                var lockedin bool
                htlcs := l.channel.ActiveHtlcs()
                for _, add := range htlcs {
                        // The HTLC will be outgoing and match idx.
                        if !add.Incoming && add.HtlcIndex == idx {
                                lockedin = true
                                break
                        }
                }

                if !lockedin {
                        l.failf(
                                LinkFailureError{code: ErrInvalidUpdate},
                                "unable to handle upstream settle",
                        )
                        return
                }

                if err := l.channel.ReceiveHTLCSettle(pre, idx); err != nil {
                        l.failf(
                                LinkFailureError{
                                        code:          ErrInvalidUpdate,
                                        FailureAction: LinkFailureForceClose,
                                },
                                "unable to handle upstream settle HTLC: %v",
                                err,
                        )
                        return
                }

                settlePacket := &htlcPacket{
                        outgoingChanID: l.ShortChanID(),
                        outgoingHTLCID: idx,
                        htlc: &lnwire.UpdateFulfillHTLC{
                                PaymentPreimage: pre,
                        },
                }

                // Add the newly discovered preimage to our growing list of
                // uncommitted preimages. These will be written to the witness
                // cache just before accepting the next commitment signature
                // from the remote peer.
                l.uncommittedPreimages = append(l.uncommittedPreimages, pre)

                // Pipeline this settle, send it to the switch.
                go l.forwardBatch(false, settlePacket)

        case *lnwire.UpdateFailMalformedHTLC:
                // Convert the failure type encoded within the HTLC fail
                // message to the proper generic lnwire error code.
                var failure lnwire.FailureMessage
                switch msg.FailureCode {
                case lnwire.CodeInvalidOnionVersion:
                        failure = &lnwire.FailInvalidOnionVersion{
                                OnionSHA256: msg.ShaOnionBlob,
                        }
                case lnwire.CodeInvalidOnionHmac:
                        failure = &lnwire.FailInvalidOnionHmac{
                                OnionSHA256: msg.ShaOnionBlob,
                        }

                case lnwire.CodeInvalidOnionKey:
                        failure = &lnwire.FailInvalidOnionKey{
                                OnionSHA256: msg.ShaOnionBlob,
                        }

                // Handle malformed errors that are part of a blinded route.
                // This case is slightly different, because we expect every
                // relaying node in the blinded portion of the route to send
                // malformed errors. If we're also a relaying node, we're
                // likely going to switch this error out anyway for our own
                // malformed error, but we handle the case here for
                // completeness.
                case lnwire.CodeInvalidBlinding:
                        failure = &lnwire.FailInvalidBlinding{
                                OnionSHA256: msg.ShaOnionBlob,
                        }

                default:
                        l.log.Warnf("unexpected failure code received in "+
                                "UpdateFailMalformedHTLC: %v", msg.FailureCode)

                        // We don't just pass back the error we received from
                        // our successor. Otherwise we might report a failure
                        // that penalizes us more than needed. If the onion
                        // that we forwarded was correct, the node should have
                        // been able to send back its own failure. The node
                        // did not send back its own failure, so we assume
                        // there was a problem with the onion and report that
                        // back. We reuse the invalid onion key failure
                        // because there is no specific error for this case.
                        failure = &lnwire.FailInvalidOnionKey{
                                OnionSHA256: msg.ShaOnionBlob,
                        }
                }

                // With the error parsed, we'll convert it into its opaque
                // form.
                var b bytes.Buffer
                if err := lnwire.EncodeFailure(&b, failure, 0); err != nil {
                        l.log.Errorf("unable to encode malformed error: %v",
                                err)
                        return
                }

                // If the remote side has been unable to parse the onion blob
                // we sent it, then we should transform the malformed HTLC
                // message into the usual HTLC fail message.
                err := l.channel.ReceiveFailHTLC(msg.ID, b.Bytes())
                if err != nil {
                        l.failf(LinkFailureError{code: ErrInvalidUpdate},
                                "unable to handle upstream fail HTLC: %v", err)
                        return
                }

        case *lnwire.UpdateFailHTLC:
                // Verify that the failure reason is at least 256 bytes plus
                // overhead.
                const minimumFailReasonLength = lnwire.FailureMessageLength +
                        2 + 2 + 32
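                // Assuming lnwire.FailureMessageLength is 256, this works out
                // to 256 + 2 + 2 + 32 = 292 bytes: the failure message
                // itself, a 2-byte message length, a 2-byte pad length, and a
                // 32-byte MAC (an illustrative breakdown of the BOLT #4
                // failure framing).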

                if len(msg.Reason) < minimumFailReasonLength {
                        // We've received a reason with a non-compliant length.
                        // Older nodes happily relay back these failures that
                        // may originate from a node further downstream.
                        // Therefore we can't just fail the channel.
                        //
                        // We want to be compliant ourselves, so we also can't
                        // pass back the reason unmodified. And we must make
                        // sure that we don't hit the magic length check of 260
                        // bytes in processRemoteSettleFails either.
                        //
                        // Because the reason is unreadable for the payer
                        // anyway, we just replace it by a compliant-length
                        // series of random bytes.
                        msg.Reason = make([]byte, minimumFailReasonLength)
                        _, err := crand.Read(msg.Reason[:])
                        if err != nil {
                                l.log.Errorf("Random generation error: %v", err)

                                return
                        }
                }

                // Add fail to the update log.
                idx := msg.ID
                err := l.channel.ReceiveFailHTLC(idx, msg.Reason[:])
                if err != nil {
                        l.failf(LinkFailureError{code: ErrInvalidUpdate},
                                "unable to handle upstream fail HTLC: %v", err)
                        return
                }

        case *lnwire.CommitSig:
1,196✔
2172
                // Since we may have learned new preimages for the first time,
1,196✔
2173
                // we'll add them to our preimage cache. By doing this, we
1,196✔
2174
                // ensure any contested contracts watched by any on-chain
1,196✔
2175
                // arbitrators can now sweep this HTLC on-chain. We delay
1,196✔
2176
                // committing the preimages until just before accepting the new
1,196✔
2177
                // remote commitment, as afterwards the peer won't resend the
1,196✔
2178
                // Settle messages on the next channel reestablishment. Doing so
1,196✔
2179
                // allows us to more effectively batch this operation, instead
1,196✔
2180
                // of doing a single write per preimage.
1,196✔
2181
                err := l.cfg.PreimageCache.AddPreimages(
1,196✔
2182
                        l.uncommittedPreimages...,
1,196✔
2183
                )
1,196✔
2184
                if err != nil {
1,196✔
2185
                        l.failf(
×
2186
                                LinkFailureError{code: ErrInternalError},
×
2187
                                "unable to add preimages=%v to cache: %v",
×
2188
                                l.uncommittedPreimages, err,
×
2189
                        )
×
2190
                        return
×
2191
                }
×
2192

2193
                // Instead of truncating the slice to conserve memory
2194
                // allocations, we simply set the uncommitted preimage slice to
2195
                // nil so that a new one will be initialized if any more
2196
                // witnesses are discovered. We do this because the maximum size
2197
                // that the slice can occupy is 15KB, and we want to ensure we
2198
                // release that memory back to the runtime.
2199
                l.uncommittedPreimages = nil
1,196✔
2200

1,196✔
2201
                // We just received a new updates to our local commitment
1,196✔
2202
                // chain, validate this new commitment, closing the link if
1,196✔
2203
                // invalid.
1,196✔
2204
                auxSigBlob, err := msg.CustomRecords.Serialize()
1,196✔
2205
                if err != nil {
1,196✔
2206
                        l.failf(
×
2207
                                LinkFailureError{code: ErrInvalidCommitment},
×
2208
                                "unable to serialize custom records: %v", err,
×
2209
                        )
×
2210

×
2211
                        return
×
2212
                }
×
2213
                err = l.channel.ReceiveNewCommitment(&lnwallet.CommitSigs{
1,196✔
2214
                        CommitSig:  msg.CommitSig,
1,196✔
2215
                        HtlcSigs:   msg.HtlcSigs,
1,196✔
2216
                        PartialSig: msg.PartialSig,
1,196✔
2217
                        AuxSigBlob: auxSigBlob,
1,196✔
2218
                })
1,196✔
2219
                if err != nil {
1,196✔
2220
                        // If we were unable to reconstruct their proposed
×
2221
                        // commitment, then we'll examine the type of error. If
×
2222
                        // it's an InvalidCommitSigError, then we'll send a
×
2223
                        // direct error.
×
2224
                        var sendData []byte
×
2225
                        switch err.(type) {
×
2226
                        case *lnwallet.InvalidCommitSigError:
×
2227
                                sendData = []byte(err.Error())
×
2228
                        case *lnwallet.InvalidHtlcSigError:
×
2229
                                sendData = []byte(err.Error())
×
2230
                        }
2231
                        l.failf(
×
2232
                                LinkFailureError{
×
2233
                                        code:          ErrInvalidCommitment,
×
2234
                                        FailureAction: LinkFailureForceClose,
×
2235
                                        SendData:      sendData,
×
2236
                                },
×
2237
                                "ChannelPoint(%v): unable to accept new "+
×
2238
                                        "commitment: %v",
×
2239
                                l.channel.ChannelPoint(), err,
×
2240
                        )
×
2241
                        return
×
2242
                }
2243

2244
                // As we've just accepted a new state, we'll now
2245
                // immediately send the remote peer a revocation for our prior
2246
                // state.
2247
                nextRevocation, currentHtlcs, finalHTLCs, err :=
1,196✔
2248
                        l.channel.RevokeCurrentCommitment()
1,196✔
2249
                if err != nil {
1,196✔
2250
                        l.log.Errorf("unable to revoke commitment: %v", err)
×
2251

×
2252
                        // We need to fail the channel in case revoking our
×
2253
                        // local commitment does not succeed. We might have
×
2254
                        // already advanced our channel state which would lead
×
2255
                        // us to proceed with an unclean state.
×
2256
                        //
×
2257
                        // NOTE: We do not trigger a force close because this
×
2258
                        // could resolve itself in case our db was just busy
×
2259
                        // not accepting new transactions.
×
2260
                        l.failf(
×
2261
                                LinkFailureError{
×
2262
                                        code:          ErrInternalError,
×
2263
                                        Warning:       true,
×
2264
                                        FailureAction: LinkFailureDisconnect,
×
2265
                                },
×
2266
                                "ChannelPoint(%v): unable to accept new "+
×
2267
                                        "commitment: %v",
×
2268
                                l.channel.ChannelPoint(), err,
×
2269
                        )
×
2270
                        return
×
2271
                }
×
2272

2273
                // As soon as we are ready to send our next revocation, we can
2274
                // invoke the incoming commit hooks.
2275
                l.RWMutex.Lock()
1,196✔
2276
                l.incomingCommitHooks.invoke()
1,196✔
2277
                l.RWMutex.Unlock()
1,196✔
2278

1,196✔
2279
                l.cfg.Peer.SendMessage(false, nextRevocation)
1,196✔
2280

1,196✔
2281
                // Notify the incoming HTLCs for which the resolutions were
1,196✔
2282
                // locked in.
1,196✔
2283
                for id, settled := range finalHTLCs {
1,531✔
2284
                        l.cfg.HtlcNotifier.NotifyFinalHtlcEvent(
335✔
2285
                                models.CircuitKey{
335✔
2286
                                        ChanID: l.ShortChanID(),
335✔
2287
                                        HtlcID: id,
335✔
2288
                                },
335✔
2289
                                channeldb.FinalHtlcInfo{
335✔
2290
                                        Settled:  settled,
335✔
2291
                                        Offchain: true,
335✔
2292
                                },
335✔
2293
                        )
335✔
2294
                }
335✔
2295

2296
                // Since we just revoked our commitment, we may have a new set
2297
                // of HTLCs on our commitment, so we'll send them using our
2298
                // function closure NotifyContractUpdate.
2299
                newUpdate := &contractcourt.ContractUpdate{
1,196✔
2300
                        HtlcKey: contractcourt.LocalHtlcSet,
1,196✔
2301
                        Htlcs:   currentHtlcs,
1,196✔
2302
                }
1,196✔
2303
                err = l.cfg.NotifyContractUpdate(newUpdate)
1,196✔
2304
                if err != nil {
1,196✔
2305
                        l.log.Errorf("unable to notify contract update: %v",
×
2306
                                err)
×
2307
                        return
×
2308
                }
×
2309

2310
                select {
1,196✔
2311
                case <-l.Quit:
1✔
2312
                        return
1✔
2313
                default:
1,195✔
2314
                }
2315

2316
                // If the remote party initiated the state transition,
2317
                // we'll reply with a signature to provide them with their
2318
                // version of the latest commitment. Otherwise, if both
2319
                // commitment chains are fully synced from our PoV, we don't
2320
                // need to reply with a signature as both sides already have a
2321
                // commitment with the latest accepted state.
2322
                if l.channel.OweCommitment() {
1,859✔
2323
                        if !l.updateCommitTxOrFail() {
664✔
2324
                                return
×
2325
                        }
×
2326
                }
2327

2328
                // Now that we have finished processing the incoming CommitSig
2329
                // and sent out our RevokeAndAck, we invoke the flushHooks if
2330
                // the channel state is clean.
2331
                l.RWMutex.Lock()
1,195✔
2332
                if l.channel.IsChannelClean() {
1,382✔
2333
                        l.flushHooks.invoke()
187✔
2334
                }
187✔
2335
                l.RWMutex.Unlock()
1,195✔
2336

2337
        case *lnwire.RevokeAndAck:
1,184✔
2338
                // We've received a revocation from the remote chain, if valid,
1,184✔
2339
                // this moves the remote chain forward, and expands our
1,184✔
2340
                // revocation window.
1,184✔
2341

1,184✔
2342
                // We now process the message and advance our remote commit
1,184✔
2343
                // chain.
1,184✔
2344
                fwdPkg, remoteHTLCs, err := l.channel.ReceiveRevocation(msg)
1,184✔
2345
                if err != nil {
1,184✔
2346
                        // TODO(halseth): force close?
×
2347
                        l.failf(
×
2348
                                LinkFailureError{
×
2349
                                        code:          ErrInvalidRevocation,
×
2350
                                        FailureAction: LinkFailureDisconnect,
×
2351
                                },
×
2352
                                "unable to accept revocation: %v", err,
×
2353
                        )
×
2354
                        return
×
2355
                }
×
2356

2357
                // The remote party now has a new primary commitment, so we'll
2358
                // update the contract court to be aware of this new set (the
2359
                // prior old remote pending).
2360
                newUpdate := &contractcourt.ContractUpdate{
1,184✔
2361
                        HtlcKey: contractcourt.RemoteHtlcSet,
1,184✔
2362
                        Htlcs:   remoteHTLCs,
1,184✔
2363
                }
1,184✔
2364
                err = l.cfg.NotifyContractUpdate(newUpdate)
1,184✔
2365
                if err != nil {
1,184✔
2366
                        l.log.Errorf("unable to notify contract update: %v",
×
2367
                                err)
×
2368
                        return
×
2369
                }
×
2370

2371
                select {
1,184✔
2372
                case <-l.Quit:
2✔
2373
                        return
2✔
2374
                default:
1,182✔
2375
                }
2376

2377
                // If we have a tower client for this channel type, we'll
2378
                // create a backup for the current state.
2379
                if l.cfg.TowerClient != nil {
1,186✔
2380
                        state := l.channel.State()
4✔
2381
                        chanID := l.ChanID()
4✔
2382

4✔
2383
                        err = l.cfg.TowerClient.BackupState(
4✔
2384
                                &chanID, state.RemoteCommitment.CommitHeight-1,
4✔
2385
                        )
4✔
2386
                        if err != nil {
4✔
2387
                                l.failf(LinkFailureError{
×
2388
                                        code: ErrInternalError,
×
2389
                                }, "unable to queue breach backup: %v", err)
×
2390
                                return
×
2391
                        }
×
2392
                }
2393

2394
                l.processRemoteSettleFails(fwdPkg)
1,182✔
2395
                l.processRemoteAdds(fwdPkg)
1,182✔
2396

1,182✔
2397
                // If the link failed during processing the adds, we must
1,182✔
2398
                // return to ensure we won't attempt to update the state
1,182✔
2399
                // further.
1,182✔
2400
                if l.failed {
1,186✔
2401
                        return
4✔
2402
                }
4✔
2403

2404
                // The revocation window opened up. If there are pending local
2405
                // updates, try to update the commit tx. Pending updates could
2406
                // already have been present because of a previously failed
2407
                // update to the commit tx or freshly added in by
2408
                // processRemoteAdds. Also in case there are no local updates,
2409
                // but there are still remote updates that are not in the remote
2410
                // commit tx yet, send out an update.
2411
                if l.channel.OweCommitment() {
1,499✔
2412
                        if !l.updateCommitTxOrFail() {
323✔
2413
                                return
6✔
2414
                        }
6✔
2415
                }
2416

2417
                // Now that we have finished processing the RevokeAndAck, we
2418
                // can invoke the flushHooks if the channel state is clean.
2419
                l.RWMutex.Lock()
1,176✔
2420
                if l.channel.IsChannelClean() {
1,339✔
2421
                        l.flushHooks.invoke()
163✔
2422
                }
163✔
2423
                l.RWMutex.Unlock()
1,176✔
2424

2425
        case *lnwire.UpdateFee:
3✔
2426
                // Check and see if their proposed fee-rate would make us
3✔
2427
                // exceed the fee threshold.
3✔
2428
                fee := chainfee.SatPerKWeight(msg.FeePerKw)
3✔
2429

3✔
2430
                isDust, err := l.exceedsFeeExposureLimit(fee)
3✔
2431
                if err != nil {
3✔
2432
                        // This shouldn't typically happen. If it does, it
×
2433
                        // indicates something is wrong with our channel state.
×
2434
                        l.log.Errorf("Unable to determine if fee threshold " +
×
2435
                                "exceeded")
×
2436
                        l.failf(LinkFailureError{code: ErrInternalError},
×
2437
                                "error calculating fee exposure: %v", err)
×
2438

×
2439
                        return
×
2440
                }
×
2441

2442
                if isDust {
3✔
2443
                        // The proposed fee-rate makes us exceed the fee
×
2444
                        // threshold.
×
2445
                        l.failf(LinkFailureError{code: ErrInternalError},
×
2446
                                "fee threshold exceeded: %v", err)
×
2447
                        return
×
2448
                }
×
2449

2450
                // We received a fee update from the peer. If we are the
2451
                // initiator we will fail the channel; if not, we apply it.
2452
                if err := l.channel.ReceiveUpdateFee(fee); err != nil {
3✔
2453
                        l.failf(LinkFailureError{code: ErrInvalidUpdate},
×
2454
                                "error receiving fee update: %v", err)
×
2455
                        return
×
2456
                }
×
2457

2458
                // Update the mailbox's feerate as well.
2459
                l.mailBox.SetFeeRate(fee)
3✔
2460

2461
        // In the case where we receive a warning message from our peer, just
2462
        // log it and move on. We choose not to disconnect from our peer,
2463
        // although we "MAY" do so according to the specification.
2464
        case *lnwire.Warning:
1✔
2465
                l.log.Warnf("received warning message from peer: %v",
1✔
2466
                        msg.Warning())
1✔
2467

2468
        case *lnwire.Error:
4✔
2469
                // Error received from remote, MUST fail channel, but should
4✔
2470
                // only print the contents of the error message if all
4✔
2471
                // characters are printable ASCII.
4✔
2472
                l.failf(
4✔
2473
                        LinkFailureError{
4✔
2474
                                code: ErrRemoteError,
4✔
2475

4✔
2476
                                // TODO(halseth): we currently don't fail the
4✔
2477
                                // channel permanently, as there are some sync
4✔
2478
                                // issues with other implementations that will
4✔
2479
                                // lead to them sending an error message, but
4✔
2480
                                // we can recover from on next connection. See
4✔
2481
                                // https://github.com/ElementsProject/lightning/issues/4212
4✔
2482
                                PermanentFailure: false,
4✔
2483
                        },
4✔
2484
                        "ChannelPoint(%v): received error from peer: %v",
4✔
2485
                        l.channel.ChannelPoint(), msg.Error(),
4✔
2486
                )
4✔
2487
        default:
×
2488
                l.log.Warnf("received unknown message of type %T", msg)
×
2489
        }
2490

2491
}
2492

2493
// ackDownStreamPackets is responsible for removing htlcs from a link's mailbox
2494
// for packets delivered from the switch, and cleaning up any circuits closed by
2495
// signing a previous commitment txn. This method ensures that the circuits are
2496
// removed from the circuit map before removing them from the link's mailbox,
2497
// otherwise it could be possible for some circuit to be missed if this link
2498
// flaps.
2499
func (l *channelLink) ackDownStreamPackets() error {
1,372✔
2500
        // First, remove the downstream Add packets that were included in the
1,372✔
2501
        // previous commitment signature. This will prevent the Adds from being
1,372✔
2502
        // replayed if this link disconnects.
1,372✔
2503
        for _, inKey := range l.openedCircuits {
1,839✔
2504
                // In order to test the sphinx replay logic of the remote
467✔
2505
                // party, unsafe replay does not acknowledge the packets from
467✔
2506
                // the mailbox. We can then force a replay of any Add packets
467✔
2507
                // held in memory by disconnecting and reconnecting the link.
467✔
2508
                if l.cfg.UnsafeReplay {
471✔
2509
                        continue
4✔
2510
                }
2511

2512
                l.log.Debugf("removing Add packet %s from mailbox", inKey)
467✔
2513
                l.mailBox.AckPacket(inKey)
467✔
2514
        }
2515

2516
        // Now, we will delete all circuits closed by the previous commitment
2517
        // signature, which is the result of downstream Settle/Fail packets. We
2518
        // batch them here to ensure circuits are closed atomically and for
2519
        // performance.
2520
        err := l.cfg.Circuits.DeleteCircuits(l.closedCircuits...)
1,372✔
2521
        switch err {
1,372✔
2522
        case nil:
1,372✔
2523
                // Successful deletion.
2524

2525
        default:
×
2526
                l.log.Errorf("unable to delete %d circuits: %v",
×
2527
                        len(l.closedCircuits), err)
×
2528
                return err
×
2529
        }
2530

2531
        // With the circuits removed from memory and disk, we now ack any
2532
        // Settle/Fails in the mailbox to ensure they do not get redelivered
2533
        // after startup. If forgive is enabled and we've reached this point,
2534
        // the circuits must have been removed at some point, so it is now safe
2535
        // to un-queue the corresponding Settle/Fails.
2536
        for _, inKey := range l.closedCircuits {
1,415✔
2537
                l.log.Debugf("removing Fail/Settle packet %s from mailbox",
43✔
2538
                        inKey)
43✔
2539
                l.mailBox.AckPacket(inKey)
43✔
2540
        }
43✔
2541

2542
        // Lastly, reset our buffers to be empty while keeping any acquired
2543
        // growth in the backing array.
2544
        l.openedCircuits = l.openedCircuits[:0]
1,372✔
2545
        l.closedCircuits = l.closedCircuits[:0]
1,372✔
2546

1,372✔
2547
        return nil
1,372✔
2548
}
2549
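// resetKeepingCapacity is an editor's illustration (not part of lnd) of the
// buffer-reset idiom used above: re-slicing to length zero empties a slice
// while keeping its backing array, so later appends can reuse that memory.
func resetKeepingCapacity[T any](buf []T) []T {
        return buf[:0]
}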

2550
// updateCommitTxOrFail updates the commitment tx and if that fails, it fails
2551
// the link.
2552
func (l *channelLink) updateCommitTxOrFail() bool {
1,233✔
2553
        err := l.updateCommitTx()
1,233✔
2554
        switch err {
1,233✔
2555
        // No error encountered, success.
2556
        case nil:
1,224✔
2557

2558
        // A duplicate keystone error should be resolved and is not fatal, so
2559
        // we won't send an Error message to the peer.
2560
        case ErrDuplicateKeystone:
×
2561
                l.failf(LinkFailureError{code: ErrCircuitError},
×
2562
                        "temporary circuit error: %v", err)
×
2563
                return false
×
2564

2565
        // Any other error results in an Error message being sent to
2566
        // the peer.
2567
        default:
9✔
2568
                l.failf(LinkFailureError{code: ErrInternalError},
9✔
2569
                        "unable to update commitment: %v", err)
9✔
2570
                return false
9✔
2571
        }
2572

2573
        return true
1,224✔
2574
}
2575
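// Editor's illustration (not lnd code) of how callers above treat the return
// value: a false result means the link has already been failed with an
// appropriate reason, so the caller simply stops processing.
//
//      if !l.updateCommitTxOrFail() {
//              return
//      }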

2576
// updateCommitTx signs, then sends an update to the remote peer adding a new
2577
// commitment to their commitment chain which includes all the latest updates
2578
// we've received+processed up to this point.
2579
func (l *channelLink) updateCommitTx() error {
1,291✔
2580
        // Preemptively write all pending keystones to disk, just in case the
1,291✔
2581
        // HTLCs we have in memory are included in the subsequent attempt to
1,291✔
2582
        // sign a commitment state.
1,291✔
2583
        err := l.cfg.Circuits.OpenCircuits(l.keystoneBatch...)
1,291✔
2584
        if err != nil {
1,291✔
2585
                // If ErrDuplicateKeystone is returned, the caller will catch
×
2586
                // it.
×
2587
                return err
×
2588
        }
×
2589

2590
        // Reset the batch, but keep the backing buffer to avoid reallocating.
2591
        l.keystoneBatch = l.keystoneBatch[:0]
1,291✔
2592

1,291✔
2593
        // If hodl.Commit mode is active, we will refrain from attempting to
1,291✔
2594
        // commit any in-memory modifications to the channel state. Exiting here
1,291✔
2595
        // permits testing of either the switch or link's ability to trim
1,291✔
2596
        // circuits that have been opened, but unsuccessfully committed.
1,291✔
2597
        if l.cfg.HodlMask.Active(hodl.Commit) {
1,299✔
2598
                l.log.Warnf(hodl.Commit.Warning())
8✔
2599
                return nil
8✔
2600
        }
8✔
2601

2602
        ctx, done := l.WithCtxQuitNoTimeout()
1,287✔
2603
        defer done()
1,287✔
2604

1,287✔
2605
        newCommit, err := l.channel.SignNextCommitment(ctx)
1,287✔
2606
        if err == lnwallet.ErrNoWindow {
1,374✔
2607
                l.cfg.PendingCommitTicker.Resume()
87✔
2608
                l.log.Trace("PendingCommitTicker resumed")
87✔
2609

87✔
2610
                n := l.channel.NumPendingUpdates(lntypes.Local, lntypes.Remote)
87✔
2611
                l.log.Tracef("revocation window exhausted, unable to send: "+
87✔
2612
                        "%v, pend_updates=%v, dangling_closes%v", n,
87✔
2613
                        lnutils.SpewLogClosure(l.openedCircuits),
87✔
2614
                        lnutils.SpewLogClosure(l.closedCircuits))
87✔
2615

87✔
2616
                return nil
87✔
2617
        } else if err != nil {
1,291✔
2618
                return err
×
2619
        }
×
2620

2621
        if err := l.ackDownStreamPackets(); err != nil {
1,204✔
2622
                return err
×
2623
        }
×
2624

2625
        l.cfg.PendingCommitTicker.Pause()
1,204✔
2626
        l.log.Trace("PendingCommitTicker paused after ackDownStreamPackets")
1,204✔
2627

1,204✔
2628
        // The remote party now has a new pending commitment, so we'll update
1,204✔
2629
        // the contract court to be aware of this new set (the prior old remote
1,204✔
2630
        // pending).
1,204✔
2631
        newUpdate := &contractcourt.ContractUpdate{
1,204✔
2632
                HtlcKey: contractcourt.RemotePendingHtlcSet,
1,204✔
2633
                Htlcs:   newCommit.PendingHTLCs,
1,204✔
2634
        }
1,204✔
2635
        err = l.cfg.NotifyContractUpdate(newUpdate)
1,204✔
2636
        if err != nil {
1,204✔
2637
                l.log.Errorf("unable to notify contract update: %v", err)
×
2638
                return err
×
2639
        }
×
2640

2641
        select {
1,204✔
2642
        case <-l.Quit:
9✔
2643
                return ErrLinkShuttingDown
9✔
2644
        default:
1,195✔
2645
        }
2646

2647
        auxBlobRecords, err := lnwire.ParseCustomRecords(newCommit.AuxSigBlob)
1,195✔
2648
        if err != nil {
1,195✔
2649
                return fmt.Errorf("error parsing aux sigs: %w", err)
×
2650
        }
×
2651

2652
        commitSig := &lnwire.CommitSig{
1,195✔
2653
                ChanID:        l.ChanID(),
1,195✔
2654
                CommitSig:     newCommit.CommitSig,
1,195✔
2655
                HtlcSigs:      newCommit.HtlcSigs,
1,195✔
2656
                PartialSig:    newCommit.PartialSig,
1,195✔
2657
                CustomRecords: auxBlobRecords,
1,195✔
2658
        }
1,195✔
2659
        l.cfg.Peer.SendMessage(false, commitSig)
1,195✔
2660

1,195✔
2661
        // Now that we have sent out a new CommitSig, we invoke the outgoing set
1,195✔
2662
        // of commit hooks.
1,195✔
2663
        l.RWMutex.Lock()
1,195✔
2664
        l.outgoingCommitHooks.invoke()
1,195✔
2665
        l.RWMutex.Unlock()
1,195✔
2666

1,195✔
2667
        return nil
1,195✔
2668
}
2669
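// buildCommitSigMsg is an editor's sketch (not part of lnd) of how the wire
// CommitSig above is assembled, where newCommit is assumed to be the
// lnwallet.NewCommitState returned by SignNextCommitment.
func buildCommitSigMsg(chanID lnwire.ChannelID,
        newCommit *lnwallet.NewCommitState) (*lnwire.CommitSig, error) {

        // Aux signatures travel as custom TLV records on the wire message.
        auxRecords, err := lnwire.ParseCustomRecords(newCommit.AuxSigBlob)
        if err != nil {
                return nil, fmt.Errorf("error parsing aux sigs: %w", err)
        }

        return &lnwire.CommitSig{
                ChanID:        chanID,
                CommitSig:     newCommit.CommitSig,
                HtlcSigs:      newCommit.HtlcSigs,
                PartialSig:    newCommit.PartialSig,
                CustomRecords: auxRecords,
        }, nil
}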

2670
// PeerPubKey returns the public key of the remote peer with which we have
2671
// an open channel link.
2672
//
2673
// NOTE: Part of the ChannelLink interface.
2674
func (l *channelLink) PeerPubKey() [33]byte {
441✔
2675
        return l.cfg.Peer.PubKey()
441✔
2676
}
441✔
2677

2678
// ChannelPoint returns the channel outpoint for the channel link.
2679
// NOTE: Part of the ChannelLink interface.
2680
func (l *channelLink) ChannelPoint() wire.OutPoint {
848✔
2681
        return l.channel.ChannelPoint()
848✔
2682
}
848✔
2683

2684
// ShortChanID returns the short channel ID for the channel link. The short
2685
// channel ID encodes the exact location in the main chain that the original
2686
// funding output can be found.
2687
//
2688
// NOTE: Part of the ChannelLink interface.
2689
func (l *channelLink) ShortChanID() lnwire.ShortChannelID {
4,846✔
2690
        l.RLock()
4,846✔
2691
        defer l.RUnlock()
4,846✔
2692

4,846✔
2693
        return l.channel.ShortChanID()
4,846✔
2694
}
4,846✔
2695

2696
// UpdateShortChanID updates the short channel ID for a link. This may be
2697
// required in the event that a link is created before the short chan ID for it
2698
// is known, or a re-org occurs, and the funding transaction changes location
2699
// within the chain.
2700
//
2701
// NOTE: Part of the ChannelLink interface.
2702
func (l *channelLink) UpdateShortChanID() (lnwire.ShortChannelID, error) {
4✔
2703
        chanID := l.ChanID()
4✔
2704

4✔
2705
        // Refresh the channel state's short channel ID by loading it from disk.
4✔
2706
        // This ensures that the channel state accurately reflects the updated
4✔
2707
        // short channel ID.
4✔
2708
        err := l.channel.State().Refresh()
4✔
2709
        if err != nil {
4✔
2710
                l.log.Errorf("unable to refresh short_chan_id for chan_id=%v: "+
×
2711
                        "%v", chanID, err)
×
2712
                return hop.Source, err
×
2713
        }
×
2714

2715
        return hop.Source, nil
4✔
2716
}
2717

2718
// ChanID returns the channel ID for the channel link. The channel ID is a more
2719
// compact representation of a channel's full outpoint.
2720
//
2721
// NOTE: Part of the ChannelLink interface.
2722
func (l *channelLink) ChanID() lnwire.ChannelID {
3,915✔
2723
        return lnwire.NewChanIDFromOutPoint(l.channel.ChannelPoint())
3,915✔
2724
}
3,915✔
2725

2726
// Bandwidth returns the total amount that can flow through the channel link at
2727
// this given instance. The value returned is expressed in millisatoshi and can
2728
// be used by callers when making forwarding decisions to determine if a link
2729
// can accept an HTLC.
2730
//
2731
// NOTE: Part of the ChannelLink interface.
2732
func (l *channelLink) Bandwidth() lnwire.MilliSatoshi {
814✔
2733
        // Get the balance available on the channel for new HTLCs. This takes
814✔
2734
        // the channel reserve into account so HTLCs up to this value won't
814✔
2735
        // violate it.
814✔
2736
        return l.channel.AvailableBalance()
814✔
2737
}
814✔
2738

2739
// MayAddOutgoingHtlc indicates whether we can add an outgoing htlc with the
2740
// amount provided to the link. This check does not reserve a space, since
2741
// forwards or other payments may use the available slot, so it should be
2742
// considered best-effort.
2743
func (l *channelLink) MayAddOutgoingHtlc(amt lnwire.MilliSatoshi) error {
4✔
2744
        return l.channel.MayAddOutgoingHtlc(amt)
4✔
2745
}
4✔
2746

2747
// getDustSum is a wrapper method that calls the underlying channel's dust sum
2748
// method.
2749
//
2750
// NOTE: Part of the dustHandler interface.
2751
func (l *channelLink) getDustSum(whoseCommit lntypes.ChannelParty,
2752
        dryRunFee fn.Option[chainfee.SatPerKWeight]) lnwire.MilliSatoshi {
2,523✔
2753

2,523✔
2754
        return l.channel.GetDustSum(whoseCommit, dryRunFee)
2,523✔
2755
}
2,523✔
2756

2757
// getFeeRate is a wrapper method that retrieves the underlying channel's
2758
// feerate.
2759
//
2760
// NOTE: Part of the dustHandler interface.
2761
func (l *channelLink) getFeeRate() chainfee.SatPerKWeight {
671✔
2762
        return l.channel.CommitFeeRate()
671✔
2763
}
671✔
2764

2765
// getDustClosure returns a closure that can be used by the switch or mailbox
2766
// to evaluate whether a given HTLC is dust.
2767
//
2768
// NOTE: Part of the dustHandler interface.
2769
func (l *channelLink) getDustClosure() dustClosure {
1,599✔
2770
        localDustLimit := l.channel.State().LocalChanCfg.DustLimit
1,599✔
2771
        remoteDustLimit := l.channel.State().RemoteChanCfg.DustLimit
1,599✔
2772
        chanType := l.channel.State().ChanType
1,599✔
2773

1,599✔
2774
        return dustHelper(chanType, localDustLimit, remoteDustLimit)
1,599✔
2775
}
1,599✔
2776

2777
// getCommitFee returns either the local or remote CommitFee in satoshis. This
2778
// is used so that the Switch can have access to the commitment fee without
2779
// needing to have a *LightningChannel. This doesn't include dust.
2780
//
2781
// NOTE: Part of the dustHandler interface.
2782
func (l *channelLink) getCommitFee(remote bool) btcutil.Amount {
1,887✔
2783
        if remote {
2,846✔
2784
                return l.channel.State().RemoteCommitment.CommitFee
959✔
2785
        }
959✔
2786

2787
        return l.channel.State().LocalCommitment.CommitFee
932✔
2788
}
2789

2790
// exceedsFeeExposureLimit returns whether or not the new proposed fee-rate
2791
// increases the total dust and fees within the channel past the configured
2792
// fee threshold. It first calculates the dust sum over every update in the
2793
// update log with the proposed fee-rate and taking into account both the local
2794
// and remote dust limits. It uses every update in the update log instead of
2795
// what is actually on the local and remote commitments because it is assumed
2796
// that in a worst-case scenario, every update in the update log could
2797
// theoretically be on either commitment transaction and this needs to be
2798
// accounted for with this fee-rate. It then calculates the local and remote
2799
// commitment fees given the proposed fee-rate. Finally, it tallies the results
2800
// and determines if the fee threshold has been exceeded.
2801
func (l *channelLink) exceedsFeeExposureLimit(
2802
        feePerKw chainfee.SatPerKWeight) (bool, error) {
6✔
2803

6✔
2804
        dryRunFee := fn.Some[chainfee.SatPerKWeight](feePerKw)
6✔
2805

6✔
2806
        // Get the sum of dust for both the local and remote commitments using
6✔
2807
        // this "dry-run" fee.
6✔
2808
        localDustSum := l.getDustSum(lntypes.Local, dryRunFee)
6✔
2809
        remoteDustSum := l.getDustSum(lntypes.Remote, dryRunFee)
6✔
2810

6✔
2811
        // Calculate the local and remote commitment fees using this dry-run
6✔
2812
        // fee.
6✔
2813
        localFee, remoteFee, err := l.channel.CommitFeeTotalAt(feePerKw)
6✔
2814
        if err != nil {
6✔
2815
                return false, err
×
2816
        }
×
2817

2818
        // Finally, check whether the max fee exposure was exceeded on either
2819
        // future commitment transaction with the fee-rate.
2820
        totalLocalDust := localDustSum + lnwire.NewMSatFromSatoshis(localFee)
6✔
2821
        if totalLocalDust > l.cfg.MaxFeeExposure {
6✔
2822
                l.log.Debugf("ChannelLink(%v): exceeds fee exposure limit: "+
×
2823
                        "local dust: %v, local fee: %v", l.ShortChanID(),
×
2824
                        totalLocalDust, localFee)
×
2825

×
2826
                return true, nil
×
2827
        }
×
2828

2829
        totalRemoteDust := remoteDustSum + lnwire.NewMSatFromSatoshis(
6✔
2830
                remoteFee,
6✔
2831
        )
6✔
2832

6✔
2833
        if totalRemoteDust > l.cfg.MaxFeeExposure {
6✔
2834
                l.log.Debugf("ChannelLink(%v): exceeds fee exposure limit: "+
×
2835
                        "remote dust: %v, remote fee: %v", l.ShortChanID(),
×
2836
                        totalRemoteDust, remoteFee)
×
2837

×
2838
                return true, nil
×
2839
        }
×
2840

2841
        return false, nil
6✔
2842
}
2843
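// exceedsExposure is an editor's restatement (not lnd code) of the check
// above as a pure function: a commitment's total exposure is its dust sum
// plus its commitment fee, and the limit is breached once that total crosses
// the configured maximum.
func exceedsExposure(dustSum lnwire.MilliSatoshi, commitFee btcutil.Amount,
        limit lnwire.MilliSatoshi) bool {

        return dustSum+lnwire.NewMSatFromSatoshis(commitFee) > limit
}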

2844
// isOverexposedWithHtlc calculates whether the proposed HTLC will make the
2845
// channel exceed the fee threshold. It first fetches the largest fee-rate that
2846
// may be on any unrevoked commitment transaction. Then, using this fee-rate,
2847
// determines if the to-be-added HTLC is dust. If the HTLC is dust, it adds to
2848
// the overall dust sum. If it is not dust, it contributes to weight, which
2849
// also adds to the overall dust sum by an increase in fees. If the dust sum on
2850
// either commitment exceeds the configured fee threshold, this function
2851
// returns true.
2852
func (l *channelLink) isOverexposedWithHtlc(htlc *lnwire.UpdateAddHTLC,
2853
        incoming bool) bool {
932✔
2854

932✔
2855
        dustClosure := l.getDustClosure()
932✔
2856

932✔
2857
        feeRate := l.channel.WorstCaseFeeRate()
932✔
2858

932✔
2859
        amount := htlc.Amount.ToSatoshis()
932✔
2860

932✔
2861
        // See if this HTLC is dust on both the local and remote commitments.
932✔
2862
        isLocalDust := dustClosure(feeRate, incoming, lntypes.Local, amount)
932✔
2863
        isRemoteDust := dustClosure(feeRate, incoming, lntypes.Remote, amount)
932✔
2864

932✔
2865
        // Calculate the dust sum for the local and remote commitments.
932✔
2866
        localDustSum := l.getDustSum(
932✔
2867
                lntypes.Local, fn.None[chainfee.SatPerKWeight](),
932✔
2868
        )
932✔
2869
        remoteDustSum := l.getDustSum(
932✔
2870
                lntypes.Remote, fn.None[chainfee.SatPerKWeight](),
932✔
2871
        )
932✔
2872

932✔
2873
        // Grab the larger of the local and remote commitment fees w/o dust.
932✔
2874
        commitFee := l.getCommitFee(false)
932✔
2875

932✔
2876
        if l.getCommitFee(true) > commitFee {
963✔
2877
                commitFee = l.getCommitFee(true)
31✔
2878
        }
31✔
2879

2880
        commitFeeMSat := lnwire.NewMSatFromSatoshis(commitFee)
932✔
2881

932✔
2882
        localDustSum += commitFeeMSat
932✔
2883
        remoteDustSum += commitFeeMSat
932✔
2884

932✔
2885
        // Calculate the additional fee increase if this is a non-dust HTLC.
932✔
2886
        weight := lntypes.WeightUnit(input.HTLCWeight)
932✔
2887
        additional := lnwire.NewMSatFromSatoshis(
932✔
2888
                feeRate.FeeForWeight(weight),
932✔
2889
        )
932✔
2890

932✔
2891
        if isLocalDust {
1,569✔
2892
                // If this is dust, it doesn't contribute to weight but does
637✔
2893
                // contribute to the overall dust sum.
637✔
2894
                localDustSum += lnwire.NewMSatFromSatoshis(amount)
637✔
2895
        } else {
936✔
2896
                // Account for the fee increase that comes with an increase in
299✔
2897
                // weight.
299✔
2898
                localDustSum += additional
299✔
2899
        }
299✔
2900

2901
        if localDustSum > l.cfg.MaxFeeExposure {
936✔
2902
                // The max fee exposure was exceeded.
4✔
2903
                l.log.Debugf("ChannelLink(%v): HTLC %v makes the channel "+
4✔
2904
                        "overexposed, total local dust: %v (current commit "+
4✔
2905
                        "fee: %v)", l.ShortChanID(), htlc, localDustSum)
4✔
2906

4✔
2907
                return true
4✔
2908
        }
4✔
2909

2910
        if isRemoteDust {
1,562✔
2911
                // If this is dust, it doesn't contribute to weight but does
634✔
2912
                // contribute to the overall dust sum.
634✔
2913
                remoteDustSum += lnwire.NewMSatFromSatoshis(amount)
634✔
2914
        } else {
932✔
2915
                // Account for the fee increase that comes with an increase in
298✔
2916
                // weight.
298✔
2917
                remoteDustSum += additional
298✔
2918
        }
298✔
2919

2920
        if remoteDustSum > l.cfg.MaxFeeExposure {
928✔
2921
                // The max fee exposure was exceeded.
×
2922
                l.log.Debugf("ChannelLink(%v): HTLC %v makes the channel "+
×
2923
                        "overexposed, total remote dust: %v (current commit "+
×
2924
                        "fee: %v)", l.ShortChanID(), htlc, remoteDustSum)
×
2925

×
2926
                return true
×
2927
        }
×
2928

2929
        return false
928✔
2930
}
2931
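// htlcExposureDelta is an editor's sketch (not lnd code) of the marginal
// exposure an HTLC contributes above: a dust HTLC adds its full amount to
// the dust sum, while a non-dust HTLC adds only the extra commitment fee
// paid for its weight.
func htlcExposureDelta(isDust bool, amt btcutil.Amount,
        feeRate chainfee.SatPerKWeight) lnwire.MilliSatoshi {

        if isDust {
                return lnwire.NewMSatFromSatoshis(amt)
        }

        weight := lntypes.WeightUnit(input.HTLCWeight)

        return lnwire.NewMSatFromSatoshis(feeRate.FeeForWeight(weight))
}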

2932
// dustClosure is a function that evaluates whether an HTLC is dust. It returns
2933
// true if the HTLC is dust. It takes in a feerate, a boolean denoting whether
2934
// the HTLC is incoming (i.e. one that the remote sent), a ChannelParty
2935
// denoting whether to evaluate on the local or remote commit, and finally
2936
// an HTLC amount to test.
2937
type dustClosure func(feerate chainfee.SatPerKWeight, incoming bool,
2938
        whoseCommit lntypes.ChannelParty, amt btcutil.Amount) bool
2939

2940
// dustHelper is used to construct the dustClosure.
2941
func dustHelper(chantype channeldb.ChannelType, localDustLimit,
2942
        remoteDustLimit btcutil.Amount) dustClosure {
1,799✔
2943

1,799✔
2944
        isDust := func(feerate chainfee.SatPerKWeight, incoming bool,
1,799✔
2945
                whoseCommit lntypes.ChannelParty, amt btcutil.Amount) bool {
11,721✔
2946

9,922✔
2947
                var dustLimit btcutil.Amount
9,922✔
2948
                if whoseCommit.IsLocal() {
14,885✔
2949
                        dustLimit = localDustLimit
4,963✔
2950
                } else {
9,926✔
2951
                        dustLimit = remoteDustLimit
4,963✔
2952
                }
4,963✔
2953

2954
                return lnwallet.HtlcIsDust(
9,922✔
2955
                        chantype, incoming, whoseCommit, feerate, amt,
9,922✔
2956
                        dustLimit,
9,922✔
2957
                )
9,922✔
2958
        }
2959

2960
        return isDust
1,799✔
2961
}
2962
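// exampleDustCheck is an editor's sketch (not part of lnd) of how the
// closure returned by dustHelper might be used; the 354-satoshi dust limits
// are hypothetical placeholder values.
func exampleDustCheck(chanType channeldb.ChannelType,
        feeRate chainfee.SatPerKWeight, amt btcutil.Amount) bool {

        isDust := dustHelper(chanType, 354, 354)

        // Evaluate an incoming HTLC against our local commitment.
        return isDust(feeRate, true, lntypes.Local, amt)
}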

2963
// zeroConfConfirmed returns whether or not the zero-conf channel has
2964
// confirmed on-chain.
2965
//
2966
// Part of the scidAliasHandler interface.
2967
func (l *channelLink) zeroConfConfirmed() bool {
7✔
2968
        return l.channel.State().ZeroConfConfirmed()
7✔
2969
}
7✔
2970

2971
// confirmedScid returns the confirmed SCID for a zero-conf channel. This
2972
// should not be called for non-zero-conf channels.
2973
//
2974
// Part of the scidAliasHandler interface.
2975
func (l *channelLink) confirmedScid() lnwire.ShortChannelID {
7✔
2976
        return l.channel.State().ZeroConfRealScid()
7✔
2977
}
7✔
2978

2979
// isZeroConf returns whether or not the underlying channel is a zero-conf
2980
// channel.
2981
//
2982
// Part of the scidAliasHandler interface.
2983
func (l *channelLink) isZeroConf() bool {
215✔
2984
        return l.channel.State().IsZeroConf()
215✔
2985
}
215✔
2986

2987
// negotiatedAliasFeature returns whether or not the underlying channel has
2988
// negotiated the option-scid-alias feature bit. This will be true for both
2989
// option-scid-alias and zero-conf channel-types. It will also be true for
2990
// channels with the feature bit but without the above channel-types.
2991
//
2992
// Part of the scidAliasFeature interface.
2993
func (l *channelLink) negotiatedAliasFeature() bool {
374✔
2994
        return l.channel.State().NegotiatedAliasFeature()
374✔
2995
}
374✔
2996

2997
// getAliases returns the set of aliases for the underlying channel.
2998
//
2999
// Part of the scidAliasHandler interface.
3000
func (l *channelLink) getAliases() []lnwire.ShortChannelID {
221✔
3001
        return l.cfg.GetAliases(l.ShortChanID())
221✔
3002
}
221✔
3003

3004
// attachFailAliasUpdate sets the link's FailAliasUpdate function.
3005
//
3006
// Part of the scidAliasHandler interface.
3007
func (l *channelLink) attachFailAliasUpdate(closure func(
3008
        sid lnwire.ShortChannelID, incoming bool) *lnwire.ChannelUpdate1) {
216✔
3009

216✔
3010
        l.Lock()
216✔
3011
        l.cfg.FailAliasUpdate = closure
216✔
3012
        l.Unlock()
216✔
3013
}
216✔
3014

3015
// AttachMailBox updates the current mailbox used by this link, and hooks up
3016
// the mailbox's message and packet outboxes to the link's upstream and
3017
// downstream chans, respectively.
3018
func (l *channelLink) AttachMailBox(mailbox MailBox) {
215✔
3019
        l.Lock()
215✔
3020
        l.mailBox = mailbox
215✔
3021
        l.upstream = mailbox.MessageOutBox()
215✔
3022
        l.downstream = mailbox.PacketOutBox()
215✔
3023
        l.Unlock()
215✔
3024

215✔
3025
        // Set the mailbox's fee rate. This may be refreshing a feerate that was
215✔
3026
        // never committed.
215✔
3027
        l.mailBox.SetFeeRate(l.getFeeRate())
215✔
3028

215✔
3029
        // Also set the mailbox's dust closure so that it can query whether HTLCs
215✔
3030
        // are dust given the current feerate.
215✔
3031
        l.mailBox.SetDustClosure(l.getDustClosure())
215✔
3032
}
215✔
3033

3034
// UpdateForwardingPolicy updates the forwarding policy for the target
3035
// ChannelLink. Once updated, the link will use the new forwarding policy to
3036
// govern whether an incoming HTLC should be forwarded or not. We assume that
3037
// fields that are zero are intentionally set to zero, so we'll use newPolicy to
3038
// update all of the link's FwrdingPolicy's values.
3039
//
3040
// NOTE: Part of the ChannelLink interface.
3041
func (l *channelLink) UpdateForwardingPolicy(
3042
        newPolicy models.ForwardingPolicy) {
16✔
3043

16✔
3044
        l.Lock()
16✔
3045
        defer l.Unlock()
16✔
3046

16✔
3047
        l.cfg.FwrdingPolicy = newPolicy
16✔
3048
}
16✔
3049

3050
// CheckHtlcForward should return a nil error if the passed HTLC details
3051
// satisfy the current forwarding policy of the target link. Otherwise,
3052
// a LinkError with a valid protocol failure message should be returned
3053
// in order to signal to the source of the HTLC, the policy consistency
3054
// issue.
3055
//
3056
// NOTE: Part of the ChannelLink interface.
3057
func (l *channelLink) CheckHtlcForward(payHash [32]byte,
3058
        incomingHtlcAmt, amtToForward lnwire.MilliSatoshi,
3059
        incomingTimeout, outgoingTimeout uint32,
3060
        inboundFee models.InboundFee,
3061
        heightNow uint32, originalScid lnwire.ShortChannelID) *LinkError {
53✔
3062

53✔
3063
        l.RLock()
53✔
3064
        policy := l.cfg.FwrdingPolicy
53✔
3065
        l.RUnlock()
53✔
3066

53✔
3067
        // Using the outgoing HTLC amount, we'll calculate the outgoing
53✔
3068
        // fee this incoming HTLC must carry in order to satisfy the constraints
53✔
3069
        // of the outgoing link.
53✔
3070
        outFee := ExpectedFee(policy, amtToForward)
53✔
3071

53✔
3072
        // Then calculate the inbound fee that we charge based on the sum of
53✔
3073
        // outgoing HTLC amount and outgoing fee.
53✔
3074
        inFee := inboundFee.CalcFee(amtToForward + outFee)
53✔
3075

53✔
3076
        // Add up both fee components. It is important to calculate both fees
53✔
3077
        // separately. An alternative way of calculating is to first determine
53✔
3078
        // an aggregate fee and apply that to the outgoing HTLC amount. However,
53✔
3079
        // rounding may cause the result to be slightly higher than in the case
53✔
3080
        // of separately rounded fee components. This potentially causes failed
53✔
3081
        // forwards for senders and is something to be avoided.
53✔
3082
        expectedFee := inFee + int64(outFee)
53✔
3083

53✔
3084
        // If the actual fee is less than our expected fee, then we'll reject
53✔
3085
        // this HTLC as it didn't provide a sufficient amount of fees, or the
53✔
3086
        // values have been tampered with, or the send used incorrect/dated
53✔
3087
        // information to construct the forwarding information for this hop. In
53✔
3088
        // any case, we'll cancel this HTLC.
53✔
3089
        actualFee := int64(incomingHtlcAmt) - int64(amtToForward)
53✔
3090
        if incomingHtlcAmt < amtToForward || actualFee < expectedFee {
63✔
3091
                l.log.Warnf("outgoing htlc(%x) has insufficient fee: "+
10✔
3092
                        "expected %v, got %v: incoming=%v, outgoing=%v, "+
10✔
3093
                        "inboundFee=%v",
10✔
3094
                        payHash[:], expectedFee, actualFee,
10✔
3095
                        incomingHtlcAmt, amtToForward, inboundFee,
10✔
3096
                )
10✔
3097

10✔
3098
                // As part of the returned error, we'll send our latest routing
10✔
3099
                // policy so the sending node obtains the most up to date data.
10✔
3100
                cb := func(upd *lnwire.ChannelUpdate1) lnwire.FailureMessage {
20✔
3101
                        return lnwire.NewFeeInsufficient(amtToForward, *upd)
10✔
3102
                }
10✔
3103
                failure := l.createFailureWithUpdate(false, originalScid, cb)
10✔
3104
                return NewLinkError(failure)
10✔
3105
        }
3106

3107
        // Check whether the outgoing htlc satisfies the channel policy.
3108
        err := l.canSendHtlc(
47✔
3109
                policy, payHash, amtToForward, outgoingTimeout, heightNow,
47✔
3110
                originalScid,
47✔
3111
        )
47✔
3112
        if err != nil {
64✔
3113
                return err
17✔
3114
        }
17✔
3115

3116
        // Finally, we'll ensure that the time-lock on the outgoing HTLC meets
3117
        // the following constraint: the incoming time-lock minus our time-lock
3118
        // delta should equal the outgoing time lock. Otherwise, either the
3119
        // sender messed up or an intermediate node tampered with the HTLC.
3120
        timeDelta := policy.TimeLockDelta
34✔
3121
        if incomingTimeout < outgoingTimeout+timeDelta {
36✔
3122
                l.log.Warnf("incoming htlc(%x) has incorrect time-lock value: "+
2✔
3123
                        "expected at least %v block delta, got %v block delta",
2✔
3124
                        payHash[:], timeDelta, incomingTimeout-outgoingTimeout)
2✔
3125

2✔
3126
                // Grab the latest routing policy so the sending node is up to
2✔
3127
                // date with our current policy.
2✔
3128
                cb := func(upd *lnwire.ChannelUpdate1) lnwire.FailureMessage {
4✔
3129
                        return lnwire.NewIncorrectCltvExpiry(
2✔
3130
                                incomingTimeout, *upd,
2✔
3131
                        )
2✔
3132
                }
2✔
3133
                failure := l.createFailureWithUpdate(false, originalScid, cb)
2✔
3134
                return NewLinkError(failure)
2✔
3135
        }
3136

3137
        return nil
32✔
3138
}
3139
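// expectedForwardFee is an editor's sketch (not lnd code) of the two-part
// fee calculation above: the outbound fee is computed on the forwarded
// amount, the inbound fee on the sum of that amount and the outbound fee,
// and the two components are rounded separately before being added.
func expectedForwardFee(policy models.ForwardingPolicy,
        inboundFee models.InboundFee,
        amtToForward lnwire.MilliSatoshi) int64 {

        outFee := ExpectedFee(policy, amtToForward)
        inFee := inboundFee.CalcFee(amtToForward + outFee)

        return inFee + int64(outFee)
}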

3140
// CheckHtlcTransit should return a nil error if the passed HTLC details
3141
// satisfy the current channel policy.  Otherwise, a LinkError with a
3142
// valid protocol failure message should be returned in order to signal
3143
// the violation. This call is intended to be used for locally initiated
3144
// payments for which there is no corresponding incoming htlc.
3145
func (l *channelLink) CheckHtlcTransit(payHash [32]byte,
3146
        amt lnwire.MilliSatoshi, timeout uint32,
3147
        heightNow uint32) *LinkError {
410✔
3148

410✔
3149
        l.RLock()
410✔
3150
        policy := l.cfg.FwrdingPolicy
410✔
3151
        l.RUnlock()
410✔
3152

410✔
3153
        // We pass in hop.Source here as this is only used in the Switch when
410✔
3154
        // trying to send over a local link. This causes the fallback mechanism
410✔
3155
        // to occur.
410✔
3156
        return l.canSendHtlc(
410✔
3157
                policy, payHash, amt, timeout, heightNow, hop.Source,
410✔
3158
        )
410✔
3159
}
410✔
3160
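// Editor's illustration (not lnd code): a locally initiated payment can be
// vetted against the link policy before any HTLC is added. The variables
// link, hash, amt, expiry and height are assumed to be in scope:
//
//      if linkErr := link.CheckHtlcTransit(hash, amt, expiry, height); linkErr != nil {
//              // Reject the payment locally; no HTLC was committed.
//      }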

3161
// canSendHtlc checks whether the given htlc parameters satisfy
3162
// the channel's amount and time lock constraints.
3163
func (l *channelLink) canSendHtlc(policy models.ForwardingPolicy,
3164
        payHash [32]byte, amt lnwire.MilliSatoshi, timeout uint32,
3165
        heightNow uint32, originalScid lnwire.ShortChannelID) *LinkError {
453✔
3166

453✔
3167
        // As our first sanity check, we'll ensure that the passed HTLC isn't
453✔
3168
        // too small for the next hop. If so, then we'll cancel the HTLC
453✔
3169
        // directly.
453✔
3170
        if amt < policy.MinHTLCOut {
465✔
3171
                l.log.Warnf("outgoing htlc(%x) is too small: min_htlc=%v, "+
12✔
3172
                        "htlc_value=%v", payHash[:], policy.MinHTLCOut,
12✔
3173
                        amt)
12✔
3174

12✔
3175
                // As part of the returned error, we'll send our latest routing
12✔
3176
                // policy so the sending node obtains the most up to date data.
12✔
3177
                cb := func(upd *lnwire.ChannelUpdate1) lnwire.FailureMessage {
24✔
3178
                        return lnwire.NewAmountBelowMinimum(amt, *upd)
12✔
3179
                }
12✔
3180
                failure := l.createFailureWithUpdate(false, originalScid, cb)
12✔
3181
                return NewLinkError(failure)
12✔
3182
        }
3183

3184
        // Next, ensure that the passed HTLC isn't too large. If so, we'll
3185
        // cancel the HTLC directly.
3186
        if policy.MaxHTLC != 0 && amt > policy.MaxHTLC {
452✔
3187
                l.log.Warnf("outgoing htlc(%x) is too large: max_htlc=%v, "+
7✔
3188
                        "htlc_value=%v", payHash[:], policy.MaxHTLC, amt)
7✔
3189

7✔
3190
                // As part of the returned error, we'll send our latest routing
7✔
3191
                // policy so the sending node obtains the most up-to-date data.
7✔
3192
                cb := func(upd *lnwire.ChannelUpdate1) lnwire.FailureMessage {
14✔
3193
                        return lnwire.NewTemporaryChannelFailure(upd)
7✔
3194
                }
7✔
3195
                failure := l.createFailureWithUpdate(false, originalScid, cb)
7✔
3196
                return NewDetailedLinkError(failure, OutgoingFailureHTLCExceedsMax)
7✔
3197
        }
3198

3199
        // We want to avoid offering an HTLC which will expire in the near
3200
        // future, so we'll reject an HTLC if the outgoing expiration time is
3201
        // too close to the current height.
3202
        if timeout <= heightNow+l.cfg.OutgoingCltvRejectDelta {
444✔
3203
                l.log.Warnf("htlc(%x) has an expiry that's too soon: "+
2✔
3204
                        "outgoing_expiry=%v, best_height=%v", payHash[:],
2✔
3205
                        timeout, heightNow)
2✔
3206

2✔
3207
                cb := func(upd *lnwire.ChannelUpdate1) lnwire.FailureMessage {
4✔
3208
                        return lnwire.NewExpiryTooSoon(*upd)
2✔
3209
                }
2✔
3210
                failure := l.createFailureWithUpdate(false, originalScid, cb)
2✔
3211
                return NewLinkError(failure)
2✔
3212
        }
3213

3214
        // Check absolute max delta.
3215
        if timeout > l.cfg.MaxOutgoingCltvExpiry+heightNow {
441✔
3216
                l.log.Warnf("outgoing htlc(%x) has a time lock too far in "+
1✔
3217
                        "the future: got %v, but maximum is %v", payHash[:],
1✔
3218
                        timeout-heightNow, l.cfg.MaxOutgoingCltvExpiry)
1✔
3219

1✔
3220
                return NewLinkError(&lnwire.FailExpiryTooFar{})
1✔
3221
        }
1✔
3222

3223
        // Check to see if there is enough balance in this channel.
3224
        if amt > l.Bandwidth() {
444✔
3225
                l.log.Warnf("insufficient bandwidth to route htlc: %v is "+
5✔
3226
                        "larger than %v", amt, l.Bandwidth())
5✔
3227
                cb := func(upd *lnwire.ChannelUpdate1) lnwire.FailureMessage {
10✔
3228
                        return lnwire.NewTemporaryChannelFailure(upd)
5✔
3229
                }
5✔
3230
                failure := l.createFailureWithUpdate(false, originalScid, cb)
5✔
3231
                return NewDetailedLinkError(
5✔
3232
                        failure, OutgoingFailureInsufficientBalance,
5✔
3233
                )
5✔
3234
        }
3235

3236
        return nil
438✔
3237
}
3238

3239
// Stats returns the statistics of channel link.
3240
//
3241
// NOTE: Part of the ChannelLink interface.
3242
func (l *channelLink) Stats() (uint64, lnwire.MilliSatoshi, lnwire.MilliSatoshi) {
8✔
3243
        snapshot := l.channel.StateSnapshot()
8✔
3244

8✔
3245
        return snapshot.ChannelCommitment.CommitHeight,
8✔
3246
                snapshot.TotalMSatSent,
8✔
3247
                snapshot.TotalMSatReceived
8✔
3248
}
8✔
3249

3250
// String returns the string representation of channel link.
3251
//
3252
// NOTE: Part of the ChannelLink interface.
3253
func (l *channelLink) String() string {
×
3254
        return l.channel.ChannelPoint().String()
×
3255
}
×
3256

3257
// handleSwitchPacket handles switch packets. These packets might be
3258
// forwarded to us from another channel link in case the htlc update came from
3259
// another peer, or if the update was created locally by a user.
3260
//
3261
// NOTE: Part of the packetHandler interface.
3262
func (l *channelLink) handleSwitchPacket(pkt *htlcPacket) error {
482✔
3263
        l.log.Tracef("received switch packet inkey=%v, outkey=%v",
482✔
3264
                pkt.inKey(), pkt.outKey())
482✔
3265

482✔
3266
        return l.mailBox.AddPacket(pkt)
482✔
3267
}
482✔
3268

3269
// HandleChannelUpdate handles the htlc requests (settle/add/fail) sent
3270
// to us from the remote peer we have a channel with.
3271
//
3272
// NOTE: Part of the ChannelLink interface.
3273
func (l *channelLink) HandleChannelUpdate(message lnwire.Message) {
3,347✔
3274
        select {
3,347✔
3275
        case <-l.Quit:
×
3276
                // Return early if the link is already in the process of
×
3277
                // quitting. It doesn't make sense to hand the message to the
×
3278
                // mailbox here.
×
3279
                return
×
3280
        default:
3,347✔
3281
        }
3282

3283
        err := l.mailBox.AddMessage(message)
3,347✔
3284
        if err != nil {
3,347✔
3285
                l.log.Errorf("failed to add Message to mailbox: %v", err)
×
3286
        }
×
3287
}
3288

3289
// updateChannelFee updates the commitment fee-per-kw on this channel by
3290
// committing to an update_fee message.
3291
func (l *channelLink) updateChannelFee(feePerKw chainfee.SatPerKWeight) error {
3✔
3292
        l.log.Infof("updating commit fee to %v", feePerKw)
3✔
3293

3✔
3294
        // We skip sending the UpdateFee message if the channel is not
3✔
3295
        // currently eligible to forward messages.
3✔
3296
        if !l.EligibleToUpdate() {
3✔
3297
                l.log.Debugf("skipping fee update for inactive channel")
×
3298
                return nil
×
3299
        }
×
3300

3301
        // Check and see if our proposed fee-rate would make us exceed the fee
3302
        // threshold.
3303
        thresholdExceeded, err := l.exceedsFeeExposureLimit(feePerKw)
3✔
3304
        if err != nil {
3✔
3305
                // This shouldn't typically happen. If it does, it indicates
×
3306
                // something is wrong with our channel state.
×
3307
                return err
×
3308
        }
×
3309

3310
        if thresholdExceeded {
3✔
3311
                return fmt.Errorf("link fee threshold exceeded")
×
3312
        }
×
3313

3314
        // First, we'll update the local fee on our commitment.
3315
        if err := l.channel.UpdateFee(feePerKw); err != nil {
3✔
3316
                return err
×
3317
        }
×
3318

3319
        // The fee passed the channel's validation checks, so we update the
3320
        // mailbox feerate.
3321
        l.mailBox.SetFeeRate(feePerKw)
3✔
3322

3✔
3323
        // We'll then attempt to send a new UpdateFee message, and also lock it
3✔
3324
        // in immediately by triggering a commitment update.
3✔
3325
        msg := lnwire.NewUpdateFee(l.ChanID(), uint32(feePerKw))
3✔
3326
        if err := l.cfg.Peer.SendMessage(false, msg); err != nil {
3✔
3327
                return err
×
3328
        }
×
3329
        return l.updateCommitTx()
3✔
3330
}
3331
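// Editor's note (not lnd code): the ordering above matters; sketched, with
// chanID assumed in scope:
//
//      l.channel.UpdateFee(feePerKw)  // validate and apply locally first
//      l.mailBox.SetFeeRate(feePerKw) // keep mailbox dust logic in sync
//      l.cfg.Peer.SendMessage(false, lnwire.NewUpdateFee(chanID, uint32(feePerKw)))
//      l.updateCommitTx()             // lock the new feerate in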

3332
// processRemoteSettleFails accepts a batch of settle/fail payment descriptors
3333
// after receiving a revocation from the remote party, and reprocesses them in
3334
// the context of the provided forwarding package. Any settles or fails that
3335
// have already been acknowledged in the forwarding package will not be sent to
3336
// the switch.
3337
func (l *channelLink) processRemoteSettleFails(fwdPkg *channeldb.FwdPkg) {
1,182✔
3338
        if len(fwdPkg.SettleFails) == 0 {
2,050✔
3339
                return
868✔
3340
        }
868✔
3341

3342
        l.log.Debugf("settle-fail-filter: %v", fwdPkg.SettleFailFilter)
318✔
3343

318✔
3344
        var switchPackets []*htlcPacket
318✔
3345
        for i, update := range fwdPkg.SettleFails {
636✔
3346
                destRef := fwdPkg.DestRef(uint16(i))
318✔
3347

318✔
3348
                // Skip any settles or fails that have already been
318✔
3349
                // acknowledged by the incoming link that originated the
318✔
3350
                // forwarded Add.
318✔
3351
                if fwdPkg.SettleFailFilter.Contains(uint16(i)) {
318✔
3352
                        continue
×
3353
                }
3354

3355
                // TODO(roasbeef): rework log entries to a shared
3356
                // interface.
3357

3358
                switch msg := update.UpdateMsg.(type) {
318✔
3359
                // A settle for an HTLC we previously forwarded has been
3360
                // received. So we'll forward the HTLC to the switch which will
3361
                // handle propagating the settle to the prior hop.
3362
                case *lnwire.UpdateFulfillHTLC:
195✔
3363
                        // If hodl.SettleIncoming is requested, we will not
195✔
3364
                        // forward the SETTLE to the switch and will not signal
195✔
3365
                        // a free slot on the commitment transaction.
195✔
3366
                        if l.cfg.HodlMask.Active(hodl.SettleIncoming) {
195✔
3367
                                l.log.Warnf(hodl.SettleIncoming.Warning())
×
3368
                                continue
×
3369
                        }
3370

3371
                        settlePacket := &htlcPacket{
195✔
3372
                                outgoingChanID: l.ShortChanID(),
195✔
3373
                                outgoingHTLCID: msg.ID,
195✔
3374
                                destRef:        &destRef,
195✔
3375
                                htlc:           msg,
195✔
3376
                        }
195✔
3377

195✔
3378
                        // Add the packet to the batch to be forwarded, and
195✔
3379
                        // notify the overflow queue that a spare spot has been
195✔
3380
                        // freed up within the commitment state.
195✔
3381
                        switchPackets = append(switchPackets, settlePacket)
195✔
3382

3383
                // A failureCode message for a previously forwarded HTLC has
3384
                // been received. As a result a new slot will be freed up in
3385
                // our commitment state, so we'll forward this to the switch so
3386
                // the backwards undo can continue.
3387
                case *lnwire.UpdateFailHTLC:
127✔
3388
                        // If hodl.SettleIncoming is requested, we will not
127✔
3389
                        // forward the FAIL to the switch and will not signal a
127✔
3390
                        // free slot on the commitment transaction.
127✔
3391
                        if l.cfg.HodlMask.Active(hodl.FailIncoming) {
127✔
3392
                                l.log.Warnf(hodl.FailIncoming.Warning())
×
3393
                                continue
×
3394
                        }
3395

3396
                        // Fetch the reason the HTLC was canceled so we can
3397
                        // continue to propagate it. This failure originated
3398
                        // from another node, so the linkFailure field is not
3399
                        // set on the packet.
3400
                        failPacket := &htlcPacket{
127✔
3401
                                outgoingChanID: l.ShortChanID(),
127✔
3402
                                outgoingHTLCID: msg.ID,
127✔
3403
                                destRef:        &destRef,
127✔
3404
                                htlc:           msg,
127✔
3405
                        }
127✔
3406

127✔
3407
                        l.log.Debugf("Failed to send HTLC with ID=%d", msg.ID)
127✔
3408

127✔
3409
                        // If the failure message lacks an HMAC (but includes
127✔
3410
                        // the 4 bytes for encoding the message and padding
127✔
3411
                        // lengths, then this means that we received it as an
127✔
3412
                        // UpdateFailMalformedHTLC. As a result, we'll signal
127✔
3413
                        // that we need to convert this error within the switch
127✔
3414
                        // to an actual error, by encrypting it as if we were
127✔
3415
                        // the originating hop.
127✔
3416
                        convertedErrorSize := lnwire.FailureMessageLength + 4
127✔
3417
                        if len(msg.Reason) == convertedErrorSize {
134✔
3418
                                failPacket.convertedError = true
7✔
3419
                        }
7✔
3420

3421
                        // Add the packet to the batch to be forwarded, and
3422
                        // notify the overflow queue that a spare spot has been
3423
                        // freed up within the commitment state.
3424
                        switchPackets = append(switchPackets, failPacket)
127✔
3425
                }
3426
        }
3427

3428
        // Only spawn the task forward packets we have a non-zero number.
3429
        if len(switchPackets) > 0 {
636✔
3430
                go l.forwardBatch(false, switchPackets...)
318✔
3431
        }
318✔
3432
}
3433

3434
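// Note (added for clarity, not part of the original file): the
// "FailureMessageLength + 4" size above comes from the onion failure
// encoding. A reason converted from an UpdateFailMalformedHTLC carries only
// the fixed lnwire.FailureMessageLength bytes of failure message plus
// padding, framed by two uint16 length fields, and lacks the HMAC that a
// normally forwarded, encrypted reason would include.
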
// processRemoteAdds serially processes each of the Add payment descriptors
// which have been "locked-in" by receiving a revocation from the remote party.
// The forwarding package provided instructs how to process this batch,
// indicating whether this is the first time these Adds are being processed, or
// whether we are reprocessing as a result of a failure or restart. Adds that
// have already been acknowledged in the forwarding package will be ignored.
//
//nolint:funlen
func (l *channelLink) processRemoteAdds(fwdPkg *channeldb.FwdPkg) {
        l.log.Tracef("processing %d remote adds for height %d",
                len(fwdPkg.Adds), fwdPkg.Height)

        decodeReqs := make(
                []hop.DecodeHopIteratorRequest, 0, len(fwdPkg.Adds),
        )
        for _, update := range fwdPkg.Adds {
                if msg, ok := update.UpdateMsg.(*lnwire.UpdateAddHTLC); ok {
                        // Before adding the new htlc to the state machine,
                        // parse the onion object in order to obtain the
                        // routing information with the DecodeHopIterator
                        // function, which processes the Sphinx packet.
                        onionReader := bytes.NewReader(msg.OnionBlob[:])

                        req := hop.DecodeHopIteratorRequest{
                                OnionReader:    onionReader,
                                RHash:          msg.PaymentHash[:],
                                IncomingCltv:   msg.Expiry,
                                IncomingAmount: msg.Amount,
                                BlindingPoint:  msg.BlindingPoint,
                        }

                        decodeReqs = append(decodeReqs, req)
                }
        }

        // Atomically decode the incoming htlcs, simultaneously checking for
        // replay attempts. A particular index in the returned, spare list of
        // channel iterators should only be used if the failure code at the
        // same index is lnwire.FailCodeNone.
        decodeResps, sphinxErr := l.cfg.DecodeHopIterators(
                fwdPkg.ID(), decodeReqs,
        )
        if sphinxErr != nil {
                l.failf(LinkFailureError{code: ErrInternalError},
                        "unable to decode hop iterators: %v", sphinxErr)
                return
        }

        var switchPackets []*htlcPacket

        for i, update := range fwdPkg.Adds {
                idx := uint16(i)

                //nolint:forcetypeassert
                add := *update.UpdateMsg.(*lnwire.UpdateAddHTLC)
                sourceRef := fwdPkg.SourceRef(idx)

                if fwdPkg.State == channeldb.FwdStateProcessed &&
                        fwdPkg.AckFilter.Contains(idx) {

                        // If this index is already found in the ack filter,
                        // the response to this forwarding decision has already
                        // been committed by one of our commitment txns. ADDs
                        // in this state are waiting for the rest of the fwding
                        // package to get acked before being garbage collected.
                        continue
                }

                // An incoming HTLC add has been fully locked in. As a result
                // we can now examine the forwarding details of the HTLC, and
                // the HTLC itself to decide if: we should forward it, cancel
                // it, or are able to settle it (and it adheres to our fee
                // related constraints).

                // Before adding the new htlc to the state machine, parse the
                // onion object in order to obtain the routing information with
                // the DecodeHopIterator function, which processes the Sphinx
                // packet.
                chanIterator, failureCode := decodeResps[i].Result()
                if failureCode != lnwire.CodeNone {
                        // If we're unable to process the onion blob then we
                        // should send the malformed htlc error to the payment
                        // sender.
                        l.sendMalformedHTLCError(
                                add.ID, failureCode, add.OnionBlob, &sourceRef,
                        )

                        l.log.Errorf("unable to decode onion hop "+
                                "iterator: %v", failureCode)
                        continue
                }

                heightNow := l.cfg.BestHeight()

                pld, routeRole, pldErr := chanIterator.HopPayload()
                if pldErr != nil {
                        // If we're unable to process the onion payload, or we
                        // received an invalid onion payload failure, then we
                        // should send an error back to the caller so the HTLC
                        // can be canceled.
                        var failedType uint64

                        // We need to get the underlying error value, so we
                        // can't use errors.As as suggested by the linter.
                        //nolint:errorlint
                        if e, ok := pldErr.(hop.ErrInvalidPayload); ok {
                                failedType = uint64(e.Type)
                        }

                        // If we couldn't parse the payload, make our best
                        // effort at creating an error encrypter that knows
                        // what blinding type we were, but if we couldn't
                        // parse the payload we have no way of knowing whether
                        // we were the introduction node or not.
                        //
                        //nolint:lll
                        obfuscator, failCode := chanIterator.ExtractErrorEncrypter(
                                l.cfg.ExtractErrorEncrypter,
                                // We need our route role here because we
                                // couldn't parse or validate the payload.
                                routeRole == hop.RouteRoleIntroduction,
                        )
                        if failCode != lnwire.CodeNone {
                                l.log.Errorf("could not extract error "+
                                        "encrypter: %v", pldErr)

                                // We can't process this htlc, send back
                                // malformed.
                                l.sendMalformedHTLCError(
                                        add.ID, failureCode, add.OnionBlob,
                                        &sourceRef,
                                )

                                continue
                        }

                        // TODO: currently none of the test unit infrastructure
                        // is set up to handle TLV payloads, so testing this
                        // would require implementing a separate mock iterator
                        // for TLV payloads that also supports injecting invalid
                        // payloads. Deferring this non-trivial effort till a
                        // later date.
                        failure := lnwire.NewInvalidOnionPayload(failedType, 0)

                        l.sendHTLCError(
                                add, sourceRef, NewLinkError(failure),
                                obfuscator, false,
                        )

                        l.log.Errorf("unable to decode forwarding "+
                                "instructions: %v", pldErr)

                        continue
                }

                // Retrieve the onion obfuscator from the onion blob in order
                // to produce the initial obfuscation of the onion failureCode.
                obfuscator, failureCode := chanIterator.ExtractErrorEncrypter(
                        l.cfg.ExtractErrorEncrypter,
                        routeRole == hop.RouteRoleIntroduction,
                )
                if failureCode != lnwire.CodeNone {
                        // If we're unable to process the onion blob then we
                        // should send the malformed htlc error to the payment
                        // sender.
                        l.sendMalformedHTLCError(
                                add.ID, failureCode, add.OnionBlob,
                                &sourceRef,
                        )

                        l.log.Errorf("unable to decode onion "+
                                "obfuscator: %v", failureCode)

                        continue
                }

                fwdInfo := pld.ForwardingInfo()

                // Check whether the payload we've just processed uses our
                // node as the introduction point (gave us a blinding key in
                // the payload itself) and fail it back if we don't support
                // route blinding.
                if fwdInfo.NextBlinding.IsSome() &&
                        l.cfg.DisallowRouteBlinding {

                        failure := lnwire.NewInvalidBlinding(
                                fn.Some(add.OnionBlob),
                        )

                        l.sendHTLCError(
                                add, sourceRef, NewLinkError(failure),
                                obfuscator, false,
                        )

                        l.log.Error("rejected htlc that uses us as an " +
                                "introduction point when we do not support " +
                                "route blinding")

                        continue
                }

                switch fwdInfo.NextHop {
                case hop.Exit:
                        err := l.processExitHop(
                                add, sourceRef, obfuscator, fwdInfo,
                                heightNow, pld,
                        )
                        if err != nil {
                                l.failf(LinkFailureError{
                                        code: ErrInternalError,
                                }, err.Error()) //nolint

                                return
                        }

                // There are additional channels left within this route. So
                // we'll simply do some forwarding package book-keeping.
                default:
                        // If hodl.AddIncoming is requested, we will not
                        // validate the forwarded ADD, nor will we send the
                        // packet to the htlc switch.
                        if l.cfg.HodlMask.Active(hodl.AddIncoming) {
                                l.log.Warnf(hodl.AddIncoming.Warning())
                                continue
                        }

                        endorseValue := l.experimentalEndorsement(
                                record.CustomSet(add.CustomRecords),
                        )
                        endorseType := uint64(
                                lnwire.ExperimentalEndorsementType,
                        )

                        switch fwdPkg.State {
                        case channeldb.FwdStateProcessed:
                                // This add was not forwarded on the previous
                                // processing phase, run it through our
                                // validation pipeline to reproduce an error.
                                // This may trigger a different error due to
                                // expiring timelocks, but we expect that an
                                // error will be reproduced.
                                if !fwdPkg.FwdFilter.Contains(idx) {
                                        break
                                }

                                // Otherwise, it was already processed, so we
                                // can collect it and continue.
                                outgoingAdd := &lnwire.UpdateAddHTLC{
                                        Expiry:        fwdInfo.OutgoingCTLV,
                                        Amount:        fwdInfo.AmountToForward,
                                        PaymentHash:   add.PaymentHash,
                                        BlindingPoint: fwdInfo.NextBlinding,
                                }

                                endorseValue.WhenSome(func(e byte) {
                                        custRecords := map[uint64][]byte{
                                                endorseType: {e},
                                        }

                                        outgoingAdd.CustomRecords = custRecords
                                })

                                // Finally, we'll encode the onion packet for
                                // the _next_ hop using the hop iterator
                                // decoded for the current hop.
                                buf := bytes.NewBuffer(
                                        outgoingAdd.OnionBlob[0:0],
                                )

                                // We know this cannot fail, as this ADD
                                // was marked forwarded in a previous
                                // round of processing.
                                chanIterator.EncodeNextHop(buf)

                                inboundFee := l.cfg.FwrdingPolicy.InboundFee

                                //nolint:lll
                                updatePacket := &htlcPacket{
                                        incomingChanID:       l.ShortChanID(),
                                        incomingHTLCID:       add.ID,
                                        outgoingChanID:       fwdInfo.NextHop,
                                        sourceRef:            &sourceRef,
                                        incomingAmount:       add.Amount,
                                        amount:               outgoingAdd.Amount,
                                        htlc:                 outgoingAdd,
                                        obfuscator:           obfuscator,
                                        incomingTimeout:      add.Expiry,
                                        outgoingTimeout:      fwdInfo.OutgoingCTLV,
                                        inOnionCustomRecords: pld.CustomRecords(),
                                        inboundFee:           inboundFee,
                                        inWireCustomRecords:  add.CustomRecords.Copy(),
                                }
                                switchPackets = append(
                                        switchPackets, updatePacket,
                                )

                                continue
                        }

                        // TODO(roasbeef): ensure we don't accept an outrageous
                        // timeout for the htlc.

                        // With all our forwarding constraints met, we'll
                        // create the outgoing HTLC using the parameters as
                        // specified in the forwarding info.
                        addMsg := &lnwire.UpdateAddHTLC{
                                Expiry:        fwdInfo.OutgoingCTLV,
                                Amount:        fwdInfo.AmountToForward,
                                PaymentHash:   add.PaymentHash,
                                BlindingPoint: fwdInfo.NextBlinding,
                        }

                        endorseValue.WhenSome(func(e byte) {
                                addMsg.CustomRecords = map[uint64][]byte{
                                        endorseType: {e},
                                }
                        })

                        // Finally, we'll encode the onion packet for the
                        // _next_ hop using the hop iterator decoded for the
                        // current hop.
                        buf := bytes.NewBuffer(addMsg.OnionBlob[0:0])
                        err := chanIterator.EncodeNextHop(buf)
                        if err != nil {
                                l.log.Errorf("unable to encode the "+
                                        "remaining route %v", err)

                                cb := func(upd *lnwire.ChannelUpdate1) lnwire.FailureMessage { //nolint:lll
                                        return lnwire.NewTemporaryChannelFailure(upd)
                                }

                                failure := l.createFailureWithUpdate(
                                        true, hop.Source, cb,
                                )

                                l.sendHTLCError(
                                        add, sourceRef, NewLinkError(failure),
                                        obfuscator, false,
                                )
                                continue
                        }

                        // Now that this add has been reprocessed, only append
                        // it to our list of packets to forward to the switch
                        // if this is the first time processing the add. If the
                        // fwd pkg has already been processed, then we entered
                        // the above section to recreate a previous error. If
                        // the packet had previously been forwarded, it would
                        // have been added to switchPackets at the top of this
                        // section.
                        if fwdPkg.State == channeldb.FwdStateLockedIn {
                                inboundFee := l.cfg.FwrdingPolicy.InboundFee

                                //nolint:lll
                                updatePacket := &htlcPacket{
                                        incomingChanID:       l.ShortChanID(),
                                        incomingHTLCID:       add.ID,
                                        outgoingChanID:       fwdInfo.NextHop,
                                        sourceRef:            &sourceRef,
                                        incomingAmount:       add.Amount,
                                        amount:               addMsg.Amount,
                                        htlc:                 addMsg,
                                        obfuscator:           obfuscator,
                                        incomingTimeout:      add.Expiry,
                                        outgoingTimeout:      fwdInfo.OutgoingCTLV,
                                        inOnionCustomRecords: pld.CustomRecords(),
                                        inboundFee:           inboundFee,
                                        inWireCustomRecords:  add.CustomRecords.Copy(),
                                }

                                fwdPkg.FwdFilter.Set(idx)
                                switchPackets = append(switchPackets,
                                        updatePacket)
                        }
                }
        }

        // Commit the htlcs we are intending to forward if this package has not
        // been fully processed.
        if fwdPkg.State == channeldb.FwdStateLockedIn {
                err := l.channel.SetFwdFilter(fwdPkg.Height, fwdPkg.FwdFilter)
                if err != nil {
                        l.failf(LinkFailureError{code: ErrInternalError},
                                "unable to set fwd filter: %v", err)
                        return
                }
        }

        if len(switchPackets) == 0 {
                return
        }

        replay := fwdPkg.State != channeldb.FwdStateLockedIn

        l.log.Debugf("forwarding %d packets to switch: replay=%v",
                len(switchPackets), replay)

        // NOTE: This call is made synchronous so that we ensure all circuits
        // are committed in the exact order that they are processed in the
        // link. Failing to do this could cause reorderings/gaps in the range
        // of opened circuits, which violates assumptions made by the circuit
        // trimming.
        l.forwardBatch(replay, switchPackets...)
}

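// Orientation note (added, not part of the original file): a forwarding
// package progresses from channeldb.FwdStateLockedIn to FwdStateProcessed
// before eventually being garbage collected. processRemoteAdds only hands a
// fresh Add to the switch on the locked-in pass, recording it in the
// FwdFilter; on a reprocessing pass it either replays packets recorded in
// that filter or re-runs validation purely to reproduce the error that was
// previously sent.
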
// experimentalEndorsement returns the value to set for our outgoing
// experimental endorsement field, wrapped in an option that is empty if the
// field should not be populated on the outgoing htlc.
func (l *channelLink) experimentalEndorsement(
        customUpdateAdd record.CustomSet) fn.Option[byte] {

        // Only relay the experimental signal if we are within the experiment
        // period.
        if !l.cfg.ShouldFwdExpEndorsement() {
                return fn.None[byte]()
        }

        // If we don't have any custom records or the experimental field is
        // not set, just forward a zero value.
        if len(customUpdateAdd) == 0 {
                return fn.Some[byte](lnwire.ExperimentalUnendorsed)
        }

        t := uint64(lnwire.ExperimentalEndorsementType)
        value, set := customUpdateAdd[t]
        if !set {
                return fn.Some[byte](lnwire.ExperimentalUnendorsed)
        }

        // We expect at least one byte for this field, so consider it invalid
        // if it has no data and just forward a zero value.
        if len(value) == 0 {
                return fn.Some[byte](lnwire.ExperimentalUnendorsed)
        }

        // Only forward endorsed if the incoming htlc is endorsed.
        if value[0] == lnwire.ExperimentalEndorsed {
                return fn.Some[byte](lnwire.ExperimentalEndorsed)
        }

        // Forward as unendorsed otherwise, including cases where we've
        // received an invalid value that uses more than 3 bits of information.
        return fn.Some[byte](lnwire.ExperimentalUnendorsed)
}

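// The relay decision above can be summarized as follows (added for
// illustration, not part of the original file), assuming the experiment
// period is active:
//
//      incoming endorsement record            outgoing value
//      ---------------------------            -----------------------------
//      absent or zero-length                  lnwire.ExperimentalUnendorsed
//      first byte ExperimentalEndorsed        lnwire.ExperimentalEndorsed
//      any other value                        lnwire.ExperimentalUnendorsed
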
// processExitHop handles an htlc for which this link is the exit hop. It
// returns an error if the htlc could not be processed.
func (l *channelLink) processExitHop(add lnwire.UpdateAddHTLC,
        sourceRef channeldb.AddRef, obfuscator hop.ErrorEncrypter,
        fwdInfo hop.ForwardingInfo, heightNow uint32,
        payload invoices.Payload) error {

        // If hodl.ExitSettle is requested, we will not validate the final hop's
        // ADD, nor will we settle the corresponding invoice or respond with the
        // preimage.
        if l.cfg.HodlMask.Active(hodl.ExitSettle) {
                l.log.Warnf("%s for htlc(rhash=%x,htlcIndex=%v)",
                        hodl.ExitSettle.Warning(), add.PaymentHash, add.ID)

                return nil
        }

        // As we're the exit hop, we'll double check the hop-payload included in
        // the HTLC to ensure that it was crafted correctly by the sender and
        // is compatible with the HTLC we were extended.
        //
        // For a special case, if the fwdInfo doesn't have any blinded path
        // information, and the incoming HTLC had special extra data, then
        // we'll skip this amount check. The invoice acceptor will make sure we
        // reject the HTLC if it doesn't contain the correct amount after
        // examining the custom data.
        hasBlindedPath := fwdInfo.NextBlinding.IsSome()
        customHTLC := len(add.CustomRecords) > 0 && !hasBlindedPath
        log.Tracef("Exit hop has_blinded_path=%v custom_htlc_bypass=%v",
                hasBlindedPath, customHTLC)

        if !customHTLC && add.Amount < fwdInfo.AmountToForward {
                l.log.Errorf("onion payload of incoming htlc(%x) has "+
                        "incompatible value: expected <=%v, got %v",
                        add.PaymentHash, add.Amount, fwdInfo.AmountToForward)

                failure := NewLinkError(
                        lnwire.NewFinalIncorrectHtlcAmount(add.Amount),
                )
                l.sendHTLCError(add, sourceRef, failure, obfuscator, true)

                return nil
        }

        // We'll also ensure that our time-lock value has been computed
        // correctly.
        if add.Expiry < fwdInfo.OutgoingCTLV {
                l.log.Errorf("onion payload of incoming htlc(%x) has "+
                        "incompatible time-lock: expected <=%v, got %v",
                        add.PaymentHash, add.Expiry, fwdInfo.OutgoingCTLV)

                failure := NewLinkError(
                        lnwire.NewFinalIncorrectCltvExpiry(add.Expiry),
                )

                l.sendHTLCError(add, sourceRef, failure, obfuscator, true)

                return nil
        }

        // Notify the invoiceRegistry of the exit hop htlc. If we crash right
        // after this, this code will be re-executed after restart. We will
        // receive back a resolution event.
        invoiceHash := lntypes.Hash(add.PaymentHash)

        circuitKey := models.CircuitKey{
                ChanID: l.ShortChanID(),
                HtlcID: add.ID,
        }

        event, err := l.cfg.Registry.NotifyExitHopHtlc(
                invoiceHash, add.Amount, add.Expiry, int32(heightNow),
                circuitKey, l.hodlQueue.ChanIn(), add.CustomRecords, payload,
        )
        if err != nil {
                return err
        }

        // Create a hodlHtlc struct and decide whether to resolve it now or
        // later.
        htlc := hodlHtlc{
                add:        add,
                sourceRef:  sourceRef,
                obfuscator: obfuscator,
        }

        // If the event is nil, the invoice is being held, so we save the
        // payment descriptor for future reference.
        if event == nil {
                l.hodlMap[circuitKey] = htlc
                return nil
        }

        // Process the received resolution.
        return l.processHtlcResolution(event, htlc)
}

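// Hodl flow note (added, not part of the original file): when
// NotifyExitHopHtlc returns a nil event the invoice is being held, so the
// htlc is parked in l.hodlMap keyed by its circuit. The eventual resolution
// from the invoice registry arrives over l.hodlQueue, whose channel was
// handed to the registry above, and is then matched back to the stored
// hodlHtlc for processing.
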
// settleHTLC settles the HTLC on the channel.
func (l *channelLink) settleHTLC(preimage lntypes.Preimage,
        htlcIndex uint64, sourceRef channeldb.AddRef) error {

        hash := preimage.Hash()

        l.log.Infof("settling htlc %v as exit hop", hash)

        err := l.channel.SettleHTLC(
                preimage, htlcIndex, &sourceRef, nil, nil,
        )
        if err != nil {
                return fmt.Errorf("unable to settle htlc: %w", err)
        }

        // If the link is in hodl.BogusSettle mode, replace the preimage with a
        // fake one before sending it to the peer.
        if l.cfg.HodlMask.Active(hodl.BogusSettle) {
                l.log.Warnf(hodl.BogusSettle.Warning())
                preimage = [32]byte{}
                copy(preimage[:], bytes.Repeat([]byte{2}, 32))
        }

        // The HTLC was successfully settled locally, so send a notification
        // about it to the remote peer.
        l.cfg.Peer.SendMessage(false, &lnwire.UpdateFulfillHTLC{
                ChanID:          l.ChanID(),
                ID:              htlcIndex,
                PaymentPreimage: preimage,
        })

        // Once we have successfully settled the htlc, notify a settle event.
        l.cfg.HtlcNotifier.NotifySettleEvent(
                HtlcKey{
                        IncomingCircuit: models.CircuitKey{
                                ChanID: l.ShortChanID(),
                                HtlcID: htlcIndex,
                        },
                },
                preimage,
                HtlcEventTypeReceive,
        )

        return nil
}

// forwardBatch forwards the given htlcPackets to the switch, and waits on the
// err chan for the individual responses. This method is intended to be spawned
// as a goroutine so the responses can be handled in the background.
func (l *channelLink) forwardBatch(replay bool, packets ...*htlcPacket) {
        // Don't forward packets for which we already have a response in our
        // mailbox. This could happen if a packet fails and is buffered in the
        // mailbox, and the incoming link flaps.
        var filteredPkts = make([]*htlcPacket, 0, len(packets))
        for _, pkt := range packets {
                if l.mailBox.HasPacket(pkt.inKey()) {
                        continue
                }

                filteredPkts = append(filteredPkts, pkt)
        }

        err := l.cfg.ForwardPackets(l.Quit, replay, filteredPkts...)
        if err != nil {
                log.Errorf("Unhandled error while reforwarding htlc "+
                        "settle/fail over htlcswitch: %v", err)
        }
}

// sendHTLCError cancels the HTLC and sends a cancel message back to the
// peer from which the HTLC was received.
func (l *channelLink) sendHTLCError(add lnwire.UpdateAddHTLC,
        sourceRef channeldb.AddRef, failure *LinkError,
        e hop.ErrorEncrypter, isReceive bool) {

        reason, err := e.EncryptFirstHop(failure.WireMessage())
        if err != nil {
                l.log.Errorf("unable to obfuscate error: %v", err)
                return
        }

        err = l.channel.FailHTLC(add.ID, reason, &sourceRef, nil, nil)
        if err != nil {
                l.log.Errorf("unable to cancel htlc: %v", err)
                return
        }

        // Send the appropriate failure message depending on whether we're
        // in a blinded route or not.
        if err := l.sendIncomingHTLCFailureMsg(
                add.ID, e, reason,
        ); err != nil {
                l.log.Errorf("unable to send HTLC failure: %v", err)
                return
        }

        // Notify a link failure on our incoming link. Outgoing htlc information
        // is not available at this point, because we have not decrypted the
        // onion, so it is excluded.
        var eventType HtlcEventType
        if isReceive {
                eventType = HtlcEventTypeReceive
        } else {
                eventType = HtlcEventTypeForward
        }

        l.cfg.HtlcNotifier.NotifyLinkFailEvent(
                HtlcKey{
                        IncomingCircuit: models.CircuitKey{
                                ChanID: l.ShortChanID(),
                                HtlcID: add.ID,
                        },
                },
                HtlcInfo{
                        IncomingTimeLock: add.Expiry,
                        IncomingAmt:      add.Amount,
                },
                eventType,
                failure,
                true,
        )
}

// sendIncomingHTLCFailureMsg handles sending an HTLC failure message back to
// the peer from which the HTLC was received. This function is primarily used
// to handle the special requirements of route blinding, specifically:
// - Forwarding nodes must switch out any errors with MalformedFailHTLC.
// - Introduction nodes should return regular HTLC failure messages.
//
// It accepts the original opaque failure, which will be used in the case
// that we're not part of a blinded route, and an error encrypter that'll be
// used if we are the introduction node and need to present an error as if
// we're the failing party.
func (l *channelLink) sendIncomingHTLCFailureMsg(htlcIndex uint64,
        e hop.ErrorEncrypter,
        originalFailure lnwire.OpaqueReason) error {

        var msg lnwire.Message
        switch {
        // Our circuit's error encrypter will be nil if this was a locally
        // initiated payment. We can only hit a blinded error for a locally
        // initiated payment if we allow ourselves to be picked as the
        // introduction node for our own payments and in that case we
        // shouldn't reach this code. To prevent the HTLC getting stuck,
        // we fail it back and log an error.
        case e == nil:
                msg = &lnwire.UpdateFailHTLC{
                        ChanID: l.ChanID(),
                        ID:     htlcIndex,
                        Reason: originalFailure,
                }

                l.log.Errorf("Unexpected blinded failure when "+
                        "we are the sending node, incoming htlc: %v(%v)",
                        l.ShortChanID(), htlcIndex)

        // For cleartext hops (i.e. non-blinded/normal) we don't need any
        // transformation on the error message and can just send the original.
        case !e.Type().IsBlinded():
                msg = &lnwire.UpdateFailHTLC{
                        ChanID: l.ChanID(),
                        ID:     htlcIndex,
                        Reason: originalFailure,
                }

        // When we're the introduction node, we need to convert the error to
        // an UpdateFailHTLC.
        case e.Type() == hop.EncrypterTypeIntroduction:
                l.log.Debugf("Introduction blinded node switching out failure "+
                        "error: %v", htlcIndex)

                // The specification does not require that we set the onion
                // blob.
                failureMsg := lnwire.NewInvalidBlinding(
                        fn.None[[lnwire.OnionPacketSize]byte](),
                )
                reason, err := e.EncryptFirstHop(failureMsg)
                if err != nil {
                        return err
                }

                msg = &lnwire.UpdateFailHTLC{
                        ChanID: l.ChanID(),
                        ID:     htlcIndex,
                        Reason: reason,
                }

        // If we are a relaying node, we need to switch out any error that
        // we've received to a malformed HTLC error.
        case e.Type() == hop.EncrypterTypeRelaying:
                l.log.Debugf("Relaying blinded node switching out malformed "+
                        "error: %v", htlcIndex)

                msg = &lnwire.UpdateFailMalformedHTLC{
                        ChanID:      l.ChanID(),
                        ID:          htlcIndex,
                        FailureCode: lnwire.CodeInvalidBlinding,
                }

        default:
                return fmt.Errorf("unexpected encrypter: %v", e)
        }

        if err := l.cfg.Peer.SendMessage(false, msg); err != nil {
                l.log.Warnf("Send update fail failed: %v", err)
        }

        return nil
}

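// Summary (added, not part of the original file): inside a blinded route the
// only failure ever surfaced past the introduction node is invalid_blinding.
// A relaying node converts whatever error it holds into an
// UpdateFailMalformedHTLC carrying lnwire.CodeInvalidBlinding, while the
// introduction node encrypts an invalid_blinding failure like any ordinary
// error, so the sender learns nothing about where inside the blinded portion
// the failure occurred.
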
// sendMalformedHTLCError is a helper function which sends the malformed HTLC
// update to the payment sender.
func (l *channelLink) sendMalformedHTLCError(htlcIndex uint64,
        code lnwire.FailCode, onionBlob [lnwire.OnionPacketSize]byte,
        sourceRef *channeldb.AddRef) {

        shaOnionBlob := sha256.Sum256(onionBlob[:])
        err := l.channel.MalformedFailHTLC(htlcIndex, code, shaOnionBlob, sourceRef)
        if err != nil {
                l.log.Errorf("unable to cancel htlc: %v", err)
                return
        }

        l.cfg.Peer.SendMessage(false, &lnwire.UpdateFailMalformedHTLC{
                ChanID:       l.ChanID(),
                ID:           htlcIndex,
                ShaOnionBlob: shaOnionBlob,
                FailureCode:  code,
        })
}

// failf is a function which is used to encapsulate the action necessary for
// properly failing the link. It takes a LinkFailureError, which will be passed
// to the OnChannelFailure closure, in order for it to determine if we should
// force close the channel, and if we should send an error message to the
// remote peer.
func (l *channelLink) failf(linkErr LinkFailureError, format string,
        a ...interface{}) {

        reason := fmt.Errorf(format, a...)

        // Return if we have already notified about a failure.
        if l.failed {
                l.log.Warnf("ignoring link failure (%v), as link already "+
                        "failed", reason)
                return
        }

        l.log.Errorf("failing link: %s with error: %v", reason, linkErr)

        // Set failed, such that we won't process any more updates, and notify
        // the peer about the failure.
        l.failed = true
        l.cfg.OnChannelFailure(l.ChanID(), l.ShortChanID(), linkErr)
}

// FundingCustomBlob returns the custom funding blob of the channel that this
// link is associated with. The funding blob represents static information about
// the channel that was created at channel funding time.
func (l *channelLink) FundingCustomBlob() fn.Option[tlv.Blob] {
        if l.channel == nil {
                return fn.None[tlv.Blob]()
        }

        if l.channel.State() == nil {
                return fn.None[tlv.Blob]()
        }

        return l.channel.State().CustomBlob
}

// CommitmentCustomBlob returns the custom blob of the current local commitment
// of the channel that this link is associated with.
func (l *channelLink) CommitmentCustomBlob() fn.Option[tlv.Blob] {
        if l.channel == nil {
                return fn.None[tlv.Blob]()
        }

        return l.channel.LocalCommitmentBlob()
}